repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
okoala/sublime-bak | Packages/ConvertToUTF8/chardet/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR"
| mit |
kevin-intel/scikit-learn | sklearn/gaussian_process/tests/test_gpc.py | 3 | 10065 | """Testing for Gaussian process classification."""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy.optimize import approx_fprime
import pytest
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing \
import assert_almost_equal, assert_array_equal
def f(x):
return np.sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int) # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
non_fixed_kernels = [kernel for kernel in kernels
if kernel != fixed_kernel]
@pytest.mark.parametrize('kernel', kernels)
def test_predict_consistent(kernel):
# Check that binary predictions are consistent with predicted probabilities >= 0.5.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
def test_predict_consistent_structured():
# Check that binary predictions are consistent with predicted probabilities >= 0.5.
X = ['A', 'AB', 'B']
y = np.array([True, False, True])
kernel = MiniSeqKernel(baseline_similarity_bounds='fixed')
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_lml_improving(kernel):
# Test that hyperparameter-tuning improves log-marginal likelihood.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert (gpc.log_marginal_likelihood(gpc.kernel_.theta) >
gpc.log_marginal_likelihood(kernel.theta))
@pytest.mark.parametrize('kernel', kernels)
def test_lml_precomputed(kernel):
# Test that lml of optimized kernel is stored correctly.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(), 7)
@pytest.mark.parametrize('kernel', kernels)
def test_lml_without_cloning_kernel(kernel):
# Test that clone_kernel=False modifies kernel.theta in place as a side effect.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
input_theta = np.ones(gpc.kernel_.theta.shape, dtype=np.float64)
gpc.log_marginal_likelihood(input_theta, clone_kernel=False)
assert_almost_equal(gpc.kernel_.theta, input_theta, 7)
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_converged_to_local_maximum(kernel):
# Test that we are in local maximum after hyperparameter-optimization.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
assert np.all((np.abs(lml_gradient) < 1e-4) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 1]))
@pytest.mark.parametrize('kernel', kernels)
def test_lml_gradient(kernel):
# Compare analytic and numeric gradient of log marginal likelihood.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpc.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the log marginal likelihood of the chosen theta.
n_samples, n_features = 25, 2
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1e-3] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features)
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessClassifier(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert lml > last_lml - np.finfo(np.float32).eps
last_lml = lml
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_custom_optimizer(kernel):
# Test that GPC can use externally defined optimizers.
# Define a dummy optimizer that simply tests 10 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(10):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
gpc.fit(X, y_mc)
# Check that the custom optimizer improved the marginal likelihood.
assert (gpc.log_marginal_likelihood(gpc.kernel_.theta) >
gpc.log_marginal_likelihood(kernel.theta))
@pytest.mark.parametrize('kernel', kernels)
def test_multi_class(kernel):
# Test GPC for multi-class classification problems.
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
assert_almost_equal(y_prob.sum(1), 1)
y_pred = gpc.predict(X2)
assert_array_equal(np.argmax(y_prob, 1), y_pred)
@pytest.mark.parametrize('kernel', kernels)
def test_multi_class_n_jobs(kernel):
# Test that multi-class GPC produces identical results with n_jobs>1.
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
gpc_2.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
y_prob_2 = gpc_2.predict_proba(X2)
assert_almost_equal(y_prob, y_prob_2)
def test_warning_bounds():
kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
gpc = GaussianProcessClassifier(kernel=kernel)
warning_message = (
"The optimal value found for dimension 0 of parameter "
"length_scale is close to the specified upper bound "
"0.001. Increasing the bound and calling fit again may "
"find a better value."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
gpc.fit(X, y)
kernel_sum = (WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) +
RBF(length_scale_bounds=[1e3, 1e5]))
gpc_sum = GaussianProcessClassifier(kernel=kernel_sum)
with pytest.warns(None) as record:
with warnings.catch_warnings():
# scipy 1.3.0 uses tostring which is deprecated in numpy
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
gpc_sum.fit(X, y)
assert len(record) == 2
assert record[0].message.args[0] == ("The optimal value found for "
"dimension 0 of parameter "
"k1__noise_level is close to the "
"specified upper bound 0.001. "
"Increasing the bound and calling "
"fit again may find a better value.")
assert record[1].message.args[0] == ("The optimal value found for "
"dimension 0 of parameter "
"k2__length_scale is close to the "
"specified lower bound 1000.0. "
"Decreasing the bound and calling "
"fit again may find a better value.")
X_tile = np.tile(X, 2)
kernel_dims = RBF(length_scale=[1., 2.],
length_scale_bounds=[1e1, 1e2])
gpc_dims = GaussianProcessClassifier(kernel=kernel_dims)
with pytest.warns(None) as record:
with warnings.catch_warnings():
# scipy 1.3.0 uses tostring which is deprecated in numpy
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
gpc_dims.fit(X_tile, y)
assert len(record) == 2
assert record[0].message.args[0] == ("The optimal value found for "
"dimension 0 of parameter "
"length_scale is close to the "
"specified upper bound 100.0. "
"Increasing the bound and calling "
"fit again may find a better value.")
assert record[1].message.args[0] == ("The optimal value found for "
"dimension 1 of parameter "
"length_scale is close to the "
"specified upper bound 100.0. "
"Increasing the bound and calling "
"fit again may find a better value.")
| bsd-3-clause |
tiborsimko/invenio-pidstore | invenio_pidstore/providers/recordid.py | 2 | 1847 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Record ID provider."""
from __future__ import absolute_import, print_function
from ..models import PIDStatus, RecordIdentifier
from .base import BaseProvider
class RecordIdProvider(BaseProvider):
"""Record identifier provider."""
pid_type = 'recid'
"""Type of persistent identifier."""
pid_provider = None
"""Provider name.
The provider name is not recorded in the PID since the provider does not
provide any additional features besides creation of record ids.
"""
default_status = PIDStatus.RESERVED
"""Record IDs are by default registered immediately.
Default: :attr:`invenio_pidstore.models.PIDStatus.RESERVED`
"""
@classmethod
def create(cls, object_type=None, object_uuid=None, **kwargs):
"""Create a new record identifier.
Note: if the object_type and object_uuid values are passed, then the
PID status will be automatically set to
:attr:`invenio_pidstore.models.PIDStatus.REGISTERED`.
:param object_type: The object type. (Default: None.)
:param object_uuid: The object identifier. (Default: None).
:param kwargs: Extra arguments forwarded to the base provider. Do not pass ``pid_value``; it is generated from the recid sequence.
"""
# Request next integer in recid sequence.
assert 'pid_value' not in kwargs
kwargs['pid_value'] = str(RecordIdentifier.next())
kwargs.setdefault('status', cls.default_status)
if object_type and object_uuid:
kwargs['status'] = PIDStatus.REGISTERED
return super(RecordIdProvider, cls).create(
object_type=object_type, object_uuid=object_uuid, **kwargs)
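# Illustrative usage sketch (not part of the original file). It assumes a
# Flask application context with a configured database; ``rec_uuid`` is a
# hypothetical record UUID.
#
#     provider = RecordIdProvider.create(object_type='rec',
#                                        object_uuid=rec_uuid)
#     provider.pid.pid_value  # next integer in the recid sequence
#     provider.pid.status     # PIDStatus.REGISTERED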
| mit |
linjoahow/W16_test1 | static/Brython3.1.3-20150514-095342/Lib/subprocess.py | 728 | 67282 | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=True, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False, pass_fds=()):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On POSIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On POSIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize will be supplied as the corresponding argument to the io.open()
function when creating the stdin/stdout/stderr pipe file objects:
0 means unbuffered (read & write are one system call and can return short),
1 means line buffered, any other positive value means use a buffer of
approximately that size. A negative bufsize, the default, means the system
default of io.DEFAULT_BUFFER_SIZE will be used.
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
On POSIX, if preexec_fn is set to a callable object, this object will be
called in the child process just before the child is executed. The use
of preexec_fn is not thread safe, using it in the presence of threads
could lead to a deadlock in the child process before the new executable
is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed. The default for close_fds
varies by platform: Always true on POSIX. True when stdin/stdout/stderr
are None on Windows, false otherwise.
pass_fds is an optional sequence of file descriptors to keep open between the
parent and child. Providing any pass_fds implicitly sets close_fds to true.
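Illustrative example (my_fd stands in for a real descriptor):
Popen(["prog"], pass_fds=(my_fd,)) keeps my_fd open in the child while
all other file descriptors above 2 are closed.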
If shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
On POSIX, if restore_signals is True all signals that Python sets to
SIG_IGN are restored to SIG_DFL in the child process before the exec.
Currently this includes the SIGPIPE, SIGXFZ and SIGXFSZ signals. This
parameter does nothing on Windows.
On POSIX, if start_new_session is True, the setsid() system call will be made
in the child process prior to executing the command.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is false, the file objects stdin, stdout and stderr
are opened as binary files, and no line ending conversion is done.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the old Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Also, the newlines attributes
of the file objects stdout, stdin and stderr are not updated by the
communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> retcode = subprocess.call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> subprocess.check_call(["ls", "-l"])
0
getstatusoutput(cmd):
Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple
(status, output). cmd is actually run as '{ cmd ; } 2>&1', so that the
returned output will contain output or error messages. A trailing newline
is stripped from the output. The exit status for the command can be
interpreted according to the rules for the C function wait(). Example:
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
getoutput(cmd):
Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
check_output(*popenargs, **kwargs):
Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> output = subprocess.check_output(["ls", "-l", "/dev/null"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
Exceptions defined within this module inherit from SubprocessError.
check_call() and check_output() will raise CalledProcessError if the
called process returns a non-zero return code. TimeoutExpired
will be raised if a timeout was specified and expired.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
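A minimal illustrative example (POSIX):
>>> p = Popen(["cat"], stdin=PIPE, stdout=PIPE)
>>> p.communicate(b"hello")
(b'hello', None)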
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (POSIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
else:
print("Child returned", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
"""
import sys
mswindows = (sys.platform == "win32")
import io
import os
import time
import traceback
import gc
import signal
import builtins
import warnings
import errno
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
# Exception classes used by this module.
class SubprocessError(Exception): pass
class CalledProcessError(SubprocessError):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
class TimeoutExpired(SubprocessError):
"""This exception is raised when the timeout expires while waiting for a
child process.
"""
def __init__(self, cmd, timeout, output=None):
self.cmd = cmd
self.timeout = timeout
self.output = output
def __str__(self):
return ("Command '%s' timed out after %s seconds" %
(self.cmd, self.timeout))
if mswindows:
import threading
import msvcrt
import _winapi
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
else:
import select
_has_poll = hasattr(select, 'poll')
import _posixsubprocess
_create_pipe = _posixsubprocess.cloexec_pipe
# When select or poll has indicated that the file is writable,
# we can write up to _PIPE_BUF bytes without risk of blocking.
# POSIX defines PIPE_BUF as >= 512.
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
"getoutput", "check_output", "CalledProcessError", "DEVNULL"]
if mswindows:
from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
STD_ERROR_HANDLE, SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
"STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
"STD_ERROR_HANDLE", "SW_HIDE",
"STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
class Handle(int):
closed = False
def Close(self, CloseHandle=_winapi.CloseHandle):
if not self.closed:
self.closed = True
CloseHandle(self)
def Detach(self):
if not self.closed:
self.closed = True
return int(self)
raise ValueError("already closed")
def __repr__(self):
return "Handle(%d)" % int(self)
__del__ = Close
__str__ = __repr__
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# This list holds Popen instances for which the underlying process had not
# exited at the time its __del__ method got called: those processes are wait()ed
# for synchronously from _cleanup() when a new Popen object is created, to avoid
# zombie processes.
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxsize)
if res is not None:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
DEVNULL = -3
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except InterruptedError:
continue
# XXX This function is only used by multiprocessing and the test suite,
# but it's here so that it can be imported when Python is compiled without
# threads.
def _args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
flag_opt_map = {
'debug': 'd',
# 'inspect': 'i',
# 'interactive': 'i',
'optimize': 'O',
'dont_write_bytecode': 'B',
'no_user_site': 's',
'no_site': 'S',
'ignore_environment': 'E',
'verbose': 'v',
'bytes_warning': 'b',
'quiet': 'q',
'hash_randomization': 'R',
}
args = []
for flag, opt in flag_opt_map.items():
v = getattr(sys.flags, flag)
if v > 0:
args.append('-' + opt * v)
for opt in sys.warnoptions:
args.append('-W' + opt)
return args
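# Illustrative example (not in the original source): running under
# ``python -O -B -W error`` this returns ['-O', '-B', '-Werror'].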
def call(*popenargs, timeout=None, **kwargs):
"""Run command with arguments. Wait for command to complete or
timeout, then return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
with Popen(*popenargs, **kwargs) as p:
try:
return p.wait(timeout=timeout)
except:
p.kill()
p.wait()
raise
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the call function. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, timeout=None, **kwargs):
r"""Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
b'ls: non_existent_file: No such file or directory\n'
If universal_newlines=True is passed, the return value will be a
string rather than bytes.
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
with Popen(*popenargs, stdout=PIPE, **kwargs) as process:
try:
output, unused_err = process.communicate(timeout=timeout)
except TimeoutExpired:
process.kill()
output, unused_err = process.communicate()
raise TimeoutExpired(process.args, timeout, output=output)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, process.args, output=output)
return output
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
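# Illustrative examples of the quoting rules above (not in the original
# source); results are shown as Python string literals:
#   list2cmdline(["a b", "c"])  -> '"a b" c'
#   list2cmdline(['a"b'])       -> 'a\\"b'
#   list2cmdline(['t\\', 'u'])  -> 't\\ u'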
# Various tools for executing commands and looking at their output and status.
#
# NB This only works (and is only relevant) for POSIX.
def getstatusoutput(cmd):
"""Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple
(status, output). cmd is actually run as '{ cmd ; } 2>&1', so that the
returned output will contain output or error messages. A trailing newline
is stripped from the output. The exit status for the command can be
interpreted according to the rules for the C function wait(). Example:
>>> import subprocess
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
"""
with os.popen('{ ' + cmd + '; } 2>&1', 'r') as pipe:
try:
text = pipe.read()
sts = pipe.close()
except:
process = pipe._proc
process.kill()
process.wait()
raise
if sts is None:
sts = 0
if text[-1:] == '\n':
text = text[:-1]
return sts, text
def getoutput(cmd):
"""Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> import subprocess
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
"""
return getstatusoutput(cmd)[1]
_PLATFORM_DEFAULT_CLOSE_FDS = object()
class Popen(object):
def __init__(self, args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
shell=False, cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False,
pass_fds=()):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
self._input = None
self._communication_started = False
if bufsize is None:
bufsize = -1 # Restore default
if not isinstance(bufsize, int):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
any_stdio_set = (stdin is not None or stdout is not None or
stderr is not None)
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
if any_stdio_set:
close_fds = False
else:
close_fds = True
elif close_fds and any_stdio_set:
raise ValueError(
"close_fds is not supported on Windows platforms"
" if you redirect stdin/stdout/stderr")
else:
# POSIX
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
close_fds = True
if pass_fds and not close_fds:
warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
close_fds = True
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.args = args
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are -1 when not using PIPEs. The child objects are -1
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
# We wrap OS handles *before* launching the child, otherwise a
# quickly terminating child could make our fds unwrappable
# (see #8458).
# FIXME: the following block is commented out to work around a Brython syntax error.
#if mswindows:
# if p2cwrite != -1:
# p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
# if c2pread != -1:
# c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
# if errread != -1:
# errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite != -1:
self.stdin = io.open(p2cwrite, 'wb', bufsize)
if universal_newlines:
self.stdin = io.TextIOWrapper(self.stdin, write_through=True)
if c2pread != -1:
self.stdout = io.open(c2pread, 'rb', bufsize)
if universal_newlines:
self.stdout = io.TextIOWrapper(self.stdout)
if errread != -1:
self.stderr = io.open(errread, 'rb', bufsize)
if universal_newlines:
self.stderr = io.TextIOWrapper(self.stderr)
self._closed_child_pipe_fds = False
try:
self._execute_child(args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session)
except:
# Cleanup if the child failed starting.
for f in filter(None, (self.stdin, self.stdout, self.stderr)):
try:
f.close()
except EnvironmentError:
pass # Ignore EBADF or other errors.
if not self._closed_child_pipe_fds:
to_close = []
if stdin == PIPE:
to_close.append(p2cread)
if stdout == PIPE:
to_close.append(c2pwrite)
if stderr == PIPE:
to_close.append(errwrite)
if hasattr(self, '_devnull'):
to_close.append(self._devnull)
for fd in to_close:
try:
os.close(fd)
except EnvironmentError:
pass
raise
def _translate_newlines(self, data, encoding):
data = data.decode(encoding)
return data.replace("\r\n", "\n").replace("\r", "\n")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
if self.stdin:
self.stdin.close()
# Wait for the process to terminate, to avoid zombies.
self.wait()
def __del__(self, _maxsize=sys.maxsize, _active=_active):
# If __init__ hasn't had a chance to execute (e.g. if it
# was passed an undeclared keyword argument), we don't
# have a _child_created attribute at all.
if not getattr(self, '_child_created', False):
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxsize)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def _get_devnull(self):
if not hasattr(self, '_devnull'):
self._devnull = os.open(os.devnull, os.O_RDWR)
return self._devnull
def communicate(self, input=None, timeout=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be
bytes to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
if self._communication_started and input:
raise ValueError("Cannot send input after starting communication")
# Optimization: If we are not worried about timeouts, we haven't
# started communicating, and we have one or zero pipes, using select()
# or threads is unnecessary.
if (timeout is None and not self._communication_started and
[self.stdin, self.stdout, self.stderr].count(None) >= 2):
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
raise
self.stdin.close()
elif self.stdout:
stdout = _eintr_retry_call(self.stdout.read)
self.stdout.close()
elif self.stderr:
stderr = _eintr_retry_call(self.stderr.read)
self.stderr.close()
self.wait()
else:
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
try:
stdout, stderr = self._communicate(input, endtime, timeout)
finally:
self._communication_started = True
sts = self.wait(timeout=self._remaining_time(endtime))
return (stdout, stderr)
def poll(self):
return self._internal_poll()
def _remaining_time(self, endtime):
"""Convenience for _communicate when computing timeouts."""
if endtime is None:
return None
else:
return endtime - _time()
def _check_timeout(self, endtime, orig_timeout):
"""Convenience for checking if a timeout has expired."""
if endtime is None:
return
if _time() > endtime:
raise TimeoutExpired(self.args, orig_timeout)
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (-1, -1, -1, -1, -1, -1)
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _winapi.CreatePipe(None, 0)
p2cread = Handle(p2cread)
_winapi.CloseHandle(_)
elif stdin == PIPE:
p2cread, p2cwrite = _winapi.CreatePipe(None, 0)
p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
elif stdin == DEVNULL:
p2cread = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _winapi.CreatePipe(None, 0)
c2pwrite = Handle(c2pwrite)
_winapi.CloseHandle(_)
elif stdout == PIPE:
c2pread, c2pwrite = _winapi.CreatePipe(None, 0)
c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
elif stdout == DEVNULL:
c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _winapi.CreatePipe(None, 0)
errwrite = Handle(errwrite)
_winapi.CloseHandle(_)
elif stderr == PIPE:
errread, errwrite = _winapi.CreatePipe(None, 0)
errread, errwrite = Handle(errread), Handle(errwrite)
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
h = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(), handle,
_winapi.GetCurrentProcess(), 0, 1,
_winapi.DUPLICATE_SAME_ACCESS)
return Handle(h)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(
os.path.dirname(_winapi.GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.base_exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
unused_restore_signals, unused_start_new_session):
"""Execute program (MS Windows version)"""
assert not pass_fds, "pass_fds not supported on Windows."
if not isinstance(args, str):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if -1 not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _winapi.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format (comspec, args)
if (_winapi.GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. Cost is Ctrl+C won't
# kill children.
creationflags |= _winapi.CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error as e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread != -1:
p2cread.Close()
if c2pwrite != -1:
c2pwrite.Close()
if errwrite != -1:
errwrite.Close()
if hasattr(self, '_devnull'):
os.close(self._devnull)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = Handle(hp)
self.pid = pid
_winapi.CloseHandle(ht)
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_winapi.WaitForSingleObject,
_WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0,
_GetExitCodeProcess=_winapi.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if endtime is not None:
timeout = self._remaining_time(endtime)
if timeout is None:
timeout_millis = _winapi.INFINITE
else:
timeout_millis = int(timeout * 1000)
if self.returncode is None:
result = _winapi.WaitForSingleObject(self._handle,
timeout_millis)
if result == _winapi.WAIT_TIMEOUT:
raise TimeoutExpired(self.args, timeout)
self.returncode = _winapi.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
fh.close()
def _communicate(self, input, endtime, orig_timeout):
# Start reader threads feeding into a list hanging off of this
# object, unless they've already been started.
if self.stdout and not hasattr(self, "_stdout_buff"):
self._stdout_buff = []
self.stdout_thread = \
threading.Thread(target=self._readerthread,
args=(self.stdout, self._stdout_buff))
self.stdout_thread.daemon = True
self.stdout_thread.start()
if self.stderr and not hasattr(self, "_stderr_buff"):
self._stderr_buff = []
self.stderr_thread = \
threading.Thread(target=self._readerthread,
args=(self.stderr, self._stderr_buff))
self.stderr_thread.daemon = True
self.stderr_thread.start()
if self.stdin:
if input is not None:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE:
raise
self.stdin.close()
# Wait for the reader threads, or time out. If we time out, the
# threads keep reading and the fds are left open in case the user
# calls communicate() again.
if self.stdout is not None:
self.stdout_thread.join(self._remaining_time(endtime))
if self.stdout_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
if self.stderr is not None:
self.stderr_thread.join(self._remaining_time(endtime))
if self.stderr_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
# Collect the output from and close both pipes, now that we know
# both have been read successfully.
stdout = None
stderr = None
if self.stdout:
stdout = self._stdout_buff
self.stdout.close()
if self.stderr:
stderr = self._stderr_buff
self.stderr.close()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
try:
_winapi.TerminateProcess(self._handle, 1)
except PermissionError:
# ERROR_ACCESS_DENIED (winerror 5) is received when the
# process already died.
rc = _winapi.GetExitCodeProcess(self._handle)
if rc == _winapi.STILL_ACTIVE:
raise
self.returncode = rc
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = _create_pipe()
elif stdin == DEVNULL:
p2cread = self._get_devnull()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = _create_pipe()
elif stdout == DEVNULL:
c2pwrite = self._get_devnull()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = _create_pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = self._get_devnull()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _close_fds(self, fds_to_keep):
start_fd = 3
for fd in sorted(fds_to_keep):
if fd >= start_fd:
os.closerange(start_fd, fd)
start_fd = fd + 1
if start_fd <= MAXFD:
os.closerange(start_fd, MAXFD)
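# Illustrative example (not in the original source): with
# fds_to_keep={4, 7} this closes fd 3, then 5-6, then 8..MAXFD,
# leaving 0-2, 4 and 7 open.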
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session):
"""Execute program (POSIX version)"""
if isinstance(args, (str, bytes)):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
orig_executable = executable
# For transferring possible exec failure from child to parent.
# Data format: "exception name:hex errno:description"
# Pickle is not used; it is complex and involves memory allocation.
errpipe_read, errpipe_write = _create_pipe()
try:
try:
# We must avoid complex work that could involve
# malloc or free in the child process to avoid
# potential deadlocks, thus we do all this here.
# and pass it to fork_exec()
if env is not None:
env_list = [os.fsencode(k) + b'=' + os.fsencode(v)
for k, v in env.items()]
else:
env_list = None # Use execv instead of execve.
executable = os.fsencode(executable)
if os.path.dirname(executable):
executable_list = (executable,)
else:
# This matches the behavior of os._execvpe().
executable_list = tuple(
os.path.join(os.fsencode(dir), executable)
for dir in os.get_exec_path(env))
fds_to_keep = set(pass_fds)
fds_to_keep.add(errpipe_write)
self.pid = _posixsubprocess.fork_exec(
args, executable_list,
close_fds, sorted(fds_to_keep), cwd, env_list,
p2cread, p2cwrite, c2pread, c2pwrite,
errread, errwrite,
errpipe_read, errpipe_write,
restore_signals, start_new_session, preexec_fn)
self._child_created = True
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
# self._devnull is not always defined.
devnull_fd = getattr(self, '_devnull', None)
if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd:
os.close(p2cread)
if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd:
os.close(c2pwrite)
if errwrite != -1 and errread != -1 and errwrite != devnull_fd:
os.close(errwrite)
if devnull_fd is not None:
os.close(devnull_fd)
# Prevent a double close of these fds from __init__ on error.
self._closed_child_pipe_fds = True
# Wait for exec to fail or succeed; possibly raising an
# exception (limited in size)
errpipe_data = bytearray()
while True:
part = _eintr_retry_call(os.read, errpipe_read, 50000)
errpipe_data += part
if not part or len(errpipe_data) > 50000:
break
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if errpipe_data:
try:
_eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
try:
exception_name, hex_errno, err_msg = (
errpipe_data.split(b':', 2))
except ValueError:
exception_name = b'RuntimeError'
hex_errno = b'0'
err_msg = (b'Bad exception data from child: ' +
repr(errpipe_data))
child_exception_type = getattr(
builtins, exception_name.decode('ascii'),
RuntimeError)
err_msg = err_msg.decode(errors="surrogatepass")
if issubclass(child_exception_type, OSError) and hex_errno:
errno_num = int(hex_errno, 16)
child_exec_never_called = (err_msg == "noexec")
if child_exec_never_called:
err_msg = ""
if errno_num != 0:
err_msg = os.strerror(errno_num)
if errno_num == errno.ENOENT:
if child_exec_never_called:
# The error must be from chdir(cwd).
err_msg += ': ' + repr(cwd)
else:
err_msg += ': ' + repr(orig_executable)
raise child_exception_type(errno_num, err_msg)
raise child_exception_type(err_msg)
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _os_error=os.error, _ECHILD=errno.ECHILD):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
try:
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except _os_error as e:
if _deadstate is not None:
self.returncode = _deadstate
elif e.errno == _ECHILD:
# This happens if SIGCLD is set to be ignored or
# waiting for child processes has otherwise been
# disabled for our process. This child is dead, we
# can't get the status.
# http://bugs.python.org/issue15756
self.returncode = 0
return self.returncode
def _try_wait(self, wait_flags):
try:
(pid, sts) = _eintr_retry_call(os.waitpid, self.pid, wait_flags)
except OSError as e:
if e.errno != errno.ECHILD:
raise
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
pid = self.pid
sts = 0
return (pid, sts)
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is not None:
return self.returncode
# endtime is preferred to timeout. timeout is only used for
# printing.
if endtime is not None or timeout is not None:
if endtime is None:
endtime = _time() + timeout
elif timeout is None:
timeout = self._remaining_time(endtime)
if endtime is not None:
# Enter a busy loop if we have a timeout. This busy loop was
# cribbed from Lib/threading.py in Thread.wait() at r71065.
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
(pid, sts) = self._try_wait(os.WNOHANG)
assert pid == self.pid or pid == 0
if pid == self.pid:
self._handle_exitstatus(sts)
break
remaining = self._remaining_time(endtime)
if remaining <= 0:
raise TimeoutExpired(self.args, timeout)
delay = min(delay * 2, remaining, .05)
time.sleep(delay)
else:
while self.returncode is None:
(pid, sts) = self._try_wait(0)
# Check the pid and loop as waitpid has been known to return
# 0 even without WNOHANG in odd situations. issue14396.
if pid == self.pid:
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input, endtime, orig_timeout):
if self.stdin and not self._communication_started:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
if _has_poll:
stdout, stderr = self._communicate_with_poll(input, endtime,
orig_timeout)
else:
stdout, stderr = self._communicate_with_select(input, endtime,
orig_timeout)
self.wait(timeout=self._remaining_time(endtime))
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = b''.join(stdout)
if stderr is not None:
stderr = b''.join(stderr)
# Translate newlines, if requested.
# This also turns bytes into strings.
if self.universal_newlines:
if stdout is not None:
stdout = self._translate_newlines(stdout,
self.stdout.encoding)
if stderr is not None:
stderr = self._translate_newlines(stderr,
self.stderr.encoding)
return (stdout, stderr)
def _save_input(self, input):
# This method is called from the _communicate_with_*() methods
# so that if we time out while communicating, we can continue
# sending input if we retry.
if self.stdin and self._input is None:
self._input_offset = 0
self._input = input
if self.universal_newlines and input is not None:
self._input = self._input.encode(self.stdin.encoding)
def _communicate_with_poll(self, input, endtime, orig_timeout):
stdout = None # Return
stderr = None # Return
if not self._communication_started:
self._fd2file = {}
poller = select.poll()
def register_and_append(file_obj, eventmask):
poller.register(file_obj.fileno(), eventmask)
self._fd2file[file_obj.fileno()] = file_obj
def close_unregister_and_remove(fd):
poller.unregister(fd)
self._fd2file[fd].close()
self._fd2file.pop(fd)
if self.stdin and input:
register_and_append(self.stdin, select.POLLOUT)
# Only create this mapping if we haven't already.
if not self._communication_started:
self._fd2output = {}
if self.stdout:
self._fd2output[self.stdout.fileno()] = []
if self.stderr:
self._fd2output[self.stderr.fileno()] = []
select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
if self.stdout:
register_and_append(self.stdout, select_POLLIN_POLLPRI)
stdout = self._fd2output[self.stdout.fileno()]
if self.stderr:
register_and_append(self.stderr, select_POLLIN_POLLPRI)
stderr = self._fd2output[self.stderr.fileno()]
self._save_input(input)
while self._fd2file:
timeout = self._remaining_time(endtime)
if timeout is not None and timeout < 0:
raise TimeoutExpired(self.args, orig_timeout)
try:
ready = poller.poll(timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
self._check_timeout(endtime, orig_timeout)
# XXX Rewrite these to use non-blocking I/O on the
# file objects; they are no longer using C stdio!
for fd, mode in ready:
if mode & select.POLLOUT:
chunk = self._input[self._input_offset :
self._input_offset + _PIPE_BUF]
try:
self._input_offset += os.write(fd, chunk)
except OSError as e:
if e.errno == errno.EPIPE:
close_unregister_and_remove(fd)
else:
raise
else:
if self._input_offset >= len(self._input):
close_unregister_and_remove(fd)
elif mode & select_POLLIN_POLLPRI:
data = os.read(fd, 4096)
if not data:
close_unregister_and_remove(fd)
self._fd2output[fd].append(data)
else:
# Ignore hang up or errors.
close_unregister_and_remove(fd)
return (stdout, stderr)
def _communicate_with_select(self, input, endtime, orig_timeout):
if not self._communication_started:
self._read_set = []
self._write_set = []
if self.stdin and input:
self._write_set.append(self.stdin)
if self.stdout:
self._read_set.append(self.stdout)
if self.stderr:
self._read_set.append(self.stderr)
self._save_input(input)
stdout = None # Return
stderr = None # Return
if self.stdout:
if not self._communication_started:
self._stdout_buff = []
stdout = self._stdout_buff
if self.stderr:
if not self._communication_started:
self._stderr_buff = []
stderr = self._stderr_buff
while self._read_set or self._write_set:
timeout = self._remaining_time(endtime)
if timeout is not None and timeout < 0:
raise TimeoutExpired(self.args, orig_timeout)
try:
(rlist, wlist, xlist) = \
select.select(self._read_set, self._write_set, [],
timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
# According to the docs, returning three empty lists indicates
# that the timeout expired.
if not (rlist or wlist or xlist):
raise TimeoutExpired(self.args, orig_timeout)
# We also check what time it is ourselves for good measure.
self._check_timeout(endtime, orig_timeout)
# XXX Rewrite these to use non-blocking I/O on the
# file objects; they are no longer using C stdio!
if self.stdin in wlist:
chunk = self._input[self._input_offset :
self._input_offset + _PIPE_BUF]
try:
bytes_written = os.write(self.stdin.fileno(), chunk)
except OSError as e:
if e.errno == errno.EPIPE:
self.stdin.close()
self._write_set.remove(self.stdin)
else:
raise
else:
self._input_offset += bytes_written
if self._input_offset >= len(self._input):
self.stdin.close()
self._write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if not data:
self.stdout.close()
self._read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if not data:
self.stderr.close()
self._read_set.remove(self.stderr)
stderr.append(data)
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
| gpl-3.0 |
ccw808/mtasa-blue | vendor/google-breakpad/src/tools/python/filter_syms.py | 25 | 8055 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Normalizes and de-duplicates paths within Breakpad symbol files.
When using DWARF for storing debug symbols, some file information will be
stored relative to the current working directory of the current compilation
unit, and may be further relativized based upon how the file was #included.
This helper can be used to parse the Breakpad symbol file generated from such
DWARF files and normalize and de-duplicate the FILE records found within,
updating any references to the FILE records in the other record types.
"""
import macpath
import ntpath
import optparse
import os
import posixpath
import sys
class BreakpadParseError(Exception):
"""Unsupported Breakpad symbol record exception class."""
pass
class SymbolFileParser(object):
"""Parser for Breakpad symbol files.
The format of these files is documented at
https://chromium.googlesource.com/breakpad/breakpad/+/master/docs/symbol_files.md
"""
def __init__(self, input_stream, output_stream, ignored_prefixes=None,
path_handler=os.path):
"""Inits a SymbolFileParser to read symbol records from |input_stream| and
write the processed output to |output_stream|.
|ignored_prefixes| contains a list of optional path prefixes that
should be stripped from the final, normalized path outputs.
For example, if the Breakpad symbol file had all paths starting with a
common prefix, such as:
FILE 1 /b/build/src/foo.cc
FILE 2 /b/build/src/bar.cc
Then adding "/b/build/src" as an ignored prefix would result in an output
file that contained:
FILE 1 foo.cc
FILE 2 bar.cc
Note that |ignored_prefixes| does not necessarily contain file system
paths, as the contents of the DWARF DW_AT_comp_dir attribute is dependent
upon the host system and compiler, and may contain additional information
such as hostname or compiler version.
"""
self.unique_files = {}
self.duplicate_files = {}
self.input_stream = input_stream
self.output_stream = output_stream
self.ignored_prefixes = ignored_prefixes or []
self.path_handler = path_handler
def Process(self):
"""Processes the Breakpad symbol file."""
for line in self.input_stream:
parsed = self._ParseRecord(line.rstrip())
if parsed:
self.output_stream.write(parsed + '\n')
def _ParseRecord(self, record):
"""Parses a single Breakpad symbol record - a single line from the symbol
file.
Returns:
The modified string to write to the output file, or None if no line
should be written.
"""
record_type = record.partition(' ')[0]
if record_type == 'FILE':
return self._ParseFileRecord(record)
elif self._IsLineRecord(record_type):
return self._ParseLineRecord(record)
else:
# Simply pass the record through unaltered.
return record
def _NormalizePath(self, path):
"""Normalizes a file path to its canonical form.
As this may not execute on the machine or file system originally
responsible for compilation, it may be necessary to further correct paths
for symlinks, junctions, or other such file system indirections.
Returns:
A unique, canonical representation for the file path.
"""
return self.path_handler.normpath(path)
def _AdjustPath(self, path):
"""Adjusts the supplied path after performing path de-duplication.
This may be used to perform secondary adjustments, such as removing a
common prefix, such as "/D/build", or replacing the file system path with
information from the version control system.
Returns:
The actual path to use when writing the FILE record.
"""
return path[len(filter(path.startswith,
self.ignored_prefixes + [''])[0]):]
def _ParseFileRecord(self, file_record):
"""Parses and corrects a FILE record."""
file_info = file_record[5:].split(' ', 3)
if len(file_info) > 2:
raise BreakpadParseError('Unsupported FILE record: ' + file_record)
file_index = int(file_info[0])
file_name = self._NormalizePath(file_info[1])
existing_file_index = self.unique_files.get(file_name)
if existing_file_index is None:
self.unique_files[file_name] = file_index
file_info[1] = self._AdjustPath(file_name)
return 'FILE ' + ' '.join(file_info)
else:
self.duplicate_files[file_index] = existing_file_index
return None
def _IsLineRecord(self, record_type):
"""Determines if the current record type is a Line record"""
try:
int(record_type, 16)  # line records begin with a hexadecimal address
except (ValueError, TypeError):
return False
return True
def _ParseLineRecord(self, line_record):
"""Parses and corrects a Line record."""
line_info = line_record.split(' ', 5)
if len(line_info) > 4:
raise BreakpadParseError('Unsupported Line record: ' + line_record)
file_index = int(line_info[3])
line_info[3] = str(self.duplicate_files.get(file_index, file_index))
return ' '.join(line_info)
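# A hedged usage sketch (hypothetical symbol data; uses StringIO since the
# rest of this script is Python 2, and assumes a POSIX host so os.path
# normalization applies): the parser de-duplicates the two FILE records
# below and rewrites the line record's file index accordingly.
#
#   import StringIO
#   src = StringIO.StringIO('FILE 1 /b/build/src/./foo.cc\n'
#                           'FILE 2 /b/build/src/foo.cc\n'
#                           '1234 a 1 2\n')
#   out = StringIO.StringIO()
#   SymbolFileParser(src, out, ignored_prefixes=['/b/build/src/']).Process()
#   # out.getvalue() -> 'FILE 1 foo.cc\n1234 a 1 1\n'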
def main():
option_parser = optparse.OptionParser()
option_parser.add_option("-p", "--prefix",
action="append", dest="prefixes", type="string",
default=[],
help="A path prefix that should be removed from "
"all FILE lines. May be repeated to specify "
"multiple prefixes.")
option_parser.add_option("-t", "--path_type",
action="store", type="choice", dest="path_handler",
choices=['win32', 'posix'],
help="Indicates how file paths should be "
"interpreted. The default is to treat paths "
"the same as the OS running Python (eg: "
"os.path)")
options, args = option_parser.parse_args()
if args:
option_parser.error('Unknown argument: %s' % args)
path_handler = { 'win32': ntpath,
'posix': posixpath }.get(options.path_handler, os.path)
try:
symbol_parser = SymbolFileParser(sys.stdin, sys.stdout, options.prefixes,
path_handler)
symbol_parser.Process()
except BreakpadParseError, e:
print >> sys.stderr, 'Got an error while processing symbol file'
print >> sys.stderr, str(e)
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
CasparLi/calibre | src/odf/config.py | 100 | 1418 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import CONFIGNS
from element import Element
# Autogenerated
def ConfigItem(**args):
return Element(qname = (CONFIGNS, 'config-item'), **args)
def ConfigItemMapEntry(**args):
return Element(qname = (CONFIGNS,'config-item-map-entry'), **args)
def ConfigItemMapIndexed(**args):
return Element(qname = (CONFIGNS,'config-item-map-indexed'), **args)
def ConfigItemMapNamed(**args):
return Element(qname = (CONFIGNS,'config-item-map-named'), **args)
def ConfigItemSet(**args):
return Element(qname = (CONFIGNS, 'config-item-set'), **args)
| gpl-3.0 |
wilsonxiao/machinekit | scripts/troff-to-asciidoc.py | 18 | 7279 | import os, sys, re
SourceFilePath = sys.argv[1]
TargetFilePath = sys.argv[2]
print "source = " + SourceFilePath
print "target = " + TargetFilePath
# open the source file
SourceLines = open(SourceFilePath, 'rU').readlines()
# new content
ComponentTitle = []
AsciidocLines = []
IndexLines = []
#if the first character is a dot (troff control character), then check further, else get the next line
#.TH Header
re_TH = re.compile(r'^\.TH')
#.TP Paragraph
re_TP = re.compile(r'^\.TP')
#.HP ?
re_HP = re.compile(r'^\.HP')
#.TQ Paragraph
re_TQ = re.compile(r'^\.TQ')
#.SH NAME
#.SH SYNOPSYS
re_SH = re.compile(r'^\.SH\s+(.+)\n')
#.B
re_B = re.compile(r'^\.B')
#.de TQ
re_de_TQ = re.compile(r'^\.de TQ')
#.br
re_br = re.compile(r'^\.br')
#.ns
re_ns = re.compile(r'^\.ns')
#..
re_pointpoint = re.compile(r'^\.\.')
#markup
re_fI = re.compile(r'\\fI')
re_fB = re.compile(r'\\fB')
re_fR = re.compile(r'\\fR')
re_longdash = re.compile(r'\\-')
re_erroneous_markup = re.compile( ('\*\*\*\*|\_\_\_\_') )
#.\" Comments
re_comment = re.compile(r'^\.\\"')
i = 0
while (i < len(SourceLines)):
#\fI applies until the next \fB or \fR, that is, until the next markup marker
#
result_re_fI = re_fI.search(SourceLines[i])
result_re_fB = re_fB.search(SourceLines[i])
result_re_fR = re_fR.search(SourceLines[i])
result_re_longdash = re_longdash.search(SourceLines[i])
#first time for gettin into while loop
CurrLine = SourceLines[i]
str_asciiline = ""
int_Character = 0
last_formatting_char = ''
#
while ((result_re_fI != None) \
or (result_re_fB != None) \
or (result_re_fR != None)):
#parse CurrLine
#search for the first delimiter \f with next character 'I', 'B' or 'R'
#when the first characters are found. The part of the string before will
#go to the str_asciiline, the last_formatting_char will be remembered
#because in asciidoc italic needs to be closed like this _italic_
int_Character = CurrLine.find("\\f")
#tempchar1 = CurrLine[int_Character+1]
#tempchar2 = CurrLine[int_Character+2]
if (int_Character != -1) :
if (CurrLine[int_Character+2]=='I'):
str_asciiline = str_asciiline + \
CurrLine[0:int_Character] + \
last_formatting_char + \
"__"
last_formatting_char = "__"
if (CurrLine[int_Character+2]=='B'):
str_asciiline = str_asciiline + \
CurrLine[0:int_Character] + \
last_formatting_char + \
"**"
last_formatting_char = "**"
if (CurrLine[int_Character+2]=='R'):
str_asciiline = str_asciiline + \
CurrLine[0:int_Character] + \
last_formatting_char + \
""
last_formatting_char = ""
#
CurrLine = CurrLine[int_Character+3:len(CurrLine)]
#check for more
result_re_fI = re_fI.search(CurrLine)
result_re_fB = re_fB.search(CurrLine)
result_re_fR = re_fR.search(CurrLine)
#
if ((result_re_fI == None) \
and (result_re_fB == None) \
and (result_re_fR == None)):
#exiting the while loop, the SourceLines[i] now must contain the
#cleaned version of the sentence for further processing
str_asciiline += CurrLine + last_formatting_char
#str_asciiline.append(last_formatting_char)
SourceLines[i] = str_asciiline
#
#check for **** or ____ in str_asciiline and remove these
#this happens for example when the sourcefile has \\fB\\fB in the
#line. This will result in **** and renders faulty in asciidoc
result_re_erroneous_markup = re_erroneous_markup.search(SourceLines[i])
if (result_re_erroneous_markup != None):
SourceLines[i] = re_erroneous_markup.sub('',SourceLines[i])
if (result_re_longdash != None):
#SourceLines[i].replace("\\-","--")
CurrLine = re_longdash.sub("--",SourceLines[i])
SourceLines[i] = CurrLine
#done markup
#
result_re_TH = re_TH.search(SourceLines[i])
if (result_re_TH != None):
# the title must be split from the line, added, and underlined with the
# same amount of '=' signs underneath
CompTitle=SourceLines[i].split(' ')[1]
ComponentTitle.append(CompTitle+'\n')
ComponentTitle.append("="*len(CompTitle)+'\n\n')
#ComponentTitle.append('\n')
#
result_re_SH = re_SH.search(SourceLines[i])
if (result_re_SH != None):
#.SH has been found, get the name, put it in the index and add the line
CompHeader=result_re_SH.groups()[0]
#if the header is between quotes, like "see also", then strip the
#quotes and change the space to a dash in the IndexLines array
CompHeader = CompHeader.strip('\"')
CompHeaderDashed = CompHeader.replace(" ", "-")
#result_re_between_quotes = re_between_quotes.search(CompHeader)
AsciidocLines.append("\n\n" + "===== " + \
"[[" + CompHeaderDashed.lower() + "]]" \
+ CompHeader + "\n")
IndexLines.append(". <<" + CompHeaderDashed.lower() + "," + \
CompHeader + ">>" + "\n")
#
result_re_B = re_B.search(SourceLines[i])
if (result_re_B != None):
#read the line, remove the ".B " and add to the Asciidoclines
CurrLine=SourceLines[i][3:len(SourceLines[i])]
AsciidocLines.append(CurrLine)
#
#if we are in a paragraph, continue with filling and parsing lines until
#the next paragraph is encountered
#
result_re_TP = re_TP.search(SourceLines[i])
if (result_re_TP != None):
#add empty line
AsciidocLines.append('\n')
#
#these lines should not do anything and are to be ignored
result_re_comment = re_comment.search(SourceLines[i])
result_re_de_TQ = re_de_TQ.search(SourceLines[i])
result_br = re_br.search(SourceLines[i])
result_HP = re_HP.search(SourceLines[i])
result_TQ = re_TQ.search(SourceLines[i])
result_ns = re_ns.search(SourceLines[i])
result_pointpoint = re_pointpoint.search(SourceLines[i])
if not ((result_re_comment != None) \
or (result_re_de_TQ != None) \
or (result_br != None) \
or (result_HP != None) \
or (result_TQ != None) \
or (result_ns != None) \
or (result_pointpoint != None)):
#nothing to be done unless all other results are none
#in that situation just copy the lines
if (result_re_B == None) \
and (result_re_SH == None) \
and (result_re_TP == None) \
and (result_re_TH == None):
AsciidocLines.append(SourceLines[i])
i += 1
#return to while loop
#now write all info into the target file
AsciidocFile = open(TargetFilePath, 'w+')
AsciidocFile.writelines(ComponentTitle)
AsciidocFile.writelines(IndexLines)
AsciidocFile.writelines(AsciidocLines)
AsciidocFile.close()
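# A hedged illustration of the markup loop above (input line invented for
# this example): a troff source line such as
#
#   \fBbold\fR and \fIitalic\fR words
#
# is rewritten by the inner while loop into the asciidoc form
#
#   **bold** and __italic__ words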
| gpl-3.0 |
10-01/alc-server | external/requests/packages/charade/latin1prober.py | 950 | 5241 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
class Latin1Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self.reset()
def reset(self):
self._mLastCharClass = OTH
self._mFreqCounter = [0] * FREQ_CAT_NUM
CharSetProber.reset(self)
def get_charset_name(self):
return "windows-1252"
def feed(self, aBuf):
aBuf = self.filter_with_english_letters(aBuf)
for c in aBuf:
charClass = Latin1_CharToClass[wrap_ord(c)]
freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
+ charClass]
if freq == 0:
self._mState = eNotMe
break
self._mFreqCounter[freq] += 1
self._mLastCharClass = charClass
return self.get_state()
def get_confidence(self):
if self.get_state() == eNotMe:
return 0.01
total = sum(self._mFreqCounter)
if total < 0.01:
confidence = 0.0
else:
confidence = ((self._mFreqCounter[3] / total)
- (self._mFreqCounter[1] * 20.0 / total))
if confidence < 0.0:
confidence = 0.0
# lower the confidence of latin1 so that other more accurate
# detector can take priority.
confidence = confidence * 0.5
return confidence
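# A hedged usage sketch (assumes this module is importable as part of the
# charade package; the byte string is illustrative):
#
#   prober = Latin1Prober()
#   prober.feed(b'caf\xe9 au lait')     # 0xE9 maps to ASV in the table above
#   print(prober.get_confidence())      # capped at 0.5 by the final scaling
#   prober.reset()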
| mit |
jpo/healthcareai-py | healthcareai/common/transformers.py | 1 | 9289 | """Transformers
This module contains transformers for preprocessing data. Most operate on DataFrames and are named appropriately.
"""
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn.preprocessing import StandardScaler
class DataFrameImputer(TransformerMixin):
"""
Impute missing values in a dataframe.
Columns of dtype object or category (assumed categorical) are imputed with the mode (most frequent value in column).
Columns of other types (assumed continuous) are imputed with the mean of the column.
"""
def __init__(self, impute=True, verbose=True):
self.impute = impute
self.object_columns = None
self.fill = None
self.verbose = verbose
def fit(self, X, y=None):
# Return if not imputing
if self.impute is False:
return self
# Grab list of object column names before doing imputation
self.object_columns = X.select_dtypes(include=['object']).columns.values
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O')
or pd.core.common.is_categorical_dtype(X[c])
else X[c].mean() for c in X], index=X.columns)
if self.verbose:
num_nans = sum(X.select_dtypes(include=[np.number]).isnull().sum())
num_total = sum(X.select_dtypes(include=[np.number]).count())
percentage_imputed = num_nans / num_total * 100
print("Percentage Imputed: %.2f%%" % percentage_imputed)
print("Note: Impute will always happen on prediction dataframe, otherwise rows are dropped, and will lead "
"to missing predictions")
# return self for scikit compatibility
return self
def transform(self, X, y=None):
# Return if not imputing
if self.impute is False:
return X
result = X.fillna(self.fill)
for i in self.object_columns:
if result[i].dtype not in ['object', 'category']:
result[i] = result[i].astype('object')
return result
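# A hedged usage sketch for DataFrameImputer (toy frame, not from this
# project's tests); fit_transform comes from TransformerMixin:
#
#   df = pd.DataFrame({'color': ['red', None, 'red'], 'size': [1.0, 2.0, None]})
#   imputed = DataFrameImputer().fit_transform(df)
#   # 'color' NaN -> 'red' (column mode); 'size' NaN -> 1.5 (column mean)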
class DataFrameConvertTargetToBinary(TransformerMixin):
# TODO Note that this makes healthcareai only handle N/Y in pred column
"""
Convert a classification model's predicted column to 0/1 (otherwise it won't work with GridSearchCV). Passes data
through unchanged for regression models, which keeps the data pipeline logic simple.
Note that this makes healthcareai only handle N/Y in pred column
"""
def __init__(self, model_type, target_column):
self.model_type = model_type
self.target_column = target_column
def fit(self, X, y=None):
# return self for scikit compatibility
return self
def transform(self, X, y=None):
# TODO: put try/catch here when type = class and predictor is numeric
# TODO this makes healthcareai only handle N/Y in pred column
if self.model_type == 'classification':
# Turn off warning around replace
pd.options.mode.chained_assignment = None # default='warn'
# Replace 'Y'/'N' with 1/0
X[self.target_column].replace(['Y', 'N'], [1, 0], inplace=True)
return X
class DataFrameCreateDummyVariables(TransformerMixin):
"""Convert all categorical columns into dummy/indicator variables. Exclude given columns."""
def __init__(self, excluded_columns=None):
self.excluded_columns = excluded_columns
def fit(self, X, y=None):
# return self for scikit compatibility
return self
def transform(self, X, y=None):
columns_to_dummify = list(X.select_dtypes(include=[object, 'category']).columns)
# remove excluded columns (if they are still in the list); guard against the
# default excluded_columns=None and avoid mutating the list while iterating
if self.excluded_columns:
    columns_to_dummify = [column for column in columns_to_dummify
                          if column not in self.excluded_columns]
# Create dummy variables
X = pd.get_dummies(X, columns=columns_to_dummify, drop_first=True, prefix_sep='.')
return X
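# A hedged usage sketch for DataFrameCreateDummyVariables (toy columns):
#
#   df = pd.DataFrame({'gender': ['M', 'F', 'M'], 'age': [30, 40, 50]})
#   dummied = DataFrameCreateDummyVariables().fit_transform(df)
#   # -> columns ['age', 'gender.M']; drop_first=True drops the 'F' level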
class DataFrameConvertColumnToNumeric(TransformerMixin):
"""Convert a column into numeric variables."""
def __init__(self, column_name):
self.column_name = column_name
def fit(self, X, y=None):
# return self for scikit compatibility
return self
def transform(self, X, y=None):
X[self.column_name] = pd.to_numeric(arg=X[self.column_name], errors='raise')
return X
class DataFrameUnderSampling(TransformerMixin):
"""
Performs undersampling on a dataframe.
Must be done BEFORE the train/test split so that the split is performed on the under/over-sampled dataset.
Must be done AFTER imputation, since under/over sampling will not work with missing values (imblearn requires target
column to be converted to numerical values)
"""
def __init__(self, predicted_column, random_seed=0):
self.random_seed = random_seed
self.predicted_column = predicted_column
def fit(self, X, y=None):
# return self for scikit compatibility
return self
def transform(self, X, y=None):
# TODO how do we validate this happens before train/test split? Or do we need to? Can we implement it in the
# TODO simple trainer in the correct order and leave this to advanced users?
# Extract predicted column
y = np.squeeze(X[[self.predicted_column]])
# Copy the dataframe without the predicted column
temp_dataframe = X.drop([self.predicted_column], axis=1)
# Initialize and fit the under sampler
under_sampler = RandomUnderSampler(random_state=self.random_seed)
x_under_sampled, y_under_sampled = under_sampler.fit_sample(temp_dataframe, y)
# Build the resulting under sampled dataframe
result = pd.DataFrame(x_under_sampled)
# Restore the column names
result.columns = temp_dataframe.columns
# Restore the y values
y_under_sampled = pd.Series(y_under_sampled)
result[self.predicted_column] = y_under_sampled
return result
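# A hedged usage sketch for DataFrameUnderSampling (toy imbalanced frame;
# relies on the imblearn import at the top of this module):
#
#   df = pd.DataFrame({'x': range(10), 'y': [0] * 8 + [1] * 2})
#   balanced = DataFrameUnderSampling(predicted_column='y').fit_transform(df)
#   # -> 4 rows: both minority samples plus 2 randomly chosen majority rows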
class DataFrameOverSampling(TransformerMixin):
"""
Performs oversampling on a dataframe.
Must be done BEFORE the train/test split so that the split is performed on the under/over-sampled dataset.
Must be done AFTER imputation, since under/over sampling will not work with missing values (imblearn requires target
column to be converted to numerical values)
"""
def __init__(self, predicted_column, random_seed=0):
self.random_seed = random_seed
self.predicted_column = predicted_column
def fit(self, X, y=None):
# return self for scikit compatibility
return self
def transform(self, X, y=None):
# TODO how do we validate this happens before train/test split? Or do we need to? Can we implement it in the
# TODO simple trainer in the correct order and leave this to advanced users?
# Extract predicted column
y = np.squeeze(X[[self.predicted_column]])
# Copy the dataframe without the predicted column
temp_dataframe = X.drop([self.predicted_column], axis=1)
# Initialize and fit the under sampler
over_sampler = RandomOverSampler(random_state=self.random_seed)
x_over_sampled, y_over_sampled = over_sampler.fit_sample(temp_dataframe, y)
# Build the resulting under sampled dataframe
result = pd.DataFrame(x_over_sampled)
# Restore the column names
result.columns = temp_dataframe.columns
# Restore the y values
y_over_sampled = pd.Series(y_over_sampled)
result[self.predicted_column] = y_over_sampled
return result
class DataFrameDropNaN(TransformerMixin):
"""Remove NaN values. Columns that are NaN or None are removed."""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Uses pandas.DataFrame.dropna function where axis=1 is column action, and
# how='all' requires all the values to be NaN or None to be removed.
return X.dropna(axis=1, how='all')
class DataFrameFeatureScaling(TransformerMixin):
"""Scales numeric features. Columns that are numerics are scaled, or otherwise specified."""
def __init__(self, columns_to_scale=None, reuse=None):
self.columns_to_scale = columns_to_scale
self.reuse = reuse
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Check if it's reuse, if so, then use the reuse's DataFrameFeatureScaling
if self.reuse:
return self.reuse.fit_transform(X, y)
# Check if we know what columns to scale, if not, then get all the numeric columns' names
if not self.columns_to_scale:
self.columns_to_scale = list(X.select_dtypes(include=[np.number]).columns)
X[self.columns_to_scale] = StandardScaler().fit_transform(X[self.columns_to_scale])
return X
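# A hedged usage sketch for DataFrameFeatureScaling (toy frame):
#
#   df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': ['x', 'y', 'z']})
#   scaled = DataFrameFeatureScaling().fit_transform(df)
#   # numeric column 'a' becomes zero-mean / unit-variance; 'b' is untouched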
| mit |
HPPTECH/hpp_IOSTressTest | Refer/IOST_OLD_SRC/IOST_0.13/IOST_WRun_StationInfo.py | 2 | 13035 | #!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : IOST_WRun_StationInfo.py
# Date : Oct 25, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import sys
import time
from IOST_Prepare import IOST_Prepare
from IOST_Config import *
from IOST_Basic import *
import gtk
import gobject
import gtk.glade
#======================================================================
IOST_WRun_StationInfo_Debug_Enable = 1
TEMPERATURE_STR="Temperature ( "+ unichr(186) +"C) :"
print TEMPERATURE_STR
#======================================================================
class IOST_WRun_StationInfo():
"""
This class gets all information about the Station object from the IOST_WRun_Skylark window and controls these
components.
"""
#----------------------------------------------------------------------
def __init__(self, glade_filename, window_name, builder=None):
self.IOST_WRun_StationInfo_window = window_name
if not builder:
self.WRun_StationInfo_Builder = gtk.Builder()
self.WRun_StationInfo_Builder.add_from_file(glade_filename)
self.WRun_StationInfo_Builder.connect_signals(self)
else:
self.WRun_StationInfo_Builder = builder
#----------------------------------------------------------------------
def WRun_GetStationInfo_Obj(self, window_name):
"Get all Station info objecs on WRun window and store into self.IOST_Objs"
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ConsoleIP_Value_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_ConsoleIP_Value_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ConsolePort_Value_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_ConsolePort_Value_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_SlimproPort_Value_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_SlimproPort_Value_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_NPS_IP_Value_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_NPS_IP_Value_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_NPS_Port_Value_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_NPS_Port_Value_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ThermalIP_Value_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_ThermalIP_Value_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ThermalPort_Value_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_ThermalPort_Value_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_Temperature_Value_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_Temperature_Value_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_TimeRun_Value_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_TimeRun_Value_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ServerIP_Value_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_ServerIP_Value_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_OCD_Enable_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_Enable_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_OCD_IP_Value_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_OCD_IP_Value_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_BDI_Enable_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_Enable_L"])
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_BDI_IP_Value_L"] = self.WRun_StationInfo_Builder.get_object(self.IOST_Objs[window_name]["_Summary_StationInfo_BDI_IP_Value_L"])
#----------------------------------------------------------------------
def WRun_InitStationInfo_Obj(self, window_name):
"Initialization all Station info objects when WRun start"
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ConsoleIP_Value_L"].set_text(self.IOST_Data["StationInfo"]["ConsoleIP"])
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ConsoleIP_Value_L"], color="#3399ff")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ConsolePort_Value_L"].set_text(self.IOST_Data["StationInfo"]["ConsolePort"])
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ConsolePort_Value_L"], color="#3399ff")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_SlimproPort_Value_L"].set_text(self.IOST_Data["StationInfo"]["SlimproPort"])
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_SlimproPort_Value_L"], color="#3399ff")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_NPS_IP_Value_L"].set_text(self.IOST_Data["StationInfo"]["NPS_IP"])
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_NPS_IP_Value_L"], color="#3399ff")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_NPS_Port_Value_L"].set_text(self.IOST_Data["StationInfo"]["NPS_Port"])
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_NPS_Port_Value_L"], color="#3399ff")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ThermalIP_Value_L"].set_text(self.IOST_Data["StationInfo"]["ThermalIP"])
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ThermalIP_Value_L"], color="#3399ff")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ThermalPort_Value_L"].set_text(self.IOST_Data["StationInfo"]["ThermalPort"])
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ThermalPort_Value_L"], color="#3399ff")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_Temperature_Value_L"].set_text(self.IOST_Data["StationInfo"]["Temperature"])
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_Temperature_Value_L"], color="#3399ff")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_TimeRun_Value_L"].set_text(self.IOST_Data["StationInfo"]["TimeRunHour"])
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_TimeRun_Value_L"], color="#3399ff")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ServerIP_Value_L"].set_text(self.IOST_Data["StationInfo"]["ServerIP"])
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_ServerIP_Value_L"], color="#3399ff")
if self.IOST_Data["StationInfo"]["OCD_Enable"] == STATUS_ENABLE:
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_OCD_Enable_L"].set_text(STATUS_AVAIL)
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_OCD_Enable_L"], color="#3399ff")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_OCD_IP_Value_L"].set_text(self.IOST_Data["StationInfo"]["OCD_IP"])
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_OCD_IP_Value_L"], color="#3399ff")
else:
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_OCD_Enable_L"].set_text(STATUS_NOT_AVAIL)
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_OCD_Enable_L"], color="red")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_OCD_IP_Value_L"].set_text(STATUS_N_A)
if self.IOST_Data["StationInfo"]["BDI_Enable"] == STATUS_ENABLE:
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_BDI_Enable_L"].set_text(STATUS_AVAIL)
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_BDI_Enable_L"], color="#3399ff")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_BDI_IP_Value_L"].set_text(self.IOST_Data["StationInfo"]["BDI_IP"])
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_BDI_IP_Value_L"], color="#3399ff")
else:
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_BDI_Enable_L"].set_text(STATUS_NOT_AVAIL)
self.WRun_basic.FormatText(self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_BDI_Enable_L"], color="red")
self.IOST_Objs[window_name][window_name+"_Summary_StationInfo_BDI_IP_Value_L"].set_text(STATUS_N_A)
#
station_info = self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_L").get_text()
station_info += " : \n\tStation Name : " + self.IOST_Data["StationInfo"]["StationName"] \
+ "\n\tBoard Number : " + self.IOST_Data["StationInfo"]["StationBoardNumber"] \
+ "\n\tChip Number : " + self.IOST_Data["StationInfo"]["StationChipNumber"] + "\n"
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_L"), color=WRUN_IP_COLOR_DEFAULT, bold=True, text=station_info)
# #0099ff
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_ConsoleIP_L"), color="#0099ff")
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_ConsolePort_L"), color="#0099ff")
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_SlimproPort_L"), color="#0099ff")
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_NPS_IP_L"), color="#0099ff")
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_NPS_Port_L"), color="#0099ff")
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_ThermalIP_L"), color="#0099ff")
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_ThermalPort_L"), color="#0099ff")
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_Temperature_L"), color="#0099ff", text=TEMPERATURE_STR)
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_TimeRun_L"), color="#0099ff")
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_ServerIP_L"), color="#0099ff")
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_OCD_L"), color="#0099ff")
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_OCD_IP_L"), color="#0099ff")
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_BDI_L"), color="#0099ff")
self.WRun_basic.FormatText(self.WRun_StationInfo_Builder.get_object("IOST_WRun_Summary_StationInfo_BDI_IP_L"), color="#0099ff")
#----------------------------------------------------------------------
| mit |
amyvmiwei/kbengine | kbe/src/lib/python/Lib/plat-aix4/IN.py | 108 | 3622 | # Generated by h2py from /usr/include/netinet/in.h
# Included from net/nh.h
# Included from sys/machine.h
LITTLE_ENDIAN = 1234
BIG_ENDIAN = 4321
PDP_ENDIAN = 3412
BYTE_ORDER = BIG_ENDIAN
DEFAULT_GPR = 0xDEADBEEF
MSR_EE = 0x8000
MSR_PR = 0x4000
MSR_FP = 0x2000
MSR_ME = 0x1000
MSR_FE = 0x0800
MSR_FE0 = 0x0800
MSR_SE = 0x0400
MSR_BE = 0x0200
MSR_IE = 0x0100
MSR_FE1 = 0x0100
MSR_AL = 0x0080
MSR_IP = 0x0040
MSR_IR = 0x0020
MSR_DR = 0x0010
MSR_PM = 0x0004
DEFAULT_MSR = (MSR_EE | MSR_ME | MSR_AL | MSR_IR | MSR_DR)
DEFAULT_USER_MSR = (DEFAULT_MSR | MSR_PR)
CR_LT = 0x80000000
CR_GT = 0x40000000
CR_EQ = 0x20000000
CR_SO = 0x10000000
CR_FX = 0x08000000
CR_FEX = 0x04000000
CR_VX = 0x02000000
CR_OX = 0x01000000
XER_SO = 0x80000000
XER_OV = 0x40000000
XER_CA = 0x20000000
def XER_COMP_BYTE(xer): return ((xer >> 8) & 0x000000FF)
def XER_LENGTH(xer): return (xer & 0x0000007F)
DSISR_IO = 0x80000000
DSISR_PFT = 0x40000000
DSISR_LOCK = 0x20000000
DSISR_FPIO = 0x10000000
DSISR_PROT = 0x08000000
DSISR_LOOP = 0x04000000
DSISR_DRST = 0x04000000
DSISR_ST = 0x02000000
DSISR_SEGB = 0x01000000
DSISR_DABR = 0x00400000
DSISR_EAR = 0x00100000
SRR_IS_PFT = 0x40000000
SRR_IS_ISPEC = 0x20000000
SRR_IS_IIO = 0x10000000
SRR_IS_GUARD = 0x10000000
SRR_IS_PROT = 0x08000000
SRR_IS_LOOP = 0x04000000
SRR_PR_FPEN = 0x00100000
SRR_PR_INVAL = 0x00080000
SRR_PR_PRIV = 0x00040000
SRR_PR_TRAP = 0x00020000
SRR_PR_IMPRE = 0x00010000
def BUID_7F_SRVAL(raddr): return (0x87F00000 | (((uint)(raddr)) >> 28))
BT_256M = 0x1FFC
BT_128M = 0x0FFC
BT_64M = 0x07FC
BT_32M = 0x03FC
BT_16M = 0x01FC
BT_8M = 0x00FC
BT_4M = 0x007C
BT_2M = 0x003C
BT_1M = 0x001C
BT_512K = 0x000C
BT_256K = 0x0004
BT_128K = 0x0000
BT_NOACCESS = 0x0
BT_RDONLY = 0x1
BT_WRITE = 0x2
BT_VS = 0x2
BT_VP = 0x1
def BAT_ESEG(dbatu): return (((uint)(dbatu) >> 28))
MIN_BAT_SIZE = 0x00020000
MAX_BAT_SIZE = 0x10000000
def ntohl(x): return (x)
def ntohs(x): return (x)
def htonl(x): return (x)
def htons(x): return (x)
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_LOCAL = 63
IPPROTO_EON = 80
IPPROTO_BIP = 0x53
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPORT_TIMESERVER = 37
def IN_CLASSA(i): return (((int)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((int)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((int)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((int)(i) & 0xf0000000) == 0xe0000000)
def IN_MULTICAST(i): return IN_CLASSD(i)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
INADDR_UNSPEC_GROUP = 0xe0000000
INADDR_ALLHOSTS_GROUP = 0xe0000001
INADDR_MAX_LOCAL_GROUP = 0xe00000ff
def IN_EXPERIMENTAL(i): return (((int)(i) & 0xe0000000) == 0xe0000000)
def IN_BADCLASS(i): return (((int)(i) & 0xf0000000) == 0xf0000000)
INADDR_ANY = 0x00000000
INADDR_BROADCAST = 0xffffffff
INADDR_LOOPBACK = 0x7f000001
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
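# A hedged illustration of the classful-address helpers above (the address
# is an example value):
#
#   addr = 0xC0A80001            # 192.168.0.1
#   IN_CLASSA(addr)              # -> False, the top bit is set
#   IN_CLASSC(addr)              # -> True, 0b110 prefix
#   addr & IN_CLASSC_NET         # -> 0xC0A80000, the network portion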
| lgpl-3.0 |
amrdraz/brython | www/src/Lib/test/test_importlib/source/test_abc_loader.py | 34 | 32554 | import importlib
from importlib import abc
from .. import abc as testing_abc
from .. import util
from . import util as source_util
import imp
import inspect
import io
import marshal
import os
import sys
import types
import unittest
import warnings
class SourceOnlyLoaderMock(abc.SourceLoader):
# Globals that should be defined for all modules.
source = (b"_ = '::'.join([__name__, __file__, __cached__, __package__, "
b"repr(__loader__)])")
def __init__(self, path):
self.path = path
def get_data(self, path):
assert self.path == path
return self.source
def get_filename(self, fullname):
return self.path
def module_repr(self, module):
return '<module>'
class SourceLoaderMock(SourceOnlyLoaderMock):
source_mtime = 1
def __init__(self, path, magic=imp.get_magic()):
super().__init__(path)
self.bytecode_path = imp.cache_from_source(self.path)
self.source_size = len(self.source)
data = bytearray(magic)
data.extend(importlib._w_long(self.source_mtime))
data.extend(importlib._w_long(self.source_size))
code_object = compile(self.source, self.path, 'exec',
dont_inherit=True)
data.extend(marshal.dumps(code_object))
self.bytecode = bytes(data)
self.written = {}
def get_data(self, path):
if path == self.path:
return super().get_data(path)
elif path == self.bytecode_path:
return self.bytecode
else:
raise IOError
def path_stats(self, path):
assert path == self.path
return {'mtime': self.source_mtime, 'size': self.source_size}
def set_data(self, path, data):
self.written[path] = bytes(data)
return path == self.bytecode_path
class PyLoaderMock(abc.PyLoader):
# Globals that should be defined for all modules.
source = (b"_ = '::'.join([__name__, __file__, __package__, "
b"repr(__loader__)])")
def __init__(self, data):
"""Take a dict of 'module_name: path' pairings.
Paths should have no file extension, allowing packages to be denoted by
ending in '__init__'.
"""
self.module_paths = data
self.path_to_module = {val:key for key,val in data.items()}
def get_data(self, path):
if path not in self.path_to_module:
raise IOError
return self.source
def is_package(self, name):
filename = os.path.basename(self.get_filename(name))
return os.path.splitext(filename)[0] == '__init__'
def source_path(self, name):
try:
return self.module_paths[name]
except KeyError:
raise ImportError
def get_filename(self, name):
"""Silence deprecation warning."""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
path = super().get_filename(name)
assert len(w) == 1
assert issubclass(w[0].category, DeprecationWarning)
return path
def module_repr(self):
return '<module>'
class PyLoaderCompatMock(PyLoaderMock):
"""Mock that matches what is suggested to have a loader that is compatible
from Python 3.1 onwards."""
def get_filename(self, fullname):
try:
return self.module_paths[fullname]
except KeyError:
raise ImportError
def source_path(self, fullname):
try:
return self.get_filename(fullname)
except ImportError:
return None
class PyPycLoaderMock(abc.PyPycLoader, PyLoaderMock):
default_mtime = 1
def __init__(self, source, bc={}):
"""Initialize mock.
'bc' is a dict keyed on a module's name. The value is a dict with
possible keys of 'path', 'mtime', 'magic', and 'bc'. Except for 'path',
each of those keys control if any part of created bytecode is to
deviate from default values.
"""
super().__init__(source)
self.module_bytecode = {}
self.path_to_bytecode = {}
self.bytecode_to_path = {}
for name, data in bc.items():
self.path_to_bytecode[data['path']] = name
self.bytecode_to_path[name] = data['path']
magic = data.get('magic', imp.get_magic())
mtime = importlib._w_long(data.get('mtime', self.default_mtime))
source_size = importlib._w_long(len(self.source) & 0xFFFFFFFF)
if 'bc' in data:
bc = data['bc']
else:
bc = self.compile_bc(name)
self.module_bytecode[name] = magic + mtime + source_size + bc
def compile_bc(self, name):
source_path = self.module_paths.get(name, '<test>') or '<test>'
code = compile(self.source, source_path, 'exec')
return marshal.dumps(code)
def source_mtime(self, name):
if name in self.module_paths:
return self.default_mtime
elif name in self.module_bytecode:
return None
else:
raise ImportError
def bytecode_path(self, name):
try:
return self.bytecode_to_path[name]
except KeyError:
if name in self.module_paths:
return None
else:
raise ImportError
def write_bytecode(self, name, bytecode):
self.module_bytecode[name] = bytecode
return True
def get_data(self, path):
if path in self.path_to_module:
return super().get_data(path)
elif path in self.path_to_bytecode:
name = self.path_to_bytecode[path]
return self.module_bytecode[name]
else:
raise IOError
def is_package(self, name):
try:
return super().is_package(name)
except TypeError:
return '__init__' in self.bytecode_to_path[name]
def get_code(self, name):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
code_object = super().get_code(name)
assert len(w) == 1
assert issubclass(w[0].category, DeprecationWarning)
return code_object
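# A hedged sketch of how these mocks are wired up by the tests below
# (the module name and paths are invented for illustration):
#
#   mock = PyPycLoaderMock({'mod': 'path/to/mod'},
#                          {'mod': {'path': 'path/to/mod.bytecode'}})
#   with util.uncache('mod'):
#       module = mock.load_module('mod')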
class PyLoaderTests(testing_abc.LoaderTests):
"""Tests for importlib.abc.PyLoader."""
mocker = PyLoaderMock
def eq_attrs(self, ob, **kwargs):
for attr, val in kwargs.items():
found = getattr(ob, attr)
self.assertEqual(found, val,
"{} attribute: {} != {}".format(attr, found, val))
def test_module(self):
name = '<module>'
path = os.path.join('', 'path', 'to', 'module')
mock = self.mocker({name: path})
with util.uncache(name):
module = mock.load_module(name)
self.assertIn(name, sys.modules)
self.eq_attrs(module, __name__=name, __file__=path, __package__='',
__loader__=mock)
self.assertTrue(not hasattr(module, '__path__'))
return mock, name
def test_package(self):
name = '<pkg>'
path = os.path.join('path', 'to', name, '__init__')
mock = self.mocker({name: path})
with util.uncache(name):
module = mock.load_module(name)
self.assertIn(name, sys.modules)
self.eq_attrs(module, __name__=name, __file__=path,
__path__=[os.path.dirname(path)], __package__=name,
__loader__=mock)
return mock, name
def test_lacking_parent(self):
name = 'pkg.mod'
path = os.path.join('path', 'to', 'pkg', 'mod')
mock = self.mocker({name: path})
with util.uncache(name):
module = mock.load_module(name)
self.assertIn(name, sys.modules)
self.eq_attrs(module, __name__=name, __file__=path, __package__='pkg',
__loader__=mock)
self.assertFalse(hasattr(module, '__path__'))
return mock, name
def test_module_reuse(self):
name = 'mod'
path = os.path.join('path', 'to', 'mod')
module = imp.new_module(name)
mock = self.mocker({name: path})
with util.uncache(name):
sys.modules[name] = module
loaded_module = mock.load_module(name)
self.assertIs(loaded_module, module)
self.assertIs(sys.modules[name], module)
return mock, name
def test_state_after_failure(self):
name = "mod"
module = imp.new_module(name)
module.blah = None
mock = self.mocker({name: os.path.join('path', 'to', 'mod')})
mock.source = b"1/0"
with util.uncache(name):
sys.modules[name] = module
with self.assertRaises(ZeroDivisionError):
mock.load_module(name)
self.assertIs(sys.modules[name], module)
self.assertTrue(hasattr(module, 'blah'))
return mock
def test_unloadable(self):
name = "mod"
mock = self.mocker({name: os.path.join('path', 'to', 'mod')})
mock.source = b"1/0"
with util.uncache(name):
with self.assertRaises(ZeroDivisionError):
mock.load_module(name)
self.assertNotIn(name, sys.modules)
return mock
class PyLoaderCompatTests(PyLoaderTests):
"""Test that the suggested code to make a loader that is compatible from
Python 3.1 forward works."""
mocker = PyLoaderCompatMock
class PyLoaderInterfaceTests(unittest.TestCase):
"""Tests for importlib.abc.PyLoader to make sure that when source_path()
doesn't return a path everything works as expected."""
def test_no_source_path(self):
# No source path should lead to ImportError.
name = 'mod'
mock = PyLoaderMock({})
with util.uncache(name), self.assertRaises(ImportError):
mock.load_module(name)
def test_source_path_is_None(self):
name = 'mod'
mock = PyLoaderMock({name: None})
with util.uncache(name), self.assertRaises(ImportError):
mock.load_module(name)
def test_get_filename_with_source_path(self):
# get_filename() should return what source_path() returns.
name = 'mod'
path = os.path.join('path', 'to', 'source')
mock = PyLoaderMock({name: path})
with util.uncache(name):
self.assertEqual(mock.get_filename(name), path)
def test_get_filename_no_source_path(self):
# get_filename() should raise ImportError if source_path returns None.
name = 'mod'
mock = PyLoaderMock({name: None})
with util.uncache(name), self.assertRaises(ImportError):
mock.get_filename(name)
class PyPycLoaderTests(PyLoaderTests):
"""Tests for importlib.abc.PyPycLoader."""
mocker = PyPycLoaderMock
@source_util.writes_bytecode_files
def verify_bytecode(self, mock, name):
assert name in mock.module_paths
self.assertIn(name, mock.module_bytecode)
magic = mock.module_bytecode[name][:4]
self.assertEqual(magic, imp.get_magic())
mtime = importlib._r_long(mock.module_bytecode[name][4:8])
self.assertEqual(mtime, 1)
source_size = mock.module_bytecode[name][8:12]
self.assertEqual(len(mock.source) & 0xFFFFFFFF,
importlib._r_long(source_size))
bc = mock.module_bytecode[name][12:]
self.assertEqual(bc, mock.compile_bc(name))
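    # For reference: the 12-byte header validated above is the magic number
    # (4 bytes), a little-endian mtime (4 bytes) and the source size
    # (4 bytes), followed by the marshalled code object. A minimal sketch of
    # unpacking it, assuming a hypothetical `data` bytes object with that
    # layout:
    #
    #     import marshal, struct
    #     magic, mtime, size = struct.unpack('<4sII', data[:12])
    #     code = marshal.loads(data[12:])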
def test_module(self):
mock, name = super().test_module()
self.verify_bytecode(mock, name)
def test_package(self):
mock, name = super().test_package()
self.verify_bytecode(mock, name)
def test_lacking_parent(self):
mock, name = super().test_lacking_parent()
self.verify_bytecode(mock, name)
def test_module_reuse(self):
mock, name = super().test_module_reuse()
self.verify_bytecode(mock, name)
def test_state_after_failure(self):
super().test_state_after_failure()
def test_unloadable(self):
super().test_unloadable()
class PyPycLoaderInterfaceTests(unittest.TestCase):
"""Test for the interface of importlib.abc.PyPycLoader."""
def get_filename_check(self, src_path, bc_path, expect):
name = 'mod'
mock = PyPycLoaderMock({name: src_path}, {name: {'path': bc_path}})
with util.uncache(name):
assert mock.source_path(name) == src_path
assert mock.bytecode_path(name) == bc_path
self.assertEqual(mock.get_filename(name), expect)
def test_filename_with_source_bc(self):
        # When source and bytecode paths are present, return the source path.
self.get_filename_check('source_path', 'bc_path', 'source_path')
def test_filename_with_source_no_bc(self):
# With source but no bc, return source path.
self.get_filename_check('source_path', None, 'source_path')
def test_filename_with_no_source_bc(self):
        # With no source but bc, return the bc path.
self.get_filename_check(None, 'bc_path', 'bc_path')
def test_filename_with_no_source_or_bc(self):
# With no source or bc, raise ImportError.
name = 'mod'
mock = PyPycLoaderMock({name: None}, {name: {'path': None}})
with util.uncache(name), self.assertRaises(ImportError):
mock.get_filename(name)
class SkipWritingBytecodeTests(unittest.TestCase):
"""Test that bytecode is properly handled based on
sys.dont_write_bytecode."""
@source_util.writes_bytecode_files
def run_test(self, dont_write_bytecode):
name = 'mod'
mock = PyPycLoaderMock({name: os.path.join('path', 'to', 'mod')})
sys.dont_write_bytecode = dont_write_bytecode
with util.uncache(name):
mock.load_module(name)
self.assertIsNot(name in mock.module_bytecode, dont_write_bytecode)
def test_no_bytecode_written(self):
self.run_test(True)
def test_bytecode_written(self):
self.run_test(False)
class RegeneratedBytecodeTests(unittest.TestCase):
"""Test that bytecode is regenerated as expected."""
@source_util.writes_bytecode_files
def test_different_magic(self):
# A different magic number should lead to new bytecode.
name = 'mod'
bad_magic = b'\x00\x00\x00\x00'
assert bad_magic != imp.get_magic()
mock = PyPycLoaderMock({name: os.path.join('path', 'to', 'mod')},
{name: {'path': os.path.join('path', 'to',
'mod.bytecode'),
'magic': bad_magic}})
with util.uncache(name):
mock.load_module(name)
self.assertIn(name, mock.module_bytecode)
magic = mock.module_bytecode[name][:4]
self.assertEqual(magic, imp.get_magic())
@source_util.writes_bytecode_files
def test_old_mtime(self):
# Bytecode with an older mtime should be regenerated.
name = 'mod'
old_mtime = PyPycLoaderMock.default_mtime - 1
mock = PyPycLoaderMock({name: os.path.join('path', 'to', 'mod')},
{name: {'path': 'path/to/mod.bytecode', 'mtime': old_mtime}})
with util.uncache(name):
mock.load_module(name)
self.assertIn(name, mock.module_bytecode)
mtime = importlib._r_long(mock.module_bytecode[name][4:8])
self.assertEqual(mtime, PyPycLoaderMock.default_mtime)
class BadBytecodeFailureTests(unittest.TestCase):
"""Test import failures when there is no source and parts of the bytecode
is bad."""
def test_bad_magic(self):
# A bad magic number should lead to an ImportError.
name = 'mod'
bad_magic = b'\x00\x00\x00\x00'
bc = {name:
{'path': os.path.join('path', 'to', 'mod'),
'magic': bad_magic}}
mock = PyPycLoaderMock({name: None}, bc)
with util.uncache(name), self.assertRaises(ImportError) as cm:
mock.load_module(name)
self.assertEqual(cm.exception.name, name)
def test_no_bytecode(self):
# Missing code object bytecode should lead to an EOFError.
name = 'mod'
bc = {name: {'path': os.path.join('path', 'to', 'mod'), 'bc': b''}}
mock = PyPycLoaderMock({name: None}, bc)
with util.uncache(name), self.assertRaises(EOFError):
mock.load_module(name)
def test_bad_bytecode(self):
# Malformed code object bytecode should lead to a ValueError.
name = 'mod'
bc = {name: {'path': os.path.join('path', 'to', 'mod'), 'bc': b'1234'}}
mock = PyPycLoaderMock({name: None}, bc)
with util.uncache(name), self.assertRaises(ValueError):
mock.load_module(name)
def raise_ImportError(*args, **kwargs):
raise ImportError
class MissingPathsTests(unittest.TestCase):
"""Test what happens when a source or bytecode path does not exist (either
from *_path returning None or raising ImportError)."""
def test_source_path_None(self):
# Bytecode should be used when source_path returns None, along with
# __file__ being set to the bytecode path.
name = 'mod'
bytecode_path = 'path/to/mod'
mock = PyPycLoaderMock({name: None}, {name: {'path': bytecode_path}})
with util.uncache(name):
module = mock.load_module(name)
self.assertEqual(module.__file__, bytecode_path)
# Testing for bytecode_path returning None handled by all tests where no
# bytecode initially exists.
def test_all_paths_None(self):
# If all *_path methods return None, raise ImportError.
name = 'mod'
mock = PyPycLoaderMock({name: None})
with util.uncache(name), self.assertRaises(ImportError) as cm:
mock.load_module(name)
self.assertEqual(cm.exception.name, name)
def test_source_path_ImportError(self):
# An ImportError from source_path should trigger an ImportError.
name = 'mod'
mock = PyPycLoaderMock({}, {name: {'path': os.path.join('path', 'to',
'mod')}})
with util.uncache(name), self.assertRaises(ImportError):
mock.load_module(name)
def test_bytecode_path_ImportError(self):
# An ImportError from bytecode_path should trigger an ImportError.
name = 'mod'
mock = PyPycLoaderMock({name: os.path.join('path', 'to', 'mod')})
bad_meth = types.MethodType(raise_ImportError, mock)
mock.bytecode_path = bad_meth
with util.uncache(name), self.assertRaises(ImportError) as cm:
mock.load_module(name)
class SourceLoaderTestHarness(unittest.TestCase):
def setUp(self, *, is_package=True, **kwargs):
self.package = 'pkg'
if is_package:
self.path = os.path.join(self.package, '__init__.py')
self.name = self.package
else:
module_name = 'mod'
            self.path = os.path.join(self.package, '.'.join([module_name, 'py']))
self.name = '.'.join([self.package, module_name])
self.cached = imp.cache_from_source(self.path)
self.loader = self.loader_mock(self.path, **kwargs)
def verify_module(self, module):
self.assertEqual(module.__name__, self.name)
self.assertEqual(module.__file__, self.path)
self.assertEqual(module.__cached__, self.cached)
self.assertEqual(module.__package__, self.package)
self.assertEqual(module.__loader__, self.loader)
values = module._.split('::')
self.assertEqual(values[0], self.name)
self.assertEqual(values[1], self.path)
self.assertEqual(values[2], self.cached)
self.assertEqual(values[3], self.package)
self.assertEqual(values[4], repr(self.loader))
def verify_code(self, code_object):
module = imp.new_module(self.name)
module.__file__ = self.path
module.__cached__ = self.cached
module.__package__ = self.package
module.__loader__ = self.loader
module.__path__ = []
exec(code_object, module.__dict__)
self.verify_module(module)
class SourceOnlyLoaderTests(SourceLoaderTestHarness):
"""Test importlib.abc.SourceLoader for source-only loading.
Reload testing is subsumed by the tests for
importlib.util.module_for_loader.
"""
loader_mock = SourceOnlyLoaderMock
def test_get_source(self):
# Verify the source code is returned as a string.
# If an IOError is raised by get_data then raise ImportError.
expected_source = self.loader.source.decode('utf-8')
self.assertEqual(self.loader.get_source(self.name), expected_source)
def raise_IOError(path):
raise IOError
self.loader.get_data = raise_IOError
with self.assertRaises(ImportError) as cm:
self.loader.get_source(self.name)
self.assertEqual(cm.exception.name, self.name)
def test_is_package(self):
# Properly detect when loading a package.
self.setUp(is_package=False)
self.assertFalse(self.loader.is_package(self.name))
self.setUp(is_package=True)
self.assertTrue(self.loader.is_package(self.name))
self.assertFalse(self.loader.is_package(self.name + '.__init__'))
def test_get_code(self):
# Verify the code object is created.
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
def test_load_module(self):
# Loading a module should set __name__, __loader__, __package__,
# __path__ (for packages), __file__, and __cached__.
# The module should also be put into sys.modules.
with util.uncache(self.name):
module = self.loader.load_module(self.name)
self.verify_module(module)
self.assertEqual(module.__path__, [os.path.dirname(self.path)])
self.assertIn(self.name, sys.modules)
def test_package_settings(self):
        # __package__ needs to be set, while __path__ is set only if the
        # module is a package.
# Testing the values for a package are covered by test_load_module.
self.setUp(is_package=False)
with util.uncache(self.name):
module = self.loader.load_module(self.name)
self.verify_module(module)
            self.assertFalse(hasattr(module, '__path__'))
def test_get_source_encoding(self):
# Source is considered encoded in UTF-8 by default unless otherwise
# specified by an encoding line.
source = "_ = 'ü'"
self.loader.source = source.encode('utf-8')
returned_source = self.loader.get_source(self.name)
self.assertEqual(returned_source, source)
source = "# coding: latin-1\n_ = ü"
self.loader.source = source.encode('latin-1')
returned_source = self.loader.get_source(self.name)
self.assertEqual(returned_source, source)
@unittest.skipIf(sys.dont_write_bytecode, "sys.dont_write_bytecode is true")
class SourceLoaderBytecodeTests(SourceLoaderTestHarness):
"""Test importlib.abc.SourceLoader's use of bytecode.
Source-only testing handled by SourceOnlyLoaderTests.
"""
loader_mock = SourceLoaderMock
def verify_code(self, code_object, *, bytecode_written=False):
super().verify_code(code_object)
if bytecode_written:
self.assertIn(self.cached, self.loader.written)
data = bytearray(imp.get_magic())
data.extend(importlib._w_long(self.loader.source_mtime))
data.extend(importlib._w_long(self.loader.source_size))
data.extend(marshal.dumps(code_object))
self.assertEqual(self.loader.written[self.cached], bytes(data))
def test_code_with_everything(self):
# When everything should work.
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
def test_no_bytecode(self):
# If no bytecode exists then move on to the source.
self.loader.bytecode_path = "<does not exist>"
# Sanity check
with self.assertRaises(IOError):
bytecode_path = imp.cache_from_source(self.path)
self.loader.get_data(bytecode_path)
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
def test_code_bad_timestamp(self):
# Bytecode is only used when the timestamp matches the source EXACTLY.
for source_mtime in (0, 2):
assert source_mtime != self.loader.source_mtime
original = self.loader.source_mtime
self.loader.source_mtime = source_mtime
# If bytecode is used then EOFError would be raised by marshal.
self.loader.bytecode = self.loader.bytecode[8:]
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
self.loader.source_mtime = original
def test_code_bad_magic(self):
# Skip over bytecode with a bad magic number.
self.setUp(magic=b'0000')
# If bytecode is used then EOFError would be raised by marshal.
self.loader.bytecode = self.loader.bytecode[8:]
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
def test_dont_write_bytecode(self):
# Bytecode is not written if sys.dont_write_bytecode is true.
# Can assume it is false already thanks to the skipIf class decorator.
try:
sys.dont_write_bytecode = True
self.loader.bytecode_path = "<does not exist>"
code_object = self.loader.get_code(self.name)
self.assertNotIn(self.cached, self.loader.written)
finally:
sys.dont_write_bytecode = False
def test_no_set_data(self):
# If set_data is not defined, one can still read bytecode.
self.setUp(magic=b'0000')
original_set_data = self.loader.__class__.set_data
try:
del self.loader.__class__.set_data
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
finally:
self.loader.__class__.set_data = original_set_data
def test_set_data_raises_exceptions(self):
# Raising NotImplementedError or IOError is okay for set_data.
def raise_exception(exc):
def closure(*args, **kwargs):
raise exc
return closure
self.setUp(magic=b'0000')
self.loader.set_data = raise_exception(NotImplementedError)
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
class SourceLoaderGetSourceTests(unittest.TestCase):
"""Tests for importlib.abc.SourceLoader.get_source()."""
def test_default_encoding(self):
# Should have no problems with UTF-8 text.
name = 'mod'
mock = SourceOnlyLoaderMock('mod.file')
source = 'x = "ü"'
mock.source = source.encode('utf-8')
returned_source = mock.get_source(name)
self.assertEqual(returned_source, source)
def test_decoded_source(self):
# Decoding should work.
name = 'mod'
mock = SourceOnlyLoaderMock("mod.file")
source = "# coding: Latin-1\nx='ü'"
assert source.encode('latin-1') != source.encode('utf-8')
mock.source = source.encode('latin-1')
returned_source = mock.get_source(name)
self.assertEqual(returned_source, source)
def test_universal_newlines(self):
# PEP 302 says universal newlines should be used.
name = 'mod'
mock = SourceOnlyLoaderMock('mod.file')
source = "x = 42\r\ny = -13\r\n"
mock.source = source.encode('utf-8')
expect = io.IncrementalNewlineDecoder(None, True).decode(source)
self.assertEqual(mock.get_source(name), expect)
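    # For example, io.IncrementalNewlineDecoder(None, True).decode(source)
    # turns "x = 42\r\ny = -13\r\n" into "x = 42\ny = -13\n": both '\r\n'
    # and lone '\r' are translated to '\n'.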
class AbstractMethodImplTests(unittest.TestCase):
"""Test the concrete abstractmethod implementations."""
class MetaPathFinder(abc.MetaPathFinder):
def find_module(self, fullname, path):
super().find_module(fullname, path)
class PathEntryFinder(abc.PathEntryFinder):
def find_module(self, _):
super().find_module(_)
def find_loader(self, _):
super().find_loader(_)
class Finder(abc.Finder):
def find_module(self, fullname, path):
super().find_module(fullname, path)
class Loader(abc.Loader):
def load_module(self, fullname):
super().load_module(fullname)
def module_repr(self, module):
super().module_repr(module)
class ResourceLoader(Loader, abc.ResourceLoader):
def get_data(self, _):
super().get_data(_)
class InspectLoader(Loader, abc.InspectLoader):
def is_package(self, _):
super().is_package(_)
def get_code(self, _):
super().get_code(_)
def get_source(self, _):
super().get_source(_)
class ExecutionLoader(InspectLoader, abc.ExecutionLoader):
def get_filename(self, _):
super().get_filename(_)
class SourceLoader(ResourceLoader, ExecutionLoader, abc.SourceLoader):
pass
class PyLoader(ResourceLoader, InspectLoader, abc.PyLoader):
def source_path(self, _):
super().source_path(_)
class PyPycLoader(PyLoader, abc.PyPycLoader):
def bytecode_path(self, _):
super().bytecode_path(_)
def source_mtime(self, _):
super().source_mtime(_)
def write_bytecode(self, _, _2):
super().write_bytecode(_, _2)
def raises_NotImplementedError(self, ins, *args):
for method_name in args:
method = getattr(ins, method_name)
arg_count = len(inspect.getfullargspec(method)[0]) - 1
            dummy_args = [''] * arg_count
            try:
                method(*dummy_args)
except NotImplementedError:
pass
else:
msg = "{}.{} did not raise NotImplementedError"
self.fail(msg.format(ins.__class__.__name__, method_name))
def test_Loader(self):
self.raises_NotImplementedError(self.Loader(), 'load_module')
# XXX misplaced; should be somewhere else
def test_Finder(self):
self.raises_NotImplementedError(self.Finder(), 'find_module')
def test_ResourceLoader(self):
self.raises_NotImplementedError(self.ResourceLoader(), 'load_module',
'get_data')
def test_InspectLoader(self):
self.raises_NotImplementedError(self.InspectLoader(), 'load_module',
'is_package', 'get_code', 'get_source')
def test_ExecutionLoader(self):
self.raises_NotImplementedError(self.ExecutionLoader(), 'load_module',
'is_package', 'get_code', 'get_source',
'get_filename')
def test_SourceLoader(self):
ins = self.SourceLoader()
# Required abstractmethods.
self.raises_NotImplementedError(ins, 'get_filename', 'get_data')
# Optional abstractmethods.
        self.raises_NotImplementedError(ins, 'path_stats', 'set_data')
def test_PyLoader(self):
self.raises_NotImplementedError(self.PyLoader(), 'source_path',
'get_data', 'is_package')
def test_PyPycLoader(self):
self.raises_NotImplementedError(self.PyPycLoader(), 'source_path',
'source_mtime', 'bytecode_path',
'write_bytecode')
def test_main():
from test.support import run_unittest
run_unittest(PyLoaderTests, PyLoaderCompatTests,
PyLoaderInterfaceTests,
PyPycLoaderTests, PyPycLoaderInterfaceTests,
SkipWritingBytecodeTests, RegeneratedBytecodeTests,
BadBytecodeFailureTests, MissingPathsTests,
SourceOnlyLoaderTests,
SourceLoaderBytecodeTests,
SourceLoaderGetSourceTests,
AbstractMethodImplTests)
if __name__ == '__main__':
test_main()
| bsd-3-clause |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.7.2/Lib/distutils/version.py | 259 | 11433 | #
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# $Id$
#
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
an equivalent string -- ie. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* __cmp__ compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
import string, re
from types import StringType
class Version:
"""Abstract base class for version numbering classes. Just provides
constructor (__init__) and reproducer (__repr__), because those
seem to be the same for all version numbering classes.
"""
def __init__ (self, vstring=None):
if vstring:
self.parse(vstring)
def __repr__ (self):
return "%s ('%s')" % (self.__class__.__name__, str(self))
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# __cmp__ (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
class StrictVersion (Version):
"""Version numbering for anal retentives and software idealists.
Implements the standard interface for version number classes as
described above. A version number consists of two or three
dot-separated numeric components, with an optional "pre-release" tag
on the end. The pre-release tag consists of the letter 'a' or 'b'
followed by a number. If the numeric components of two version
numbers are equal, then one with a pre-release tag will always
be deemed earlier (lesser) than one without.
The following are valid version numbers (shown in the order that
would be obtained by sorting according to the supplied cmp function):
0.4 0.4.0 (these two are equivalent)
0.4.1
0.5a1
0.5b3
0.5
0.9.6
1.0
1.0.4a3
1.0.4b1
1.0.4
The following are examples of invalid version numbers:
1
2.7.2.2
1.3.a4
1.3pl1
1.3c4
The rationale for this version numbering system will be explained
in the distutils documentation.
"""
version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
re.VERBOSE)
def parse (self, vstring):
match = self.version_re.match(vstring)
if not match:
raise ValueError, "invalid version number '%s'" % vstring
(major, minor, patch, prerelease, prerelease_num) = \
match.group(1, 2, 4, 5, 6)
if patch:
self.version = tuple(map(string.atoi, [major, minor, patch]))
else:
self.version = tuple(map(string.atoi, [major, minor]) + [0])
if prerelease:
self.prerelease = (prerelease[0], string.atoi(prerelease_num))
else:
self.prerelease = None
def __str__ (self):
if self.version[2] == 0:
vstring = string.join(map(str, self.version[0:2]), '.')
else:
vstring = string.join(map(str, self.version), '.')
if self.prerelease:
vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
return vstring
def __cmp__ (self, other):
if isinstance(other, StringType):
other = StrictVersion(other)
compare = cmp(self.version, other.version)
if (compare == 0): # have to compare prerelease
# case 1: neither has prerelease; they're equal
# case 2: self has prerelease, other doesn't; other is greater
# case 3: self doesn't have prerelease, other does: self is greater
# case 4: both have prerelease: must compare them!
if (not self.prerelease and not other.prerelease):
return 0
elif (self.prerelease and not other.prerelease):
return -1
elif (not self.prerelease and other.prerelease):
return 1
elif (self.prerelease and other.prerelease):
return cmp(self.prerelease, other.prerelease)
else: # numeric versions don't match --
return compare # prerelease stuff doesn't matter
# end class StrictVersion
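# A quick illustration of the ordering implemented by __cmp__ above:
#
#     >>> StrictVersion('0.5a1') < StrictVersion('0.5')
#     True
#     >>> StrictVersion('1.0.4a3') < StrictVersion('1.0.4b1') < StrictVersion('1.0.4')
#     True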
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion (Version):
"""Version numbering for anarchists and software realists.
Implements the standard interface for version number classes as
described above. A version number consists of a series of numbers,
separated by either periods or strings of letters. When comparing
version numbers, the numeric components will be compared
numerically, and the alphabetic components lexically. The following
are all valid version numbers, in no particular order:
1.5.1
1.5.2b2
161
3.10a
8.02
3.4j
1996.07.12
3.2.pl0
3.1.1.6
2g6
11g
0.960923
2.2beta29
1.13++
5.5.kw
2.0b1pl0
In fact, there is no such thing as an invalid version number under
this scheme; the rules for comparison are simple and predictable,
but may not always give the results you want (for some definition
of "want").
"""
component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
def __init__ (self, vstring=None):
if vstring:
self.parse(vstring)
def parse (self, vstring):
# I've given up on thinking I can reconstruct the version string
# from the parsed tuple -- so I just store the string here for
# use by __str__
self.vstring = vstring
components = filter(lambda x: x and x != '.',
self.component_re.split(vstring))
for i in range(len(components)):
try:
components[i] = int(components[i])
except ValueError:
pass
self.version = components
def __str__ (self):
return self.vstring
def __repr__ (self):
return "LooseVersion ('%s')" % str(self)
def __cmp__ (self, other):
if isinstance(other, StringType):
other = LooseVersion(other)
return cmp(self.version, other.version)
# end class LooseVersion
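# A quick illustration of the parsing and ordering implemented above:
#
#     >>> LooseVersion('1.5.2b2').version
#     [1, 5, 2, 'b', 2]
#     >>> LooseVersion('1.5.1') < LooseVersion('1.5.2b2')
#     True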
| mit |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/scipy/spatial/distance.py | 15 | 68774 | """
=====================================================
Distance computations (:mod:`scipy.spatial.distance`)
=====================================================
.. sectionauthor:: Damian Eads
Function Reference
------------------
Distance matrix computation from a collection of raw observation vectors
stored in a rectangular array.
.. autosummary::
:toctree: generated/
pdist -- pairwise distances between observation vectors.
cdist -- distances between two collections of observation vectors
squareform -- convert distance matrix to a condensed one and vice versa
Predicates for checking the validity of distance matrices, both
condensed and redundant. Also contained in this module are functions
for computing the number of observations in a distance matrix.
.. autosummary::
:toctree: generated/
is_valid_dm -- checks for a valid distance matrix
is_valid_y -- checks for a valid condensed distance matrix
num_obs_dm -- # of observations in a distance matrix
num_obs_y -- # of observations in a condensed distance matrix
Distance functions between two numeric vectors ``u`` and ``v``. Computing
distances over a large collection of vectors is inefficient for these
functions. Use ``pdist`` for this purpose.
.. autosummary::
:toctree: generated/
braycurtis -- the Bray-Curtis distance.
canberra -- the Canberra distance.
chebyshev -- the Chebyshev distance.
cityblock -- the Manhattan distance.
correlation -- the Correlation distance.
cosine -- the Cosine distance.
euclidean -- the Euclidean distance.
mahalanobis -- the Mahalanobis distance.
minkowski -- the Minkowski distance.
seuclidean -- the normalized Euclidean distance.
sqeuclidean -- the squared Euclidean distance.
wminkowski -- the weighted Minkowski distance.
Distance functions between two boolean vectors (representing sets) ``u`` and
``v``. As in the case of numerical vectors, ``pdist`` is more efficient for
computing the distances between all pairs.
.. autosummary::
:toctree: generated/
dice -- the Dice dissimilarity.
hamming -- the Hamming distance.
jaccard -- the Jaccard distance.
kulsinski -- the Kulsinski distance.
matching -- the matching dissimilarity.
rogerstanimoto -- the Rogers-Tanimoto dissimilarity.
russellrao -- the Russell-Rao dissimilarity.
sokalmichener -- the Sokal-Michener dissimilarity.
sokalsneath -- the Sokal-Sneath dissimilarity.
yule -- the Yule dissimilarity.
:func:`hamming` also operates over discrete numerical vectors.
"""
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
from __future__ import division, print_function, absolute_import
__all__ = [
'braycurtis',
'canberra',
'cdist',
'chebyshev',
'cityblock',
'correlation',
'cosine',
'dice',
'euclidean',
'hamming',
'is_valid_dm',
'is_valid_y',
'jaccard',
'kulsinski',
'mahalanobis',
'matching',
'minkowski',
'num_obs_dm',
'num_obs_y',
'pdist',
'rogerstanimoto',
'russellrao',
'seuclidean',
'sokalmichener',
'sokalsneath',
'sqeuclidean',
'squareform',
'wminkowski',
'yule'
]
import warnings
import numpy as np
from scipy._lib.six import callable, string_types
from scipy._lib.six import xrange
from . import _distance_wrap
from ..linalg import norm
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _convert_to_bool(X):
if X.dtype != bool:
X = X.astype(bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def _validate_vector(u, dtype=None):
# XXX Is order='c' really necessary?
u = np.asarray(u, dtype=dtype, order='c').squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = np.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
    return u
def _row_norms(X):
    # Row-wise Euclidean norms of a 2-D array; used by the 'cosine' and
    # 'correlation' branches of pdist below.
    norms = np.einsum('ij,ij->i', X, X, dtype=np.double)
    return np.sqrt(norms, out=norms)
def minkowski(u, v, p):
"""
Computes the Minkowski distance between two 1-D arrays.
The Minkowski distance between 1-D arrays `u` and `v`,
is defined as
.. math::
{||u-v||}_p = (\\sum{|u_i - v_i|^p})^{1/p}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
p : int
The order of the norm of the difference :math:`{||u-v||}_p`.
Returns
-------
d : double
The Minkowski distance between vectors `u` and `v`.
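    Examples
    --------
    With ``p=2`` this is the Euclidean norm of the difference; with ``p=1``
    it is the city block distance:
    >>> from scipy.spatial import distance
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 2)
    1.4142135623730951
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 1)
    2.0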
"""
u = _validate_vector(u)
v = _validate_vector(v)
if p < 1:
raise ValueError("p must be at least 1")
dist = norm(u - v, ord=p)
return dist
def wminkowski(u, v, p, w):
"""
Computes the weighted Minkowski distance between two 1-D arrays.
The weighted Minkowski distance between `u` and `v`, defined as
.. math::
\\left(\\sum{(|w_i (u_i - v_i)|^p)}\\right)^{1/p}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
p : int
The order of the norm of the difference :math:`{||u-v||}_p`.
w : (N,) array_like
The weight vector.
Returns
-------
wminkowski : double
The weighted Minkowski distance between vectors `u` and `v`.
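    Examples
    --------
    Scaling every weight scales the distance by the same factor; compare
    with the unweighted ``minkowski`` example:
    >>> from scipy.spatial import distance
    >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 2, [2, 2, 2])
    2.8284271247461903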
"""
u = _validate_vector(u)
v = _validate_vector(v)
w = _validate_vector(w)
if p < 1:
raise ValueError("p must be at least 1")
dist = norm(w * (u - v), ord=p)
return dist
def euclidean(u, v):
"""
Computes the Euclidean distance between two 1-D arrays.
The Euclidean distance between 1-D arrays `u` and `v`, is defined as
.. math::
{||u-v||}_2
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
euclidean : double
The Euclidean distance between vectors `u` and `v`.
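    Examples
    --------
    A 3-4-5 right triangle:
    >>> from scipy.spatial import distance
    >>> distance.euclidean([3, 0], [0, 4])
    5.0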
"""
u = _validate_vector(u)
v = _validate_vector(v)
dist = norm(u - v)
return dist
def sqeuclidean(u, v):
"""
Computes the squared Euclidean distance between two 1-D arrays.
The squared Euclidean distance between `u` and `v` is defined as
.. math::
{||u-v||}_2^2.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
sqeuclidean : double
The squared Euclidean distance between vectors `u` and `v`.
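    Examples
    --------
    The square of the ``euclidean`` example above:
    >>> from scipy.spatial import distance
    >>> distance.sqeuclidean([3, 0], [0, 4])
    25.0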
"""
# Preserve float dtypes, but convert everything else to np.float64
# for stability.
utype, vtype = None, None
if not (hasattr(u, "dtype") and np.issubdtype(u.dtype, np.inexact)):
utype = np.float64
if not (hasattr(v, "dtype") and np.issubdtype(v.dtype, np.inexact)):
vtype = np.float64
u = _validate_vector(u, dtype=utype)
v = _validate_vector(v, dtype=vtype)
u_v = u - v
return np.dot(u_v, u_v)
def cosine(u, v):
"""
Computes the Cosine distance between 1-D arrays.
The Cosine distance between `u` and `v`, is defined as
.. math::
1 - \\frac{u \\cdot v}
{||u||_2 ||v||_2}.
where :math:`u \\cdot v` is the dot product of :math:`u` and
:math:`v`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
cosine : double
The Cosine distance between vectors `u` and `v`.
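    Examples
    --------
    Orthogonal vectors are at distance 1, opposite vectors at distance 2:
    >>> from scipy.spatial import distance
    >>> distance.cosine([1, 0], [0, 1])
    1.0
    >>> distance.cosine([1, 0], [-1, 0])
    2.0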
"""
u = _validate_vector(u)
v = _validate_vector(v)
dist = 1.0 - np.dot(u, v) / (norm(u) * norm(v))
return dist
def correlation(u, v):
"""
Computes the correlation distance between two 1-D arrays.
The correlation distance between `u` and `v`, is
defined as
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{u}` is the mean of the elements of `u`
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
correlation : double
        The correlation distance between 1-D arrays `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
umu = u.mean()
vmu = v.mean()
um = u - umu
vm = v - vmu
dist = 1.0 - np.dot(um, vm) / (norm(um) * norm(vm))
return dist
def hamming(u, v):
"""
Computes the Hamming distance between two 1-D arrays.
The Hamming distance between 1-D arrays `u` and `v`, is simply the
proportion of disagreeing components in `u` and `v`. If `u` and `v` are
boolean vectors, the Hamming distance is
.. math::
\\frac{c_{01} + c_{10}}{n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
hamming : double
The Hamming distance between vectors `u` and `v`.
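    Examples
    --------
    Two of the four components disagree here:
    >>> from scipy.spatial import distance
    >>> distance.hamming([1, 0, 0, 1], [0, 1, 0, 1])
    0.5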
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.shape != v.shape:
raise ValueError('The 1d arrays must have equal lengths.')
return (u != v).mean()
def jaccard(u, v):
"""
Computes the Jaccard-Needham dissimilarity between two boolean 1-D arrays.
The Jaccard-Needham dissimilarity between 1-D boolean arrays `u` and `v`,
is defined as
.. math::
\\frac{c_{TF} + c_{FT}}
{c_{TT} + c_{FT} + c_{TF}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
jaccard : double
The Jaccard distance between vectors `u` and `v`.
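    Examples
    --------
    Disjoint supports give the maximal distance of 1:
    >>> from scipy.spatial import distance
    >>> distance.jaccard([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.jaccard([1, 1, 0], [1, 0, 0])
    0.5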
"""
u = _validate_vector(u)
v = _validate_vector(v)
dist = (np.double(np.bitwise_and((u != v),
np.bitwise_or(u != 0, v != 0)).sum())
/ np.double(np.bitwise_or(u != 0, v != 0).sum()))
return dist
def kulsinski(u, v):
"""
Computes the Kulsinski dissimilarity between two boolean 1-D arrays.
The Kulsinski dissimilarity between two boolean 1-D arrays `u` and `v`,
is defined as
.. math::
\\frac{c_{TF} + c_{FT} - c_{TT} + n}
{c_{FT} + c_{TF} + n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
kulsinski : double
The Kulsinski distance between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
n = float(len(u))
(nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
return (ntf + nft - ntt + n) / (ntf + nft + n)
def seuclidean(u, v, V):
"""
Returns the standardized Euclidean distance between two 1-D arrays.
The standardized Euclidean distance between `u` and `v`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
V : (N,) array_like
        `V` is a 1-D array of component variances. It is usually computed
        among a larger collection of vectors.
Returns
-------
seuclidean : double
The standardized Euclidean distance between vectors `u` and `v`.
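    Examples
    --------
    With unit variances this reduces to the plain Euclidean distance:
    >>> from scipy.spatial import distance
    >>> distance.seuclidean([1, 0], [0, 1], [1, 1])
    1.4142135623730951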
"""
u = _validate_vector(u)
v = _validate_vector(v)
V = _validate_vector(V, dtype=np.float64)
if V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]:
raise TypeError('V must be a 1-D array of the same dimension '
'as u and v.')
return np.sqrt(((u - v) ** 2 / V).sum())
def cityblock(u, v):
"""
Computes the City Block (Manhattan) distance.
Computes the Manhattan distance between two 1-D arrays `u` and `v`,
which is defined as
.. math::
\\sum_i {\\left| u_i - v_i \\right|}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
cityblock : double
The City Block (Manhattan) distance between vectors `u` and `v`.
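    Examples
    --------
    The coordinate-wise absolute differences below are 3 and 4:
    >>> from scipy.spatial import distance
    >>> distance.cityblock([1, 2], [4, 6])
    7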
"""
u = _validate_vector(u)
v = _validate_vector(v)
return abs(u - v).sum()
def mahalanobis(u, v, VI):
"""
Computes the Mahalanobis distance between two 1-D arrays.
The Mahalanobis distance between 1-D arrays `u` and `v`, is defined as
.. math::
\\sqrt{ (u-v) V^{-1} (u-v)^T }
where ``V`` is the covariance matrix. Note that the argument `VI`
is the inverse of ``V``.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
VI : ndarray
The inverse of the covariance matrix.
Returns
-------
mahalanobis : double
The Mahalanobis distance between vectors `u` and `v`.
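    Examples
    --------
    With the identity matrix as ``VI`` this reduces to the Euclidean
    distance:
    >>> from scipy.spatial import distance
    >>> distance.mahalanobis([1, 0], [0, 1], [[1, 0], [0, 1]])
    1.4142135623730951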
"""
u = _validate_vector(u)
v = _validate_vector(v)
VI = np.atleast_2d(VI)
delta = u - v
m = np.dot(np.dot(delta, VI), delta)
return np.sqrt(m)
def chebyshev(u, v):
"""
Computes the Chebyshev distance.
Computes the Chebyshev distance between two 1-D arrays `u` and `v`,
which is defined as
.. math::
\\max_i {|u_i-v_i|}.
Parameters
----------
u : (N,) array_like
Input vector.
v : (N,) array_like
Input vector.
Returns
-------
chebyshev : double
The Chebyshev distance between vectors `u` and `v`.
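    Examples
    --------
    The largest coordinate-wise difference below is 6:
    >>> from scipy.spatial import distance
    >>> distance.chebyshev([1, 2, 3], [4, 8, 6])
    6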
"""
u = _validate_vector(u)
v = _validate_vector(v)
return max(abs(u - v))
def braycurtis(u, v):
"""
Computes the Bray-Curtis distance between two 1-D arrays.
Bray-Curtis distance is defined as
.. math::
\\sum{|u_i-v_i|} / \\sum{|u_i+v_i|}
The Bray-Curtis distance is in the range [0, 1] if all coordinates are
positive, and is undefined if the inputs are of length zero.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
braycurtis : double
The Bray-Curtis distance between 1-D arrays `u` and `v`.
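    Examples
    --------
    Vectors with disjoint support are at the maximal distance of 1:
    >>> from scipy.spatial import distance
    >>> distance.braycurtis([1, 0], [0, 1])
    1.0
    >>> distance.braycurtis([1, 0], [1, 2])
    0.5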
"""
u = _validate_vector(u)
v = _validate_vector(v, dtype=np.float64)
return abs(u - v).sum() / abs(u + v).sum()
def canberra(u, v):
"""
Computes the Canberra distance between two 1-D arrays.
The Canberra distance is defined as
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
canberra : double
The Canberra distance between vectors `u` and `v`.
Notes
-----
    When `u[i]` and `v[i]` are 0 for a given i, the fraction 0/0 = 0 is
    used in the calculation.
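    Examples
    --------
    Each nonzero disagreeing component contributes 1 to the sum:
    >>> from scipy.spatial import distance
    >>> distance.canberra([1, 0, 0], [0, 1, 0])
    2.0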
"""
u = _validate_vector(u)
v = _validate_vector(v, dtype=np.float64)
olderr = np.seterr(invalid='ignore')
try:
d = np.nansum(abs(u - v) / (abs(u) + abs(v)))
finally:
np.seterr(**olderr)
return d
def _nbool_correspond_all(u, v):
if u.dtype != v.dtype:
raise TypeError("Arrays being compared must be of the same data type.")
if u.dtype == int or u.dtype == np.float_ or u.dtype == np.double:
not_u = 1.0 - u
not_v = 1.0 - v
nff = (not_u * not_v).sum()
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
ntt = (u * v).sum()
elif u.dtype == bool:
not_u = ~u
not_v = ~v
nff = (not_u & not_v).sum()
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
ntt = (u & v).sum()
else:
raise TypeError("Arrays being compared have unknown type.")
return (nff, nft, ntf, ntt)
def _nbool_correspond_ft_tf(u, v):
if u.dtype == int or u.dtype == np.float_ or u.dtype == np.double:
not_u = 1.0 - u
not_v = 1.0 - v
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
else:
not_u = ~u
not_v = ~v
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
return (nft, ntf)
def yule(u, v):
"""
Computes the Yule dissimilarity between two boolean 1-D arrays.
The Yule dissimilarity is defined as
.. math::
\\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
yule : double
The Yule dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
(nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
return float(2.0 * ntf * nft) / float(ntt * nff + ntf * nft)
def matching(u, v):
"""
Computes the Hamming distance between two boolean 1-D arrays.
This is a deprecated synonym for :func:`hamming`.
"""
return hamming(u, v)
def dice(u, v):
"""
Computes the Dice dissimilarity between two boolean 1-D arrays.
The Dice dissimilarity between `u` and `v`, is
.. math::
\\frac{c_{TF} + c_{FT}}
{2c_{TT} + c_{FT} + c_{TF}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) ndarray, bool
Input 1-D array.
v : (N,) ndarray, bool
Input 1-D array.
Returns
-------
dice : double
The Dice dissimilarity between 1-D arrays `u` and `v`.
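    Examples
    --------
    One agreement in the true entries and two mismatches:
    >>> from scipy.spatial import distance
    >>> distance.dice([True, False, True], [True, True, False])
    0.5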
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == bool:
ntt = (u & v).sum()
else:
ntt = (u * v).sum()
(nft, ntf) = _nbool_correspond_ft_tf(u, v)
return float(ntf + nft) / float(2.0 * ntt + ntf + nft)
def rogerstanimoto(u, v):
"""
Computes the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.
The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays
`u` and `v`, is defined as
.. math::
\\frac{R}
{c_{TT} + c_{FF} + R}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
rogerstanimoto : double
The Rogers-Tanimoto dissimilarity between vectors
`u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
(nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft)))
def russellrao(u, v):
"""
Computes the Russell-Rao dissimilarity between two boolean 1-D arrays.
The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and
`v`, is defined as
.. math::
\\frac{n - c_{TT}}
{n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
russellrao : double
The Russell-Rao dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == bool:
ntt = (u & v).sum()
else:
ntt = (u * v).sum()
return float(len(u) - ntt) / float(len(u))
def sokalmichener(u, v):
"""
Computes the Sokal-Michener dissimilarity between two boolean 1-D arrays.
The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`,
is defined as
.. math::
\\frac{R}
{S + R}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
:math:`S = c_{FF} + c_{TT}`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
sokalmichener : double
The Sokal-Michener dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == bool:
ntt = (u & v).sum()
nff = (~u & ~v).sum()
else:
ntt = (u * v).sum()
nff = ((1.0 - u) * (1.0 - v)).sum()
(nft, ntf) = _nbool_correspond_ft_tf(u, v)
return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))
def sokalsneath(u, v):
"""
Computes the Sokal-Sneath dissimilarity between two boolean 1-D arrays.
The Sokal-Sneath dissimilarity between `u` and `v`,
.. math::
\\frac{R}
{c_{TT} + R}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
sokalsneath : double
The Sokal-Sneath dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == bool:
ntt = (u & v).sum()
else:
ntt = (u * v).sum()
(nft, ntf) = _nbool_correspond_ft_tf(u, v)
denom = ntt + 2.0 * (ntf + nft)
if denom == 0:
raise ValueError('Sokal-Sneath dissimilarity is not defined for '
'vectors that are entirely false.')
return float(2.0 * (ntf + nft)) / denom
# Registry of "simple" distance metrics' pdist and cdist implementations,
# meaning the ones that accept one dtype and have no additional arguments.
_SIMPLE_CDIST = {}
_SIMPLE_PDIST = {}
for names, wrap_name in [
(['braycurtis'], "bray_curtis"),
(['canberra'], "canberra"),
(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch'], "chebyshev"),
(["cityblock", "cblock", "cb", "c"], "city_block"),
(["euclidean", "euclid", "eu", "e"], "euclidean"),
(["sqeuclidean", "sqe", "sqeuclid"], "sqeuclidean"),
]:
cdist_fn = getattr(_distance_wrap, "cdist_%s_wrap" % wrap_name)
pdist_fn = getattr(_distance_wrap, "pdist_%s_wrap" % wrap_name)
for name in names:
_SIMPLE_CDIST[name] = _convert_to_double, cdist_fn
_SIMPLE_PDIST[name] = _convert_to_double, pdist_fn
for name in ["dice", "kulsinski", "matching", "rogerstanimoto", "russellrao",
"sokalmichener", "sokalsneath", "yule"]:
wrap_name = "hamming" if name == "matching" else name
cdist_fn = getattr(_distance_wrap, "cdist_%s_bool_wrap" % wrap_name)
_SIMPLE_CDIST[name] = _convert_to_bool, cdist_fn
pdist_fn = getattr(_distance_wrap, "pdist_%s_bool_wrap" % wrap_name)
_SIMPLE_PDIST[name] = _convert_to_bool, pdist_fn
def pdist(X, metric='euclidean', p=2, w=None, V=None, VI=None):
"""
Pairwise distances between observations in n-dimensional space.
The following are common calling conventions.
1. ``Y = pdist(X, 'euclidean')``
Computes the distance between m points using Euclidean distance
(2-norm) as the distance metric between the points. The points
are arranged as m n-dimensional row vectors in the matrix X.
2. ``Y = pdist(X, 'minkowski', p)``
Computes the distances using the Minkowski distance
:math:`||u-v||_p` (p-norm) where :math:`p \\geq 1`.
3. ``Y = pdist(X, 'cityblock')``
Computes the city block or Manhattan distance between the
points.
4. ``Y = pdist(X, 'seuclidean', V=None)``
Computes the standardized Euclidean distance. The standardized
Euclidean distance between two n-vectors ``u`` and ``v`` is
.. math::
          \\sqrt{\\sum {(u_i-v_i)^2 / V[i]}}
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
automatically computed.
5. ``Y = pdist(X, 'sqeuclidean')``
Computes the squared Euclidean distance :math:`||u-v||_2^2` between
the vectors.
6. ``Y = pdist(X, 'cosine')``
Computes the cosine distance between vectors u and v,
.. math::
1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
:math:`u \\cdot v` is the dot product of ``u`` and ``v``.
7. ``Y = pdist(X, 'correlation')``
Computes the correlation distance between vectors u and v. This is
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{v}` is the mean of the elements of vector v,
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = pdist(X, 'hamming')``
Computes the normalized Hamming distance, or the proportion of
those vector elements between two n-vectors ``u`` and ``v``
which disagree. To save memory, the matrix ``X`` can be of type
boolean.
9. ``Y = pdist(X, 'jaccard')``
Computes the Jaccard distance between the points. Given two
vectors, ``u`` and ``v``, the Jaccard distance is the
proportion of those elements ``u[i]`` and ``v[i]`` that
disagree.
10. ``Y = pdist(X, 'chebyshev')``
Computes the Chebyshev distance between the points. The
Chebyshev distance between two n-vectors ``u`` and ``v`` is the
maximum norm-1 distance between their respective elements. More
precisely, the distance is given by
.. math::
d(u,v) = \\max_i {|u_i-v_i|}
11. ``Y = pdist(X, 'canberra')``
Computes the Canberra distance between the points. The
Canberra distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}
12. ``Y = pdist(X, 'braycurtis')``
Computes the Bray-Curtis distance between the points. The
Bray-Curtis distance between two points ``u`` and ``v`` is
.. math::
          d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
                         {\\sum_i {|u_i+v_i|}}
13. ``Y = pdist(X, 'mahalanobis', VI=None)``
Computes the Mahalanobis distance between the points. The
Mahalanobis distance between two points ``u`` and ``v`` is
:math:`(u-v)(1/V)(u-v)^T` where :math:`(1/V)` (the ``VI``
variable) is the inverse covariance. If ``VI`` is not None,
``VI`` will be used as the inverse covariance matrix.
14. ``Y = pdist(X, 'yule')``
Computes the Yule distance between each pair of boolean
vectors. (see yule function documentation)
15. ``Y = pdist(X, 'matching')``
Synonym for 'hamming'.
16. ``Y = pdist(X, 'dice')``
Computes the Dice distance between each pair of boolean
vectors. (see dice function documentation)
17. ``Y = pdist(X, 'kulsinski')``
Computes the Kulsinski distance between each pair of
boolean vectors. (see kulsinski function documentation)
18. ``Y = pdist(X, 'rogerstanimoto')``
Computes the Rogers-Tanimoto distance between each pair of
boolean vectors. (see rogerstanimoto function documentation)
19. ``Y = pdist(X, 'russellrao')``
Computes the Russell-Rao distance between each pair of
boolean vectors. (see russellrao function documentation)
20. ``Y = pdist(X, 'sokalmichener')``
Computes the Sokal-Michener distance between each pair of
boolean vectors. (see sokalmichener function documentation)
21. ``Y = pdist(X, 'sokalsneath')``
Computes the Sokal-Sneath distance between each pair of
boolean vectors. (see sokalsneath function documentation)
22. ``Y = pdist(X, 'wminkowski')``
Computes the weighted Minkowski distance between each pair of
vectors. (see wminkowski function documentation)
23. ``Y = pdist(X, f)``
Computes the distance between all pairs of vectors in X
using the user supplied 2-arity function f. For example,
Euclidean distance between the vectors could be computed
as follows::
dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
Note that you should avoid passing a reference to one of
        the distance functions defined in this library. For example::
dm = pdist(X, sokalsneath)
would calculate the pair-wise distances between the vectors in
X using the Python function sokalsneath. This would result in
sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
        efficient, and we call it using the following syntax::
dm = pdist(X, 'sokalsneath')
Parameters
----------
X : ndarray
An m by n array of m original observations in an
n-dimensional space.
metric : str or function, optional
The distance metric to use. The distance function can
be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis', 'matching',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.
w : ndarray, optional
The weight vector (for weighted Minkowski).
p : double, optional
The p-norm to apply (for Minkowski, weighted and unweighted)
V : ndarray, optional
The variance vector (for standardized Euclidean).
VI : ndarray, optional
The inverse of the covariance matrix (for Mahalanobis).
Returns
-------
Y : ndarray
Returns a condensed distance matrix Y. For
        each :math:`i` and :math:`j` (where :math:`i<j<m`), the
metric ``dist(u=X[i], v=X[j])`` is computed and stored in entry ``ij``.
See Also
--------
squareform : converts between condensed distance matrices and
square distance matrices.
Notes
-----
See ``squareform`` for information on how to calculate the index of
this entry or to convert the condensed distance matrix to a
redundant square matrix.
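    Examples
    --------
    Three points on the unit square, compared against hand-computed
    distances (the exact array formatting may vary with the installed
    numpy version):
    >>> from scipy.spatial.distance import pdist
    >>> pdist([[0, 0], [0, 1], [1, 0]])
    array([ 1.        ,  1.        ,  1.41421356])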
"""
# You can also call this as:
# Y = pdist(X, 'test_abc')
# where 'abc' is the metric being tested. This computes the distance
# between all pairs of vectors in X using the distance metric 'abc' but with
# a more succinct, verifiable, but less efficient implementation.
X = np.asarray(X, order='c')
# The C code doesn't do striding.
X = _copy_array_if_base_present(X)
s = X.shape
if len(s) != 2:
raise ValueError('A 2-dimensional array must be passed.')
m, n = s
dm = np.zeros((m * (m - 1)) // 2, dtype=np.double)
wmink_names = ['wminkowski', 'wmi', 'wm', 'wpnorm']
if w is None and (metric == wminkowski or metric in wmink_names):
raise ValueError('weighted minkowski requires a weight '
'vector `w` to be given.')
if callable(metric):
if metric == minkowski:
def dfun(u, v):
return minkowski(u, v, p)
elif metric == wminkowski:
def dfun(u, v):
return wminkowski(u, v, p, w)
elif metric == seuclidean:
def dfun(u, v):
return seuclidean(u, v, V)
elif metric == mahalanobis:
def dfun(u, v):
                return mahalanobis(u, v, VI)
else:
dfun = metric
X = _convert_to_double(X)
k = 0
for i in xrange(0, m - 1):
for j in xrange(i + 1, m):
dm[k] = dfun(X[i], X[j])
k = k + 1
elif isinstance(metric, string_types):
mstr = metric.lower()
try:
validate, pdist_fn = _SIMPLE_PDIST[mstr]
X = validate(X)
pdist_fn(X, dm)
return dm
except KeyError:
pass
if mstr in ['hamming', 'hamm', 'ha', 'h']:
if X.dtype == bool:
X = _convert_to_bool(X)
_distance_wrap.pdist_hamming_bool_wrap(X, dm)
else:
X = _convert_to_double(X)
_distance_wrap.pdist_hamming_wrap(X, dm)
elif mstr in ['jaccard', 'jacc', 'ja', 'j']:
if X.dtype == bool:
X = _convert_to_bool(X)
_distance_wrap.pdist_jaccard_bool_wrap(X, dm)
else:
X = _convert_to_double(X)
_distance_wrap.pdist_jaccard_wrap(X, dm)
elif mstr in ['minkowski', 'mi', 'm']:
X = _convert_to_double(X)
_distance_wrap.pdist_minkowski_wrap(X, dm, p)
elif mstr in wmink_names:
X = _convert_to_double(X)
w = _convert_to_double(np.asarray(w))
_distance_wrap.pdist_weighted_minkowski_wrap(X, dm, p, w)
elif mstr in ['seuclidean', 'se', 's']:
X = _convert_to_double(X)
if V is not None:
V = np.asarray(V, order='c')
if V.dtype != np.double:
raise TypeError('Variance vector V must contain doubles.')
if len(V.shape) != 1:
raise ValueError('Variance vector V must '
'be one-dimensional.')
if V.shape[0] != n:
raise ValueError('Variance vector V must be of the same '
'dimension as the vectors on which the distances '
'are computed.')
# The C code doesn't do striding.
VV = _copy_array_if_base_present(_convert_to_double(V))
else:
VV = np.var(X, axis=0, ddof=1)
_distance_wrap.pdist_seuclidean_wrap(X, VV, dm)
elif mstr in ['cosine', 'cos']:
X = _convert_to_double(X)
norms = _row_norms(X)
_distance_wrap.pdist_cosine_wrap(X, dm, norms)
elif mstr in ['old_cosine', 'old_cos']:
X = _convert_to_double(X)
norms = _row_norms(X)
nV = norms.reshape(m, 1)
# The numerator u * v
nm = np.dot(X, X.T)
# The denom. ||u||*||v||
de = np.dot(nV, nV.T)
dm = 1.0 - (nm / de)
dm[xrange(0, m), xrange(0, m)] = 0.0
dm = squareform(dm)
elif mstr in ['correlation', 'co']:
X = _convert_to_double(X)
X2 = X - X.mean(1)[:, np.newaxis]
norms = _row_norms(X2)
_distance_wrap.pdist_cosine_wrap(X2, dm, norms)
elif mstr in ['mahalanobis', 'mahal', 'mah']:
X = _convert_to_double(X)
if VI is not None:
VI = _convert_to_double(np.asarray(VI, order='c'))
VI = _copy_array_if_base_present(VI)
else:
if m <= n:
# There are fewer observations than the dimension of
# the observations.
raise ValueError("The number of observations (%d) is too "
"small; the covariance matrix is "
"singular. For observations with %d "
"dimensions, at least %d observations "
"are required." % (m, n, n + 1))
V = np.atleast_2d(np.cov(X.T))
VI = _convert_to_double(np.linalg.inv(V).T.copy())
# (u-v)V^(-1)(u-v)^T
_distance_wrap.pdist_mahalanobis_wrap(X, VI, dm)
elif metric == 'test_euclidean':
dm = pdist(X, euclidean)
        elif metric == 'test_seuclidean':
            if V is None:
                V = np.var(X, axis=0, ddof=1)
            else:
                V = np.asarray(V, order='c')
            dm = pdist(X, lambda u, v: seuclidean(u, v, V))
        elif metric == 'test_sqeuclidean':
            dm = pdist(X, lambda u, v: sqeuclidean(u, v))
elif metric == 'test_braycurtis':
dm = pdist(X, braycurtis)
elif metric == 'test_mahalanobis':
if VI is None:
V = np.cov(X.T)
VI = np.linalg.inv(V)
else:
VI = np.asarray(VI, order='c')
VI = _copy_array_if_base_present(VI)
# (u-v)V^(-1)(u-v)^T
dm = pdist(X, (lambda u, v: mahalanobis(u, v, VI)))
elif metric == 'test_canberra':
dm = pdist(X, canberra)
elif metric == 'test_cityblock':
dm = pdist(X, cityblock)
elif metric == 'test_minkowski':
dm = pdist(X, minkowski, p=p)
elif metric == 'test_wminkowski':
dm = pdist(X, wminkowski, p=p, w=w)
elif metric == 'test_cosine':
dm = pdist(X, cosine)
elif metric == 'test_correlation':
dm = pdist(X, correlation)
elif metric == 'test_hamming':
dm = pdist(X, hamming)
elif metric == 'test_jaccard':
dm = pdist(X, jaccard)
elif metric == 'test_chebyshev' or metric == 'test_chebychev':
dm = pdist(X, chebyshev)
elif metric == 'test_yule':
dm = pdist(X, yule)
elif metric == 'test_matching':
dm = pdist(X, matching)
elif metric == 'test_dice':
dm = pdist(X, dice)
elif metric == 'test_kulsinski':
dm = pdist(X, kulsinski)
elif metric == 'test_rogerstanimoto':
dm = pdist(X, rogerstanimoto)
elif metric == 'test_russellrao':
dm = pdist(X, russellrao)
elif metric == 'test_sokalsneath':
dm = pdist(X, sokalsneath)
elif metric == 'test_sokalmichener':
dm = pdist(X, sokalmichener)
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
return dm
def squareform(X, force="no", checks=True):
"""
Converts a vector-form distance vector to a square-form distance
matrix, and vice-versa.
Parameters
----------
X : ndarray
Either a condensed or redundant distance matrix.
force : str, optional
As with MATLAB(TM), if force is equal to 'tovector' or 'tomatrix',
the input will be treated as a distance matrix or distance vector
respectively.
checks : bool, optional
If `checks` is set to False, no checks will be made for matrix
symmetry nor zero diagonals. This is useful if it is known that
        ``X - X.T`` is small and ``diag(X)`` is close to zero.
        These values are ignored anyway so they do not disrupt the
        squareform transformation.
Returns
-------
Y : ndarray
If a condensed distance matrix is passed, a redundant one is
returned, or if a redundant one is passed, a condensed distance
matrix is returned.
Notes
-----
    1. v = squareform(X)
       Given a square d-by-d symmetric distance matrix X,
       ``v = squareform(X)`` returns a ``d * (d-1) / 2`` (or
       :math:`{d \\choose 2}`) sized vector v.
       v[{d \\choose 2} - {d-i \\choose 2} + (j-i-1)] is the distance
       between points i and j. If X is non-square or asymmetric, an error
       is raised.
    2. X = squareform(v)
       Given a ``d * (d-1) / 2`` sized v for some integer d >= 2 encoding
       distances as described, ``X = squareform(v)`` returns a d by d
       distance matrix X. The X[i, j] and X[j, i] values are set to
       v[{d \\choose 2} - {d-i \\choose 2} + (j-i-1)] and all
       diagonal elements are zero.
"""
X = _convert_to_double(np.asarray(X, order='c'))
s = X.shape
if force.lower() == 'tomatrix':
if len(s) != 1:
raise ValueError("Forcing 'tomatrix' but input X is not a "
"distance vector.")
elif force.lower() == 'tovector':
if len(s) != 2:
raise ValueError("Forcing 'tovector' but input X is not a "
"distance matrix.")
# X = squareform(v)
if len(s) == 1:
if X.shape[0] == 0:
return np.zeros((1, 1), dtype=np.double)
# Grab the closest value to the square root of the number
# of elements times 2 to see if the number of elements
# is indeed a binomial coefficient.
d = int(np.ceil(np.sqrt(X.shape[0] * 2)))
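        # For example, X.shape[0] == 6 gives d = ceil(sqrt(12)) = 4 and
        # 4 * 3 / 2 == 6, so the size is accepted, while X.shape[0] == 5
        # gives d = 4 but 4 * 3 / 2 == 6 != 5, which is rejected below.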
# Check that v is of valid dimensions.
if d * (d - 1) / 2 != int(s[0]):
raise ValueError('Incompatible vector size. It must be a binomial '
'coefficient n choose 2 for some integer n >= 2.')
# Allocate memory for the distance matrix.
M = np.zeros((d, d), dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
X = _copy_array_if_base_present(X)
# Fill in the values of the distance matrix.
_distance_wrap.to_squareform_from_vector_wrap(M, X)
# Return the distance matrix.
return M
elif len(s) == 2:
if s[0] != s[1]:
raise ValueError('The matrix argument must be square.')
if checks:
is_valid_dm(X, throw=True, name='X')
# One-side of the dimensions is set here.
d = s[0]
if d <= 1:
return np.array([], dtype=np.double)
# Create a vector.
v = np.zeros((d * (d - 1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
X = _copy_array_if_base_present(X)
# Convert the vector to squareform.
_distance_wrap.to_vector_from_squareform_wrap(X, v)
return v
else:
raise ValueError(('The first argument must be one or two dimensional '
'array. A %d-dimensional array is not '
'permitted') % len(s))
def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
"""
Returns True if input array is a valid distance matrix.
Distance matrices must be 2-dimensional numpy arrays containing
doubles. They must have a zero-diagonal, and they must be symmetric.
Parameters
----------
D : ndarray
The candidate object to test for validity.
tol : float, optional
The distance matrix should be symmetric. `tol` is the maximum
difference between entries ``ij`` and ``ji`` for the distance
metric to be considered symmetric.
throw : bool, optional
An exception is thrown if the distance matrix passed is not valid.
name : str, optional
        The name of the variable to be checked. This is useful if
throw is set to True so the offending variable can be identified
in the exception message when an exception is thrown.
warning : bool, optional
Instead of throwing an exception, a warning message is
raised.
Returns
-------
valid : bool
True if the variable `D` passed is a valid distance matrix.
Notes
-----
Small numerical differences in `D` and `D.T` and non-zeroness of
the diagonal are ignored if they are within the tolerance specified
by `tol`.
"""
D = np.asarray(D, order='c')
valid = True
try:
s = D.shape
if D.dtype != np.double:
if name:
raise TypeError(('Distance matrix \'%s\' must contain doubles '
'(double).') % name)
else:
raise TypeError('Distance matrix must contain doubles '
'(double).')
if len(D.shape) != 2:
if name:
raise ValueError(('Distance matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Distance matrix must have shape=2 (i.e. '
'be two-dimensional).')
if tol == 0.0:
if not (D == D.T).all():
if name:
raise ValueError(('Distance matrix \'%s\' must be '
'symmetric.') % name)
else:
raise ValueError('Distance matrix must be symmetric.')
if not (D[xrange(0, s[0]), xrange(0, s[0])] == 0).all():
if name:
raise ValueError(('Distance matrix \'%s\' diagonal must '
'be zero.') % name)
else:
raise ValueError('Distance matrix diagonal must be zero.')
else:
            # Compare the absolute difference so negative asymmetries
            # are caught too.
            if not (np.abs(D - D.T) <= tol).all():
if name:
raise ValueError(('Distance matrix \'%s\' must be '
'symmetric within tolerance %5.5f.')
% (name, tol))
else:
raise ValueError('Distance matrix must be symmetric within'
' tolerance %5.5f.' % tol)
if not (D[xrange(0, s[0]), xrange(0, s[0])] <= tol).all():
if name:
raise ValueError(('Distance matrix \'%s\' diagonal must be'
' close to zero within tolerance %5.5f.')
% (name, tol))
                else:
                    raise ValueError(('Distance matrix diagonal must be '
                                      'close to zero within tolerance '
                                      '%5.5f.') % tol)
except Exception as e:
if throw:
raise
if warning:
warnings.warn(str(e))
valid = False
return valid
def is_valid_y(y, warning=False, throw=False, name=None):
"""
Returns True if the input array is a valid condensed distance matrix.
Condensed distance matrices must be 1-dimensional
numpy arrays containing doubles. Their length must be a binomial
coefficient :math:`{n \\choose 2}` for some positive integer n.
Parameters
----------
y : ndarray
The condensed distance matrix.
warning : bool, optional
Invokes a warning if the variable passed is not a valid
condensed distance matrix. The warning message explains why
the distance matrix is not valid. `name` is used when
referencing the offending variable.
throw : bool, optional
Throws an exception if the variable passed is not a valid
condensed distance matrix.
    name : str, optional
Used when referencing the offending variable in the
warning or exception message.
"""
y = np.asarray(y, order='c')
valid = True
try:
if y.dtype != np.double:
if name:
raise TypeError(('Condensed distance matrix \'%s\' must '
'contain doubles (double).') % name)
else:
raise TypeError('Condensed distance matrix must contain '
'doubles (double).')
if len(y.shape) != 1:
if name:
raise ValueError(('Condensed distance matrix \'%s\' must '
'have shape=1 (i.e. be one-dimensional).')
% name)
else:
raise ValueError('Condensed distance matrix must have shape=1 '
'(i.e. be one-dimensional).')
n = y.shape[0]
d = int(np.ceil(np.sqrt(n * 2)))
if (d * (d - 1) / 2) != n:
            if name:
                raise ValueError(('Length n of condensed distance matrix '
                                  '\'%s\' must be a binomial coefficient, '
                                  'i.e. there must be a k such that '
                                  '(k choose 2) == n!') % name)
            else:
                raise ValueError('Length n of condensed distance matrix must '
                                 'be a binomial coefficient, i.e. there must '
                                 'be a k such that (k choose 2) == n!')
except Exception as e:
if throw:
raise
if warning:
warnings.warn(str(e))
valid = False
return valid
def num_obs_dm(d):
"""
Returns the number of original observations that correspond to a
square, redundant distance matrix.
Parameters
----------
d : ndarray
The target distance matrix.
Returns
-------
num_obs_dm : int
The number of observations in the redundant distance matrix.
"""
d = np.asarray(d, order='c')
is_valid_dm(d, tol=np.inf, throw=True, name='d')
return d.shape[0]
def num_obs_y(Y):
"""
Returns the number of original observations that correspond to a
condensed distance matrix.
Parameters
----------
Y : ndarray
Condensed distance matrix.
Returns
-------
n : int
The number of observations in the condensed distance matrix `Y`.
"""
Y = np.asarray(Y, order='c')
is_valid_y(Y, throw=True, name='Y')
k = Y.shape[0]
if k == 0:
raise ValueError("The number of observations cannot be determined on "
"an empty distance matrix.")
d = int(np.ceil(np.sqrt(k * 2)))
if (d * (d - 1) / 2) != k:
raise ValueError("Invalid condensed distance matrix passed. Must be "
"some k where k=(n choose 2) for some n >= 2.")
return d
def _row_norms(X):
norms = np.einsum('ij,ij->i', X, X, dtype=np.double)
return np.sqrt(norms, out=norms)
def _cosine_cdist(XA, XB, dm):
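    # Fills dm in place with 1 - dot(XA[i], XB[j]) / (||XA[i]|| * ||XB[j]||),
    # i.e. the cosine distance between every row of XA and every row of XB.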
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
np.dot(XA, XB.T, out=dm)
dm /= _row_norms(XA).reshape(-1, 1)
dm /= _row_norms(XB)
dm *= -1
dm += 1
def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
"""
Computes distance between each pair of the two collections of inputs.
The following are common calling conventions:
1. ``Y = cdist(XA, XB, 'euclidean')``
Computes the distance between :math:`m` points using
Euclidean distance (2-norm) as the distance metric between the
points. The points are arranged as :math:`m`
:math:`n`-dimensional row vectors in the matrix X.
2. ``Y = cdist(XA, XB, 'minkowski', p)``
Computes the distances using the Minkowski distance
:math:`||u-v||_p` (:math:`p`-norm) where :math:`p \\geq 1`.
3. ``Y = cdist(XA, XB, 'cityblock')``
Computes the city block or Manhattan distance between the
points.
4. ``Y = cdist(XA, XB, 'seuclidean', V=None)``
Computes the standardized Euclidean distance. The standardized
Euclidean distance between two n-vectors ``u`` and ``v`` is
.. math::
\\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}.
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
automatically computed.
5. ``Y = cdist(XA, XB, 'sqeuclidean')``
Computes the squared Euclidean distance :math:`||u-v||_2^2` between
the vectors.
6. ``Y = cdist(XA, XB, 'cosine')``
Computes the cosine distance between vectors u and v,
.. math::
1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
:math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.
7. ``Y = cdist(XA, XB, 'correlation')``
Computes the correlation distance between vectors u and v. This is
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{v}` is the mean of the elements of vector v,
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = cdist(XA, XB, 'hamming')``
Computes the normalized Hamming distance, or the proportion of
those vector elements between two n-vectors ``u`` and ``v``
which disagree. To save memory, the matrix ``X`` can be of type
boolean.
9. ``Y = cdist(XA, XB, 'jaccard')``
Computes the Jaccard distance between the points. Given two
vectors, ``u`` and ``v``, the Jaccard distance is the
proportion of those elements ``u[i]`` and ``v[i]`` that
disagree where at least one of them is non-zero.
10. ``Y = cdist(XA, XB, 'chebyshev')``
Computes the Chebyshev distance between the points. The
Chebyshev distance between two n-vectors ``u`` and ``v`` is the
maximum norm-1 distance between their respective elements. More
precisely, the distance is given by
.. math::
d(u,v) = \\max_i {|u_i-v_i|}.
11. ``Y = cdist(XA, XB, 'canberra')``
Computes the Canberra distance between the points. The
Canberra distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}.
12. ``Y = cdist(XA, XB, 'braycurtis')``
Computes the Bray-Curtis distance between the points. The
Bray-Curtis distance between two points ``u`` and ``v`` is
       .. math::
          d(u,v) = \\frac{\\sum_i |u_i-v_i|}
                         {\\sum_i |u_i+v_i|}
13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``
Computes the Mahalanobis distance between the points. The
Mahalanobis distance between two points ``u`` and ``v`` is
:math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
variable) is the inverse covariance. If ``VI`` is not None,
``VI`` will be used as the inverse covariance matrix.
14. ``Y = cdist(XA, XB, 'yule')``
Computes the Yule distance between the boolean
vectors. (see `yule` function documentation)
15. ``Y = cdist(XA, XB, 'matching')``
Synonym for 'hamming'.
16. ``Y = cdist(XA, XB, 'dice')``
Computes the Dice distance between the boolean vectors. (see
`dice` function documentation)
17. ``Y = cdist(XA, XB, 'kulsinski')``
Computes the Kulsinski distance between the boolean
vectors. (see `kulsinski` function documentation)
18. ``Y = cdist(XA, XB, 'rogerstanimoto')``
Computes the Rogers-Tanimoto distance between the boolean
vectors. (see `rogerstanimoto` function documentation)
19. ``Y = cdist(XA, XB, 'russellrao')``
Computes the Russell-Rao distance between the boolean
vectors. (see `russellrao` function documentation)
20. ``Y = cdist(XA, XB, 'sokalmichener')``
Computes the Sokal-Michener distance between the boolean
vectors. (see `sokalmichener` function documentation)
21. ``Y = cdist(XA, XB, 'sokalsneath')``
Computes the Sokal-Sneath distance between the vectors. (see
`sokalsneath` function documentation)
22. ``Y = cdist(XA, XB, 'wminkowski')``
Computes the weighted Minkowski distance between the
vectors. (see `wminkowski` function documentation)
23. ``Y = cdist(XA, XB, f)``
Computes the distance between all pairs of vectors in X
using the user supplied 2-arity function f. For example,
Euclidean distance between the vectors could be computed
as follows::
dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum()))
Note that you should avoid passing a reference to one of
the distance functions defined in this library. For example,::
dm = cdist(XA, XB, sokalsneath)
would calculate the pair-wise distances between the vectors in
X using the Python function `sokalsneath`. This would result in
sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
efficient, and we call it using the following syntax::
dm = cdist(XA, XB, 'sokalsneath')
Parameters
----------
XA : ndarray
An :math:`m_A` by :math:`n` array of :math:`m_A`
original observations in an :math:`n`-dimensional space.
Inputs are converted to float type.
XB : ndarray
An :math:`m_B` by :math:`n` array of :math:`m_B`
original observations in an :math:`n`-dimensional space.
Inputs are converted to float type.
metric : str or callable, optional
The distance metric to use. If a string, the distance function can be
'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',
'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'wminkowski', 'yule'.
w : ndarray, optional
The weight vector (for weighted Minkowski).
p : scalar, optional
The p-norm to apply (for Minkowski, weighted and unweighted)
V : ndarray, optional
The variance vector (for standardized Euclidean).
VI : ndarray, optional
The inverse of the covariance matrix (for Mahalanobis).
Returns
-------
Y : ndarray
A :math:`m_A` by :math:`m_B` distance matrix is returned.
For each :math:`i` and :math:`j`, the metric
``dist(u=XA[i], v=XB[j])`` is computed and stored in the
:math:`ij` th entry.
Raises
------
ValueError
An exception is thrown if `XA` and `XB` do not have
the same number of columns.
Examples
--------
Find the Euclidean distances between four 2-D coordinates:
>>> from scipy.spatial import distance
>>> coords = [(35.0456, -85.2672),
... (35.1174, -89.9711),
... (35.9728, -83.9422),
... (36.1667, -86.7833)]
>>> distance.cdist(coords, coords, 'euclidean')
array([[ 0. , 4.7044, 1.6172, 1.8856],
[ 4.7044, 0. , 6.0893, 3.3561],
[ 1.6172, 6.0893, 0. , 2.8477],
[ 1.8856, 3.3561, 2.8477, 0. ]])
Find the Manhattan distance from a 3-D point to the corners of the unit
cube:
>>> a = np.array([[0, 0, 0],
... [0, 0, 1],
... [0, 1, 0],
... [0, 1, 1],
... [1, 0, 0],
... [1, 0, 1],
... [1, 1, 0],
... [1, 1, 1]])
>>> b = np.array([[ 0.1, 0.2, 0.4]])
>>> distance.cdist(a, b, 'cityblock')
array([[ 0.7],
[ 0.9],
[ 1.3],
[ 1.5],
[ 1.5],
[ 1.7],
[ 2.1],
[ 2.3]])
"""
# You can also call this as:
# Y = cdist(XA, XB, 'test_abc')
# where 'abc' is the metric being tested. This computes the distance
# between all pairs of vectors in XA and XB using the distance metric 'abc'
# but with a more succinct, verifiable, but less efficient implementation.
XA = np.asarray(XA, order='c')
XB = np.asarray(XB, order='c')
# The C code doesn't do striding.
XA = _copy_array_if_base_present(_convert_to_double(XA))
XB = _copy_array_if_base_present(_convert_to_double(XB))
s = XA.shape
sB = XB.shape
if len(s) != 2:
raise ValueError('XA must be a 2-dimensional array.')
if len(sB) != 2:
raise ValueError('XB must be a 2-dimensional array.')
if s[1] != sB[1]:
raise ValueError('XA and XB must have the same number of columns '
'(i.e. feature dimension.)')
mA = s[0]
mB = sB[0]
n = s[1]
dm = np.zeros((mA, mB), dtype=np.double)
if callable(metric):
if metric == minkowski:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = minkowski(XA[i, :], XB[j, :], p)
elif metric == wminkowski:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = wminkowski(XA[i, :], XB[j, :], p, w)
elif metric == seuclidean:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = seuclidean(XA[i, :], XB[j, :], V)
        elif metric == mahalanobis:
            for i in xrange(0, mA):
                for j in xrange(0, mB):
                    # As in pdist, mahalanobis expects the inverse
                    # covariance matrix ``VI``, not ``V``.
                    dm[i, j] = mahalanobis(XA[i, :], XB[j, :], VI)
else:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = metric(XA[i, :], XB[j, :])
elif isinstance(metric, string_types):
mstr = metric.lower()
try:
validate, cdist_fn = _SIMPLE_CDIST[mstr]
XA = validate(XA)
XB = validate(XB)
cdist_fn(XA, XB, dm)
return dm
except KeyError:
pass
if mstr in ['hamming', 'hamm', 'ha', 'h']:
if XA.dtype == bool:
XA = _convert_to_bool(XA)
XB = _convert_to_bool(XB)
_distance_wrap.cdist_hamming_bool_wrap(XA, XB, dm)
else:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
_distance_wrap.cdist_hamming_wrap(XA, XB, dm)
elif mstr in ['jaccard', 'jacc', 'ja', 'j']:
if XA.dtype == bool:
XA = _convert_to_bool(XA)
XB = _convert_to_bool(XB)
_distance_wrap.cdist_jaccard_bool_wrap(XA, XB, dm)
else:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
_distance_wrap.cdist_jaccard_wrap(XA, XB, dm)
elif mstr in ['minkowski', 'mi', 'm', 'pnorm']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
_distance_wrap.cdist_minkowski_wrap(XA, XB, dm, p)
elif mstr in ['wminkowski', 'wmi', 'wm', 'wpnorm']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
w = _convert_to_double(w)
_distance_wrap.cdist_weighted_minkowski_wrap(XA, XB, dm, p, w)
elif mstr in ['seuclidean', 'se', 's']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
if V is not None:
V = np.asarray(V, order='c')
if V.dtype != np.double:
raise TypeError('Variance vector V must contain doubles.')
if len(V.shape) != 1:
raise ValueError('Variance vector V must be '
'one-dimensional.')
if V.shape[0] != n:
raise ValueError('Variance vector V must be of the same '
'dimension as the vectors on which the '
'distances are computed.')
# The C code doesn't do striding.
VV = _copy_array_if_base_present(_convert_to_double(V))
else:
VV = np.var(np.vstack([XA, XB]), axis=0, ddof=1)
_distance_wrap.cdist_seuclidean_wrap(XA, XB, VV, dm)
elif mstr in ['cosine', 'cos']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
_cosine_cdist(XA, XB, dm)
elif mstr in ['correlation', 'co']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
XA -= XA.mean(axis=1)[:, np.newaxis]
XB -= XB.mean(axis=1)[:, np.newaxis]
_cosine_cdist(XA, XB, dm)
elif mstr in ['mahalanobis', 'mahal', 'mah']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
if VI is not None:
VI = _convert_to_double(np.asarray(VI, order='c'))
VI = _copy_array_if_base_present(VI)
else:
m = mA + mB
if m <= n:
# There are fewer observations than the dimension of
# the observations.
raise ValueError("The number of observations (%d) is too "
"small; the covariance matrix is "
"singular. For observations with %d "
"dimensions, at least %d observations "
"are required." % (m, n, n + 1))
X = np.vstack([XA, XB])
V = np.atleast_2d(np.cov(X.T))
del X
VI = np.linalg.inv(V).T.copy()
# (u-v)V^(-1)(u-v)^T
_distance_wrap.cdist_mahalanobis_wrap(XA, XB, VI, dm)
elif metric == 'test_euclidean':
dm = cdist(XA, XB, euclidean)
elif metric == 'test_seuclidean':
if V is None:
V = np.var(np.vstack([XA, XB]), axis=0, ddof=1)
else:
V = np.asarray(V, order='c')
dm = cdist(XA, XB, lambda u, v: seuclidean(u, v, V))
elif metric == 'test_sqeuclidean':
dm = cdist(XA, XB, lambda u, v: sqeuclidean(u, v))
elif metric == 'test_braycurtis':
dm = cdist(XA, XB, braycurtis)
elif metric == 'test_mahalanobis':
if VI is None:
                X = np.vstack([XA, XB])
                V = np.cov(X.T)
                VI = np.linalg.inv(V)
                del X
else:
VI = np.asarray(VI, order='c')
VI = _copy_array_if_base_present(VI)
# (u-v)V^(-1)(u-v)^T
dm = cdist(XA, XB, (lambda u, v: mahalanobis(u, v, VI)))
elif metric == 'test_canberra':
dm = cdist(XA, XB, canberra)
elif metric == 'test_cityblock':
dm = cdist(XA, XB, cityblock)
elif metric == 'test_minkowski':
dm = cdist(XA, XB, minkowski, p=p)
elif metric == 'test_wminkowski':
dm = cdist(XA, XB, wminkowski, p=p, w=w)
elif metric == 'test_correlation':
dm = cdist(XA, XB, correlation)
elif metric == 'test_hamming':
dm = cdist(XA, XB, hamming)
elif metric == 'test_jaccard':
dm = cdist(XA, XB, jaccard)
elif metric == 'test_chebyshev' or metric == 'test_chebychev':
dm = cdist(XA, XB, chebyshev)
elif metric == 'test_yule':
dm = cdist(XA, XB, yule)
elif metric == 'test_matching':
dm = cdist(XA, XB, matching)
elif metric == 'test_dice':
dm = cdist(XA, XB, dice)
elif metric == 'test_kulsinski':
dm = cdist(XA, XB, kulsinski)
elif metric == 'test_rogerstanimoto':
dm = cdist(XA, XB, rogerstanimoto)
elif metric == 'test_russellrao':
dm = cdist(XA, XB, russellrao)
elif metric == 'test_sokalsneath':
dm = cdist(XA, XB, sokalsneath)
elif metric == 'test_sokalmichener':
dm = cdist(XA, XB, sokalmichener)
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
return dm
| gpl-3.0 |
Yukarumya/Yukarum-Redfoxes | js/src/vm/make_opcode_doc.py | 1 | 13188 | #!/usr/bin/python -B
""" Usage: make_opcode_doc.py PATH_TO_MOZILLA_CENTRAL
This script generates SpiderMonkey bytecode documentation
from js/src/vm/Opcodes.h.
Output is written to stdout and should be pasted into the following
MDN page:
https://developer.mozilla.org/en-US/docs/SpiderMonkey/Internals/Bytecode
"""
from __future__ import print_function
import re
import sys
from xml.sax.saxutils import escape
SOURCE_BASE = 'http://dxr.mozilla.org/mozilla-central/source'
def error(message):
print("Error: {message}".format(message=message), file=sys.stderr)
sys.exit(1)
quoted_pat = re.compile(r"([^A-Za-z0-9]|^)'([^']+)'")
js_pat = re.compile(r"([^A-Za-z0-9]|^)(JS[A-Z0-9_\*]+)")
def codify(text):
text = re.sub(quoted_pat, '\\1<code>\\2</code>', text)
text = re.sub(js_pat, '\\1<code>\\2</code>', text)
return text
space_star_space_pat = re.compile('^\s*\* ?', re.M)
def get_comment_body(comment):
return re.sub(space_star_space_pat, '', comment).split('\n')
def parse_index(comment):
index = []
current_types = None
category_name = ''
category_pat = re.compile('\[([^\]]+)\]')
for line in get_comment_body(comment):
m = category_pat.search(line)
if m:
category_name = m.group(1)
if category_name == 'Index':
continue
current_types = []
index.append((category_name, current_types))
else:
type_name = line.strip()
if type_name and current_types is not None:
current_types.append((type_name, []))
return index
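# The index comment is assumed to look roughly like this (an illustrative
# sketch inferred from the parser above, not quoted from Opcodes.h):
#
# [Index]
# [Statements]
# Jump
# Switch
#
# which parse_index() returns as
# [('Statements', [('Jump', []), ('Switch', [])])].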
class OpcodeInfo:
def __init__(self):
self.name = ''
self.value = ''
self.length = ''
self.length_override = ''
self.nuses = ''
self.nuses_override = ''
self.ndefs = ''
self.ndefs_override = ''
self.flags = ''
self.operands = ''
self.stack_uses = ''
self.stack_defs = ''
self.desc = ''
self.category_name = ''
self.type_name = ''
self.group = []
self.sort_key = ''
def find_by_name(list, name):
for (n, body) in list:
if n == name:
return body
return None
def add_to_index(index, opcode):
types = find_by_name(index, opcode.category_name)
if types is None:
error("Category is not listed in index: "
"{name}".format(name=opcode.category_name))
opcodes = find_by_name(types, opcode.type_name)
if opcodes is None:
if opcode.type_name:
error("Type is not listed in {category}: "
"{name}".format(category=opcode.category_name,
name=opcode.type_name))
types.append((opcode.type_name, [opcode]))
return
opcodes.append(opcode)
def format_desc(descs):
current_type = ''
desc = ''
for (type, line) in descs:
if type != current_type:
if current_type:
desc += '</{name}>\n'.format(name=current_type)
current_type = type
if type:
desc += '<{name}>'.format(name=current_type)
if current_type:
desc += line + "\n"
if current_type:
desc += '</{name}>'.format(name=current_type)
return desc
tag_pat = re.compile('^\s*[A-Za-z]+:\s*|\s*$')
def get_tag_value(line):
return re.sub(tag_pat, '', line)
def get_opcodes(dir):
iter_pat = re.compile(r"/\*(.*?)\*/" # either a documentation comment...
r"|"
r"macro\(" # or a macro(...) call
r"([^,]+),\s*" # op
r"([0-9]+),\s*" # val
r"[^,]+,\s*" # name
r"[^,]+,\s*" # image
r"([0-9\-]+),\s*" # length
r"([0-9\-]+),\s*" # nuses
r"([0-9\-]+),\s*" # ndefs
r"([^\)]+)" # format
r"\)", re.S)
stack_pat = re.compile('^(.*?)\s*=>\s*(.*?)$')
index = []
opcode = OpcodeInfo()
merged = opcode
with open('{dir}/js/src/vm/Opcodes.h'.format(dir=dir), 'r') as f:
data = f.read()
for m in re.finditer(iter_pat, data):
comment = m.group(1)
name = m.group(2)
if comment:
if '[Index]' in comment:
index = parse_index(comment)
continue
if 'Operands:' not in comment:
continue
state = 'desc'
stack = ''
descs = []
for line in get_comment_body(comment):
if line.startswith(' Category:'):
state = 'category'
opcode.category_name = get_tag_value(line)
elif line.startswith(' Type:'):
state = 'type'
opcode.type_name = get_tag_value(line)
elif line.startswith(' Operands:'):
state = 'operands'
opcode.operands = get_tag_value(line)
elif line.startswith(' Stack:'):
state = 'stack'
stack = get_tag_value(line)
elif line.startswith(' len:'):
state = 'len'
opcode.length_override = get_tag_value(line)
elif line.startswith(' nuses:'):
state = 'nuses'
opcode.nuses_override = get_tag_value(line)
elif line.startswith(' ndefs:'):
state = 'ndefs'
opcode.ndefs_override = get_tag_value(line)
elif state == 'desc':
if line.startswith(' '):
descs.append(('pre', escape(line[1:])))
else:
line = line.strip()
if line == '':
descs.append(('', line))
else:
descs.append(('p', codify(escape(line))))
elif line.startswith(' '):
if state == 'operands':
opcode.operands += line.strip()
elif state == 'stack':
stack += line.strip()
elif state == 'len':
opcode.length_override += line.strip()
elif state == 'nuses':
opcode.nuses_override += line.strip()
elif state == 'ndefs':
opcode.ndefs_override += line.strip()
opcode.desc = format_desc(descs)
m2 = stack_pat.search(stack)
if m2:
opcode.stack_uses = m2.group(1)
opcode.stack_defs = m2.group(2)
merged = opcode
elif name and not name.startswith('JSOP_UNUSED'):
opcode.name = name
opcode.value = int(m.group(3))
opcode.length = m.group(4)
opcode.nuses = m.group(5)
opcode.ndefs = m.group(6)
flags = []
for flag in m.group(7).split('|'):
if flag != 'JOF_BYTE':
flags.append(flag.replace('JOF_', ''))
opcode.flags = ', '.join(flags)
if merged == opcode:
opcode.sort_key = opcode.name
if opcode.category_name == '':
error("Category is not specified for "
"{name}".format(name=opcode.name))
add_to_index(index, opcode)
else:
if merged.length != opcode.length:
error("length should be same for merged section: "
"{value1}({name1}) != "
"{value2}({name2})".format(name1=merged.name,
value1=merged.length,
name2=opcode.name,
value2=opcode.length))
if merged.nuses != opcode.nuses:
error("nuses should be same for merged section: "
"{value1}({name1}) != "
"{value2}({name2})".format(name1=merged.name,
value1=merged.nuses,
name2=opcode.name,
value2=opcode.nuses))
if merged.ndefs != opcode.ndefs:
error("ndefs should be same for merged section: "
"{value1}({name1}) != "
"{value2}({name2})".format(name1=merged.name,
value1=merged.ndefs,
name2=opcode.name,
value2=opcode.ndefs))
merged.group.append(opcode)
if opcode.name < merged.name:
merged.sort_key = opcode.name
opcode = OpcodeInfo()
return index
def override(value, override_value):
if override_value != '':
return override_value
return value
def format_flags(flags):
if flags == '':
return ''
return ' ({flags})'.format(flags=flags)
def print_opcode(opcode):
names_template = '{name} [-{nuses}, +{ndefs}]{flags}'
opcodes = sorted([opcode] + opcode.group,
key=lambda opcode: opcode.name)
names = map(lambda code: names_template.format(name=escape(code.name),
nuses=override(code.nuses,
opcode.nuses_override),
ndefs=override(code.ndefs,
opcode.ndefs_override),
flags=format_flags(code.flags)),
opcodes)
if len(opcodes) == 1:
values = ['{value} (0x{value:02x})'.format(value=opcode.value)]
else:
values_template = '{name}: {value} (0x{value:02x})'
values = map(lambda code: values_template.format(name=escape(code.name),
value=code.value),
opcodes)
print("""<dt id="{id}">{names}</dt>
<dd>
<table class="standard-table">
<tbody>
<tr><th>Value</th><td><code>{values}</code></td></tr>
<tr><th>Operands</th><td><code>{operands}</code></td></tr>
<tr><th>Length</th><td><code>{length}</code></td></tr>
<tr><th>Stack Uses</th><td><code>{stack_uses}</code></td></tr>
<tr><th>Stack Defs</th><td><code>{stack_defs}</code></td></tr>
</tbody>
</table>
{desc}
</dd>
""".format(id=opcodes[0].name,
names='<br>'.join(names),
values='<br>'.join(values),
operands=escape(opcode.operands) or " ",
length=escape(override(opcode.length,
opcode.length_override)),
stack_uses=escape(opcode.stack_uses) or " ",
stack_defs=escape(opcode.stack_defs) or " ",
desc=opcode.desc)) # desc is already escaped
id_cache = dict()
id_count = dict()
def make_element_id(category, type=''):
key = '{}:{}'.format(category, type)
if key in id_cache:
return id_cache[key]
if type == '':
id = category.replace(' ', '_')
else:
id = type.replace(' ', '_')
if id in id_count:
id_count[id] += 1
id = '{}_{}'.format(id, id_count[id])
else:
id_count[id] = 1
id_cache[key] = id
return id
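# For example, make_element_id('Statements', 'Jump') yields 'Jump'; if a
# different category later registers a 'Jump' type too, it gets 'Jump_2',
# and repeated calls for the same (category, type) pair return the cached id.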
def print_doc(index):
print("""<div>{{{{SpiderMonkeySidebar("Internals")}}}}</div>
<h2 id="Bytecode_Listing">Bytecode Listing</h2>
<p>This document is automatically generated from
<a href="{source_base}/js/src/vm/Opcodes.h">Opcodes.h</a> by
<a href="{source_base}/js/src/vm/make_opcode_doc.py">make_opcode_doc.py</a>.</p>
""".format(source_base=SOURCE_BASE))
for (category_name, types) in index:
print('<h3 id="{id}">{name}</h3>'.format(name=category_name,
id=make_element_id(category_name)))
for (type_name, opcodes) in types:
if type_name:
print('<h4 id="{id}">{name}</h4>'.format(name=type_name,
id=make_element_id(category_name, type_name)))
print('<dl>')
for opcode in sorted(opcodes,
key=lambda opcode: opcode.sort_key):
print_opcode(opcode)
print('</dl>')
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: make_opcode_doc.py PATH_TO_MOZILLA_CENTRAL",
file=sys.stderr)
sys.exit(1)
dir = sys.argv[1]
index = get_opcodes(dir)
print_doc(index)
| mpl-2.0 |
thetimelineproj/autopilot | src/lib/wrappers/frame.py | 2 | 2017 | # Copyright (C) 2009-2015 Contributors as noted in the AUTHORS file
#
# This file is part of Autopilot.
#
# Autopilot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Autopilot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Autopilot. If not, see <http://www.gnu.org/licenses/>.
import wx
from lib.app.decorators import Overrides
from lib.reporting.logger import Logger
from lib.wrappers.wrapper import Wrapper
wxFrame = wx.Frame
class Frame(wxFrame, Wrapper):
def __init__(self, *args, **kw):
self.set_active_window()
wxFrame.__init__(self, *args, **kw)
Wrapper.__init__(self)
self._shown = False
def name(self):
return self.GetLabel()
def classname(self):
return self.GetClassName()
@Overrides(wxFrame)
def Show(self, *args, **kw):
self._shown = True
Logger.add_open(self)
self.call_when_win_shows(self._explore_and_register)
        # Resolve through the normal MRO (as Hide() does below) so that
        # wx.Frame.Show actually runs; super(wxFrame, self) would skip it.
        super(Frame, self).Show(*args, **kw)
def _explore_and_register(self):
self._explore()
Frame.register(self)
@Overrides(wxFrame)
def IsShown(self):
return self._shown
@Overrides(wxFrame)
def Destroy(self, *args, **kw):
Logger.add("Destroy called")
self._shown = False
Logger.add_close(self)
wxFrame.Destroy(self, *args, **kw)
def Hide(self):
self._shown = False
Logger.add_close(self)
super(Frame, self).Hide()
@classmethod
    def wrap(cls, register):
        wx.Frame = Frame
        Frame.register = register
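# Intended wiring (a sketch based on the code above): call
# Frame.wrap(register_callback) once at startup; afterwards any code that
# constructs wx.Frame(...) gets this logging/registering wrapper instead.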
| gpl-3.0 |
jbedorf/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/scale_tril_test.py | 33 | 2411 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ScaleTriL bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ScaleTriLBijectorTest(test.TestCase):
"""Tests the correctness of the ScaleTriL bijector."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testComputesCorrectValues(self):
shift = 1.61803398875
x = np.float32(np.array([-1, .5, 2]))
y = np.float32(np.array([[np.exp(2) + shift, 0.],
[.5, np.exp(-1) + shift]]))
b = bijectors.ScaleTriL(diag_bijector=bijectors.Exp(),
diag_shift=shift)
y_ = self.evaluate(b.forward(x))
self.assertAllClose(y, y_)
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_)
@test_util.run_in_graph_and_eager_modes
def testInvertible(self):
# Generate random inputs from an unconstrained space, with
# event size 6 to specify 3x3 triangular matrices.
batch_shape = [2, 1]
x = np.float32(np.random.randn(*(batch_shape + [6])))
b = bijectors.ScaleTriL(diag_bijector=bijectors.Softplus(),
diag_shift=3.14159)
y = self.evaluate(b.forward(x))
self.assertAllEqual(y.shape, batch_shape + [3, 3])
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_)
fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=1))
ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
self.assertAllClose(fldj, -ildj)
if __name__ == "__main__":
test.main()
| apache-2.0 |
TeemuAhola/sailelfcloud | src/qml/python/tasks.py | 1 | 2310 | '''
Created on Aug 22, 2016
@author: Teemu Ahola [teemuahola7@gmail.com]
'''
import uidgenerator
class Task(object):
def __init__(self, startCb=None, completedCb=None, failedCb=None):
self.__uid = uidgenerator.getUid()
self.__startCb = startCb
self.__completedCb = completedCb
self.__failedCb = failedCb
@property
def uid(self):
return self.__uid
@property
def startCb(self):
return self.__startCb
@property
def completedCb(self):
return self.__completedCb
@property
def failedCb(self):
return self.__failedCb
def __eq__(self, other):
if isinstance(other, int):
return other == self.__uid
else:
return self.__dict__ == other.__dict__
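# Minimal usage sketch (callback names here are hypothetical, not part of
# this module):
#
# t = XferTask(on_start, on_done, on_fail)
# c = CancelTask(t.uid, on_cancelled)
# assert c.uidOfTaskToCancel == t.uid
# assert t == t.uid  # Task.__eq__ also accepts a bare uid integer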
class TerminateTask(Task):
def __init__(self, cb=None, *args):
super().__init__(completedCb=cb)
class XferTask(Task):
def __init__(self, startCb, completedCb, failedCb, *args):
super().__init__(startCb, completedCb, failedCb)
self.__running = True
@property
def running(self):
return self.__running
@running.setter
def running(self, r):
self.__running = r
class WaitCompletionTask(Task):
    # ``Task.uid`` is a read-only property, so assigning ``self.uid = None``
    # would raise AttributeError; shadowing the property at class level keeps
    # the intended ``uid is None`` sentinel.
    uid = None
    def __init__(self):
        super().__init__()
class CancelTask(XferTask):
def __init__(self, uidToCancel, cb):
super().__init__(startCb=None, completedCb=cb, failedCb=None)
self.__uidOfTaskToCancel = uidToCancel
@property
def uidOfTaskToCancel(self):
return self.__uidOfTaskToCancel
class PauseTask(XferTask):
def __init__(self, uidToPause, cb):
super().__init__(startCb=None, completedCb=cb, failedCb=None)
self.__uidOfTaskToPause = uidToPause
@property
def uidOfTaskToPause(self):
return self.__uidOfTaskToPause
class ResumeTask(XferTask):
def __init__(self, uidToResume, cb):
super().__init__(startCb=None, completedCb=cb, failedCb=None)
self.__uidOfTaskToResume = uidToResume
@property
def uidOfTaskToResume(self):
return self.__uidOfTaskToResume
class ListTask(Task):
def __init__(self, cb=None, *args):
super().__init__(completedCb=cb)
| gpl-3.0 |
2014c2g2/2015cdag2_0421 | static/Brython3.1.1-20150328-091302/Lib/datetime.py | 628 | 75044 | """Concrete date/time and related types.
See http://www.iana.org/time-zones/repository/tz-link.html for
time zone and DST data sources.
"""
import time as _time
import math as _math
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059 # date.max.toordinal()
# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.
_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_DAYS_BEFORE_MONTH = [None]
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
_DAYS_BEFORE_MONTH.append(dbm)
dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
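# Illustrative check (added; this module already uses module-level asserts):
# century years are leap only when divisible by 400.
assert _is_leap(2000) and not _is_leap(1900)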
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
"year, month -> number of days in that month in that year."
assert 1 <= month <= 12, month
if month == 2 and _is_leap(year):
return 29
return _DAYS_IN_MONTH[month]
def _days_before_month(year, month):
"year, month -> number of days in year preceding first day of month."
assert 1 <= month <= 12, 'month must be in 1..12'
return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))
def _ymd2ord(year, month, day):
"year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
assert 1 <= month <= 12, 'month must be in 1..12'
dim = _days_in_month(year, month)
assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
return (_days_before_year(year) +
_days_before_month(year, month) +
day)
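# Illustrative check (added): ordinal 1 is 01-Jan-0001, and year 1 has
# 365 days, so 01-Jan-0002 is ordinal 366.
assert _ymd2ord(1, 1, 1) == 1 and _ymd2ord(2, 1, 1) == 366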
_DI400Y = _days_before_year(401) # number of days in 400 years
_DI100Y = _days_before_year(101) # " " " " 100 "
_DI4Y = _days_before_year(5) # " " " " 4 "
# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1
# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1
# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
def _ord2ymd(n):
"ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
# n is a 1-based index, starting at 1-Jan-1. The pattern of leap years
# repeats exactly every 400 years. The basic strategy is to find the
# closest 400-year boundary at or before n, then work with the offset
# from that boundary to n. Life is much clearer if we subtract 1 from
# n first -- then the values of n at 400-year boundaries are exactly
# those divisible by _DI400Y:
#
# D M Y n n-1
# -- --- ---- ---------- ----------------
# 31 Dec -400 -_DI400Y -_DI400Y -1
# 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary
# ...
# 30 Dec 000 -1 -2
# 31 Dec 000 0 -1
# 1 Jan 001 1 0 400-year boundary
# 2 Jan 001 2 1
# 3 Jan 001 3 2
# ...
# 31 Dec 400 _DI400Y _DI400Y -1
# 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary
n -= 1
n400, n = divmod(n, _DI400Y)
year = n400 * 400 + 1 # ..., -399, 1, 401, ...
# Now n is the (non-negative) offset, in days, from January 1 of year, to
# the desired date. Now compute how many 100-year cycles precede n.
# Note that it's possible for n100 to equal 4! In that case 4 full
# 100-year cycles precede the desired day, which implies the desired
# day is December 31 at the end of a 400-year cycle.
n100, n = divmod(n, _DI100Y)
# Now compute how many 4-year cycles precede it.
n4, n = divmod(n, _DI4Y)
# And now how many single years. Again n1 can be 4, and again meaning
# that the desired day is December 31 at the end of the 4-year cycle.
n1, n = divmod(n, 365)
year += n100 * 100 + n4 * 4 + n1
if n1 == 4 or n100 == 4:
assert n == 0
return year-1, 12, 31
# Now the year is correct, and n is the offset from January 1. We find
# the month via an estimate that's either exact or one too large.
leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
assert leapyear == _is_leap(year)
month = (n + 50) >> 5
preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
if preceding > n: # estimate is too large
month -= 1
preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
n -= preceding
assert 0 <= n < _days_in_month(year, month)
# Now the year and month are correct, and n is the offset from the
# start of that month: we're done!
return year, month, n+1
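# Illustrative round-trip check (added): _ord2ymd inverts _ymd2ord,
# including leap days.
assert _ord2ymd(_ymd2ord(2016, 2, 29)) == (2016, 2, 29)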
# Month and day names. For localized versions, see the calendar module.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
wday = (_ymd2ord(y, m, d) + 6) % 7
dnum = _days_before_month(y, m) + d
return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
def _format_time(hh, mm, ss, us):
# Skip trailing microseconds when us==0.
result = "%02d:%02d:%02d" % (hh, mm, ss)
if us:
result += ".%06d" % us
return result
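# For example, _format_time(9, 5, 2, 0) returns '09:05:02' and
# _format_time(9, 5, 2, 300) returns '09:05:02.000300'.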
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
# Don't call utcoffset() or tzname() unless actually needed.
freplace = None # the string to use for %f
zreplace = None # the string to use for %z
Zreplace = None # the string to use for %Z
# Scan format for %z and %Z escapes, replacing as needed.
newformat = []
push = newformat.append
i, n = 0, len(format)
while i < n:
ch = format[i]
i += 1
if ch == '%':
if i < n:
ch = format[i]
i += 1
if ch == 'f':
if freplace is None:
freplace = '%06d' % getattr(object,
'microsecond', 0)
newformat.append(freplace)
elif ch == 'z':
if zreplace is None:
zreplace = ""
if hasattr(object, "utcoffset"):
offset = object.utcoffset()
if offset is not None:
sign = '+'
if offset.days < 0:
offset = -offset
sign = '-'
h, m = divmod(offset, timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
zreplace = '%c%02d%02d' % (sign, h, m)
assert '%' not in zreplace
newformat.append(zreplace)
elif ch == 'Z':
if Zreplace is None:
Zreplace = ""
if hasattr(object, "tzname"):
s = object.tzname()
if s is not None:
# strftime is going to have at this: escape %
Zreplace = s.replace('%', '%%')
newformat.append(Zreplace)
else:
push('%')
push(ch)
else:
push('%')
else:
push(ch)
newformat = "".join(newformat)
return _time.strftime(newformat, timetuple)
def _call_tzinfo_method(tzinfo, methname, tzinfoarg):
if tzinfo is None:
return None
return getattr(tzinfo, methname)(tzinfoarg)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range, and a whole # of minutes.
# If it is, its integer value is returned. Else ValueError is raised.
def _check_utc_offset(name, offset):
assert name in ("utcoffset", "dst")
if offset is None:
return
if not isinstance(offset, timedelta):
raise TypeError("tzinfo.%s() must return None "
"or timedelta, not '%s'" % (name, type(offset)))
if offset % timedelta(minutes=1) or offset.microseconds:
raise ValueError("tzinfo.%s() must return a whole number "
"of minutes, got %s" % (name, offset))
if not -timedelta(1) < offset < timedelta(1):
raise ValueError("%s()=%s, must be must be strictly between"
" -timedelta(hours=24) and timedelta(hours=24)"
% (name, offset))
def _check_date_fields(year, month, day):
if not isinstance(year, int):
raise TypeError('int expected')
if not MINYEAR <= year <= MAXYEAR:
raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
if not 1 <= month <= 12:
raise ValueError('month must be in 1..12', month)
dim = _days_in_month(year, month)
if not 1 <= day <= dim:
raise ValueError('day must be in 1..%d' % dim, day)
def _check_time_fields(hour, minute, second, microsecond):
if not isinstance(hour, int):
raise TypeError('int expected')
if not 0 <= hour <= 23:
raise ValueError('hour must be in 0..23', hour)
if not 0 <= minute <= 59:
raise ValueError('minute must be in 0..59', minute)
if not 0 <= second <= 59:
raise ValueError('second must be in 0..59', second)
if not 0 <= microsecond <= 999999:
raise ValueError('microsecond must be in 0..999999', microsecond)
def _check_tzinfo_arg(tz):
if tz is not None and not isinstance(tz, tzinfo):
raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
class timedelta:
"""Represent the difference between two datetime objects.
Supported operators:
- add, subtract timedelta
- unary plus, minus, abs
- compare to timedelta
- multiply, divide by int
In addition, datetime supports subtraction of two datetime objects
returning a timedelta, and addition or subtraction of a datetime
and a timedelta giving a datetime.
Representation: (days, seconds, microseconds). Why? Because I
felt like it.
"""
__slots__ = '_days', '_seconds', '_microseconds'
def __new__(cls, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
# Doing this efficiently and accurately in C is going to be difficult
# and error-prone, due to ubiquitous overflow possibilities, and that
# C double doesn't have enough bits of precision to represent
# microseconds over 10K years faithfully. The code here tries to make
# explicit where go-fast assumptions can be relied on, in order to
# guide the C implementation; it's way more convoluted than speed-
# ignoring auto-overflow-to-long idiomatic Python could be.
# XXX Check that all inputs are ints or floats.
# Final values, all integer.
# s and us fit in 32-bit signed ints; d isn't bounded.
d = s = us = 0
# Normalize everything to days, seconds, microseconds.
days += weeks*7
seconds += minutes*60 + hours*3600
microseconds += milliseconds*1000
# Get rid of all fractions, and normalize s and us.
# Take a deep breath <wink>.
if isinstance(days, float):
dayfrac, days = _math.modf(days)
daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
assert daysecondswhole == int(daysecondswhole) # can't overflow
s = int(daysecondswhole)
assert days == int(days)
d = int(days)
else:
daysecondsfrac = 0.0
d = days
assert isinstance(daysecondsfrac, float)
assert abs(daysecondsfrac) <= 1.0
assert isinstance(d, int)
assert abs(s) <= 24 * 3600
# days isn't referenced again before redefinition
if isinstance(seconds, float):
secondsfrac, seconds = _math.modf(seconds)
assert seconds == int(seconds)
seconds = int(seconds)
secondsfrac += daysecondsfrac
assert abs(secondsfrac) <= 2.0
else:
secondsfrac = daysecondsfrac
# daysecondsfrac isn't referenced again
assert isinstance(secondsfrac, float)
assert abs(secondsfrac) <= 2.0
assert isinstance(seconds, int)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 2 * 24 * 3600
# seconds isn't referenced again before redefinition
usdouble = secondsfrac * 1e6
assert abs(usdouble) < 2.1e6 # exact value not critical
# secondsfrac isn't referenced again
if isinstance(microseconds, float):
microseconds += usdouble
microseconds = round(microseconds, 0)
seconds, microseconds = divmod(microseconds, 1e6)
assert microseconds == int(microseconds)
assert seconds == int(seconds)
days, seconds = divmod(seconds, 24.*3600.)
assert days == int(days)
assert seconds == int(seconds)
d += int(days)
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 3 * 24 * 3600
else:
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 3 * 24 * 3600
microseconds = float(microseconds)
microseconds += usdouble
microseconds = round(microseconds, 0)
assert abs(s) <= 3 * 24 * 3600
assert abs(microseconds) < 3.1e6
# Just a little bit of carrying possible for microseconds and seconds.
assert isinstance(microseconds, float)
assert int(microseconds) == microseconds
us = int(microseconds)
seconds, us = divmod(us, 1000000)
        s += seconds    # can't overflow
assert isinstance(s, int)
days, s = divmod(s, 24*3600)
d += days
assert isinstance(d, int)
assert isinstance(s, int) and 0 <= s < 24*3600
assert isinstance(us, int) and 0 <= us < 1000000
self = object.__new__(cls)
self._days = d
self._seconds = s
self._microseconds = us
if abs(d) > 999999999:
raise OverflowError("timedelta # of days is too large: %d" % d)
return self
def __repr__(self):
if self._microseconds:
return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
self._days,
self._seconds,
self._microseconds)
if self._seconds:
return "%s(%d, %d)" % ('datetime.' + self.__class__.__name__,
self._days,
self._seconds)
return "%s(%d)" % ('datetime.' + self.__class__.__name__, self._days)
def __str__(self):
mm, ss = divmod(self._seconds, 60)
hh, mm = divmod(mm, 60)
s = "%d:%02d:%02d" % (hh, mm, ss)
if self._days:
def plural(n):
return n, abs(n) != 1 and "s" or ""
s = ("%d day%s, " % plural(self._days)) + s
if self._microseconds:
s = s + ".%06d" % self._microseconds
return s
def total_seconds(self):
"""Total seconds in the duration."""
return ((self.days * 86400 + self.seconds)*10**6 +
self.microseconds) / 10**6
# Read-only field accessors
@property
def days(self):
"""days"""
return self._days
@property
def seconds(self):
"""seconds"""
return self._seconds
@property
def microseconds(self):
"""microseconds"""
return self._microseconds
def __add__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days + other._days,
self._seconds + other._seconds,
self._microseconds + other._microseconds)
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days - other._days,
self._seconds - other._seconds,
self._microseconds - other._microseconds)
return NotImplemented
def __rsub__(self, other):
if isinstance(other, timedelta):
return -self + other
return NotImplemented
def __neg__(self):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(-self._days,
-self._seconds,
-self._microseconds)
def __pos__(self):
return self
def __abs__(self):
if self._days < 0:
return -self
else:
return self
def __mul__(self, other):
if isinstance(other, int):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days * other,
self._seconds * other,
self._microseconds * other)
if isinstance(other, float):
a, b = other.as_integer_ratio()
return self * a / b
return NotImplemented
__rmul__ = __mul__
def _to_microseconds(self):
return ((self._days * (24*3600) + self._seconds) * 1000000 +
self._microseconds)
def __floordiv__(self, other):
if not isinstance(other, (int, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec // other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec // other)
def __truediv__(self, other):
if not isinstance(other, (int, float, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec / other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec / other)
if isinstance(other, float):
a, b = other.as_integer_ratio()
return timedelta(0, 0, b * usec / a)
def __mod__(self, other):
if isinstance(other, timedelta):
r = self._to_microseconds() % other._to_microseconds()
return timedelta(0, 0, r)
return NotImplemented
def __divmod__(self, other):
if isinstance(other, timedelta):
q, r = divmod(self._to_microseconds(),
other._to_microseconds())
return q, timedelta(0, 0, r)
return NotImplemented
# Comparisons of timedelta objects with other.
def __eq__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) == 0
else:
return False
def __ne__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) != 0
else:
return True
def __le__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other):
assert isinstance(other, timedelta)
return _cmp(self._getstate(), other._getstate())
def __hash__(self):
return hash(self._getstate())
def __bool__(self):
return (self._days != 0 or
self._seconds != 0 or
self._microseconds != 0)
# Pickle support.
def _getstate(self):
return (self._days, self._seconds, self._microseconds)
def __reduce__(self):
return (self.__class__, self._getstate())
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
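# --- Illustrative sketch (added for exposition; not part of the original
# module; the _demo_* names are invented). Shows how the constructor
# normalization above collapses mixed-sign, mixed-unit arguments into the
# canonical (days, seconds, microseconds) triple, and how the arithmetic
# defined on the class reads it back.
def _demo_timedelta():
    td = timedelta(hours=-1, minutes=90)        # -3600 s + 5400 s
    assert (td.days, td.seconds, td.microseconds) == (0, 1800, 0)
    assert timedelta(days=1).total_seconds() == 86400.0
    # Division works in whole microseconds via _to_microseconds():
    assert timedelta(hours=1) // timedelta(minutes=1) == 60
    return td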
class date:
"""Concrete date type.
Constructors:
__new__()
fromtimestamp()
today()
fromordinal()
Operators:
__repr__, __str__
__cmp__, __hash__
__add__, __radd__, __sub__ (add/radd only with timedelta arg)
Methods:
timetuple()
toordinal()
weekday()
isoweekday(), isocalendar(), isoformat()
ctime()
strftime()
Properties (readonly):
year, month, day
"""
__slots__ = '_year', '_month', '_day'
def __new__(cls, year, month=None, day=None):
"""Constructor.
Arguments:
year, month, day (required, base 1)
"""
if (isinstance(year, bytes) and len(year) == 4 and
1 <= year[2] <= 12 and month is None): # Month is sane
# Pickle support
self = object.__new__(cls)
self.__setstate(year)
return self
_check_date_fields(year, month, day)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
return self
# Additional constructors
@classmethod
def fromtimestamp(cls, t):
"Construct a date from a POSIX timestamp (like time.time())."
y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
return cls(y, m, d)
@classmethod
def today(cls):
"Construct a date from time.time()."
t = _time.time()
return cls.fromtimestamp(t)
@classmethod
def fromordinal(cls, n):
"""Contruct a date from a proleptic Gregorian ordinal.
January 1 of year 1 is day 1. Only the year, month and day are
non-zero in the result.
"""
y, m, d = _ord2ymd(n)
return cls(y, m, d)
# Conversions to string
def __repr__(self):
"""Convert to formal string, for repr().
>>> dt = datetime(2010, 1, 1)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0)'
>>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
"""
return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
self._year,
self._month,
self._day)
# XXX These shouldn't depend on time.localtime(), because that
# clips the usable dates to [1970 .. 2038). At least ctime() is
# easily done without using strftime() -- that's better too because
# strftime("%c", ...) is locale specific.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d 00:00:00 %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day, self._year)
def strftime(self, fmt):
"Format using strftime()."
return _wrap_strftime(self, fmt, self.timetuple())
def __format__(self, fmt):
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
def isoformat(self):
"""Return the date formatted according to ISO.
This is 'YYYY-MM-DD'.
References:
- http://www.w3.org/TR/NOTE-datetime
- http://www.cl.cam.ac.uk/~mgk25/iso-time.html
"""
return "%04d-%02d-%02d" % (self._year, self._month, self._day)
__str__ = isoformat
# Read-only field accessors
@property
def year(self):
"""year (1-9999)"""
return self._year
@property
def month(self):
"""month (1-12)"""
return self._month
@property
def day(self):
"""day (1-31)"""
return self._day
# Standard conversions, __cmp__, __hash__ (and helpers)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
return _build_struct_time(self._year, self._month, self._day,
0, 0, 0, -1)
def toordinal(self):
"""Return proleptic Gregorian ordinal for the year, month and day.
January 1 of year 1 is day 1. Only the year, month and day values
contribute to the result.
"""
return _ymd2ord(self._year, self._month, self._day)
def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year
if month is None:
month = self._month
if day is None:
day = self._day
_check_date_fields(year, month, day)
return date(year, month, day)
# Comparisons of date objects with other.
def __eq__(self, other):
if isinstance(other, date):
return self._cmp(other) == 0
return NotImplemented
def __ne__(self, other):
if isinstance(other, date):
return self._cmp(other) != 0
return NotImplemented
def __le__(self, other):
if isinstance(other, date):
return self._cmp(other) <= 0
return NotImplemented
def __lt__(self, other):
if isinstance(other, date):
return self._cmp(other) < 0
return NotImplemented
def __ge__(self, other):
if isinstance(other, date):
return self._cmp(other) >= 0
return NotImplemented
def __gt__(self, other):
if isinstance(other, date):
return self._cmp(other) > 0
return NotImplemented
def _cmp(self, other):
assert isinstance(other, date)
y, m, d = self._year, self._month, self._day
y2, m2, d2 = other._year, other._month, other._day
return _cmp((y, m, d), (y2, m2, d2))
def __hash__(self):
"Hash."
return hash(self._getstate())
# Computations
def __add__(self, other):
"Add a date to a timedelta."
if isinstance(other, timedelta):
o = self.toordinal() + other.days
if 0 < o <= _MAXORDINAL:
return date.fromordinal(o)
raise OverflowError("result out of range")
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two dates, or a date and a timedelta."""
if isinstance(other, timedelta):
return self + timedelta(-other.days)
if isinstance(other, date):
days1 = self.toordinal()
days2 = other.toordinal()
return timedelta(days1 - days2)
return NotImplemented
def weekday(self):
"Return day of the week, where Monday == 0 ... Sunday == 6."
return (self.toordinal() + 6) % 7
# Day-of-the-week and week-of-the-year, according to ISO
def isoweekday(self):
"Return day of the week, where Monday == 1 ... Sunday == 7."
# 1-Jan-0001 is a Monday
return self.toordinal() % 7 or 7
def isocalendar(self):
"""Return a 3-tuple containing ISO year, week number, and weekday.
The first ISO week of the year is the (Mon-Sun) week
containing the year's first Thursday; everything else derives
from that.
The first week is 1; Monday is 1 ... Sunday is 7.
ISO calendar algorithm taken from
http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
"""
year = self._year
week1monday = _isoweek1monday(year)
today = _ymd2ord(self._year, self._month, self._day)
# Internally, week and day have origin 0
week, day = divmod(today - week1monday, 7)
if week < 0:
year -= 1
week1monday = _isoweek1monday(year)
week, day = divmod(today - week1monday, 7)
elif week >= 52:
if today >= _isoweek1monday(year+1):
year += 1
week = 0
return year, week+1, day+1
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
return bytes([yhi, ylo, self._month, self._day]),
def __setstate(self, string):
if len(string) != 4 or not (1 <= string[2] <= 12):
raise TypeError("not enough arguments")
yhi, ylo, self._month, self._day = string
self._year = yhi * 256 + ylo
def __reduce__(self):
return (self.__class__, self._getstate())
_date_class = date # so functions w/ args named "date" can get at the class
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
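# --- Illustrative sketch (added for exposition; not part of the original
# module; _demo_date is an invented name). The ordinal round-trip is the
# backbone of this class: toordinal()/fromordinal() are inverses, and the
# weekday helpers hang off the ordinal (1-Jan-0001 is a Monday).
def _demo_date():
    d = date(2010, 1, 1)                  # a Friday
    assert date.fromordinal(d.toordinal()) == d
    assert d.isoformat() == '2010-01-01'
    assert d.weekday() == 4               # Monday == 0
    assert d.isoweekday() == 5            # Monday == 1
    return d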
class tzinfo:
"""Abstract base class for time zone info classes.
    Subclasses must override the tzname(), utcoffset() and dst() methods.
"""
__slots__ = ()
def tzname(self, dt):
"datetime -> string name of time zone."
raise NotImplementedError("tzinfo subclass must override tzname()")
def utcoffset(self, dt):
"datetime -> minutes east of UTC (negative for west of UTC)"
raise NotImplementedError("tzinfo subclass must override utcoffset()")
def dst(self, dt):
"""datetime -> DST offset in minutes east of UTC.
Return 0 if DST not in effect. utcoffset() must include the DST
offset.
"""
raise NotImplementedError("tzinfo subclass must override dst()")
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# See the long comment block at the end of this file for an
# explanation of this algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst
# Pickle support.
def __reduce__(self):
getinitargs = getattr(self, "__getinitargs__", None)
if getinitargs:
args = getinitargs()
else:
args = ()
getstate = getattr(self, "__getstate__", None)
if getstate:
state = getstate()
else:
state = getattr(self, "__dict__", None) or None
if state is None:
return (self.__class__, args)
else:
return (self.__class__, args, state)
_tzinfo_class = tzinfo
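# --- Illustrative sketch (added for exposition; not part of the original
# module; the class name and offset handling are invented). The smallest
# sane concrete tzinfo: a fixed offset with no DST, overriding the three
# methods the abstract base class requires.
class _FixedOffsetDemo(tzinfo):
    """Fixed offset in minutes east of UTC, never in DST."""
    def __init__(self, minutes_east, name):
        self._minutes = minutes_east
        self._name = name
    def utcoffset(self, dt):
        return timedelta(minutes=self._minutes)
    def dst(self, dt):
        return timedelta(0)               # never in DST
    def tzname(self, dt):
        return self._name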
class time:
"""Time with time zone.
Constructors:
__new__()
Operators:
__repr__, __str__
__cmp__, __hash__
Methods:
strftime()
isoformat()
utcoffset()
tzname()
dst()
Properties (readonly):
hour, minute, second, microsecond, tzinfo
"""
def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
"""Constructor.
Arguments:
hour, minute (required)
second, microsecond (default to zero)
tzinfo (default to None)
"""
self = object.__new__(cls)
if isinstance(hour, bytes) and len(hour) == 6:
# Pickle support
self.__setstate(hour, minute or None)
return self
_check_tzinfo_arg(tzinfo)
_check_time_fields(hour, minute, second, microsecond)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
# Standard conversions, __hash__ (and helpers)
# Comparisons of time objects with other.
def __eq__(self, other):
if isinstance(other, time):
return self._cmp(other, allow_mixed=True) == 0
else:
return False
def __ne__(self, other):
if isinstance(other, time):
return self._cmp(other, allow_mixed=True) != 0
else:
return True
def __le__(self, other):
if isinstance(other, time):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, time):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, time):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, time):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, time)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._hour, self._minute, self._second,
self._microsecond),
(other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware times")
myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
return _cmp((myhhmm, self._second, self._microsecond),
(othhmm, other._second, other._microsecond))
def __hash__(self):
"""Hash."""
tzoff = self.utcoffset()
if not tzoff: # zero or None
return hash(self._getstate()[0])
h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
if 0 <= h < 24:
return hash(time(h, m, self.second, self.microsecond))
return hash((h, m, self.second, self.microsecond))
# Conversion to string
def _tzstr(self, sep=":"):
"""Return formatted timezone offset (+xx:xx) or None."""
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
assert not mm % timedelta(minutes=1), "whole minute"
mm //= timedelta(minutes=1)
assert 0 <= hh < 24
off = "%s%02d%s%02d" % (sign, hh, sep, mm)
return off
def __repr__(self):
"""Convert to formal string, for repr()."""
if self._microsecond != 0:
s = ", %d, %d" % (self._second, self._microsecond)
elif self._second != 0:
s = ", %d" % self._second
else:
s = ""
s= "%s(%d, %d%s)" % ('datetime.' + self.__class__.__name__,
self._hour, self._minute, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def isoformat(self):
"""Return the time formatted according to ISO.
This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
self.microsecond == 0.
"""
s = _format_time(self._hour, self._minute, self._second,
self._microsecond)
tz = self._tzstr()
if tz:
s += tz
return s
__str__ = isoformat
def strftime(self, fmt):
"""Format using strftime(). The date part of the timestamp passed
to underlying strftime should not be used.
"""
# The year must be >= 1000 else Python's strftime implementation
# can raise a bogus exception.
timetuple = (1900, 1, 1,
self._hour, self._minute, self._second,
0, 1, -1)
return _wrap_strftime(self, fmt, timetuple)
def __format__(self, fmt):
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
# Timezone functions
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(None)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(None)
_check_utc_offset("dst", offset)
return offset
def replace(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True):
"""Return a new time with new values for the specified fields."""
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
_check_time_fields(hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
return time(hour, minute, second, microsecond, tzinfo)
def __bool__(self):
if self.second or self.microsecond:
return True
offset = self.utcoffset() or timedelta(0)
return timedelta(hours=self.hour, minutes=self.minute) != offset
# Pickle support.
def _getstate(self):
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
basestate = bytes([self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if len(string) != 6 or string[0] >= 24:
raise TypeError("an integer is required")
(self._hour, self._minute, self._second,
us1, us2, us3) = string
self._microsecond = (((us1 << 8) | us2) << 8) | us3
if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
self._tzinfo = tzinfo
else:
raise TypeError("bad tzinfo state arg %r" % tzinfo)
def __reduce__(self):
return (time, self._getstate())
_time_class = time # so functions w/ args named "time" can get at the class
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
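# --- Illustrative sketch (added for exposition; not part of the original
# module; _demo_time is an invented name). Naive vs. aware times: only the
# aware one carries a UTC offset, and isoformat() appends it. Reuses the
# _FixedOffsetDemo sketch defined after the tzinfo class above.
def _demo_time():
    naive = time(13, 30)
    aware = time(13, 30, tzinfo=_FixedOffsetDemo(60, "DEMO+01"))
    assert naive.utcoffset() is None
    assert aware.utcoffset() == timedelta(minutes=60)
    assert aware.isoformat() == '13:30:00+01:00'
    return naive, aware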
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints.
"""
__slots__ = date.__slots__ + (
'_hour', '_minute', '_second',
'_microsecond', '_tzinfo')
def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
microsecond=0, tzinfo=None):
if isinstance(year, bytes) and len(year) == 10:
# Pickle support
self = date.__new__(cls, year[:4])
self.__setstate(year, month)
return self
_check_tzinfo_arg(tzinfo)
_check_time_fields(hour, minute, second, microsecond)
self = date.__new__(cls, year, month, day)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@classmethod
def fromtimestamp(cls, t, tz=None):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
converter = _time.localtime if tz is None else _time.gmtime
t, frac = divmod(t, 1.0)
us = int(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
# roll over to seconds, otherwise, ValueError is raised
# by the constructor.
if us == 1000000:
t += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
result = cls(y, m, d, hh, mm, ss, us, tz)
if tz is not None:
result = tz.fromutc(result)
return result
@classmethod
def utcfromtimestamp(cls, t):
"Construct a UTC datetime from a POSIX timestamp (like time.time())."
t, frac = divmod(t, 1.0)
us = int(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
# roll over to seconds, otherwise, ValueError is raised
# by the constructor.
if us == 1000000:
t += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
return cls(y, m, d, hh, mm, ss, us)
# XXX This is supposed to do better than we *can* do by using time.time(),
# XXX if the platform supports a more accurate way. The C implementation
# XXX uses gettimeofday on platforms that have it, but that isn't
# XXX available from Python. So now() may return different results
# XXX across the implementations.
@classmethod
def now(cls, tz=None):
"Construct a datetime from time.time() and optional time zone info."
t = _time.time()
return cls.fromtimestamp(t, tz)
@classmethod
def utcnow(cls):
"Construct a UTC datetime from time.time()."
t = _time.time()
return cls.utcfromtimestamp(t)
@classmethod
def combine(cls, date, time):
"Construct a datetime from a given date and a given time."
if not isinstance(date, _date_class):
raise TypeError("date argument must be a date instance")
if not isinstance(time, _time_class):
raise TypeError("time argument must be a time instance")
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second, time.microsecond,
time.tzinfo)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
dst = self.dst()
if dst is None:
dst = -1
elif dst:
dst = 1
else:
dst = 0
return _build_struct_time(self.year, self.month, self.day,
self.hour, self.minute, self.second,
dst)
def timestamp(self):
"Return POSIX timestamp as float"
if self._tzinfo is None:
return _time.mktime((self.year, self.month, self.day,
self.hour, self.minute, self.second,
-1, -1, -1)) + self.microsecond / 1e6
else:
return (self - _EPOCH).total_seconds()
def utctimetuple(self):
"Return UTC time tuple compatible with time.gmtime()."
offset = self.utcoffset()
if offset:
self -= offset
y, m, d = self.year, self.month, self.day
hh, mm, ss = self.hour, self.minute, self.second
return _build_struct_time(y, m, d, hh, mm, ss, 0)
def date(self):
"Return the date part."
return date(self._year, self._month, self._day)
def time(self):
"Return the time part, with tzinfo None."
return time(self.hour, self.minute, self.second, self.microsecond)
def timetz(self):
"Return the time part, with same tzinfo."
return time(self.hour, self.minute, self.second, self.microsecond,
self._tzinfo)
def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=True):
"""Return a new datetime with new values for the specified fields."""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
_check_date_fields(year, month, day)
_check_time_fields(hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
return datetime(year, month, day, hour, minute, second,
microsecond, tzinfo)
def astimezone(self, tz=None):
if tz is None:
if self.tzinfo is None:
raise ValueError("astimezone() requires an aware datetime")
ts = (self - _EPOCH) // timedelta(seconds=1)
localtm = _time.localtime(ts)
local = datetime(*localtm[:6])
try:
# Extract TZ data if available
gmtoff = localtm.tm_gmtoff
zone = localtm.tm_zone
except AttributeError:
# Compute UTC offset and compare with the value implied
# by tm_isdst. If the values match, use the zone name
# implied by tm_isdst.
delta = local - datetime(*_time.gmtime(ts)[:6])
dst = _time.daylight and localtm.tm_isdst > 0
gmtoff = -(_time.altzone if dst else _time.timezone)
if delta == timedelta(seconds=gmtoff):
tz = timezone(delta, _time.tzname[dst])
else:
tz = timezone(delta)
else:
tz = timezone(timedelta(seconds=gmtoff), zone)
elif not isinstance(tz, tzinfo):
raise TypeError("tz argument must be an instance of tzinfo")
mytz = self.tzinfo
if mytz is None:
raise ValueError("astimezone() requires an aware datetime")
if tz is mytz:
return self
# Convert self to UTC, and attach the new time zone object.
myoffset = self.utcoffset()
if myoffset is None:
raise ValueError("astimezone() requires an aware datetime")
utc = (self - myoffset).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
return tz.fromutc(utc)
# Ways to produce a string.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d %02d:%02d:%02d %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day,
self._hour, self._minute, self._second,
self._year)
def isoformat(self, sep='T'):
"""Return the time formatted according to ISO.
This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
"""
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day,
sep) +
_format_time(self._hour, self._minute, self._second,
self._microsecond))
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
assert not mm % timedelta(minutes=1), "whole minute"
mm //= timedelta(minutes=1)
s += "%s%02d:%02d" % (sign, hh, mm)
return s
def __repr__(self):
"""Convert to formal string, for repr()."""
L = [self._year, self._month, self._day, # These are never zero
self._hour, self._minute, self._second, self._microsecond]
if L[-1] == 0:
del L[-1]
if L[-1] == 0:
del L[-1]
s = ", ".join(map(str, L))
s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def __str__(self):
"Convert to string, for str()."
return self.isoformat(sep=' ')
@classmethod
def strptime(cls, date_string, format):
'string, format -> new datetime parsed from a string (like time.strptime()).'
import _strptime
return _strptime._strptime_datetime(cls, date_string, format)
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(self)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
name = _call_tzinfo_method(self._tzinfo, "tzname", self)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(self)
_check_utc_offset("dst", offset)
return offset
# Comparisons of datetime objects with other.
def __eq__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) == 0
elif not isinstance(other, date):
return NotImplemented
else:
return False
def __ne__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) != 0
elif not isinstance(other, date):
return NotImplemented
else:
return True
def __le__(self, other):
if isinstance(other, datetime):
return self._cmp(other) <= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) < 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, datetime):
return self._cmp(other) >= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) > 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, datetime)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._year, self._month, self._day,
self._hour, self._minute, self._second,
self._microsecond),
(other._year, other._month, other._day,
other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware datetimes")
# XXX What follows could be done more efficiently...
diff = self - other # this will take offsets into account
if diff.days < 0:
return -1
return diff and 1 or 0
def __add__(self, other):
"Add a datetime and a timedelta."
if not isinstance(other, timedelta):
return NotImplemented
delta = timedelta(self.toordinal(),
hours=self._hour,
minutes=self._minute,
seconds=self._second,
microseconds=self._microsecond)
delta += other
hour, rem = divmod(delta.seconds, 3600)
minute, second = divmod(rem, 60)
if 0 < delta.days <= _MAXORDINAL:
return datetime.combine(date.fromordinal(delta.days),
time(hour, minute, second,
delta.microseconds,
tzinfo=self._tzinfo))
raise OverflowError("result out of range")
__radd__ = __add__
def __sub__(self, other):
"Subtract two datetimes, or a datetime and a timedelta."
if not isinstance(other, datetime):
if isinstance(other, timedelta):
return self + -other
return NotImplemented
days1 = self.toordinal()
days2 = other.toordinal()
secs1 = self._second + self._minute * 60 + self._hour * 3600
secs2 = other._second + other._minute * 60 + other._hour * 3600
base = timedelta(days1 - days2,
secs1 - secs2,
self._microsecond - other._microsecond)
if self._tzinfo is other._tzinfo:
return base
myoff = self.utcoffset()
otoff = other.utcoffset()
if myoff == otoff:
return base
if myoff is None or otoff is None:
raise TypeError("cannot mix naive and timezone-aware time")
return base + otoff - myoff
def __hash__(self):
tzoff = self.utcoffset()
if tzoff is None:
return hash(self._getstate()[0])
days = _ymd2ord(self.year, self.month, self.day)
seconds = self.hour * 3600 + self.minute * 60 + self.second
return hash(timedelta(days, seconds, self.microsecond) - tzoff)
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
basestate = bytes([yhi, ylo, self._month, self._day,
self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
(yhi, ylo, self._month, self._day, self._hour,
self._minute, self._second, us1, us2, us3) = string
self._year = yhi * 256 + ylo
self._microsecond = (((us1 << 8) | us2) << 8) | us3
if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
self._tzinfo = tzinfo
else:
raise TypeError("bad tzinfo state arg %r" % tzinfo)
def __reduce__(self):
return (self.__class__, self._getstate())
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
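# --- Illustrative sketch (added for exposition; not part of the original
# module; _demo_datetime is an invented name). combine() glues a date and a
# time together, and +/- route through timedelta exactly as defined above.
def _demo_datetime():
    dt = datetime.combine(date(2010, 1, 1), time(12, 0))
    assert dt == datetime(2010, 1, 1, 12, 0)
    assert dt + timedelta(days=1, hours=13) == datetime(2010, 1, 3, 1, 0)
    assert datetime(2010, 1, 3, 1, 0) - dt == timedelta(days=1, hours=13)
    assert dt.isoformat(sep=' ') == '2010-01-01 12:00:00'
    return dt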
def _isoweek1monday(year):
# Helper to calculate the day number of the Monday starting week 1
# XXX This could be done more efficiently
THURSDAY = 3
firstday = _ymd2ord(year, 1, 1)
firstweekday = (firstday + 6) % 7 # See weekday() above
week1monday = firstday - firstweekday
if firstweekday > THURSDAY:
week1monday += 7
return week1monday
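# --- Illustrative sketch (added for exposition; not part of the original
# module; _demo_isocalendar is an invented name). The edge case the helper
# above exists for: 1-Jan can belong to the last ISO week of the previous
# year.
def _demo_isocalendar():
    assert date(2010, 1, 1).isocalendar() == (2009, 53, 5)   # a Friday
    assert date(2010, 1, 4).isocalendar() == (2010, 1, 1)    # first ISO Monday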
class timezone(tzinfo):
__slots__ = '_offset', '_name'
# Sentinel value to disallow None
_Omitted = object()
def __new__(cls, offset, name=_Omitted):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
if name is cls._Omitted:
if not offset:
return cls.utc
name = None
elif not isinstance(name, str):
raise TypeError("name must be a string")
if not cls._minoffset <= offset <= cls._maxoffset:
raise ValueError("offset must be a timedelta"
" strictly between -timedelta(hours=24) and"
" timedelta(hours=24).")
if (offset.microseconds != 0 or
offset.seconds % 60 != 0):
raise ValueError("offset must be a timedelta"
" representing a whole number of minutes")
return cls._create(offset, name)
@classmethod
def _create(cls, offset, name=None):
self = tzinfo.__new__(cls)
self._offset = offset
self._name = name
return self
def __getinitargs__(self):
"""pickle support"""
if self._name is None:
return (self._offset,)
return (self._offset, self._name)
def __eq__(self, other):
if type(other) != timezone:
return False
return self._offset == other._offset
def __hash__(self):
return hash(self._offset)
def __repr__(self):
"""Convert to formal string, for repr().
>>> tz = timezone.utc
>>> repr(tz)
'datetime.timezone.utc'
>>> tz = timezone(timedelta(hours=-5), 'EST')
>>> repr(tz)
"datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
"""
if self is self.utc:
return 'datetime.timezone.utc'
if self._name is None:
return "%s(%r)" % ('datetime.' + self.__class__.__name__,
self._offset)
return "%s(%r, %r)" % ('datetime.' + self.__class__.__name__,
self._offset, self._name)
def __str__(self):
return self.tzname(None)
def utcoffset(self, dt):
if isinstance(dt, datetime) or dt is None:
return self._offset
raise TypeError("utcoffset() argument must be a datetime instance"
" or None")
def tzname(self, dt):
if isinstance(dt, datetime) or dt is None:
if self._name is None:
return self._name_from_offset(self._offset)
return self._name
raise TypeError("tzname() argument must be a datetime instance"
" or None")
def dst(self, dt):
if isinstance(dt, datetime) or dt is None:
return None
raise TypeError("dst() argument must be a datetime instance"
" or None")
def fromutc(self, dt):
if isinstance(dt, datetime):
if dt.tzinfo is not self:
raise ValueError("fromutc: dt.tzinfo "
"is not self")
return dt + self._offset
raise TypeError("fromutc() argument must be a datetime instance"
" or None")
_maxoffset = timedelta(hours=23, minutes=59)
_minoffset = -_maxoffset
@staticmethod
def _name_from_offset(delta):
if delta < timedelta(0):
sign = '-'
delta = -delta
else:
sign = '+'
hours, rest = divmod(delta, timedelta(hours=1))
minutes = rest // timedelta(minutes=1)
return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes)
timezone.utc = timezone._create(timedelta(0))
timezone.min = timezone._create(timezone._minoffset)
timezone.max = timezone._create(timezone._maxoffset)
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
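# --- Illustrative sketch (added for exposition; not part of the original
# module; _demo_timezone is an invented name). astimezone() rewrites the
# same instant against a different fixed offset, and _EPOCH anchors POSIX
# timestamps.
def _demo_timezone():
    utc_noon = datetime(2010, 1, 1, 12, 0, tzinfo=timezone.utc)
    est = timezone(timedelta(hours=-5), 'EST')
    local = utc_noon.astimezone(est)
    assert local == utc_noon                  # same instant, new spelling
    assert local.hour == 7 and local.tzname() == 'EST'
    assert (utc_noon - _EPOCH).total_seconds() == 1262347200.0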
"""
Some time zone algebra. For a datetime x, let
x.n = x stripped of its timezone -- its naive time.
x.o = x.utcoffset(), and assuming that doesn't raise an exception or
return None
x.d = x.dst(), and assuming that doesn't raise an exception or
return None
x.s = x's standard offset, x.o - x.d
Now some derived rules, where k is a duration (timedelta).
1. x.o = x.s + x.d
This follows from the definition of x.s.
2. If x and y have the same tzinfo member, x.s = y.s.
This is actually a requirement, an assumption we need to make about
sane tzinfo classes.
3. The naive UTC time corresponding to x is x.n - x.o.
This is again a requirement for a sane tzinfo class.
4. (x+k).s = x.s
This follows from #2, and the fact that datetime+timedelta preserves tzinfo.
5. (x+k).n = x.n + k
Again follows from how arithmetic is defined.
Now we can explain tz.fromutc(x). Let's assume it's an interesting case
(meaning that the various tzinfo methods exist, and don't blow up or return
None when called).
The function wants to return a datetime y with timezone tz, equivalent to x.
x is already in UTC.
By #3, we want
y.n - y.o = x.n [1]
The algorithm starts by attaching tz to x.n, and calling that y. So
x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
becomes true; in effect, we want to solve [2] for k:
(y+k).n - (y+k).o = x.n [2]
By #1, this is the same as
(y+k).n - ((y+k).s + (y+k).d) = x.n [3]
By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
Substituting that into [3],
x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
k - (y+k).s - (y+k).d = 0; rearranging,
k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
k = y.s - (y+k).d
On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
approximate k by ignoring the (y+k).d term at first. Note that k can't be
very large, since all offset-returning methods return a duration of magnitude
less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
be 0, so ignoring it has no consequence then.
In any case, the new value is
z = y + y.s [4]
It's helpful to step back and look at [4] from a higher level: it's simply
mapping from UTC to tz's standard time.
At this point, if
z.n - z.o = x.n [5]
we have an equivalent time, and are almost done. The insecurity here is
at the start of daylight time. Picture US Eastern for concreteness. The wall
time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
sense then. The docs ask that an Eastern tzinfo class consider such a time to
be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
on the day DST starts. We want to return the 1:MM EST spelling because that's
the only spelling that makes sense on the local wall clock.
In fact, if [5] holds at this point, we do have the standard-time spelling,
but that takes a bit of proof. We first prove a stronger result. What's the
difference between the LHS and RHS of [5]? Let
diff = x.n - (z.n - z.o) [6]
Now
z.n = by [4]
(y + y.s).n = by #5
y.n + y.s = since y.n = x.n
x.n + y.s = since z and y have the same tzinfo member,
y.s = z.s by #2
x.n + z.s
Plugging that back into [6] gives
diff =
x.n - ((x.n + z.s) - z.o) = expanding
x.n - x.n - z.s + z.o = cancelling
- z.s + z.o = by #2
z.d
So diff = z.d.
If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
spelling we wanted in the endcase described above. We're done. Contrarily,
if z.d = 0, then we have a UTC equivalent, and are also done.
If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
add to z (in effect, z is in tz's standard time, and we need to shift the
local clock into tz's daylight time).
Let
z' = z + z.d = z + diff [7]
and we can again ask whether
z'.n - z'.o = x.n [8]
If so, we're done. If not, the tzinfo class is insane, according to the
assumptions we've made. This also requires a bit of proof. As before, let's
compute the difference between the LHS and RHS of [8] (and skipping some of
the justifications for the kinds of substitutions we've done several times
already):
diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
x.n - (z.n + diff - z'.o) = replacing diff via [6]
x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
- z.n + z.n - z.o + z'.o = cancel z.n
- z.o + z'.o = #1 twice
-z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
z'.d - z.d
So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
we've found the UTC-equivalent so are done. In fact, we stop with [7] and
return z', not bothering to compute z'.d.
How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
would have to change the result dst() returns: we start in DST, and moving
a little further into it takes us out of DST.
There isn't a sane case where this can happen. The closest it gets is at
the end of DST, where there's an hour in UTC with no spelling in a hybrid
tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
UTC) because the docs insist on that, but 0:MM is taken as being in daylight
time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
standard time. Since that's what the local clock *does*, we want to map both
UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
in local time, but so it goes -- it's the way the local clock works.
When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
(correctly) concludes that z' is not UTC-equivalent to x.
Because we know z.d said z was in daylight time (else [5] would have held and
we would have stopped then), and we know z.d != z'.d (else [8] would have held
and we have stopped then), and there are only 2 possible values dst() can
return in Eastern, it follows that z'.d must be 0 (which it is in the example,
but the reasoning doesn't depend on the example -- it depends on there being
two possible dst() outcomes, one zero and the other non-zero). Therefore
z' must be in standard time, and is the spelling we want in this case.
Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
concerned (because it takes z' as being in standard time rather than the
daylight time we intend here), but returning it gives the real-life "local
clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
tz.
When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
the 1:MM standard time spelling we want.
So how can this break? One of the assumptions must be violated. Two
possibilities:
1) [2] effectively says that y.s is invariant across all y belonging to a given
time zone. This isn't true if, for political reasons or continental drift,
a region decides to change its base offset from UTC.
2) There may be versions of "double daylight" time where the tail end of
the analysis gives up a step too early. I haven't thought about that
enough to say.
In any case, it's clear that the default fromutc() is strong enough to handle
"almost all" time zones: so long as the standard offset is invariant, it
doesn't matter if daylight time transition points change from year to year, or
if daylight time is skipped in some years; it doesn't matter how large or
small dst() may get within its bounds; and it doesn't even matter if some
perverse time zone returns a negative dst(). So a breaking case must be
pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
"""
#brython does not have a _datetime, so lets comment this out for now.
#try:
# from _datetime import *
#except ImportError:
# pass
#else:
# # Clean up unused names
# del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH,
# _DI100Y, _DI400Y, _DI4Y, _MAXORDINAL, _MONTHNAMES,
# _build_struct_time, _call_tzinfo_method, _check_date_fields,
# _check_time_fields, _check_tzinfo_arg, _check_tzname,
# _check_utc_offset, _cmp, _cmperror, _date_class, _days_before_month,
# _days_before_year, _days_in_month, _format_time, _is_leap,
# _isoweek1monday, _math, _ord2ymd, _time, _time_class, _tzinfo_class,
# _wrap_strftime, _ymd2ord)
# # XXX Since import * above excludes names that start with _,
# # docstring does not get overwritten. In the future, it may be
# # appropriate to maintain a single module level docstring and
# # remove the following line.
# #from _datetime import __doc__
| agpl-3.0 |
jyotikamboj/container | dj-tests/admin_checks/tests.py | 4 | 20485 | from __future__ import unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericStackedInline
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, ignore_warnings, override_settings
from .models import Song, Book, Album, TwoAlbumFKAndAnE, City, State, Influence
class SongForm(forms.ModelForm):
pass
class ValidFields(admin.ModelAdmin):
form = SongForm
fields = ['title']
class ValidFormFieldsets(admin.ModelAdmin):
def get_form(self, request, obj=None, **kwargs):
class ExtraFieldForm(SongForm):
name = forms.CharField(max_length=50)
return ExtraFieldForm
fieldsets = (
(None, {
'fields': ('name',),
}),
)
class MyAdmin(admin.ModelAdmin):
@classmethod
def check(cls, model, **kwargs):
return ['error!']
@override_settings(
SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes', 'admin_checks']
)
class SystemChecksTestCase(TestCase):
@override_settings(DEBUG=True)
def test_checks_are_performed(self):
admin.site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ['error!']
self.assertEqual(errors, expected)
finally:
admin.site.unregister(Song)
admin.sites.system_check_errors = []
@override_settings(DEBUG=True)
def test_custom_adminsite(self):
class CustomAdminSite(admin.AdminSite):
pass
custom_site = CustomAdminSite()
custom_site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ['error!']
self.assertEqual(errors, expected)
finally:
custom_site.unregister(Song)
admin.sites.system_check_errors = []
def test_readonly_and_editable(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ["original_release"]
list_display = ["pk", "original_release"]
list_editable = ["original_release"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
("The value of 'list_editable[0]' refers to 'original_release', "
"which is not editable through the admin."),
hint=None,
obj=SongAdmin,
id='admin.E125',
)
]
self.assertEqual(errors, expected)
def test_editable(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_custom_modelforms_with_fields_fieldsets(self):
"""
# Regression test for #8027: custom ModelForms with fields/fieldsets
"""
errors = ValidFields.check(model=Song)
self.assertEqual(errors, [])
def test_custom_get_form_with_fieldsets(self):
"""
Ensure that the fieldsets checks are skipped when the ModelAdmin.get_form() method
is overridden.
Refs #19445.
"""
errors = ValidFormFieldsets.check(model=Song)
self.assertEqual(errors, [])
def test_exclude_values(self):
"""
Tests for basic system checks of 'exclude' option values (#12689)
"""
class ExcludedFields1(admin.ModelAdmin):
exclude = 'foo'
errors = ExcludedFields1.check(model=Book)
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
hint=None,
obj=ExcludedFields1,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ('name', 'name')
errors = ExcludedFields2.check(model=Book)
expected = [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
hint=None,
obj=ExcludedFields2,
id='admin.E015',
)
]
self.assertEqual(errors, expected)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = 'foo'
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
errors = ExcludedFieldsAlbumAdmin.check(model=Album)
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
hint=None,
obj=ExcludedFieldsInline,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_inline_model_admin(self):
"""
Regression test for #9932 - exclude in InlineModelAdmin should not
contain the ForeignKey field used in ModelAdmin.model
"""
class SongInline(admin.StackedInline):
model = Song
exclude = ['album']
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
errors = AlbumAdmin.check(model=Album)
expected = [
checks.Error(
("Cannot exclude the field 'album', because it is the foreign key "
"to the parent model 'admin_checks.Album'."),
hint=None,
obj=SongInline,
id='admin.E201',
)
]
self.assertEqual(errors, expected)
def test_valid_generic_inline_model_admin(self):
"""
Regression test for #22034 - check that generic inlines don't look for
normal ForeignKey relations.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_generic_inline_model_admin_non_generic_model(self):
"""
Ensure that a model without a GenericForeignKey raises problems if it's included
in an GenericInlineModelAdmin definition.
"""
class BookInline(GenericStackedInline):
model = Book
class SongAdmin(admin.ModelAdmin):
inlines = [BookInline]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
"'admin_checks.Book' has no GenericForeignKey.",
hint=None,
obj=BookInline,
id='admin.E301',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_ct_field(self):
"A GenericInlineModelAdmin raises problems if the ct_field points to a non-existent field."
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = 'nonexistent'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
"'ct_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
hint=None,
obj=InfluenceInline,
id='admin.E302',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_fk_field(self):
"A GenericInlineModelAdmin raises problems if the ct_fk_field points to a non-existent field."
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = 'nonexistent'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
"'ct_fk_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
hint=None,
obj=InfluenceInline,
id='admin.E303',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_ct_field(self):
"A GenericInlineModelAdmin raises problems if the ct_field points to a field that isn't part of a GenericForeignKey"
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = 'name'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using content type field 'name' and object ID field 'object_id'.",
hint=None,
obj=InfluenceInline,
id='admin.E304',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_fk_field(self):
"A GenericInlineModelAdmin raises problems if the ct_fk_field points to a field that isn't part of a GenericForeignKey"
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = 'name'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using content type field 'content_type' and object ID field 'name'.",
hint=None,
obj=InfluenceInline,
id='admin.E304',
)
]
self.assertEqual(errors, expected)
def test_app_label_in_admin_checks(self):
"""
Regression test for #15669 - Include app label in admin system check messages
"""
class RawIdNonexistingAdmin(admin.ModelAdmin):
raw_id_fields = ('nonexisting',)
errors = RawIdNonexistingAdmin.check(model=Album)
expected = [
checks.Error(
("The value of 'raw_id_fields[0]' refers to 'nonexisting', which is "
"not an attribute of 'admin_checks.Album'."),
hint=None,
obj=RawIdNonexistingAdmin,
id='admin.E002',
)
]
self.assertEqual(errors, expected)
def test_fk_exclusion(self):
"""
Regression test for #11709 - when testing for fk excluding (when exclude is
given) make sure fk_name is honored or things blow up when there is more
than one fk to the parent model.
"""
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
exclude = ("e",)
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
self.assertEqual(errors, [])
def test_inline_self_check(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
expected = [
checks.Error(
"'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey to 'admin_checks.Album'.",
hint=None,
obj=TwoAlbumFKAndAnEInline,
id='admin.E202',
)
]
self.assertEqual(errors, expected)
def test_inline_with_specified(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
self.assertEqual(errors, [])
def test_readonly(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_on_method(self):
def my_function(obj):
pass
class SongAdmin(admin.ModelAdmin):
readonly_fields = (my_function,)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_modeladmin",)
def readonly_method_on_modeladmin(self, obj):
pass
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_method_on_model(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_model",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_nonexistent_field(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title", "nonexistent")
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
("The value of 'readonly_fields[1]' is not a callable, an attribute "
"of 'SongAdmin', or an attribute of 'admin_checks.Song'."),
hint=None,
obj=SongAdmin,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
def test_nonexistent_field_on_inline(self):
class CityInline(admin.TabularInline):
model = City
readonly_fields = ['i_dont_exist'] # Missing attribute
errors = CityInline.check(State)
expected = [
checks.Error(
("The value of 'readonly_fields[0]' is not a callable, an attribute "
"of 'CityInline', or an attribute of 'admin_checks.City'."),
hint=None,
obj=CityInline,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
def test_extra(self):
class SongAdmin(admin.ModelAdmin):
def awesome_song(self, instance):
if instance.title == "Born to Run":
return "Best Ever!"
return "Status unknown."
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_lambda(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = (lambda obj: "test",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_graceful_m2m_fail(self):
"""
Regression test for #12203/#12237 - Fail more gracefully when a M2M field that
specifies the 'through' option is included in the 'fields' or the 'fieldsets'
ModelAdmin options.
"""
class BookAdmin(admin.ModelAdmin):
fields = ['authors']
errors = BookAdmin.check(model=Book)
expected = [
checks.Error(
("The value of 'fields' cannot include the ManyToManyField 'authors', "
"because that field manually specifies a relationship model."),
hint=None,
obj=BookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_cannot_include_through(self):
class FieldsetBookAdmin(admin.ModelAdmin):
fieldsets = (
('Header 1', {'fields': ('name',)}),
('Header 2', {'fields': ('authors',)}),
)
errors = FieldsetBookAdmin.check(model=Book)
expected = [
checks.Error(
("The value of 'fieldsets[1][1][\"fields\"]' cannot include the ManyToManyField "
"'authors', because that field manually specifies a relationship model."),
hint=None,
obj=FieldsetBookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_nested_fields(self):
class NestedFieldsAdmin(admin.ModelAdmin):
fields = ('price', ('name', 'subtitle'))
errors = NestedFieldsAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_nested_fieldsets(self):
class NestedFieldsetAdmin(admin.ModelAdmin):
fieldsets = (
('Main', {'fields': ('price', ('name', 'subtitle'))}),
)
errors = NestedFieldsetAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_explicit_through_override(self):
"""
Regression test for #12209 -- If the explicitly provided through model
        is specified as a string, the admin should still be able to use
Model.m2m_field.through
"""
class AuthorsInline(admin.TabularInline):
model = Book.authors.through
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorsInline]
errors = BookAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_non_model_fields(self):
"""
Regression for ensuring ModelAdmin.fields can contain non-model fields
that broke with r11737
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['title', 'extra_data']
errors = FieldsOnFormOnlyAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_non_model_first_field(self):
"""
Regression for ensuring ModelAdmin.field can handle first elem being a
non-model field (test fix for UnboundLocalError introduced with r16225).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
fields = '__all__'
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['extra_data', 'title']
errors = FieldsOnFormOnlyAdmin.check(model=Song)
self.assertEqual(errors, [])
@ignore_warnings(module='django.contrib.admin.options')
def test_validator_compatibility(self):
class MyValidator(object):
def validate(self, cls, model):
raise ImproperlyConfigured("error!")
class MyModelAdmin(admin.ModelAdmin):
validator_class = MyValidator
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
'error!',
hint=None,
obj=MyModelAdmin,
)
]
self.assertEqual(errors, expected)
def test_check_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fields = ['state', ['state']]
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
hint=None,
obj=MyModelAdmin,
id='admin.E006'
)
]
self.assertEqual(errors, expected)
def test_check_fieldset_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
'fields': ['title', 'album', ('title', 'album')]
}),
]
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
"There are duplicate field(s) in 'fieldsets[0][1]'.",
hint=None,
obj=MyModelAdmin,
id='admin.E012'
)
]
self.assertEqual(errors, expected)
| mit |
jjingrong/PONUS-1.2 | venv/build/psycopg2/lib/tz.py | 72 | 4427 | """tzinfo implementations for psycopg2
This module holds two different tzinfo implementations that can be used as
the 'tzinfo' argument to datetime constructors, directly passed to psycopg
functions or used to set the .tzinfo_factory attribute in cursors.
"""
# psycopg/tz.py - tzinfo implementation
#
# Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import datetime
import time
ZERO = datetime.timedelta(0)
class FixedOffsetTimezone(datetime.tzinfo):
"""Fixed offset in minutes east from UTC.
This is exactly the implementation__ found in Python 2.3.x documentation,
with a small change to the `!__init__()` method to allow for pickling
and a default name in the form ``sHH:MM`` (``s`` is the sign).
The implementation also caches instances. During creation, if a
FixedOffsetTimezone instance has previously been created with the same
offset and name that instance will be returned. This saves memory and
improves comparability.
.. __: http://docs.python.org/library/datetime.html#datetime-tzinfo
"""
_name = None
_offset = ZERO
_cache = {}
def __init__(self, offset=None, name=None):
if offset is not None:
self._offset = datetime.timedelta(minutes = offset)
if name is not None:
self._name = name
def __new__(cls, offset=None, name=None):
"""Return a suitable instance created earlier if it exists
"""
key = (offset, name)
try:
return cls._cache[key]
except KeyError:
tz = super(FixedOffsetTimezone, cls).__new__(cls, offset, name)
cls._cache[key] = tz
return tz
def __repr__(self):
offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
return "psycopg2.tz.FixedOffsetTimezone(offset=%r, name=%r)" \
% (offset_mins, self._name)
def __getinitargs__(self):
offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
return (offset_mins, self._name)
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
if self._name is not None:
return self._name
else:
seconds = self._offset.seconds + self._offset.days * 86400
hours, seconds = divmod(seconds, 3600)
minutes = seconds/60
if minutes:
return "%+03d:%d" % (hours, minutes)
else:
return "%+03d" % hours
def dst(self, dt):
return ZERO
STDOFFSET = datetime.timedelta(seconds = -time.timezone)
if time.daylight:
DSTOFFSET = datetime.timedelta(seconds = -time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(datetime.tzinfo):
"""Platform idea of local timezone.
This is the exact implementation from the Python 2.3 documentation.
"""
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
LOCAL = LocalTimezone()
# TODO: pre-generate some interesting time zones?
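# --- Editor's note: minimal usage sketch; not part of upstream psycopg2. ---
# Exercises the two tzinfo classes defined above using only the standard
# library. FixedOffsetTimezone caches one instance per (offset, name) pair.
if __name__ == '__main__':
    cest = FixedOffsetTimezone(offset=120, name="CEST")
    assert cest is FixedOffsetTimezone(offset=120, name="CEST")  # cached
    dt = datetime.datetime(2010, 7, 1, 12, 0, tzinfo=cest)
    print(dt.isoformat())        # 2010-07-01T12:00:00+02:00
    print(dt.astimezone(LOCAL))  # the same instant in the platform zone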
| mit |
StackPointCloud/libcloud | libcloud/compute/drivers/ktucloud.py | 3 | 3610 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.compute.providers import Provider
from libcloud.compute.base import Node, NodeImage, NodeSize
from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver
class KTUCloudNodeDriver(CloudStackNodeDriver):
"""Driver for KTUCloud Compute platform."""
EMPTY_DISKOFFERINGID = '0'
type = Provider.KTUCLOUD
name = 'KTUCloud'
website = 'https://ucloudbiz.olleh.com/'
def list_images(self, location=None):
args = {
'templatefilter': 'executable'
}
if location is not None:
args['zoneid'] = location.id
imgs = self._sync_request(command='listAvailableProductTypes',
method='GET')
images = []
for img in imgs['producttypes']:
images.append(
NodeImage(
img['serviceofferingid'],
img['serviceofferingdesc'],
self,
{'hypervisor': '',
'format': '',
'os': img['templatedesc'],
'templateid': img['templateid'],
'zoneid': img['zoneid']}
)
)
return images
def list_sizes(self, location=None):
szs = self._sync_request('listAvailableProductTypes')
sizes = []
for sz in szs['producttypes']:
diskofferingid = sz.get('diskofferingid',
self.EMPTY_DISKOFFERINGID)
sizes.append(NodeSize(
diskofferingid,
sz['diskofferingdesc'],
0, 0, 0, 0, self)
)
return sizes
def create_node(self, name, size, image, location=None, **kwargs):
params = {'displayname': name,
'serviceofferingid': image.id,
'templateid': str(image.extra['templateid']),
'zoneid': str(image.extra['zoneid'])}
usageplantype = kwargs.pop('usageplantype', None)
if usageplantype is None:
params['usageplantype'] = 'hourly'
else:
params['usageplantype'] = usageplantype
if size.id != self.EMPTY_DISKOFFERINGID:
params['diskofferingid'] = size.id
result = self._async_request(
command='deployVirtualMachine',
params=params,
method='GET')
node = result['virtualmachine']
return Node(
id=node['id'],
name=node['displayname'],
state=self.NODE_STATE_MAP[node['state']],
public_ips=[],
private_ips=[],
driver=self,
extra={
'zoneid': image.extra['zoneid'],
'ip_addresses': [],
'forwarding_rules': [],
}
)
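# --- Editor's note: hedged usage sketch; not part of upstream libcloud. ---
# The credentials are placeholders, and depending on the libcloud version
# the underlying CloudStack driver may also require host/path arguments
# for the KT ucloud endpoint.
if __name__ == '__main__':
    driver = KTUCloudNodeDriver('my-api-key', 'my-secret-key')
    images = driver.list_images()
    sizes = driver.list_sizes()
    node = driver.create_node(name='demo-node', size=sizes[0],
                              image=images[0], usageplantype='hourly')
    print(node.id, node.name)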
| apache-2.0 |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/connection_monitor_source.py | 2 | 1193 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectionMonitorSource(Model):
"""Describes the source of connection monitor.
:param resource_id: The ID of the resource used as the source by
connection monitor.
:type resource_id: str
:param port: The source port used by connection monitor.
:type port: int
"""
_validation = {
'resource_id': {'required': True},
}
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
}
def __init__(self, resource_id, port=None):
super(ConnectionMonitorSource, self).__init__()
self.resource_id = resource_id
self.port = port
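# --- Editor's note: minimal usage sketch; not part of the generated SDK. ---
# 'resource_id' is required by the _validation map above, 'port' is
# optional; the resource ID below is a placeholder.
if __name__ == '__main__':
    source = ConnectionMonitorSource(
        resource_id='/subscriptions/0000/resourceGroups/rg/providers/'
                    'Microsoft.Compute/virtualMachines/vm1',
        port=3389)
    print(source.resource_id, source.port)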
| mit |
40223117cda/2015_w11 | static/Brython3.1.1-20150328-091302/Lib/sys.py | 408 | 4998 | # hack to return special attributes
from _sys import *
from javascript import JSObject
has_local_storage=__BRYTHON__.has_local_storage
has_session_storage = __BRYTHON__.has_session_storage
has_json=__BRYTHON__.has_json
brython_debug_mode = __BRYTHON__.debug
argv = ['__main__']
base_exec_prefix = __BRYTHON__.brython_path
base_prefix = __BRYTHON__.brython_path
builtin_module_names=__BRYTHON__.builtin_module_names
byteorder='little'
def exc_info():
exc = __BRYTHON__.exception_stack[-1]
return (exc.__class__,exc,exc.traceback)
exec_prefix = __BRYTHON__.brython_path
executable = __BRYTHON__.brython_path+'/brython.js'
def exit(i=None):
raise SystemExit('')
class flag_class:
def __init__(self):
self.debug=0
self.inspect=0
self.interactive=0
self.optimize=0
self.dont_write_bytecode=0
self.no_user_site=0
self.no_site=0
self.ignore_environment=0
self.verbose=0
self.bytes_warning=0
self.quiet=0
self.hash_randomization=1
flags=flag_class()
def getfilesystemencoding(*args,**kw):
"""getfilesystemencoding() -> string
Return the encoding used to convert Unicode filenames in
operating system filenames."""
return 'utf-8'
maxsize=2147483647
maxunicode=1114111
path = __BRYTHON__.path
#path_hooks = list(JSObject(__BRYTHON__.path_hooks))
meta_path=__BRYTHON__.meta_path
platform="brython"
prefix = __BRYTHON__.brython_path
version = '.'.join(str(x) for x in __BRYTHON__.version_info[:3])
version += " (default, %s) \n[Javascript 1.5] on Brython" % __BRYTHON__.compiled_date
hexversion = 0x03000000 # python 3.0
class __version_info(object):
def __init__(self, version_info):
self.version_info = version_info
self.major = version_info[0]
self.minor = version_info[1]
self.micro = version_info[2]
self.releaselevel = version_info[3]
self.serial = version_info[4]
def __getitem__(self, index):
if isinstance(self.version_info[index], list):
return tuple(self.version_info[index])
return self.version_info[index]
def hexversion(self):
    try:
        return '0%d0%d0%d' % (self.major, self.minor, self.micro)
    except (TypeError, ValueError):
        # some invalid char in minor (rc, etc.). Upstream used
        # "finally: return", which unconditionally discarded the
        # value returned from the try block.
        return '0%d0000' % (self.major)
def __str__(self):
_s="sys.version(major=%d, minor=%d, micro=%d, releaselevel='%s', serial=%d)"
return _s % (self.major, self.minor, self.micro,
self.releaselevel, self.serial)
#return str(self.version_info)
# NB: "Error" was undefined here; raising TypeError matches Python's
# comparison protocol for unsupported operand types.
def __eq__(self, other):
    if isinstance(other, tuple):
        return (self.major, self.minor, self.micro) == other
    raise TypeError("Error! I don't know how to compare!")
def __ge__(self, other):
    if isinstance(other, tuple):
        return (self.major, self.minor, self.micro) >= other
    raise TypeError("Error! I don't know how to compare!")
def __gt__(self, other):
    if isinstance(other, tuple):
        return (self.major, self.minor, self.micro) > other
    raise TypeError("Error! I don't know how to compare!")
def __le__(self, other):
    if isinstance(other, tuple):
        return (self.major, self.minor, self.micro) <= other
    raise TypeError("Error! I don't know how to compare!")
def __lt__(self, other):
    if isinstance(other, tuple):
        return (self.major, self.minor, self.micro) < other
    raise TypeError("Error! I don't know how to compare!")
def __ne__(self, other):
    if isinstance(other, tuple):
        return (self.major, self.minor, self.micro) != other
    raise TypeError("Error! I don't know how to compare!")
#eventually this needs to be the real python version such as 3.0, 3.1, etc
version_info=__version_info(__BRYTHON__.version_info)
class _implementation:
def __init__(self):
self.name='brython'
self.version = __version_info(__BRYTHON__.implementation)
self.hexversion = self.version.hexversion()
self.cache_tag=None
def __repr__(self):
return "namespace(name='%s' version=%s hexversion='%s')" % (self.name, self.version, self.hexversion)
def __str__(self):
return "namespace(name='%s' version=%s hexversion='%s')" % (self.name, self.version, self.hexversion)
implementation=_implementation()
class _hash_info:
def __init__(self):
    self.width = 32  # a stray trailing comma previously made this a tuple
    self.modulus = 2147483647
    self.inf = 314159
    self.nan = 0
    self.imag = 1000003
    self.algorithm = 'siphash24'
    self.hash_bits = 64
    self.seed_bits = 128
    self.cutoff = 0  # was a bare local, so the attribute never existed
def __repr__(self):  # was misspelled "__repr", so repr() never used it
    return "sys.hash_info(width=32, modulus=2147483647, inf=314159, nan=0, imag=1000003, algorithm='siphash24', hash_bits=64, seed_bits=128, cutoff=0)"
hash_info=_hash_info()
warnoptions=[]
def getfilesystemencoding():
return 'utf-8'
#delete objects not in python sys module namespace
del JSObject
del _implementation
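# --- Editor's note: minimal self-check sketch; not part of upstream Brython.
# Only meaningful when run under Brython, where __BRYTHON__ exists; it
# exercises the tuple-comparison helpers defined on __version_info above.
if __name__ == '__main__':
    print(version_info)
    print(version_info >= (3, 0, 0))
    print(implementation)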
| gpl-3.0 |
tempbottle/ironpython3 | Src/StdLib/Lib/distutils/tests/test_install.py | 81 | 8432 | """Tests for distutils.command.install."""
import os
import sys
import unittest
import site
from test.support import captured_stdout, run_unittest
from distutils import sysconfig
from distutils.command.install import install
from distutils.command import install as install_module
from distutils.command.build_ext import build_ext
from distutils.command.install import INSTALL_SCHEMES
from distutils.core import Distribution
from distutils.errors import DistutilsOptionError
from distutils.extension import Extension
from distutils.tests import support
def _make_ext_name(modname):
if os.name == 'nt' and sys.executable.endswith('_d.exe'):
modname += '_d'
return modname + sysconfig.get_config_var('EXT_SUFFIX')
class InstallTestCase(support.TempdirManager,
support.EnvironGuard,
support.LoggingSilencer,
unittest.TestCase):
def test_home_installation_scheme(self):
# This ensure two things:
# - that --home generates the desired set of directory names
# - test --home is supported on all platforms
builddir = self.mkdtemp()
destination = os.path.join(builddir, "installation")
dist = Distribution({"name": "foopkg"})
# script_name need not exist, it just need to be initialized
dist.script_name = os.path.join(builddir, "setup.py")
dist.command_obj["build"] = support.DummyCommand(
build_base=builddir,
build_lib=os.path.join(builddir, "lib"),
)
cmd = install(dist)
cmd.home = destination
cmd.ensure_finalized()
self.assertEqual(cmd.install_base, destination)
self.assertEqual(cmd.install_platbase, destination)
def check_path(got, expected):
got = os.path.normpath(got)
expected = os.path.normpath(expected)
self.assertEqual(got, expected)
libdir = os.path.join(destination, "lib", "python")
check_path(cmd.install_lib, libdir)
check_path(cmd.install_platlib, libdir)
check_path(cmd.install_purelib, libdir)
check_path(cmd.install_headers,
os.path.join(destination, "include", "python", "foopkg"))
check_path(cmd.install_scripts, os.path.join(destination, "bin"))
check_path(cmd.install_data, destination)
def test_user_site(self):
# test install with --user
# preparing the environment for the test
self.old_user_base = site.USER_BASE
self.old_user_site = site.USER_SITE
self.tmpdir = self.mkdtemp()
self.user_base = os.path.join(self.tmpdir, 'B')
self.user_site = os.path.join(self.tmpdir, 'S')
site.USER_BASE = self.user_base
site.USER_SITE = self.user_site
install_module.USER_BASE = self.user_base
install_module.USER_SITE = self.user_site
def _expanduser(path):
return self.tmpdir
self.old_expand = os.path.expanduser
os.path.expanduser = _expanduser
def cleanup():
site.USER_BASE = self.old_user_base
site.USER_SITE = self.old_user_site
install_module.USER_BASE = self.old_user_base
install_module.USER_SITE = self.old_user_site
os.path.expanduser = self.old_expand
self.addCleanup(cleanup)
for key in ('nt_user', 'unix_user'):
self.assertIn(key, INSTALL_SCHEMES)
dist = Distribution({'name': 'xx'})
cmd = install(dist)
# making sure the user option is there
options = [name for name, short, label in
cmd.user_options]
self.assertIn('user', options)
# setting a value
cmd.user = 1
# user base and site shouldn't be created yet
self.assertFalse(os.path.exists(self.user_base))
self.assertFalse(os.path.exists(self.user_site))
# let's run finalize
cmd.ensure_finalized()
# now they should
self.assertTrue(os.path.exists(self.user_base))
self.assertTrue(os.path.exists(self.user_site))
self.assertIn('userbase', cmd.config_vars)
self.assertIn('usersite', cmd.config_vars)
def test_handle_extra_path(self):
dist = Distribution({'name': 'xx', 'extra_path': 'path,dirs'})
cmd = install(dist)
# two elements
cmd.handle_extra_path()
self.assertEqual(cmd.extra_path, ['path', 'dirs'])
self.assertEqual(cmd.extra_dirs, 'dirs')
self.assertEqual(cmd.path_file, 'path')
# one element
cmd.extra_path = ['path']
cmd.handle_extra_path()
self.assertEqual(cmd.extra_path, ['path'])
self.assertEqual(cmd.extra_dirs, 'path')
self.assertEqual(cmd.path_file, 'path')
# none
dist.extra_path = cmd.extra_path = None
cmd.handle_extra_path()
self.assertEqual(cmd.extra_path, None)
self.assertEqual(cmd.extra_dirs, '')
self.assertEqual(cmd.path_file, None)
# three elements (no way !)
cmd.extra_path = 'path,dirs,again'
self.assertRaises(DistutilsOptionError, cmd.handle_extra_path)
def test_finalize_options(self):
dist = Distribution({'name': 'xx'})
cmd = install(dist)
# must supply either prefix/exec-prefix/home or
# install-base/install-platbase -- not both
cmd.prefix = 'prefix'
cmd.install_base = 'base'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
# must supply either home or prefix/exec-prefix -- not both
cmd.install_base = None
cmd.home = 'home'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
# can't combine user with prefix/exec_prefix/home or
# install_(plat)base
cmd.prefix = None
cmd.user = 'user'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
def test_record(self):
install_dir = self.mkdtemp()
project_dir, dist = self.create_dist(py_modules=['hello'],
scripts=['sayhi'])
os.chdir(project_dir)
self.write_file('hello.py', "def main(): print('o hai')")
self.write_file('sayhi', 'from hello import main; main()')
cmd = install(dist)
dist.command_obj['install'] = cmd
cmd.root = install_dir
cmd.record = os.path.join(project_dir, 'filelist')
cmd.ensure_finalized()
cmd.run()
f = open(cmd.record)
try:
content = f.read()
finally:
f.close()
found = [os.path.basename(line) for line in content.splitlines()]
expected = ['hello.py', 'hello.%s.pyc' % sys.implementation.cache_tag,
'sayhi',
'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
self.assertEqual(found, expected)
def test_record_extensions(self):
install_dir = self.mkdtemp()
project_dir, dist = self.create_dist(ext_modules=[
Extension('xx', ['xxmodule.c'])])
os.chdir(project_dir)
support.copy_xxmodule_c(project_dir)
buildextcmd = build_ext(dist)
support.fixup_build_ext(buildextcmd)
buildextcmd.ensure_finalized()
cmd = install(dist)
dist.command_obj['install'] = cmd
dist.command_obj['build_ext'] = buildextcmd
cmd.root = install_dir
cmd.record = os.path.join(project_dir, 'filelist')
cmd.ensure_finalized()
cmd.run()
f = open(cmd.record)
try:
content = f.read()
finally:
f.close()
found = [os.path.basename(line) for line in content.splitlines()]
expected = [_make_ext_name('xx'),
'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
self.assertEqual(found, expected)
def test_debug_mode(self):
# this covers the code called when DEBUG is set
old_logs_len = len(self.logs)
install_module.DEBUG = True
try:
with captured_stdout():
self.test_record()
finally:
install_module.DEBUG = False
self.assertGreater(len(self.logs), old_logs_len)
def test_suite():
return unittest.makeSuite(InstallTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| apache-2.0 |
BlueBrain/NEST | testsuite/manualtests/stdp_check.py | 13 | 4713 | # -*- coding: utf-8 -*-
#
# stdp_check.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from matplotlib.pylab import *
# Test script to reproduce changes in weight of a STDP synapse in an event-driven way.
# Pre- and post-synaptic spike trains are read in from spike_detector-0-0-3.gdf
# (output of test_stdp_poiss.sli).
# output: pre/post \t spike time \t weight
#
# Synaptic dynamics for STDP synapses according to Abigail Morrison's
# STDP model (see stdp_rec.pdf).
#
# first version: Moritz Helias, april 2006
# adapted to python MH, SK, May 2008
def stdp(w_init, w_max, pre_spikes, post_spikes, alpha, mu_plus, mu_minus, lmbd, tau_plus, tau_minus, delay):
w = w_init # initial weight
i = 0 # index of next presynaptic spike
j = 0 # index of next postsynaptic spike
K_plus = 0.
K_minus = 0.
last_t = 0.
advance = True
while advance:
advance = False
# next spike is presynaptic
if pre_spikes[i] < post_spikes[j]:
dt = pre_spikes[i] - last_t
# evolve exponential filters
K_plus *= exp(-dt/tau_plus)
K_minus *= exp(-dt/tau_minus)
# depression
w = w/w_max - lmbd * alpha * (w/w_max)**mu_minus * K_minus
if w > 0.:
w *= w_max
else:
w = 0.
print "pre\t%.16f\t%.16f" % (pre_spikes[i],w)
K_plus += 1.
last_t = pre_spikes[i] # time evolved until here
if i < len(pre_spikes) - 1:
i += 1
advance = True
# same timing of next pre- and postsynaptic spike
elif pre_spikes[i] == post_spikes[j]:
dt = pre_spikes[i] - last_t
# evolve exponential filters
K_plus *= exp(-dt/tau_plus)
K_minus *= exp(-dt/tau_minus)
# facilitation
w = w/w_max + lmbd * (1.-w/w_max)**mu_plus * K_plus
if w < 1.:
w *= w_max
else:
w = w_max
print "post\t%.16f\t%.16f" % (post_spikes[j]-delay,w)
# depression
w = w/w_max - lmbd * alpha * (w/w_max)**mu_minus * K_minus
if w > 0.:
w *= w_max
else:
w = 0.
print "pre\t%.16f\t%.16f" % (pre_spikes[i],w)
K_plus += 1.
K_minus += 1.
last_t = pre_spikes[i] # time evolved until here
if i < len(pre_spikes) - 1:
i += 1
advance = True
if j < len(post_spikes) - 1:
j += 1
advance = True
# next spike is postsynaptic
else:
dt = post_spikes[j] - last_t
# evolve exponential filters
K_plus *= exp(-dt / tau_plus)
K_minus *= exp(-dt / tau_minus)
# facilitation
w = w/w_max + lmbd * (1.-w/w_max)**mu_plus * K_plus
if w < 1.:
w *= w_max
else:
w = w_max
print "post\t%.16f\t%.16f" % (post_spikes[j]-delay,w)
K_minus += 1.
last_t = post_spikes[j] # time evolved until here
if j < len(post_spikes) - 1:
j += 1
advance = True
return w
# stdp parameters
w_init = 35.
w_max = 70.
alpha = .95
mu_plus = .05
mu_minus = .05
lmbd = .025
tau_plus = 20.
tau_minus = 20.
# dendritic delay
delay = 1.
# load spikes from simulation with test_stdp_poiss.sli
spikes = load("spike_detector-0-0-3.gdf")
pre_spikes = spikes[find(spikes[:,0] == 5), 1]
# delay is purely dendritic
# postsynaptic spike arrives at sp_j + delay at the synapse
post_spikes = spikes[find(spikes[:,0] == 6), 1] + delay
# calculate development of stdp weight
stdp(w_init, w_max, pre_spikes, post_spikes, alpha, mu_plus, mu_minus, lmbd, tau_plus, tau_minus, delay)
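# --- Editor's note: hedged sanity check; not part of upstream NEST. ---
# With the learning rate lambda set to 0, neither facilitation nor
# depression can change the weight, so the same spike data must leave the
# weight exactly at w_init.
w_flat = stdp(w_init, w_max, pre_spikes, post_spikes, alpha,
              mu_plus, mu_minus, 0., tau_plus, tau_minus, delay)
assert abs(w_flat - w_init) < 1e-12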
| gpl-2.0 |
gupascal/distcc | include_server/basics_test.py | 26 | 4328 | #! /usr/bin/python2.4
# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
#
__author__ = "Nils Klarlund"
import os
import os.path
import tempfile
import unittest
import basics
class BasicsTest(unittest.TestCase):
def setUp(self):
basics.opt_debug_pattern = 1
def tearDown(self):
pass
def test_ClientRootKeeper(self):
os.environ['DISTCC_CLIENT_TMP'] = 'to/be'
self.assertRaises(SystemExit, basics.ClientRootKeeper)
os.environ['DISTCC_CLIENT_TMP'] = '/to/be/or'
self.assertRaises(SystemExit, basics.ClientRootKeeper)
try:
tempfile_mkdtemp = tempfile.mkdtemp
os_makedirs = os.makedirs
def Mock_tempfile_mkdtemp(pat, dir):
self.assert_((pat, dir)
in
[('.%s-%s-%d' %
(basics.ClientRootKeeper.INCLUDE_SERVER_NAME,
os.getpid(), generation),
prefix)
for generation, prefix in
[(1,'/to/be'), (2, '/to')]])
return (dir == '/to/be' and '/to/be/xxxxxx'
or dir == '/to' and '/to/xxxxxxx')
def Mock_os_makedirs(f, *unused_args):
if not f.startswith('/to/'):
raise Exception, f
tempfile.mkdtemp = Mock_tempfile_mkdtemp
os.makedirs = Mock_os_makedirs
os.environ['DISTCC_CLIENT_TMP'] = '/to/be'
client_root_keeper = basics.ClientRootKeeper()
client_root_keeper.ClientRootMakedir(1)
self.assertEqual(os.path.dirname(client_root_keeper.client_root),
"/to/be")
os.environ['DISTCC_CLIENT_TMP'] = '/to'
client_root_keeper = basics.ClientRootKeeper()
client_root_keeper.ClientRootMakedir(2)
self.assertEqual(os.path.dirname(
os.path.dirname(client_root_keeper.client_root)), "/to")
self.assertEqual(os.path.basename(client_root_keeper.client_root),
"padding")
self.assertEqual(len(
[ None for ch in client_root_keeper.client_root if ch == '/' ]), 3)
finally:
tempfile.mkdtemp = tempfile_mkdtemp
os.makedirs = os_makedirs
def test_ClientRootKeeper_Deletions(self):
"""Test whether directories emerge and go away appropriately."""
# Test with a one-level value of DISTCC_CLIENT_TMP.
os.environ['DISTCC_CLIENT_TMP'] = '/tmp'
client_root_keeper = basics.ClientRootKeeper()
client_root_keeper.ClientRootMakedir(117)
self.assert_(os.path.isdir(client_root_keeper._client_root_before_padding))
self.assert_(os.path.isdir(client_root_keeper.client_root))
self.assert_(client_root_keeper.client_root.endswith('/padding'))
client_root_keeper.ClientRootMakedir(118)
client_root_keeper.CleanOutClientRoots()
# Directories must be gone now!
self.assert_(not os.path.isdir(
client_root_keeper._client_root_before_padding))
# Test with a two-level value of DISTCC_CLIENT_TMP.
try:
os.environ['DISTCC_CLIENT_TMP'] = tempfile.mkdtemp('basics_test',
dir='/tmp')
client_root_keeper = basics.ClientRootKeeper()
client_root_keeper.ClientRootMakedir(117)
self.assert_(os.path.isdir(
client_root_keeper._client_root_before_padding))
self.assert_(os.path.isdir(client_root_keeper.client_root))
client_root_keeper.ClientRootMakedir(118)
client_root_keeper.CleanOutClientRoots()
self.assert_(os.path.isdir,
client_root_keeper._client_root_before_padding)
finally:
os.rmdir(os.environ['DISTCC_CLIENT_TMP'])
unittest.main()
| gpl-2.0 |
erwilan/ansible | lib/ansible/plugins/lookup/template.py | 23 | 3221 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_text
from ansible.template import generate_ansible_template_vars
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
convert_data_p = kwargs.get('convert_data', True)
ret = []
for term in terms:
display.debug("File lookup term: %s" % term)
lookupfile = self.find_file_in_search_path(variables, 'templates', term)
display.vvvv("File lookup using %s as file" % lookupfile)
if lookupfile:
with open(to_bytes(lookupfile, errors='surrogate_or_strict'), 'rb') as f:
template_data = to_text(f.read(), errors='surrogate_or_strict')
# set jinja2 internal search path for includes
searchpath = variables.get('ansible_search_path')
if searchpath:
# our search paths aren't actually the proper ones for jinja includes.
# We want to search into the 'templates' subdir of each search path in
# addition to our original search paths.
newsearchpath = []
for p in searchpath:
newsearchpath.append(os.path.join(p, 'templates'))
newsearchpath.append(p)
searchpath = newsearchpath
else:
searchpath = [self._loader._basedir, os.path.dirname(lookupfile)]
self._templar.environment.loader.searchpath = searchpath
# add ansible 'template' vars
temp_vars = variables.copy()
temp_vars.update(generate_ansible_template_vars(lookupfile))
self._templar.set_available_variables(temp_vars)
# do the templating
res = self._templar.template(template_data, preserve_trailing_newlines=True, convert_data=convert_data_p)
ret.append(res)
else:
raise AnsibleError("the template file %s could not be found for the lookup" % term)
return ret
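# --- Editor's note: hedged usage sketch; not part of upstream Ansible. ---
# From a playbook this plugin is reached through lookup(); the template
# name below is a placeholder resolved against the play's search path:
#
#   - name: render a template into a variable
#     debug:
#       msg: "{{ lookup('template', 'motd.j2', convert_data=False) }}"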
| gpl-3.0 |
Krossom/python-for-android | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_print.py | 164 | 2854 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for print.
Change:
'print' into 'print()'
'print ...' into 'print(...)'
'print ... ,' into 'print(..., end=" ")'
'print >>x, ...' into 'print(..., file=x)'
No changes are applied if print_function is imported from __future__
"""
# Local imports
from .. import patcomp
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Comma, String, is_tuple
parend_expr = patcomp.compile_pattern(
"""atom< '(' [atom|STRING|NAME] ')' >"""
)
class FixPrint(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
simple_stmt< any* bare='print' any* > | print_stmt
"""
def transform(self, node, results):
assert results
bare_print = results.get("bare")
if bare_print:
# Special-case print all by itself
bare_print.replace(Call(Name("print"), [],
prefix=bare_print.prefix))
return
assert node.children[0] == Name("print")
args = node.children[1:]
if len(args) == 1 and parend_expr.match(args[0]):
# We don't want to keep sticking parens around an
# already-parenthesised expression.
return
sep = end = file = None
if args and args[-1] == Comma():
args = args[:-1]
end = " "
if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"):
assert len(args) >= 2
file = args[1].clone()
args = args[3:] # Strip a possible comma after the file expression
# Now synthesize a print(args, sep=..., end=..., file=...) node.
l_args = [arg.clone() for arg in args]
if l_args:
l_args[0].prefix = ""
if sep is not None or end is not None or file is not None:
if sep is not None:
self.add_kwarg(l_args, "sep", String(repr(sep)))
if end is not None:
self.add_kwarg(l_args, "end", String(repr(end)))
if file is not None:
self.add_kwarg(l_args, "file", file)
n_stmt = Call(Name("print"), l_args)
n_stmt.prefix = node.prefix
return n_stmt
def add_kwarg(self, l_nodes, s_kwd, n_expr):
# XXX All this prefix-setting may lose comments (though rarely)
n_expr.prefix = ""
n_argument = pytree.Node(self.syms.argument,
(Name(s_kwd),
pytree.Leaf(token.EQUAL, "="),
n_expr))
if l_nodes:
l_nodes.append(Comma())
n_argument.prefix = " "
l_nodes.append(n_argument)
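# --- Editor's note: minimal usage sketch; not part of upstream lib2to3. ---
# Drives this fixer through the refactoring engine on a code string; note
# that refactor_string() requires the input to end with a newline.
if __name__ == '__main__':
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(['lib2to3.fixes.fix_print'])
    tree = tool.refactor_string('print >>sys.stderr, "oops",\n', '<demo>')
    print(str(tree))  # -> print("oops", end=' ', file=sys.stderr)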
| apache-2.0 |
ychen820/microblog | flask/lib/python2.7/site-packages/openid/cryptutil.py | 136 | 6084 | """Module containing a cryptographic-quality source of randomness and
other cryptographically useful functionality
Python 2.4 needs no external support for this module, nor does Python
2.3 on a system with /dev/urandom.
Other configurations will need a quality source of random bytes and
access to a function that will convert binary strings to long
integers. This module will work with the Python Cryptography Toolkit
(pycrypto) if it is present. pycrypto can be found with a search
engine, but is currently found at:
http://www.amk.ca/python/code/crypto
"""
__all__ = [
'base64ToLong',
'binaryToLong',
'hmacSha1',
'hmacSha256',
'longToBase64',
'longToBinary',
'randomString',
'randrange',
'sha1',
'sha256',
]
import hmac
import os
import random
from openid.oidutil import toBase64, fromBase64
try:
import hashlib
except ImportError:
import sha as sha1_module
try:
from Crypto.Hash import SHA256 as sha256_module
except ImportError:
sha256_module = None
else:
class HashContainer(object):
def __init__(self, hash_constructor):
self.new = hash_constructor
self.digest_size = hash_constructor().digest_size
sha1_module = HashContainer(hashlib.sha1)
sha256_module = HashContainer(hashlib.sha256)
def hmacSha1(key, text):
return hmac.new(key, text, sha1_module).digest()
def sha1(s):
return sha1_module.new(s).digest()
if sha256_module is not None:
def hmacSha256(key, text):
return hmac.new(key, text, sha256_module).digest()
def sha256(s):
return sha256_module.new(s).digest()
SHA256_AVAILABLE = True
else:
_no_sha256 = NotImplementedError(
'Use Python 2.5, install pycrypto or install hashlib to use SHA256')
def hmacSha256(unused_key, unused_text):
raise _no_sha256
def sha256(s):
raise _no_sha256
SHA256_AVAILABLE = False
try:
from Crypto.Util.number import long_to_bytes, bytes_to_long
except ImportError:
import pickle
try:
# Check Python compatibility by raising an exception on import
# if the needed functionality is not present. Present in
# Python >= 2.3
pickle.encode_long
pickle.decode_long
except AttributeError:
raise ImportError(
'No functionality for serializing long integers found')
# Present in Python >= 2.4
try:
reversed
except NameError:
def reversed(seq):
return map(seq.__getitem__, xrange(len(seq) - 1, -1, -1))
def longToBinary(l):
if l == 0:
return '\x00'
return ''.join(reversed(pickle.encode_long(l)))
def binaryToLong(s):
return pickle.decode_long(''.join(reversed(s)))
else:
# We have pycrypto
def longToBinary(l):
if l < 0:
raise ValueError('This function only supports positive integers')
bytes = long_to_bytes(l)
if ord(bytes[0]) > 127:
return '\x00' + bytes
else:
return bytes
def binaryToLong(bytes):
if not bytes:
raise ValueError('Empty string passed to strToLong')
if ord(bytes[0]) > 127:
raise ValueError('This function only supports positive integers')
return bytes_to_long(bytes)
# A cryptographically safe source of random bytes
try:
getBytes = os.urandom
except AttributeError:
try:
from Crypto.Util.randpool import RandomPool
except ImportError:
# Fall back on /dev/urandom, if present. It would be nice to
# have Windows equivalent here, but for now, require pycrypto
# on Windows.
try:
_urandom = file('/dev/urandom', 'rb')
except IOError:
raise ImportError('No adequate source of randomness found!')
else:
def getBytes(n):
bytes = []
while n:
chunk = _urandom.read(n)
n -= len(chunk)
bytes.append(chunk)
assert n >= 0
return ''.join(bytes)
else:
_pool = RandomPool()
def getBytes(n, pool=_pool):
if pool.entropy < n:
pool.randomize()
return pool.get_bytes(n)
# A randrange function that works for longs
try:
randrange = random.SystemRandom().randrange
except AttributeError:
# In Python 2.2's random.Random, randrange does not support
# numbers larger than sys.maxint for randrange. For simplicity,
# use this implementation for any Python that does not have
# random.SystemRandom
from math import log, ceil
_duplicate_cache = {}
def randrange(start, stop=None, step=1):
if stop is None:
stop = start
start = 0
r = (stop - start) // step
try:
(duplicate, nbytes) = _duplicate_cache[r]
except KeyError:
rbytes = longToBinary(r)
if rbytes[0] == '\x00':
nbytes = len(rbytes) - 1
else:
nbytes = len(rbytes)
mxrand = (256 ** nbytes)
# If we get a number less than this, then it is in the
# duplicated range.
duplicate = mxrand % r
if len(_duplicate_cache) > 10:
_duplicate_cache.clear()
_duplicate_cache[r] = (duplicate, nbytes)
while 1:
bytes = '\x00' + getBytes(nbytes)
n = binaryToLong(bytes)
# Keep looping if this value is in the low duplicated range
if n >= duplicate:
break
return start + (n % r) * step
def longToBase64(l):
return toBase64(longToBinary(l))
def base64ToLong(s):
return binaryToLong(fromBase64(s))
def randomString(length, chrs=None):
"""Produce a string of length random bytes, chosen from chrs."""
if chrs is None:
return getBytes(length)
else:
n = len(chrs)
return ''.join([chrs[randrange(n)] for _ in xrange(length)])
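# --- Editor's note: minimal self-check sketch; not part of upstream. ---
# Round-trips a random 128-bit value through the binary and base64 codecs
# above (the leading NUL keeps the value positive for binaryToLong), then
# draws a short random string from a fixed alphabet. Python 2 only, like
# the rest of this module.
if __name__ == '__main__':
    n = binaryToLong('\x00' + getBytes(16))
    assert binaryToLong(longToBinary(n)) == n
    assert base64ToLong(longToBase64(n)) == n
    print(randomString(8, 'abcdef0123456789'))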
| bsd-3-clause |
gauribhoite/personfinder | env/site-packages/pygments/lexers/other.py | 77 | 1725 | # -*- coding: utf-8 -*-
"""
pygments.lexers.other
~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer
from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \
TcshLexer
from pygments.lexers.robotframework import RobotFrameworkLexer
from pygments.lexers.testing import GherkinLexer
from pygments.lexers.esoteric import BrainfuckLexer, BefungeLexer, RedcodeLexer
from pygments.lexers.prolog import LogtalkLexer
from pygments.lexers.snobol import SnobolLexer
from pygments.lexers.rebol import RebolLexer
from pygments.lexers.configs import KconfigLexer, Cfengine3Lexer
from pygments.lexers.modeling import ModelicaLexer
from pygments.lexers.scripting import AppleScriptLexer, MOOCodeLexer, \
HybrisLexer
from pygments.lexers.graphics import PostScriptLexer, GnuplotLexer, \
AsymptoteLexer, PovrayLexer
from pygments.lexers.business import ABAPLexer, OpenEdgeLexer, \
GoodDataCLLexer, MaqlLexer
from pygments.lexers.automation import AutoItLexer, AutohotkeyLexer
from pygments.lexers.dsls import ProtoBufLexer, BroLexer, PuppetLexer, \
MscgenLexer, VGLLexer
from pygments.lexers.basic import CbmBasicV2Lexer
from pygments.lexers.pawn import SourcePawnLexer, PawnLexer
from pygments.lexers.ecl import ECLLexer
from pygments.lexers.urbi import UrbiscriptLexer
from pygments.lexers.smalltalk import SmalltalkLexer, NewspeakLexer
from pygments.lexers.installers import NSISLexer, RPMSpecLexer
from pygments.lexers.textedit import AwkLexer
__all__ = []
| apache-2.0 |
sbadia/ifmap-python-client | ifmap/operations.py | 2 | 2640 | # Copyright 2011, Infoblox, All Rights Reserved
#
# Open Source, see LICENSE
#
from util import attr, link_ids
class OperationBase:
""" foundation class for operation factory """
pass
class PublishUpdateOperation(OperationBase):
def __init__(self, id1, metadata, id2=None, lifetime=None):
self.__id = link_ids(id1, id2)
self.__metadata = metadata
self.__lifetime = lifetime
def __str__(self):
if self.__lifetime:
_attr = attr({'lifetime':self.__lifetime})
return '<update %s>' % _attr + self.__id + self.__metadata + '</update>'
else:
return '<update>' + self.__id + self.__metadata + '</update>'
class PublishDeleteOperation(OperationBase):
def __init__(self, id1, id2=None, filter=None):
self.__id = link_ids(id1, id2)
self.__filter = filter
def __str__(self):
if self.__filter:
_attr = attr({'filter':self.__filter})
return '<delete %s>' % _attr + self.__id + '</delete>'
else:
return '<delete>' + self.__id + '</delete>'
class PublishNotifyOperation(OperationBase):
def __init__(self, id1, metadata, id2=None):
self.__id = link_ids(id1, id2)
self.__metadata = metadata
def __str__(self):
return '<notify>' + self.__id + self.__metadata + '</notify>'
class SubscribeUpdateOperation(OperationBase):
"""
SubscribeUpdate factory
name
identifier (single, or linked with link_ids())
search_parameters - dictionary eg. {'max_depth':'3', 'max_size':'10000'}
result_filter => string, #Optional. Rules for extracting specific data from the results
match_links => string, #Optional. Filter to match links to be followed, unmatched links will not be followed in the search process
max_depth => number, #Optional. Maximum distance of any included identifiers. Start depth is equal to 0
max_size => number, #Optional. Maximum size in bytes of the results
terminal_identifier_type => string, #Optional. Terminal identifier type of the search request
"""
def __init__(self, name, identifier, search_parameters={}):
self.__name = name
self.__identifier = identifier
self.__parameters = search_parameters
def __str__(self):
__attr = attr(self.__parameters)
return '<update name="'+ self.__name + '" ' + __attr + '>' + self.__identifier +'</update>'
class SubscribeDeleteOperation(OperationBase):
def __init__(self, name):
self.__name = name
def __str__(self):
return '<delete name="'+ self.__name + '" />'
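# --- Editor's note: minimal usage sketch; not part of upstream. ---
# The identifier XML is a hand-written placeholder; real code would build
# it with the companion identifier helpers and link_ids().
if __name__ == '__main__':
    sub = SubscribeUpdateOperation(
        'sub1', '<identity name="joe" type="username"/>',
        {'max_depth': '3', 'max_size': '10000'})
    print(sub)
    print(SubscribeDeleteOperation('sub1'))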
| bsd-2-clause |
jku/telepathy-gabble | tests/twisted/rostertest.py | 2 | 5590 | from twisted.words.protocols.jabber.client import IQ
from gabbletest import (wrap_channel,)
from servicetest import (assertEquals, assertLength, EventPattern,
assertContains)
import constants as cs
import ns
def make_roster_push(stream, jid, subscription, ask_subscribe=False, name=None):
iq = IQ(stream, "set")
iq['id'] = 'push'
query = iq.addElement('query')
query['xmlns'] = ns.ROSTER
item = query.addElement('item')
item['jid'] = jid
item['subscription'] = subscription
if name is not None:
item['name'] = name
if ask_subscribe:
item['ask'] = 'subscribe'
return iq
def send_roster_push(stream, jid, subscription, ask_subscribe=False, name=None):
iq = make_roster_push(stream, jid, subscription,
ask_subscribe=ask_subscribe, name=name)
stream.send(iq)
def get_contact_list_event_patterns(q, bus, conn, expected_handle_type, name):
expected_handle = conn.RequestHandles(expected_handle_type, [name])[0]
def new_channel_predicate(e):
path, type, handle_type, handle, suppress_handler = e.args
if type != cs.CHANNEL_TYPE_CONTACT_LIST:
return False
if handle_type != expected_handle_type:
return False
if handle != expected_handle:
return False
return True
new_channel_repr = ('NewChannel(., ContactList, %u, "%s", .)'
% (expected_handle_type, name))
new_channel_predicate.__repr__ = lambda: new_channel_repr
def new_channels_predicate(e):
info, = e.args
if len(info) != 1:
return False
path, props = info[0]
if props.get(cs.CHANNEL_TYPE) != cs.CHANNEL_TYPE_CONTACT_LIST:
return False
if props.get(cs.TARGET_HANDLE_TYPE) != expected_handle_type:
return False
if props.get(cs.TARGET_HANDLE) != expected_handle:
return False
return True
new_channels_repr = ('NewChannels(... ct=ContactList, ht=%u, name="%s"... )'
% (expected_handle_type, name))
new_channels_predicate.__repr__ = lambda: new_channels_repr
return (
EventPattern('dbus-signal', signal='NewChannel',
predicate=new_channel_predicate),
EventPattern('dbus-signal', signal='NewChannels',
predicate=new_channels_predicate)
)
def expect_contact_list_signals(q, bus, conn, lists, groups=[]):
assert lists or groups
eps = []
for name in lists:
eps.extend(get_contact_list_event_patterns(q, bus, conn,
cs.HT_LIST, name))
for name in groups:
eps.extend(get_contact_list_event_patterns(q, bus, conn,
cs.HT_GROUP, name))
events = q.expect_many(*eps)
ret = []
for name in lists:
old_signal = events.pop(0)
new_signal = events.pop(0)
ret.append((old_signal, new_signal))
for name in groups:
old_signal = events.pop(0)
new_signal = events.pop(0)
ret.append((old_signal, new_signal))
assert len(events) == 0
return ret
def check_contact_list_signals(q, bus, conn, signals,
ht, name, contacts, lp_contacts=[], rp_contacts=[]):
"""
Looks at NewChannel and NewChannels signals for the contact list with ID
'name' and checks that its members, lp members and rp members are exactly
'contacts', 'lp_contacts' and 'rp_contacts'.
Returns a proxy for the channel.
"""
old_signal, new_signal = signals
path, type, handle_type, handle, suppress_handler = old_signal.args
assertEquals(cs.CHANNEL_TYPE_CONTACT_LIST, type)
assertEquals(name, conn.InspectHandles(handle_type, [handle])[0])
chan = wrap_channel(bus.get_object(conn.bus_name, path),
cs.CHANNEL_TYPE_CONTACT_LIST)
members = chan.Group.GetMembers()
assertEquals(sorted(contacts),
sorted(conn.InspectHandles(cs.HT_CONTACT, members)))
lp_handles = conn.RequestHandles(cs.HT_CONTACT, lp_contacts)
rp_handles = conn.RequestHandles(cs.HT_CONTACT, rp_contacts)
# NB. comma: we're unpacking args. Python!
info, = new_signal.args
assertLength(1, info) # one channel
path_, emitted_props = info[0]
assertEquals(path_, path)
assertEquals(cs.CHANNEL_TYPE_CONTACT_LIST, emitted_props[cs.CHANNEL_TYPE])
assertEquals(ht, emitted_props[cs.TARGET_HANDLE_TYPE])
assertEquals(handle, emitted_props[cs.TARGET_HANDLE])
channel_props = chan.Properties.GetAll(cs.CHANNEL)
assertEquals(handle, channel_props.get('TargetHandle'))
assertEquals(ht, channel_props.get('TargetHandleType'))
assertEquals(cs.CHANNEL_TYPE_CONTACT_LIST, channel_props.get('ChannelType'))
assertContains(cs.CHANNEL_IFACE_GROUP, channel_props.get('Interfaces'))
assertEquals(name, channel_props['TargetID'])
assertEquals(False, channel_props['Requested'])
assertEquals('', channel_props['InitiatorID'])
assertEquals(0, channel_props['InitiatorHandle'])
group_props = chan.Properties.GetAll(cs.CHANNEL_IFACE_GROUP)
assertContains('HandleOwners', group_props)
assertContains('Members', group_props)
assertEquals(members, group_props['Members'])
assertContains('LocalPendingMembers', group_props)
actual_lp_handles = [x[0] for x in group_props['LocalPendingMembers']]
assertEquals(sorted(lp_handles), sorted(actual_lp_handles))
assertContains('RemotePendingMembers', group_props)
assertEquals(sorted(rp_handles), sorted(group_props['RemotePendingMembers']))
assertContains('GroupFlags', group_props)
return chan
| lgpl-2.1 |
akosel/incubator-airflow | tests/plugins/test_plugin.py | 1 | 2901 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This is the class you derive to create a plugin
from airflow.plugins_manager import AirflowPlugin
from flask import Blueprint
from flask_admin import BaseView, expose
from flask_admin.base import MenuLink
# Importing base classes that we need to derive
from airflow.hooks.base_hook import BaseHook
from airflow.models import BaseOperator
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.executors.base_executor import BaseExecutor
# Will show up under airflow.hooks.test_plugin.PluginHook
class PluginHook(BaseHook):
pass
# Will show up under airflow.operators.test_plugin.PluginOperator
class PluginOperator(BaseOperator):
pass
# Will show up under airflow.sensors.test_plugin.PluginSensorOperator
class PluginSensorOperator(BaseSensorOperator):
pass
# Will show up under airflow.executors.test_plugin.PluginExecutor
class PluginExecutor(BaseExecutor):
pass
# Will show up under airflow.macros.test_plugin.plugin_macro
def plugin_macro():
pass
# Creating a flask admin BaseView
class TestView(BaseView):
@expose('/')
def test(self):
# in this example, put your test_plugin/test.html template at airflow/plugins/templates/test_plugin/test.html
return self.render("test_plugin/test.html", content="Hello galaxy!")
v = TestView(category="Test Plugin", name="Test View")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"test_plugin", __name__,
template_folder='templates', # registers airflow/plugins/templates as a Jinja template folder
static_folder='static',
static_url_path='/static/test_plugin')
ml = MenuLink(
category='Test Plugin',
name='Test Menu Link',
url='https://airflow.incubator.apache.org/')
# Defining the plugin class
class AirflowTestPlugin(AirflowPlugin):
name = "test_plugin"
operators = [PluginOperator]
sensors = [PluginSensorOperator]
hooks = [PluginHook]
executors = [PluginExecutor]
macros = [plugin_macro]
admin_views = [v]
flask_blueprints = [bp]
menu_links = [ml]
| apache-2.0 |
SPIhub/hummingbird | src/analysis/pixel_detector.py | 2 | 20836 | # --------------------------------------------------------------------------------------
# Copyright 2016, Benedikt J. Daurer, Filipe R.N.C. Maia, Max F. Hantke, Carl Nettelblad
# Hummingbird is distributed under the terms of the Simplified BSD License.
# -------------------------------------------------------------------------
from __future__ import print_function, absolute_import # Compatibility with python 2 and 3
from numpy import sum, mean, min, max, std
import numpy as np
from backend import ureg
from backend import add_record
import utils.io
import utils.array
def printStatistics(detectors):
for k, r in detectors.items():  # items() keeps this Python-2/3 compatible
v = r.data
print("%s (%s): sum=%g mean=%g min=%g max=%g std=%g" % (k, r.unit,
sum(v), mean(v),
min(v), max(v),
std(v)))
def pnccdGain(evt, record, gainmode):
"""Returns gain (Number of ADUs per photon) based on photon energy record and gain mode.
Args:
:evt: The event variable
:record: A photon energy ``Record`` given in eV
:gainmode: The gain mode of the PNCCD (1-6) or 0 for no gain
"""
maximum_gain = 1250 # at photon energy of 1keV
if gainmode == 6: # 1/1
gain = maximum_gain
elif gainmode == 5 :# 1/4
gain = maximum_gain/4
elif gainmode == 4: # 1/16
gain = maximum_gain/16
elif gainmode == 3: # 1/64
gain = maximum_gain/64
elif gainmode == 2: # 1/128
gain = maximum_gain/128
elif gainmode == 1: # 1/256
gain = maximum_gain/256
elif gainmode == 0:
gain = 1.
gain = gain * (record.data / 1000.) # Rescale gain given a photon energy in eV
add_record(evt['analysis'], 'analysis', 'gain', gain)
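# --- Editor's note: usage sketch, assuming the Hummingbird event API. ---
# For a 1 keV photon-energy record and gain mode 6 the full gain of
# 1250 ADU/photon ends up under evt['analysis']['gain'], e.g.:
#     pnccdGain(evt, evt['photonEnergies']['photonEnergy'], gainmode=6)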
def getSubsetAsics(evt, type, key, subset, output):
"""Adds a one-dimensional stack of an arbitrary subset of asics
to ``evt["analysis"][output]``.
Args:
:evt: The event variable
:type(str): The event type (e.g. photonPixelDetectors)
:key(str): The event key (e.g. CCD)
:subset(list): Asic indices
:output: The output key in analysis
:Authors:
Carl Nettelblad (carl.nettelblad@it.uu.se)
Benedikt J. Daurer (benedikt@xray.bmc.uu.se)
"""
stack = []
detector = evt[type][key]
for i in subset:
panel = i / 2
asic = i % 2
stack.append(detector.data[panel,:,(asic*194):((asic+1)*194)])
add_record(evt["analysis"], "analysis", output, np.hstack(stack), detector.unit)
def getCentral4Asics(evt, type, key):
"""Adds a one-dimensional stack of its 4 centermost asics
to ``evt["analysis"]["central4Asics"]``.
Args:
:evt: The event variable
:type(str): The event type (e.g. photonPixelDetectors)
:key(str): The event key (e.g. CCD)
:Authors:
Carl Nettelblad (carl.nettelblad@it.uu.se)
Benedikt J. Daurer (benedikt@xray.bmc.uu.se)
"""
getSubsetAsics(evt, type, key, [(i * 8 + 1) * 2 for i in range(4)], "central4Asics")
def totalNrPhotons(evt, record, aduPhoton=1, aduThreshold=0.5, outkey=None):
"""Estimates the total nr. of photons on the detector and adds it to ``evt["analysis"][outkey]``.
Args:
:evt: The event variable
:record: The data record (e.g. evt['photonPixelDetectors']['CCD'])
Kwargs:
:aduPhoton(int): ADU count per photon, default = 1
:aduThreshold(int): only pixels above this threshold given in units of ADUs are valid, default = 0.5
:outkey(str): Data key of resulting data record, default is 'nrPhotons'
:Authors:
Benedikt J. Daurer (benedikt@xray.bmc.uu.se)
"""
if outkey is None:
outkey = 'nrPhotons'
data = record.data.flat
valid = data > aduThreshold
add_record(evt["analysis"], "analysis", outkey, sum(data[valid]) / float(aduPhoton))
def maxPhotonValue(evt, record, aduPhoton=1, outkey=None):
"""Estimates the maximum number of photons on one pixel on the detector and adds it to ``evt["analysis"][outkey]``.
Args:
:evt: The event variable
:record: The data record (e.g. evt['photonPixelDetectors']['CCD'])
Kwargs:
:aduPhoton(int): ADU count per photon, default = 1
:outkey(str): Data key of resulting data record, default is 'maxPhotons'
:Authors:
Tomas Ekeberg (ekeberg@xray.bmc.uu.se)
Benedikt J. Daurer (benedikt@xray.bmc.uu.se)
"""
if outkey is None:
outkey = 'maxPhotons'
data = record.data.flat
add_record(evt["analysis"], "analysis", outkey, max(data) / float(aduPhoton))
def threshold(evt, record, threshold, outkey=None):
"""Set all values in an array that are lower than the threshold to zero.
Args:
:evt: The event variable
:record: The data record
:threshold: Set everything lower than this number to zero
Kwargs:
:outkey(str): Data key of resulting data record, default is 'thresholded'
:Authors:
Tomas Ekeberg (ekeberg@xray.bmc.uu.se)
"""
if outkey is None:
outkey = "thresholded"
data_clean = record.data.copy()
data_clean[data_clean < threshold] = 0.
rec = add_record(evt["analysis"], "analysis", outkey, data_clean)
return rec
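# Module-level cache for the assembly geometry computed in assemble(),
# keyed by initkey, so the pixel-to-image index maps are built only once
# per detector geometry.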
initialized = {}
def assemble(evt, type, key, x, y, nx=None, ny=None, subset=None, outkey=None, initkey=None):
"""Asesembles a detector image given some geometry and adds assembled image to ``evt["analysis"]["assembled - " + key]``.
Args:
:evt: The event variable
:type(str): The event type (e.g. photonPixelDetectors)
:key(str): The event key (e.g. CCD)
:x(int ndarray): X coordinates
:y(int ndarray): Y coordinates
Kwargs:
:nx(int): Total width of assembled image (zero padding)
:ny(int): Total height of assembled image (zero padding)
:subset(list): Optional asic indices; if given, only this subset is assembled
:outkey(str): Optional output key, default is "assembled - " + key
:initkey(str): Optional cache key for the assembly geometry, default is key
:Authors:
Benedikt J. Daurer (benedikt@xray.bmc.uu.se)
"""
if initkey is None:
initkey = key
if not initkey in initialized:
if subset is not None:
x_ss = []
y_ss = []
for i in subset:
panel = i / 2
asic = i % 2
x_ss.append(x[panel,:,(asic*194):((asic+1)*194)])
y_ss.append(y[panel,:,(asic*194):((asic+1)*194)])
x_ss = np.hstack(x_ss)
y_ss = np.hstack(y_ss)
else:
x_ss = x
y_ss = y
assembled, height, width, shape, y_ss, x_ss = utils.array.assembleImage(x_ss, y_ss ,nx=nx, ny=ny, return_indices=True)
initialized[initkey] = {
'assembled':assembled,
'height':height,
'width':width,
'shape':shape,
'y':y_ss,
'x':x_ss
}
assembled = initialized[initkey]['assembled']
height = initialized[initkey]['height']
width = initialized[initkey]['width']
shape = initialized[initkey]['shape']
y = initialized[initkey]['y']
x = initialized[initkey]['x']
if subset is not None:
data = []
for i in subset:
panel = i / 2
asic = i % 2
data.append(evt[type][key].data[panel,:,(asic*194):((asic+1)*194)])
data = np.hstack(data)
else:
data = evt[type][key].data
assembled[height-shape[0]:, :shape[1]][y,x] = data
if outkey is None:
add_record(evt["analysis"], "analysis", "assembled - "+key, assembled)
else:
add_record(evt["analysis"], "analysis", outkey, assembled)
def bin(evt, type, key, binning, mask=None):
"""Bin a detector image given a binning factor (and mask).
Adds the records ``evt["analysis"]["binned image - " + key]`` and ``evt["analysis"]["binned mask - " + key]``.
.. note:: This feature depends on the python package `libspimage <https://github.com/FilipeMaia/libspimage>`_.
Args:
:evt: The event variable
:type(str): The event type (e.g. photonPixelDetectors)
:key(str): The event key (e.g. CCD)
:binning(int): The linear binning factor
Kwargs:
:mask: Binary mask, pixels that are masked out are not counted into the binned value.
:Authors:
Max F. Hantke (hantke@xray.bmc.uu.se)
"""
success, spimage = utils.io.load_spimage()
if not success:
print("Skipping analysis.pixel_detector.bin")
return
image = evt[type][key].data
binned_image, binned_mask = spimage.binImage(image, binning, msk=mask, output_binned_mask=True)
add_record(evt["analysis"], "analysis", "binned image - "+key, binned_image)
if binned_mask is not None:
add_record(evt["analysis"], "analysis", "binned mask - "+key, binned_mask)
def radial(evt, record, mask=None, cx=None, cy=None, key=''):
"""Compute the radial average of a detector image given the center position (and a mask).
Adds the records ``evt["analysis"]["radial average - " + key]`` and ``evt["analysis"]["radial distance - " + key]``.
.. note:: This feature depends on the python package `libspimage <https://github.com/FilipeMaia/libspimage>`_.
Args:
:evt: The event variable
:record: The data record (e.g. evt['photonPixelDetectors']['CCD'])
Kwargs:
:mask: Binary mask, pixels that are masked out are not counted into the radial average.
:cx(float): X-coordinate of the center position. If None the center will be in the middle.
:cy(float): Y-coordinate of the center position. If None the center will be in the middle.
:key(str): Suffix appended to the names of the output records, default is ''
:Authors:
Max F. Hantke (hantke@xray.bmc.uu.se)
"""
success, spimage = utils.io.load_spimage()
if not success:
print("Skipping analysis.pixel_detector.radial")
return
image = record.data
r, img_r = spimage.radialMeanImage(image, msk=mask, cx=cx, cy=cy, output_r=True)
valid = np.isfinite(img_r)
if valid.sum() > 0:
r = r[valid]
img_r = img_r[valid]
r_rec = add_record(evt["analysis"], "analysis", "radial distance - " + key, r)
img_rec = add_record(evt["analysis"], "analysis", "radial average - " + key, img_r)
return r_rec, img_rec
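# Illustrative, NumPy-only sketch of a radial mean for readers without
# libspimage. The helper name and the integer-radius binning are assumptions
# chosen for demonstration; they are not part of the Hummingbird API.
def _radial_mean_numpy(image, cx=None, cy=None):
    ny, nx = image.shape
    if cx is None:
        cx = (nx - 1) / 2.
    if cy is None:
        cy = (ny - 1) / 2.
    yy, xx = np.indices(image.shape)
    # Integer radius of every pixel relative to the center
    r = np.sqrt((xx - cx) ** 2 + (yy - cy) ** 2).astype(int)
    # Per-radius sum of pixel values divided by per-radius pixel count
    tbin = np.bincount(r.ravel(), weights=image.ravel())
    nr = np.bincount(r.ravel())
    return tbin / np.maximum(nr, 1)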
def commonModeCSPAD2x2(evt, type, key, mask=None):
"""Subtraction of common mode using median value of masked pixels (left and right half of detector are treated separately).
Adds a record ``evt["analysis"]["cm_corrected - " + key]``.
Args:
:evt: The event variable
:type(str): The event type (e.g. photonPixelDetectors)
:key(str): The event key (e.g. CCD)
Kwargs:
:mask: Binary mask
:Authors:
Max F. Hantke (hantke@xray.bmc.uu.se)
Benedikt J. Daurer (benedikt@xray.bmc.uu.se)
"""
data = evt[type][key].data
dataCorrected = np.copy(data)
lData = data[:,:data.shape[1]//2]
rData = data[:,data.shape[1]//2:]
if mask is None:
lMask = np.ones(shape=lData.shape, dtype="bool")
rMask = np.ones(shape=rData.shape, dtype="bool")
else:
lMask = mask[:,:data.shape[1]//2] == False
rMask = mask[:,data.shape[1]//2:] == False
if lMask.sum() > 0:
dataCorrected[:,:data.shape[1]//2] -= np.median(lData[lMask])
if rMask.sum() > 0:
dataCorrected[:,data.shape[1]//2:] -= np.median(rData[rMask])
add_record(evt["analysis"], "analysis", "cm_corrected - " + key, dataCorrected)
def commonModeLines(evt, record, outkey=None, direction='vertical'):
"""Common mode correction subtracting the median along lines.
Args:
:evt: The event variable
:record: A pixel detector ``Record``
Kwargs:
:outkey: The event key for the corrected detector image, default is "corrected"
:direction: The direction of the lines across which the median is taken, default is 'vertical'
"""
if outkey is None:
outkey = "corrected"
data = record.data
dataCorrected = np.copy(data)
if direction == 'vertical':
dataCorrected -= np.transpose(np.median(data,axis=0).repeat(data.shape[0]).reshape(data.shape))
elif direction == 'horizontal':
dataCorrected -= np.median(data,axis=1).repeat(data.shape[1]).reshape(data.shape)
add_record(evt["analysis"], "analysis", outkey, dataCorrected)
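# Equivalent broadcasting formulation of the per-line median subtraction
# above (illustrative sketch, not part of the public API). Unlike the
# repeat/reshape construction, which assumes a square frame in the vertical
# case, this version also handles non-square frames.
def _cm_lines_broadcast(data, direction='vertical'):
    axis = 0 if direction == 'vertical' else 1
    return data - np.median(data, axis=axis, keepdims=True)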
def _cmc(img, msk=None, axis=1, signal_threshold=None, min_nr_pixels_per_median=1):
# Valid input?
if axis not in [0,1]:
logging.error("axis=%s is invalid input." % str(axis))
return
# Transpose if necessary
if axis == 0:
imgt = img.transpose()
if msk is not None:
mskt = msk.transpose()
else:
mskt = None
tmpimg = np.copy(imgt)
tmpmsk = mskt
else:
tmpimg = np.copy(img)
tmpmsk = msk
excl_calc = np.zeros(dtype='bool', shape=tmpimg.shape)
excl_corr = np.zeros(dtype='bool', shape=tmpimg.shape)
# Exclude pixels from median calculation
if signal_threshold is not None:
# Pixels that shall be excluded for median calculation
excl_calc = (tmpimg-np.median(tmpimg) > signal_threshold)
if msk is not None:
excl_calc |= (tmpmsk==False)
if min_nr_pixels_per_median >= 1:
# Pixels that shall not be cmc corrected
excl_corr = ((tmpimg.shape[1]-excl_calc.sum(axis=1)) <= min_nr_pixels_per_median).repeat(tmpimg.shape[1]).reshape(tmpimg.shape)
# For avoiding warning when calculating median with no values
if excl_corr.any():
excl_calc[excl_corr] = False
np.putmask(tmpimg, excl_calc, np.nan)
# Calculate medians
cm = np.nanmedian(tmpimg, axis=1).repeat(tmpimg.shape[1]).reshape(tmpimg.shape)
# Exclude pixels from correction
if msk is not None:
excl_corr |= (tmpmsk==False)
excl_corr |= np.isnan(cm)
np.putmask(cm, excl_corr, 0.)
# Subtract common mode
if axis == 0:
imgt -= cm
else:
img -= cm
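# Minimal synthetic check (illustrative) of the helper above: a frame whose
# rows each carry a constant offset is flattened to zero by row-wise correction.
def _cmc_demo():
    frame = np.outer(np.arange(4.), np.ones(6))  # row i is constant, equal to i
    work = frame.copy()
    _cmc(work, axis=1)  # subtract each row's median in place
    return np.allclose(work, 0.)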
def commonModePNCCD(evt, type, key, outkey=None, transpose=False, signal_threshold=None, mask=None, min_nr_pixels_per_median=1):
"""Common mode correction for PNCCDs.
For each row its median value is subtracted (left and right half of detector are treated separately).
Adds a record ``evt["analysis"][outkey]``.
Args:
:evt: The event variable
:type(str): The event type (e.g. photonPixelDetectors)
:key(str): The event key (e.g. CCD)
Kwargs:
:outkey(str): The event key for the corrected image, default is "corrected - " + key
:transpose(bool): Apply procedure on transposed image
:signal_threshold(float): Apply procedure by using only pixels below given value
:mask: You may provide a boolean mask with values that shall be excluded from common mode correction
:min_nr_pixels_per_median(int): Common mode correction will be skipped for pixel lines that do not contain as much as this number of values
:Authors:
Max F. Hantke (hantke@xray.bmc.uu.se)
Benedikt J. Daurer (benedikt@xray.bmc.uu.se)
"""
if outkey is None:
outkey = "corrected - " + key
data = evt[type][key].data
dataCorrected = np.copy(data)
lData = dataCorrected[:,:data.shape[1]//2]
rData = dataCorrected[:,data.shape[1]//2:]
if mask is not None:
lMask = mask[:,:data.shape[1]//2]
rMask = mask[:,data.shape[1]//2:]
else:
lMask = None
rMask = None
_cmc(lData, msk=lMask, axis=1 if not transpose else 0, signal_threshold=signal_threshold, min_nr_pixels_per_median=min_nr_pixels_per_median)
_cmc(rData, msk=rMask, axis=1 if not transpose else 0, signal_threshold=signal_threshold, min_nr_pixels_per_median=min_nr_pixels_per_median)
add_record(evt["analysis"], "analysis", outkey, dataCorrected)
def commonModePNCCD2(evt, type, key, outkey=None, signal_threshold=None, mask=None, min_nr_pixels_per_median=1):
"""Common mode correction for PNCCDs.
For each of the four detector quadrants, the median is subtracted along both axes (columns first, then rows).
Adds a record ``evt["analysis"][outkey]``.
Args:
:evt: The event variable
:type(str): The event type (e.g. photonPixelDetectors)
:key(str): The event key (e.g. CCD)
Kwargs:
:outkey(str): The event key for the corrected image, default is "corrected - " + key
:signal_threshold(float): Apply procedure by using only pixels below given value
:mask: You may provide a boolean mask with values that shall be excluded from common mode correction
:min_nr_pixels_per_median(int): Common mode correction will be skipped for pixel lines that do not contain as much as this number of values
:Authors:
Max F. Hantke (hantke@xray.bmc.uu.se)
Benedikt J. Daurer (benedikt@xray.bmc.uu.se)
"""
if outkey is None:
outkey = "corrected - " + key
data = evt[type][key].data
dataCorrected = np.copy(data)
quads = [dataCorrected[:data.shape[0]//2,:data.shape[1]//2],
dataCorrected[data.shape[0]//2:,:data.shape[1]//2],
dataCorrected[:data.shape[0]//2,data.shape[1]//2:],
dataCorrected[data.shape[0]//2:,data.shape[1]//2:]]
if mask is not None:
mquads = [mask[:data.shape[0]//2,:data.shape[1]//2],
mask[data.shape[0]//2:,:data.shape[1]//2],
mask[:data.shape[0]//2,data.shape[1]//2:],
mask[data.shape[0]//2:,data.shape[1]//2:]]
else:
mquads = [None, None, None, None]
for quad,mquad in zip(quads,mquads):
_cmc(quad, msk=mquad, axis=0, signal_threshold=signal_threshold, min_nr_pixels_per_median=min_nr_pixels_per_median)
_cmc(quad, msk=mquad, axis=1, signal_threshold=signal_threshold, min_nr_pixels_per_median=min_nr_pixels_per_median)
add_record(evt["analysis"], "analysis", outkey, dataCorrected)
def subtractImage(evt, type, key, image, outkey=None):
"""Subtract an image.
Adds a record ``evt["analysis"][outkey]``.
Args:
:evt: The event variable
:type(str): The event type (e.g. photonPixelDetectors)
:key(str): The event key (e.g. CCD)
Kwargs:
:outkey(str): The event key for the subtracted image, default is "subtracted - " + key
:Authors:
Max F. Hantke (hantke@xray.bmc.uu.se)
Benedikt J. Daurer (benedikt@xray.bmc.uu.se)
"""
if outkey is None:
outkey = "subtracted - " + key
data = evt[type][key].data
dataCorrected = data - image
add_record(evt["analysis"], "analysis", outkey, dataCorrected)
def cropAndCenter(evt, data_rec, cx=None, cy=None, w=None, h=None, outkey='cropped'):
"""Crops the data record to a (h, w) window centered at (cx, cy) and adds it to ``evt["analysis"][outkey]``."""
data = data_rec.data
Ny, Nx = data.shape
if cx is None:
cx = (Nx-1)/2.
if cy is None:
cy = (Ny-1)/2.
# Round to .0 / .5
cx = np.round(cx * 2)/2.
cy = np.round(cy * 2)/2.
if w is None:
w = Nx
if h is None:
h = Ny
data_cropped = data[int(round(cy-h/2)):int(round(cy+h/2)), int(round(cx-w/2)):int(round(cx+w/2))]
add_record(evt["analysis"], "analysis", outkey, data_cropped)
def rotate90(evt, data_rec, k=1, outkey='rotated'):
"""Rotates the data record by k*90 degrees counter-clockwise and adds it to ``evt["analysis"][outkey]``."""
data_rotated = np.rot90(data_rec.data,k)
return add_record(evt["analysis"], "analysis", outkey, data_rotated)
def moveHalf(evt, record, vertical=0, horizontal=0, outkey='half-moved', transpose=False):
"""Moves the two detector halves apart by the given number of pixels and adds the result to ``evt["analysis"][outkey]``."""
data = record.data
if transpose:
data = np.transpose(data)
ny,nx = data.shape
if horizontal < 0:
horizontal = 0
data_moved = np.zeros((ny + abs(vertical), nx + horizontal), dtype=data.dtype)
if vertical < 0:
data_moved[-vertical:,:nx//2] = data[:,:nx//2]
data_moved[:vertical,nx//2+horizontal:] = data[:,nx//2:]
elif vertical > 0:
data_moved[:-vertical,:nx//2] = data[:,:nx//2]
data_moved[vertical:,nx//2+horizontal:] = data[:,nx//2:]
else:
data_moved[:,:nx//2] = data[:,:nx//2]
data_moved[:,nx//2+horizontal:] = data[:,nx//2:]
if transpose:
data_moved = np.transpose(data_moved)
return add_record(evt["analysis"], "analysis", outkey, data_moved)
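# Illustrative usage of moveHalf (hypothetical record; evt and add_record come
# from the Hummingbird event machinery, so this is a sketch, not runnable here):
# rec = add_record(evt['analysis'], 'analysis', 'raw', np.zeros((4, 4)))
# moved = moveHalf(evt, rec, vertical=1, horizontal=2)
# moved.data.shape == (5, 6)  # right half shifted 2 px right and 1 px down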
| bsd-2-clause |
tersmitten/ansible | lib/ansible/modules/cloud/vmware/vmware_host_powermgmt_policy.py | 43 | 8768 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_powermgmt_policy
short_description: Manages the Power Management Policy of an ESXi host system
description:
- This module can be used to manage the Power Management Policy of ESXi host systems in given vCenter infrastructure.
version_added: 2.8
author:
- Christian Kotte (@ckotte) <christian.kotte@gmx.de>
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
policy:
description:
- Set the Power Management Policy of the host system.
choices: [ 'high-performance', 'balanced', 'low-power', 'custom' ]
default: 'balanced'
esxi_hostname:
description:
- Name of the host system to work with.
- This is a required parameter if C(cluster_name) is not specified.
cluster_name:
description:
- Name of the cluster from which all host systems will be used.
- This is a required parameter if C(esxi_hostname) is not specified.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Set the Power Management Policy of a host system to high-performance
vmware_host_powermgmt_policy:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_host }}'
policy: high-performance
validate_certs: no
delegate_to: localhost
- name: Set the Power Management Policy of all host systems from cluster to high-performance
vmware_host_powermgmt_policy:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
policy: high-performance
validate_certs: no
delegate_to: localhost
'''
RETURN = r'''
result:
description: metadata about host system's Power Management Policy
returned: always
type: dict
sample: {
"changed": true,
"result": {
"esxi01": {
"changed": true,
"current_state": "high-performance",
"desired_state": "high-performance",
"msg": "Power policy changed",
"previous_state": "balanced"
}
}
}
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
from ansible.module_utils._text import to_native
class VmwareHostPowerManagement(PyVmomi):
"""
Class to manage power management policy of an ESXi host system
"""
def __init__(self, module):
super(VmwareHostPowerManagement, self).__init__(module)
cluster_name = self.params.get('cluster_name')
esxi_host_name = self.params.get('esxi_hostname')
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
if not self.hosts:
self.module.fail_json(msg="Failed to find host system with given configuration.")
def ensure(self):
"""
Manage power management policy of an ESXi host system
"""
results = dict(changed=False, result=dict())
policy = self.params.get('policy')
host_change_list = []
power_policies = {
'high-performance': {
'key': 1,
'short_name': 'static'
},
'balanced': {
'key': 2,
'short_name': 'dynamic'
},
'low-power': {
'key': 3,
'short_name': 'low'
},
'custom': {
'key': 4,
'short_name': 'custom'
}
}
for host in self.hosts:
changed = False
results['result'][host.name] = dict(msg='')
power_system = host.configManager.powerSystem
# get current power policy
power_system_info = power_system.info
current_host_power_policy = power_system_info.currentPolicy
# the "name" and "description" parameters are pretty useless
# they store only strings containing "PowerPolicy.<shortName>.name" and "PowerPolicy.<shortName>.description"
if current_host_power_policy.shortName == "static":
current_policy = 'high-performance'
elif current_host_power_policy.shortName == "dynamic":
current_policy = 'balanced'
elif current_host_power_policy.shortName == "low":
current_policy = 'low-power'
elif current_host_power_policy.shortName == "custom":
current_policy = 'custom'
results['result'][host.name]['desired_state'] = policy
# Don't do anything if the power policy is already configured
if current_host_power_policy.key == power_policies[policy]['key']:
results['result'][host.name]['changed'] = changed
results['result'][host.name]['previous_state'] = current_policy
results['result'][host.name]['current_state'] = policy
results['result'][host.name]['msg'] = "Power policy is already configured"
else:
# get available power policies and check if policy is included
supported_policy = False
power_system_capability = power_system.capability
available_host_power_policies = power_system_capability.availablePolicy
for available_policy in available_host_power_policies:
if available_policy.shortName == power_policies[policy]['short_name']:
supported_policy = True
if supported_policy:
if not self.module.check_mode:
try:
power_system.ConfigurePowerPolicy(key=power_policies[policy]['key'])
changed = True
results['result'][host.name]['changed'] = True
results['result'][host.name]['msg'] = "Power policy changed"
except vmodl.fault.InvalidArgument:
self.module.fail_json(msg="Invalid power policy key provided for host '%s'" % host.name)
except vim.fault.HostConfigFault as host_config_fault:
self.module.fail_json(msg="Failed to configure power policy for host '%s': %s" %
(host.name, to_native(host_config_fault.msg)))
else:
changed = True
results['result'][host.name]['changed'] = True
results['result'][host.name]['msg'] = "Power policy will be changed"
results['result'][host.name]['previous_state'] = current_policy
results['result'][host.name]['current_state'] = policy
else:
changed = False
results['result'][host.name]['changed'] = changed
results['result'][host.name]['previous_state'] = current_policy
results['result'][host.name]['current_state'] = current_policy
self.module.fail_json(msg="Power policy '%s' isn't supported for host '%s'" %
(policy, host.name))
host_change_list.append(changed)
if any(host_change_list):
results['changed'] = True
self.module.exit_json(**results)
def main():
"""
Main
"""
argument_spec = vmware_argument_spec()
argument_spec.update(
policy=dict(type='str', default='balanced',
choices=['high-performance', 'balanced', 'low-power', 'custom']),
esxi_hostname=dict(type='str', required=False),
cluster_name=dict(type='str', required=False),
)
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
supports_check_mode=True
)
host_power_management = VmwareHostPowerManagement(module)
host_power_management.ensure()
if __name__ == '__main__':
main()
| gpl-3.0 |
tscholl2/smc | src/scripts/build_sage.py | 3 | 2177 | #!/usr/bin/env python
###############################################################################
#
# CoCalc: Collaborative Calculation in the Cloud
#
# Copyright (C) 2016, Sagemath Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
## DEPRECATED -- not used at all
import os
def cmd(s):
print s
if os.system(s):
raise RuntimeError("Error executing '%s'" % s)
available_disk = int(os.popen("df /").read().split()[-3])
if available_disk < 5000000:
raise RuntimeError(
"There is insufficient disk space to build Sage; ensure 5GB free on /."
)
cmd("export MAKE='make -j20'; make; unset MAKE; make")
cmd("rm -rf spkg/build/*")
cmd("echo 'easy_install markdown2' | ./sage -sh")
cmd("./sage -i 4ti2-1.3.2.p1 biopython-1.60 brian-1.2.1.p0 cbc-2.7.5 cluster_seed-1.0 coxeter3-1.1 cryptominisat-2.9.6 cunningham_tables-1.0 database_cremona_ellcurve-20121022 database_gap-4.5.7 database_jones_numfield-v4 database_kohel-20060803 database_odlyzko_zeta database_sloane_oeis-2005-12 database_symbolic_data-20070206 dot2tex-2.8.7-2 gap_packages-4.5.7 gmpy-1.0.1 gnuplotpy-1.8 guppy-0.1.8 kash3-2008-07-31.p0 lie-2.2.2.p5 lrs-4.2b.p1 nauty-24 normaliz-2.8.p0 nose-1.1.2 nzmath-1.1.0 p_group_cohomology-2.1.3 phc-2.3.65.p0 pybtex-20120618 pycryptoplus-20100809-git pyx-0.10 pyzmq-2.1.11.p1 qhull-2010.1 sage-mode-0.7 TOPCOM-0.17.4 zeromq-2.2.0.p0"
)
cmd("rm -rf spkg/optional/*")
cmd("rm -rf spkg/build/*")
cmd("make ptestlong")
| agpl-3.0 |
Memeo/samba-unovero | third_party/dnspython/dns/entropy.py | 91 | 3878 | # Copyright (C) 2009, 2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import os
import time
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
class EntropyPool(object):
def __init__(self, seed=None):
self.pool_index = 0
self.digest = None
self.next_byte = 0
self.lock = _threading.Lock()
try:
import hashlib
self.hash = hashlib.sha1()
self.hash_len = 20
except:
try:
import sha
self.hash = sha.new()
self.hash_len = 20
except:
import md5
self.hash = md5.new()
self.hash_len = 16
self.pool = '\0' * self.hash_len
if seed is not None:
self.stir(seed)
self.seeded = True
else:
self.seeded = False
def stir(self, entropy, already_locked=False):
if not already_locked:
self.lock.acquire()
try:
bytes = [ord(c) for c in self.pool]
for c in entropy:
if self.pool_index == self.hash_len:
self.pool_index = 0
b = ord(c) & 0xff
bytes[self.pool_index] ^= b
self.pool_index += 1
self.pool = ''.join([chr(c) for c in bytes])
finally:
if not already_locked:
self.lock.release()
def _maybe_seed(self):
if not self.seeded:
try:
seed = os.urandom(16)
except:
try:
r = file('/dev/urandom', 'r', 0)
try:
seed = r.read(16)
finally:
r.close()
except:
seed = str(time.time())
self.seeded = True
self.stir(seed, True)
def random_8(self):
self.lock.acquire()
self._maybe_seed()
try:
if self.digest is None or self.next_byte == self.hash_len:
self.hash.update(self.pool)
self.digest = self.hash.digest()
self.stir(self.digest, True)
self.next_byte = 0
value = ord(self.digest[self.next_byte])
self.next_byte += 1
finally:
self.lock.release()
return value
def random_16(self):
return self.random_8() * 256 + self.random_8()
def random_32(self):
return self.random_16() * 65536 + self.random_16()
def random_between(self, first, last):
size = last - first + 1
if size > 4294967296L:
raise ValueError('too big')
if size > 65536:
rand = self.random_32
max = 4294967295L
elif size > 256:
rand = self.random_16
max = 65535
else:
rand = self.random_8
max = 255
return (first + size * rand() // (max + 1))
pool = EntropyPool()
def random_16():
return pool.random_16()
def between(first, last):
return pool.random_between(first, last)
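# Illustrative use of the module-level helpers above (values differ per run):
def _entropy_demo():
    roll = between(1, 6)    # uniform-ish integer in [1, 6]
    query_id = random_16()  # 16-bit value, e.g. a DNS message id
    return roll, query_id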
| gpl-3.0 |
aduric/crossfit | nonrel/tests/regressiontests/generic_views/views.py | 49 | 4506 | from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views import generic
from regressiontests.generic_views.models import Artist, Author, Book, Page
from regressiontests.generic_views.forms import AuthorForm
class CustomTemplateView(generic.TemplateView):
template_name = 'generic_views/about.html'
def get_context_data(self, **kwargs):
return {
'params': kwargs,
'key': 'value'
}
class ObjectDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
def get_object(self):
return {'foo': 'bar'}
class ArtistDetail(generic.DetailView):
queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
queryset = Author.objects.all()
class PageDetail(generic.DetailView):
queryset = Page.objects.all()
template_name_field = 'template'
class DictList(generic.ListView):
"""A ListView that doesn't use a model."""
queryset = [
{'first': 'John', 'last': 'Lennon'},
{'first': 'Yoko', 'last': 'Ono'}
]
template_name = 'generic_views/list.html'
class ArtistList(generic.ListView):
template_name = 'generic_views/list.html'
queryset = Artist.objects.all()
class AuthorList(generic.ListView):
queryset = Author.objects.all()
class CustomPaginator(Paginator):
def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
super(CustomPaginator, self).__init__(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class AuthorListCustomPaginator(AuthorList):
paginate_by = 5
def get_paginator(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
return super(AuthorListCustomPaginator, self).get_paginator(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class ArtistCreate(generic.CreateView):
model = Artist
class NaiveAuthorCreate(generic.CreateView):
queryset = Author.objects.all()
class AuthorCreate(generic.CreateView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorCreate(generic.CreateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class AuthorCreateRestricted(AuthorCreate):
post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
model = Artist
class NaiveAuthorUpdate(generic.UpdateView):
queryset = Author.objects.all()
class AuthorUpdate(generic.UpdateView):
model = Author
success_url = '/list/authors/'
class OneAuthorUpdate(generic.UpdateView):
success_url = '/list/authors/'
def get_object(self):
return Author.objects.get(pk=1)
class SpecializedAuthorUpdate(generic.UpdateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class NaiveAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
template_name = 'generic_views/confirm_delete.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('authors_list')
class BookConfig(object):
queryset = Book.objects.all()
date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
pass
class BookDetail(BookConfig, generic.DateDetailView):
pass
class AuthorGetQuerySetFormView(generic.edit.ModelFormMixin):
def get_queryset(self):
return Author.objects.all()
| bsd-3-clause |
samthor/intellij-community | python/lib/Lib/site-packages/django/utils/dates.py | 73 | 1837 | "Commonly-used date structures"
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
WEEKDAYS = {
0:_('Monday'), 1:_('Tuesday'), 2:_('Wednesday'), 3:_('Thursday'), 4:_('Friday'),
5:_('Saturday'), 6:_('Sunday')
}
WEEKDAYS_ABBR = {
0:_('Mon'), 1:_('Tue'), 2:_('Wed'), 3:_('Thu'), 4:_('Fri'),
5:_('Sat'), 6:_('Sun')
}
WEEKDAYS_REV = {
'monday':0, 'tuesday':1, 'wednesday':2, 'thursday':3, 'friday':4,
'saturday':5, 'sunday':6
}
MONTHS = {
1:_('January'), 2:_('February'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'),
7:_('July'), 8:_('August'), 9:_('September'), 10:_('October'), 11:_('November'),
12:_('December')
}
MONTHS_3 = {
1:_('jan'), 2:_('feb'), 3:_('mar'), 4:_('apr'), 5:_('may'), 6:_('jun'),
7:_('jul'), 8:_('aug'), 9:_('sep'), 10:_('oct'), 11:_('nov'), 12:_('dec')
}
MONTHS_3_REV = {
'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8,
'sep':9, 'oct':10, 'nov':11, 'dec':12
}
MONTHS_AP = { # month names in Associated Press style
1:_('Jan.'), 2:_('Feb.'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'), 7:_('July'),
8:_('Aug.'), 9:_('Sept.'), 10:_('Oct.'), 11:_('Nov.'), 12:_('Dec.')
}
MONTHS_ALT = { # required for long date representation by some locales
1: pgettext_lazy('alt. month', 'January'),
2: pgettext_lazy('alt. month', 'February'),
3: pgettext_lazy('alt. month', 'March'),
4: pgettext_lazy('alt. month', 'April'),
5: pgettext_lazy('alt. month', 'May'),
6: pgettext_lazy('alt. month', 'June'),
7: pgettext_lazy('alt. month', 'July'),
8: pgettext_lazy('alt. month', 'August'),
9: pgettext_lazy('alt. month', 'September'),
10: pgettext_lazy('alt. month', 'October'),
11: pgettext_lazy('alt. month', 'November'),
12: pgettext_lazy('alt. month', 'December')
}
| apache-2.0 |
abhishekjairath/codeyard | commit/lib/python2.7/site-packages/pip/_vendor/requests/utils.py | 222 | 19653 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import MissingSchema, InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
if hasattr(o, '__len__'):
return len(o)
if hasattr(o, 'len'):
return o.len
if hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
return os.fstat(fileno).st_size
if hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringI
return len(o.getvalue())
def get_netrc_auth(url):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc
host = ri.netloc.split(':')[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth
pass
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if name and name[0] != '<' and name[-1] != '>':
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. every encoding from ``<meta ... charset=XXX>``
3. fall back and replace all unicode characters
"""
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved, unreserved,
# or '%')
return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
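# Illustrative round trip with the two helpers above (hypothetical URLs):
def _requote_demo():
    assert unquote_unreserved('http://example.com/%7Euser') == 'http://example.com/~user'
    assert requote_uri('http://example.com/a b?q=1') == 'http://example.com/a%20b?q=1'
    return True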
def address_in_network(ip, net):
"""
This function allows you to check if on IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""
Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
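# Illustrative sanity checks for the two helpers above:
def _subnet_demo():
    assert dotted_netmask(24) == '255.255.255.0'
    assert address_in_network('192.168.1.1', '192.168.1.0/24')
    assert not address_in_network('10.0.0.1', '192.168.1.0/24')
    return True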
def is_ipv4_address(string_ip):
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""Very simple check of the cidr format in no_proxy variable"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = no_proxy.replace(' ', '').split(',')
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return {}
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return {}
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return {}
# If we get here, we either didn't have no_proxy set or we're not going
# anywhere that no_proxy applies to, and the system settings don't require
# bypassing the proxy for the current URL.
return getproxies()
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['%s/%s' % (name, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
def default_headers():
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate', 'compress')),
'Accept': '*/*'
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in value.split(","):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
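# Illustrative checks of the null-pattern heuristic above (hypothetical data):
def _guess_json_utf_demo():
    assert guess_json_utf('{"a": 1}'.encode('utf-8')) == 'utf-8'
    assert guess_json_utf('{"a": 1}'.encode('utf-16-le')) == 'utf-16-le'
    return True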
def except_on_missing_scheme(url):
"""Given a URL, raise a MissingSchema exception if the scheme is missing.
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
if not scheme:
raise MissingSchema('Proxy URLs must have explicit schemes.')
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
| mit |
harshaneelhg/scikit-learn | sklearn/metrics/pairwise.py | 104 | 42995 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
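# Quick numerical check (illustrative only) that the dot-product expansion
# used above agrees with a direct pairwise computation on small random data:
def _euclidean_expansion_check():
    rng = np.random.RandomState(0)
    A = rng.rand(3, 4)
    B = rng.rand(2, 4)
    direct = np.sqrt(((A[:, None, :] - B[None, :, :]) ** 2).sum(axis=-1))
    return np.allclose(euclidean_distances(A, B), direct)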
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
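# Illustrative usage on tiny arrays: index of, and distance to, the closest
# row of Y for every row of X.
def _argmin_min_demo():
    X = np.array([[0., 0.], [10., 10.]])
    Y = np.array([[1., 1.], [9., 9.]])
    idx, dist = pairwise_distances_argmin_min(X, Y)
    return idx.tolist() == [0, 1] and np.allclose(dist, np.sqrt(2.))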
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
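
# --- Illustrative sketch (editor's addition): pairwise_distances_argmin
# returns the same indices as computing the full distance matrix and taking
# argmin, but works in batches. Sample values are made up.
def _demo_pairwise_distances_argmin():
    import numpy as np
    X = np.array([[0.0, 1.0], [4.0, 4.0]])
    Y = np.array([[0.0, 0.0], [5.0, 5.0]])
    expected = pairwise_distances(X, Y).argmin(axis=1)
    assert list(pairwise_distances_argmin(X, Y)) == list(expected)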
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
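
# --- Illustrative sketch (editor's addition): cosine_distances is exactly
# 1 - cosine_similarity, so parallel vectors give 0 and orthogonal vectors
# give 1. Sample values are made up.
def _demo_cosine_distances():
    import numpy as np
    D = cosine_distances(np.array([[1.0, 0.0]]),
                         np.array([[2.0, 0.0], [0.0, 3.0]]))
    assert np.allclose(D, [[0.0, 1.0]])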
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
-----
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
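
# --- Illustrative sketch (editor's addition) checking the note above: the
# paired cosine distance equals half the squared euclidean distance between
# the L2-normalized rows. Sample values are made up.
def _demo_paired_cosine_distances():
    import numpy as np
    X = np.array([[3.0, 0.0], [1.0, 1.0]])
    Y = np.array([[0.0, 5.0], [2.0, 2.0]])
    d = paired_cosine_distances(X, Y)
    expected = 0.5 * ((normalize(X) - normalize(Y)) ** 2).sum(axis=1)
    assert np.allclose(d, expected)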
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
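
# --- Illustrative sketch (editor's addition): a callable metric is applied
# row by row, here a hand-written L1 distance. Sample values are made up.
def _demo_paired_distances_callable():
    import numpy as np
    X = np.array([[0.0, 1.0], [2.0, 1.0]])
    Y = np.array([[0.0, 1.0], [0.0, 1.0]])
    d = paired_distances(X, Y, metric=lambda a, b: np.abs(a - b).sum())
    assert np.allclose(d, [0.0, 2.0])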
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features.
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
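
# --- Illustrative sketch (editor's addition) spelling out the formula in
# the docstring: K(x, y) = (gamma * <x, y> + coef0) ** degree. Sample
# values are made up.
def _demo_polynomial_kernel():
    import numpy as np
    X = np.array([[1.0, 2.0]])
    Y = np.array([[3.0, 4.0]])  # <x, y> = 1*3 + 2*4 = 11
    K = polynomial_kernel(X, Y, degree=2, gamma=0.5, coef0=1)
    assert np.allclose(K, (0.5 * 11.0 + 1.0) ** 2)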
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features.
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
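
# --- Illustrative sketch (editor's addition) spelling out the formula in
# the docstring: K(x, y) = exp(-gamma * ||x - y||^2). Sample values are
# made up.
def _demo_rbf_kernel():
    import numpy as np
    X = np.array([[0.0, 0.0]])
    Y = np.array([[3.0, 4.0]])  # ||x - y||^2 = 25
    assert np.allclose(rbf_kernel(X, Y, gamma=0.1), np.exp(-2.5))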
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
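
# --- Illustrative sketch (editor's addition) checking the docstring claim
# that on L2-normalized data cosine similarity reduces to linear_kernel.
# Sample values are made up.
def _demo_cosine_similarity():
    import numpy as np
    X = np.array([[3.0, 4.0], [1.0, 0.0]])
    Xn = normalize(X)
    assert np.allclose(cosine_similarity(X), linear_kernel(Xn, Xn))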
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
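
# --- Illustrative sketch (editor's addition): the exponentiated kernel is
# exp(gamma * additive_chi2_kernel), and the additive kernel is already
# negative. Sample values are made up.
def _demo_chi2_kernel():
    import numpy as np
    X = np.array([[0.25, 0.75]])
    Y = np.array([[0.5, 0.5]])
    K_add = additive_chi2_kernel(X, Y)
    assert np.allclose(chi2_kernel(X, Y, gamma=1.0), np.exp(K_add))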
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
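
# --- Illustrative sketch (editor's addition): a callable metric goes
# through _pairwise_callable, which fills the upper triangle, mirrors it
# and computes the diagonal separately. Sample values are made up.
def _demo_pairwise_distances_callable():
    import numpy as np
    X = np.array([[0.0, 0.0], [1.0, 1.0]])
    D = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).max())
    assert np.allclose(D, [[0.0, 1.0], [1.0, 0.0]])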
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the kernel value between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
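
# --- Illustrative sketch (editor's addition): with filter_params=True,
# keyword arguments not listed in KERNEL_PARAMS for the chosen kernel are
# silently dropped ('unused_param' below is a made-up name).
def _demo_pairwise_kernels_filter_params():
    import numpy as np
    X = np.array([[1.0, 2.0], [3.0, 4.0]])
    K = pairwise_kernels(X, metric="rbf", filter_params=True,
                         gamma=0.5, unused_param=123)
    assert np.allclose(K, rbf_kernel(X, gamma=0.5))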
| bsd-3-clause |
edmond-chhung/linkchecker | linkcheck/threader.py | 9 | 1347 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Support for managing threads.
"""
import threading
class StoppableThread (threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__ (self):
"""Store stop event."""
super(StoppableThread, self).__init__()
self._stop = threading.Event()
def stop (self):
"""Set stop event."""
self._stop.set()
def stopped (self, timeout=None):
"""Return True if stop event is set."""
return self._stop.wait(timeout)
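
# --- Illustrative usage sketch (editor's addition, not part of the original
# module): a worker thread that polls the stop event between work units.
# The 0.1 second timeout and the counting "work" are made-up examples.
class ExampleWorker (StoppableThread):
    """Count until stop() is called from another thread."""
    def __init__ (self):
        """Initialize counter."""
        super(ExampleWorker, self).__init__()
        self.count = 0
    def run (self):
        """stopped() doubles as a sleep: it waits up to the given timeout
        and returns True once the stop event is set."""
        while not self.stopped(0.1):
            self.count += 1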
| gpl-2.0 |
rcarrillocruz/ansible | lib/ansible/modules/network/illumos/dladm_vlan.py | 70 | 5867 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dladm_vlan
short_description: Manage VLAN interfaces on Solaris/illumos systems.
description:
- Create or delete VLAN interfaces on Solaris/illumos systems.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
name:
description:
- VLAN interface name.
required: true
link:
description:
- VLAN underlying link name.
required: true
temporary:
description:
- Specifies that the VLAN interface is temporary. Temporary VLANs
do not persist across reboots.
required: false
default: false
vlan_id:
description:
- VLAN ID value for VLAN interface.
required: false
default: 0
aliases: [ "vid" ]
state:
description:
- Create or delete Solaris/illumos VLAN interface.
required: false
default: "present"
choices: [ "present", "absent" ]
'''
EXAMPLES = '''
- name: Create 'vlan42' VLAN over 'bnx0' link
  dladm_vlan: name=vlan42 link=bnx0 vlan_id=42 state=present
- name: Remove 'vlan1337' VLAN interface
  dladm_vlan: name=vlan1337 state=absent
'''
RETURN = '''
name:
description: VLAN name
returned: always
type: string
sample: vlan42
state:
description: state of the target
returned: always
type: string
sample: present
temporary:
description: specifies if operation will persist across reboots
returned: always
type: boolean
sample: True
link:
description: VLAN's underlying link name
returned: always
type: string
sample: e1000g0
vlan_id:
description: VLAN ID
returned: always
type: string
sample: 42
'''
from ansible.module_utils.basic import AnsibleModule
class VLAN(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.link = module.params['link']
self.vlan_id = module.params['vlan_id']
self.temporary = module.params['temporary']
self.state = module.params['state']
def vlan_exists(self):
cmd = [self.module.get_bin_path('dladm', True)]
cmd.append('show-vlan')
cmd.append(self.name)
(rc, _, _) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def create_vlan(self):
cmd = [self.module.get_bin_path('dladm', True)]
cmd.append('create-vlan')
if self.temporary:
cmd.append('-t')
cmd.append('-l')
cmd.append(self.link)
cmd.append('-v')
cmd.append(self.vlan_id)
cmd.append(self.name)
return self.module.run_command(cmd)
def delete_vlan(self):
cmd = [self.module.get_bin_path('dladm', True)]
cmd.append('delete-vlan')
if self.temporary:
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
def is_valid_vlan_id(self):
return 0 <= int(self.vlan_id) <= 4095
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='str'),
link=dict(default=None, type='str'),
vlan_id=dict(default=0, aliases=['vid']),
temporary=dict(default=False, type='bool'),
state=dict(default='present', choices=['absent', 'present']),
),
required_if=[
['state', 'present', ['vlan_id', 'link', 'name']],
],
supports_check_mode=True
)
vlan = VLAN(module)
rc = None
out = ''
err = ''
result = {}
result['name'] = vlan.name
result['link'] = vlan.link
result['state'] = vlan.state
result['temporary'] = vlan.temporary
if int(vlan.vlan_id) != 0:
if not vlan.is_valid_vlan_id():
module.fail_json(msg='Invalid VLAN id value',
name=vlan.name,
state=vlan.state,
link=vlan.link,
vlan_id=vlan.vlan_id)
result['vlan_id'] = vlan.vlan_id
if vlan.state == 'absent':
if vlan.vlan_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = vlan.delete_vlan()
if rc != 0:
module.fail_json(name=vlan.name, msg=err, rc=rc)
elif vlan.state == 'present':
if not vlan.vlan_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = vlan.create_vlan()
if rc is not None and rc != 0:
module.fail_json(name=vlan.name, msg=err, rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
elpaso/arduino-slip | Esercizi_PyQT/Esercizio_1-1/Esercizio_1-1_00.py | 3 | 4316 | #!/usr/bin/python
ritardo = 2  # seconds
from PyQt4 import QtCore, QtGui
import serial
import sys, time
SERIALPORT = '/dev/ttyACM0'
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
MainWindow.setMaximumSize(QtCore.QSize(800, 600))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.pushButton = QtGui.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(220, 290, 251, 61))
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtGui.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(680, 510, 85, 27))
self.pushButton_2.setObjectName("pushButton_2")
self.textEdit = QtGui.QTextEdit(self.centralwidget)
self.textEdit.setGeometry(QtCore.QRect(60, 140, 561, 94))
self.textEdit.setObjectName("textEdit")
self.lineEdit = QtGui.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(60, 50, 461, 23))
self.lineEdit.setObjectName("lineEdit")
self.pushButton_3 = QtGui.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(220, 370, 251, 61))
self.pushButton_3.setObjectName("pushButton_3")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 27))
self.menubar.setObjectName("menubar")
self.menuMenu = QtGui.QMenu(self.menubar)
self.menuMenu.setObjectName("menuMenu")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.menubar.addAction(self.menuMenu.menuAction())
self.retranslateUi(MainWindow)
#QtCore.QObject.connect(MainWindow, QtCore.SIGNAL("destroyed()"), MainWindow.deleteLater)
QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL("clicked()"), invia_arduino_on)
QtCore.QObject.connect(self.pushButton_3, QtCore.SIGNAL("clicked()"), invia_arduino_off)
QtCore.QObject.connect(self.pushButton_2, QtCore.SIGNAL("clicked()"), quit_gui)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Python QT e Arduino Esercizio 1", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton.setText(QtGui.QApplication.translate("MainWindow", "Led 13 On", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_2.setText(QtGui.QApplication.translate("MainWindow", "Quit", None, QtGui.QApplication.UnicodeUTF8))
self.lineEdit.setText(QtGui.QApplication.translate("MainWindow", "Prova Scrittura Programma Python Qt4", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_3.setText(QtGui.QApplication.translate("MainWindow", "Led 13 Off", None, QtGui.QApplication.UnicodeUTF8))
self.menuMenu.setTitle(QtGui.QApplication.translate("MainWindow", "Menu", None, QtGui.QApplication.UnicodeUTF8))
def invia_arduino_on():
testo_arduino = "0"
arduino.write(testo_arduino)
msg="Acceso Led 13"
ui.lineEdit.setText(msg)
ui.textEdit.append(str(msg))
def invia_arduino_off():
testo_arduino = "1"
arduino.write(testo_arduino)
msg="Spento Led 13"
ui.lineEdit.setText(msg)
ui.textEdit.append(str(msg))
def quit_gui():
quit()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
try:
arduino = serial.Serial(SERIALPORT, 9600, timeout=1)
time.sleep(ritardo)
except:
QtGui.QMessageBox.warning(MainWindow, "Errore Porta seriale", "Attenzione! Verificare porta seriale")
reply = QtGui.QMessageBox.question(MainWindow, "Errore Porta seriale", "Vuoi chiudere l'applicazione?", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes)
if reply == QtGui.QMessageBox.Yes:
quit()
else:
pass
#time.sleep(ritardo)
MainWindow.show()
sys.exit(app.exec_())
arduino.close()
| gpl-2.0 |
translate/pootle | pootle/apps/pootle_app/management/commands/revision.py | 11 | 1070 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
from django.core.management.base import BaseCommand
from pootle.core.models import Revision
from . import SkipChecksMixin
class Command(SkipChecksMixin, BaseCommand):
help = "Print Pootle's current revision."
skip_system_check_tags = ('data', )
def add_arguments(self, parser):
parser.add_argument(
'--restore',
action='store_true',
default=False,
dest='restore',
help='Restore the current revision number from the DB.',
)
def handle(self, **options):
if options['restore']:
from pootle_store.models import Unit
Revision.set(Unit.max_revision())
self.stdout.write('%s' % Revision.get())
| gpl-3.0 |
stormi/weblate | weblate/trans/tests/test_charts.py | 9 | 3501 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for charts and widgets.
"""
from weblate.trans.tests.test_views import ViewTestCase
from django.core.urlresolvers import reverse
import json
class ChartsTest(ViewTestCase):
'''
Testing of charts.
'''
def assert_json_chart_data(self, response):
"""
Tests whether response has valid json chart data.
"""
self.assertEqual(response.get('Content-Type'), 'application/json')
data = json.loads(response.content)
self.assertTrue('series' in data)
self.assertTrue('labels' in data)
def test_activity_monthly(self):
'''
Test of monthly activity charts.
'''
response = self.client.get(
reverse('monthly_activity')
)
self.assert_json_chart_data(response)
response = self.client.get(
reverse('monthly_activity', kwargs=self.kw_project)
)
self.assert_json_chart_data(response)
response = self.client.get(
reverse('monthly_activity', kwargs=self.kw_subproject)
)
self.assert_json_chart_data(response)
response = self.client.get(
reverse('monthly_activity', kwargs=self.kw_translation)
)
self.assert_json_chart_data(response)
response = self.client.get(
reverse('monthly_activity', kwargs={'lang': 'cs'})
)
self.assert_json_chart_data(response)
response = self.client.get(
reverse(
'monthly_activity',
kwargs={'user': self.user.username}
)
)
self.assert_json_chart_data(response)
def test_activity_yearly(self):
'''
Test of yearly activity charts.
'''
response = self.client.get(
reverse('yearly_activity')
)
self.assert_json_chart_data(response)
response = self.client.get(
reverse('yearly_activity', kwargs=self.kw_project)
)
self.assert_json_chart_data(response)
response = self.client.get(
reverse('yearly_activity', kwargs=self.kw_subproject)
)
self.assert_json_chart_data(response)
response = self.client.get(
reverse('yearly_activity', kwargs=self.kw_translation)
)
self.assert_json_chart_data(response)
response = self.client.get(
reverse('yearly_activity', kwargs={'lang': 'cs'})
)
self.assert_json_chart_data(response)
response = self.client.get(
reverse(
'yearly_activity',
kwargs={'user': self.user.username}
)
)
self.assert_json_chart_data(response)
| gpl-3.0 |
dCache/dcache-docker | dcache/deps/.vim/bundle/jedi-vim/jedi/test/test_evaluate/test_pyc.py | 26 | 1684 | """
Test completions from *.pyc files:
- generate a dummy python module
- compile the dummy module to generate a *.pyc
- delete the pure python dummy module
- try jedi on the generated *.pyc
"""
import os
import shutil
import sys
import pytest
import jedi
from ..helpers import cwd_at
SRC = """class Foo:
pass
class Bar:
pass
"""
def generate_pyc():
os.mkdir("dummy_package")
with open("dummy_package/__init__.py", 'w'):
pass
with open("dummy_package/dummy.py", 'w') as f:
f.write(SRC)
import compileall
compileall.compile_file("dummy_package/dummy.py")
os.remove("dummy_package/dummy.py")
if sys.version_info[0] == 3:
# Python3 specific:
# To import pyc modules, we must move them out of the __pycache__
# directory and rename them to remove ".cpython-%s%d"
# see: http://stackoverflow.com/questions/11648440/python-does-not-detect-pyc-files
for f in os.listdir("dummy_package/__pycache__"):
dst = f.replace('.cpython-%s%s' % sys.version_info[:2], "")
dst = os.path.join("dummy_package", dst)
shutil.copy(os.path.join("dummy_package/__pycache__", f), dst)
# Python 2.6 does not necessarily come with `compileall.compile_file`.
@pytest.mark.skipif("sys.version_info < (2,7)")
@cwd_at('test/test_evaluate')
def test_pyc():
"""
The list of completion must be greater than 2.
"""
try:
generate_pyc()
s = jedi.Script("from dummy_package import dummy; dummy.", path='blub.py')
assert len(s.completions()) >= 2
finally:
shutil.rmtree("dummy_package")
if __name__ == "__main__":
test_pyc()
| gpl-3.0 |
dmnyu/bitcurator | externals/py3fpdf/tools/designer.py | 8 | 26856 | #!/usr/bin/python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Visual Template designer for PyFPDF (using wxPython OGL library)"
__author__ = "Mariano Reingart <reingart@gmail.com>"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "GPL 3.0"
__version__ = "1.01e"
# Based on:
# * pySketch.py wxPython sample application
# * OGL.py and other wxPython demo modules
import os, sys
import wx
import wx.lib.ogl as ogl
from wx.lib.wordwrap import wordwrap
try:
from template import Template
except ImportError:
# we are frozen?
from pyfpdf_hg.template import Template
DEBUG = True
class CustomDialog(wx.Dialog):
"A dinamyc dialog to ask user about arbitrary fields"
def __init__(self, parent, ID, title, size=wx.DefaultSize,
pos=wx.DefaultPosition, style=wx.DEFAULT_DIALOG_STYLE,
fields=None, data=None):
wx.Dialog.__init__ (self, parent, ID, title, pos, size, style)
sizer = wx.BoxSizer(wx.VERTICAL)
self.textctrls = {}
for field in fields:
box = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, -1, field)
label.SetHelpText("This is the help text for the label")
box.Add(label, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
text = wx.TextCtrl(self, -1, "", size=(80,-1))
text.SetHelpText("Here's some help text for field #1")
if field in data:
text.SetValue(repr(data[field]))
box.Add(text, 1, wx.ALIGN_CENTRE|wx.ALL, 1)
sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 1)
self.textctrls[field] = text
line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP, 5)
btnsizer = wx.StdDialogButtonSizer()
btn = wx.Button(self, wx.ID_OK)
btn.SetHelpText("The OK button completes the dialog")
btn.SetDefault()
btnsizer.AddButton(btn)
btn = wx.Button(self, wx.ID_CANCEL)
btn.SetHelpText("The Cancel button cancels the dialog. (Cool, huh?)")
btnsizer.AddButton(btn)
btnsizer.Realize()
sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
self.SetSizer(sizer)
sizer.Fit(self)
@classmethod
def do_input(Class, parent, title, fields, data):
dlg = Class(parent, -1, title, size=(350, 200),
style=wx.DEFAULT_DIALOG_STYLE, # & ~wx.CLOSE_BOX,
fields=fields, data=data
)
dlg.CenterOnScreen()
while 1:
val = dlg.ShowModal()
if val == wx.ID_OK:
values = {}
for field in fields:
try:
values[field] = eval(dlg.textctrls[field].GetValue())
except Exception, e:
msg = wx.MessageDialog(parent, unicode(e),
"Error in field %s" % field,
wx.OK | wx.ICON_INFORMATION
)
msg.ShowModal()
msg.Destroy()
break
else:
return dict([(field, values[field]) for field in fields])
else:
return None
class MyEvtHandler(ogl.ShapeEvtHandler):
"Custom Event Handler for Shapes"
def __init__(self, callback):
ogl.ShapeEvtHandler.__init__(self)
self.callback = callback
def OnLeftClick(self, x, y, keys=0, attachment=0):
shape = self.GetShape()
canvas = shape.GetCanvas()
dc = wx.ClientDC(canvas)
canvas.PrepareDC(dc)
if shape.Selected() and keys & ogl.KEY_SHIFT:
shape.Select(False, dc)
#canvas.Redraw(dc)
canvas.Refresh(False)
else:
redraw = False
shapeList = canvas.GetDiagram().GetShapeList()
toUnselect = []
for s in shapeList:
if s.Selected() and not keys & ogl.KEY_SHIFT:
# If we unselect it now then some of the objects in
# shapeList will become invalid (the control points are
# shapes too!) and bad things will happen...
toUnselect.append(s)
shape.Select(True, dc)
if toUnselect:
for s in toUnselect:
s.Select(False, dc)
##canvas.Redraw(dc)
canvas.Refresh(False)
self.callback()
def OnEndDragLeft(self, x, y, keys=0, attachment=0):
shape = self.GetShape()
ogl.ShapeEvtHandler.OnEndDragLeft(self, x, y, keys, attachment)
if not shape.Selected():
self.OnLeftClick(x, y, keys, attachment)
self.callback()
def OnSizingEndDragLeft(self, pt, x, y, keys, attch):
ogl.ShapeEvtHandler.OnSizingEndDragLeft(self, pt, x, y, keys, attch)
self.callback()
def OnMovePost(self, dc, x, y, oldX, oldY, display):
shape = self.GetShape()
ogl.ShapeEvtHandler.OnMovePost(self, dc, x, y, oldX, oldY, display)
self.callback()
if "wxMac" in wx.PlatformInfo:
shape.GetCanvas().Refresh(False)
def OnLeftDoubleClick(self, x, y, keys = 0, attachment = 0):
self.callback("LeftDoubleClick")
def OnRightClick(self, *dontcare):
self.callback("RightClick")
class Element(object):
"Visual class that represent a placeholder in the template"
fields = ['name', 'type',
'x1', 'y1', 'x2', 'y2',
'font', 'size',
'bold', 'italic', 'underline',
'foreground', 'background',
'align', 'text', 'priority',]
def __init__(self, canvas=None, frame=None, zoom=5.0, static=False, **kwargs):
self.kwargs = kwargs
self.zoom = zoom
self.frame = frame
self.canvas = canvas
self.static = static
name = kwargs['name']
type = kwargs['type']
x, y, w, h = self.set_coordinates(kwargs['x1'], kwargs['y1'], kwargs['x2'], kwargs['y2'])
text = kwargs['text']
shape = self.shape = ogl.RectangleShape(w, h)
if not static:
shape.SetDraggable(True, True)
shape.SetX(x)
shape.SetY(y)
#if pen: shape.SetPen(pen)
#if brush: shape.SetBrush(brush)
shape.SetBrush(wx.TRANSPARENT_BRUSH)
if type not in ('L', 'B', 'BC'):
if not static:
pen = wx.LIGHT_GREY_PEN
else:
pen = wx.RED_PEN
shape.SetPen(pen)
self.text = kwargs['text']
evthandler = MyEvtHandler(self.evt_callback)
evthandler.SetShape(shape)
evthandler.SetPreviousHandler(shape.GetEventHandler())
shape.SetEventHandler(evthandler)
shape.SetCentreResize(False)
shape.SetMaintainAspectRatio(False)
canvas.AddShape( shape )
@classmethod
def new(Class, parent):
data = dict(name='some_name', type='T',
x1=5.0, y1=5.0, x2=100.0, y2=10.0,
font="Arial", size=12,
bold=False, italic=False, underline=False,
foreground= 0x000000, background=0xFFFFFF,
align="L", text="", priority=0)
data = CustomDialog.do_input(parent, 'New element', Class.fields, data)
if data:
return Class(canvas=parent.canvas, frame=parent, **data)
def edit(self):
"Edit current element (show a dialog box with all fields)"
data = self.kwargs.copy()
x1, y1, x2, y2 = self.get_coordinates()
data.update(dict(name=self.name,
text=self.text,
x1=x1, y1=y1, x2=x2, y2=y2,
))
data = CustomDialog.do_input(self.frame, 'Edit element', self.fields, data)
if data:
self.kwargs.update(data)
self.name = data['name']
self.text = data['text']
x,y, w, h = self.set_coordinates(data['x1'], data['y1'], data['x2'], data['y2'])
self.shape.SetX(x)
self.shape.SetY(y)
self.shape.SetWidth(w)
self.shape.SetHeight(h)
self.canvas.Refresh(False)
self.canvas.GetDiagram().ShowAll(1)
def edit_text(self):
"Allow text edition (i.e. for doubleclick)"
dlg = wx.TextEntryDialog(
self.frame, 'Text for %s' % self.name,
'Edit Text', '')
if self.text:
dlg.SetValue(self.text)
if dlg.ShowModal() == wx.ID_OK:
self.text = dlg.GetValue().encode("latin1")
dlg.Destroy()
def copy(self):
"Return an identical duplicate"
kwargs = self.as_dict()
element = Element(canvas=self.canvas, frame=self.frame, zoom=self.zoom, static=self.static, **kwargs)
return element
def remove(self):
"Erases visual shape from OGL canvas (element must be deleted manually)"
self.canvas.RemoveShape(self.shape)
def move(self, dx, dy):
"Change pdf coordinates (converting to wx internal values)"
x1, y1, x2, y2 = self.get_coordinates()
x1 += dx
x2 += dx
y1 += dy
y2 += dy
x, y, w, h = self.set_coordinates(x1, y1, x2, y2)
self.shape.SetX(x)
self.shape.SetY(y)
def evt_callback(self, evt_type=None):
"Event dispatcher"
if evt_type=="LeftDoubleClick":
self.edit_text()
if evt_type=='RightClick':
self.edit()
# update the status bar
x1, y1, x2, y2 = self.get_coordinates()
self.frame.SetStatusText("%s (%0.2f, %0.2f) - (%0.2f, %0.2f)" %
(self.name, x1, y1, x2, y2))
def get_coordinates(self):
"Convert from wx to pdf coordinates"
x, y = self.shape.GetX(), self.shape.GetY()
w, h = self.shape.GetBoundingBoxMax()
w -= 1
h -= 1
x1 = x/self.zoom - w/self.zoom/2.0
x2 = x/self.zoom + w/self.zoom/2.0
y1 = y/self.zoom - h/self.zoom/2.0
y2 = y/self.zoom + h/self.zoom/2.0
return x1, y1, x2, y2
def set_coordinates(self, x1, y1, x2, y2):
"Convert from pdf to wx coordinates"
x1 = x1 * self.zoom
x2 = x2 * self.zoom
y1 = y1 * self.zoom
y2 = y2 * self.zoom
# shapes seems to be centred, pdf coord not
w = max(x1, x2) - min(x1, x2) + 1
h = max(y1, y2) - min(y1, y2) + 1
x = (min(x1, x2) + w/2.0)
y = (min(y1, y2) + h/2.0)
return x, y, w, h
def text(self, txt=None):
if txt is not None:
if not isinstance(txt,str):
txt = str(txt)
self.kwargs['text'] = txt
self.shape.ClearText()
for line in txt.split('\n'):
self.shape.AddText(unicode(line, "latin1"))
self.canvas.Refresh(False)
return self.kwargs['text']
text = property(text, text)
def set_x(self, x):
self.shape.SetX(x)
self.canvas.Refresh(False)
self.evt_callback()
def set_y(self, y):
self.shape.SetY(y)
self.canvas.Refresh(False)
self.evt_callback()
def get_x(self):
return self.shape.GetX()
def get_y(self):
return self.shape.GetY()
x = property(get_x, set_x)
y = property(get_y, set_y)
def selected(self, sel=None):
if sel is not None:
print "Setting Select(%s)" % sel
self.shape.Select(sel)
return self.shape.Selected()
selected = property(selected, selected)
def name(self, name=None):
if name is not None:
self.kwargs['name'] = name
return self.kwargs['name']
name = property(name, name)
def __contains__(self, k):
"Implement in keyword for searchs"
return k in self.name.lower() or self.text and k in self.text.lower()
def as_dict(self):
"Return a dictionary representation, used by pyfpdf"
d = self.kwargs
x1, y1, x2, y2 = self.get_coordinates()
d.update({
'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'text': self.text})
return d
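
# --- Illustrative sketch (editor's addition, not part of the original
# tool): the pdf -> wx mapping used by Element.set_coordinates scales by
# the zoom factor, pads the width by one pixel and centres the shape. The
# coordinates below are made-up values.
def _demo_pdf_to_wx(zoom=5.0):
    x1, x2 = 5.0, 100.0                         # pdf x-range (mm)
    w = (max(x1, x2) - min(x1, x2)) * zoom + 1  # shape width in pixels
    x = min(x1, x2) * zoom + w / 2.0            # shape centre
    assert (w, x) == (476.0, 263.0)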
class AppFrame(wx.Frame):
"OGL Designer main window"
title = "PyFPDF Template Designer (wx OGL)"
def __init__(self):
wx.Frame.__init__( self,
None, -1, self.title,
size=(640,480),
style=wx.DEFAULT_FRAME_STYLE )
sys.excepthook = self.except_hook
self.filename = ""
# Create a toolbar:
tsize = (16,16)
self.toolbar = self.CreateToolBar(wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT)
artBmp = wx.ArtProvider.GetBitmap
self.toolbar.AddSimpleTool(
wx.ID_NEW, artBmp(wx.ART_NEW, wx.ART_TOOLBAR, tsize), "New")
self.toolbar.AddSimpleTool(
wx.ID_OPEN, artBmp(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, tsize), "Open")
self.toolbar.AddSimpleTool(
wx.ID_SAVE, artBmp(wx.ART_FILE_SAVE, wx.ART_TOOLBAR, tsize), "Save")
self.toolbar.AddSimpleTool(
wx.ID_SAVEAS, artBmp(wx.ART_FILE_SAVE_AS, wx.ART_TOOLBAR, tsize),
"Save As...")
#-------
self.toolbar.AddSeparator()
self.toolbar.AddSimpleTool(
wx.ID_UNDO, artBmp(wx.ART_UNDO, wx.ART_TOOLBAR, tsize), "Undo")
self.toolbar.AddSimpleTool(
wx.ID_REDO, artBmp(wx.ART_REDO, wx.ART_TOOLBAR, tsize), "Redo")
self.toolbar.AddSeparator()
#-------
self.toolbar.AddSimpleTool(
wx.ID_CUT, artBmp(wx.ART_CUT, wx.ART_TOOLBAR, tsize), "Remove")
self.toolbar.AddSimpleTool(
wx.ID_COPY, artBmp(wx.ART_COPY, wx.ART_TOOLBAR, tsize), "Duplicate")
self.toolbar.AddSimpleTool(
wx.ID_PASTE, artBmp(wx.ART_PASTE, wx.ART_TOOLBAR, tsize), "Insert")
self.toolbar.AddSeparator()
self.toolbar.AddSimpleTool(
wx.ID_FIND, artBmp(wx.ART_FIND, wx.ART_TOOLBAR, tsize), "Find")
self.toolbar.AddSimpleTool(
wx.ID_REPLACE, artBmp(wx.ART_FIND_AND_REPLACE, wx.ART_TOOLBAR, tsize), "Modify")
self.toolbar.AddSeparator()
self.toolbar.AddSimpleTool(
wx.ID_PRINT, artBmp(wx.ART_PRINT, wx.ART_TOOLBAR, tsize), "Print")
self.toolbar.AddSimpleTool(
wx.ID_ABOUT, artBmp(wx.ART_HELP, wx.ART_TOOLBAR, tsize), "About")
self.toolbar.Realize()
self.toolbar.EnableTool(wx.ID_SAVEAS, False)
self.toolbar.EnableTool(wx.ID_UNDO, False)
self.toolbar.EnableTool(wx.ID_REDO, False)
menu_handlers = [
(wx.ID_NEW, self.do_new),
(wx.ID_OPEN, self.do_open),
(wx.ID_SAVE, self.do_save),
(wx.ID_PRINT, self.do_print),
(wx.ID_FIND, self.do_find),
(wx.ID_REPLACE, self.do_modify),
(wx.ID_CUT, self.do_cut),
(wx.ID_COPY, self.do_copy),
(wx.ID_PASTE, self.do_paste),
(wx.ID_ABOUT, self.do_about),
]
for menu_id, handler in menu_handlers:
self.Bind(wx.EVT_MENU, handler, id = menu_id)
sizer = wx.BoxSizer(wx.VERTICAL)
# put stuff into sizer
self.CreateStatusBar()
canvas = self.canvas = ogl.ShapeCanvas( self )
maxWidth = 2000
maxHeight = 2000
canvas.SetScrollbars(20, 20, maxWidth/20, maxHeight/20)
sizer.Add( canvas, 1, wx.GROW )
canvas.SetBackgroundColour("WHITE") #
diagram = self.diagram = ogl.Diagram()
canvas.SetDiagram( diagram )
diagram.SetCanvas( canvas )
diagram.SetSnapToGrid( False )
# apply sizer
self.SetSizer(sizer)
self.SetAutoLayout(1)
self.Show(1)
self.Bind(wx.EVT_CHAR_HOOK, self.on_key_event)
self.elements = []
def on_key_event(self, event):
""" Respond to a keypress event.
We make the arrow keys move the selected object(s) by one pixel in
the given direction.
"""
step = 1
if event.ControlDown():
step = 20
if event.GetKeyCode() == wx.WXK_UP:
self.move_elements(0, -step)
elif event.GetKeyCode() == wx.WXK_DOWN:
self.move_elements(0, step)
elif event.GetKeyCode() == wx.WXK_LEFT:
self.move_elements(-step, 0)
elif event.GetKeyCode() == wx.WXK_RIGHT:
self.move_elements(step, 0)
elif event.GetKeyCode() == wx.WXK_DELETE:
self.do_cut()
else:
event.Skip()
def do_new(self, evt=None):
for element in self.elements:
element.remove()
self.elements = []
# draw paper size guides
for k, (w, h) in [('legal', (216, 356)), ('A4', (210, 297)), ('letter', (216, 279))]:
self.create_elements(
k, 'R', 0, 0, w, h,
size=70, foreground=0x808080, priority=-100,
canvas=self.canvas, frame=self, static=True)
self.diagram.ShowAll( 1 )
def do_open(self, evt):
dlg = wx.FileDialog(
self, message="Choose a file",
defaultDir=os.getcwd(),
defaultFile="invoice.csv",
wildcard="CSV Files (*.csv)|*.csv",
style=wx.OPEN
)
if dlg.ShowModal() == wx.ID_OK:
# This returns a Python list of files that were selected.
self.filename = dlg.GetPaths()[0]
dlg.Destroy()
self.SetTitle(self.filename + " - " + self.title)
self.do_new()
tmp = []
for lno, linea in enumerate(open(self.filename).readlines()):
if DEBUG: print "processing line", lno, linea
args = []
for i,v in enumerate(linea.split(";")):
if not v.startswith("'"):
v = v.replace(",",".")
else:
v = v#.decode('latin1')
if v.strip()=='':
v = None
else:
v = eval(v.strip())
args.append(v)
tmp.append(args)
# sort by z-order (priority)
for args in sorted(tmp, key=lambda t: t[-1]):
if DEBUG: print args
self.create_elements(*args)
self.diagram.ShowAll( 1 ) #
return True
def do_save(self, evt, filename=None):
try:
from time import gmtime, strftime
ts = strftime("%Y%m%d%H%M%S", gmtime())
os.rename(self.filename, self.filename + ts + ".bak")
except Exception, e:
if DEBUG: print e
pass
def csv_repr(v, decimal_sep="."):
if isinstance(v, float):
return ("%0.2f" % v).replace(".", decimal_sep)
else:
return repr(v)
f = open(self.filename, "w")
for element in sorted(self.elements, key=lambda e:e.name):
if element.static:
continue
d = element.as_dict()
l = [d['name'], d['type'],
d['x1'], d['y1'], d['x2'], d['y2'],
d['font'], d['size'],
d['bold'], d['italic'], d['underline'],
d['foreground'], d['background'],
d['align'], d['text'], d['priority'],
]
f.write(";".join([csv_repr(v) for v in l]))
f.write("\n")
f.close()
def do_print(self, evt):
# create the renderer with the PDF properties
t = Template(elements=[e.as_dict() for e in self.elements if not e.static])
t.add_page()
if not t['logo'] or not os.path.exists(t['logo']):
# put a default logo so it doesn't throw an exception
logo = os.path.join(os.path.dirname(__file__), 'tutorial','logo.png')
t.set('logo', logo)
try:
t.render(self.filename +".pdf")
except:
if DEBUG and False:
import pdb;
pdb.pm()
else:
raise
if sys.platform=="linux2":
os.system("evince ""%s""" % self.filename +".pdf")
else:
os.startfile(self.filename +".pdf")
def do_find(self, evt):
# search by element name or text
dlg = wx.TextEntryDialog(
self, 'Enter text to search for',
'Find Text', '')
if dlg.ShowModal() == wx.ID_OK:
txt = dlg.GetValue().encode("latin1").lower()
for element in self.elements:
if txt in element:
element.selected = True
print "Found:", element.name
self.canvas.Refresh(False)
dlg.Destroy()
def do_cut(self, evt=None):
"Delete selected elements"
new_elements = []
for element in self.elements:
if element.selected:
print "Erasing:", element.name
element.selected = False
self.canvas.Refresh(False)
element.remove()
else:
new_elements.append(element)
self.elements = new_elements
self.canvas.Refresh(False)
self.diagram.ShowAll( 1 )
def do_copy(self, evt):
"Duplicate selected elements"
fields = ['qty', 'dx', 'dy']
data = {'qty': 1, 'dx': 0.0, 'dy': 5.0}
data = CustomDialog.do_input(self, 'Copy elements', fields, data)
if data:
new_elements = []
for i in range(1, data['qty']+1):
for element in self.elements:
if element.selected:
print "Copying:", element.name
new_element = element.copy()
name = new_element.name
if len(name)>2 and name[-2:].isdigit():
new_element.name = name[:-2] + "%02d" % (int(name[-2:])+i)
else:
new_element.name = new_element.name + "_copy"
new_element.selected = False
new_element.move(data['dx']*i, data['dy']*i)
new_elements.append(new_element)
self.elements.extend(new_elements)
self.canvas.Refresh(False)
self.diagram.ShowAll( 1 )
def do_paste(self, evt):
"Insert new elements"
element = Element.new(self)
if element:
self.canvas.Refresh(False)
self.elements.append(element)
self.diagram.ShowAll( 1 )
def do_modify(self, evt):
"Modify selected elements"
fields = ['dx', 'dy']
data = {'dx': 0.0, 'dy': 0.0}
data = CustomDialog.do_input(self, 'Modify (move) elements', fields, data)
if data:
self.move_elements(data['dx'], data['dy'])
self.canvas.Refresh(False)
self.diagram.ShowAll( 1 )
def create_elements(self, name, type, x1, y1, x2, y2,
font="Arial", size=12,
bold=False, italic=False, underline=False,
foreground= 0x000000, background=0xFFFFFF,
align="L", text="", priority=0, canvas=None, frame=None, static=False,
**kwargs):
element = Element(name=name, type=type, x1=x1, y1=y1, x2=x2, y2=y2,
font=font, size=size,
bold=bold, italic=italic, underline=underline,
foreground= foreground, background=background,
align=align, text=text, priority=priority,
canvas=canvas or self.canvas, frame=frame or self,
static=static)
self.elements.append(element)
def move_elements(self, x, y):
for element in self.elements:
if element.selected:
print "moving", element.name, x, y
element.x = element.x + x
element.y = element.y + y
def do_about(self, evt):
info = wx.AboutDialogInfo()
info.Name = self.title
info.Version = __version__
info.Copyright = __copyright__
info.Description = (
"Visual Template designer for PyFPDF (using wxPython OGL library)\n"
"Input files are CSV format describing the layout, separated by ;\n"
"Use toolbar buttons to open, save, print (preview) your template, "
"and there are buttons to find, add, remove or duplicate elements.\n"
"Over an element, a double left click opens edit text dialog, "
"and a right click opens edit properties dialog. \n"
"Multiple element can be selected with shift left click. \n"
"Use arrow keys or drag-and-drop to move elements.\n"
"For further information see project webpage:"
)
info.WebSite = ("http://code.google.com/p/pyfpdf/wiki/Templates",
"pyfpdf Google Code Project")
info.Developers = [ __author__, ]
info.License = wordwrap(__license__, 500, wx.ClientDC(self))
# Then we call wx.AboutBox giving it that info object
wx.AboutBox(info)
def except_hook(self, type, value, trace):
import traceback
exc = traceback.format_exception(type, value, trace)
for e in exc: wx.LogError(e)
wx.LogError('Unhandled Error: %s: %s'%(str(type), str(value)))
if __name__ == "__main__":
app = wx.PySimpleApp()
ogl.OGLInitialize()
frame = AppFrame()
app.MainLoop()
app.Destroy()
| gpl-3.0 |
briancurtin/python-openstacksdk | openstack/tests/unit/network/v2/test_qos_policy.py | 1 | 1835 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import uuid
from openstack.network.v2 import qos_policy
EXAMPLE = {
'id': 'IDENTIFIER',
'description': 'QoS policy description',
'name': 'qos-policy-name',
'shared': True,
'tenant_id': '2',
'rules': [uuid.uuid4().hex],
'is_default': False
}
class TestQoSPolicy(testtools.TestCase):
def test_basic(self):
sot = qos_policy.QoSPolicy()
self.assertEqual('policy', sot.resource_key)
self.assertEqual('policies', sot.resources_key)
self.assertEqual('/qos/policies', sot.base_path)
self.assertEqual('network', sot.service.service_type)
self.assertTrue(sot.allow_create)
self.assertTrue(sot.allow_get)
self.assertTrue(sot.allow_update)
self.assertTrue(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = qos_policy.QoSPolicy(**EXAMPLE)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertEqual(EXAMPLE['description'], sot.description)
self.assertEqual(EXAMPLE['name'], sot.name)
self.assertTrue(sot.is_shared)
self.assertEqual(EXAMPLE['tenant_id'], sot.project_id)
self.assertEqual(EXAMPLE['rules'], sot.rules)
self.assertEqual(EXAMPLE['is_default'], sot.is_default)
| apache-2.0 |
littlstar/chromium.src | tools/symsrc/pefile.py | 187 | 139621 | # -*- coding: Latin-1 -*-
"""pefile, Portable Executable reader module
All the PE file basic structures are available with their default names
as attributes of the instance returned.
Processed elements such as the import table are made available with lowercase
names, to differentiate them from the upper case basic structure names.
pefile has been tested against the limits of valid PE headers, that is, malware.
Lots of packed malware attempt to abuse the format way beyond its standard use.
To the best of my knowledge most of the abuses are handled gracefully.
Copyright (c) 2005, 2006, 2007, 2008 Ero Carrera <ero@dkbza.org>
All rights reserved.
For detailed copyright information see the file COPYING in
the root of the distribution archive.
"""
__author__ = 'Ero Carrera'
__version__ = '1.2.9.1'
__contact__ = 'ero@dkbza.org'
import os
import struct
import time
import math
import re
import exceptions
import string
import array
sha1, sha256, sha512, md5 = None, None, None, None
try:
import hashlib
sha1 = hashlib.sha1
sha256 = hashlib.sha256
sha512 = hashlib.sha512
md5 = hashlib.md5
except ImportError:
try:
import sha
sha1 = sha.new
except ImportError:
pass
try:
import md5
md5 = md5.new
except ImportError:
pass
fast_load = False
IMAGE_DOS_SIGNATURE = 0x5A4D
IMAGE_OS2_SIGNATURE = 0x454E
IMAGE_OS2_SIGNATURE_LE = 0x454C
IMAGE_VXD_SIGNATURE = 0x454C
IMAGE_NT_SIGNATURE = 0x00004550
IMAGE_NUMBEROF_DIRECTORY_ENTRIES= 16
IMAGE_ORDINAL_FLAG = 0x80000000L
IMAGE_ORDINAL_FLAG64 = 0x8000000000000000L
OPTIONAL_HEADER_MAGIC_PE = 0x10b
OPTIONAL_HEADER_MAGIC_PE_PLUS = 0x20b
directory_entry_types = [
('IMAGE_DIRECTORY_ENTRY_EXPORT', 0),
('IMAGE_DIRECTORY_ENTRY_IMPORT', 1),
('IMAGE_DIRECTORY_ENTRY_RESOURCE', 2),
('IMAGE_DIRECTORY_ENTRY_EXCEPTION', 3),
('IMAGE_DIRECTORY_ENTRY_SECURITY', 4),
('IMAGE_DIRECTORY_ENTRY_BASERELOC', 5),
('IMAGE_DIRECTORY_ENTRY_DEBUG', 6),
('IMAGE_DIRECTORY_ENTRY_COPYRIGHT', 7),
('IMAGE_DIRECTORY_ENTRY_GLOBALPTR', 8),
('IMAGE_DIRECTORY_ENTRY_TLS', 9),
('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', 10),
('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', 11),
('IMAGE_DIRECTORY_ENTRY_IAT', 12),
('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', 13),
('IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR',14),
('IMAGE_DIRECTORY_ENTRY_RESERVED', 15) ]
DIRECTORY_ENTRY = dict([(e[1], e[0]) for e in directory_entry_types]+directory_entry_types)
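# DIRECTORY_ENTRY (and the similar dicts built below) is deliberately
# bidirectional: it maps both value->name and name->value, so lookups work
# either way, e.g.:
#
#   >>> DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']
#   1
#   >>> DIRECTORY_ENTRY[1]
#   'IMAGE_DIRECTORY_ENTRY_IMPORT'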
image_characteristics = [
('IMAGE_FILE_RELOCS_STRIPPED', 0x0001),
('IMAGE_FILE_EXECUTABLE_IMAGE', 0x0002),
('IMAGE_FILE_LINE_NUMS_STRIPPED', 0x0004),
('IMAGE_FILE_LOCAL_SYMS_STRIPPED', 0x0008),
('IMAGE_FILE_AGGRESIVE_WS_TRIM', 0x0010),
('IMAGE_FILE_LARGE_ADDRESS_AWARE', 0x0020),
('IMAGE_FILE_16BIT_MACHINE', 0x0040),
('IMAGE_FILE_BYTES_REVERSED_LO', 0x0080),
('IMAGE_FILE_32BIT_MACHINE', 0x0100),
('IMAGE_FILE_DEBUG_STRIPPED', 0x0200),
('IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP', 0x0400),
('IMAGE_FILE_NET_RUN_FROM_SWAP', 0x0800),
('IMAGE_FILE_SYSTEM', 0x1000),
('IMAGE_FILE_DLL', 0x2000),
('IMAGE_FILE_UP_SYSTEM_ONLY', 0x4000),
('IMAGE_FILE_BYTES_REVERSED_HI', 0x8000) ]
IMAGE_CHARACTERISTICS = dict([(e[1], e[0]) for e in
image_characteristics]+image_characteristics)
section_characteristics = [
('IMAGE_SCN_CNT_CODE', 0x00000020),
('IMAGE_SCN_CNT_INITIALIZED_DATA', 0x00000040),
('IMAGE_SCN_CNT_UNINITIALIZED_DATA', 0x00000080),
('IMAGE_SCN_LNK_OTHER', 0x00000100),
('IMAGE_SCN_LNK_INFO', 0x00000200),
('IMAGE_SCN_LNK_REMOVE', 0x00000800),
('IMAGE_SCN_LNK_COMDAT', 0x00001000),
('IMAGE_SCN_MEM_FARDATA', 0x00008000),
('IMAGE_SCN_MEM_PURGEABLE', 0x00020000),
('IMAGE_SCN_MEM_16BIT', 0x00020000),
('IMAGE_SCN_MEM_LOCKED', 0x00040000),
('IMAGE_SCN_MEM_PRELOAD', 0x00080000),
('IMAGE_SCN_ALIGN_1BYTES', 0x00100000),
('IMAGE_SCN_ALIGN_2BYTES', 0x00200000),
('IMAGE_SCN_ALIGN_4BYTES', 0x00300000),
('IMAGE_SCN_ALIGN_8BYTES', 0x00400000),
('IMAGE_SCN_ALIGN_16BYTES', 0x00500000),
('IMAGE_SCN_ALIGN_32BYTES', 0x00600000),
('IMAGE_SCN_ALIGN_64BYTES', 0x00700000),
('IMAGE_SCN_ALIGN_128BYTES', 0x00800000),
('IMAGE_SCN_ALIGN_256BYTES', 0x00900000),
('IMAGE_SCN_ALIGN_512BYTES', 0x00A00000),
('IMAGE_SCN_ALIGN_1024BYTES', 0x00B00000),
('IMAGE_SCN_ALIGN_2048BYTES', 0x00C00000),
('IMAGE_SCN_ALIGN_4096BYTES', 0x00D00000),
('IMAGE_SCN_ALIGN_8192BYTES', 0x00E00000),
('IMAGE_SCN_ALIGN_MASK', 0x00F00000),
('IMAGE_SCN_LNK_NRELOC_OVFL', 0x01000000),
('IMAGE_SCN_MEM_DISCARDABLE', 0x02000000),
('IMAGE_SCN_MEM_NOT_CACHED', 0x04000000),
('IMAGE_SCN_MEM_NOT_PAGED', 0x08000000),
('IMAGE_SCN_MEM_SHARED', 0x10000000),
('IMAGE_SCN_MEM_EXECUTE', 0x20000000),
('IMAGE_SCN_MEM_READ', 0x40000000),
('IMAGE_SCN_MEM_WRITE', 0x80000000L) ]
SECTION_CHARACTERISTICS = dict([(e[1], e[0]) for e in
section_characteristics]+section_characteristics)
debug_types = [
('IMAGE_DEBUG_TYPE_UNKNOWN', 0),
('IMAGE_DEBUG_TYPE_COFF', 1),
('IMAGE_DEBUG_TYPE_CODEVIEW', 2),
('IMAGE_DEBUG_TYPE_FPO', 3),
('IMAGE_DEBUG_TYPE_MISC', 4),
('IMAGE_DEBUG_TYPE_EXCEPTION', 5),
('IMAGE_DEBUG_TYPE_FIXUP', 6),
('IMAGE_DEBUG_TYPE_OMAP_TO_SRC', 7),
('IMAGE_DEBUG_TYPE_OMAP_FROM_SRC', 8),
('IMAGE_DEBUG_TYPE_BORLAND', 9),
('IMAGE_DEBUG_TYPE_RESERVED10', 10) ]
DEBUG_TYPE = dict([(e[1], e[0]) for e in debug_types]+debug_types)
subsystem_types = [
('IMAGE_SUBSYSTEM_UNKNOWN', 0),
('IMAGE_SUBSYSTEM_NATIVE', 1),
('IMAGE_SUBSYSTEM_WINDOWS_GUI', 2),
('IMAGE_SUBSYSTEM_WINDOWS_CUI', 3),
('IMAGE_SUBSYSTEM_OS2_CUI', 5),
('IMAGE_SUBSYSTEM_POSIX_CUI', 7),
('IMAGE_SUBSYSTEM_WINDOWS_CE_GUI', 9),
('IMAGE_SUBSYSTEM_EFI_APPLICATION', 10),
('IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER', 11),
('IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER', 12),
('IMAGE_SUBSYSTEM_EFI_ROM', 13),
('IMAGE_SUBSYSTEM_XBOX', 14)]
SUBSYSTEM_TYPE = dict([(e[1], e[0]) for e in subsystem_types]+subsystem_types)
machine_types = [
('IMAGE_FILE_MACHINE_UNKNOWN', 0),
('IMAGE_FILE_MACHINE_AM33', 0x1d3),
('IMAGE_FILE_MACHINE_AMD64', 0x8664),
('IMAGE_FILE_MACHINE_ARM', 0x1c0),
('IMAGE_FILE_MACHINE_EBC', 0xebc),
('IMAGE_FILE_MACHINE_I386', 0x14c),
('IMAGE_FILE_MACHINE_IA64', 0x200),
('IMAGE_FILE_MACHINE_MR32', 0x9041),
('IMAGE_FILE_MACHINE_MIPS16', 0x266),
('IMAGE_FILE_MACHINE_MIPSFPU', 0x366),
('IMAGE_FILE_MACHINE_MIPSFPU16',0x466),
('IMAGE_FILE_MACHINE_POWERPC', 0x1f0),
('IMAGE_FILE_MACHINE_POWERPCFP',0x1f1),
('IMAGE_FILE_MACHINE_R4000', 0x166),
('IMAGE_FILE_MACHINE_SH3', 0x1a2),
('IMAGE_FILE_MACHINE_SH3DSP', 0x1a3),
('IMAGE_FILE_MACHINE_SH4', 0x1a6),
('IMAGE_FILE_MACHINE_SH5', 0x1a8),
('IMAGE_FILE_MACHINE_THUMB', 0x1c2),
('IMAGE_FILE_MACHINE_WCEMIPSV2',0x169),
]
MACHINE_TYPE = dict([(e[1], e[0]) for e in machine_types]+machine_types)
relocation_types = [
('IMAGE_REL_BASED_ABSOLUTE', 0),
('IMAGE_REL_BASED_HIGH', 1),
('IMAGE_REL_BASED_LOW', 2),
('IMAGE_REL_BASED_HIGHLOW', 3),
('IMAGE_REL_BASED_HIGHADJ', 4),
('IMAGE_REL_BASED_MIPS_JMPADDR', 5),
('IMAGE_REL_BASED_SECTION', 6),
('IMAGE_REL_BASED_REL', 7),
('IMAGE_REL_BASED_MIPS_JMPADDR16', 9),
('IMAGE_REL_BASED_IA64_IMM64', 9),
('IMAGE_REL_BASED_DIR64', 10),
('IMAGE_REL_BASED_HIGH3ADJ', 11) ]
RELOCATION_TYPE = dict([(e[1], e[0]) for e in relocation_types]+relocation_types)
dll_characteristics = [
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0001', 0x0001),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0002', 0x0002),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0004', 0x0004),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0008', 0x0008),
('IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE', 0x0040),
('IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY', 0x0080),
('IMAGE_DLL_CHARACTERISTICS_NX_COMPAT', 0x0100),
('IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION', 0x0200),
('IMAGE_DLL_CHARACTERISTICS_NO_SEH', 0x0400),
('IMAGE_DLL_CHARACTERISTICS_NO_BIND', 0x0800),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x1000', 0x1000),
('IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER', 0x2000),
('IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE', 0x8000) ]
DLL_CHARACTERISTICS = dict([(e[1], e[0]) for e in dll_characteristics]+dll_characteristics)
# Resource types
resource_type = [
('RT_CURSOR', 1),
('RT_BITMAP', 2),
('RT_ICON', 3),
('RT_MENU', 4),
('RT_DIALOG', 5),
('RT_STRING', 6),
('RT_FONTDIR', 7),
('RT_FONT', 8),
('RT_ACCELERATOR', 9),
('RT_RCDATA', 10),
('RT_MESSAGETABLE', 11),
('RT_GROUP_CURSOR', 12),
('RT_GROUP_ICON', 14),
('RT_VERSION', 16),
('RT_DLGINCLUDE', 17),
('RT_PLUGPLAY', 19),
('RT_VXD', 20),
('RT_ANICURSOR', 21),
('RT_ANIICON', 22),
('RT_HTML', 23),
('RT_MANIFEST', 24) ]
RESOURCE_TYPE = dict([(e[1], e[0]) for e in resource_type]+resource_type)
# Language definitions
lang = [
('LANG_NEUTRAL', 0x00),
('LANG_INVARIANT', 0x7f),
('LANG_AFRIKAANS', 0x36),
('LANG_ALBANIAN', 0x1c),
('LANG_ARABIC', 0x01),
('LANG_ARMENIAN', 0x2b),
('LANG_ASSAMESE', 0x4d),
('LANG_AZERI', 0x2c),
('LANG_BASQUE', 0x2d),
('LANG_BELARUSIAN', 0x23),
('LANG_BENGALI', 0x45),
('LANG_BULGARIAN', 0x02),
('LANG_CATALAN', 0x03),
('LANG_CHINESE', 0x04),
('LANG_CROATIAN', 0x1a),
('LANG_CZECH', 0x05),
('LANG_DANISH', 0x06),
('LANG_DIVEHI', 0x65),
('LANG_DUTCH', 0x13),
('LANG_ENGLISH', 0x09),
('LANG_ESTONIAN', 0x25),
('LANG_FAEROESE', 0x38),
('LANG_FARSI', 0x29),
('LANG_FINNISH', 0x0b),
('LANG_FRENCH', 0x0c),
('LANG_GALICIAN', 0x56),
('LANG_GEORGIAN', 0x37),
('LANG_GERMAN', 0x07),
('LANG_GREEK', 0x08),
('LANG_GUJARATI', 0x47),
('LANG_HEBREW', 0x0d),
('LANG_HINDI', 0x39),
('LANG_HUNGARIAN', 0x0e),
('LANG_ICELANDIC', 0x0f),
('LANG_INDONESIAN', 0x21),
('LANG_ITALIAN', 0x10),
('LANG_JAPANESE', 0x11),
('LANG_KANNADA', 0x4b),
('LANG_KASHMIRI', 0x60),
('LANG_KAZAK', 0x3f),
('LANG_KONKANI', 0x57),
('LANG_KOREAN', 0x12),
('LANG_KYRGYZ', 0x40),
('LANG_LATVIAN', 0x26),
('LANG_LITHUANIAN', 0x27),
('LANG_MACEDONIAN', 0x2f),
('LANG_MALAY', 0x3e),
('LANG_MALAYALAM', 0x4c),
('LANG_MANIPURI', 0x58),
('LANG_MARATHI', 0x4e),
('LANG_MONGOLIAN', 0x50),
('LANG_NEPALI', 0x61),
('LANG_NORWEGIAN', 0x14),
('LANG_ORIYA', 0x48),
('LANG_POLISH', 0x15),
('LANG_PORTUGUESE', 0x16),
('LANG_PUNJABI', 0x46),
('LANG_ROMANIAN', 0x18),
('LANG_RUSSIAN', 0x19),
('LANG_SANSKRIT', 0x4f),
('LANG_SERBIAN', 0x1a),
('LANG_SINDHI', 0x59),
('LANG_SLOVAK', 0x1b),
('LANG_SLOVENIAN', 0x24),
('LANG_SPANISH', 0x0a),
('LANG_SWAHILI', 0x41),
('LANG_SWEDISH', 0x1d),
('LANG_SYRIAC', 0x5a),
('LANG_TAMIL', 0x49),
('LANG_TATAR', 0x44),
('LANG_TELUGU', 0x4a),
('LANG_THAI', 0x1e),
('LANG_TURKISH', 0x1f),
('LANG_UKRAINIAN', 0x22),
('LANG_URDU', 0x20),
('LANG_UZBEK', 0x43),
('LANG_VIETNAMESE', 0x2a),
('LANG_GAELIC', 0x3c),
('LANG_MALTESE', 0x3a),
('LANG_MAORI', 0x28),
('LANG_RHAETO_ROMANCE',0x17),
('LANG_SAAMI', 0x3b),
('LANG_SORBIAN', 0x2e),
('LANG_SUTU', 0x30),
('LANG_TSONGA', 0x31),
('LANG_TSWANA', 0x32),
('LANG_VENDA', 0x33),
('LANG_XHOSA', 0x34),
('LANG_ZULU', 0x35),
('LANG_ESPERANTO', 0x8f),
('LANG_WALON', 0x90),
('LANG_CORNISH', 0x91),
('LANG_WELSH', 0x92),
('LANG_BRETON', 0x93) ]
LANG = dict(lang+[(e[1], e[0]) for e in lang])
# Sublanguage definitions
sublang = [
('SUBLANG_NEUTRAL', 0x00),
('SUBLANG_DEFAULT', 0x01),
('SUBLANG_SYS_DEFAULT', 0x02),
('SUBLANG_ARABIC_SAUDI_ARABIA', 0x01),
('SUBLANG_ARABIC_IRAQ', 0x02),
('SUBLANG_ARABIC_EGYPT', 0x03),
('SUBLANG_ARABIC_LIBYA', 0x04),
('SUBLANG_ARABIC_ALGERIA', 0x05),
('SUBLANG_ARABIC_MOROCCO', 0x06),
('SUBLANG_ARABIC_TUNISIA', 0x07),
('SUBLANG_ARABIC_OMAN', 0x08),
('SUBLANG_ARABIC_YEMEN', 0x09),
('SUBLANG_ARABIC_SYRIA', 0x0a),
('SUBLANG_ARABIC_JORDAN', 0x0b),
('SUBLANG_ARABIC_LEBANON', 0x0c),
('SUBLANG_ARABIC_KUWAIT', 0x0d),
('SUBLANG_ARABIC_UAE', 0x0e),
('SUBLANG_ARABIC_BAHRAIN', 0x0f),
('SUBLANG_ARABIC_QATAR', 0x10),
('SUBLANG_AZERI_LATIN', 0x01),
('SUBLANG_AZERI_CYRILLIC', 0x02),
('SUBLANG_CHINESE_TRADITIONAL', 0x01),
('SUBLANG_CHINESE_SIMPLIFIED', 0x02),
('SUBLANG_CHINESE_HONGKONG', 0x03),
('SUBLANG_CHINESE_SINGAPORE', 0x04),
('SUBLANG_CHINESE_MACAU', 0x05),
('SUBLANG_DUTCH', 0x01),
('SUBLANG_DUTCH_BELGIAN', 0x02),
('SUBLANG_ENGLISH_US', 0x01),
('SUBLANG_ENGLISH_UK', 0x02),
('SUBLANG_ENGLISH_AUS', 0x03),
('SUBLANG_ENGLISH_CAN', 0x04),
('SUBLANG_ENGLISH_NZ', 0x05),
('SUBLANG_ENGLISH_EIRE', 0x06),
('SUBLANG_ENGLISH_SOUTH_AFRICA', 0x07),
('SUBLANG_ENGLISH_JAMAICA', 0x08),
('SUBLANG_ENGLISH_CARIBBEAN', 0x09),
('SUBLANG_ENGLISH_BELIZE', 0x0a),
('SUBLANG_ENGLISH_TRINIDAD', 0x0b),
('SUBLANG_ENGLISH_ZIMBABWE', 0x0c),
('SUBLANG_ENGLISH_PHILIPPINES', 0x0d),
('SUBLANG_FRENCH', 0x01),
('SUBLANG_FRENCH_BELGIAN', 0x02),
('SUBLANG_FRENCH_CANADIAN', 0x03),
('SUBLANG_FRENCH_SWISS', 0x04),
('SUBLANG_FRENCH_LUXEMBOURG', 0x05),
('SUBLANG_FRENCH_MONACO', 0x06),
('SUBLANG_GERMAN', 0x01),
('SUBLANG_GERMAN_SWISS', 0x02),
('SUBLANG_GERMAN_AUSTRIAN', 0x03),
('SUBLANG_GERMAN_LUXEMBOURG', 0x04),
('SUBLANG_GERMAN_LIECHTENSTEIN', 0x05),
('SUBLANG_ITALIAN', 0x01),
('SUBLANG_ITALIAN_SWISS', 0x02),
('SUBLANG_KASHMIRI_SASIA', 0x02),
('SUBLANG_KASHMIRI_INDIA', 0x02),
('SUBLANG_KOREAN', 0x01),
('SUBLANG_LITHUANIAN', 0x01),
('SUBLANG_MALAY_MALAYSIA', 0x01),
('SUBLANG_MALAY_BRUNEI_DARUSSALAM', 0x02),
('SUBLANG_NEPALI_INDIA', 0x02),
('SUBLANG_NORWEGIAN_BOKMAL', 0x01),
('SUBLANG_NORWEGIAN_NYNORSK', 0x02),
('SUBLANG_PORTUGUESE', 0x02),
('SUBLANG_PORTUGUESE_BRAZILIAN', 0x01),
('SUBLANG_SERBIAN_LATIN', 0x02),
('SUBLANG_SERBIAN_CYRILLIC', 0x03),
('SUBLANG_SPANISH', 0x01),
('SUBLANG_SPANISH_MEXICAN', 0x02),
('SUBLANG_SPANISH_MODERN', 0x03),
('SUBLANG_SPANISH_GUATEMALA', 0x04),
('SUBLANG_SPANISH_COSTA_RICA', 0x05),
('SUBLANG_SPANISH_PANAMA', 0x06),
('SUBLANG_SPANISH_DOMINICAN_REPUBLIC', 0x07),
('SUBLANG_SPANISH_VENEZUELA', 0x08),
('SUBLANG_SPANISH_COLOMBIA', 0x09),
('SUBLANG_SPANISH_PERU', 0x0a),
('SUBLANG_SPANISH_ARGENTINA', 0x0b),
('SUBLANG_SPANISH_ECUADOR', 0x0c),
('SUBLANG_SPANISH_CHILE', 0x0d),
('SUBLANG_SPANISH_URUGUAY', 0x0e),
('SUBLANG_SPANISH_PARAGUAY', 0x0f),
('SUBLANG_SPANISH_BOLIVIA', 0x10),
('SUBLANG_SPANISH_EL_SALVADOR', 0x11),
('SUBLANG_SPANISH_HONDURAS', 0x12),
('SUBLANG_SPANISH_NICARAGUA', 0x13),
('SUBLANG_SPANISH_PUERTO_RICO', 0x14),
('SUBLANG_SWEDISH', 0x01),
('SUBLANG_SWEDISH_FINLAND', 0x02),
('SUBLANG_URDU_PAKISTAN', 0x01),
('SUBLANG_URDU_INDIA', 0x02),
('SUBLANG_UZBEK_LATIN', 0x01),
('SUBLANG_UZBEK_CYRILLIC', 0x02),
('SUBLANG_DUTCH_SURINAM', 0x03),
('SUBLANG_ROMANIAN', 0x01),
('SUBLANG_ROMANIAN_MOLDAVIA', 0x02),
('SUBLANG_RUSSIAN', 0x01),
('SUBLANG_RUSSIAN_MOLDAVIA', 0x02),
('SUBLANG_CROATIAN', 0x01),
('SUBLANG_LITHUANIAN_CLASSIC', 0x02),
('SUBLANG_GAELIC', 0x01),
('SUBLANG_GAELIC_SCOTTISH', 0x02),
('SUBLANG_GAELIC_MANX', 0x03) ]
SUBLANG = dict(sublang+[(e[1], e[0]) for e in sublang])
class UnicodeStringWrapperPostProcessor:
"""This class attemps to help the process of identifying strings
that might be plain Unicode or Pascal. A list of strings will be
wrapped on it with the hope the overlappings will help make the
decission about their type."""
def __init__(self, pe, rva_ptr):
self.pe = pe
self.rva_ptr = rva_ptr
self.string = None
def get_rva(self):
"""Get the RVA of the string."""
return self.rva_ptr
def __str__(self):
"""Return the escaped ASCII representation of the string."""
def convert_char(char):
if char in string.printable:
return char
else:
return r'\x%02x' % ord(char)
if self.string:
return ''.join([convert_char(c) for c in self.string])
return ''
def invalidate(self):
"""Make this instance None, to express it's no known string type."""
self = None
def render_pascal_16(self):
self.string = self.pe.get_string_u_at_rva(
self.rva_ptr+2,
max_length=self.__get_pascal_16_length())
def ask_pascal_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
with the possible length contained in the first word.
"""
length = self.__get_pascal_16_length()
if length == (next_rva_ptr - (self.rva_ptr+2)) / 2:
self.length = length
return True
return False
def __get_pascal_16_length(self):
return self.__get_word_value_at_rva(self.rva_ptr)
def __get_word_value_at_rva(self, rva):
try:
            data = self.pe.get_data(rva, 2)
except PEFormatError, e:
return False
if len(data)<2:
return False
return struct.unpack('<H', data)[0]
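    # A Pascal-16 string is a 16-bit length word followed by UTF-16
    # characters, so ask_pascal_16() cross-checks that length against the
    # distance to the next string's RVA: a length word of 5 at rva_ptr
    # means the body spans rva_ptr+2 .. rva_ptr+2 + 5*2.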
#def render_pascal_8(self):
# """"""
def ask_unicode_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
to see if there's a Unicode NULL character there.
"""
if self.__get_word_value_at_rva(next_rva_ptr-2) == 0:
self.length = next_rva_ptr - self.rva_ptr
return True
return False
def render_unicode_16(self):
""""""
self.string = self.pe.get_string_u_at_rva(self.rva_ptr)
class PEFormatError(Exception):
"""Generic PE format error exception."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Dump:
"""Convenience class for dumping the PE information."""
def __init__(self):
self.text = ''
def add_lines(self, txt, indent=0):
"""Adds a list of lines.
The list can be indented with the optional argument 'indent'.
"""
for line in txt:
self.add_line(line, indent)
def add_line(self, txt, indent=0):
"""Adds a line.
The line can be indented with the optional argument 'indent'.
"""
self.add(txt+'\n', indent)
def add(self, txt, indent=0):
"""Adds some text, no newline will be appended.
The text can be indented with the optional argument 'indent'.
"""
if isinstance(txt, unicode):
s = []
for c in txt:
try:
s.append(str(c))
except UnicodeEncodeError, e:
s.append(repr(c))
txt = ''.join(s)
self.text += ' '*indent+txt
def add_header(self, txt):
"""Adds a header element."""
self.add_line('-'*10+txt+'-'*10+'\n')
def add_newline(self):
"""Adds a newline."""
self.text += '\n'
def get_text(self):
"""Get the text in its current state."""
return self.text
class Structure:
"""Prepare structure object to extract members from data.
Format is a list containing definitions for the elements
of the structure.
"""
def __init__(self, format, name=None, file_offset=None):
# Format is forced little endian, for big endian non Intel platforms
self.__format__ = '<'
self.__keys__ = []
# self.values = {}
self.__format_length__ = 0
self.__set_format__(format[1])
self._all_zeroes = False
self.__unpacked_data_elms__ = None
self.__file_offset__ = file_offset
if name:
self.name = name
else:
self.name = format[0]
def __get_format__(self):
return self.__format__
def get_file_offset(self):
return self.__file_offset__
def set_file_offset(self, offset):
self.__file_offset__ = offset
def all_zeroes(self):
"""Returns true is the unpacked data is all zeroes."""
return self._all_zeroes
def __set_format__(self, format):
for elm in format:
if ',' in elm:
elm_type, elm_name = elm.split(',', 1)
self.__format__ += elm_type
elm_names = elm_name.split(',')
names = []
for elm_name in elm_names:
if elm_name in self.__keys__:
search_list = [x[:len(elm_name)] for x in self.__keys__]
occ_count = search_list.count(elm_name)
elm_name = elm_name+'_'+str(occ_count)
names.append(elm_name)
# Some PE header structures have unions on them, so a certain
# value might have different names, so each key has a list of
# all the possible members referring to the data.
self.__keys__.append(names)
self.__format_length__ = struct.calcsize(self.__format__)
def sizeof(self):
"""Return size of the structure."""
return self.__format_length__
def __unpack__(self, data):
if len(data)>self.__format_length__:
data = data[:self.__format_length__]
# OC Patch:
# Some malware have incorrect header lengths.
# Fail gracefully if this occurs
# Buggy malware: a29b0118af8b7408444df81701ad5a7f
#
elif len(data)<self.__format_length__:
raise PEFormatError('Data length less than expected header length.')
if data.count(chr(0)) == len(data):
self._all_zeroes = True
self.__unpacked_data_elms__ = struct.unpack(self.__format__, data)
for i in xrange(len(self.__unpacked_data_elms__)):
for key in self.__keys__[i]:
# self.values[key] = self.__unpacked_data_elms__[i]
setattr(self, key, self.__unpacked_data_elms__[i])
def __pack__(self):
new_values = []
for i in xrange(len(self.__unpacked_data_elms__)):
for key in self.__keys__[i]:
new_val = getattr(self, key)
old_val = self.__unpacked_data_elms__[i]
# In the case of Unions, when the first changed value
# is picked the loop is exited
if new_val != old_val:
break
new_values.append(new_val)
return struct.pack(self.__format__, *new_values)
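    # Sketch of the expected unpack/pack round trip (hypothetical format
    # and values):
    #
    #   >>> s = Structure(('DUMMY', ('H,Magic', 'H,Count')))
    #   >>> s.__unpack__('\x4d\x5a\x03\x00')
    #   >>> hex(s.Magic), s.Count
    #   ('0x5a4d', 3)
    #   >>> s.Count = 4
    #   >>> s.__pack__()
    #   'MZ\x04\x00'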
def __str__(self):
return '\n'.join( self.dump() )
def __repr__(self):
return '<Structure: %s>' % (' '.join( [' '.join(s.split()) for s in self.dump()] ))
def dump(self, indentation=0):
"""Returns a string representation of the structure."""
dump = []
dump.append('[%s]' % self.name)
# Refer to the __set_format__ method for an explanation
# of the following construct.
for keys in self.__keys__:
for key in keys:
val = getattr(self, key)
if isinstance(val, int) or isinstance(val, long):
val_str = '0x%-8X' % (val)
if key == 'TimeDateStamp' or key == 'dwTimeStamp':
try:
val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))
except exceptions.ValueError, e:
val_str += ' [INVALID TIME]'
else:
val_str = ''.join(filter(lambda c:c != '\0', str(val)))
dump.append('%-30s %s' % (key+':', val_str))
return dump
class SectionStructure(Structure):
"""Convenience section handling class."""
def get_data(self, start, length=None):
"""Get data chunk from a section.
        Allows querying data from the section by passing the
        addresses where the PE file would be loaded by default.
        It is then possible to retrieve code and data by their real
        addresses as they would appear once loaded.
"""
offset = start - self.VirtualAddress
if length:
end = offset+length
else:
end = len(self.data)
return self.data[offset:end]
def get_rva_from_offset(self, offset):
return offset - self.PointerToRawData + self.VirtualAddress
def get_offset_from_rva(self, rva):
return (rva - self.VirtualAddress) + self.PointerToRawData
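    # The two conversions above are inverses of each other. For a section
    # with VirtualAddress=0x1000 and PointerToRawData=0x400 (hypothetical
    # but typical values), RVA 0x1234 maps to file offset
    # (0x1234 - 0x1000) + 0x400 = 0x634, and back again.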
def contains_offset(self, offset):
"""Check whether the section contains the file offset provided."""
if not self.PointerToRawData:
# bss and other sections containing only uninitialized data must have 0
# and do not take space in the file
return False
        return self.PointerToRawData <= offset < self.PointerToRawData + self.SizeOfRawData
def contains_rva(self, rva):
"""Check whether the section contains the address provided."""
# PECOFF documentation v8 says:
# The total size of the section when loaded into memory.
# If this value is greater than SizeOfRawData, the section is zero-padded.
# This field is valid only for executable images and should be set to zero
# for object files.
if len(self.data) < self.SizeOfRawData:
size = self.Misc_VirtualSize
else:
size = max(self.SizeOfRawData, self.Misc_VirtualSize)
return self.VirtualAddress <= rva < self.VirtualAddress + size
def contains(self, rva):
#print "DEPRECATION WARNING: you should use contains_rva() instead of contains()"
return self.contains_rva(rva)
def set_data(self, data):
"""Set the data belonging to the section."""
self.data = data
def get_entropy(self):
"""Calculate and return the entropy for the section."""
return self.entropy_H( self.data )
def get_hash_sha1(self):
"""Get the SHA-1 hex-digest of the section's data."""
if sha1 is not None:
return sha1( self.data ).hexdigest()
def get_hash_sha256(self):
"""Get the SHA-256 hex-digest of the section's data."""
if sha256 is not None:
return sha256( self.data ).hexdigest()
def get_hash_sha512(self):
"""Get the SHA-512 hex-digest of the section's data."""
if sha512 is not None:
return sha512( self.data ).hexdigest()
def get_hash_md5(self):
"""Get the MD5 hex-digest of the section's data."""
if md5 is not None:
return md5( self.data ).hexdigest()
def entropy_H(self, data):
"""Calculate the entropy of a chunk of data."""
if len(data) == 0:
return 0.0
occurences = array.array('L', [0]*256)
for x in data:
occurences[ord(x)] += 1
entropy = 0
for x in occurences:
if x:
p_x = float(x) / len(data)
entropy -= p_x*math.log(p_x, 2)
return entropy
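    # entropy_H() returns Shannon entropy in bits per byte: 0.0 for a
    # constant buffer, up to 8.0 when all 256 byte values are equally
    # likely. E.g. for 'aabb' each byte value has p=0.5, so the entropy is
    # -2*(0.5*log2(0.5)) = 1.0; packed or encrypted sections tend to score
    # close to 8.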
class DataContainer:
"""Generic data container."""
def __init__(self, **args):
for key, value in args.items():
setattr(self, key, value)
class ImportDescData(DataContainer):
"""Holds import descriptor information.
dll: name of the imported DLL
imports: list of imported symbols (ImportData instances)
    struct: IMAGE_IMPORT_DESCRIPTOR structure
"""
class ImportData(DataContainer):
"""Holds imported symbol's information.
ordinal: Ordinal of the symbol
name: Name of the symbol
bound: If the symbol is bound, this contains
the address.
"""
class ExportDirData(DataContainer):
"""Holds export directory information.
struct: IMAGE_EXPORT_DIRECTORY structure
symbols: list of exported symbols (ExportData instances)
"""
class ExportData(DataContainer):
"""Holds exported symbols' information.
ordinal: ordinal of the symbol
address: address of the symbol
name: name of the symbol (None if the symbol is
exported by ordinal only)
forwarder: if the symbol is forwarded it will
contain the name of the target symbol,
None otherwise.
"""
class ResourceDirData(DataContainer):
"""Holds resource directory information.
struct: IMAGE_RESOURCE_DIRECTORY structure
entries: list of entries (ResourceDirEntryData instances)
"""
class ResourceDirEntryData(DataContainer):
"""Holds resource directory entry data.
struct: IMAGE_RESOURCE_DIRECTORY_ENTRY structure
name: If the resource is identified by name this
attribute will contain the name string. None
otherwise. If identified by id, the id is
                available at 'struct.Id'
id: the id, also in struct.Id
directory: If this entry has a lower level directory
this attribute will point to the
ResourceDirData instance representing it.
    data: If this entry has no further lower directories
and points to the actual resource data, this
attribute will reference the corresponding
ResourceDataEntryData instance.
(Either of the 'directory' or 'data' attribute will exist,
but not both.)
"""
class ResourceDataEntryData(DataContainer):
"""Holds resource data entry information.
struct: IMAGE_RESOURCE_DATA_ENTRY structure
lang: Primary language ID
sublang: Sublanguage ID
"""
class DebugData(DataContainer):
"""Holds debug information.
struct: IMAGE_DEBUG_DIRECTORY structure
"""
class BaseRelocationData(DataContainer):
"""Holds base relocation information.
struct: IMAGE_BASE_RELOCATION structure
entries: list of relocation data (RelocationData instances)
"""
class RelocationData(DataContainer):
"""Holds relocation information.
type: Type of relocation
    The type string can be obtained by
RELOCATION_TYPE[type]
rva: RVA of the relocation
"""
class TlsData(DataContainer):
"""Holds TLS information.
struct: IMAGE_TLS_DIRECTORY structure
"""
class BoundImportDescData(DataContainer):
"""Holds bound import descriptor data.
    This directory entry provides information on the
    DLLs this PE file has been bound to (if bound at all).
The structure will contain the name and timestamp of the
DLL at the time of binding so that the loader can know
whether it differs from the one currently present in the
system and must, therefore, re-bind the PE's imports.
struct: IMAGE_BOUND_IMPORT_DESCRIPTOR structure
name: DLL name
entries: list of entries (BoundImportRefData instances)
the entries will exist if this DLL has forwarded
symbols. If so, the destination DLL will have an
entry in this list.
"""
class BoundImportRefData(DataContainer):
"""Holds bound import forwader reference data.
Contains the same information as the bound descriptor but
for forwarded DLLs, if any.
struct: IMAGE_BOUND_FORWARDER_REF structure
name: dll name
"""
class PE:
"""A Portable Executable representation.
This class provides access to most of the information in a PE file.
It expects to be supplied the name of the file to load or PE data
to process and an optional argument 'fast_load' (False by default)
which controls whether to load all the directories information,
which can be quite time consuming.
pe = pefile.PE('module.dll')
pe = pefile.PE(name='module.dll')
    would load 'module.dll' and process it. If the data is already
    available in a buffer the same can be achieved with:
pe = pefile.PE(data=module_dll_data)
The "fast_load" can be set to a default by setting its value in the
module itself by means,for instance, of a "pefile.fast_load = True".
That will make all the subsequent instances not to load the
whole PE structure. The "full_load" method can be used to parse
the missing data at a later stage.
Basic headers information will be available in the attributes:
DOS_HEADER
NT_HEADERS
FILE_HEADER
OPTIONAL_HEADER
    All of them will contain among their attributes the members of the
corresponding structures as defined in WINNT.H
The raw data corresponding to the header (from the beginning of the
    file up to the start of the first section) will be available in the
instance's attribute 'header' as a string.
The sections will be available as a list in the 'sections' attribute.
Each entry will contain as attributes all the structure's members.
Directory entries will be available as attributes (if they exist):
(no other entries are processed at this point)
DIRECTORY_ENTRY_IMPORT (list of ImportDescData instances)
DIRECTORY_ENTRY_EXPORT (ExportDirData instance)
DIRECTORY_ENTRY_RESOURCE (ResourceDirData instance)
DIRECTORY_ENTRY_DEBUG (list of DebugData instances)
DIRECTORY_ENTRY_BASERELOC (list of BaseRelocationData instances)
DIRECTORY_ENTRY_TLS
DIRECTORY_ENTRY_BOUND_IMPORT (list of BoundImportData instances)
The following dictionary attributes provide ways of mapping different
constants. They will accept the numeric value and return the string
representation and the opposite, feed in the string and get the
numeric constant:
DIRECTORY_ENTRY
IMAGE_CHARACTERISTICS
SECTION_CHARACTERISTICS
DEBUG_TYPE
SUBSYSTEM_TYPE
MACHINE_TYPE
RELOCATION_TYPE
RESOURCE_TYPE
LANG
SUBLANG
"""
#
# Format specifications for PE structures.
#
__IMAGE_DOS_HEADER_format__ = ('IMAGE_DOS_HEADER',
('H,e_magic', 'H,e_cblp', 'H,e_cp',
'H,e_crlc', 'H,e_cparhdr', 'H,e_minalloc',
'H,e_maxalloc', 'H,e_ss', 'H,e_sp', 'H,e_csum',
'H,e_ip', 'H,e_cs', 'H,e_lfarlc', 'H,e_ovno', '8s,e_res',
'H,e_oemid', 'H,e_oeminfo', '20s,e_res2',
'L,e_lfanew'))
__IMAGE_FILE_HEADER_format__ = ('IMAGE_FILE_HEADER',
('H,Machine', 'H,NumberOfSections',
'L,TimeDateStamp', 'L,PointerToSymbolTable',
'L,NumberOfSymbols', 'H,SizeOfOptionalHeader',
'H,Characteristics'))
__IMAGE_DATA_DIRECTORY_format__ = ('IMAGE_DATA_DIRECTORY',
('L,VirtualAddress', 'L,Size'))
__IMAGE_OPTIONAL_HEADER_format__ = ('IMAGE_OPTIONAL_HEADER',
('H,Magic', 'B,MajorLinkerVersion',
'B,MinorLinkerVersion', 'L,SizeOfCode',
'L,SizeOfInitializedData', 'L,SizeOfUninitializedData',
'L,AddressOfEntryPoint', 'L,BaseOfCode', 'L,BaseOfData',
'L,ImageBase', 'L,SectionAlignment', 'L,FileAlignment',
'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion',
'H,MajorImageVersion', 'H,MinorImageVersion',
'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion',
'L,Reserved1', 'L,SizeOfImage', 'L,SizeOfHeaders',
'L,CheckSum', 'H,Subsystem', 'H,DllCharacteristics',
'L,SizeOfStackReserve', 'L,SizeOfStackCommit',
'L,SizeOfHeapReserve', 'L,SizeOfHeapCommit',
'L,LoaderFlags', 'L,NumberOfRvaAndSizes' ))
__IMAGE_OPTIONAL_HEADER64_format__ = ('IMAGE_OPTIONAL_HEADER64',
('H,Magic', 'B,MajorLinkerVersion',
'B,MinorLinkerVersion', 'L,SizeOfCode',
'L,SizeOfInitializedData', 'L,SizeOfUninitializedData',
'L,AddressOfEntryPoint', 'L,BaseOfCode',
'Q,ImageBase', 'L,SectionAlignment', 'L,FileAlignment',
'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion',
'H,MajorImageVersion', 'H,MinorImageVersion',
'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion',
'L,Reserved1', 'L,SizeOfImage', 'L,SizeOfHeaders',
'L,CheckSum', 'H,Subsystem', 'H,DllCharacteristics',
'Q,SizeOfStackReserve', 'Q,SizeOfStackCommit',
'Q,SizeOfHeapReserve', 'Q,SizeOfHeapCommit',
'L,LoaderFlags', 'L,NumberOfRvaAndSizes' ))
__IMAGE_NT_HEADERS_format__ = ('IMAGE_NT_HEADERS', ('L,Signature',))
__IMAGE_SECTION_HEADER_format__ = ('IMAGE_SECTION_HEADER',
('8s,Name', 'L,Misc,Misc_PhysicalAddress,Misc_VirtualSize',
'L,VirtualAddress', 'L,SizeOfRawData', 'L,PointerToRawData',
'L,PointerToRelocations', 'L,PointerToLinenumbers',
'H,NumberOfRelocations', 'H,NumberOfLinenumbers',
'L,Characteristics'))
__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__ = ('IMAGE_DELAY_IMPORT_DESCRIPTOR',
('L,grAttrs', 'L,szName', 'L,phmod', 'L,pIAT', 'L,pINT',
'L,pBoundIAT', 'L,pUnloadIAT', 'L,dwTimeStamp'))
__IMAGE_IMPORT_DESCRIPTOR_format__ = ('IMAGE_IMPORT_DESCRIPTOR',
('L,OriginalFirstThunk,Characteristics',
'L,TimeDateStamp', 'L,ForwarderChain', 'L,Name', 'L,FirstThunk'))
__IMAGE_EXPORT_DIRECTORY_format__ = ('IMAGE_EXPORT_DIRECTORY',
('L,Characteristics',
'L,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'L,Name',
'L,Base', 'L,NumberOfFunctions', 'L,NumberOfNames',
'L,AddressOfFunctions', 'L,AddressOfNames', 'L,AddressOfNameOrdinals'))
__IMAGE_RESOURCE_DIRECTORY_format__ = ('IMAGE_RESOURCE_DIRECTORY',
('L,Characteristics',
'L,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion',
'H,NumberOfNamedEntries', 'H,NumberOfIdEntries'))
__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__ = ('IMAGE_RESOURCE_DIRECTORY_ENTRY',
('L,Name',
'L,OffsetToData'))
__IMAGE_RESOURCE_DATA_ENTRY_format__ = ('IMAGE_RESOURCE_DATA_ENTRY',
('L,OffsetToData', 'L,Size', 'L,CodePage', 'L,Reserved'))
__VS_VERSIONINFO_format__ = ( 'VS_VERSIONINFO',
('H,Length', 'H,ValueLength', 'H,Type' ))
__VS_FIXEDFILEINFO_format__ = ( 'VS_FIXEDFILEINFO',
('L,Signature', 'L,StrucVersion', 'L,FileVersionMS', 'L,FileVersionLS',
'L,ProductVersionMS', 'L,ProductVersionLS', 'L,FileFlagsMask', 'L,FileFlags',
'L,FileOS', 'L,FileType', 'L,FileSubtype', 'L,FileDateMS', 'L,FileDateLS'))
__StringFileInfo_format__ = ( 'StringFileInfo',
('H,Length', 'H,ValueLength', 'H,Type' ))
__StringTable_format__ = ( 'StringTable',
('H,Length', 'H,ValueLength', 'H,Type' ))
__String_format__ = ( 'String',
('H,Length', 'H,ValueLength', 'H,Type' ))
__Var_format__ = ( 'Var', ('H,Length', 'H,ValueLength', 'H,Type' ))
__IMAGE_THUNK_DATA_format__ = ('IMAGE_THUNK_DATA',
('L,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_THUNK_DATA64_format__ = ('IMAGE_THUNK_DATA',
('Q,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_DEBUG_DIRECTORY_format__ = ('IMAGE_DEBUG_DIRECTORY',
('L,Characteristics', 'L,TimeDateStamp', 'H,MajorVersion',
'H,MinorVersion', 'L,Type', 'L,SizeOfData', 'L,AddressOfRawData',
'L,PointerToRawData'))
__IMAGE_BASE_RELOCATION_format__ = ('IMAGE_BASE_RELOCATION',
('L,VirtualAddress', 'L,SizeOfBlock') )
__IMAGE_TLS_DIRECTORY_format__ = ('IMAGE_TLS_DIRECTORY',
('L,StartAddressOfRawData', 'L,EndAddressOfRawData',
'L,AddressOfIndex', 'L,AddressOfCallBacks',
'L,SizeOfZeroFill', 'L,Characteristics' ) )
__IMAGE_TLS_DIRECTORY64_format__ = ('IMAGE_TLS_DIRECTORY',
('Q,StartAddressOfRawData', 'Q,EndAddressOfRawData',
'Q,AddressOfIndex', 'Q,AddressOfCallBacks',
'L,SizeOfZeroFill', 'L,Characteristics' ) )
__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__ = ('IMAGE_BOUND_IMPORT_DESCRIPTOR',
('L,TimeDateStamp', 'H,OffsetModuleName', 'H,NumberOfModuleForwarderRefs'))
__IMAGE_BOUND_FORWARDER_REF_format__ = ('IMAGE_BOUND_FORWARDER_REF',
('L,TimeDateStamp', 'H,OffsetModuleName', 'H,Reserved') )
def __init__(self, name=None, data=None, fast_load=None):
self.sections = []
self.__warnings = []
self.PE_TYPE = None
if not name and not data:
return
# This list will keep track of all the structures created.
# That will allow for an easy iteration through the list
# in order to save the modifications made
self.__structures__ = []
if not fast_load:
fast_load = globals()['fast_load']
self.__parse__(name, data, fast_load)
def __unpack_data__(self, format, data, file_offset):
"""Apply structure format to raw data.
        Returns an unpacked structure object if successful, None otherwise.
"""
structure = Structure(format, file_offset=file_offset)
#if len(data) < structure.sizeof():
# return None
try:
structure.__unpack__(data)
except PEFormatError, err:
self.__warnings.append(
'Corrupt header "%s" at file offset %d. Exception: %s' % (
format[0], file_offset, str(err)) )
return None
self.__structures__.append(structure)
return structure
def __parse__(self, fname, data, fast_load):
"""Parse a Portable Executable file.
Loads a PE file, parsing all its structures and making them available
through the instance's attributes.
"""
if fname:
fd = file(fname, 'rb')
self.__data__ = fd.read()
fd.close()
elif data:
self.__data__ = data
self.DOS_HEADER = self.__unpack_data__(
self.__IMAGE_DOS_HEADER_format__,
self.__data__, file_offset=0)
if not self.DOS_HEADER or self.DOS_HEADER.e_magic != IMAGE_DOS_SIGNATURE:
raise PEFormatError('DOS Header magic not found.')
# OC Patch:
# Check for sane value in e_lfanew
#
if self.DOS_HEADER.e_lfanew > len(self.__data__):
raise PEFormatError('Invalid e_lfanew value, probably not a PE file')
nt_headers_offset = self.DOS_HEADER.e_lfanew
self.NT_HEADERS = self.__unpack_data__(
self.__IMAGE_NT_HEADERS_format__,
self.__data__[nt_headers_offset:],
file_offset = nt_headers_offset)
# We better check the signature right here, before the file screws
# around with sections:
# OC Patch:
# Some malware will cause the Signature value to not exist at all
if not self.NT_HEADERS or not self.NT_HEADERS.Signature:
raise PEFormatError('NT Headers not found.')
if self.NT_HEADERS.Signature != IMAGE_NT_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature.')
self.FILE_HEADER = self.__unpack_data__(
self.__IMAGE_FILE_HEADER_format__,
self.__data__[nt_headers_offset+4:],
file_offset = nt_headers_offset+4)
image_flags = self.retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
if not self.FILE_HEADER:
raise PEFormatError('File Header missing')
        # Set the image's flags according to the Characteristics member
self.set_flags(self.FILE_HEADER, self.FILE_HEADER.Characteristics, image_flags)
optional_header_offset = \
nt_headers_offset+4+self.FILE_HEADER.sizeof()
# Note: location of sections can be controlled from PE header:
sections_offset = optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER_format__,
self.__data__[optional_header_offset:],
file_offset = optional_header_offset)
# According to solardesigner's findings for his
# Tiny PE project, the optional header does not
# need fields beyond "Subsystem" in order to be
# loadable by the Windows loader (given that zeroes
# are acceptable values and the header is loaded
# in a zeroed memory page)
# If trying to parse a full Optional Header fails
# we try to parse it again with some 0 padding
#
MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69
if ( self.OPTIONAL_HEADER is None and
len(self.__data__[optional_header_offset:])
>= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
# Add enough zeroes to make up for the unused fields
#
padding_length = 128
# Create padding
#
padded_data = self.__data__[optional_header_offset:] + (
'\0' * padding_length)
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER_format__,
padded_data,
file_offset = optional_header_offset)
# Check the Magic in the OPTIONAL_HEADER and set the PE file
# type accordingly
#
if self.OPTIONAL_HEADER is not None:
if self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE:
self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE
elif self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE_PLUS:
self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE_PLUS
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER64_format__,
self.__data__[optional_header_offset:],
file_offset = optional_header_offset)
# Again, as explained above, we try to parse
# a reduced form of the Optional Header which
# is still valid despite not including all
# structure members
#
MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69+4
if ( self.OPTIONAL_HEADER is None and
len(self.__data__[optional_header_offset:])
>= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
padding_length = 128
padded_data = self.__data__[optional_header_offset:] + (
'\0' * padding_length)
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER64_format__,
padded_data,
file_offset = optional_header_offset)
if not self.FILE_HEADER:
raise PEFormatError('File Header missing')
# OC Patch:
# Die gracefully if there is no OPTIONAL_HEADER field
# 975440f5ad5e2e4a92c4d9a5f22f75c1
if self.PE_TYPE is None or self.OPTIONAL_HEADER is None:
raise PEFormatError("No Optional Header found, invalid PE32 or PE32+ file")
dll_characteristics_flags = self.retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLL_CHARACTERISTICS_')
        # Set the Dll Characteristics flags according to the DllCharacteristics member
self.set_flags(
self.OPTIONAL_HEADER,
self.OPTIONAL_HEADER.DllCharacteristics,
dll_characteristics_flags)
self.OPTIONAL_HEADER.DATA_DIRECTORY = []
#offset = (optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader)
offset = (optional_header_offset + self.OPTIONAL_HEADER.sizeof())
self.NT_HEADERS.FILE_HEADER = self.FILE_HEADER
self.NT_HEADERS.OPTIONAL_HEADER = self.OPTIONAL_HEADER
# The NumberOfRvaAndSizes is sanitized to stay within
        # reasonable limits so it can be cast to an int
#
if self.OPTIONAL_HEADER.NumberOfRvaAndSizes > 0x10:
self.__warnings.append(
'Suspicious NumberOfRvaAndSizes in the Optional Header. ' +
'Normal values are never larger than 0x10, the value is: 0x%x' %
self.OPTIONAL_HEADER.NumberOfRvaAndSizes )
for i in xrange(int(0x7fffffffL & self.OPTIONAL_HEADER.NumberOfRvaAndSizes)):
if len(self.__data__[offset:]) == 0:
break
if len(self.__data__[offset:]) < 8:
data = self.__data__[offset:]+'\0'*8
else:
data = self.__data__[offset:]
dir_entry = self.__unpack_data__(
self.__IMAGE_DATA_DIRECTORY_format__,
data,
file_offset = offset)
if dir_entry is None:
break
# Would fail if missing an entry
# 1d4937b2fa4d84ad1bce0309857e70ca offending sample
try:
dir_entry.name = DIRECTORY_ENTRY[i]
except (KeyError, AttributeError):
break
offset += dir_entry.sizeof()
self.OPTIONAL_HEADER.DATA_DIRECTORY.append(dir_entry)
# If the offset goes outside the optional header,
# the loop is broken, regardless of how many directories
# NumberOfRvaAndSizes says there are
#
# We assume a normally sized optional header, hence that we do
# a sizeof() instead of reading SizeOfOptionalHeader.
            # Then we add a default number of directories times their size,
# if we go beyond that, we assume the number of directories
# is wrong and stop processing
if offset >= (optional_header_offset +
self.OPTIONAL_HEADER.sizeof() + 8*16) :
break
offset = self.parse_sections(sections_offset)
# OC Patch:
# There could be a problem if there are no raw data sections
# greater than 0
# fc91013eb72529da005110a3403541b6 example
        # Should this throw an exception if the minimum header offset
# can't be found?
#
rawDataPointers = [
s.PointerToRawData for s in self.sections if s.PointerToRawData>0]
if len(rawDataPointers) > 0:
lowest_section_offset = min(rawDataPointers)
else:
lowest_section_offset = None
if not lowest_section_offset or lowest_section_offset<offset:
self.header = self.__data__[:offset]
else:
self.header = self.__data__[:lowest_section_offset]
# Check whether the entry point lies within a section
#
if self.get_section_by_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint) is not None:
# Check whether the entry point lies within the file
#
ep_offset = self.get_offset_from_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint)
if ep_offset > len(self.__data__):
self.__warnings.append(
'Possibly corrupt file. AddressOfEntryPoint lies outside the file. ' +
'AddressOfEntryPoint: 0x%x' %
self.OPTIONAL_HEADER.AddressOfEntryPoint )
else:
self.__warnings.append(
'AddressOfEntryPoint lies outside the sections\' boundaries. ' +
'AddressOfEntryPoint: 0x%x' %
self.OPTIONAL_HEADER.AddressOfEntryPoint )
if not fast_load:
self.parse_data_directories()
def get_warnings(self):
"""Return the list of warnings.
Non-critical problems found when parsing the PE file are
appended to a list of warnings. This method returns the
full list.
"""
return self.__warnings
def show_warnings(self):
"""Print the list of warnings.
Non-critical problems found when parsing the PE file are
appended to a list of warnings. This method prints the
full list to standard output.
"""
for warning in self.__warnings:
print '>', warning
def full_load(self):
"""Process the data directories.
        This method will load the data directories which might not have
been loaded if the "fast_load" option was used.
"""
self.parse_data_directories()
def write(self, filename=None):
"""Write the PE file.
This function will process all headers and components
of the PE file and include all changes made (by just
assigning to attributes in the PE objects) and write
the changes back to a file whose name is provided as
an argument. The filename is optional.
The data to be written to the file will be returned
as a 'str' object.
"""
file_data = list(self.__data__)
for struct in self.__structures__:
struct_data = list(struct.__pack__())
offset = struct.get_file_offset()
file_data[offset:offset+len(struct_data)] = struct_data
if hasattr(self, 'VS_VERSIONINFO'):
if hasattr(self, 'FileInfo'):
for entry in self.FileInfo:
if hasattr(entry, 'StringTable'):
for st_entry in entry.StringTable:
for key, entry in st_entry.entries.items():
offsets = st_entry.entries_offsets[key]
lengths = st_entry.entries_lengths[key]
if len( entry ) > lengths[1]:
uc = zip(
list(entry[:lengths[1]]), ['\0'] * lengths[1] )
l = list()
map(l.extend, uc)
file_data[
offsets[1] : offsets[1] + lengths[1]*2 ] = l
else:
uc = zip(
list(entry), ['\0'] * len(entry) )
l = list()
map(l.extend, uc)
file_data[
offsets[1] : offsets[1] + len(entry)*2 ] = l
remainder = lengths[1] - len(entry)
file_data[
offsets[1] + len(entry)*2 :
offsets[1] + lengths[1]*2 ] = [
u'\0' ] * remainder*2
new_file_data = ''.join( [ chr(ord(c)) for c in file_data ] )
if filename:
f = file(filename, 'wb+')
f.write(new_file_data)
f.close()
return new_file_data
def parse_sections(self, offset):
"""Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
Its attributes will contain all the section information plus "data"
a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info.
"""
self.sections = []
for i in xrange(self.FILE_HEADER.NumberOfSections):
section = SectionStructure(self.__IMAGE_SECTION_HEADER_format__)
if not section:
break
section_offset = offset + section.sizeof() * i
section.set_file_offset(section_offset)
section.__unpack__(self.__data__[section_offset:])
self.__structures__.append(section)
if section.SizeOfRawData > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'SizeOfRawData is larger than file.')
if section.PointerToRawData > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'PointerToRawData points beyond the end of the file.')
if section.Misc_VirtualSize > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualSize is extremely large > 256MiB.')
if section.VirtualAddress > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualAddress is beyond 0x10000000.')
#
# Some packer used a non-aligned PointerToRawData in the sections,
# which causes several common tools not to load the section data
# properly as they blindly read from the indicated offset.
# It seems that Windows will round the offset down to the largest
# offset multiple of FileAlignment which is smaller than
# PointerToRawData. The following code will do the same.
#
#alignment = self.OPTIONAL_HEADER.FileAlignment
section_data_start = section.PointerToRawData
if ( self.OPTIONAL_HEADER.FileAlignment != 0 and
(section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'Suspicious value for FileAlignment in the Optional Header. ' +
'Normally the PointerToRawData entry of the sections\' structures ' +
'is a multiple of FileAlignment, this might imply the file ' +
'is trying to confuse tools which parse this incorrectly')
section_data_end = section_data_start+section.SizeOfRawData
section.set_data(self.__data__[section_data_start:section_data_end])
section_flags = self.retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
# Set the section's flags according the the Characteristics member
self.set_flags(section, section.Characteristics, section_flags)
if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and
section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):
self.__warnings.append(
('Suspicious flags set for section %d. ' % i) +
                    'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. ' +
'This might indicate a packed executable.')
self.sections.append(section)
if self.FILE_HEADER.NumberOfSections > 0 and self.sections:
return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections
else:
return offset
def retrieve_flags(self, flag_dict, flag_filter):
"""Read the flags from a dictionary and return them in a usable form.
Will return a list of (flag, value) for all flags in "flag_dict"
matching the filter "flag_filter".
"""
return [(f[0], f[1]) for f in flag_dict.items() if
isinstance(f[0], str) and f[0].startswith(flag_filter)]
def set_flags(self, obj, flag_field, flags):
"""Will process the flags and set attributes in the object accordingly.
The object "obj" will gain attritutes named after the flags provided in
"flags" and valued True/False, matching the results of applyin each
flag value from "flags" to flag_field.
"""
for flag in flags:
if flag[1] & flag_field:
setattr(obj, flag[0], True)
else:
setattr(obj, flag[0], False)
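    # retrieve_flags() and set_flags() together turn a bitfield into
    # boolean attributes. E.g. for a hypothetical Characteristics value of
    # 0x2102 one would get:
    #
    #   pe.FILE_HEADER.IMAGE_FILE_DLL               # True  (0x2000 set)
    #   pe.FILE_HEADER.IMAGE_FILE_EXECUTABLE_IMAGE  # True  (0x0002 set)
    #   pe.FILE_HEADER.IMAGE_FILE_SYSTEM            # False (0x1000 clear)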
def parse_data_directories(self):
"""Parse and process the PE file's data directories."""
directory_parsing = (
('IMAGE_DIRECTORY_ENTRY_IMPORT', self.parse_import_directory),
('IMAGE_DIRECTORY_ENTRY_EXPORT', self.parse_export_directory),
('IMAGE_DIRECTORY_ENTRY_RESOURCE', self.parse_resources_directory),
('IMAGE_DIRECTORY_ENTRY_DEBUG', self.parse_debug_directory),
('IMAGE_DIRECTORY_ENTRY_BASERELOC', self.parse_relocations_directory),
('IMAGE_DIRECTORY_ENTRY_TLS', self.parse_directory_tls),
('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', self.parse_delay_import_directory),
('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', self.parse_directory_bound_imports) )
for entry in directory_parsing:
# OC Patch:
#
try:
dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[
DIRECTORY_ENTRY[entry[0]]]
except IndexError:
break
if dir_entry.VirtualAddress:
value = entry[1](dir_entry.VirtualAddress, dir_entry.Size)
if value:
setattr(self, entry[0][6:], value)
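    # Once parse_data_directories() has run, each parsed directory hangs
    # off the instance under its name minus the 'IMAGE_' prefix, e.g.:
    #
    #   pe = PE('module.dll')   # full load by default
    #   for entry in pe.DIRECTORY_ENTRY_IMPORT:
    #       print entry.dll
    #       for imp in entry.imports:
    #           print '\t', hex(imp.address), imp.name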
def parse_directory_bound_imports(self, rva, size):
""""""
bnd_descr = Structure(self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__)
bnd_descr_size = bnd_descr.sizeof()
start = rva
bound_imports = []
while True:
bnd_descr = self.__unpack_data__(
self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__,
self.__data__[rva:rva+bnd_descr_size],
file_offset = rva)
if bnd_descr is None:
                # If the directory can't be parsed then silently return.
                # This directory does not necessarily have to be valid to
# still have a valid PE file
self.__warnings.append(
'The Bound Imports directory exists but can\'t be parsed.')
return
if bnd_descr.all_zeroes():
break
rva += bnd_descr.sizeof()
forwarder_refs = []
for idx in xrange(bnd_descr.NumberOfModuleForwarderRefs):
# Both structures IMAGE_BOUND_IMPORT_DESCRIPTOR and
# IMAGE_BOUND_FORWARDER_REF have the same size.
bnd_frwd_ref = self.__unpack_data__(
self.__IMAGE_BOUND_FORWARDER_REF_format__,
self.__data__[rva:rva+bnd_descr_size],
file_offset = rva)
# OC Patch:
if not bnd_frwd_ref:
raise PEFormatError(
"IMAGE_BOUND_FORWARDER_REF cannot be read")
rva += bnd_frwd_ref.sizeof()
name_str = self.get_string_from_data(
start+bnd_frwd_ref.OffsetModuleName, self.__data__)
if not name_str:
break
forwarder_refs.append(BoundImportRefData(
struct = bnd_frwd_ref,
name = name_str))
name_str = self.get_string_from_data(
start+bnd_descr.OffsetModuleName, self.__data__)
if not name_str:
break
bound_imports.append(
BoundImportDescData(
struct = bnd_descr,
name = name_str,
entries = forwarder_refs))
return bound_imports
def parse_directory_tls(self, rva, size):
""""""
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
format = self.__IMAGE_TLS_DIRECTORY_format__
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
format = self.__IMAGE_TLS_DIRECTORY64_format__
tls_struct = self.__unpack_data__(
format,
self.get_data(rva),
file_offset = self.get_offset_from_rva(rva))
if not tls_struct:
return None
return TlsData( struct = tls_struct )
def parse_relocations_directory(self, rva, size):
""""""
rlc = Structure(self.__IMAGE_BASE_RELOCATION_format__)
rlc_size = rlc.sizeof()
end = rva+size
relocations = []
while rva<end:
# OC Patch:
# Malware that has bad rva entries will cause an error.
# Just continue on after an exception
#
try:
rlc = self.__unpack_data__(
self.__IMAGE_BASE_RELOCATION_format__,
self.get_data(rva, rlc_size),
file_offset = self.get_offset_from_rva(rva) )
except PEFormatError:
self.__warnings.append(
'Invalid relocation information. Can\'t read ' +
'data at RVA: 0x%x' % rva)
rlc = None
if not rlc:
break
reloc_entries = self.parse_relocations(
rva+rlc_size, rlc.VirtualAddress, rlc.SizeOfBlock-rlc_size)
relocations.append(
BaseRelocationData(
struct = rlc,
entries = reloc_entries))
if not rlc.SizeOfBlock:
break
rva += rlc.SizeOfBlock
return relocations
def parse_relocations(self, data_rva, rva, size):
""""""
data = self.get_data(data_rva, size)
entries = []
for idx in xrange(len(data)/2):
word = struct.unpack('<H', data[idx*2:(idx+1)*2])[0]
reloc_type = (word>>12)
reloc_offset = (word&0x0fff)
entries.append(
RelocationData(
type = reloc_type,
rva = reloc_offset+rva))
return entries
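    # Illustrative sketch (hypothetical word value): each relocation WORD
    # packs the type in its top 4 bits and the offset in its low 12 bits.
    # For word = 0x3010: reloc_type = 0x3010 >> 12 = 3
    # (IMAGE_REL_BASED_HIGHLOW) and reloc_offset = 0x3010 & 0x0fff = 0x10,
    # so the entry's rva becomes the block's VirtualAddress + 0x10.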
def parse_debug_directory(self, rva, size):
""""""
dbg = Structure(self.__IMAGE_DEBUG_DIRECTORY_format__)
dbg_size = dbg.sizeof()
debug = []
for idx in xrange(size/dbg_size):
try:
data = self.get_data(rva+dbg_size*idx, dbg_size)
except PEFormatError, e:
self.__warnings.append(
'Invalid debug information. Can\'t read ' +
'data at RVA: 0x%x' % rva)
return None
dbg = self.__unpack_data__(
self.__IMAGE_DEBUG_DIRECTORY_format__,
data, file_offset = self.get_offset_from_rva(rva+dbg_size*idx))
if not dbg:
return None
debug.append(
DebugData(
struct = dbg))
return debug
def parse_resources_directory(self, rva, size=0, base_rva = None, level = 0):
"""Parse the resources directory.
Given the rva of the resources directory, it will process all
its entries.
The root will have the corresponding member of its structure,
IMAGE_RESOURCE_DIRECTORY plus 'entries', a list of all the
entries in the directory.
Those entries will have, correspondingly, all the structure's
members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one,
"directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure
representing upper layers of the tree. This one will also have
an 'entries' attribute, pointing to the 3rd, and last, level.
Another directory with more entries. Those last entries will
have a new atribute (both 'leaf' or 'data_entry' can be used to
access it). This structure finally points to the resource data.
All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY,
are available as its attributes.
"""
# OC Patch:
original_rva = rva
if base_rva is None:
base_rva = rva
resources_section = self.get_section_by_rva(rva)
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
            # especially nasty and have an invalid RVA.
data = self.get_data(rva)
except PEFormatError, e:
self.__warnings.append(
'Invalid resources directory. Can\'t read ' +
'directory data at RVA: 0x%x' % rva)
return None
# Get the resource directory structure, that is, the header
# of the table preceding the actual entries
#
resource_dir = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
if resource_dir is None:
            # If the resources directory can't be parsed, return silently.
            # This directory does not necessarily have to be valid for the
            # PE file itself to be valid.
self.__warnings.append(
'Invalid resources directory. Can\'t parse ' +
'directory data at RVA: 0x%x' % rva)
return None
dir_entries = []
        # Advance the rva to the position immediately following the directory
# table header and pointing to the first entry in the table
#
rva += resource_dir.sizeof()
number_of_entries = (
resource_dir.NumberOfNamedEntries +
resource_dir.NumberOfIdEntries )
strings_to_postprocess = list()
for idx in xrange(number_of_entries):
res = self.parse_resource_entry(rva)
if res is None:
self.__warnings.append(
'Error parsing the resources directory, ' +
'Entry %d is invalid, RVA = 0x%x. ' %
(idx, rva) )
break
entry_name = None
entry_id = None
# If all named entries have been processed, only Id ones
# remain
if idx >= resource_dir.NumberOfNamedEntries:
entry_id = res.Name
else:
ustr_offset = base_rva+res.NameOffset
try:
#entry_name = self.get_string_u_at_rva(ustr_offset, max_length=16)
entry_name = UnicodeStringWrapperPostProcessor(self, ustr_offset)
strings_to_postprocess.append(entry_name)
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the resources directory, ' +
'attempting to read entry name. ' +
'Can\'t read unicode string at offset 0x%x' %
(ustr_offset) )
if res.DataIsDirectory:
# OC Patch:
#
# One trick malware can do is to recursively reference
# the next directory. This causes hilarity to ensue when
# trying to parse everything correctly.
# If the original RVA given to this function is equal to
# the next one to parse, we assume that it's a trick.
# Instead of raising a PEFormatError this would skip some
# reasonable data so we just break.
#
# 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample
if original_rva == (base_rva + res.OffsetToDirectory):
break
else:
entry_directory = self.parse_resources_directory(
base_rva+res.OffsetToDirectory,
base_rva=base_rva, level = level+1)
if not entry_directory:
break
dir_entries.append(
ResourceDirEntryData(
struct = res,
name = entry_name,
id = entry_id,
directory = entry_directory))
else:
struct = self.parse_resource_data_entry(
base_rva + res.OffsetToDirectory)
if struct:
entry_data = ResourceDataEntryData(
struct = struct,
lang = res.Name & 0xff,
sublang = (res.Name>>8) & 0xff)
dir_entries.append(
ResourceDirEntryData(
struct = res,
name = entry_name,
id = entry_id,
data = entry_data))
else:
break
# Check if this entry contains version information
#
if level == 0 and res.Id == RESOURCE_TYPE['RT_VERSION']:
if len(dir_entries)>0:
last_entry = dir_entries[-1]
rt_version_struct = None
try:
rt_version_struct = last_entry.directory.entries[0].directory.entries[0].data.struct
except:
# Maybe a malformed directory structure...?
                    # Let's ignore it
pass
if rt_version_struct is not None:
self.parse_version_information(rt_version_struct)
rva += res.sizeof()
string_rvas = [s.get_rva() for s in strings_to_postprocess]
string_rvas.sort()
for idx, s in enumerate(strings_to_postprocess):
s.render_pascal_16()
resource_directory_data = ResourceDirData(
struct = resource_dir,
entries = dir_entries)
return resource_directory_data
def parse_resource_data_entry(self, rva):
"""Parse a data entry from the resources directory."""
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
            # especially nasty and have an invalid RVA.
data = self.get_data(rva)
except PEFormatError, excp:
self.__warnings.append(
'Error parsing a resource directory data entry, ' +
'the RVA is invalid: 0x%x' % ( rva ) )
return None
data_entry = self.__unpack_data__(
self.__IMAGE_RESOURCE_DATA_ENTRY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
return data_entry
def parse_resource_entry(self, rva):
"""Parse a directory entry from the resources directory."""
resource = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__, self.get_data(rva),
file_offset = self.get_offset_from_rva(rva) )
if resource is None:
return None
#resource.NameIsString = (resource.Name & 0x80000000L) >> 31
resource.NameOffset = resource.Name & 0x7FFFFFFFL
resource.__pad = resource.Name & 0xFFFF0000L
resource.Id = resource.Name & 0x0000FFFFL
resource.DataIsDirectory = (resource.OffsetToData & 0x80000000L) >> 31
resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFFL
return resource
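    # Illustrative sketch (hypothetical values): for a raw entry with
    # Name = 0x80000010 the top bit signals a string name, so
    # NameOffset = 0x80000010 & 0x7FFFFFFF = 0x10; for Name = 0x00000003
    # the entry is identified by Id = 3. Likewise OffsetToData = 0x80000048
    # yields DataIsDirectory = 1 and OffsetToDirectory = 0x48.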
def parse_version_information(self, version_struct):
"""Parse version information structure.
        The data will be made available in three attributes of the PE object.
VS_VERSIONINFO will contain the first three fields of the main structure:
'Length', 'ValueLength', and 'Type'
VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes:
'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'
FileInfo is a list of all StringFileInfo and VarFileInfo structures.
StringFileInfo structures will have a list as an attribute named 'StringTable'
containing all the StringTable structures. Each of those structures contains a
dictionary 'entries' with all the key/value version information string pairs.
VarFileInfo structures will have a list as an attribute named 'Var' containing
all Var structures. Each Var structure will have a dictionary as an attribute
named 'entry' which will contain the name and value of the Var.
"""
# Retrieve the data for the version info resource
#
start_offset = self.get_offset_from_rva( version_struct.OffsetToData )
raw_data = self.__data__[ start_offset : start_offset+version_struct.Size ]
# Map the main structure and the subsequent string
#
versioninfo_struct = self.__unpack_data__(
self.__VS_VERSIONINFO_format__, raw_data,
file_offset = start_offset )
if versioninfo_struct is None:
return
ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof()
try:
versioninfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VS_VERSION_INFO string. Can\'t ' +
'read unicode string at offset 0x%x' % (
ustr_offset ) )
versioninfo_string = None
# If the structure does not contain the expected name, it's assumed to be invalid
#
if versioninfo_string != u'VS_VERSION_INFO':
self.__warnings.append('Invalid VS_VERSION_INFO block')
return
# Set the PE object's VS_VERSIONINFO to this one
#
self.VS_VERSIONINFO = versioninfo_struct
        # Set the Key attribute to point to the unicode string identifying the structure
#
self.VS_VERSIONINFO.Key = versioninfo_string
# Process the fixed version information, get the offset and structure
#
fixedfileinfo_offset = self.dword_align(
versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1),
version_struct.OffsetToData)
fixedfileinfo_struct = self.__unpack_data__(
self.__VS_FIXEDFILEINFO_format__,
raw_data[fixedfileinfo_offset:],
file_offset = start_offset+fixedfileinfo_offset )
if not fixedfileinfo_struct:
return
# Set the PE object's VS_FIXEDFILEINFO to this one
#
self.VS_FIXEDFILEINFO = fixedfileinfo_struct
# Start parsing all the StringFileInfo and VarFileInfo structures
#
# Get the first one
#
stringfileinfo_offset = self.dword_align(
fixedfileinfo_offset + fixedfileinfo_struct.sizeof(),
version_struct.OffsetToData)
original_stringfileinfo_offset = stringfileinfo_offset
# Set the PE object's attribute that will contain them all.
#
self.FileInfo = list()
while True:
# Process the StringFileInfo/VarFileInfo struct
#
stringfileinfo_struct = self.__unpack_data__(
self.__StringFileInfo_format__,
raw_data[stringfileinfo_offset:],
file_offset = start_offset+stringfileinfo_offset )
if stringfileinfo_struct is None:
self.__warnings.append(
'Error parsing StringFileInfo/VarFileInfo struct' )
return None
# Get the subsequent string defining the structure.
#
ustr_offset = ( version_struct.OffsetToData +
stringfileinfo_offset + versioninfo_struct.sizeof() )
try:
stringfileinfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringFileInfo string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
# Set such string as the Key attribute
#
stringfileinfo_struct.Key = stringfileinfo_string
# Append the structure to the PE object's list
#
self.FileInfo.append(stringfileinfo_struct)
# Parse a StringFileInfo entry
#
if stringfileinfo_string == u'StringFileInfo':
if stringfileinfo_struct.Type == 1 and stringfileinfo_struct.ValueLength == 0:
stringtable_offset = self.dword_align(
stringfileinfo_offset + stringfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
stringfileinfo_struct.StringTable = list()
# Process the String Table entries
#
while True:
stringtable_struct = self.__unpack_data__(
self.__StringTable_format__,
raw_data[stringtable_offset:],
file_offset = start_offset+stringtable_offset )
if not stringtable_struct:
break
ustr_offset = ( version_struct.OffsetToData + stringtable_offset +
stringtable_struct.sizeof() )
try:
stringtable_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
stringtable_struct.LangID = stringtable_string
stringtable_struct.entries = dict()
stringtable_struct.entries_offsets = dict()
stringtable_struct.entries_lengths = dict()
stringfileinfo_struct.StringTable.append(stringtable_struct)
entry_offset = self.dword_align(
stringtable_offset + stringtable_struct.sizeof() +
2*(len(stringtable_string)+1),
version_struct.OffsetToData)
# Process all entries in the string table
#
while entry_offset < stringtable_offset + stringtable_struct.Length:
string_struct = self.__unpack_data__(
self.__String_format__, raw_data[entry_offset:],
file_offset = start_offset+entry_offset )
if not string_struct:
break
ustr_offset = ( version_struct.OffsetToData + entry_offset +
string_struct.sizeof() )
try:
key = self.get_string_u_at_rva( ustr_offset )
key_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Key string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
value_offset = self.dword_align(
2*(len(key)+1) + entry_offset + string_struct.sizeof(),
version_struct.OffsetToData)
ustr_offset = version_struct.OffsetToData + value_offset
try:
value = self.get_string_u_at_rva( ustr_offset,
max_length = string_struct.ValueLength )
value_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Value string. ' +
'Can\'t read unicode string at offset 0x%x' % (
ustr_offset ) )
break
if string_struct.Length == 0:
entry_offset = stringtable_offset + stringtable_struct.Length
else:
entry_offset = self.dword_align(
string_struct.Length+entry_offset, version_struct.OffsetToData)
key_as_char = []
for c in key:
if ord(c)>128:
key_as_char.append('\\x%02x' %ord(c))
else:
key_as_char.append(c)
key_as_char = ''.join(key_as_char)
setattr(stringtable_struct, key_as_char, value)
stringtable_struct.entries[key] = value
stringtable_struct.entries_offsets[key] = (key_offset, value_offset)
stringtable_struct.entries_lengths[key] = (len(key), len(value))
stringtable_offset = self.dword_align(
stringtable_struct.Length + stringtable_offset,
version_struct.OffsetToData)
if stringtable_offset >= stringfileinfo_struct.Length:
break
# Parse a VarFileInfo entry
#
elif stringfileinfo_string == u'VarFileInfo':
varfileinfo_struct = stringfileinfo_struct
varfileinfo_struct.name = 'VarFileInfo'
if varfileinfo_struct.Type == 1 and varfileinfo_struct.ValueLength == 0:
var_offset = self.dword_align(
stringfileinfo_offset + varfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
varfileinfo_struct.Var = list()
# Process all entries
#
while True:
var_struct = self.__unpack_data__(
self.__Var_format__,
raw_data[var_offset:],
file_offset = start_offset+var_offset )
if not var_struct:
break
ustr_offset = ( version_struct.OffsetToData + var_offset +
var_struct.sizeof() )
try:
var_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VarFileInfo Var string. ' +
'Can\'t read unicode string at offset 0x%x' % (ustr_offset))
break
varfileinfo_struct.Var.append(var_struct)
varword_offset = self.dword_align(
2*(len(var_string)+1) + var_offset + var_struct.sizeof(),
version_struct.OffsetToData)
orig_varword_offset = varword_offset
while varword_offset < orig_varword_offset + var_struct.ValueLength:
word1 = self.get_word_from_data(
raw_data[varword_offset:varword_offset+2], 0)
word2 = self.get_word_from_data(
raw_data[varword_offset+2:varword_offset+4], 0)
varword_offset += 4
var_struct.entry = {var_string: '0x%04x 0x%04x' % (word1, word2)}
var_offset = self.dword_align(
var_offset+var_struct.Length, version_struct.OffsetToData)
                        # Note: this condition is always true for a
                        # non-negative Length, so only the first Var entry
                        # is processed before the loop breaks.
                        if var_offset <= var_offset+var_struct.Length:
break
# Increment and align the offset
#
stringfileinfo_offset = self.dword_align(
stringfileinfo_struct.Length+stringfileinfo_offset,
version_struct.OffsetToData)
# Check if all the StringFileInfo and VarFileInfo items have been processed
#
if stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length:
break
def parse_export_directory(self, rva, size):
"""Parse the export directory.
Given the rva of the export directory, it will process all
its entries.
The exports will be made available through a list "exports"
        containing tuples with the following elements:
(ordinal, symbol_address, symbol_name)
        And also through a dictionary "exports_by_ordinal" whose keys
        will be the ordinals and the values tuples of the form:
(symbol_address, symbol_name)
The symbol addresses are relative, not absolute.
"""
try:
export_dir = self.__unpack_data__(
self.__IMAGE_EXPORT_DIRECTORY_format__, self.get_data(rva),
file_offset = self.get_offset_from_rva(rva) )
except PEFormatError:
self.__warnings.append(
'Error parsing export directory at RVA: 0x%x' % ( rva ) )
return
if not export_dir:
return
try:
address_of_names = self.get_data(
export_dir.AddressOfNames, export_dir.NumberOfNames*4)
address_of_name_ordinals = self.get_data(
export_dir.AddressOfNameOrdinals, export_dir.NumberOfNames*4)
address_of_functions = self.get_data(
export_dir.AddressOfFunctions, export_dir.NumberOfFunctions*4)
except PEFormatError:
self.__warnings.append(
'Error parsing export directory at RVA: 0x%x' % ( rva ) )
return
exports = []
for i in xrange(export_dir.NumberOfNames):
symbol_name = self.get_string_at_rva(
self.get_dword_from_data(address_of_names, i))
symbol_ordinal = self.get_word_from_data(
address_of_name_ordinals, i)
if symbol_ordinal*4<len(address_of_functions):
symbol_address = self.get_dword_from_data(
address_of_functions, symbol_ordinal)
else:
# Corrupt? a bad pointer... we assume it's all
# useless, no exports
return None
            # If the function's RVA points within the export directory
            # it will point to the forwarded symbol's name string
            # instead of pointing to the function's start address.
if symbol_address>=rva and symbol_address<rva+size:
forwarder_str = self.get_string_at_rva(symbol_address)
else:
forwarder_str = None
exports.append(
ExportData(
ordinal = export_dir.Base+symbol_ordinal,
address = symbol_address,
name = symbol_name,
forwarder = forwarder_str))
ordinals = [exp.ordinal for exp in exports]
for idx in xrange(export_dir.NumberOfFunctions):
            if idx+export_dir.Base not in ordinals:
symbol_address = self.get_dword_from_data(
address_of_functions,
idx)
#
# Checking for forwarder again.
#
if symbol_address>=rva and symbol_address<rva+size:
forwarder_str = self.get_string_at_rva(symbol_address)
else:
forwarder_str = None
exports.append(
ExportData(
ordinal = export_dir.Base+idx,
address = symbol_address,
name = None,
forwarder = forwarder_str))
return ExportDirData(
struct = export_dir,
symbols = exports)
def dword_align(self, offset, base):
offset += base
return (offset+3) - ((offset+3)%4) - base
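    # Worked example (hypothetical values): dword_align(0x5, base=0x1002)
    # rebases the offset to 0x1007, rounds up to the next multiple of
    # four (0x1008) and subtracts the base again, returning 0x6. The
    # alignment is therefore relative to 'base', not to the file start.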
def parse_delay_import_directory(self, rva, size):
"""Walk and parse the delay import directory."""
import_descs = []
while True:
try:
# If the RVA is invalid all would blow up. Some PEs seem to be
                # especially nasty and have an invalid RVA.
data = self.get_data(rva)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the Delay import directory at RVA: 0x%x' % ( rva ) )
break
import_desc = self.__unpack_data__(
self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__,
data, file_offset = self.get_offset_from_rva(rva) )
            # If the structure is all zeroes, we reached the end of the list
if not import_desc or import_desc.all_zeroes():
break
rva += import_desc.sizeof()
try:
import_data = self.parse_imports(
import_desc.pINT,
import_desc.pIAT,
None)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the Delay import directory. ' +
'Invalid import data at RVA: 0x%x' % ( rva ) )
break
if not import_data:
continue
dll = self.get_string_at_rva(import_desc.szName)
if dll:
import_descs.append(
ImportDescData(
struct = import_desc,
imports = import_data,
dll = dll))
return import_descs
def parse_import_directory(self, rva, size):
"""Walk and parse the import directory."""
import_descs = []
while True:
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
                # especially nasty and have an invalid RVA.
data = self.get_data(rva)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the Import directory at RVA: 0x%x' % ( rva ) )
break
import_desc = self.__unpack_data__(
self.__IMAGE_IMPORT_DESCRIPTOR_format__,
data, file_offset = self.get_offset_from_rva(rva) )
            # If the structure is all zeroes, we reached the end of the list
if not import_desc or import_desc.all_zeroes():
break
rva += import_desc.sizeof()
try:
import_data = self.parse_imports(
import_desc.OriginalFirstThunk,
import_desc.FirstThunk,
import_desc.ForwarderChain)
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the Import directory. ' +
'Invalid Import data at RVA: 0x%x' % ( rva ) )
break
#raise excp
if not import_data:
continue
dll = self.get_string_at_rva(import_desc.Name)
if dll:
import_descs.append(
ImportDescData(
struct = import_desc,
imports = import_data,
dll = dll))
return import_descs
def parse_imports(self, original_first_thunk, first_thunk, forwarder_chain):
"""Parse the imported symbols.
        It will fill a list, which will be available as the dictionary
attribute "imports". Its keys will be the DLL names and the values
all the symbols imported from that object.
"""
imported_symbols = []
imports_section = self.get_section_by_rva(first_thunk)
if not imports_section:
raise PEFormatError, 'Invalid/corrupt imports.'
# Import Lookup Table. Contains ordinals or pointers to strings.
ilt = self.get_import_table(original_first_thunk)
        # Import Address Table. May have identical content to the ILT if
        # the PE file is not bound. It will contain the addresses of the
        # imported symbols once the binary is loaded, or if it was
        # already bound.
iat = self.get_import_table(first_thunk)
# OC Patch:
# Would crash if iat or ilt had None type
if not iat and not ilt:
raise PEFormatError(
'Invalid Import Table information. ' +
'Both ILT and IAT appear to be broken.')
if not iat and ilt:
table = ilt
elif iat and not ilt:
table = iat
elif ilt and ((len(ilt) and len(iat)==0) or (len(ilt) == len(iat))):
table = ilt
        elif ilt is not None and len(ilt) == 0 and iat and len(iat):
table = iat
else:
return None
for idx in xrange(len(table)):
imp_ord = None
imp_hint = None
imp_name = None
hint_name_table_rva = None
if table[idx].AddressOfData:
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
ordinal_flag = IMAGE_ORDINAL_FLAG
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
ordinal_flag = IMAGE_ORDINAL_FLAG64
# If imported by ordinal, we will append the ordinal number
#
if table[idx].AddressOfData & ordinal_flag:
import_by_ordinal = True
imp_ord = table[idx].AddressOfData & 0xffff
imp_name = None
else:
import_by_ordinal = False
try:
hint_name_table_rva = table[idx].AddressOfData & 0x7fffffff
data = self.get_data(hint_name_table_rva, 2)
# Get the Hint
imp_hint = self.get_word_from_data(data, 0)
imp_name = self.get_string_at_rva(table[idx].AddressOfData+2)
except PEFormatError, e:
pass
imp_address = first_thunk+self.OPTIONAL_HEADER.ImageBase+idx*4
if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData:
imp_bound = iat[idx].AddressOfData
else:
imp_bound = None
if imp_name != '' and (imp_ord or imp_name):
imported_symbols.append(
ImportData(
import_by_ordinal = import_by_ordinal,
ordinal = imp_ord,
hint = imp_hint,
name = imp_name,
bound = imp_bound,
address = imp_address,
hint_name_table_rva = hint_name_table_rva))
return imported_symbols
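    # Illustrative sketch (hypothetical thunk values): on 32-bit images
    # the ordinal flag is the top bit of the thunk, so
    # AddressOfData = 0x80000013 is an import by ordinal 0x13, while
    # AddressOfData = 0x00004a2c points to a hint/name entry: a WORD
    # hint at RVA 0x4a2c followed by the ASCII symbol name at RVA 0x4a2e.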
def get_import_table(self, rva):
table = []
        while rva:
try:
data = self.get_data(rva)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the import table. ' +
'Invalid data at RVA: 0x%x' % ( rva ) )
return None
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
format = self.__IMAGE_THUNK_DATA_format__
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
format = self.__IMAGE_THUNK_DATA64_format__
thunk_data = self.__unpack_data__(
format, data, file_offset=self.get_offset_from_rva(rva) )
if not thunk_data or thunk_data.all_zeroes():
break
rva += thunk_data.sizeof()
table.append(thunk_data)
return table
def get_memory_mapped_image(self, max_virtual_address=0x10000000, ImageBase=None):
"""Returns the data corresponding to the memory layout of the PE file.
The data includes the PE header and the sections loaded at offsets
corresponding to their relative virtual addresses. (the VirtualAddress
section header member).
Any offset in this data corresponds to the absolute memory address
ImageBase+offset.
        The optional argument 'max_virtual_address' provides a means of
        limiting which sections are processed.
        Any section whose VirtualAddress lies beyond this value will be skipped.
Normally, sections with values beyond this range are just there to confuse
tools. It's a common trick to see in packed executables.
If the 'ImageBase' optional argument is supplied, the file's relocations
will be applied to the image by calling the 'relocate_image()' method.
"""
# Collect all sections in one code block
data = self.header
for section in self.sections:
            # Miscellaneous integrity tests.
            # Some packers will set these to bogus values to
            # make tools go nuts.
#
if section.Misc_VirtualSize == 0 or section.SizeOfRawData == 0:
continue
if section.SizeOfRawData > len(self.__data__):
continue
if section.PointerToRawData > len(self.__data__):
continue
if section.VirtualAddress >= max_virtual_address:
continue
padding_length = section.VirtualAddress - len(data)
if padding_length>0:
data += '\0'*padding_length
elif padding_length<0:
data = data[:padding_length]
data += section.data
return data
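    # Usage sketch (assumes this module's PE class; 'sample.exe' is a
    # hypothetical path):
    #
    #   pe = PE('sample.exe')
    #   image = pe.get_memory_mapped_image()
    #   # image[rva:rva+n] now mirrors the bytes a process would see at
    #   # ImageBase+rva, for sections below max_virtual_address.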
def get_data(self, rva, length=None):
"""Get data regardless of the section where it lies on.
Given a rva and the size of the chunk to retrieve, this method
will find the section where the data lies and return the data.
"""
s = self.get_section_by_rva(rva)
if not s:
if rva<len(self.header):
if length:
end = rva+length
else:
end = None
return self.header[rva:end]
raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
return s.get_data(rva, length)
def get_rva_from_offset(self, offset):
"""Get the rva corresponding to this file offset. """
s = self.get_section_by_offset(offset)
if not s:
raise PEFormatError("specified offset (0x%x) doesn't belong to any section." % offset)
return s.get_rva_from_offset(offset)
def get_offset_from_rva(self, rva):
"""Get the file offset corresponding to this rva.
Given a rva , this method will find the section where the
data lies and return the offset within the file.
"""
s = self.get_section_by_rva(rva)
if not s:
raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
return s.get_offset_from_rva(rva)
def get_string_at_rva(self, rva):
"""Get an ASCII string located at the given address."""
s = self.get_section_by_rva(rva)
if not s:
if rva<len(self.header):
return self.get_string_from_data(rva, self.header)
return None
return self.get_string_from_data(rva-s.VirtualAddress, s.data)
def get_string_from_data(self, offset, data):
"""Get an ASCII string from within the data."""
# OC Patch
b = None
try:
b = data[offset]
except IndexError:
return ''
s = ''
while ord(b):
s += b
offset += 1
try:
b = data[offset]
except IndexError:
break
return s
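    # Illustrative example (hypothetical data):
    #   get_string_from_data(2, 'abcd\x00tail') starts at offset 2 and
    #   stops at the NUL byte, returning 'cd'.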
def get_string_u_at_rva(self, rva, max_length = 2**16):
"""Get an Unicode string located at the given address."""
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
            # especially nasty and have an invalid RVA.
data = self.get_data(rva, 2)
except PEFormatError, e:
return None
#length = struct.unpack('<H', data)[0]
s = u''
for idx in xrange(max_length):
try:
uchr = struct.unpack('<H', self.get_data(rva+2*idx, 2))[0]
except struct.error:
break
if unichr(uchr) == u'\0':
break
s += unichr(uchr)
return s
def get_section_by_offset(self, offset):
"""Get the section containing the given file offset."""
sections = [s for s in self.sections if s.contains_offset(offset)]
if sections:
return sections[0]
return None
def get_section_by_rva(self, rva):
"""Get the section containing the given address."""
sections = [s for s in self.sections if s.contains_rva(rva)]
if sections:
return sections[0]
return None
def __str__(self):
return self.dump_info()
def print_info(self):
"""Print all the PE header information in a human readable from."""
print self.dump_info()
def dump_info(self, dump=None):
"""Dump all the PE header information into human readable string."""
if dump is None:
dump = Dump()
warnings = self.get_warnings()
if warnings:
dump.add_header('Parsing Warnings')
for warning in warnings:
dump.add_line(warning)
dump.add_newline()
dump.add_header('DOS_HEADER')
dump.add_lines(self.DOS_HEADER.dump())
dump.add_newline()
dump.add_header('NT_HEADERS')
dump.add_lines(self.NT_HEADERS.dump())
dump.add_newline()
dump.add_header('FILE_HEADER')
dump.add_lines(self.FILE_HEADER.dump())
image_flags = self.retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
dump.add('Flags: ')
flags = []
for flag in image_flags:
if getattr(self.FILE_HEADER, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_newline()
if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None:
dump.add_header('OPTIONAL_HEADER')
dump.add_lines(self.OPTIONAL_HEADER.dump())
dll_characteristics_flags = self.retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLL_CHARACTERISTICS_')
dump.add('DllCharacteristics: ')
flags = []
for flag in dll_characteristics_flags:
if getattr(self.OPTIONAL_HEADER, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_newline()
dump.add_header('PE Sections')
section_flags = self.retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
for section in self.sections:
dump.add_lines(section.dump())
dump.add('Flags: ')
flags = []
for flag in section_flags:
if getattr(section, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_line('Entropy: %f (Min=0.0, Max=8.0)' % section.get_entropy() )
if md5 is not None:
dump.add_line('MD5 hash: %s' % section.get_hash_md5() )
if sha1 is not None:
dump.add_line('SHA-1 hash: %s' % section.get_hash_sha1() )
if sha256 is not None:
dump.add_line('SHA-256 hash: %s' % section.get_hash_sha256() )
if sha512 is not None:
dump.add_line('SHA-512 hash: %s' % section.get_hash_sha512() )
dump.add_newline()
if (hasattr(self, 'OPTIONAL_HEADER') and
hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ):
dump.add_header('Directories')
for idx in xrange(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):
directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
dump.add_lines(directory.dump())
dump.add_newline()
if hasattr(self, 'VS_VERSIONINFO'):
dump.add_header('Version Information')
dump.add_lines(self.VS_VERSIONINFO.dump())
dump.add_newline()
if hasattr(self, 'VS_FIXEDFILEINFO'):
dump.add_lines(self.VS_FIXEDFILEINFO.dump())
dump.add_newline()
if hasattr(self, 'FileInfo'):
for entry in self.FileInfo:
dump.add_lines(entry.dump())
dump.add_newline()
if hasattr(entry, 'StringTable'):
for st_entry in entry.StringTable:
[dump.add_line(' '+line) for line in st_entry.dump()]
dump.add_line(' LangID: '+st_entry.LangID)
dump.add_newline()
for str_entry in st_entry.entries.items():
dump.add_line(' '+str_entry[0]+': '+str_entry[1])
dump.add_newline()
elif hasattr(entry, 'Var'):
for var_entry in entry.Var:
if hasattr(var_entry, 'entry'):
[dump.add_line(' '+line) for line in var_entry.dump()]
dump.add_line(
' ' + var_entry.entry.keys()[0] +
': ' + var_entry.entry.values()[0])
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):
dump.add_header('Exported symbols')
dump.add_lines(self.DIRECTORY_ENTRY_EXPORT.struct.dump())
dump.add_newline()
dump.add_line('%-10s %-10s %s' % ('Ordinal', 'RVA', 'Name'))
for export in self.DIRECTORY_ENTRY_EXPORT.symbols:
dump.add('%-10d 0x%08Xh %s' % (
export.ordinal, export.address, export.name))
if export.forwarder:
dump.add_line(' forwarder: %s' % export.forwarder)
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
dump.add_header('Imported symbols')
for module in self.DIRECTORY_ENTRY_IMPORT:
dump.add_lines(module.struct.dump())
dump.add_newline()
for symbol in module.imports:
if symbol.import_by_ordinal is True:
dump.add('%s Ordinal[%s] (Imported by Ordinal)' % (
module.dll, str(symbol.ordinal)))
else:
dump.add('%s.%s Hint[%s]' % (
module.dll, symbol.name, str(symbol.hint)))
if symbol.bound:
dump.add_line(' Bound: 0x%08X' % (symbol.bound))
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
dump.add_header('Bound imports')
for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:
dump.add_lines(bound_imp_desc.struct.dump())
dump.add_line('DLL: %s' % bound_imp_desc.name)
dump.add_newline()
for bound_imp_ref in bound_imp_desc.entries:
dump.add_lines(bound_imp_ref.struct.dump(), 4)
dump.add_line('DLL: %s' % bound_imp_ref.name, 4)
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
dump.add_header('Delay Imported symbols')
for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:
dump.add_lines(module.struct.dump())
dump.add_newline()
for symbol in module.imports:
if symbol.import_by_ordinal is True:
dump.add('%s Ordinal[%s] (Imported by Ordinal)' % (
module.dll, str(symbol.ordinal)))
else:
dump.add('%s.%s Hint[%s]' % (
module.dll, symbol.name, str(symbol.hint)))
if symbol.bound:
dump.add_line(' Bound: 0x%08X' % (symbol.bound))
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
dump.add_header('Resource directory')
dump.add_lines(self.DIRECTORY_ENTRY_RESOURCE.struct.dump())
for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
if resource_type.name is not None:
dump.add_line('Name: [%s]' % resource_type.name, 2)
else:
dump.add_line('Id: [0x%X] (%s)' % (
resource_type.struct.Id, RESOURCE_TYPE.get(
resource_type.struct.Id, '-')),
2)
dump.add_lines(resource_type.struct.dump(), 2)
if hasattr(resource_type, 'directory'):
dump.add_lines(resource_type.directory.struct.dump(), 4)
for resource_id in resource_type.directory.entries:
if resource_id.name is not None:
dump.add_line('Name: [%s]' % resource_id.name, 6)
else:
dump.add_line('Id: [0x%X]' % resource_id.struct.Id, 6)
dump.add_lines(resource_id.struct.dump(), 6)
if hasattr(resource_id, 'directory'):
dump.add_lines(resource_id.directory.struct.dump(), 8)
for resource_lang in resource_id.directory.entries:
# dump.add_line('\\--- LANG [%d,%d][%s]' % (
# resource_lang.data.lang,
# resource_lang.data.sublang,
# LANG[resource_lang.data.lang]), 8)
dump.add_lines(resource_lang.struct.dump(), 10)
dump.add_lines(resource_lang.data.struct.dump(), 12)
dump.add_newline()
dump.add_newline()
if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and
self.DIRECTORY_ENTRY_TLS and
self.DIRECTORY_ENTRY_TLS.struct ):
dump.add_header('TLS')
dump.add_lines(self.DIRECTORY_ENTRY_TLS.struct.dump())
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
dump.add_header('Debug information')
for dbg in self.DIRECTORY_ENTRY_DEBUG:
dump.add_lines(dbg.struct.dump())
try:
dump.add_line('Type: '+DEBUG_TYPE[dbg.struct.Type])
except KeyError:
dump.add_line('Type: 0x%x(Unknown)' % dbg.struct.Type)
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'):
dump.add_header('Base relocations')
for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
dump.add_lines(base_reloc.struct.dump())
for reloc in base_reloc.entries:
try:
dump.add_line('%08Xh %s' % (
reloc.rva, RELOCATION_TYPE[reloc.type][16:]), 4)
except KeyError:
dump.add_line('0x%08X 0x%x(Unknown)' % (
reloc.rva, reloc.type), 4)
dump.add_newline()
return dump.get_text()
# OC Patch
def get_physical_by_rva(self, rva):
"""Gets the physical address in the PE file from an RVA value."""
try:
return self.get_offset_from_rva(rva)
except Exception:
return None
##
# Double-Word get/set
##
def get_data_from_dword(self, dword):
"""Return a four byte string representing the double word value. (little endian)."""
return struct.pack('<L', dword)
def get_dword_from_data(self, data, offset):
"""Convert four bytes of data to a double word (little endian)
'offset' is assumed to index into a dword array. So setting it to
        N will return a dword out of the data starting at offset N*4.
Returns None if the data can't be turned into a double word.
"""
if (offset+1)*4 > len(data):
return None
return struct.unpack('<L', data[offset*4:(offset+1)*4])[0]
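    # Illustrative example (hypothetical data): the offset indexes dwords,
    # not bytes, so with data = '\x01\x00\x00\x00\x78\x56\x34\x12'
    # get_dword_from_data(data, 1) unpacks bytes 4..7 little-endian and
    # returns 0x12345678.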
def get_dword_at_rva(self, rva):
"""Return the double word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_dword_from_data(self.get_data(rva)[:4], 0)
except PEFormatError:
return None
def get_dword_from_offset(self, offset):
"""Return the double word value at the given file offset. (little endian)"""
if offset+4 > len(self.__data__):
return None
return self.get_dword_from_data(self.__data__[offset:offset+4], 0)
def set_dword_at_rva(self, rva, dword):
"""Set the double word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_dword(dword))
def set_dword_at_offset(self, offset, dword):
"""Set the double word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_dword(dword))
##
# Word get/set
##
def get_data_from_word(self, word):
"""Return a two byte string representing the word value. (little endian)."""
return struct.pack('<H', word)
def get_word_from_data(self, data, offset):
"""Convert two bytes of data to a word (little endian)
'offset' is assumed to index into a word array. So setting it to
        N will return a word out of the data starting at offset N*2.
Returns None if the data can't be turned into a word.
"""
if (offset+1)*2 > len(data):
return None
return struct.unpack('<H', data[offset*2:(offset+1)*2])[0]
def get_word_at_rva(self, rva):
"""Return the word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_word_from_data(self.get_data(rva)[:2], 0)
except PEFormatError:
return None
def get_word_from_offset(self, offset):
"""Return the word value at the given file offset. (little endian)"""
if offset+2 > len(self.__data__):
return None
return self.get_word_from_data(self.__data__[offset:offset+2], 0)
def set_word_at_rva(self, rva, word):
"""Set the word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_word(word))
def set_word_at_offset(self, offset, word):
"""Set the word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_word(word))
##
# Quad-Word get/set
##
def get_data_from_qword(self, word):
"""Return a eight byte string representing the quad-word value. (little endian)."""
return struct.pack('<Q', word)
def get_qword_from_data(self, data, offset):
"""Convert eight bytes of data to a word (little endian)
'offset' is assumed to index into a word array. So setting it to
N will return a dword out of the data sarting at offset N*8.
Returns None if the data can't be turned into a quad word.
"""
if (offset+1)*8 > len(data):
return None
return struct.unpack('<Q', data[offset*8:(offset+1)*8])[0]
def get_qword_at_rva(self, rva):
"""Return the quad-word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_qword_from_data(self.get_data(rva)[:8], 0)
except PEFormatError:
return None
def get_qword_from_offset(self, offset):
"""Return the quad-word value at the given file offset. (little endian)"""
if offset+8 > len(self.__data__):
return None
return self.get_qword_from_data(self.__data__[offset:offset+8], 0)
def set_qword_at_rva(self, rva, qword):
"""Set the quad-word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_qword(qword))
def set_qword_at_offset(self, offset, qword):
"""Set the quad-word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_qword(qword))
##
# Set bytes
##
def set_bytes_at_rva(self, rva, data):
"""Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
offset = self.get_physical_by_rva(rva)
if not offset:
            return False
return self.set_bytes_at_offset(offset, data)
def set_bytes_at_offset(self, offset, data):
"""Overwrite the bytes at the given file offset with the given string.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
if not isinstance(data, str):
raise TypeError('data should be of type: str')
if offset >= 0 and offset < len(self.__data__):
self.__data__ = ( self.__data__[:offset] +
data +
self.__data__[offset+len(data):] )
else:
return False
# Refresh the section's data with the modified information
#
for section in self.sections:
section_data_start = section.PointerToRawData
section_data_end = section_data_start+section.SizeOfRawData
section.data = self.__data__[section_data_start:section_data_end]
return True
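    # Usage sketch (assumes 'pe' is an instance of this module's PE class;
    # values are hypothetical): overwrite two bytes at file offset 0x200.
    # The method returns False when the offset falls outside the file and,
    # on success, refreshes every section's cached data.
    #
    #   ok = pe.set_bytes_at_offset(0x200, '\x90\x90')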
def relocate_image(self, new_ImageBase):
"""Apply the relocation information to the image using the provided new image base.
This method will apply the relocation information to the image. Given the new base,
all the relocations will be processed and both the raw data and the section's data
will be fixed accordingly.
        The resulting image can also be retrieved through the
        get_memory_mapped_image() method, in order to get something that
        more closely matches what can be found in memory once the
        Windows loader has finished its work.
"""
relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase
for reloc in self.DIRECTORY_ENTRY_BASERELOC:
virtual_address = reloc.struct.VirtualAddress
size_of_block = reloc.struct.SizeOfBlock
# We iterate with an index because if the relocation is of type
# IMAGE_REL_BASED_HIGHADJ we need to also process the next entry
            # at once and skip it for the next iteration
#
entry_idx = 0
while entry_idx<len(reloc.entries):
entry = reloc.entries[entry_idx]
entry_idx += 1
if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_ABSOLUTE']:
# Nothing to do for this type of relocation
pass
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGH']:
# Fix the high 16bits of a relocation
#
# Add high 16bits of relocation_difference to the
# 16bit value at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
                        ( self.get_word_at_rva(entry.rva) + (relocation_difference>>16) )&0xffff )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_LOW']:
# Fix the low 16bits of a relocation
#
# Add low 16 bits of relocation_difference to the 16bit value
# at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
( self.get_word_at_rva(entry.rva) + relocation_difference)&0xffff)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHLOW']:
# Handle all high and low parts of a 32bit relocation
#
# Add relocation_difference to the value at RVA=entry.rva
self.set_dword_at_rva(
entry.rva,
self.get_dword_at_rva(entry.rva)+relocation_difference)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHADJ']:
# Fix the high 16bits of a relocation and adjust
#
# Add high 16bits of relocation_difference to the 32bit value
# composed from the (16bit value at RVA=entry.rva)<<16 plus
# the 16bit value at the next relocation entry.
#
# If the next entry is beyond the array's limits,
# abort... the table is corrupt
#
if entry_idx == len(reloc.entries):
break
next_entry = reloc.entries[entry_idx]
entry_idx += 1
self.set_word_at_rva( entry.rva,
((self.get_word_at_rva(entry.rva)<<16) + next_entry.rva +
relocation_difference & 0xffff0000) >> 16 )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_DIR64']:
# Apply the difference to the 64bit value at the offset
# RVA=entry.rva
self.set_qword_at_rva(
entry.rva,
self.get_qword_at_rva(entry.rva) + relocation_difference)
def verify_checksum(self):
return self.OPTIONAL_HEADER.CheckSum == self.generate_checksum()
def generate_checksum(self):
# Get the offset to the CheckSum field in the OptionalHeader
#
checksum_offset = self.OPTIONAL_HEADER.__file_offset__ + 0x40 # 64
checksum = 0
for i in range( len(self.__data__) / 4 ):
# Skip the checksum field
#
if i == checksum_offset / 4:
continue
            # Explicit little-endian format so the unpack always reads 4 bytes
            dword = struct.unpack('<L', self.__data__[ i*4 : i*4+4 ])[0]
checksum = (checksum & 0xffffffff) + dword + (checksum>>32)
if checksum > 2**32:
checksum = (checksum & 0xffffffff) + (checksum >> 32)
checksum = (checksum & 0xffff) + (checksum >> 16)
checksum = (checksum) + (checksum >> 16)
checksum = checksum & 0xffff
return checksum + len(self.__data__)
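    # Usage sketch (assumes this module's PE class; 'sample.exe' is a
    # hypothetical path):
    #
    #   pe = PE('sample.exe')
    #   if not pe.verify_checksum():
    #       print 'stored 0x%08x != computed 0x%08x' % (
    #           pe.OPTIONAL_HEADER.CheckSum, pe.generate_checksum())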
| bsd-3-clause |
hacklab-fi/asylum | project/examples/handlers.py | 2 | 9747 | # -*- coding: utf-8 -*-
import calendar
import datetime
import logging
import environ
import holviapi
from creditor.handlers import BaseRecurringTransactionsHandler, BaseTransactionHandler
from creditor.models import RecurringTransaction, Transaction, TransactionTag
from django.core.mail import EmailMessage
from django.utils.translation import ugettext_lazy as _
from holviapp.utils import api_configured, get_invoiceapi
from members.handlers import BaseApplicationHandler, BaseMemberHandler
# django-environ settings accessor used by the MEMBEREXAMPLE_* lookups below
env = environ.Env()
logger = logging.getLogger('example.handlers')
class ExampleBaseHandler(BaseMemberHandler):
def on_saving(self, instance, *args, **kwargs):
msg = "on_saving called for %s %s" % (type(instance), instance)
logger.info(msg)
print(msg)
def on_saved(self, instance, *args, **kwargs):
msg = "on_saved called for %s %s" % (type(instance), instance)
logger.info(msg)
print(msg)
class MemberHandler(ExampleBaseHandler):
pass
class ApplicationHandler(ExampleBaseHandler):
def on_approving(self, application, member):
msg = "on_approving called for %s" % application
logger.info(msg)
print(msg)
def on_approved(self, application, member):
msg = "on_approved called for %s" % application
logger.info(msg)
print(msg)
# Auto-add the membership fee as recurring transaction
membership_fee = env.float('MEMBEREXAMPLE_MEMBERSHIP_FEE', default=None)
membership_tag = env.int('MEMBEREXAMPLE_MEMBERSHIP_TAG_PK', default=None)
if membership_fee and membership_tag:
from creditor.models import RecurringTransaction, TransactionTag
rt = RecurringTransaction()
rt.tag = TransactionTag.objects.get(pk=membership_tag)
rt.owner = member
rt.amount = -membership_fee
rt.rtype = RecurringTransaction.YEARLY
        # If the application was received in Q4, set the RecurringTransaction to start from next year
if application.received.month >= 10:
rt.start = datetime.date(year=application.received.year + 1, month=1, day=1)
rt.save()
rt.conditional_add_transaction()
mailman_subscribe = env('MEMBEREXAMPLE_MAILMAN_SUBSCRIBE', default=None)
if mailman_subscribe:
mail = EmailMessage()
mail.from_email = member.email
mail.to = [mailman_subscribe, ]
mail.subject = 'subscribe'
mail.body = 'subscribe'
mail.send()
mail = EmailMessage()
mail.to = [member.email, ]
mail.body = """Your membership has been approved, your member id is #%d""" % member.member_id
mail.send()
class TransactionHandler(BaseTransactionHandler):
def __init__(self, *args, **kwargs):
# We have to do this late to avoid problems with circular imports
from members.models import Member
self.memberclass = Member
self.try_methods = [
self.import_generic_transaction,
self.import_holvi_transaction,
self.import_tmatch_transaction,
]
super().__init__(*args, **kwargs)
def import_transaction(self, at):
msg = "import_transaction called for %s" % at
logger.info(msg)
print(msg)
# We only care about transactions with reference numbers
if not at.reference:
msg = "No reference number for %s, skip" % at
logger.info(msg)
print(msg)
return None
# If local transaction exists, return as-is
lt = at.get_local()
if lt.pk:
msg = "Found local transaction #%d with unique_id=%s" % (lt.pk, lt.unique_id)
logger.info(msg)
print(msg)
return lt
# We have few importers to try
for m in self.try_methods:
new_lt = m(at, lt)
if new_lt is not None:
return new_lt
# Nothing worked, return None
return None
def import_holvi_transaction(self, at, lt):
"""Try to find suitable owner and tag based on the invoice/order info"""
if not at.email:
# Not from holvi, those always have email
msg = "No email set, cannot be from holvi"
logger.info(msg)
print(msg)
return None
try:
lt.owner = self.memberclass.objects.get(email=at.email)
except self.memberclass.DoesNotExist:
msg = "No member with email %s" % at.email
logger.info(msg)
print(msg)
return None
try:
lt.tag = TransactionTag.objects.get(holvi_code=at.holvi_invoice.items[0].category.code)
except (AttributeError, TransactionTag.DoesNotExist) as e:
msg = "Got %s when trying to look for at.invoice.items[0].category.code" % repr(e)
logger.info(msg)
print(msg)
try:
            # Triggers loading of the full product object so we can get the category, can be removed when https://github.com/rambo/python-holviapi/issues/14 is fixed
tmp = at.holvi_order.purchases[0].product.name
lt.tag = TransactionTag.objects.get(holvi_code=at.holvi_order.purchases[0].product.category.code)
except (AttributeError, TransactionTag.DoesNotExist) as e:
msg = "Got %s when trying to look for at.order.purchases[0].product.category.code" % repr(e)
logger.info(msg)
print(msg)
return None
lt.save()
return lt
def import_generic_transaction(self, at, lt):
"""Look for a transaction with same reference but oppsite value. If found use that for owner and tag"""
qs = Transaction.objects.filter(reference=at.reference, amount=-at.amount).order_by('-stamp')
if not qs.count():
return None
base = qs[0]
msg = "Found opposite transaction %s" % base
logger.info(msg)
print(msg)
lt.tag = base.tag
lt.owner = base.owner
lt.save()
return lt
def import_tmatch_transaction(self, at, lt):
        if len(at.reference) < 2:  # To avoid IndexError
return None
if at.reference[0:2] == "RF": # ISO references, our lookup won't work with them, even worse: there will be exceptions
return None
        # In this example the last meaningful digit of the reference (the final digit is a checksum) is used to recognize the TransactionTag
try:
lt.tag = TransactionTag.objects.get(tmatch=at.reference[-2])
except TransactionTag.DoesNotExist:
msg = "No TransactionTag with tmatch=%s" % at.reference[-2]
logger.info(msg)
print(msg)
# No tag matched, skip...
return None
        # In this example the digits from the second position up to the tag identifier form the member number; it may be zero-prefixed
try:
lt.owner = self.memberclass.objects.get(member_id=int(at.reference[1:-2], 10))
except self.memberclass.DoesNotExist:
msg = "No Member with member_id=%d" % int(at.reference[1:-2], 10)
logger.info(msg)
print(msg)
# No member matched, skip...
return None
# Rest of the fields are directly mapped already by get_local()
lt.save()
return lt
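    # Illustrative walk-through (hypothetical reference '100421'): the tag
    # digit is reference[-2] == '2', the member id is
    # int(reference[1:-2], 10) == 4, and the trailing '1' is the checksum
    # digit, which the lookups above ignore.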
def __str__(self):
return str(_("Example application transactions handler"))
class RecurringTransactionsHolviHandler(BaseRecurringTransactionsHandler):
def on_creating(self, rt, t, *args, **kwargs):
import holviapi
import holviapi.utils
msg = "on_creating called for %s (from %s)" % (t, rt)
logger.info(msg)
print(msg)
# Only care about negative amounts
if t.amount >= 0.0:
return True
# If holvi is configured, make invoice
if api_configured():
return self.create_holvi_invoice(rt, t)
        # otherwise make a reference number that matches the tmatch logic above
t.reference = holviapi.utils.int2fin_reference(int("1%03d%s" % (rt.owner.member_id, rt.tag.tmatch)))
return True
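    # Illustrative note (hypothetical values): for member_id 4 and tag
    # tmatch '2', "1%03d%s" yields '10042' and int2fin_reference() appends
    # the Finnish bank reference check digit (here '1', assuming the
    # standard 7-3-1 weighting), producing '100421'; exactly the shape
    # import_tmatch_transaction() above parses back.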
def create_holvi_invoice(self, rt, t):
if t.stamp:
year = t.stamp.year
month = t.stamp.month
else:
now = datetime.datetime.now()
year = now.year
month = now.month
invoice = holviapi.Invoice(get_invoiceapi())
invoice.receiver = holviapi.InvoiceContact({
'email': t.owner.email,
'name': t.owner.name,
})
invoice.items.append(holviapi.InvoiceItem(invoice))
if rt.rtype == RecurringTransaction.YEARLY:
invoice.items[0].description = "%s %d" % (t.tag.label, year)
else:
invoice.items[0].description = "%s %02d/%d" % (t.tag.label, month, year)
invoice.items[0].net = -t.amount # Negative amount transaction -> positive amount invoice
if t.tag.holvi_code:
invoice.items[0].category = holviapi.IncomeCategory(invoice.api.categories_api, {'code': t.tag.holvi_code}) # Lazy-loading category, avoids a GET
invoice.subject = "%s / %s" % (invoice.items[0].description, invoice.receiver.name)
invoice = invoice.save()
invoice.send()
print("Created (and sent) Holvi invoice %s" % invoice.code)
t.reference = invoice.rf_reference
return True
def on_created(self, rt, t, *args, **kwargs):
msg = "on_created called for %s (from %s)" % (t, rt)
logger.info(msg)
print(msg)
| mit |
MobinRanjbar/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf3/manifest.py | 56 | 1389 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
#
from .namespaces import MANIFESTNS
from .element import Element
# Autogenerated
def Manifest(**args):
return Element(qname = (MANIFESTNS,'manifest'), **args)
def FileEntry(**args):
return Element(qname = (MANIFESTNS,'file-entry'), **args)
def EncryptionData(**args):
return Element(qname = (MANIFESTNS,'encryption-data'), **args)
def Algorithm(**args):
return Element(qname = (MANIFESTNS,'algorithm'), **args)
def KeyDerivation(**args):
return Element(qname = (MANIFESTNS,'key-derivation'), **args)
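# Usage sketch (mirrors how odfpy's opendocument module populates its
# manifest; the paths and media types below are illustrative):
#
#   m = Manifest()
#   m.addElement(FileEntry(fullpath="/",
#                          mediatype="application/vnd.oasis.opendocument.text"))
#   m.addElement(FileEntry(fullpath="content.xml", mediatype="text/xml"))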
| apache-2.0 |
dsprenkels/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_endtoend.py | 449 | 26811 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""End-to-end tests for pywebsocket. Tests standalone.py by default. You
can also test mod_pywebsocket hosted on an Apache server by setting
_use_external_server to True and modifying _external_server_port to point to
the port on which the Apache server is running.
"""
import logging
import os
import signal
import socket
import subprocess
import sys
import time
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from test import client_for_testing
from test import mux_client_for_testing
# Special message that tells the echo server to start closing handshake
_GOODBYE_MESSAGE = 'Goodbye'
_SERVER_WARMUP_IN_SEC = 0.2
# If you want to use an external server to run the end-to-end tests, set the
# following parameters correctly.
_use_external_server = False
_external_server_port = 0
# Test body functions
def _echo_check_procedure(client):
client.connect()
client.send_message('test')
client.assert_receive('test')
client.send_message('helloworld')
client.assert_receive('helloworld')
client.send_close()
client.assert_receive_close()
client.assert_connection_closed()
def _echo_check_procedure_with_binary(client):
client.connect()
client.send_message('binary', binary=True)
client.assert_receive('binary', binary=True)
client.send_message('\x00\x80\xfe\xff\x00\x80', binary=True)
client.assert_receive('\x00\x80\xfe\xff\x00\x80', binary=True)
client.send_close()
client.assert_receive_close()
client.assert_connection_closed()
def _echo_check_procedure_with_goodbye(client):
client.connect()
client.send_message('test')
client.assert_receive('test')
client.send_message(_GOODBYE_MESSAGE)
client.assert_receive(_GOODBYE_MESSAGE)
client.assert_receive_close()
client.send_close()
client.assert_connection_closed()
def _echo_check_procedure_with_code_and_reason(client, code, reason):
client.connect()
client.send_close(code, reason)
client.assert_receive_close(code, reason)
client.assert_connection_closed()
def _unmasked_frame_check_procedure(client):
client.connect()
client.send_message('test', mask=False)
client.assert_receive_close(client_for_testing.STATUS_PROTOCOL_ERROR, '')
client.assert_connection_closed()
def _mux_echo_check_procedure(mux_client):
mux_client.connect()
mux_client.send_flow_control(1, 1024)
logical_channel_options = client_for_testing.ClientOptions()
logical_channel_options.server_host = 'localhost'
logical_channel_options.server_port = 80
logical_channel_options.origin = 'http://localhost'
logical_channel_options.resource = '/echo'
mux_client.add_channel(2, logical_channel_options)
mux_client.send_flow_control(2, 1024)
mux_client.send_message(2, 'test')
mux_client.assert_receive(2, 'test')
mux_client.add_channel(3, logical_channel_options)
mux_client.send_flow_control(3, 1024)
mux_client.send_message(2, 'hello')
mux_client.send_message(3, 'world')
mux_client.assert_receive(2, 'hello')
mux_client.assert_receive(3, 'world')
# Don't send close message on channel id 1 so that server-initiated
# closing handshake won't occur.
mux_client.send_close(2)
mux_client.send_close(3)
mux_client.assert_receive_close(2)
mux_client.assert_receive_close(3)
mux_client.send_physical_connection_close()
mux_client.assert_physical_connection_receive_close()
class EndToEndTestBase(unittest.TestCase):
"""Base class for end-to-end tests that launch pywebsocket standalone
server as a separate process, connect to it using the client_for_testing
module, and check if the server behaves correctly by exchanging opening
handshake and frames over a TCP connection.
"""
def setUp(self):
self.server_stderr = None
self.top_dir = os.path.join(os.path.split(__file__)[0], '..')
os.putenv('PYTHONPATH', os.path.pathsep.join(sys.path))
self.standalone_command = os.path.join(
self.top_dir, 'mod_pywebsocket', 'standalone.py')
self.document_root = os.path.join(self.top_dir, 'example')
s = socket.socket()
s.bind(('localhost', 0))
(_, self.test_port) = s.getsockname()
s.close()
self._options = client_for_testing.ClientOptions()
self._options.server_host = 'localhost'
self._options.origin = 'http://localhost'
self._options.resource = '/echo'
# TODO(toyoshim): Eliminate launching a standalone server when an
# external server is used.
if _use_external_server:
self._options.server_port = _external_server_port
else:
self._options.server_port = self.test_port
# TODO(tyoshino): Use tearDown to kill the server.
def _run_python_command(self, commandline, stdout=None, stderr=None):
return subprocess.Popen([sys.executable] + commandline, close_fds=True,
stdout=stdout, stderr=stderr)
def _run_server(self):
args = [self.standalone_command,
'-H', 'localhost',
'-V', 'localhost',
'-p', str(self.test_port),
'-P', str(self.test_port),
'-d', self.document_root]
# Inherit the level set on the root logger by the test runner.
root_logger = logging.getLogger()
log_level = root_logger.getEffectiveLevel()
if log_level != logging.NOTSET:
args.append('--log-level')
args.append(logging.getLevelName(log_level).lower())
return self._run_python_command(args,
stderr=self.server_stderr)
def _kill_process(self, pid):
if sys.platform in ('win32', 'cygwin'):
subprocess.call(
('taskkill.exe', '/f', '/pid', str(pid)), close_fds=True)
else:
os.kill(pid, signal.SIGKILL)
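# The tests below sleep for _SERVER_WARMUP_IN_SEC before connecting, which is
# racy (see the TODOs about polling). A sketch of the polling alternative --
# a hypothetical helper, not used by the tests as written:
def _wait_for_server(port, timeout_in_sec=5.0):
    """Return True once a TCP connection to the server succeeds."""
    deadline = time.time() + timeout_in_sec
    while time.time() < deadline:
        try:
            connection = socket.create_connection(('localhost', port),
                                                  timeout=0.1)
            connection.close()
            return True
        except socket.error:
            time.sleep(0.05)
    return False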
class EndToEndHyBiTest(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _run_test_with_client_options(self, test_function, options):
server = self._run_server()
try:
# TODO(tyoshino): add some logic to poll the server until it
# becomes ready
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_test(self, test_function):
self._run_test_with_client_options(test_function, self._options)
def _run_deflate_frame_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
self._options.enable_deflate_frame()
client = client_for_testing.create_client(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_permessage_deflate_test(
self, offer, response_checker, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
self._options.extensions += offer
self._options.check_permessage_deflate = response_checker
client = client_for_testing.create_client(self._options)
try:
client.connect()
if test_function is not None:
test_function(client)
client.assert_connection_closed()
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_close_with_code_and_reason_test(self, test_function, code,
reason):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(self._options)
try:
test_function(client, code, reason)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_http_fallback_test(self, options, status):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client(options)
try:
client.connect()
self.fail('Could not catch HttpStatusException')
except client_for_testing.HttpStatusException, e:
self.assertEqual(status, e.status)
except Exception, e:
self.fail('Caught unexpected exception: %r' % e)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def _run_mux_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = mux_client_for_testing.MuxClient(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def test_echo(self):
self._run_test(_echo_check_procedure)
def test_echo_binary(self):
self._run_test(_echo_check_procedure_with_binary)
def test_echo_server_close(self):
self._run_test(_echo_check_procedure_with_goodbye)
def test_unmasked_frame(self):
self._run_test(_unmasked_frame_check_procedure)
def test_echo_deflate_frame(self):
self._run_deflate_frame_test(_echo_check_procedure)
def test_echo_deflate_frame_server_close(self):
self._run_deflate_frame_test(
_echo_check_procedure_with_goodbye)
def test_echo_permessage_deflate(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
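# The payload above is the permessage-deflate spec's example of 'Hello'
# compressed with raw DEFLATE. A sketch of where the bytes come from
# (hypothetical helper; note that 'import zlib' is not among this module's
# top-level imports):
def _deflate_hello_example():
    import zlib
    compressor = zlib.compressobj(
        zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
    data = compressor.compress('Hello') + compressor.flush(zlib.Z_SYNC_FLUSH)
    # Z_SYNC_FLUSH appends an empty stored block (00 00 ff ff), which
    # permessage-deflate strips from the wire format.
    return data[:-4]  # typically '\xf2\x48\xcd\xc9\xc9\x07\x00'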
def test_echo_permessage_deflate_two_frames(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd',
client_for_testing.OPCODE_TEXT,
end=False,
rsv1=1)
client._stream.send_data(
'\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_messages(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.send_data(
'\xf2\x00\x11\x00\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x00\x11\x00\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate'],
response_checker,
test_function)
def test_echo_permessage_deflate_two_msgs_server_no_context_takeover(self):
def test_function(client):
# From the examples in the spec.
client._stream.send_data(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.send_data(
'\xf2\x00\x11\x00\x00',
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
'\xf2\x48\xcd\xc9\xc9\x07\x00',
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([('server_no_context_takeover', None)],
parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate; server_no_context_takeover'],
response_checker,
test_function)
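# Why the server echoes identical compressed bytes twice above: with
# server_no_context_takeover it resets its compression window per message,
# so the second 'Hello' cannot back-reference the first (context takeover
# would yield the shorter '\xf2\x00\x11\x00\x00' used in the plain
# two-messages test). A zlib sketch of the difference (hypothetical helper):
def _context_takeover_example():
    import zlib
    def compress_message(compressor):
        return (compressor.compress('Hello') +
                compressor.flush(zlib.Z_SYNC_FLUSH))[:-4]
    fresh = lambda: zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                                     zlib.DEFLATED, -zlib.MAX_WBITS)
    shared = fresh()
    with_takeover = [compress_message(shared), compress_message(shared)]
    without_takeover = [compress_message(fresh()), compress_message(fresh())]
    # with_takeover[1] is typically the short back-reference form, while
    # without_takeover[0] == without_takeover[1].
    return with_takeover, without_takeover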
def test_echo_permessage_deflate_preference(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([], parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate', 'deflate-frame'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_parameters(self):
def test_function(client):
# From the examples in the spec.
compressed_hello = '\xf2\x48\xcd\xc9\xc9\x07\x00'
client._stream.send_data(
compressed_hello,
client_for_testing.OPCODE_TEXT,
rsv1=1)
client._stream.assert_receive_binary(
compressed_hello,
opcode=client_for_testing.OPCODE_TEXT,
rsv1=1)
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
self.assertEquals('permessage-deflate', parameter.name())
self.assertEquals([('server_max_window_bits', '10'),
('server_no_context_takeover', None)],
parameter.get_parameters())
self._run_permessage_deflate_test(
['permessage-deflate; server_max_window_bits=10; '
'server_no_context_takeover'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_bad_server_max_window_bits(self):
def test_function(client):
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
raise Exception('Unexpected acceptance of permessage-deflate')
self._run_permessage_deflate_test(
['permessage-deflate; server_max_window_bits=3000000'],
response_checker,
test_function)
def test_echo_permessage_deflate_with_undefined_parameter(self):
def test_function(client):
client.send_close()
client.assert_receive_close()
def response_checker(parameter):
raise Exception('Unexpected acceptance of permessage-deflate')
self._run_permessage_deflate_test(
['permessage-deflate; foo=bar'],
response_checker,
test_function)
def test_echo_close_with_code_and_reason(self):
self._options.resource = '/close'
self._run_close_with_code_and_reason_test(
_echo_check_procedure_with_code_and_reason, 3333, 'sunsunsunsun')
def test_echo_close_with_empty_body(self):
self._options.resource = '/close'
self._run_close_with_code_and_reason_test(
_echo_check_procedure_with_code_and_reason, None, '')
def test_mux_echo(self):
self._run_mux_test(_mux_echo_check_procedure)
def test_close_on_protocol_error(self):
"""Tests that the server sends a close frame with protocol error status
code when the client sends data with some protocol error.
"""
def test_function(client):
client.connect()
# Intermediate frame without any preceding start of fragmentation
# frame.
client.send_frame_of_arbitrary_bytes('\x80\x80', '')
client.assert_receive_close(
client_for_testing.STATUS_PROTOCOL_ERROR)
self._run_test(test_function)
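# Decoding the arbitrary bytes sent above: 0x80 0x80 is a masked, empty
# continuation frame arriving with no fragmented message in progress, hence
# the protocol error. Field extraction sketch:
#   b1, b2 = 0x80, 0x80
#   fin, opcode = b1 >> 7, b1 & 0x0f     # 1, 0x0 (continuation frame)
#   masked, length = b2 >> 7, b2 & 0x7f  # 1, 0 (masked, empty payload)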
def test_close_on_unsupported_frame(self):
"""Tests that the server sends a close frame with unsupported operation
status code when the client sends data asking some operation that is
not supported by the server.
"""
def test_function(client):
client.connect()
# Text frame with RSV3 bit raised.
client.send_frame_of_arbitrary_bytes('\x91\x80', '')
client.assert_receive_close(
client_for_testing.STATUS_UNSUPPORTED_DATA)
self._run_test(test_function)
def test_close_on_invalid_frame(self):
"""Tests that the server sends a close frame with invalid frame payload
data status code when the client sends an invalid frame like containing
invalid UTF-8 character.
"""
def test_function(client):
client.connect()
# Text frame with invalid UTF-8 string.
client.send_message('\x80', raw=True)
client.assert_receive_close(
client_for_testing.STATUS_INVALID_FRAME_PAYLOAD_DATA)
self._run_test(test_function)
def test_close_on_internal_endpoint_error(self):
"""Tests that the server sends a close frame with internal endpoint
error status code when the handler does bad operation.
"""
self._options.resource = '/internal_error'
def test_function(client):
client.connect()
client.assert_receive_close(
client_for_testing.STATUS_INTERNAL_ENDPOINT_ERROR)
self._run_test(test_function)
# TODO(toyoshim): Add tests to verify invalid absolute uri handling like
# host unmatch, port unmatch and invalid port description (':' without port
# number).
def test_absolute_uri(self):
"""Tests absolute uri request."""
options = self._options
options.resource = 'ws://localhost:%d/echo' % options.server_port
self._run_test_with_client_options(_echo_check_procedure, options)
def test_origin_check(self):
"""Tests http fallback on origin check fail."""
options = self._options
options.resource = '/origin_check'
# The server logs a warning message for the HTTP 403 fallback. The
# warning is confusing, so the following pipe discards it.
self.server_stderr = subprocess.PIPE
self._run_http_fallback_test(options, 403)
def test_version_check(self):
"""Tests http fallback on version check fail."""
options = self._options
options.version = 99
self._run_http_fallback_test(options, 400)
class EndToEndHyBi00Test(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _run_test(self, test_function):
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client = client_for_testing.create_client_hybi00(self._options)
try:
test_function(client)
finally:
client.close_socket()
finally:
self._kill_process(server.pid)
def test_echo(self):
self._run_test(_echo_check_procedure)
def test_echo_server_close(self):
self._run_test(_echo_check_procedure_with_goodbye)
class EndToEndTestWithEchoClient(EndToEndTestBase):
def setUp(self):
EndToEndTestBase.setUp(self)
def _check_example_echo_client_result(
self, expected, stdoutdata, stderrdata):
actual = stdoutdata.decode("utf-8")
if actual != expected:
raise Exception('Unexpected result on example echo client: '
'%r (expected) vs %r (actual)' %
(expected, actual))
if stderrdata is not None:
raise Exception('Unexpected error message on example echo '
'client: %r' % stderrdata)
def test_example_echo_client(self):
"""Tests that the echo_client.py example can talk with the server."""
server = self._run_server()
try:
time.sleep(_SERVER_WARMUP_IN_SEC)
client_command = os.path.join(
self.top_dir, 'example', 'echo_client.py')
# Expected output for the default messages.
default_expectation = ('Send: Hello\n' 'Recv: Hello\n'
u'Send: \u65e5\u672c\n' u'Recv: \u65e5\u672c\n'
'Send close\n' 'Recv ack\n')
args = [client_command,
'-p', str(self._options.server_port)]
client = self._run_python_command(args, stdout=subprocess.PIPE)
stdoutdata, stderrdata = client.communicate()
self._check_example_echo_client_result(
default_expectation, stdoutdata, stderrdata)
# Process a big message for which the extended payload length is used.
# Handling the extended payload length requires accessing the ws_version
# attribute, so this also checks that ws_version is set correctly.
big_message = 'a' * 1024
args = [client_command,
'-p', str(self._options.server_port),
'-m', big_message]
client = self._run_python_command(args, stdout=subprocess.PIPE)
stdoutdata, stderrdata = client.communicate()
expected = ('Send: %s\nRecv: %s\nSend close\nRecv ack\n' %
(big_message, big_message))
self._check_example_echo_client_result(
expected, stdoutdata, stderrdata)
# Test the permessage-deflate extension.
args = [client_command,
'-p', str(self._options.server_port),
'--use_permessage_deflate']
client = self._run_python_command(args, stdout=subprocess.PIPE)
stdoutdata, stderrdata = client.communicate()
self._check_example_echo_client_result(
default_expectation, stdoutdata, stderrdata)
finally:
self._kill_process(server.pid)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
Joergen/zamboni | mkt/versions/api.py | 1 | 3297 | from rest_framework import mixins, serializers, viewsets
from rest_framework.exceptions import ParseError
import amo
from mkt.api.authorization import AllowAppOwner, AllowReviewerReadOnly
from mkt.api.base import CompatRelatedField
from mkt.constants import APP_FEATURES
from mkt.features.api import AppFeaturesSerializer
from versions.models import Version
class VersionSerializer(serializers.ModelSerializer):
addon = CompatRelatedField(view_name='api_dispatch_detail', read_only=True,
tastypie={'resource_name': 'app',
'api_name': 'apps'})
class Meta:
model = Version
fields = ('addon', '_developer_name', 'releasenotes', 'version')
depth = 0
field_rename = {
'_developer_name': 'developer_name',
'releasenotes': 'release_notes',
'addon': 'app'
}
def to_native(self, obj):
native = super(VersionSerializer, self).to_native(obj)
# Add non-field data to the response.
native.update({
'features': AppFeaturesSerializer().to_native(obj.features),
'is_current_version': obj.addon.current_version == obj,
'releasenotes': (unicode(obj.releasenotes) if obj.releasenotes else
None),
})
# Remap fields to friendlier, more backwards-compatible names.
for old, new in self.Meta.field_rename.items():
native[new] = native[old]
del native[old]
return native
class VersionViewSet(mixins.RetrieveModelMixin, mixins.UpdateModelMixin,
viewsets.GenericViewSet):
queryset = Version.objects.filter(
addon__type=amo.ADDON_WEBAPP).exclude(addon__status=amo.STATUS_DELETED)
serializer_class = VersionSerializer
authorization_classes = []
permission_classes = []
def update(self, request, *args, **kwargs):
"""
Allow a version's features to be updated.
"""
obj = self.get_object()
# Deny access to users who are neither this app's owner nor a reviewer.
is_owner = AllowAppOwner().has_object_permission(request, self,
obj.addon)
is_reviewer = AllowReviewerReadOnly().is_authorized(request)
if not (is_owner or is_reviewer):
self.permission_denied(request)
# Update features if they are provided.
if 'features' in request.DATA:
# Raise an exception if any invalid features are passed.
invalid = [f for f in request.DATA['features'] if f.upper() not in
APP_FEATURES.keys()]
if any(invalid):
raise ParseError('Invalid feature(s): %s' % ', '.join(invalid))
# Update the value of each feature (note: a feature not present in
# the form data is assumed to be False)
data = {}
for key, name in APP_FEATURES.items():
field_name = 'has_' + key.lower()
data[field_name] = key.lower() in request.DATA['features']
obj.features.update(**data)
del request.DATA['features']
return super(VersionViewSet, self).update(request, *args, **kwargs)
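# Worked example of the expansion above (feature names hypothetical): if
# APP_FEATURES had keys 'APPS' and 'AUDIO', then request.DATA['features'] ==
# ['apps'] would yield
#   data == {'has_apps': True, 'has_audio': False}
# so every feature omitted from the request is explicitly stored as False.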
| bsd-3-clause |
gudcjfdldu/volatility | volatility/plugins/sockets.py | 45 | 2743 | # Volatility
#
# Authors:
# Mike Auty <mike.auty@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
#pylint: disable-msg=C0111
import volatility.plugins.common as common
import volatility.debug as debug
import volatility.win32 as win32
import volatility.utils as utils
import volatility.protos as protos
class Sockets(common.AbstractWindowsCommand):
"""Print list of open sockets"""
def __init__(self, config, *args, **kwargs):
common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)
config.add_option("PHYSICAL-OFFSET", short_option = 'P', default = False,
cache_invalidator = False,
help = "Physical Offset", action = "store_true")
@staticmethod
def is_valid_profile(profile):
return (profile.metadata.get('os', 'unknown') == 'windows' and
profile.metadata.get('major', 0) == 5)
def render_text(self, outfd, data):
offsettype = "(V)" if not self._config.PHYSICAL_OFFSET else "(P)"
self.table_header(outfd,
[("Offset{0}".format(offsettype), "[addrpad]"),
("PID", ">8"),
("Port", ">6"),
("Proto", ">6"),
("Protocol", "15"),
("Address", "15"),
("Create Time", "")
])
for sock in data:
if not self._config.PHYSICAL_OFFSET:
offset = sock.obj_offset
else:
offset = sock.obj_vm.vtop(sock.obj_offset)
self.table_row(outfd, offset, sock.Pid, sock.LocalPort, sock.Protocol,
protos.protos.get(sock.Protocol.v(), "-"),
sock.LocalIpAddress, sock.CreateTime)
def calculate(self):
addr_space = utils.load_as(self._config)
if not self.is_valid_profile(addr_space.profile):
debug.error("This command does not support the selected profile.")
return win32.network.determine_sockets(addr_space)
| gpl-2.0 |
jackjansen/cerbero | cerbero/packages/packager.py | 21 | 2219 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.config import Distro, Platform
from cerbero.errors import FatalError
from cerbero.utils import _
from cerbero.utils import messages as m
_packagers = {}
def register_packager(distro, klass, distro_version=None):
if not distro in _packagers:
_packagers[distro] = {}
_packagers[distro][distro_version] = klass
class Packager (object):
def __new__(klass, config, package, store):
d = config.target_distro
v = config.target_distro_version
if d not in _packagers:
raise FatalError(_("No packager available for the distro %s" % d))
if v not in _packagers[d]:
# Be tolerant with the distro version
m.warning(_("No specific packager available for the distro "
"version %s, using generic packager for distro %s" % (v, d)))
v = None
if (d == Distro.WINDOWS and config.platform == Platform.LINUX):
m.warning("Cross-compiling for Windows, overriding Packager")
d = Distro.NONE
return _packagers[d][v](config, package, store)
from cerbero.packages import wix_packager, rpm, debian, android, disttarball
from cerbero.packages.osx import packager as osx_packager
wix_packager.register()
osx_packager.register()
rpm.register()
debian.register()
android.register()
disttarball.register()
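# Dispatch sketch (class names hypothetical): each register() call above ends
# up populating _packagers, e.g.
#   register_packager(Distro.DEBIAN, DebianPackager)          # any version
#   register_packager(Distro.DEBIAN, JessiePackager, '8.0')   # exact version
#   packager = Packager(config, package, store)
# Packager.__new__ acts as a factory: it looks up config.target_distro and
# config.target_distro_version, falls back to the distro_version=None entry
# with a warning, and returns an instance of the selected class, so callers
# never name a concrete packager.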
| lgpl-2.1 |
promptworks/horizon | openstack_dashboard/dashboards/project/database_backups/tables.py | 33 | 6058 | # Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.template import defaultfilters as d_filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from horizon.utils import filters
from openstack_dashboard import api
STATUS_CHOICES = (
("BUILDING", None),
("COMPLETED", True),
("DELETE_FAILED", False),
("FAILED", False),
("NEW", None),
("SAVING", None),
)
STATUS_DISPLAY_CHOICES = (
("BUILDING", pgettext_lazy("Current status of a Database Backup",
u"Building")),
("COMPLETED", pgettext_lazy("Current status of a Database Backup",
u"Completed")),
("DELETE_FAILED", pgettext_lazy("Current status of a Database Backup",
u"Delete Failed")),
("FAILED", pgettext_lazy("Current status of a Database Backup",
u"Failed")),
("NEW", pgettext_lazy("Current status of a Database Backup",
u"New")),
("SAVING", pgettext_lazy("Current status of a Database Backup",
u"Saving")),
)
class LaunchLink(tables.LinkAction):
name = "create"
verbose_name = _("Create Backup")
url = "horizon:project:database_backups:create"
classes = ("ajax-modal", "btn-create")
icon = "camera"
class RestoreLink(tables.LinkAction):
name = "restore"
verbose_name = _("Restore Backup")
url = "horizon:project:databases:launch"
classes = ("ajax-modal",)
icon = "cloud-upload"
def allowed(self, request, backup=None):
return backup.status == 'COMPLETED'
def get_link_url(self, datum):
url = reverse(self.url)
return url + '?backup=%s' % datum.id
class DownloadBackup(tables.LinkAction):
name = "download"
verbose_name = _("Download Backup")
url = 'horizon:project:containers:object_download'
classes = ("btn-download",)
def get_link_url(self, datum):
ref = datum.locationRef.split('/')
container_name = ref[5]
object_path = '/'.join(ref[6:])
return reverse(self.url,
kwargs={'container_name': container_name,
'object_path': object_path})
def allowed(self, request, datum):
return datum.status == 'COMPLETED'
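# Worked example of the locationRef parsing above (URL hypothetical):
#   'http://swift.example.com/v1/AUTH_abc/backup_container/backups/db1.gz'
# splits on '/' into
#   ['http:', '', 'swift.example.com', 'v1', 'AUTH_abc',
#    'backup_container', 'backups', 'db1.gz']
# so ref[5] is the Swift container ('backup_container') and ref[6:] rejoins
# into the object path ('backups/db1.gz').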
class DeleteBackup(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Backup",
u"Delete Backups",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Backup",
u"Deleted Backups",
count
)
def delete(self, request, obj_id):
api.trove.backup_delete(request, obj_id)
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, backup_id):
backup = api.trove.backup_get(request, backup_id)
try:
backup.instance = api.trove.instance_get(request,
backup.instance_id)
except Exception:
pass
return backup
def db_link(obj):
if not hasattr(obj, 'instance'):
return
if hasattr(obj.instance, 'name'):
return reverse(
'horizon:project:databases:detail',
kwargs={'instance_id': obj.instance_id})
def db_name(obj):
if not hasattr(obj, 'instance') or not hasattr(obj.instance, 'name'):
return obj.instance_id
return obj.instance.name
def get_datastore(obj):
if hasattr(obj, "datastore"):
return obj.datastore["type"]
return _("Not available")
def get_datastore_version(obj):
if hasattr(obj, "datastore"):
return obj.datastore["version"]
return _("Not available")
def is_incremental(obj):
return hasattr(obj, 'parent_id') and obj.parent_id is not None
class BackupsTable(tables.DataTable):
name = tables.Column("name",
link="horizon:project:database_backups:detail",
verbose_name=_("Name"))
datastore = tables.Column(get_datastore,
verbose_name=_("Datastore"))
datastore_version = tables.Column(get_datastore_version,
verbose_name=_("Datastore Version"))
created = tables.Column("created", verbose_name=_("Created"),
filters=[filters.parse_isotime])
instance = tables.Column(db_name, link=db_link,
verbose_name=_("Database"))
incremental = tables.Column(is_incremental,
verbose_name=_("Incremental"),
filters=(d_filters.yesno,
d_filters.capfirst))
status = tables.Column("status",
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
class Meta(object):
name = "backups"
verbose_name = _("Backups")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (LaunchLink, DeleteBackup)
row_actions = (RestoreLink, DownloadBackup, DeleteBackup)
| apache-2.0 |
eul721/The-Perfect-Pokemon-Team-Balancer | libs/env/Lib/encodings/mac_croatian.py | 593 | 13889 | """ Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-croatian',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u2206' # 0xB4 -> INCREMENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\uf8ff' # 0xD8 -> Apple logo
u'\xa9' # 0xD9 -> COPYRIGHT SIGN
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\xc6' # 0xDE -> LATIN CAPITAL LETTER AE
u'\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2013' # 0xE0 -> EN DASH
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u03c0' # 0xF9 -> GREEK SMALL LETTER PI
u'\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xe6' # 0xFE -> LATIN SMALL LETTER AE
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
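# Usage sketch -- the codec plugs into the normal string APIs once the
# encodings package registers it (mappings taken from the tables above):
#   u'\u017d'.encode('mac-croatian')   # -> '\xae' (Z WITH CARON)
#   '\xd8'.decode('mac-croatian')      # -> u'\uf8ff' (Apple logo)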
| gpl-2.0 |
salfab/CouchPotatoServer | libs/requests/packages/charade/sjisprober.py | 1182 | 3734 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "SHIFT_JIS"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
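# Usage sketch (charade-style probing loop; the buffer contents are
# hypothetical):
#   prober = SJISProber()
#   if prober.feed(byte_buffer) == constants.eFoundIt:
#       print prober.get_charset_name(), prober.get_confidence()
# feed() can be called repeatedly with successive chunks; the confidence is
# the max of the context and distribution analysers, as returned above.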
| gpl-3.0 |
ngpestelos/ansible | lib/ansible/plugins/callback/mail.py | 44 | 4994 | # -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import smtplib
import json
from ansible.utils.unicode import to_bytes
from ansible.plugins.callback import CallbackBase
def mail(subject='Ansible error mail', sender=None, to=None, cc=None, bcc=None, body=None, smtphost=None):
if sender is None:
sender='<root>'
if to is None:
to='root'
if smtphost is None:
smtphost=os.getenv('SMTPHOST', 'localhost')
if body is None:
body = subject
smtp = smtplib.SMTP(smtphost)
b_sender = to_bytes(sender)
b_to = to_bytes(to)
b_cc = to_bytes(cc)
b_bcc = to_bytes(bcc)
b_subject = to_bytes(subject)
b_body = to_bytes(body)
b_content = b'From: %s\n' % b_sender
b_content += b'To: %s\n' % b_to
if cc:
b_content += b'Cc: %s\n' % b_cc
b_content += b'Subject: %s\n\n' % b_subject
b_content += b_body
b_addresses = b_to.split(b',')
if b_cc:
b_addresses += b_cc.split(b',')
if b_bcc:
b_addresses += b_bcc.split(b',')
for b_address in b_addresses:
smtp.sendmail(b_sender, b_address, b_content)
smtp.quit()
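# For example (addresses hypothetical), mail(subject='Failed: ping',
# to='ops@example.com') issues one sendmail() per recipient with a raw
# payload along the lines of:
#   From: <root>
#   To: ops@example.com
#   Subject: Failed: ping
#
#   Failed: ping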
class CallbackModule(CallbackBase):
"""
This Ansible callback plugin mails errors to interested parties.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'mail'
CALLBACK_NEEDS_WHITELIST = True
def v2_runner_on_failed(self, res, ignore_errors=False):
host = res._host.get_name()
if ignore_errors:
return
sender = '"Ansible: %s" <root>' % host
attach = res._task.action
if 'invocation' in res._result:
attach = "%s: %s" % (res._result['invocation']['module_name'], json.dumps(res._result['invocation']['module_args']))
subject = 'Failed: %s' % attach
body = 'The following task failed for host ' + host + ':\n\n%s\n\n' % attach
if 'stdout' in res._result.keys() and res._result['stdout']:
subject = res._result['stdout'].strip('\r\n').split('\n')[-1]
body += 'with the following output in standard output:\n\n' + res._result['stdout'] + '\n\n'
if 'stderr' in res._result.keys() and res._result['stderr']:
subject = res._result['stderr'].strip('\r\n').split('\n')[-1]
body += 'with the following output in standard error:\n\n' + res._result['stderr'] + '\n\n'
if 'msg' in res._result.keys() and res._result['msg']:
subject = res._result['msg'].strip('\r\n').split('\n')[0]
body += 'with the following message:\n\n' + res._result['msg'] + '\n\n'
body += 'A complete dump of the error:\n\n' + self._dump_results(res._result)
mail(sender=sender, subject=subject, body=body)
def v2_runner_on_unreachable(self, result):
host = result._host.get_name()
res = result._result
sender = '"Ansible: %s" <root>' % host
if isinstance(res, basestring):
subject = 'Unreachable: %s' % res.strip('\r\n').split('\n')[-1]
body = 'An error occurred for host ' + host + ' with the following message:\n\n' + res
else:
subject = 'Unreachable: %s' % res['msg'].strip('\r\n').split('\n')[0]
body = 'An error occurred for host ' + host + ' with the following message:\n\n' + \
res['msg'] + '\n\nA complete dump of the error:\n\n' + str(res)
mail(sender=sender, subject=subject, body=body)
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
res = result._result
sender = '"Ansible: %s" <root>' % host
if isinstance(res, basestring):
subject = 'Async failure: %s' % res.strip('\r\n').split('\n')[-1]
body = 'An error occurred for host ' + host + ' with the following message:\n\n' + res
else:
subject = 'Async failure: %s' % res['msg'].strip('\r\n').split('\n')[0]
body = 'An error occurred for host ' + host + ' with the following message:\n\n' + \
res['msg'] + '\n\nA complete dump of the error:\n\n' + str(res)
mail(sender=sender, subject=subject, body=body)
| gpl-3.0 |
RubenKelevra/rethinkdb | packaging/ami/build-ami-files/firstrun_web.py | 46 | 1596 | import subprocess
import urllib2
# This web application gets launched by firstrun-init
# The form in settings.html posts to /action/set_password
# This application then launches firstrun.sh with the given parameters
def check_instance_id(pwd):
iid = urllib2.urlopen('http://169.254.169.254/latest/meta-data/instance-id').read()
# The instance id is 'i-' followed by some hex digits
return ( pwd == iid
or "i-" + pwd == iid)
def application(env, reply):
if env['PATH_INFO'] == '/action/set_password':
# Parse the query string from the POST data
query = {}
post_data = env['wsgi.input'].read(int(env['CONTENT_LENGTH']))
for elem in post_data.split('&'):
pair = elem.split('=', 1)
if len(pair) == 2:
query[pair[0]] = urllib2.unquote(pair[1])
if 'password' not in query or 'instanceid' not in query:
reply('302 Redirect', [('Location', '/')])
return []
if not check_instance_id(query['instanceid']):
reply('302 Redirect', [('Location', '/settings/?wrong_instance_id')])
return []
# Launch firstrun.sh
# bash -c is invoked in this contorted way to background the process
subprocess.call([
'bash', '-c', 'sleep 2; nohup bash firstrun.sh "$1" "$2" &', '-',
query['password'],
query.get('join', '')
])
reply('302 Redirect', [('Location', '/wait')])
return []
reply('404 Not Found', [])
return ["404 Not Found ", env['PATH_INFO']]
| agpl-3.0 |
jtoppins/beaker | IntegrationTests/src/bkr/inttest/server/selenium/test_recipetasks.py | 1 | 9452 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import datetime
import requests
from bkr.server.model import session, RecipeTaskComment, RecipeTaskResultComment
from bkr.inttest import data_setup, get_server_base, DatabaseTestCase
from bkr.inttest.server.requests_utils import post_json, login as requests_login
class RecipeTaskHTTPTest(DatabaseTestCase):
"""
Directly tests the HTTP interface for recipe tasks.
"""
def setUp(self):
with session.begin():
self.owner = data_setup.create_user(password='theowner')
self.job = data_setup.create_job(owner=self.owner)
self.recipe = self.job.recipesets[0].recipes[0]
self.recipetask = self.recipe.tasks[0]
def test_get_log(self):
with session.begin():
job = data_setup.create_completed_job(server_log=True)
recipe = job.recipesets[0].recipes[0]
task = recipe.tasks[0]
response = requests.get(get_server_base() +
'recipes/%s/tasks/%s/logs/tasks/dummy.txt' % (recipe.id, task.id),
allow_redirects=False)
self.assertEqual(response.status_code, 307)
self.assertEqual(response.headers['Location'],
'http://dummy-archive-server/beaker/tasks/dummy.txt')
def test_404_for_nonexistent_log(self):
with session.begin():
job = data_setup.create_completed_job(server_log=True)
recipe = job.recipesets[0].recipes[0]
task = recipe.tasks[0]
response = requests.get(get_server_base() +
'recipes/%s/tasks/%s/logs/doesnotexist.log' % (recipe.id, task.id),
allow_redirects=False)
self.assertEqual(response.status_code, 404)
self.assertRegexpMatches(response.text, 'Task log .* not found')
def test_get_recipetask_comments(self):
with session.begin():
commenter = data_setup.create_user(user_name=u'peter')
self.recipetask.comments.append(RecipeTaskComment(
user=commenter,
created=datetime.datetime(2015, 11, 5, 17, 0, 55),
comment=u'Microsoft and Red Hat to deliver new standard for '
u'enterprise cloud experiences'))
response = requests.get(get_server_base() +
'recipes/%s/tasks/%s/comments/' % (self.recipe.id, self.recipetask.id),
headers={'Accept': 'application/json'})
response.raise_for_status()
json = response.json()
self.assertEqual(len(json['entries']), 1)
self.assertEqual(json['entries'][0]['user']['user_name'], u'peter')
self.assertEqual(json['entries'][0]['created'], u'2015-11-05 17:00:55')
self.assertIn(u'Microsoft', json['entries'][0]['comment'])
def test_post_recipetask_comment(self):
s = requests.Session()
requests_login(s, user=self.owner, password=u'theowner')
response = post_json(get_server_base() +
'recipes/%s/tasks/%s/comments/' % (self.recipe.id, self.recipetask.id),
session=s, data={'comment': 'we unite on common solutions'})
response.raise_for_status()
with session.begin():
session.expire_all()
self.assertEqual(len(self.recipetask.comments), 1)
self.assertEqual(self.recipetask.comments[0].user, self.owner)
self.assertEqual(self.recipetask.comments[0].comment,
u'we unite on common solutions')
self.assertEqual(response.json()['id'],
self.recipetask.comments[0].id)
def test_empty_comment_is_rejected(self):
s = requests.Session()
requests_login(s, user=self.owner, password=u'theowner')
response = post_json(get_server_base() +
'recipes/%s/tasks/%s/comments/' % (self.recipe.id, self.recipetask.id),
session=s, data={'comment': None})
self.assertEqual(response.status_code, 400)
# whitespace-only comment also counts as empty
response = post_json(get_server_base() +
'recipes/%s/tasks/%s/comments/' % (self.recipe.id, self.recipetask.id),
session=s, data={'comment': ' '})
self.assertEqual(response.status_code, 400)
def test_anonymous_can_not_post_comment(self):
response = post_json(get_server_base() +
'recipes/%s/tasks/%s/comments/' % (self.recipe.id, self.recipetask.id),
data={'comment': 'testdata'})
self.assertEqual(response.status_code, 401)
class RecipeTaskResultHTTPTest(DatabaseTestCase):
"""
Directly tests the HTTP interface for recipe task results.
"""
def setUp(self):
with session.begin():
self.owner = data_setup.create_user(password='theowner')
self.job = data_setup.create_completed_job(owner=self.owner)
self.recipe = self.job.recipesets[0].recipes[0]
self.recipetask = self.recipe.tasks[0]
self.result = self.recipetask.results[0]
def test_get_log(self):
with session.begin():
job = data_setup.create_completed_job(server_log=True)
recipe = job.recipesets[0].recipes[0]
task = recipe.tasks[0]
result = task.results[0]
response = requests.get(get_server_base() +
'recipes/%s/tasks/%s/results/%s/logs/result.txt'
% (recipe.id, task.id, result.id), allow_redirects=False)
self.assertEqual(response.status_code, 307)
self.assertEqual(response.headers['Location'],
'http://dummy-archive-server/beaker/result.txt')
def test_404_for_nonexistent_log(self):
with session.begin():
job = data_setup.create_completed_job(server_log=True)
recipe = job.recipesets[0].recipes[0]
task = recipe.tasks[0]
result = task.results[0]
response = requests.get(get_server_base() +
'recipes/%s/tasks/%s/results/%s/logs/doesnotexist.log'
% (recipe.id, task.id, result.id), allow_redirects=False)
self.assertEqual(response.status_code, 404)
self.assertRegexpMatches(response.text, 'Result log .* not found')
def test_get_recipe_task_result_comments(self):
with session.begin():
commenter = data_setup.create_user(user_name=u'adam')
self.result.comments.append(RecipeTaskResultComment(
user=commenter,
created=datetime.datetime(2015, 11, 5, 17, 0, 55),
comment=u'Microsoft and Red Hat to deliver new standard for '
u'enterprise cloud experiences'))
response = requests.get(get_server_base() +
'recipes/%s/tasks/%s/results/%s/comments/'
% (self.recipe.id, self.recipetask.id, self.result.id),
headers={'Accept': 'application/json'})
response.raise_for_status()
json = response.json()
self.assertEqual(len(json['entries']), 1)
self.assertEqual(json['entries'][0]['user']['user_name'], u'adam')
self.assertEqual(json['entries'][0]['created'], u'2015-11-05 17:00:55')
self.assertIn(u'Microsoft', json['entries'][0]['comment'])
def test_post_recipe_task_result_comment(self):
s = requests.Session()
requests_login(s, user=self.owner, password=u'theowner')
response = post_json(get_server_base() +
'recipes/%s/tasks/%s/results/%s/comments/'
% (self.recipe.id, self.recipetask.id, self.result.id),
session=s, data={'comment': 'we unite on common solutions'})
response.raise_for_status()
with session.begin():
session.expire_all()
self.assertEqual(len(self.result.comments), 1)
self.assertEqual(self.result.comments[0].user, self.owner)
self.assertEqual(self.result.comments[0].comment,
u'we unite on common solutions')
self.assertEqual(response.json()['id'],
self.result.comments[0].id)
def test_empty_comment_is_rejected(self):
s = requests.Session()
requests_login(s, user=self.owner, password=u'theowner')
response = post_json(get_server_base() +
'recipes/%s/tasks/%s/results/%s/comments/'
% (self.recipe.id, self.recipetask.id, self.result.id),
session=s, data={'comment': None})
self.assertEqual(response.status_code, 400)
# whitespace-only comment also counts as empty
response = post_json(get_server_base() +
'recipes/%s/tasks/%s/results/%s/comments/'
% (self.recipe.id, self.recipetask.id, self.result.id),
session=s, data={'comment': ' '})
self.assertEqual(response.status_code, 400)
def test_anonymous_can_not_post_comment(self):
response = post_json(get_server_base() +
'recipes/%s/tasks/%s/results/%s/comments/'
% (self.recipe.id, self.recipetask.id, self.result.id),
data={'comment': 'testdata'})
self.assertEqual(response.status_code, 401)
| gpl-2.0 |
qnib/qcollect | resources/diamond/collectors/chronyd/test/testchronyd.py | 33 | 3277 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from chronyd import ChronydCollector
################################################################################
class TestChronydCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('ChronydCollector', {
})
self.collector = ChronydCollector(config, {})
def test_import(self):
self.assertTrue(ChronydCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_ip_addresses(self, publish_mock):
patch_collector = patch.object(
ChronydCollector,
'get_output',
Mock(return_value=self.getFixture(
'fedora').getvalue()))
patch_collector.start()
self.collector.collect()
patch_collector.stop()
metrics = {
'178_251_120_16.offset_ms': -7e-05,
'85_12_29_43.offset_ms': -0.785,
'85_234_197_3.offset_ms': 0.08,
'85_255_214_66.offset_ms': 0.386,
}
self.setDocExample(
collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
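    # Note the metric naming convention asserted above: each chrony source has
    # the dots in its address mapped to underscores (178.251.120.16 becomes
    # '178_251_120_16') and its offset is published in milliseconds.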
@patch.object(Collector, 'publish')
def test_should_work_with_fqdns(self, publish_mock):
patch_collector = patch.object(
ChronydCollector,
'get_output',
Mock(return_value=self.getFixture(
'fqdn').getvalue()))
patch_collector.start()
self.collector.collect()
patch_collector.stop()
metrics = {
'adm-dns-resolver-001.offset_ms': 0.000277,
'adm-dns-resolver-002.offset_ms': 0.456,
}
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_check_invalid_unit(self, publish_mock):
patch_collector = patch.object(
ChronydCollector,
'get_output',
Mock(return_value=self.getFixture(
'bad_unit').getvalue()))
patch_collector.start()
self.collector.collect()
patch_collector.stop()
metrics = {
'adm-dns-resolver-002.offset_ms': 0.456,
}
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_huge_values(self, publish_mock):
patch_collector = patch.object(
ChronydCollector,
'get_output',
Mock(return_value=self.getFixture(
'huge_vals').getvalue()))
patch_collector.start()
self.collector.collect()
patch_collector.stop()
metrics = {
'server1.offset_ms': 8735472000000,
'server2.offset_ms': -1009152000000,
}
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
fritz-k/django-wiki | wiki/management/commands/wikiviz.py | 13 | 16740 | #!/usr/bin/env python
"""Django model to DOT (Graphviz) converter
by Antonio Cavedoni <antonio@cavedoni.org>
edited as management script by Benjamin Bach <benjamin@overtag.dk>
Depends on the 'graphviz' package, i.e. 'apt-get install graphviz'
Example usage:
$ ./manage.py wikiviz wiki --inheritance | dot -Tpdf -o <filename>.pdf
Place this script in the management.commands package of your application or project.
options:
-h, --help
show this help message and exit.
-a, --all_applications
show models from all applications.
-d, --disable_fields
don't show the class member fields.
-g, --group_models
draw an enclosing box around models from the same app.
-i, --include_models=User,Person,Car
only include selected models in graph.
-n, --verbose_names
use verbose_name for field and models.
-l, --language
specify language used for verbose_name localization.
-x, --exclude_columns
exclude specific column(s) from the graph.
-X, --exclude_models
exclude specific model(s) from the graph.
-e, --inheritance
show inheritance arrows.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
__version__ = "0.99"
__svnid__ = "$Id$"
__license__ = "Python"
__author__ = "Antonio Cavedoni <http://cavedoni.com/>"
__contributors__ = [
"Stefano J. Attardi <http://attardi.org/>",
"limodou <http://www.donews.net/limodou/>",
"Carlo C8E Miron",
"Andre Campos <cahenan@gmail.com>",
"Justin Findlay <jfindlay@gmail.com>",
"Alexander Houben <alexander@houben.ch>",
"Bas van Oostveen <v.oostveen@gmail.com>",
"Joern Hees <gitdev@joernhees.de>",
"Benjamin Bach <benjamin@overtag.dk>",
]
import sys
import os
from django.core.management.base import BaseCommand
from optparse import make_option
from django.utils.translation import activate as activate_language
from django.utils.safestring import mark_safe
from django.template import Context, loader
from django.db import models
from django.db.models import get_models
from django.db.models.fields.related import \
ForeignKey, OneToOneField, ManyToManyField, RelatedField
try:
from django.db.models.fields.generic import GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericRelation
def parse_file_or_list(arg):
if not arg:
return []
    if ',' not in arg and os.path.isfile(arg):
return [e.strip() for e in open(arg).readlines()]
return arg.split(',')
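# A quick sketch of parse_file_or_list behaviour ('models.txt' is hypothetical):
#   parse_file_or_list('')            -> []
#   parse_file_or_list('User,Person') -> ['User', 'Person']
#   parse_file_or_list('models.txt')  -> the stripped lines of that file, when the
#                                        argument has no comma and names a real file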
def generate_dot(app_labels, **kwargs):
disable_fields = kwargs.get('disable_fields', False)
include_models = parse_file_or_list(kwargs.get('include_models', ""))
all_applications = kwargs.get('all_applications', False)
use_subgraph = kwargs.get('group_models', False)
verbose_names = kwargs.get('verbose_names', False)
inheritance = kwargs.get('inheritance', False)
language = kwargs.get('language', None)
if language is not None:
activate_language(language)
exclude_columns = parse_file_or_list(kwargs.get('exclude_columns', ""))
exclude_models = parse_file_or_list(kwargs.get('exclude_models', ""))
def skip_field(field):
if exclude_columns:
if verbose_names and field.verbose_name:
if field.verbose_name in exclude_columns:
return True
if field.name in exclude_columns:
return True
return False
t = loader.get_template_from_string("""
digraph name {
fontname = "Helvetica"
fontsize = 8
node [
fontname = "Helvetica"
fontsize = 8
shape = "plaintext"
]
edge [
fontname = "Helvetica"
fontsize = 8
]
""")
c = Context({})
dot = t.render(c)
apps = []
if all_applications:
apps = models.get_apps()
for app_label in app_labels:
app = models.get_app(app_label)
if app not in apps:
apps.append(app)
graphs = []
for app in apps:
graph = Context({
'name': '"%s"' % app.__name__,
'app_name': "%s" % '.'.join(app.__name__.split('.')[:-1]),
'cluster_app_name': "cluster_%s" % app.__name__.replace(".", "_"),
'disable_fields': disable_fields,
'use_subgraph': use_subgraph,
'models': []
})
appmodels = get_models(app)
abstract_models = []
for appmodel in appmodels:
abstract_models = abstract_models + [abstract_model
for abstract_model in appmodel.__bases__
if
hasattr(
abstract_model, '_meta') and
abstract_model._meta.abstract]
abstract_models = list(set(abstract_models)) # remove duplicates
appmodels = abstract_models + appmodels
for appmodel in appmodels:
appmodel_abstracts = [abstract_model.__name__
for abstract_model in appmodel.__bases__
if
hasattr(abstract_model, '_meta') and
abstract_model._meta.abstract]
# collect all attribs of abstract superclasses
def getBasesAbstractFields(c):
_abstract_fields = []
for e in c.__bases__:
if hasattr(e, '_meta') and e._meta.abstract:
_abstract_fields.extend(e._meta.fields)
_abstract_fields.extend(getBasesAbstractFields(e))
return _abstract_fields
abstract_fields = getBasesAbstractFields(appmodel)
model = {
'app_name': appmodel.__module__.replace(".", "_"),
'name': appmodel.__name__,
'abstracts': appmodel_abstracts,
'fields': [],
'relations': []
}
            # should the given model name be included in the graph?
def consider(model_name):
if exclude_models and model_name in exclude_models:
return False
return not include_models or model_name in include_models
if not consider(appmodel._meta.object_name):
continue
if verbose_names and appmodel._meta.verbose_name:
model['label'] = appmodel._meta.verbose_name
else:
model['label'] = model['name']
# model attributes
def add_attributes(field):
if verbose_names and field.verbose_name:
label = field.verbose_name
else:
label = field.name
t = type(field).__name__
if isinstance(field, (OneToOneField, ForeignKey)):
t += " ({0})".format(field.rel.field_name)
# TODO: ManyToManyField, GenericRelation
model['fields'].append({
'name': field.name,
'label': label,
'type': t,
'blank': field.blank,
'abstract': field in abstract_fields,
})
# Find all the real attributes. Relations are depicted as graph
# edges instead of attributes
attributes = [
field for field in appmodel._meta.local_fields if not isinstance(
field,
RelatedField)]
# find primary key and print it first, ignoring implicit id if
# other pk exists
pk = appmodel._meta.pk
if not appmodel._meta.abstract and pk in attributes:
add_attributes(pk)
for field in attributes:
if skip_field(field):
continue
if not field.primary_key:
add_attributes(field)
# FIXME: actually many_to_many fields aren't saved in this model's db table, so why should we add an attribute-line for them in the resulting graph?
# if appmodel._meta.many_to_many:
# for field in appmodel._meta.many_to_many:
# if skip_field(field):
# continue
# add_attributes(field)
# relations
def add_relation(field, extras=""):
if verbose_names and field.verbose_name:
label = field.verbose_name
else:
label = field.name
# show related field name
if hasattr(field, 'related_query_name'):
label += ' (%s)' % field.related_query_name()
# handle self-relationships
if field.rel.to == 'self':
target_model = field.model
else:
target_model = field.rel.to
_rel = {
'target_app': target_model.__module__.replace('.', '_'),
'target': target_model.__name__,
'type': type(field).__name__,
'name': field.name,
'label': label,
'arrows': extras,
'needs_node': True
}
if _rel not in model['relations'] and consider(_rel['target']):
model['relations'].append(_rel)
for field in appmodel._meta.local_fields:
# excluding field redundant with inheritance relation
if field.attname.endswith('_ptr_id'):
continue
# excluding fields inherited from abstract classes. they too
# show as local_fields
if field in abstract_fields:
continue
if skip_field(field):
continue
if isinstance(field, OneToOneField):
add_relation(field, '[arrowhead=none, arrowtail=none]')
elif isinstance(field, ForeignKey):
add_relation(field, '[arrowhead=none, arrowtail=dot]')
for field in appmodel._meta.local_many_to_many:
if skip_field(field):
continue
if isinstance(field, ManyToManyField):
if (getattr(field, 'creates_table', False) or # django 1.1.
(hasattr(field.rel.through, '_meta') and field.rel.through._meta.auto_created)): # django 1.2
add_relation(
field,
'[arrowhead=dot arrowtail=dot, dir=both]')
elif isinstance(field, GenericRelation):
add_relation(
field,
mark_safe('[style="dotted", arrowhead=normal, arrowtail=normal, dir=both]'))
if inheritance:
# add inheritance arrows
for parent in appmodel.__bases__:
if hasattr(parent, "_meta"): # parent is a model
l = "multi-table"
if parent._meta.abstract:
l = "abstract"
if appmodel._meta.proxy:
l = "proxy"
l += r"\ninheritance"
_rel = {
'target_app': parent.__module__.replace(".", "_"),
'target': parent.__name__,
'type': "inheritance",
'name': "inheritance",
                            'label': label,
'arrows': '[arrowhead=empty, arrowtail=none]',
'needs_node': True
}
# TODO: seems as if abstract models aren't part of
# models.getModels, which is why they are printed by
# this without any attributes.
if _rel not in model[
'relations'] and consider(_rel['target']):
model['relations'].append(_rel)
graph['models'].append(model)
graphs.append(graph)
nodes = []
for graph in graphs:
nodes.extend([e['name'] for e in graph['models']])
for graph in graphs:
        # don't draw duplicate nodes for relation targets that already have a node
for model in graph['models']:
for relation in model['relations']:
if relation['target'] in nodes:
relation['needs_node'] = False
# render templates
t = loader.get_template_from_string("""{% if use_subgraph %}
subgraph {{ cluster_app_name }} {
label=<
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER"
><FONT FACE="Helvetica Bold" COLOR="Black" POINT-SIZE="12"
>{{ app_name }}</FONT></TD></TR>
</TABLE>
>
color=olivedrab4
style="rounded"
{% endif %}
{% for model in models %}
{{ model.app_name }}_{{ model.name }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ model.label }}{% if model.abstracts %}<BR/><<FONT FACE="Helvetica Italic">{{ model.abstracts|join:"," }}</FONT>>{% endif %}</FONT></TD></TR>
{% if not disable_fields %}
{% for field in model.fields %}
<TR><TD ALIGN="LEFT" BORDER="0"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica {% if field.abstract %}Italic{% else %}Bold{% endif %}">{{ field.label }}</FONT
></TD>
<TD ALIGN="LEFT"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica {% if field.abstract %}Italic{% else %}Bold{% endif %}">{{ field.type }}</FONT
></TD></TR>
{% endfor %}
{% endif %}
</TABLE>
>]
{% endfor %}
{% if use_subgraph %}
}
{% endif %}""")
dot += '\n' + t.render(graph)
for graph in graphs:
t = loader.get_template_from_string("""{% for model in models %}
{% for relation in model.relations %}
{% if relation.needs_node %}
{{ relation.target_app }}_{{ relation.target }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ relation.target }}</FONT></TD></TR>
</TABLE>
>]
{% endif %}
{{ model.app_name }}_{{ model.name }} -> {{ relation.target_app }}_{{ relation.target }}
[label="{{ relation.label }}"] {{ relation.arrows }};
{% endfor %}
{% endfor %}""")
dot += '\n' + t.render(graph)
t = loader.get_template_from_string("}")
c = Context({})
dot += '\n' + t.render(c)
return dot
class Command(BaseCommand):
args = ('--dummy')
help = 'Create a graph of your app!' # @ReservedAssignment
option_list = BaseCommand.option_list + (
make_option(
'--all_applications', '-a',
dest='all_applications',
default=False,
action='store_true',
help='Include all applications'),
        make_option(
            '--disable_fields', '-d',
            action='store_true',
            dest='disable_fields',
            default=False,
            help="don't show the class member fields"),
        make_option(
            '--group_models', '-g',
            action='store_true',
            dest='group_models',
            default=False,
            help='draw an enclosing box around models from the same app'),
        make_option(
            '--include_models', '-i',
            action='store',
            dest='include_models',
            help='only include selected models in graph'),
        make_option(
            '--verbose_names', '-n',
            action='store_true',
            dest='verbose_names',
            default=False,
            help='use verbose_name for fields and models'),
        make_option(
            '--language', '-l',
            action='store',
            dest='language',
            help='specify language used for verbose_name localization'),
        make_option(
            '--exclude_columns', '-x',
            action='store',
            dest='exclude_columns',
            help='exclude specific column(s) from the graph'),
        make_option(
            '--exclude_models', '-X',
            action='store',
            dest='exclude_models',
            help='exclude specific model(s) from the graph'),
        make_option(
            '--inheritance', '-e',
            action='store_true',
            dest='inheritance',
            help='show inheritance arrows'),)
def handle(self, *args, **options):
if not args and not options.get('all_applications', False):
print(__doc__)
sys.exit()
print(generate_dot(args, **options))
| gpl-3.0 |
azumimuo/family-xbmc-addon | plugin.video.sanctuary/lib/Pandora.py | 1 | 5774 | '''This section was kindly donated by the dev of the addon; join his FB group to say thanks for this amazing section - https://www.facebook.com/groups/120495048374613/'''
import re, xbmcplugin, xbmcgui, process, base64, sys, urllib
addon_handle = int(sys.argv[1])
Dialog = xbmcgui.Dialog()
Decode = base64.decodestring
CAT=Decode('LnBocA==')
Base_Pand = (Decode('aHR0cDovL2dlbmlldHZjdW50cy5jby51ay9QYW5zQm94L09SSUdJTlMv'))
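# The base64 blobs above just keep the endpoint out of plain text: CAT decodes to
# the '.php' suffix and Base_Pand to the upstream PansBox base URL.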
def Pandora_Main():
process.Menu('[COLOR darkgoldenrod][I]Open Pandora\'s Box[/I][/COLOR]','',901,'https://s32.postimg.org/ov9s6ipf9/icon.png','','','')
process.Menu('[COLOR darkgoldenrod][I]Search[/I][/COLOR]','',903,'http://icons.iconarchive.com/icons/icontexto/search/256/search-red-dark-icon.png','','','')
xbmcplugin.setContent(addon_handle, 'movies')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def Search_Menu():
process.Menu('[COLOR darkgoldenrod][I]Search Pandoras Films[/I][/COLOR]','',904,'http://icons.iconarchive.com/icons/icontexto/search/256/search-red-dark-icon.png','','','')
process.Menu('[COLOR darkgoldenrod][I]Search Pandoras TV[/I][/COLOR]','',905,'http://icons.iconarchive.com/icons/icontexto/search/256/search-red-dark-icon.png','','','')
xbmcplugin.setContent(addon_handle, 'movies')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def Pandoras_Box():
html=process.OPEN_URL(Base_Pand +Decode('c3BvbmdlbWFpbi5waHA='))
match = re.compile('<item>.+?<title>(.+?)</title>.+?<description>(.+?)</description>.+?<link>(.+?)</link>.+?<thumbnail>(.+?)</thumbnail>.+?<fanart>(.+?)</fanart>.+?<mode>(.+?)</mode>.+?</item>',re.DOTALL).findall(html)
for name,desc,url,img,fanart,mode in match:
process.Menu(name,url,mode,img,fanart,desc,'')
xbmcplugin.setContent(addon_handle, 'movies')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def Pandora_Menu(url):
link = process.OPEN_URL(url)
match=re.compile('<a href="(.+?)" target="_blank"><img src="(.+?)" style="max-width:200px;" /><description = "(.+?)" /><background = "(.+?)" </background></a><br><b>(.+?)</b>').findall(link)
for url,iconimage,desc,background,name in match:
process.Play(name,url,906,iconimage,background,desc,'')
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_TITLE);
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def Search_Pandoras_Films():
Search_Name = Dialog.input('Search', type=xbmcgui.INPUT_ALPHANUM)
Search_Title = Search_Name.lower()
filenames = ['hey1080p','hey3D','hey','480p','720p','1080p','mova', 'movb', 'movc', 'movd', 'move', 'movf', 'movg', 'movh', 'movi', 'movj', 'movk', 'movl', 'movm', 'movn', 'movo', 'movp', 'movq', 'movr', 'movs', 'movt', 'movu', 'movv', 'movw', 'movx', 'movy', 'movz','720paction','720padventure','720panimation','720pcomedy','720pcrime','720pdocumentary','720pdrama','720pfamily','720pfantasy','720phorror','720pmystery','720promance','720psci-Fi','720psport','720pthriller','720pwestern','1080paction','1080padventure','1080panimation','1080pcomedy','1080pcrime','1080pdocumentary','1080pdrama','1080pfamily','1080pfantasy','1080phorror','1080pmystery','1080promance','1080psci-Fi','1080psport','1080pthriller','1080pwestern','top10action','top10animation','top10biography','top10comedy','top10crime','top10documentary','top10drama','top10family','top10fantasy','top10horror','top10music','top10mystery','top10romance','top10sci-fi','top10sport','top10thriller','top10western']
for file_Name in filenames:
search_URL = Base_Pand + file_Name + CAT
HTML = process.OPEN_URL(search_URL)
if HTML != 'Opened':
match=re.compile('<a href="(.+?)" target="_blank"><img src="(.+?)" style="max-width:200px;" /><description = "(.+?)" /><background = "(.+?)" </background></a><br><b>(.+?)</b>').findall(HTML)
for url,iconimage,desc,fanart,name in match:
				if Search_Title in name.lower():
process.Play(name,url,906,iconimage,fanart,desc,'')
xbmcplugin.setContent(addon_handle, 'movies')
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_TITLE);
def Search_Pandoras_TV():
Search_Name = Dialog.input('Search', type=xbmcgui.INPUT_ALPHANUM)
Search_Title = Search_Name.lower()
filenames = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
for file_Name in filenames:
search_URL2 = Base_Pand + file_Name + CAT
HTML = process.OPEN_URL(search_URL2)
if HTML != 'Opened':
match = re.compile('<item>.+?<title>(.+?)</title>.+?<description>(.+?)</description>.+?<link>(.+?)</link>.+?<thumbnail>(.+?)</thumbnail>.+?<fanart>(.+?)</fanart>.+?<mode>(.+?)</mode>.+?</item>',re.DOTALL).findall(HTML)
for name,desc,url,img,fanart,mode in match:
				if Search_Title in name.lower():
process.Menu(name,url,mode,img,fanart,desc,'')
xbmcplugin.setContent(addon_handle, 'movies')
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_TITLE);
def open_Menu(url):
html=process.OPEN_URL(url)
match = re.compile('<item>.+?<title>(.+?)</title>.+?<description>(.+?)</description>.+?<link>(.+?)</link>.+?<thumbnail>(.+?)</thumbnail>.+?<fanart>(.+?)</fanart>.+?<mode>(.+?)</mode>.+?</item>',re.DOTALL).findall(html)
for name,desc,url,img,fanart,mode in match:
process.Menu(name,url,mode,img,fanart,desc,'')
xbmcplugin.setContent(addon_handle, 'movies')
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_TITLE);
xbmcplugin.endOfDirectory(int(sys.argv[1])) | gpl-2.0 |
philanthropy-u/edx-platform | openedx/core/djangoapps/content/course_overviews/models.py | 1 | 36633 | """
Declaration of CourseOverview model
"""
import json
import logging
from urlparse import urlparse, urlunparse
from django.conf import settings
from django.db import models, transaction
from django.db.models.fields import BooleanField, DateTimeField, DecimalField, TextField, FloatField, IntegerField
from django.db.utils import IntegrityError
from django.template import defaultfilters
from ccx_keys.locator import CCXLocator
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField, UsageKeyField
from six import text_type
from config_models.models import ConfigurationModel
from lms.djangoapps import django_comment_client
from openedx.core.djangoapps.catalog.models import CatalogIntegration
from openedx.core.djangoapps.lang_pref.api import get_closest_released_language
from openedx.core.djangoapps.models.course_details import CourseDetails
from static_replace.models import AssetBaseUrlConfig
from xmodule import course_metadata_utils, block_metadata_utils
from xmodule.course_module import CourseDescriptor, DEFAULT_START_DATE
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
class CourseOverview(TimeStampedModel):
"""
Model for storing and caching basic information about a course.
This model contains basic course metadata such as an ID, display name,
image URL, and any other information that would be necessary to display
a course as part of:
user dashboard (enrolled courses)
course catalog (courses to enroll in)
course about (meta data about the course)
"""
class Meta(object):
app_label = 'course_overviews'
# IMPORTANT: Bump this whenever you modify this model and/or add a migration.
VERSION = 6
# Cache entry versioning.
version = IntegerField()
# Course identification
id = CourseKeyField(db_index=True, primary_key=True, max_length=255)
_location = UsageKeyField(max_length=255)
org = TextField(max_length=255, default='outdated_entry')
display_name = TextField(null=True)
display_number_with_default = TextField()
display_org_with_default = TextField()
# Start/end dates
start = DateTimeField(null=True)
end = DateTimeField(null=True)
advertised_start = TextField(null=True)
announcement = DateTimeField(null=True)
# URLs
course_image_url = TextField()
social_sharing_url = TextField(null=True)
end_of_course_survey_url = TextField(null=True)
# Certification data
certificates_display_behavior = TextField(null=True)
certificates_show_before_end = BooleanField(default=False)
cert_html_view_enabled = BooleanField(default=False)
has_any_active_web_certificate = BooleanField(default=False)
cert_name_short = TextField()
cert_name_long = TextField()
certificate_available_date = DateTimeField(default=None, null=True)
# Grading
lowest_passing_grade = DecimalField(max_digits=5, decimal_places=2, null=True)
# Access parameters
days_early_for_beta = FloatField(null=True)
mobile_available = BooleanField(default=False)
visible_to_staff_only = BooleanField(default=False)
_pre_requisite_courses_json = TextField() # JSON representation of list of CourseKey strings
# Enrollment details
enrollment_start = DateTimeField(null=True)
enrollment_end = DateTimeField(null=True)
enrollment_domain = TextField(null=True)
invitation_only = BooleanField(default=False)
max_student_enrollments_allowed = IntegerField(null=True)
# Catalog information
catalog_visibility = TextField(null=True)
short_description = TextField(null=True)
course_video_url = TextField(null=True)
effort = TextField(null=True)
self_paced = BooleanField(default=False)
marketing_url = TextField(null=True)
eligible_for_financial_aid = BooleanField(default=True)
language = TextField(null=True)
@classmethod
def _create_or_update(cls, course):
"""
Creates or updates a CourseOverview object from a CourseDescriptor.
Does not touch the database, simply constructs and returns an overview
from the given course.
Arguments:
course (CourseDescriptor): any course descriptor object
Returns:
CourseOverview: created or updated overview extracted from the given course
"""
from lms.djangoapps.certificates.api import get_active_web_certificate
from openedx.core.lib.courses import course_image_url
# Workaround for a problem discovered in https://openedx.atlassian.net/browse/TNL-2806.
# If the course has a malformed grading policy such that
# course._grading_policy['GRADE_CUTOFFS'] = {}, then
# course.lowest_passing_grade will raise a ValueError.
# Work around this for now by defaulting to None.
try:
lowest_passing_grade = course.lowest_passing_grade
except ValueError:
lowest_passing_grade = None
display_name = course.display_name
start = course.start
end = course.end
max_student_enrollments_allowed = course.max_student_enrollments_allowed
if isinstance(course.id, CCXLocator):
from lms.djangoapps.ccx.utils import get_ccx_from_ccx_locator
ccx = get_ccx_from_ccx_locator(course.id)
display_name = ccx.display_name
start = ccx.start
end = ccx.due
max_student_enrollments_allowed = ccx.max_student_enrollments_allowed
course_overview = cls.objects.filter(id=course.id)
if course_overview.exists():
log.info('Updating course overview for %s.', unicode(course.id))
course_overview = course_overview.first()
else:
log.info('Creating course overview for %s.', unicode(course.id))
course_overview = cls()
course_overview.version = cls.VERSION
course_overview.id = course.id
course_overview._location = course.location
course_overview.org = course.location.org
course_overview.display_name = display_name
course_overview.display_number_with_default = course.display_number_with_default
course_overview.display_org_with_default = course.display_org_with_default
course_overview.start = start
course_overview.end = end
course_overview.advertised_start = course.advertised_start
course_overview.announcement = course.announcement
course_overview.course_image_url = course_image_url(course)
course_overview.social_sharing_url = course.social_sharing_url
course_overview.certificates_display_behavior = course.certificates_display_behavior
course_overview.certificates_show_before_end = course.certificates_show_before_end
course_overview.cert_html_view_enabled = course.cert_html_view_enabled
course_overview.has_any_active_web_certificate = (get_active_web_certificate(course) is not None)
course_overview.cert_name_short = course.cert_name_short
course_overview.cert_name_long = course.cert_name_long
course_overview.certificate_available_date = course.certificate_available_date
course_overview.lowest_passing_grade = lowest_passing_grade
course_overview.end_of_course_survey_url = course.end_of_course_survey_url
course_overview.days_early_for_beta = course.days_early_for_beta
course_overview.mobile_available = course.mobile_available
course_overview.visible_to_staff_only = course.visible_to_staff_only
course_overview._pre_requisite_courses_json = json.dumps(course.pre_requisite_courses)
course_overview.enrollment_start = course.enrollment_start
course_overview.enrollment_end = course.enrollment_end
course_overview.enrollment_domain = course.enrollment_domain
course_overview.invitation_only = course.invitation_only
course_overview.max_student_enrollments_allowed = max_student_enrollments_allowed
course_overview.catalog_visibility = course.catalog_visibility
course_overview.short_description = CourseDetails.fetch_about_attribute(course.id, 'short_description')
course_overview.effort = CourseDetails.fetch_about_attribute(course.id, 'effort')
course_overview.course_video_url = CourseDetails.fetch_video_url(course.id)
course_overview.self_paced = course.self_paced
if not CatalogIntegration.is_enabled():
course_overview.language = course.language
return course_overview
@classmethod
def load_from_module_store(cls, course_id):
"""
Load a CourseDescriptor, create or update a CourseOverview from it, cache the
overview, and return it.
Arguments:
course_id (CourseKey): the ID of the course overview to be loaded.
Returns:
CourseOverview: overview of the requested course.
Raises:
- CourseOverview.DoesNotExist if the course specified by course_id
was not found.
- IOError if some other error occurs while trying to load the
course from the module store.
"""
store = modulestore()
with store.bulk_operations(course_id):
course = store.get_course(course_id)
if isinstance(course, CourseDescriptor):
course_overview = cls._create_or_update(course)
try:
with transaction.atomic():
course_overview.save()
# Remove and recreate all the course tabs
CourseOverviewTab.objects.filter(course_overview=course_overview).delete()
CourseOverviewTab.objects.bulk_create([
CourseOverviewTab(tab_id=tab.tab_id, course_overview=course_overview)
for tab in course.tabs
])
# Remove and recreate course images
CourseOverviewImageSet.objects.filter(course_overview=course_overview).delete()
CourseOverviewImageSet.create(course_overview, course)
except IntegrityError:
# There is a rare race condition that will occur if
# CourseOverview.get_from_id is called while a
# another identical overview is already in the process
# of being created.
# One of the overviews will be saved normally, while the
# other one will cause an IntegrityError because it tries
# to save a duplicate.
# (see: https://openedx.atlassian.net/browse/TNL-2854).
log.error("IntegrityError error for '%s'" % course_id)
pass
except Exception:
log.exception(
"CourseOverview for course %s failed!",
course_id,
)
raise
return course_overview
elif course is not None:
                raise IOError(
                    "Error while loading course {} from the module store: {}".format(
                        unicode(course_id),
                        course.error_msg if isinstance(course, ErrorDescriptor) else unicode(course),
                    )
                )
else:
raise cls.DoesNotExist()
@classmethod
def get_from_id(cls, course_id):
"""
Load a CourseOverview object for a given course ID.
First, we try to load the CourseOverview from the database. If it
doesn't exist, we load the entire course from the modulestore, create a
CourseOverview object from it, and then cache it in the database for
future use.
Arguments:
course_id (CourseKey): the ID of the course overview to be loaded.
Returns:
CourseOverview: overview of the requested course.
Raises:
- CourseOverview.DoesNotExist if the course specified by course_id
was not found.
- IOError if some other error occurs while trying to load the
course from the module store.
"""
try:
course_overview = cls.objects.select_related('image_set').get(id=course_id)
if course_overview.version < cls.VERSION:
# Throw away old versions of CourseOverview, as they might contain stale data.
course_overview.delete()
course_overview = None
except cls.DoesNotExist:
course_overview = None
# Regenerate the thumbnail images if they're missing (either because
# they were never generated, or because they were flushed out after
# a change to CourseOverviewImageConfig.
if course_overview and not hasattr(course_overview, 'image_set'):
CourseOverviewImageSet.create(course_overview)
return course_overview or cls.load_from_module_store(course_id)
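    # Typical read path, as a sketch (``course_key`` is a hypothetical CourseKey):
    #   overview = CourseOverview.get_from_id(course_key)
    #   overview.display_name  # served from the cached row; the modulestore is
    #                          # only consulted on a miss or after a VERSION bump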
@classmethod
def get_from_ids_if_exists(cls, course_ids):
"""
Return a dict mapping course_ids to CourseOverviews, if they exist.
This method will *not* generate new CourseOverviews or delete outdated
ones. It exists only as a small optimization used when CourseOverviews
are known to exist, for common situations like the student dashboard.
Callers should assume that this list is incomplete and fall back to
get_from_id if they need to guarantee CourseOverview generation.
"""
return {
overview.id: overview
for overview
in cls.objects.select_related('image_set').filter(
id__in=course_ids,
version__gte=cls.VERSION
)
}
@classmethod
def get_from_id_if_exists(cls, course_id):
"""
Return a CourseOverview for the provided course_id if it exists.
Returns None if no CourseOverview exists with the provided course_id
This method will *not* generate new CourseOverviews or delete outdated
ones. It exists only as a small optimization used when CourseOverviews
are known to exist, for common situations like the student dashboard.
Callers should assume that this list is incomplete and fall back to
get_from_id if they need to guarantee CourseOverview generation.
"""
try:
course_overview = cls.objects.select_related('image_set').get(
id=course_id,
version__gte=cls.VERSION
)
except cls.DoesNotExist:
course_overview = None
return course_overview
def clean_id(self, padding_char='='):
"""
Returns a unique deterministic base32-encoded ID for the course.
Arguments:
padding_char (str): Character used for padding at end of base-32
-encoded string, defaulting to '='
"""
return course_metadata_utils.clean_course_key(self.location.course_key, padding_char)
@property
def location(self):
"""
Returns the UsageKey of this course.
UsageKeyField has a strange behavior where it fails to parse the "run"
of a course out of the serialized form of a Mongo Draft UsageKey. This
method is a wrapper around _location attribute that fixes the problem
by calling map_into_course, which restores the run attribute.
"""
if self._location.run is None:
self._location = self._location.map_into_course(self.id)
return self._location
@property
def number(self):
"""
Returns this course's number.
This is a "number" in the sense of the "course numbers" that you see at
lots of universities. For example, given a course
"Intro to Computer Science" with the course key "edX/CS-101/2014", the
course number would be "CS-101"
"""
return course_metadata_utils.number_for_course_location(self.location)
@property
def url_name(self):
"""
Returns this course's URL name.
"""
return block_metadata_utils.url_name_for_block(self)
@property
def display_name_with_default(self):
"""
Return reasonable display name for the course.
"""
return block_metadata_utils.display_name_with_default(self)
@property
def display_name_with_default_escaped(self):
"""
DEPRECATED: use display_name_with_default
Return html escaped reasonable display name for the course.
Note: This newly introduced method should not be used. It was only
introduced to enable a quick search/replace and the ability to slowly
migrate and test switching to display_name_with_default, which is no
longer escaped.
"""
return block_metadata_utils.display_name_with_default_escaped(self)
@property
def dashboard_start_display(self):
"""
        Return start date to display on learner's dashboard, preferably `Course Advertised Start`
"""
return self.advertised_start or self.start
def has_started(self):
"""
        Returns whether the course has started.
"""
return course_metadata_utils.has_course_started(self.start)
def has_ended(self):
"""
Returns whether the course has ended.
"""
return course_metadata_utils.has_course_ended(self.end)
def has_marketing_url(self):
"""
Returns whether the course has marketing url.
"""
return settings.FEATURES.get('ENABLE_MKTG_SITE') and bool(self.marketing_url)
def has_social_sharing_url(self):
"""
Returns whether the course has social sharing url.
"""
is_social_sharing_enabled = getattr(settings, 'SOCIAL_SHARING_SETTINGS', {}).get('CUSTOM_COURSE_URLS')
return is_social_sharing_enabled and bool(self.social_sharing_url)
def starts_within(self, days):
"""
        Returns True if the course starts within the given number of days, otherwise returns False.
"""
return course_metadata_utils.course_starts_within(self.start, days)
@property
def start_date_is_still_default(self):
"""
Checks if the start date set for the course is still default, i.e.
.start has not been modified, and .advertised_start has not been set.
"""
return course_metadata_utils.course_start_date_is_default(
self.start,
self.advertised_start,
)
@property
def sorting_score(self):
"""
        Returns a tuple that can be used to sort the courses according
        to how "new" they are. The "newness" score is computed using a
heuristic that takes into account the announcement and
(advertised) start dates of the course if available.
The lower the number the "newer" the course.
"""
return course_metadata_utils.sorting_score(self.start, self.advertised_start, self.announcement)
@property
def start_type(self):
"""
Returns the type of the course's 'start' field.
"""
if self.advertised_start:
return u'string'
elif self.start != DEFAULT_START_DATE:
return u'timestamp'
else:
return u'empty'
@property
def start_display(self):
"""
Returns the display value for the course's start date.
"""
if self.advertised_start:
return self.advertised_start
elif self.start != DEFAULT_START_DATE:
return defaultfilters.date(self.start, "DATE_FORMAT")
else:
return None
def may_certify(self):
"""
Returns whether it is acceptable to show the student a certificate
download link.
"""
return course_metadata_utils.may_certify_for_course(
self.certificates_display_behavior,
self.certificates_show_before_end,
self.has_ended(),
self.certificate_available_date,
self.self_paced
)
@property
def pre_requisite_courses(self):
"""
Returns a list of ID strings for this course's prerequisite courses.
"""
return json.loads(self._pre_requisite_courses_json)
@pre_requisite_courses.setter
def pre_requisite_courses(self, value):
"""
Django requires there be a setter for this, but it is not
necessary for the way we currently use it. Due to the way
CourseOverviews are constructed raising errors here will
cause a lot of issues. These should not be mutable after
construction, so for now we just eat this.
"""
pass
@classmethod
def update_select_courses(cls, course_keys, force_update=False):
"""
A side-effecting method that updates CourseOverview objects for
the given course_keys.
Arguments:
course_keys (list[CourseKey]): Identifies for which courses to
return CourseOverview objects.
force_update (boolean): Optional parameter that indicates
whether the requested CourseOverview objects should be
forcefully updated (i.e., re-synched with the modulestore).
"""
log.info('Generating course overview for %d courses.', len(course_keys))
log.debug('Generating course overview(s) for the following courses: %s', course_keys)
action = CourseOverview.load_from_module_store if force_update else CourseOverview.get_from_id
for course_key in course_keys:
try:
action(course_key)
except Exception as ex: # pylint: disable=broad-except
log.exception(
'An error occurred while generating course overview for %s: %s',
unicode(course_key),
text_type(ex),
)
log.info('Finished generating course overviews.')
@classmethod
def get_all_courses(cls, orgs=None, filter_=None, exclude_=None):
"""
Returns all CourseOverview objects in the database.
Arguments:
orgs (list[string]): Optional parameter that allows case-insensitive
filtering by organization.
filter_ (dict): Optional parameter that allows custom filtering.
exclude_ (dict): Optional parameter that allows custom excludes.
"""
# Note: If a newly created course is not returned in this QueryList,
# make sure the "publish" signal was emitted when the course was
# created. For tests using CourseFactory, use emit_signals=True.
course_overviews = CourseOverview.objects.all()
if orgs:
# In rare cases, courses belonging to the same org may be accidentally assigned
# an org code with a different casing (e.g., Harvardx as opposed to HarvardX).
# Case-insensitive matching allows us to deal with this kind of dirty data.
course_overviews = course_overviews.filter(org__iregex=r'(' + '|'.join(orgs) + ')')
if filter_:
course_overviews = course_overviews.filter(**filter_)
if exclude_:
course_overviews = course_overviews.exclude(**exclude_)
return course_overviews
@classmethod
def get_all_course_keys(cls):
"""
Returns all course keys from course overviews.
"""
return CourseOverview.objects.values_list('id', flat=True)
def is_discussion_tab_enabled(self):
"""
Returns True if course has discussion tab and is enabled
"""
tabs = self.tabs.all()
        # a module-level import would create a circular import; hence is_discussion_enabled is referenced explicitly here
for tab in tabs:
if tab.tab_id == "discussion" and django_comment_client.utils.is_discussion_enabled(self.id):
return True
return False
@property
def image_urls(self):
"""
Return a dict with all known URLs for this course image.
Current resolutions are:
raw = original upload from the user
small = thumbnail with dimensions CourseOverviewImageConfig.current().small
large = thumbnail with dimensions CourseOverviewImageConfig.current().large
If no thumbnails exist, the raw (originally uploaded) image will be
returned for all resolutions.
"""
# This is either the raw image that the course team uploaded, or the
# settings.DEFAULT_COURSE_ABOUT_IMAGE_URL if they didn't specify one.
raw_image_url = self.course_image_url
# Default all sizes to return the raw image if there is no
# CourseOverviewImageSet associated with this CourseOverview. This can
# happen because we're disabled via CourseOverviewImageConfig.
urls = {
'raw': raw_image_url,
'small': raw_image_url,
'large': raw_image_url,
}
# If we do have a CourseOverviewImageSet, we still default to the raw
# images if our thumbnails are blank (might indicate that there was a
# processing error of some sort while trying to generate thumbnails).
if hasattr(self, 'image_set') and CourseOverviewImageConfig.current().enabled:
urls['small'] = self.image_set.small_url or raw_image_url
urls['large'] = self.image_set.large_url or raw_image_url
return self.apply_cdn_to_urls(urls)
@property
def pacing(self):
""" Returns the pacing for the course.
Potential values:
self: Self-paced courses
instructor: Instructor-led courses
"""
return 'self' if self.self_paced else 'instructor'
@property
def closest_released_language(self):
"""
        Returns the language code that most closely matches this course's language and is fully
supported by the LMS, or None if there are no fully supported languages that
match the target.
"""
return get_closest_released_language(self.language) if self.language else None
def apply_cdn_to_urls(self, image_urls):
"""
Given a dict of resolutions -> urls, return a copy with CDN applied.
If CDN does not exist or is disabled, just returns the original. The
URLs that we store in CourseOverviewImageSet are all already top level
paths, so we don't need to go through the /static remapping magic that
happens with other course assets. We just need to add the CDN server if
appropriate.
"""
cdn_config = AssetBaseUrlConfig.current()
if not cdn_config.enabled:
return image_urls
base_url = cdn_config.base_url
return {
resolution: self._apply_cdn_to_url(url, base_url)
for resolution, url in image_urls.items()
}
def _apply_cdn_to_url(self, url, base_url):
"""
Applies a new CDN/base URL to the given URL.
If a URL is absolute, we skip switching the host since it could
be a hostname that isn't behind our CDN, and we could unintentionally
break the URL overall.
"""
        # An empty URL is returned as-is.
if not url:
return url
_, netloc, path, params, query, fragment = urlparse(url)
# If this is an absolute URL, just return it as is. It could be a domain
# that isn't ours, and thus CDNing it would actually break it.
if netloc:
return url
return urlunparse((None, base_url, path, params, query, fragment))
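    # Sketch of the rewrite above with hypothetical values: given base_url
    # 'cdn.example.com', '/static/course.jpg' becomes the protocol-relative
    # '//cdn.example.com/static/course.jpg', while 'http://other.host/x.jpg'
    # comes back unchanged because its netloc is already set.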
def __unicode__(self):
"""Represent ourselves with the course key."""
return unicode(self.id)
class CourseOverviewTab(models.Model):
"""
Model for storing and caching tabs information of a course.
"""
tab_id = models.CharField(max_length=50)
course_overview = models.ForeignKey(CourseOverview, db_index=True, related_name="tabs", on_delete=models.CASCADE)
class CourseOverviewImageSet(TimeStampedModel):
"""
Model for Course overview images. Each column is an image type/size.
You should basically never use this class directly. Read from
CourseOverview.image_urls instead.
Special Notes on Deployment/Rollback/Changes:
1. By default, this functionality is disabled. To turn it on, you have to
create a CourseOverviewImageConfig entry via Django Admin and select
enabled=True.
2. If it is enabled in configuration, it will lazily create thumbnails as
individual CourseOverviews are requested. This is independent of the
CourseOverview's cls.VERSION scheme. This is to better support the use
case where someone might want to change the thumbnail resolutions for
their theme -- we didn't want to tie the code-based data schema of
CourseOverview to configuration changes.
3. A CourseOverviewImageSet is automatically deleted when the CourseOverview
it belongs to is deleted. So it will be regenerated whenever there's a
new publish or the CourseOverview schema version changes. It's not
particularly smart about this, and will just re-write the same thumbnails
over and over to the same location without checking to see if there were
changes.
4. Just because a CourseOverviewImageSet is successfully created does not
mean that any thumbnails exist. There might have been a processing error,
or there might simply be no source image to create a thumbnail out of.
In this case, accessing CourseOverview.image_urls will return the value
for course.course_image_url for all resolutions. CourseOverviewImageSet
will *not* try to regenerate if there is a model entry with blank values
for the URLs -- the assumption is that either there's no data there or
something has gone wrong and needs fixing in code.
5. If you want to change thumbnail resolutions, you need to create a new
CourseOverviewImageConfig with the desired dimensions and then wipe the
values in CourseOverviewImageSet.
Logical next steps that I punted on for this first cut:
1. Converting other parts of the app to use this.
Our first cut only affects About Pages and the Student Dashboard. But
most places that use course_image_url() should be converted -- e.g.
course discovery, mobile, etc.
2. Center cropping the image before scaling.
This is desirable, but it involves a few edge cases (what the rounding
policy is, what to do with undersized images, etc.) The behavior that
we implemented is at least no worse than what was already there in terms
of distorting images.
3. Automatically invalidating entries based on CourseOverviewImageConfig.
There are two basic paths I can think of for this. The first is to
completely wipe this table when the config changes. The second is to
actually tie the config as a foreign key from this model -- so you could
do the comparison to see if the image_set's config_id matched
CourseOverviewImageConfig.current() and invalidate it if they didn't
match. I punted on this mostly because it's just not something that
happens much at all in practice, there is an understood (if manual)
process to do it, and it can happen in a follow-on PR if anyone is
interested in extending this functionality.
"""
course_overview = models.OneToOneField(CourseOverview, db_index=True, related_name="image_set",
on_delete=models.CASCADE)
small_url = models.TextField(blank=True, default="")
large_url = models.TextField(blank=True, default="")
@classmethod
def create(cls, course_overview, course=None):
"""
Create thumbnail images for this CourseOverview.
This will save the CourseOverviewImageSet before it returns.
"""
from openedx.core.lib.courses import create_course_image_thumbnail
# If image thumbnails are not enabled, do nothing.
config = CourseOverviewImageConfig.current()
if not config.enabled:
return
# If a course object was provided, use that. Otherwise, pull it from
# CourseOverview's course_id. This happens because sometimes we are
# generated as part of the CourseOverview creation (course is available
# and passed in), and sometimes the CourseOverview already exists.
if not course:
course = modulestore().get_course(course_overview.id)
image_set = cls(course_overview=course_overview)
if course.course_image:
# Try to create a thumbnails of the course image. If this fails for any
# reason (weird format, non-standard URL, etc.), the URLs will default
# to being blank. No matter what happens, we don't want to bubble up
# a 500 -- an image_set is always optional.
try:
image_set.small_url = create_course_image_thumbnail(course, config.small)
image_set.large_url = create_course_image_thumbnail(course, config.large)
except Exception: # pylint: disable=broad-except
log.exception(
"Could not create thumbnail for course %s with image %s (small=%s), (large=%s)",
course.id,
course.course_image,
config.small,
config.large
)
# Regardless of whether we created thumbnails or not, we need to save
# this record before returning. If no thumbnails were created (there was
# an error or the course has no source course_image), our url fields
# just keep their blank defaults.
try:
with transaction.atomic():
image_set.save()
course_overview.image_set = image_set
except (IntegrityError, ValueError):
# In the event of a race condition that tries to save two image sets
# to the same CourseOverview, we'll just silently pass on the one
# that fails. They should be the same data anyway.
#
# The ValueError above is to catch the following error that can
# happen in Django 1.8.4+ if the CourseOverview object fails to save
# (again, due to race condition).
#
# Example: ValueError: save() prohibited to prevent data loss due
# to unsaved related object 'course_overview'.")
pass
def __unicode__(self):
return u"CourseOverviewImageSet({}, small_url={}, large_url={})".format(
self.course_overview_id, self.small_url, self.large_url
)
class CourseOverviewImageConfig(ConfigurationModel):
"""
This sets the size of the thumbnail images that Course Overviews will generate
to display on the about, info, and student dashboard pages. If you make any
changes to this, you will have to regenerate CourseOverviews in order for it
to take effect. You might want to do this if you're doing precise theming of
your install of edx-platform... but really, you probably don't want to do this
at all at the moment, given how new this is. :-P
"""
# Small thumbnail, for things like the student dashboard
small_width = models.IntegerField(default=375)
small_height = models.IntegerField(default=200)
# Large thumbnail, for things like the about page
large_width = models.IntegerField(default=750)
large_height = models.IntegerField(default=400)
@property
def small(self):
"""Tuple for small image dimensions in pixels -- (width, height)"""
return (self.small_width, self.small_height)
@property
def large(self):
"""Tuple for large image dimensions in pixels -- (width, height)"""
return (self.large_width, self.large_height)
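    # Turning thumbnails on is a config change, not a code change -- a sketch,
    # run from a Django shell (the dimensions shown are just the defaults):
    #   CourseOverviewImageConfig(enabled=True, small_width=375, small_height=200,
    #                             large_width=750, large_height=400).save()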
def __unicode__(self):
return u"CourseOverviewImageConfig(enabled={}, small={}, large={})".format(
self.enabled, self.small, self.large
)
| agpl-3.0 |
willingc/oh-mainline | vendor/packages/scrapy/scrapyd/website.py | 16 | 3773 | from datetime import datetime
from twisted.web import resource, static
from twisted.application.service import IServiceCollection
from .interfaces import IPoller, IEggStorage, ISpiderScheduler
from . import webservice
class Root(resource.Resource):
def __init__(self, config, app):
resource.Resource.__init__(self)
self.debug = config.getboolean('debug', False)
self.runner = config.get('runner')
logsdir = config.get('logs_dir')
self.app = app
self.putChild('', Home(self))
self.putChild('schedule.json', webservice.Schedule(self))
self.putChild('addversion.json', webservice.AddVersion(self))
self.putChild('listprojects.json', webservice.ListProjects(self))
self.putChild('listversions.json', webservice.ListVersions(self))
self.putChild('listspiders.json', webservice.ListSpiders(self))
self.putChild('delproject.json', webservice.DeleteProject(self))
self.putChild('delversion.json', webservice.DeleteVersion(self))
self.putChild('listjobs.json', webservice.ListJobs(self))
self.putChild('logs', static.File(logsdir, 'text/plain'))
self.putChild('procmon', ProcessMonitor(self))
self.update_projects()
def update_projects(self):
self.poller.update_projects()
self.scheduler.update_projects()
@property
def launcher(self):
app = IServiceCollection(self.app, self.app)
return app.getServiceNamed('launcher')
@property
def scheduler(self):
return self.app.getComponent(ISpiderScheduler)
@property
def eggstorage(self):
return self.app.getComponent(IEggStorage)
@property
def poller(self):
return self.app.getComponent(IPoller)
class Home(resource.Resource):
def __init__(self, root):
resource.Resource.__init__(self)
self.root = root
def render_GET(self, txrequest):
vars = {
'projects': ', '.join(self.root.scheduler.list_projects()),
}
return """
<html>
<head><title>Scrapyd</title></head>
<body>
<h1>Scrapyd</h1>
<p>Available projects: <b>%(projects)s</b></p>
<ul>
<li><a href="/procmon">Process monitor</a></li>
<li><a href="/logs/">Logs</li>
<li><a href="http://doc.scrapy.org/en/latest/topics/scrapyd.html">Documentation</a></li>
</ul>
<h2>How to schedule a spider?</h2>
<p>To schedule a spider you need to use the API (this web UI is only for
monitoring)</p>
<p>Example using <a href="http://curl.haxx.se/">curl</a>:</p>
<p><code>curl http://localhost:6800/schedule.json -d project=default -d spider=somespider</code></p>
<p>For more information about the API, see the <a href="http://doc.scrapy.org/topics/scrapyd.html">Scrapyd documentation</a></p>
</body>
</html>
""" % vars
class ProcessMonitor(resource.Resource):
def __init__(self, root):
resource.Resource.__init__(self)
self.root = root
def render(self, txrequest):
s = "<html><head><title>Scrapyd</title></title>"
s += "<body>"
s += "<h1>Process monitor</h1>"
s += "<p><a href='..'>Go back</a></p>"
s += "<table border='1'>"
s += "<tr>"
s += "<th>Project</th><th>Spider</th><th>Job</th><th>PID</th><th>Runtime</th><th>Log</th>"
s += "</tr>"
for p in self.root.launcher.processes.values():
s += "<tr>"
for a in ['project', 'spider', 'job', 'pid']:
s += "<td>%s</td>" % getattr(p, a)
s += "<td>%s</td>" % (datetime.now() - p.start_time)
s += "<td><a href='/logs/%s/%s/%s.log'>Log</a></td>" % (p.project, p.spider, p.job)
s += "</tr>"
s += "</table>"
s += "</body>"
s += "</html>"
return s
| agpl-3.0 |
switchkiller/ProjDjanko | lib/python2.7/site-packages/django/contrib/gis/maps/google/overlays.py | 79 | 11934 | from __future__ import unicode_literals
from django.contrib.gis.geos import (
LinearRing, LineString, Point, Polygon, fromstr,
)
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import total_ordering
from django.utils.html import html_safe
@html_safe
@python_2_unicode_compatible
class GEvent(object):
"""
A Python wrapper for the Google GEvent object.
Events can be attached to any object derived from GOverlayBase with the
add_event() call.
For more information please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GEvent
Example:
from django.shortcuts import render_to_response
from django.contrib.gis.maps.google import GoogleMap, GEvent, GPolyline
def sample_request(request):
polyline = GPolyline('LINESTRING(101 26, 112 26, 102 31)')
event = GEvent('click',
'function() { location.href = "http://www.google.com"}')
polyline.add_event(event)
return render_to_response('mytemplate.html',
{'google' : GoogleMap(polylines=[polyline])})
"""
def __init__(self, event, action):
"""
Initializes a GEvent object.
Parameters:
event:
string for the event, such as 'click'. The event must be a valid
event for the object in the Google Maps API.
There is no validation of the event type within Django.
action:
string containing a Javascript function, such as
'function() { location.href = "newurl";}'
The string must be a valid Javascript function. Again, there is no
validation of the function within Django.
"""
self.event = event
self.action = action
def __str__(self):
"Returns the parameter part of a GEvent."
return '"%s", %s' % (self.event, self.action)
@html_safe
@python_2_unicode_compatible
class GOverlayBase(object):
def __init__(self):
self.events = []
def latlng_from_coords(self, coords):
"Generates a JavaScript array of GLatLng objects for the given coordinates."
return '[%s]' % ','.join('new GLatLng(%s,%s)' % (y, x) for x, y in coords)
def add_event(self, event):
"Attaches a GEvent to the overlay object."
self.events.append(event)
def __str__(self):
"The string representation is the JavaScript API call."
return '%s(%s)' % (self.__class__.__name__, self.js_params)
class GPolygon(GOverlayBase):
"""
A Python wrapper for the Google GPolygon object. For more information
please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GPolygon
"""
def __init__(self, poly,
stroke_color='#0000ff', stroke_weight=2, stroke_opacity=1,
fill_color='#0000ff', fill_opacity=0.4):
"""
The GPolygon object initializes on a GEOS Polygon or a parameter that
may be instantiated into GEOS Polygon. Please note that this will not
depict a Polygon's internal rings.
Keyword Options:
stroke_color:
The color of the polygon outline. Defaults to '#0000ff' (blue).
stroke_weight:
The width of the polygon outline, in pixels. Defaults to 2.
stroke_opacity:
The opacity of the polygon outline, between 0 and 1. Defaults to 1.
fill_color:
The color of the polygon fill. Defaults to '#0000ff' (blue).
fill_opacity:
The opacity of the polygon fill. Defaults to 0.4.
"""
if isinstance(poly, six.string_types):
poly = fromstr(poly)
if isinstance(poly, (tuple, list)):
poly = Polygon(poly)
if not isinstance(poly, Polygon):
raise TypeError('GPolygon may only initialize on GEOS Polygons.')
# Getting the envelope of the input polygon (used for automatically
# determining the zoom level).
self.envelope = poly.envelope
# Translating the coordinates into a JavaScript array of
# Google `GLatLng` objects.
self.points = self.latlng_from_coords(poly.shell.coords)
# Stroke settings.
self.stroke_color, self.stroke_opacity, self.stroke_weight = stroke_color, stroke_opacity, stroke_weight
# Fill settings.
self.fill_color, self.fill_opacity = fill_color, fill_opacity
super(GPolygon, self).__init__()
@property
def js_params(self):
return '%s, "%s", %s, %s, "%s", %s' % (self.points, self.stroke_color, self.stroke_weight, self.stroke_opacity,
self.fill_color, self.fill_opacity)
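# Usage sketch (not part of the original module; the WKT below is
# illustrative and the emitted JavaScript is abridged):
# >>> poly = GPolygon('POLYGON((0 0, 5 0, 5 5, 0 5, 0 0))',
# ...                 stroke_color='#ff0000', fill_opacity=0.2)
# >>> str(poly)  # GOverlayBase.__str__ renders the JS constructor call
# 'GPolygon([new GLatLng(0.0,0.0),...], "#ff0000", 2, 1, "#0000ff", 0.2)'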
class GPolyline(GOverlayBase):
"""
A Python wrapper for the Google GPolyline object. For more information
please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GPolyline
"""
def __init__(self, geom, color='#0000ff', weight=2, opacity=1):
"""
The GPolyline object may be initialized on GEOS LineString, LinearRing,
and Polygon objects (internal rings not supported) or a parameter that
may be instantiated into one of the above geometries.
Keyword Options:
color:
The color to use for the polyline. Defaults to '#0000ff' (blue).
weight:
The width of the polyline, in pixels. Defaults to 2.
opacity:
The opacity of the polyline, between 0 and 1. Defaults to 1.
"""
# If a GEOS geometry isn't passed in, try to construct one.
if isinstance(geom, six.string_types):
geom = fromstr(geom)
if isinstance(geom, (tuple, list)):
geom = Polygon(geom)
# Generating the lat/lng coordinate pairs.
if isinstance(geom, (LineString, LinearRing)):
self.latlngs = self.latlng_from_coords(geom.coords)
elif isinstance(geom, Polygon):
self.latlngs = self.latlng_from_coords(geom.shell.coords)
else:
raise TypeError('GPolyline may only initialize on GEOS LineString, LinearRing, and/or Polygon geometries.')
# Getting the envelope for automatic zoom determination.
self.envelope = geom.envelope
self.color, self.weight, self.opacity = color, weight, opacity
super(GPolyline, self).__init__()
@property
def js_params(self):
return '%s, "%s", %s, %s' % (self.latlngs, self.color, self.weight, self.opacity)
@total_ordering
class GIcon(object):
"""
Creates a GIcon object to pass into a Gmarker object.
The keyword arguments map to instance attributes of the same name. These,
in turn, correspond to a subset of the attributes of the official GIcon
javascript object:
http://code.google.com/apis/maps/documentation/reference.html#GIcon
Because a Google map often uses several different icons, a name field has
been added to the required arguments.
Required Arguments:
varname:
A string which will become the basis for the js variable name of
the marker. For this reason, your code should assign a unique
name for each GIcon you instantiate; otherwise there will be
namespace collisions in your javascript.
Keyword Options:
image:
The url of the image to be used as the icon on the map. Defaults
to 'G_DEFAULT_ICON'.
iconsize:
a tuple representing the pixel size of the foreground (not the
shadow) image of the icon, in the format: (width, height) ex.:
GIcon('fast_food',
image="/media/icon/star.png",
iconsize=(15,10))
Would indicate your custom icon is 15px wide and 10px high.
shadow:
the url of the image of the icon's shadow
shadowsize:
a tuple representing the pixel size of the shadow image, format is
the same as ``iconsize``
iconanchor:
a tuple representing the pixel coordinate relative to the top left
corner of the icon image at which this icon is anchored to the map.
In (x, y) format. x increases to the right and y increases
downwards in the Google Maps coordinate system.
infowindowanchor:
The pixel coordinate relative to the top left corner of the icon
image at which the info window is anchored to this icon.
"""
def __init__(self, varname, image=None, iconsize=None,
shadow=None, shadowsize=None, iconanchor=None,
infowindowanchor=None):
self.varname = varname
self.image = image
self.iconsize = iconsize
self.shadow = shadow
self.shadowsize = shadowsize
self.iconanchor = iconanchor
self.infowindowanchor = infowindowanchor
def __eq__(self, other):
return self.varname == other.varname
def __lt__(self, other):
return self.varname < other.varname
def __hash__(self):
# XOR with hash of GIcon type so that hash('varname') won't
# equal hash(GIcon('varname')).
return hash(self.__class__) ^ hash(self.varname)
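# Usage sketch (paths and sizes illustrative); note that equality,
# ordering and hashing are all based solely on varname:
# >>> icon = GIcon('flag', image='/media/icons/flag.png',
# ...              iconsize=(19, 32), iconanchor=(9, 32))
# >>> icon == GIcon('flag')
# True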
class GMarker(GOverlayBase):
"""
A Python wrapper for the Google GMarker object. For more information
please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GMarker
Example:
from django.shortcuts import render_to_response
from django.contrib.gis.maps.google.overlays import GMarker, GEvent
def sample_request(request):
marker = GMarker('POINT(101 26)')
event = GEvent('click',
'function() { location.href = "http://www.google.com"}')
marker.add_event(event)
return render_to_response('mytemplate.html',
{'google' : GoogleMap(markers=[marker])})
"""
def __init__(self, geom, title=None, draggable=False, icon=None):
"""
The GMarker object may initialize on GEOS Points or a parameter
that may be instantiated into a GEOS point. Keyword options map to
GMarkerOptions -- currently the title, draggable, and icon options
are supported.
Keyword Options:
title:
Title option for GMarker, will be displayed as a tooltip.
draggable:
Draggable option for GMarker, disabled by default.
"""
# If a GEOS geometry isn't passed in, try to construct one.
if isinstance(geom, six.string_types):
geom = fromstr(geom)
if isinstance(geom, (tuple, list)):
geom = Point(geom)
if isinstance(geom, Point):
self.latlng = self.latlng_from_coords(geom.coords)
else:
raise TypeError('GMarker may only initialize on GEOS Point geometry.')
# Getting the envelope for automatic zoom determination.
self.envelope = geom.envelope
# TODO: Add support for more GMarkerOptions
self.title = title
self.draggable = draggable
self.icon = icon
super(GMarker, self).__init__()
def latlng_from_coords(self, coords):
return 'new GLatLng(%s,%s)' % (coords[1], coords[0])
def options(self):
result = []
if self.title:
result.append('title: "%s"' % self.title)
if self.icon:
result.append('icon: %s' % self.icon.varname)
if self.draggable:
result.append('draggable: true')
return '{%s}' % ','.join(result)
@property
def js_params(self):
return '%s, %s' % (self.latlng, self.options())
| gpl-2.0 |
kenshay/ImageScripter | ProgramData/Android/ADB/platform-tools/systrace/catapult/telemetry/telemetry/page/page_unittest.py | 13 | 8554 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import story
from telemetry.page import page
import mock
class TestPage(unittest.TestCase):
def assertPathEqual(self, path1, path2):
self.assertEqual(os.path.normpath(path1), os.path.normpath(path2))
def testFilePathRelative(self):
apage = page.Page('file://somedir/otherdir/file.html',
None, base_dir='basedir')
self.assertPathEqual(apage.file_path, 'basedir/somedir/otherdir/file.html')
def testFilePathAbsolute(self):
apage = page.Page('file:///somedir/otherdir/file.html',
None, base_dir='basedir')
self.assertPathEqual(apage.file_path, '/somedir/otherdir/file.html')
def testFilePathQueryString(self):
apage = page.Page('file://somedir/otherdir/file.html?key=val',
None, base_dir='basedir')
self.assertPathEqual(apage.file_path, 'basedir/somedir/otherdir/file.html')
def testFilePathUrlQueryString(self):
apage = page.Page('file://somedir/file.html?key=val',
None, base_dir='basedir')
self.assertPathEqual(apage.file_path_url,
'basedir/somedir/file.html?key=val')
def testFilePathUrlTrailingSeparator(self):
apage = page.Page('file://somedir/otherdir/',
None, base_dir='basedir')
self.assertPathEqual(apage.file_path_url, 'basedir/somedir/otherdir/')
self.assertTrue(apage.file_path_url.endswith(os.sep) or
(os.altsep and apage.file_path_url.endswith(os.altsep)))
def testSort(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page.Page('http://www.foo.com/', story_set, story_set.base_dir))
story_set.AddStory(
page.Page('http://www.bar.com/', story_set, story_set.base_dir))
pages = sorted([story_set.stories[0], story_set.stories[1]])
self.assertEquals([story_set.stories[1], story_set.stories[0]],
pages)
def testGetUrlBaseDirAndFileForUrlBaseDir(self):
base_dir = os.path.dirname(__file__)
file_path = os.path.join(
os.path.dirname(base_dir), 'otherdir', 'file.html')
story_set = story.StorySet(base_dir=base_dir,
serving_dirs=[os.path.join('..', 'somedir', '')])
story_set.AddStory(
page.Page('file://../otherdir/file.html', story_set,
story_set.base_dir))
self.assertPathEqual(story_set[0].file_path, file_path)
def testDisplayUrlForHttp(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page.Page('http://www.foo.com/', story_set, story_set.base_dir))
story_set.AddStory(
page.Page('http://www.bar.com/', story_set, story_set.base_dir))
self.assertEquals(story_set[0].display_name, 'http://www.foo.com/')
self.assertEquals(story_set[1].display_name, 'http://www.bar.com/')
def testDisplayUrlForHttps(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page.Page('http://www.foo.com/', story_set, story_set.base_dir))
story_set.AddStory(
page.Page('https://www.bar.com/', story_set, story_set.base_dir))
self.assertEquals(story_set[0].display_name, 'http://www.foo.com/')
self.assertEquals(story_set[1].display_name, 'https://www.bar.com/')
def testDisplayUrlForFile(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(page.Page(
'file://../../otherdir/foo.html', story_set, story_set.base_dir))
story_set.AddStory(page.Page(
'file://../../otherdir/bar.html', story_set, story_set.base_dir))
self.assertEquals(story_set[0].display_name, 'foo.html')
self.assertEquals(story_set[1].display_name, 'bar.html')
def testDisplayUrlForFilesDifferingBySuffix(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(page.Page(
'file://../../otherdir/foo.html', story_set, story_set.base_dir))
story_set.AddStory(page.Page(
'file://../../otherdir/foo1.html', story_set, story_set.base_dir))
self.assertEquals(story_set[0].display_name, 'foo.html')
self.assertEquals(story_set[1].display_name, 'foo1.html')
def testDisplayUrlForFileOfDifferentPaths(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page.Page(
'file://../../somedir/foo.html', story_set, story_set.base_dir))
story_set.AddStory(page.Page(
'file://../../otherdir/bar.html', story_set, story_set.base_dir))
self.assertEquals(story_set[0].display_name, 'somedir/foo.html')
self.assertEquals(story_set[1].display_name, 'otherdir/bar.html')
def testDisplayUrlForFileDirectories(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page.Page('file://../../otherdir/foo', story_set, story_set.base_dir))
story_set.AddStory(
page.Page('file://../../otherdir/bar', story_set, story_set.base_dir))
self.assertEquals(story_set[0].display_name, 'foo')
self.assertEquals(story_set[1].display_name, 'bar')
def testDisplayUrlForSingleFile(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(page.Page(
'file://../../otherdir/foo.html', story_set, story_set.base_dir))
self.assertEquals(story_set[0].display_name, 'foo.html')
def testDisplayUrlForSingleDirectory(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page.Page('file://../../otherdir/foo', story_set, story_set.base_dir))
self.assertEquals(story_set[0].display_name, 'foo')
def testPagesHaveDifferentIds(self):
p0 = page.Page("http://example.com")
p1 = page.Page("http://example.com")
self.assertNotEqual(p0.id, p1.id)
def testNamelessPageAsDict(self):
nameless_dict = page.Page('http://example.com/').AsDict()
self.assertIn('id', nameless_dict)
del nameless_dict['id']
self.assertEquals({
'url': 'http://example.com/',
}, nameless_dict)
def testNamedPageAsDict(self):
named_dict = page.Page('http://example.com/', name='Example').AsDict()
self.assertIn('id', named_dict)
del named_dict['id']
self.assertEquals({
'url': 'http://example.com/',
'name': 'Example'
}, named_dict)
def testIsLocal(self):
p = page.Page('file://foo.html')
self.assertTrue(p.is_local)
p = page.Page('chrome://extensions')
self.assertTrue(p.is_local)
p = page.Page('about:blank')
self.assertTrue(p.is_local)
p = page.Page('http://foo.com')
self.assertFalse(p.is_local)
class TestPageRun(unittest.TestCase):
def testFiveGarbageCollectionCallsByDefault(self):
mock_shared_state = mock.Mock()
p = page.Page('file://foo.html')
p.Run(mock_shared_state)
expected = [mock.call.current_tab.CollectGarbage(),
mock.call.current_tab.CollectGarbage(),
mock.call.current_tab.CollectGarbage(),
mock.call.current_tab.CollectGarbage(),
mock.call.current_tab.CollectGarbage(),
mock.call.page_test.WillNavigateToPage(
p, mock_shared_state.current_tab),
mock.call.page_test.RunNavigateSteps(
p, mock_shared_state.current_tab),
mock.call.page_test.DidNavigateToPage(
p, mock_shared_state.current_tab)]
self.assertEquals(mock_shared_state.mock_calls, expected)
def testNoGarbageCollectionCalls(self):
mock_shared_state = mock.Mock()
class NonGarbageCollectPage(page.Page):
def __init__(self, url):
super(NonGarbageCollectPage, self).__init__(url)
self._collect_garbage_before_run = False
p = NonGarbageCollectPage('file://foo.html')
p.Run(mock_shared_state)
expected = [mock.call.page_test.WillNavigateToPage(
p, mock_shared_state.current_tab),
mock.call.page_test.RunNavigateSteps(
p, mock_shared_state.current_tab),
mock.call.page_test.DidNavigateToPage(
p, mock_shared_state.current_tab)]
self.assertEquals(mock_shared_state.mock_calls, expected)
| gpl-3.0 |
MSeifert04/astropy | astropy/utils/diff.py | 4 | 4980 | import difflib
import functools
import sys
import numbers
import numpy as np
from .misc import indent
__all__ = ['fixed_width_indent', 'diff_values', 'report_diff_values',
'where_not_allclose']
# Smaller default shift-width for indent
fixed_width_indent = functools.partial(indent, width=2)
def diff_values(a, b, rtol=0.0, atol=0.0):
"""
Diff two scalar values. If both values are floats, they are compared to
within the given absolute and relative tolerance.
Parameters
----------
a, b : int, float, str
Scalar values to compare.
rtol, atol : float
Relative and absolute tolerances as accepted by
:func:`numpy.allclose`.
Returns
-------
is_different : bool
`True` if they are different, else `False`.
"""
if isinstance(a, float) and isinstance(b, float):
if np.isnan(a) and np.isnan(b):
return False
return not np.allclose(a, b, rtol=rtol, atol=atol)
else:
return a != b
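# Doctest-style sketch of the comparison semantics:
# >>> diff_values(1.0, 1.0 + 1e-9, rtol=1e-6)
# False
# >>> diff_values('a', 'b')
# True
# >>> diff_values(float('nan'), float('nan'))  # two NaNs are not "different"
# False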
def report_diff_values(a, b, fileobj=sys.stdout, indent_width=0):
"""
Write a diff report between two values to the specified file-like object.
Parameters
----------
a, b
Values to compare. Anything that can be turned into strings
and compared using :py:mod:`difflib` should work.
fileobj : obj
File-like object to write to.
The default is ``sys.stdout``, which writes to terminal.
indent_width : int
Character column(s) to indent.
Returns
-------
identical : bool
`True` if no diff, else `False`.
"""
if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
if a.shape != b.shape:
fileobj.write(
fixed_width_indent(' Different array shapes:\n',
indent_width))
report_diff_values(str(a.shape), str(b.shape), fileobj=fileobj,
indent_width=indent_width + 1)
return False
diff_indices = np.transpose(np.where(a != b))
num_diffs = diff_indices.shape[0]
for idx in diff_indices[:3]:
lidx = idx.tolist()
fileobj.write(
fixed_width_indent(f' at {lidx!r}:\n', indent_width))
report_diff_values(a[tuple(idx)], b[tuple(idx)], fileobj=fileobj,
indent_width=indent_width + 1)
if num_diffs > 3:
fileobj.write(fixed_width_indent(
' ...and at {:d} more indices.\n'.format(num_diffs - 3),
indent_width))
return False
return num_diffs == 0
typea = type(a)
typeb = type(b)
if typea == typeb:
lnpad = ' '
sign_a = 'a>'
sign_b = 'b>'
if isinstance(a, numbers.Number):
a = repr(a)
b = repr(b)
else:
a = str(a)
b = str(b)
else:
padding = max(len(typea.__name__), len(typeb.__name__)) + 3
lnpad = (padding + 1) * ' '
sign_a = ('(' + typea.__name__ + ') ').rjust(padding) + 'a>'
sign_b = ('(' + typeb.__name__ + ') ').rjust(padding) + 'b>'
is_a_str = isinstance(a, str)
is_b_str = isinstance(b, str)
a = (repr(a) if ((is_a_str and not is_b_str) or
(not is_a_str and isinstance(a, numbers.Number)))
else str(a))
b = (repr(b) if ((is_b_str and not is_a_str) or
(not is_b_str and isinstance(b, numbers.Number)))
else str(b))
identical = True
for line in difflib.ndiff(a.splitlines(), b.splitlines()):
if line[0] == '-':
identical = False
line = sign_a + line[1:]
elif line[0] == '+':
identical = False
line = sign_b + line[1:]
else:
line = lnpad + line
fileobj.write(fixed_width_indent(
' {}\n'.format(line.rstrip('\n')), indent_width))
return identical
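# Usage sketch: capture the rendered diff in a buffer instead of stdout.
# >>> import io
# >>> buf = io.StringIO()
# >>> report_diff_values(np.array([1, 2, 3]), np.array([1, 9, 3]), fileobj=buf)
# False
# >>> 'at [1]' in buf.getvalue()  # the differing index is reported
# True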
def where_not_allclose(a, b, rtol=1e-5, atol=1e-8):
"""
A version of :func:`numpy.allclose` that returns the indices
where the two arrays differ, instead of just a boolean value.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol, atol : float
Relative and absolute tolerances as accepted by
:func:`numpy.allclose`.
Returns
-------
idx : tuple of arrays
Indices where the two arrays differ.
"""
# Create fixed mask arrays to handle INF and NaN; currently INF and NaN
# are handled as equivalent
if not np.all(np.isfinite(a)):
a = np.ma.fix_invalid(a).data
if not np.all(np.isfinite(b)):
b = np.ma.fix_invalid(b).data
if atol == 0.0 and rtol == 0.0:
# Use a faster comparison for the most simple (and common) case
return np.where(a != b)
return np.where(np.abs(a - b) > (atol + rtol * np.abs(b)))
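# Usage sketch:
# >>> a = np.array([1.0, 2.0, 3.0])
# >>> b = np.array([1.0, 2.5, 3.0])
# >>> where_not_allclose(a, b)
# (array([1]),)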
| bsd-3-clause |
shsingh/ansible | lib/ansible/modules/cloud/amazon/ec2_win_password.py | 13 | 6662 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_win_password
short_description: Gets the default administrator password for ec2 windows instances
description:
- Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)).
- This module has a dependency on python-boto.
version_added: "2.0"
author: "Rick Mendes (@rickmendes)"
options:
instance_id:
description:
- The instance id to get the password data from.
required: true
type: str
key_file:
description:
- Path to the file containing the key pair used on the instance.
- Conflicts with I(key_data).
required: false
type: path
key_data:
version_added: "2.8"
description:
- The private key (usually stored in vault).
- Conflicts with I(key_file).
required: false
type: str
key_passphrase:
version_added: "2.0"
description:
- The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to
convert your password protected keys if they do not use DES or 3DES. ex) C(openssl rsa -in current_key -out new_key -des3).
type: str
wait:
version_added: "2.0"
description:
- Whether or not to wait for the password to be available before returning.
type: bool
default: false
wait_timeout:
version_added: "2.0"
description:
- Number of seconds to wait before giving up.
default: 120
type: int
extends_documentation_fragment:
- aws
- ec2
requirements:
- cryptography
notes:
- As of Ansible 2.4, this module requires the python cryptography module rather than the
older pycrypto module.
'''
EXAMPLES = '''
# Example of getting a password
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_test_key.pem"
# Example of getting a password using a variable
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_data: "{{ ec2_private_key }}"
# Example of getting a password with a password protected key
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_protected_test_key.pem"
key_passphrase: "secret"
# Example of waiting for a password
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_test_key.pem"
wait: yes
wait_timeout: 45
'''
import datetime
import time
from base64 import b64decode
try:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
from cryptography.hazmat.primitives.serialization import load_pem_private_key
HAS_CRYPTOGRAPHY = True
except ImportError:
HAS_CRYPTOGRAPHY = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
from ansible.module_utils._text import to_bytes
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id=dict(required=True),
key_file=dict(required=False, default=None, type='path'),
key_passphrase=dict(no_log=True, default=None, required=False),
key_data=dict(no_log=True, default=None, required=False),
wait=dict(type='bool', default=False, required=False),
wait_timeout=dict(default=120, required=False, type='int'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='Boto required for this module.')
if not HAS_CRYPTOGRAPHY:
module.fail_json(msg='cryptography package required for this module.')
instance_id = module.params.get('instance_id')
key_file = module.params.get('key_file')
key_data = module.params.get('key_data')
if module.params.get('key_passphrase') is None:
b_key_passphrase = None
else:
b_key_passphrase = to_bytes(module.params.get('key_passphrase'), errors='surrogate_or_strict')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
ec2 = ec2_connect(module)
if wait:
start = datetime.datetime.now()
end = start + datetime.timedelta(seconds=wait_timeout)
while datetime.datetime.now() < end:
data = ec2.get_password_data(instance_id)
decoded = b64decode(data)
if not decoded:
time.sleep(5)
else:
break
else:
data = ec2.get_password_data(instance_id)
decoded = b64decode(data)
if wait and datetime.datetime.now() >= end:
module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout)
if key_file is not None and key_data is None:
try:
with open(key_file, 'rb') as f:
key = load_pem_private_key(f.read(), b_key_passphrase, default_backend())
except IOError as e:
# Handle bad files
module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
except (ValueError, TypeError) as e:
# Handle issues loading key
module.fail_json(msg="unable to parse key file")
elif key_data is not None and key_file is None:
try:
# load_pem_private_key() requires bytes, so encode the vaulted string first
key = load_pem_private_key(to_bytes(key_data), b_key_passphrase, default_backend())
except (ValueError, TypeError) as e:
module.fail_json(msg="unable to parse key data")
try:
decrypted = key.decrypt(decoded, PKCS1v15())
except ValueError as e:
decrypted = None
if decrypted is None:
module.exit_json(win_password='', changed=False)
else:
if wait:
elapsed = datetime.datetime.now() - start
module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
else:
module.exit_json(win_password=decrypted, changed=True)
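# Standalone sketch of the decryption path exercised above (the filename and
# the password_data value are illustrative; the APIs are from `cryptography`):
# >>> with open('my_test_key.pem', 'rb') as f:
# ...     key = load_pem_private_key(f.read(), None, default_backend())
# >>> plaintext = key.decrypt(b64decode(password_data), PKCS1v15())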
if __name__ == '__main__':
main()
| gpl-3.0 |
domino14/Webolith | djAerolith/accounts/admin.py | 1 | 1452 | # Aerolith 2.0: A web-based word game website
# Copyright (C) 2011 Cesar Del Solar
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# To contact the author, please email delsolar at gmail dot com
from django.contrib import admin
from accounts.models import AerolithProfile
from django import forms
class ProfileAdminForm(forms.ModelForm):
class Meta:
model = AerolithProfile
fields = '__all__'
widgets = {
'profile': forms.Textarea(attrs={'cols': 80, 'rows': 10}),
'customWordwallsStyle': forms.Textarea(
attrs={'cols': 80, 'rows': 3})
}
class AerolithProfileAdmin(admin.ModelAdmin):
list_display = ['user', 'member', 'wordwallsSaveListSize']
search_fields = ['user__username', 'member']
form = ProfileAdminForm
admin.site.register(AerolithProfile, AerolithProfileAdmin)
| gpl-3.0 |
wenottingham/ansible | lib/ansible/utils/module_docs_fragments/vyos.py | 2 | 2807 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device.
required: false
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
required: false
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
required: false
default: null
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the
key used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
required: false
timeout:
description:
- Specifies idle timeout for the connection, in seconds. Useful if the console
freezes before continuing. For example when saving configurations.
required: false
default: 10
provider:
description:
- Convenience method that allows all I(vyos) arguments to be passed as
a dict object. All constraints (required, choices, etc) must be
met either by individual arguments or values in this dict.
required: false
default: null
"""
| gpl-3.0 |
mikehulluk/ProcessManager | www/js/brython/Lib/sre_parse.py | 630 | 29657 | #
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
from _sre import MAXREPEAT
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"a": SRE_FLAG_ASCII,
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def __iter__(self):
return iter(self.data)
def dump(self, level=0):
nl = 1
seqtypes = (tuple, list)
for op, av in self.data:
print(level*" " + op, end=' '); nl = 0
if op == "in":
# member sublanguage
print(); nl = 1
for op, a in av:
print((level+1)*" " + op, a)
elif op == "branch":
print(); nl = 1
i = 0
for a in av[1]:
if i > 0:
print(level*" " + "or")
a.dump(level+1); nl = 1
i = i + 1
elif isinstance(av, seqtypes):
for a in av:
if isinstance(a, SubPattern):
if not nl: print()
a.dump(level+1); nl = 1
else:
print(a, end=' ') ; nl = 0
else:
print(av, end=' ') ; nl = 0
if not nl: print()
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = sys.maxsize
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + int(i) * av[0]
hi = hi + int(j) * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxsize)), int(min(hi, sys.maxsize))
return self.width
class Tokenizer:
def __init__(self, string):
self.istext = isinstance(string, str)
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index:self.index+1]
# Special case for the str8, since indexing returns an integer
# XXX This is only needed for test_bug_926075 in test_re.py
if char and not self.istext:
char = chr(char[0])
if char == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error("bogus escape (end of line)")
if not self.istext:
c = chr(c)
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def getwhile(self, n, charset):
result = ''
for _ in range(n):
c = self.next
if c not in charset:
break
result += c
self.__next()
return result
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
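# Sketch of the tokenizer in isolation: a backslash escape is returned as
# a single two-character token, and get() yields None at end of input.
# >>> t = Tokenizer(r"a\d+")
# >>> t.get(), t.get(), t.get(), t.get()
# ('a', '\\d', '+', None)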
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] == IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c in OCTDIGITS:
# octal escape (up to three digits)
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "0":
# octal escape
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error("cannot refer to open group")
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error("pattern not properly closed")
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
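# For instance (sketch), parsing 'a|b|c' produces a single character-set
# item, [(IN, [(LITERAL, 97), (LITERAL, 98), (LITERAL, 99)])], instead of
# a three-way BRANCH, which the compiler can match more cheaply.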
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error("conditional backref with more than two branches")
else:
item_no = None
if source.next and not source.match(")", 0):
raise error("pattern not properly closed")
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error("unexpected end of regular expression")
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error("bad character range")
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error("bad character range")
setappend((RANGE, (lo, hi)))
else:
raise error("unexpected end of regular expression")
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise error("bad repeat interval")
else:
raise error("not supported")
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error("nothing to repeat")
if item[0][0] in REPEATCODES:
raise error("multiple repeat")
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ">":
break
name = name + char
group = 1
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
name = name + char
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
gid = state.groupdict.get(name)
if gid is None:
raise error("unknown group name")
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
raise error("unknown specifier: ?P%s" % char)
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error("unbalanced parenthesis")
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error("syntax error")
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
condname = condname + char
group = 2
if not condname:
raise error("missing group name")
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error("unknown group name")
else:
try:
condgroup = int(condname)
except ValueError:
raise error("bad character in group name")
else:
# flags
if not source.next in FLAGS:
raise error("unexpected end of pattern")
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
if char == ")":
break
raise error("unknown extension")
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error("parser error")
return subpattern
def fix_flags(src, flags):
# Check and fix flags according to the type of pattern (str or bytes)
if isinstance(src, str):
if not flags & SRE_FLAG_ASCII:
flags |= SRE_FLAG_UNICODE
elif flags & SRE_FLAG_UNICODE:
raise ValueError("ASCII and UNICODE flags are incompatible")
else:
if flags & SRE_FLAG_UNICODE:
raise ValueError("can't use UNICODE flag with a bytes pattern")
return flags
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
p.pattern.flags = fix_flags(str, p.pattern.flags)
tail = source.get()
if tail == ")":
raise error("unbalanced parenthesis")
elif tail:
raise error("bogus characters at end of regular expression")
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
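# Example (sketch; opcode reprs abridged, MAXREPEAT shown symbolically):
# >>> parse('ab*')
# [('literal', 97), ('max_repeat', (0, MAXREPEAT, [('literal', 98)]))]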
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
sep = source[:0]
if isinstance(sep, str):
makechar = chr
else:
makechar = chr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error("unterminated group name")
if char == ">":
break
name = name + char
if not name:
raise error("missing group name")
try:
index = int(name)
if index < 0:
raise error("negative group number")
except ValueError:
if not isname(name):
raise error("bad character in group name")
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError("unknown group name")
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
if isinstance(source, str):
encode = lambda x: x
else:
# The tokenizer implicitly decodes bytes objects as latin-1, we must
# therefore re-encode the final representation.
encode = lambda x: x.encode('latin-1')
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = encode(s)
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error("unmatched group")
except IndexError:
raise error("invalid group reference")
return sep.join(literals)
| bsd-2-clause |
Just-D/chromium-1 | tools/memory_inspector/classification_rules/default/mmap-android.py | 54 | 1894 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is a generic rule-tree for classifying memory maps on Android. It is a
# simple hierarchical python data structure (list of dictionaries). Some rules:
# - Order matters: what is caught by a node is not caught by its siblings.
# - Hierarchy matters: what is caught by a node is propagated to its children
# (if any). Only one of its children, though, will get the data.
# - Non leaf nodes have an extra implicit node called {node-name}-other: if
# something is caught by a non leaf node, but none of its children, it is
# appended to implicit {node-name}-other catch-all children.
#
# See memory_inspector/classification/mmap_classifier.py for more docs.
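# For example (sketch): a '/dev/ashmem/dalvik-heap' map is caught by the
# 'Ashmem' node, then by its 'Dalvik' child, and finally lands in
# 'Java Heap'; an r-x '/dev/ashmem/dalvik-jit-code-cache' map instead
# falls through to Dalvik's 'JIT' child.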
[
{
'name': 'Anon',
'mmap_file': r'(^$)|(^\[)',
'children': [
{
'name': 'stack',
'mmap_file': r'\[stack',
},
{
'name': 'libc malloc',
'mmap_file': 'libc_malloc',
},
{
'name': 'JIT',
'mmap_prot': 'r.x',
},
],
},
{
'name': 'Ashmem',
'mmap_file': r'^/dev/ashmem',
'children': [
{
'name': 'Dalvik',
'mmap_file': r'^/dev/ashmem/dalvik',
'children': [
{
'name': 'Java Heap',
'mmap_file': r'dalvik-heap',
},
{
'name': 'JIT',
'mmap_prot': 'r.x',
},
],
},
],
},
{
'name': 'Libs',
'mmap_file': r'(\.so)|(\.apk)|(\.jar)',
'children': [
{
'name': 'Native',
'mmap_file': r'\.so',
},
{
'name': 'APKs',
'mmap_file': r'\.apk',
},
{
'name': 'JARs',
'mmap_file': r'\.jar',
},
],
},
{
'name': 'Devices',
'mmap_file': r'^/dev/',
'children': [
{
'name': 'GPU',
'mmap_file': r'(nv)|(mali)|(kgsl)',
},
],
},
]
| bsd-3-clause |
edhuckle/statsmodels | statsmodels/datasets/cpunish/data.py | 25 | 2597 | """US Capital Punishment dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = __doc__
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Number of state executions in 1997"""
DESCRLONG = """This data describes the number of times capital punishment is implemented
at the state level for the year 1997. The outcome variable is the number of
executions. There were executions in 17 states.
Included in the data are explanatory variables for median per capita income
in dollars, the percent of the population classified as living in poverty,
the percent of Black citizens in the population, the rate of violent
crimes per 100,000 residents for 1996, a dummy variable indicating
whether the state is in the South, and (an estimate of) the proportion
of the population with a college degree of some kind.
"""
NOTE = """::
Number of Observations - 17
Number of Variables - 7
Variable name definitions::
EXECUTIONS - Executions in 1997
INCOME - Median per capita income in 1996 dollars
PERPOVERTY - Percent of the population classified as living in poverty
PERBLACK - Percent of black citizens in the population
VC100k96 - Rate of violent crimes per 100,000 residents for 1996
SOUTH - SOUTH == 1 indicates a state in the South
DEGREE - An estimate of the proportion of the state population with a
college degree of some kind
State names are included in the data file, though not returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the cpunish data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the cpunish data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/cpunish.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6,7))
return data
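# Usage sketch:
# >>> from statsmodels.datasets import cpunish
# >>> data = cpunish.load()
# >>> data.endog.shape  # executions for the 17 states
# (17,)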
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/Sphinx-1.5.1-py3.5.egg/sphinx/pycode/pgen2/pgen.py | 4 | 14056 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
from __future__ import print_function
from six import iteritems
from collections import OrderedDict
# Pgen imports
from sphinx.pycode.pgen2 import grammar, token, tokenize
class PgenGrammar(grammar.Grammar):
pass
class ParserGenerator(object):
def __init__(self, filename, stream=None):
close_stream = None
if stream is None:
stream = open(filename)
close_stream = stream.close
self.filename = filename
self.stream = stream
self.generator = tokenize.generate_tokens(stream.readline)
self.gettoken() # Initialize lookahead
self.dfas, self.startsymbol = self.parse()
if close_stream is not None:
close_stream()
self.first = {} # map from symbol name to set of tokens
self.addfirstsets()
def make_grammar(self):
c = PgenGrammar()
names = list(self.dfas.keys())
names.sort()
names.remove(self.startsymbol)
names.insert(0, self.startsymbol)
for name in names:
i = 256 + len(c.symbol2number)
c.symbol2number[name] = i
c.number2symbol[i] = name
for name in names:
dfa = self.dfas[name]
states = []
for state in dfa:
arcs = []
for label, next in iteritems(state.arcs):
arcs.append((self.make_label(c, label), dfa.index(next)))
if state.isfinal:
arcs.append((0, dfa.index(state)))
states.append(arcs)
c.states.append(states)
c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
c.start = c.symbol2number[self.startsymbol]
return c
def make_first(self, c, name):
rawfirst = self.first[name]
first = {}
for label in sorted(rawfirst):
ilabel = self.make_label(c, label)
##assert ilabel not in first # XXX failed on <> ... !=
first[ilabel] = 1
return first
def make_label(self, c, label):
# XXX Maybe this should be a method on a subclass of converter?
ilabel = len(c.labels)
if label[0].isalpha():
# Either a symbol name or a named token
if label in c.symbol2number:
# A symbol name (a non-terminal)
if label in c.symbol2label:
return c.symbol2label[label]
else:
c.labels.append((c.symbol2number[label], None))
c.symbol2label[label] = ilabel
return ilabel
else:
# A named token (NAME, NUMBER, STRING)
itoken = getattr(token, label, None)
assert isinstance(itoken, int), label
assert itoken in token.tok_name, label
if itoken in c.tokens:
return c.tokens[itoken]
else:
c.labels.append((itoken, None))
c.tokens[itoken] = ilabel
return ilabel
else:
# Either a keyword or an operator
assert label[0] in ('"', "'"), label
value = eval(label)
if value[0].isalpha():
# A keyword
if value in c.keywords:
return c.keywords[value]
else:
c.labels.append((token.NAME, value))
c.keywords[value] = ilabel
return ilabel
else:
# An operator (any non-numeric token)
itoken = grammar.opmap[value] # Fails if unknown token
if itoken in c.tokens:
return c.tokens[itoken]
else:
c.labels.append((itoken, None))
c.tokens[itoken] = ilabel
return ilabel
def addfirstsets(self):
names = list(self.dfas.keys())
names.sort()
for name in names:
if name not in self.first:
self.calcfirst(name)
#print name, self.first[name].keys()
def calcfirst(self, name):
dfa = self.dfas[name]
self.first[name] = None # dummy to detect left recursion
state = dfa[0]
totalset = {}
overlapcheck = {}
for label, next in iteritems(state.arcs):
if label in self.dfas:
if label in self.first:
fset = self.first[label]
if fset is None:
raise ValueError("recursion for rule %r" % name)
else:
self.calcfirst(label)
fset = self.first[label]
totalset.update(fset)
overlapcheck[label] = fset
else:
totalset[label] = 1
overlapcheck[label] = {label: 1}
inverse = {}
for label, itsfirst in sorted(overlapcheck.items()):
for symbol in sorted(itsfirst):
if symbol in inverse:
raise ValueError("rule %s is ambiguous; %s is in the"
" first sets of %s as well as %s" %
(name, symbol, label, inverse[symbol]))
inverse[symbol] = label
self.first[name] = totalset
def parse(self):
dfas = {}
startsymbol = None
# MSTART: (NEWLINE | RULE)* ENDMARKER
while self.type != token.ENDMARKER:
while self.type == token.NEWLINE:
self.gettoken()
# RULE: NAME ':' RHS NEWLINE
name = self.expect(token.NAME)
self.expect(token.OP, ":")
a, z = self.parse_rhs()
self.expect(token.NEWLINE)
#self.dump_nfa(name, a, z)
dfa = self.make_dfa(a, z)
#self.dump_dfa(name, dfa)
#oldlen = len(dfa)
self.simplify_dfa(dfa)
#newlen = len(dfa)
dfas[name] = dfa
#print name, oldlen, newlen
if startsymbol is None:
startsymbol = name
return dfas, startsymbol
def make_dfa(self, start, finish):
# To turn an NFA into a DFA, we define the states of the DFA
# to correspond to *sets* of states of the NFA. Then do some
# state reduction. Let's represent sets as dicts with 1 for
# values.
assert isinstance(start, NFAState)
assert isinstance(finish, NFAState)
def closure(state):
base = {}
addclosure(state, base)
return base
def addclosure(state, base):
assert isinstance(state, NFAState)
if state in base:
return
base[state] = 1
for label, next in state.arcs:
if label is None:
addclosure(next, base)
states = [DFAState(closure(start), finish)]
for state in states: # NB states grows while we're iterating
arcs = {}
for nfastate in state.nfaset:
for label, next in nfastate.arcs:
if label is not None:
addclosure(next, arcs.setdefault(label, {}))
for label, nfaset in iteritems(arcs):
for st in states:
if st.nfaset == nfaset:
break
else:
st = DFAState(nfaset, finish)
states.append(st)
state.addarc(st, label)
return states # List of DFAState instances; first one is start
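# Hand-worked example of the construction above (state names a1/z1/a2/z2
# are ad-hoc labels): for the RHS "'a' ['b']", parse_item adds an epsilon
# arc that makes 'b' optional, giving the NFA
#     a1 -a-> z1 -eps-> a2 -b-> z2,  plus  a2 -eps-> z2.
# closure() folds the epsilon successors together, so the DFA becomes
#     {a1} -a-> {z1,a2,z2} -b-> {z2}
# where both successor sets are final because each contains z2.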
def dump_nfa(self, name, start, finish):
print("Dump of NFA for", name)
todo = [start]
for i, state in enumerate(todo):
print(" State", i, state is finish and "(final)" or "")
for label, next in state.arcs:
if next in todo:
j = todo.index(next)
else:
j = len(todo)
todo.append(next)
if label is None:
print(" -> %d" % j)
else:
print(" %s -> %d" % (label, j))
def dump_dfa(self, name, dfa):
print("Dump of DFA for", name)
for i, state in enumerate(dfa):
print(" State", i, state.isfinal and "(final)" or "")
for label, next in iteritems(state.arcs):
print(" %s -> %d" % (label, dfa.index(next)))
def simplify_dfa(self, dfa):
# This is not theoretically optimal, but works well enough.
# Algorithm: repeatedly look for two states that have the same
# set of arcs (same labels pointing to the same nodes) and
# unify them, until things stop changing.
# dfa is a list of DFAState instances
changes = True
while changes:
changes = False
for i, state_i in enumerate(dfa):
for j in range(i+1, len(dfa)):
state_j = dfa[j]
if state_i == state_j:
#print " unify", i, j
del dfa[j]
for state in dfa:
state.unifystate(state_j, state_i)
changes = True
break
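# Example: two final states whose arc sets are both empty compare equal
# under DFAState.__eq__ (same isfinal flag, same labels and targets), so
# a single pass deletes one of them and unifystate() redirects every arc
# that pointed at the deleted state to the survivor.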
def parse_rhs(self):
# RHS: ALT ('|' ALT)*
a, z = self.parse_alt()
if self.value != "|":
return a, z
else:
aa = NFAState()
zz = NFAState()
aa.addarc(a)
z.addarc(zz)
while self.value == "|":
self.gettoken()
a, z = self.parse_alt()
aa.addarc(a)
z.addarc(zz)
return aa, zz
def parse_alt(self):
# ALT: ITEM+
a, b = self.parse_item()
while (self.value in ("(", "[") or
self.type in (token.NAME, token.STRING)):
c, d = self.parse_item()
b.addarc(c)
b = d
return a, b
def parse_item(self):
# ITEM: '[' RHS ']' | ATOM ['+' | '*']
if self.value == "[":
self.gettoken()
a, z = self.parse_rhs()
self.expect(token.OP, "]")
a.addarc(z)
return a, z
else:
a, z = self.parse_atom()
value = self.value
if value not in ("+", "*"):
return a, z
self.gettoken()
z.addarc(a)
if value == "+":
return a, z
else:
return a, a
def parse_atom(self):
# ATOM: '(' RHS ')' | NAME | STRING
if self.value == "(":
self.gettoken()
a, z = self.parse_rhs()
self.expect(token.OP, ")")
return a, z
elif self.type in (token.NAME, token.STRING):
a = NFAState()
z = NFAState()
a.addarc(z, self.value)
self.gettoken()
return a, z
else:
self.raise_error("expected (...) or NAME or STRING, got %s/%s",
self.type, self.value)
def expect(self, type, value=None):
if self.type != type or (value is not None and self.value != value):
self.raise_error("expected %s/%s, got %s/%s",
type, value, self.type, self.value)
value = self.value
self.gettoken()
return value
def gettoken(self):
tup = next(self.generator)
while tup[0] in (tokenize.COMMENT, tokenize.NL):
tup = next(self.generator)
self.type, self.value, self.begin, self.end, self.line = tup
#print token.tok_name[self.type], repr(self.value)
def raise_error(self, msg, *args):
if args:
try:
msg = msg % args
except:
msg = " ".join([msg] + [str(x) for x in args])
raise SyntaxError(msg, (self.filename, self.end[0],
self.end[1], self.line))
class NFAState(object):
def __init__(self):
self.arcs = [] # list of (label, NFAState) pairs
def addarc(self, next, label=None):
assert label is None or isinstance(label, str)
assert isinstance(next, NFAState)
self.arcs.append((label, next))
def __hash__(self):
return hash(tuple(x[0] for x in self.arcs))
class DFAState(object):
def __init__(self, nfaset, final):
assert isinstance(nfaset, dict)
assert isinstance(next(iter(nfaset)), NFAState)
assert isinstance(final, NFAState)
self.nfaset = nfaset
self.isfinal = final in nfaset
self.arcs = OrderedDict() # map from label to DFAState
def __hash__(self):
return hash(tuple(self.arcs))
def addarc(self, next, label):
assert isinstance(label, str)
assert label not in self.arcs
assert isinstance(next, DFAState)
self.arcs[label] = next
def unifystate(self, old, new):
for label, next in iteritems(self.arcs):
if next is old:
self.arcs[label] = new
def __eq__(self, other):
# Equality test -- ignore the nfaset instance variable
assert isinstance(other, DFAState)
if self.isfinal != other.isfinal:
return False
# Can't just return self.arcs == other.arcs, because that
# would invoke this method recursively, with cycles...
if len(self.arcs) != len(other.arcs):
return False
for label, next in iteritems(self.arcs):
if next is not other.arcs.get(label):
return False
return True
def generate_grammar(filename="Grammar.txt"):
p = ParserGenerator(filename)
return p.make_grammar()
| gpl-3.0 |
uannight/reposan | plugin.video.tvalacarta/lib/youtube_dl/utils.py | 1 | 116707 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import base64
import binascii
import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import errno
import functools
import gzip
import io
import itertools
import json
import locale
import math
import operator
import os
import pipes
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib
from .compat import (
compat_HTMLParser,
compat_basestring,
compat_chr,
compat_etree_fromstring,
compat_expanduser,
compat_html_entities,
compat_html_entities_html5,
compat_http_client,
compat_kwargs,
compat_os_name,
compat_parse_qs,
compat_shlex_quote,
compat_socket_create_connection,
compat_str,
compat_struct_pack,
compat_struct_unpack,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urllib_parse_unquote_plus,
compat_urllib_request,
compat_urlparse,
compat_xpath,
)
from .socks import (
ProxyType,
sockssocket,
)
def register_socks_protocols():
# "Register" SOCKS protocols
# In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
# URLs with protocols not in urlparse.uses_netloc are not handled correctly
for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
if scheme not in compat_urlparse.uses_netloc:
compat_urlparse.uses_netloc.append(scheme)
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
std_headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
}
USER_AGENTS = {
'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}
NO_DEFAULT = object()
ENGLISH_MONTH_NAMES = [
'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
MONTH_NAMES = {
'en': ENGLISH_MONTH_NAMES,
'fr': [
'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}
KNOWN_EXTENSIONS = (
'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
'flv', 'f4v', 'f4a', 'f4b',
'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
'mkv', 'mka', 'mk3d',
'avi', 'divx',
'mov',
'asf', 'wmv', 'wma',
'3gp', '3g2',
'mp3',
'flac',
'ape',
'wav',
'f4f', 'f4m', 'm3u8', 'smil')
# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))
DATE_FORMATS = (
'%d %B %Y',
'%d %b %Y',
'%B %d %Y',
'%B %dst %Y',
'%B %dnd %Y',
'%B %dth %Y',
'%b %d %Y',
'%b %dst %Y',
'%b %dnd %Y',
'%b %dth %Y',
'%b %dst %Y %I:%M',
'%b %dnd %Y %I:%M',
'%b %dth %Y %I:%M',
'%Y %m %d',
'%Y-%m-%d',
'%Y/%m/%d',
'%Y/%m/%d %H:%M',
'%Y/%m/%d %H:%M:%S',
'%Y-%m-%d %H:%M',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%d.%m.%Y %H:%M',
'%d.%m.%Y %H.%M',
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f0Z',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%M',
'%b %d %Y at %H:%M',
'%b %d %Y at %H:%M:%S',
)
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
'%d-%m-%Y',
'%d.%m.%Y',
'%d.%m.%y',
'%d/%m/%Y',
'%d/%m/%y',
'%d/%m/%Y %H:%M:%S',
])
DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
'%m-%d-%Y',
'%m.%d.%Y',
'%m/%d/%Y',
'%m/%d/%y',
'%m/%d/%Y %H:%M:%S',
])
PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
def preferredencoding():
"""Get preferred encoding.
Returns the best encoding scheme for the system, based on
locale.getpreferredencoding() and some further tweaks.
"""
try:
pref = locale.getpreferredencoding()
'TEST'.encode(pref)
except Exception:
pref = 'UTF-8'
return pref
def write_json_file(obj, fn):
""" Encode obj as JSON and write it to fn, atomically if possible """
fn = encodeFilename(fn)
if sys.version_info < (3, 0) and sys.platform != 'win32':
encoding = get_filesystem_encoding()
# os.path.basename returns a bytes object, but NamedTemporaryFile
# will fail if the filename contains non ascii characters unless we
# use a unicode object
path_basename = lambda f: os.path.basename(fn).decode(encoding)
# the same for os.path.dirname
path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
else:
path_basename = os.path.basename
path_dirname = os.path.dirname
args = {
'suffix': '.tmp',
'prefix': path_basename(fn) + '.',
'dir': path_dirname(fn),
'delete': False,
}
# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3, 0):
args['mode'] = 'wb'
else:
args.update({
'mode': 'w',
'encoding': 'utf-8',
})
tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
try:
with tf:
json.dump(obj, tf)
if sys.platform == 'win32':
# Need to remove existing file on Windows, else os.rename raises
# WindowsError or FileExistsError.
try:
os.unlink(fn)
except OSError:
pass
os.rename(tf.name, fn)
except Exception:
try:
os.remove(tf.name)
except OSError:
pass
raise
if sys.version_info >= (2, 7):
def find_xpath_attr(node, xpath, key, val=None):
""" Find the xpath xpath[@key=val] """
assert re.match(r'^[a-zA-Z_-]+$', key)
expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
return node.find(expr)
else:
def find_xpath_attr(node, xpath, key, val=None):
for f in node.findall(compat_xpath(xpath)):
if key not in f.attrib:
continue
if val is None or f.attrib.get(key) == val:
return f
return None
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
components = [c.split(':') for c in path.split('/')]
replaced = []
for c in components:
if len(c) == 1:
replaced.append(c[0])
else:
ns, tag = c
replaced.append('{%s}%s' % (ns_map[ns], tag))
return '/'.join(replaced)
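# Example (the namespace URI is a placeholder):
#     >>> xpath_with_ns('media:song/media:url', {'media': 'http://example.com/ns'})
#     '{http://example.com/ns}song/{http://example.com/ns}url'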
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
def _find_xpath(xpath):
return node.find(compat_xpath(xpath))
if isinstance(xpath, (str, compat_str)):
n = _find_xpath(xpath)
else:
for xp in xpath:
n = _find_xpath(xp)
if n is not None:
break
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element %s' % name)
else:
return None
return n
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
n = xpath_element(node, xpath, name, fatal=fatal, default=default)
if n is None or n == default:
return n
if n.text is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element\'s text %s' % name)
else:
return None
return n.text
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
n = find_xpath_attr(node, xpath, key)
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = '%s[@%s]' % (xpath, key) if name is None else name
raise ExtractorError('Could not find XML attribute %s' % name)
else:
return None
return n.attrib[key]
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
return get_element_by_attribute('id', id, html)
def get_element_by_class(class_name, html):
"""Return the content of the first tag with the specified class in the passed HTML document"""
retval = get_elements_by_class(class_name, html)
return retval[0] if retval else None
def get_element_by_attribute(attribute, value, html, escape_value=True):
retval = get_elements_by_attribute(attribute, value, html, escape_value)
return retval[0] if retval else None
def get_elements_by_class(class_name, html):
"""Return the content of all tags with the specified class in the passed HTML document as a list"""
return get_elements_by_attribute(
'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
html, escape_value=False)
def get_elements_by_attribute(attribute, value, html, escape_value=True):
"""Return the content of the tag with the specified attribute in the passed HTML document"""
value = re.escape(value) if escape_value else value
retlist = []
for m in re.finditer(r'''(?xs)
<([a-zA-Z0-9:._-]+)
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
\s+%s=['"]?%s['"]?
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
\s*>
(?P<content>.*?)
</\1>
''' % (re.escape(attribute), value), html):
res = m.group('content')
if res.startswith('"') or res.startswith("'"):
res = res[1:-1]
retlist.append(unescapeHTML(res))
return retlist
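# Example (illustrative markup):
#     >>> get_element_by_attribute('class', 'foo', '<span class="foo">bar</span>')
#     'bar'
# A value that matches no element yields None (or [] from the plural form).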
class HTMLAttributeParser(compat_HTMLParser):
"""Trivial HTML parser to gather the attributes for a single element"""
def __init__(self):
self.attrs = {}
compat_HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
self.attrs = dict(attrs)
def extract_attributes(html_element):
"""Given a string for an HTML element such as
<el
a="foo" B="bar" c="&98;az" d=boz
empty= noval entity="&"
sq='"' dq="'"
>
Decode and return a dictionary of attributes.
{
'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
'empty': '', 'noval': None, 'entity': '&',
'sq': '"', 'dq': '\''
}.
NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
"""
parser = HTMLAttributeParser()
parser.feed(html_element)
parser.close()
return parser.attrs
def clean_html(html):
"""Clean an HTML snippet into a readable string"""
if html is None: # Convenience for sanitizing descriptions etc.
return html
# Newline vs <br />
html = html.replace('\n', ' ')
html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
# Strip html tags
html = re.sub('<.*?>', '', html)
# Replace html entities
html = unescapeHTML(html)
return html.strip()
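# Example (tags stripped, <br/> mapped to a newline, entities decoded):
#     >>> clean_html('  <b>Hello</b><br/>world&amp;more ')
#     'Hello\nworld&more'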
def sanitize_open(filename, open_mode):
"""Try to open the given filename, and slightly tweak it if this fails.
Attempts to open the given filename. If this fails, it tries to change
the filename slightly, step by step, until it's either able to open it
or it fails and raises a final exception, like the standard open()
function.
It returns the tuple (stream, definitive_file_name).
"""
try:
if filename == '-':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode)
return (stream, filename)
except (IOError, OSError) as err:
if err.errno in (errno.EACCES,):
raise
# In case of error, try to remove win32 forbidden chars
alt_filename = sanitize_path(filename)
if alt_filename == filename:
raise
else:
# An exception here should be caught in the caller
stream = open(encodeFilename(alt_filename), open_mode)
return (stream, alt_filename)
def timeconvert(timestr):
"""Convert RFC 2822 defined time string into system timestamp"""
timestamp = None
timetuple = email.utils.parsedate_tz(timestr)
if timetuple is not None:
timestamp = email.utils.mktime_tz(timetuple)
return timestamp
def sanitize_filename(s, restricted=False, is_id=False):
"""Sanitizes a string so it could be used as part of a filename.
If restricted is set, use a stricter subset of allowed characters.
Set is_id if this is not an arbitrary string, but an ID that should be kept
if possible.
"""
def replace_insane(char):
if restricted and char in ACCENT_CHARS:
return ACCENT_CHARS[char]
if char == '?' or ord(char) < 32 or ord(char) == 127:
return ''
elif char == '"':
return '' if restricted else '\''
elif char == ':':
return '_-' if restricted else ' -'
elif char in '\\/|*<>':
return '_'
if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
return '_'
if restricted and ord(char) > 127:
return '_'
return char
# Handle timestamps
s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
result = ''.join(map(replace_insane, s))
if not is_id:
while '__' in result:
result = result.replace('__', '_')
result = result.strip('_')
# Common case of "Foreign band name - English song title"
if restricted and result.startswith('-_'):
result = result[2:]
if result.startswith('-'):
result = '_' + result[len('-'):]
result = result.lstrip('.')
if not result:
result = '_'
return result
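# Example (restricted mode transliterates accents and rewrites unsafe
# punctuation as described above):
#     >>> sanitize_filename('Pôle emploi: FAQ?', restricted=True)
#     'Pole_emploi_-_FAQ'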
def sanitize_path(s):
"""Sanitizes and normalizes path on Windows"""
if sys.platform != 'win32':
return s
drive_or_unc, _ = os.path.splitdrive(s)
if sys.version_info < (2, 7) and not drive_or_unc:
drive_or_unc, _ = os.path.splitunc(s)
norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
if drive_or_unc:
norm_path.pop(0)
sanitized_path = [
path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
for path_part in norm_path]
if drive_or_unc:
sanitized_path.insert(0, drive_or_unc + os.path.sep)
return os.path.join(*sanitized_path)
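# Example (only meaningful on win32; other platforms return the input
# unchanged):
#     >>> sanitize_path('abc|de<f')
#     'abc#de#f'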
# Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of
# unwanted failures due to missing protocol
def sanitize_url(url):
return 'http:%s' % url if url.startswith('//') else url
def sanitized_Request(url, *args, **kwargs):
return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)
def expand_path(s):
"""Expand shell variables and ~"""
return os.path.expandvars(compat_expanduser(s))
def orderedSet(iterable):
""" Remove all duplicates from the input iterable """
res = []
for el in iterable:
if el not in res:
res.append(el)
return res
def _htmlentity_transform(entity_with_semicolon):
"""Transforms an HTML entity to a character."""
entity = entity_with_semicolon[:-1]
# Known non-numeric HTML entity
if entity in compat_html_entities.name2codepoint:
return compat_chr(compat_html_entities.name2codepoint[entity])
# TODO: HTML5 allows entities without a semicolon. For example,
# '&Eacuteric' should be decoded as 'Éric'.
if entity_with_semicolon in compat_html_entities_html5:
return compat_html_entities_html5[entity_with_semicolon]
mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
if mobj is not None:
numstr = mobj.group(1)
if numstr.startswith('x'):
base = 16
numstr = '0%s' % numstr
else:
base = 10
# See https://github.com/rg3/youtube-dl/issues/7518
try:
return compat_chr(int(numstr, base))
except ValueError:
pass
# Unknown entity in name, return its literal representation
return '&%s;' % entity
def unescapeHTML(s):
if s is None:
return None
assert type(s) == compat_str
return re.sub(
r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
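# Example (named, decimal and hexadecimal entities):
#     >>> unescapeHTML('&eacute;ric &#38; &#x26; co')
#     'éric & & co'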
def get_subprocess_encoding():
if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
# For subprocess calls, encode with locale encoding
# Refer to http://stackoverflow.com/a/9951851/35070
encoding = preferredencoding()
else:
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
return encoding
def encodeFilename(s, for_subprocess=False):
"""
@param s The name of the file
"""
assert type(s) == compat_str
# Python 3 has a Unicode API
if sys.version_info >= (3, 0):
return s
# Pass u'' directly to use Unicode APIs on Windows 2000 and up
# (Detecting Windows NT 4 is tricky because 'major >= 4' would
# match Windows 9x series as well. Besides, NT 4 is obsolete.)
if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
return s
# Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
if sys.platform.startswith('java'):
return s
return s.encode(get_subprocess_encoding(), 'ignore')
def decodeFilename(b, for_subprocess=False):
if sys.version_info >= (3, 0):
return b
if not isinstance(b, bytes):
return b
return b.decode(get_subprocess_encoding(), 'ignore')
def encodeArgument(s):
if not isinstance(s, compat_str):
# Legacy code that uses byte strings
# Uncomment the following line after fixing all post processors
# assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
s = s.decode('ascii')
return encodeFilename(s, True)
def decodeArgument(b):
return decodeFilename(b, True)
def decodeOption(optval):
if optval is None:
return optval
if isinstance(optval, bytes):
optval = optval.decode(preferredencoding())
assert isinstance(optval, compat_str)
return optval
def formatSeconds(secs):
if secs > 3600:
return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
elif secs > 60:
return '%d:%02d' % (secs // 60, secs % 60)
else:
return '%d' % secs
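# Example:
#     >>> formatSeconds(3661), formatSeconds(61), formatSeconds(5)
#     ('1:01:01', '1:01', '5')
# Note the strict comparisons: exactly 3600 seconds renders as '60:00'.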
def make_HTTPS_handler(params, **kwargs):
opts_no_check_certificate = params.get('nocheckcertificate', False)
if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
if opts_no_check_certificate:
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
try:
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
except TypeError:
# Python 2.7.8
# (create_default_context present but HTTPSHandler has no context=)
pass
if sys.version_info < (3, 2):
return YoutubeDLHTTPSHandler(params, **kwargs)
else: # Python < 3.4
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = (ssl.CERT_NONE
if opts_no_check_certificate
else ssl.CERT_REQUIRED)
context.set_default_verify_paths()
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
def bug_reports_message():
if ytdl_is_updateable():
update_cmd = 'type youtube-dl -U to update'
else:
update_cmd = 'see https://yt-dl.org/update on how to update'
msg = '; please report this issue on https://yt-dl.org/bug .'
msg += ' Make sure you are using the latest version; %s.' % update_cmd
msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
return msg
class YoutubeDLError(Exception):
"""Base exception for YoutubeDL errors."""
pass
class ExtractorError(YoutubeDLError):
"""Error during info extraction."""
def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
"""
if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
expected = True
if video_id is not None:
msg = video_id + ': ' + msg
if cause:
msg += ' (caused by %r)' % cause
if not expected:
msg += bug_reports_message()
super(ExtractorError, self).__init__(msg)
self.traceback = tb
self.exc_info = sys.exc_info() # preserve original exception
self.cause = cause
self.video_id = video_id
def format_traceback(self):
if self.traceback is None:
return None
return ''.join(traceback.format_tb(self.traceback))
class UnsupportedError(ExtractorError):
def __init__(self, url):
super(UnsupportedError, self).__init__(
'Unsupported URL: %s' % url, expected=True)
self.url = url
class RegexNotFoundError(ExtractorError):
"""Error when a regex didn't match"""
pass
class GeoRestrictedError(ExtractorError):
"""Geographic restriction Error exception.
This exception may be thrown when a video is not available from your
geographic location due to geographic restrictions imposed by a website.
"""
def __init__(self, msg, countries=None):
super(GeoRestrictedError, self).__init__(msg, expected=True)
self.msg = msg
self.countries = countries
class DownloadError(YoutubeDLError):
"""Download Error exception.
This exception may be thrown by FileDownloader objects if they are not
configured to continue on errors. They will contain the appropriate
error message.
"""
def __init__(self, msg, exc_info=None):
""" exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
super(DownloadError, self).__init__(msg)
self.exc_info = exc_info
class SameFileError(YoutubeDLError):
"""Same File exception.
This exception will be thrown by FileDownloader objects if they detect
multiple files would have to be downloaded to the same file on disk.
"""
pass
class PostProcessingError(YoutubeDLError):
"""Post Processing exception.
This exception may be raised by PostProcessor's .run() method to
indicate an error in the postprocessing task.
"""
def __init__(self, msg):
super(PostProcessingError, self).__init__(msg)
self.msg = msg
class MaxDownloadsReached(YoutubeDLError):
""" --max-downloads limit has been reached. """
pass
class UnavailableVideoError(YoutubeDLError):
"""Unavailable Format exception.
This exception will be thrown when a video is requested
in a format that is not available for that video.
"""
pass
class ContentTooShortError(YoutubeDLError):
"""Content Too Short exception.
This exception may be raised by FileDownloader objects when a file they
download is too small for what the server announced first, indicating
the connection was probably interrupted.
"""
def __init__(self, downloaded, expected):
super(ContentTooShortError, self).__init__(
'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
)
# Both in bytes
self.downloaded = downloaded
self.expected = expected
class XAttrMetadataError(YoutubeDLError):
def __init__(self, code=None, msg='Unknown error'):
super(XAttrMetadataError, self).__init__(msg)
self.code = code
self.msg = msg
# Parsing code and msg
if (self.code in (errno.ENOSPC, errno.EDQUOT) or
'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
self.reason = 'NO_SPACE'
elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
self.reason = 'VALUE_TOO_LONG'
else:
self.reason = 'NOT_SUPPORTED'
class XAttrUnavailableError(YoutubeDLError):
pass
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
# Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
# expected HTTP responses to meet HTTP/1.0 or later (see also
# https://github.com/rg3/youtube-dl/issues/6727)
if sys.version_info < (3, 0):
kwargs[b'strict'] = True
hc = http_class(*args, **kwargs)
source_address = ydl_handler._params.get('source_address')
if source_address is not None:
sa = (source_address, 0)
if hasattr(hc, 'source_address'): # Python 2.7+
hc.source_address = sa
else: # Python 2.6
def _hc_connect(self, *args, **kwargs):
sock = compat_socket_create_connection(
(self.host, self.port), self.timeout, sa)
if is_https:
self.sock = ssl.wrap_socket(
sock, self.key_file, self.cert_file,
ssl_version=ssl.PROTOCOL_TLSv1)
else:
self.sock = sock
hc.connect = functools.partial(_hc_connect, hc)
return hc
def handle_youtubedl_headers(headers):
filtered_headers = headers
if 'Youtubedl-no-compression' in filtered_headers:
filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
del filtered_headers['Youtubedl-no-compression']
return filtered_headers
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
"""Handler for HTTP requests and responses.
This class, when installed with an OpenerDirector, automatically adds
the standard headers to every HTTP request and handles gzipped and
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has
to include the HTTP header "Youtubedl-no-compression", which will be
removed before making the real request.
Part of this code was copied from:
http://techknack.net/python-urllib2-handlers/
Andrew Rowls, the author of that code, agreed to release it to the
public domain.
"""
def __init__(self, params, *args, **kwargs):
compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
self._params = params
def http_open(self, req):
conn_class = compat_http_client.HTTPConnection
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, False),
req)
@staticmethod
def deflate(data):
try:
return zlib.decompress(data, -zlib.MAX_WBITS)
except zlib.error:
return zlib.decompress(data)
@staticmethod
def addinfourl_wrapper(stream, headers, url, code):
if hasattr(compat_urllib_request.addinfourl, 'getcode'):
return compat_urllib_request.addinfourl(stream, headers, url, code)
ret = compat_urllib_request.addinfourl(stream, headers, url)
ret.code = code
return ret
def http_request(self, req):
# According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
# always respected by websites, some tend to give out URLs with non percent-encoded
# non-ASCII characters (see telemb.py, ard.py [#3412])
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
# To work around aforementioned issue we will replace request's original URL with
# percent-encoded one
# Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
# the code of this workaround has been moved here from YoutubeDL.urlopen()
url = req.get_full_url()
url_escaped = escape_url(url)
# Substitute URL if any change after escaping
if url != url_escaped:
req = update_Request(req, url=url_escaped)
for h, v in std_headers.items():
# Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
# The dict keys are capitalized because of this bug by urllib
if h.capitalize() not in req.headers:
req.add_header(h, v)
req.headers = handle_youtubedl_headers(req.headers)
if sys.version_info < (2, 7) and '#' in req.get_full_url():
# Python 2.6 is brain-dead when it comes to fragments
req._Request__original = req._Request__original.partition('#')[0]
req._Request__r_type = req._Request__r_type.partition('#')[0]
return req
def http_response(self, req, resp):
old_resp = resp
# gzip
if resp.headers.get('Content-encoding', '') == 'gzip':
content = resp.read()
gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
try:
uncompressed = io.BytesIO(gz.read())
except IOError as original_ioerror:
# There may be junk at the end of the file
# See http://stackoverflow.com/q/4928560/35070 for details
for i in range(1, 1024):
try:
gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
uncompressed = io.BytesIO(gz.read())
except IOError:
continue
break
else:
raise original_ioerror
resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# deflate
if resp.headers.get('Content-encoding', '') == 'deflate':
gz = io.BytesIO(self.deflate(resp.read()))
resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
# https://github.com/rg3/youtube-dl/issues/6457).
if 300 <= resp.code < 400:
location = resp.headers.get('Location')
if location:
# As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
if sys.version_info >= (3, 0):
location = location.encode('iso-8859-1').decode('utf-8')
else:
location = location.decode('utf-8')
location_escaped = escape_url(location)
if location != location_escaped:
del resp.headers['Location']
if sys.version_info < (3, 0):
location_escaped = location_escaped.encode('utf-8')
resp.headers['Location'] = location_escaped
return resp
https_request = http_request
https_response = http_response
def make_socks_conn_class(base_class, socks_proxy):
assert issubclass(base_class, (
compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))
url_components = compat_urlparse.urlparse(socks_proxy)
if url_components.scheme.lower() == 'socks5':
socks_type = ProxyType.SOCKS5
elif url_components.scheme.lower() in ('socks', 'socks4'):
socks_type = ProxyType.SOCKS4
elif url_components.scheme.lower() == 'socks4a':
socks_type = ProxyType.SOCKS4A
def unquote_if_non_empty(s):
if not s:
return s
return compat_urllib_parse_unquote_plus(s)
proxy_args = (
socks_type,
url_components.hostname, url_components.port or 1080,
True, # Remote DNS
unquote_if_non_empty(url_components.username),
unquote_if_non_empty(url_components.password),
)
class SocksConnection(base_class):
def connect(self):
self.sock = sockssocket()
self.sock.setproxy(*proxy_args)
if type(self.timeout) in (int, float):
self.sock.settimeout(self.timeout)
self.sock.connect((self.host, self.port))
if isinstance(self, compat_http_client.HTTPSConnection):
if hasattr(self, '_context'): # Python > 2.6
self.sock = self._context.wrap_socket(
self.sock, server_hostname=self.host)
else:
self.sock = ssl.wrap_socket(self.sock)
return SocksConnection
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
def __init__(self, params, https_conn_class=None, *args, **kwargs):
compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
self._params = params
def https_open(self, req):
kwargs = {}
conn_class = self._https_conn_class
if hasattr(self, '_context'): # python > 2.6
kwargs['context'] = self._context
if hasattr(self, '_check_hostname'): # python 3.x
kwargs['check_hostname'] = self._check_hostname
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, True),
req, **kwargs)
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
def __init__(self, cookiejar=None):
compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
def http_response(self, request, response):
# Python 2 will choke on the next HTTP request in a row if there are
# non-ASCII characters in the Set-Cookie HTTP header of the last response (see
# https://github.com/rg3/youtube-dl/issues/6769).
# In order to at least prevent crashing we will percent encode Set-Cookie
# header before HTTPCookieProcessor starts processing it.
# if sys.version_info < (3, 0) and response.headers:
# for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
# set_cookie = response.headers.get(set_cookie_header)
# if set_cookie:
# set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
# if set_cookie != set_cookie_escaped:
# del response.headers[set_cookie_header]
# response.headers[set_cookie_header] = set_cookie_escaped
return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
https_request = compat_urllib_request.HTTPCookieProcessor.http_request
https_response = http_response
def extract_timezone(date_str):
m = re.search(
r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
date_str)
if not m:
timezone = datetime.timedelta()
else:
date_str = date_str[:-len(m.group('tz'))]
if not m.group('sign'):
timezone = datetime.timedelta()
else:
sign = 1 if m.group('sign') == '+' else -1
timezone = datetime.timedelta(
hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes')))
return timezone, date_str
def parse_iso8601(date_str, delimiter='T', timezone=None):
""" Return a UNIX timestamp from the given date """
if date_str is None:
return None
date_str = re.sub(r'\.[0-9]+', '', date_str)
if timezone is None:
timezone, date_str = extract_timezone(date_str)
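# The strptime-based parsing below is disabled in this copy, so this
# helper currently falls through and returns None for every input.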
#try:
# date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
# dt = datetime.datetime.strptime(date_str, date_format) - timezone
# return calendar.timegm(dt.timetuple())
#except ValueError:
# pass
def date_formats(day_first=True):
return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
def unified_strdate(date_str, day_first=True):
"""Return a string with the date in the format YYYYMMDD"""
if date_str is None:
return None
upload_date = None
# Replace commas
date_str = date_str.replace(',', ' ')
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
_, date_str = extract_timezone(date_str)
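# The format-table parsing below is disabled in this copy; only the
# RFC 2822 fallback further down can populate upload_date.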
#for expression in date_formats(day_first):
# try:
# upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
# except ValueError:
# pass
if upload_date is None:
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
try:
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is not None:
return compat_str(upload_date)
def unified_timestamp(date_str, day_first=True):
if date_str is None:
return None
date_str = date_str.replace(',', ' ')
pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
timezone, date_str = extract_timezone(date_str)
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
for expression in date_formats(day_first):
try:
dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
return calendar.timegm(dt.timetuple())
except ValueError:
pass
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
return calendar.timegm(timetuple) + pm_delta * 3600
def determine_ext(url, default_ext='unknown_video'):
if url is None:
return default_ext
guess = url.partition('?')[0].rpartition('.')[2]
if re.match(r'^[A-Za-z0-9]+$', guess):
return guess
# Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
elif guess.rstrip('/') in KNOWN_EXTENSIONS:
return guess.rstrip('/')
else:
return default_ext
def subtitles_filename(filename, sub_lang, sub_format):
return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
def date_from_str(date_str):
"""
Return a datetime object from a string in the format YYYYMMDD or
(now|today)[+-][0-9](day|week|month|year)(s)?"""
today = datetime.date.today()
if date_str in ('now', 'today'):
return today
if date_str == 'yesterday':
return today - datetime.timedelta(days=1)
match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
if match is not None:
sign = match.group('sign')
time = int(match.group('time'))
if sign == '-':
time = -time
unit = match.group('unit')
# A bad approximation?
if unit == 'month':
unit = 'day'
time *= 30
elif unit == 'year':
unit = 'day'
time *= 365
unit += 's'
delta = datetime.timedelta(**{unit: time})
return today + delta
return datetime.datetime.strptime(date_str, '%Y%m%d').date()
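# Example:
#     >>> date_from_str('now-1week') == datetime.date.today() - datetime.timedelta(weeks=1)
#     True
#     >>> date_from_str('20170101')
#     datetime.date(2017, 1, 1)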
def hyphenate_date(date_str):
"""
Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
if match is not None:
return '-'.join(match.groups())
else:
return date_str
class DateRange(object):
"""Represents a time interval between two dates"""
def __init__(self, start=None, end=None):
"""start and end must be strings in the format accepted by date"""
if start is not None:
self.start = date_from_str(start)
else:
self.start = datetime.datetime.min.date()
if end is not None:
self.end = date_from_str(end)
else:
self.end = datetime.datetime.max.date()
if self.start > self.end:
raise ValueError('Date range: "%s", the start date must be before the end date' % self)
@classmethod
def day(cls, day):
"""Returns a range that only contains the given day"""
return cls(day, day)
def __contains__(self, date):
"""Check if the date is in the range"""
if not isinstance(date, datetime.date):
date = date_from_str(date)
return self.start <= date <= self.end
def __str__(self):
return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
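# Example:
#     >>> '20170415' in DateRange('20170101', '20171231')
#     True
#     >>> str(DateRange.day('20170415'))
#     '2017-04-15 - 2017-04-15'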
def platform_name():
""" Returns the platform name as a compat_str """
res = platform.platform()
if isinstance(res, bytes):
res = res.decode(preferredencoding())
assert isinstance(res, compat_str)
return res
def _windows_write_string(s, out):
""" Returns True if the string was written using special methods,
False if it has yet to be written out."""
# Adapted from http://stackoverflow.com/a/3259271/35070
import ctypes
import ctypes.wintypes
WIN_OUTPUT_IDS = {
1: -11,
2: -12,
}
try:
fileno = out.fileno()
except AttributeError:
# If the output stream doesn't have a fileno, it's virtual
return False
except io.UnsupportedOperation:
# Some strange Windows pseudo files?
return False
if fileno not in WIN_OUTPUT_IDS:
return False
GetStdHandle = ctypes.WINFUNCTYPE(
ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
(b'GetStdHandle', ctypes.windll.kernel32))
h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
WriteConsoleW = ctypes.WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
written = ctypes.wintypes.DWORD(0)
GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = ctypes.WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
ctypes.POINTER(ctypes.wintypes.DWORD))(
(b'GetConsoleMode', ctypes.windll.kernel32))
INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
def not_a_console(handle):
if handle == INVALID_HANDLE_VALUE or handle is None:
return True
return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
if not_a_console(h):
return False
def next_nonbmp_pos(s):
try:
return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
except StopIteration:
return len(s)
while s:
count = min(next_nonbmp_pos(s), 1024)
ret = WriteConsoleW(
h, s, count if count else 2, ctypes.byref(written), None)
if ret == 0:
raise OSError('Failed to write string')
if not count: # We just wrote a non-BMP character
assert written.value == 2
s = s[1:]
else:
assert written.value > 0
s = s[written.value:]
return True
def write_string(s, out=None, encoding=None):
if out is None:
out = sys.stderr
assert type(s) == compat_str
if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
if _windows_write_string(s, out):
return
if ('b' in getattr(out, 'mode', '') or
sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
byt = s.encode(encoding or preferredencoding(), 'ignore')
out.write(byt)
elif hasattr(out, 'buffer'):
enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
byt = s.encode(enc, 'ignore')
out.buffer.write(byt)
else:
out.write(s)
out.flush()
def bytes_to_intlist(bs):
if not bs:
return []
if isinstance(bs[0], int): # Python 3
return list(bs)
else:
return [ord(c) for c in bs]
def intlist_to_bytes(xs):
if not xs:
return b''
return compat_struct_pack('%dB' % len(xs), *xs)
# Cross-platform file locking
if sys.platform == 'win32':
import ctypes.wintypes
import msvcrt
class OVERLAPPED(ctypes.Structure):
_fields_ = [
('Internal', ctypes.wintypes.LPVOID),
('InternalHigh', ctypes.wintypes.LPVOID),
('Offset', ctypes.wintypes.DWORD),
('OffsetHigh', ctypes.wintypes.DWORD),
('hEvent', ctypes.wintypes.HANDLE),
]
kernel32 = ctypes.windll.kernel32
LockFileEx = kernel32.LockFileEx
LockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwFlags
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
LockFileEx.restype = ctypes.wintypes.BOOL
UnlockFileEx = kernel32.UnlockFileEx
UnlockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
UnlockFileEx.restype = ctypes.wintypes.BOOL
whole_low = 0xffffffff
whole_high = 0x7fffffff
def _lock_file(f, exclusive):
overlapped = OVERLAPPED()
overlapped.Offset = 0
overlapped.OffsetHigh = 0
overlapped.hEvent = 0
f._lock_file_overlapped_p = ctypes.pointer(overlapped)
handle = msvcrt.get_osfhandle(f.fileno())
if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Locking file failed: %r' % ctypes.FormatError())
def _unlock_file(f):
assert f._lock_file_overlapped_p
handle = msvcrt.get_osfhandle(f.fileno())
if not UnlockFileEx(handle, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
# Some platforms, such as Jython, are missing fcntl
try:
import fcntl
def _lock_file(f, exclusive):
fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _unlock_file(f):
fcntl.flock(f, fcntl.LOCK_UN)
except ImportError:
UNSUPPORTED_MSG = 'file locking is not supported on this platform'
def _lock_file(f, exclusive):
raise IOError(UNSUPPORTED_MSG)
def _unlock_file(f):
raise IOError(UNSUPPORTED_MSG)
class locked_file(object):
def __init__(self, filename, mode, encoding=None):
assert mode in ['r', 'a', 'w']
self.f = io.open(filename, mode, encoding=encoding)
self.mode = mode
def __enter__(self):
exclusive = self.mode != 'r'
try:
_lock_file(self.f, exclusive)
except IOError:
self.f.close()
raise
return self
def __exit__(self, etype, value, traceback):
try:
_unlock_file(self.f)
finally:
self.f.close()
def __iter__(self):
return iter(self.f)
def write(self, *args):
return self.f.write(*args)
def read(self, *args):
return self.f.read(*args)
def get_filesystem_encoding():
encoding = sys.getfilesystemencoding()
return encoding if encoding is not None else 'utf-8'
def shell_quote(args):
quoted_args = []
encoding = get_filesystem_encoding()
for a in args:
if isinstance(a, bytes):
# We may get a filename encoded with 'encodeFilename'
a = a.decode(encoding)
quoted_args.append(pipes.quote(a))
return ' '.join(quoted_args)
def smuggle_url(url, data):
""" Pass additional data in a URL for internal use. """
url, idata = unsmuggle_url(url, {})
data.update(idata)
sdata = compat_urllib_parse_urlencode(
{'__youtubedl_smuggle': json.dumps(data)})
return url + '#' + sdata
def unsmuggle_url(smug_url, default=None):
if '#__youtubedl_smuggle' not in smug_url:
return smug_url, default
url, _, sdata = smug_url.rpartition('#')
jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
data = json.loads(jsond)
return url, data
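# Example round trip (hostnames are placeholders):
#     >>> url = smuggle_url('http://example.com/v', {'referer': 'http://example.org'})
#     >>> unsmuggle_url(url)
#     ('http://example.com/v', {'referer': 'http://example.org'})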
def format_bytes(bytes):
if bytes is None:
return 'N/A'
if type(bytes) is str:
bytes = float(bytes)
if bytes == 0.0:
exponent = 0
else:
exponent = int(math.log(bytes, 1024.0))
suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
converted = float(bytes) / float(1024 ** exponent)
return '%.2f%s' % (converted, suffix)
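# Example:
#     >>> format_bytes(1536)
#     '1.50KiB'
#     >>> format_bytes(None)
#     'N/A'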
def lookup_unit_table(unit_table, s):
units_re = '|'.join(re.escape(u) for u in unit_table)
m = re.match(
r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
if not m:
return None
num_str = m.group('num').replace(',', '.')
mult = unit_table[m.group('unit')]
return int(float(num_str) * mult)
def parse_filesize(s):
if s is None:
return None
# The lower-case forms are of course incorrect and unofficial,
# but we support those too
_UNIT_TABLE = {
'B': 1,
'b': 1,
'bytes': 1,
'KiB': 1024,
'KB': 1000,
'kB': 1024,
'Kb': 1000,
'kb': 1000,
'kilobytes': 1000,
'kibibytes': 1024,
'MiB': 1024 ** 2,
'MB': 1000 ** 2,
'mB': 1024 ** 2,
'Mb': 1000 ** 2,
'mb': 1000 ** 2,
'megabytes': 1000 ** 2,
'mebibytes': 1024 ** 2,
'GiB': 1024 ** 3,
'GB': 1000 ** 3,
'gB': 1024 ** 3,
'Gb': 1000 ** 3,
'gb': 1000 ** 3,
'gigabytes': 1000 ** 3,
'gibibytes': 1024 ** 3,
'TiB': 1024 ** 4,
'TB': 1000 ** 4,
'tB': 1024 ** 4,
'Tb': 1000 ** 4,
'tb': 1000 ** 4,
'terabytes': 1000 ** 4,
'tebibytes': 1024 ** 4,
'PiB': 1024 ** 5,
'PB': 1000 ** 5,
'pB': 1024 ** 5,
'Pb': 1000 ** 5,
'pb': 1000 ** 5,
'petabytes': 1000 ** 5,
'pebibytes': 1024 ** 5,
'EiB': 1024 ** 6,
'EB': 1000 ** 6,
'eB': 1024 ** 6,
'Eb': 1000 ** 6,
'eb': 1000 ** 6,
'exabytes': 1000 ** 6,
'exbibytes': 1024 ** 6,
'ZiB': 1024 ** 7,
'ZB': 1000 ** 7,
'zB': 1024 ** 7,
'Zb': 1000 ** 7,
'zb': 1000 ** 7,
'zettabytes': 1000 ** 7,
'zebibytes': 1024 ** 7,
'YiB': 1024 ** 8,
'YB': 1000 ** 8,
'yB': 1024 ** 8,
'Yb': 1000 ** 8,
'yb': 1000 ** 8,
'yottabytes': 1000 ** 8,
'yobibytes': 1024 ** 8,
}
return lookup_unit_table(_UNIT_TABLE, s)
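# Example (binary vs. decimal units; a comma is accepted as decimal
# separator):
#     >>> parse_filesize('1,5 MiB')
#     1572864
#     >>> parse_filesize('500 KB')
#     500000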
def parse_count(s):
if s is None:
return None
s = s.strip()
if re.match(r'^[\d,.]+$', s):
return str_to_int(s)
_UNIT_TABLE = {
'k': 1000,
'K': 1000,
'm': 1000 ** 2,
'M': 1000 ** 2,
'kk': 1000 ** 2,
'KK': 1000 ** 2,
}
return lookup_unit_table(_UNIT_TABLE, s)
def month_by_name(name, lang='en'):
""" Return the number of a month by (locale-independently) English name """
month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
try:
return month_names.index(name) + 1
except ValueError:
return None
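# Example:
#     >>> month_by_name('février', lang='fr')
#     2
#     >>> month_by_name('Brumaire') is None
#     True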
def month_by_abbreviation(abbrev):
""" Return the number of a month by (locale-independently) English
abbreviations """
try:
return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
except ValueError:
return None
def fix_xml_ampersands(xml_str):
"""Replace all the '&' by '&' in XML"""
return re.sub(
r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
'&',
xml_str)
def setproctitle(title):
assert isinstance(title, compat_str)
# ctypes in Jython is not complete
# http://bugs.jython.org/issue2148
if sys.platform.startswith('java'):
return
try:
libc = ctypes.cdll.LoadLibrary('libc.so.6')
except OSError:
return
except TypeError:
# LoadLibrary in Windows Python 2.7.13 only expects
# a bytestring, but since unicode_literals turns
# every string into a unicode string, it fails.
return
title_bytes = title.encode('utf-8')
buf = ctypes.create_string_buffer(len(title_bytes))
buf.value = title_bytes
try:
libc.prctl(15, buf, 0, 0, 0)
except AttributeError:
return # Strange libc, just skip this
def remove_start(s, start):
return s[len(start):] if s is not None and s.startswith(start) else s
def remove_end(s, end):
return s[:-len(end)] if s is not None and s.endswith(end) else s
def remove_quotes(s):
if s is None or len(s) < 2:
return s
for quote in ('"', "'", ):
if s[0] == quote and s[-1] == quote:
return s[1:-1]
return s
def url_basename(url):
path = compat_urlparse.urlparse(url).path
return path.strip('/').split('/')[-1]
def base_url(url):
return re.match(r'https?://[^?#&]+/', url).group()
def urljoin(base, path):
if isinstance(path, bytes):
path = path.decode('utf-8')
if not isinstance(path, compat_str) or not path:
return None
if re.match(r'^(?:https?:)?//', path):
return path
if isinstance(base, bytes):
base = base.decode('utf-8')
if not isinstance(base, compat_str) or not re.match(
r'^(?:https?:)?//', base):
return None
return compat_urlparse.urljoin(base, path)
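# Example (hostnames are placeholders):
#     >>> urljoin('http://example.com/a/', 'b.mp4')
#     'http://example.com/a/b.mp4'
#     >>> urljoin(None, 'b.mp4') is None
#     True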
class HEADRequest(compat_urllib_request.Request):
def get_method(self):
return 'HEAD'
class PUTRequest(compat_urllib_request.Request):
def get_method(self):
return 'PUT'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
if get_attr:
if v is not None:
v = getattr(v, get_attr, None)
if v == '':
v = None
if v is None:
return default
try:
return int(v) * invscale // scale
except ValueError:
return default
def str_or_none(v, default=None):
return default if v is None else compat_str(v)
def str_to_int(int_str):
""" A more relaxed version of int_or_none """
if int_str is None:
return None
int_str = re.sub(r'[,\.\+]', '', int_str)
return int(int_str)
def float_or_none(v, scale=1, invscale=1, default=None):
if v is None:
return default
try:
return float(v) * invscale / scale
except ValueError:
return default
def strip_or_none(v):
return None if v is None else v.strip()
def parse_duration(s):
if not isinstance(s, compat_basestring):
return None
s = s.strip()
days, hours, mins, secs, ms = [None] * 5
m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(
r'''(?ix)(?:P?T)?
(?:
(?P<days>[0-9]+)\s*d(?:ays?)?\s*
)?
(?:
(?P<hours>[0-9]+)\s*h(?:ours?)?\s*
)?
(?:
(?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
)?
(?:
(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
)?Z?$''', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
if m:
hours, mins = m.groups()
else:
return None
duration = 0
if secs:
duration += float(secs)
if mins:
duration += float(mins) * 60
if hours:
duration += float(hours) * 60 * 60
if days:
duration += float(days) * 24 * 60 * 60
if ms:
duration += float(ms)
return duration
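# Example (one input for each of the three grammars above; the result is
# always a float):
#     >>> parse_duration('1:30')
#     90.0
#     >>> parse_duration('2h 30m')
#     9000.0
#     >>> parse_duration('PT1H10M')
#     4200.0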
def prepend_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return (
'{0}.{1}{2}'.format(name, ext, real_ext)
if not expected_real_ext or real_ext[1:] == expected_real_ext
else '{0}.{1}'.format(filename, ext))
def replace_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return '{0}.{1}'.format(
name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
ext)
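# Example behavior of the two extension helpers (illustrative values):
#   prepend_extension('video.mp4', 'temp')         ->  'video.temp.mp4'
#   prepend_extension('video.dat', 'temp', 'mp4')  ->  'video.dat.temp'
#   replace_extension('video.mp4', 'mkv')          ->  'video.mkv'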
def check_executable(exe, args=[]):
""" Checks if the given binary is installed somewhere in PATH, and returns its name.
args can be a list of arguments for a short output (like -version) """
try:
subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
except OSError:
return False
return exe
def get_exe_version(exe, args=['--version'],
version_re=None, unrecognized='present'):
""" Returns the version of the specified executable,
or False if the executable is not present """
try:
# STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
# SIGTTOU if youtube-dl is run in the background.
# See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656
out, _ = subprocess.Popen(
[encodeArgument(exe)] + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
except OSError:
return False
if isinstance(out, bytes): # Python 2.x
out = out.decode('ascii', 'ignore')
return detect_exe_version(out, version_re, unrecognized)
def detect_exe_version(output, version_re=None, unrecognized='present'):
assert isinstance(output, compat_str)
if version_re is None:
version_re = r'version\s+([-0-9._a-zA-Z]+)'
m = re.search(version_re, output)
if m:
return m.group(1)
else:
return unrecognized
class PagedList(object):
def __len__(self):
# This is only useful for tests
return len(self.getslice())
class OnDemandPagedList(PagedList):
def __init__(self, pagefunc, pagesize, use_cache=False):
self._pagefunc = pagefunc
self._pagesize = pagesize
self._use_cache = use_cache
if use_cache:
self._cache = {}
def getslice(self, start=0, end=None):
res = []
for pagenum in itertools.count(start // self._pagesize):
firstid = pagenum * self._pagesize
nextfirstid = pagenum * self._pagesize + self._pagesize
if start >= nextfirstid:
continue
page_results = None
if self._use_cache:
page_results = self._cache.get(pagenum)
if page_results is None:
page_results = list(self._pagefunc(pagenum))
if self._use_cache:
self._cache[pagenum] = page_results
startv = (
start % self._pagesize
if firstid <= start < nextfirstid
else 0)
endv = (
((end - 1) % self._pagesize) + 1
if (end is not None and firstid <= end <= nextfirstid)
else None)
if startv != 0 or endv is not None:
page_results = page_results[startv:endv]
res.extend(page_results)
# A little optimization - if the current page is not "full", i.e. does
# not contain page_size videos, then we can assume that this page
# is the last one - there are no more ids on further pages,
# so there is no need to query again.
if len(page_results) + startv < self._pagesize:
break
# If we got the whole page, but the next page is not interesting,
# break out early as well
if end == nextfirstid:
break
return res
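# Minimal sketch of driving OnDemandPagedList (the page function is a
# made-up example, not part of the original code):
#   pages = OnDemandPagedList(lambda n: range(n * 10, (n + 1) * 10), 10)
#   pages.getslice(5, 25)  ->  [5, 6, ..., 24], fetching only pages 0, 1 and 2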
class InAdvancePagedList(PagedList):
def __init__(self, pagefunc, pagecount, pagesize):
self._pagefunc = pagefunc
self._pagecount = pagecount
self._pagesize = pagesize
def getslice(self, start=0, end=None):
res = []
start_page = start // self._pagesize
end_page = (
self._pagecount if end is None else (end // self._pagesize + 1))
skip_elems = start - start_page * self._pagesize
only_more = None if end is None else end - start
for pagenum in range(start_page, end_page):
page = list(self._pagefunc(pagenum))
if skip_elems:
page = page[skip_elems:]
skip_elems = None
if only_more is not None:
if len(page) < only_more:
only_more -= len(page)
else:
page = page[:only_more]
res.extend(page)
break
res.extend(page)
return res
def uppercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\U[0-9a-fA-F]{8}',
lambda m: unicode_escape(m.group(0))[0],
s)
def lowercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\u[0-9a-fA-F]{4}',
lambda m: unicode_escape(m.group(0))[0],
s)
def escape_rfc3986(s):
"""Escape non-ASCII characters as suggested by RFC 3986"""
if sys.version_info < (3, 0) and isinstance(s, compat_str):
s = s.encode('utf-8')
return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
"""Escape URL as suggested by RFC 3986"""
url_parsed = compat_urllib_parse_urlparse(url)
return url_parsed._replace(
netloc=url_parsed.netloc.encode('idna').decode('ascii'),
path=escape_rfc3986(url_parsed.path),
params=escape_rfc3986(url_parsed.params),
query=escape_rfc3986(url_parsed.query),
fragment=escape_rfc3986(url_parsed.fragment)
).geturl()
def read_batch_urls(batch_fd):
def fixup(url):
if not isinstance(url, compat_str):
url = url.decode('utf-8', 'replace')
BOM_UTF8 = '\xef\xbb\xbf'
if url.startswith(BOM_UTF8):
url = url[len(BOM_UTF8):]
url = url.strip()
if url.startswith(('#', ';', ']')):
return False
return url
with contextlib.closing(batch_fd) as fd:
return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
def update_url_query(url, query):
if not query:
return url
parsed_url = compat_urlparse.urlparse(url)
qs = compat_parse_qs(parsed_url.query)
qs.update(query)
return compat_urlparse.urlunparse(parsed_url._replace(
query=compat_urllib_parse_urlencode(qs, True)))
def update_Request(req, url=None, data=None, headers={}, query={}):
req_headers = req.headers.copy()
req_headers.update(headers)
req_data = data or req.data
req_url = update_url_query(url or req.get_full_url(), query)
req_get_method = req.get_method()
if req_get_method == 'HEAD':
req_type = HEADRequest
elif req_get_method == 'PUT':
req_type = PUTRequest
else:
req_type = compat_urllib_request.Request
new_req = req_type(
req_url, data=req_data, headers=req_headers,
origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
if hasattr(req, 'timeout'):
new_req.timeout = req.timeout
return new_req
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
if isinstance(key_or_keys, (list, tuple)):
for key in key_or_keys:
if key not in d or d[key] is None or skip_false_values and not d[key]:
continue
return d[key]
return default
return d.get(key_or_keys, default)
def try_get(src, getter, expected_type=None):
if not isinstance(getter, (list, tuple)):
getter = [getter]
for get in getter:
try:
v = get(src)
except (AttributeError, KeyError, TypeError, IndexError):
pass
else:
if expected_type is None or isinstance(v, expected_type):
return v
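# Illustrative use of the two lookup helpers (example data assumed):
#   meta = {'duration': 0, 'title': 'abc'}
#   dict_get(meta, ('length', 'duration'), skip_false_values=False)  ->  0
#   try_get(meta, lambda x: x['missing']['key'], compat_str)         ->  None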
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
US_RATINGS = {
'G': 0,
'PG': 10,
'PG-13': 13,
'R': 16,
'NC': 18,
}
TV_PARENTAL_GUIDELINES = {
'TV-Y': 0,
'TV-Y7': 7,
'TV-G': 0,
'TV-PG': 0,
'TV-14': 14,
'TV-MA': 17,
}
def parse_age_limit(s):
if type(s) == int:
return s if 0 <= s <= 21 else None
if not isinstance(s, compat_basestring):
return None
m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
if m:
return int(m.group('age'))
if s in US_RATINGS:
return US_RATINGS[s]
return TV_PARENTAL_GUIDELINES.get(s)
def strip_jsonp(code):
return re.sub(
r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
def js_to_json(code):
COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
INTEGER_TABLE = (
(r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
(r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
)
def fix_kv(m):
v = m.group(0)
if v in ('true', 'false', 'null'):
return v
elif v.startswith('/*') or v.startswith('//') or v == ',':
return ""
if v[0] in ("'", '"'):
v = re.sub(r'(?s)\\.|"', lambda m: {
'"': '\\"',
"\\'": "'",
'\\\n': '',
'\\x': '\\u00',
}.get(m.group(0), m.group(0)), v[1:-1])
for regex, base in INTEGER_TABLE:
im = re.match(regex, v)
if im:
i = int(im.group(1), base)
return '"%d":' % i if v.endswith(':') else '%d' % i
return '"%s"' % v
return re.sub(r'''(?sx)
"(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
'(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
{comment}|,(?={skip}[\]}}])|
[a-zA-Z_][.a-zA-Z_0-9]*|
\b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
[0-9]+(?={skip}:)
'''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
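# Rough example of the JavaScript-to-JSON rewriting (hand-picked input;
# surrounding whitespace is preserved where no token matched):
#   js_to_json("{abc: 'def', /* comment */ num: 0x1F,}")
#     ->  roughly '{"abc": "def",  "num": 31}'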
def qualities(quality_ids):
""" Get a numeric quality value out of a list of possible values """
def q(qid):
try:
return quality_ids.index(qid)
except ValueError:
return -1
return q
DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
def limit_length(s, length):
""" Add ellipses to overly long strings """
if s is None:
return None
ELLIPSES = '...'
if len(s) > length:
return s[:length - len(ELLIPSES)] + ELLIPSES
return s
def version_tuple(v):
return tuple(int(e) for e in re.split(r'[-.]', v))
def is_outdated_version(version, limit, assume_new=True):
if not version:
return not assume_new
try:
return version_tuple(version) < version_tuple(limit)
except ValueError:
return not assume_new
def ytdl_is_updateable():
""" Returns if youtube-dl can be updated with -U """
from zipimport import zipimporter
return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
def args_to_str(args):
# Get a short string representation for a subprocess command
return ' '.join(compat_shlex_quote(a) for a in args)
def error_to_compat_str(err):
err_str = str(err)
# On python 2 error byte string must be decoded with proper
# encoding rather than ascii
if sys.version_info[0] < 3:
err_str = err_str.decode(preferredencoding())
return err_str
def mimetype2ext(mt):
if mt is None:
return None
ext = {
'audio/mp4': 'm4a',
# Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
# it's the most popular one
'audio/mpeg': 'mp3',
}.get(mt)
if ext is not None:
return ext
_, _, res = mt.rpartition('/')
res = res.split(';')[0].strip().lower()
return {
'3gpp': '3gp',
'smptett+xml': 'tt',
'srt': 'srt',
'ttaf+xml': 'dfxp',
'ttml+xml': 'ttml',
'vtt': 'vtt',
'x-flv': 'flv',
'x-mp4-fragmented': 'mp4',
'x-ms-wmv': 'wmv',
'mpegurl': 'm3u8',
'x-mpegurl': 'm3u8',
'vnd.apple.mpegurl': 'm3u8',
'dash+xml': 'mpd',
'f4m': 'f4m',
'f4m+xml': 'f4m',
'hds+xml': 'f4m',
'vnd.ms-sstr+xml': 'ism',
'quicktime': 'mov',
}.get(res, res)
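# Example mappings (illustrative):
#   mimetype2ext('audio/mp4')              ->  'm4a'
#   mimetype2ext('application/x-mpegURL')  ->  'm3u8'
#   mimetype2ext('video/unknown-thing')    ->  'unknown-thing' (fallback)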
def parse_codecs(codecs_str):
# http://tools.ietf.org/html/rfc6381
if not codecs_str:
return {}
splited_codecs = list(filter(None, map(
lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
vcodec, acodec = None, None
for full_codec in splited_codecs:
codec = full_codec.split('.')[0]
if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'):
if not vcodec:
vcodec = full_codec
elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3'):
if not acodec:
acodec = full_codec
else:
write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr)
if not vcodec and not acodec:
if len(splited_codecs) == 2:
return {
'vcodec': vcodec,
'acodec': acodec,
}
elif len(splited_codecs) == 1:
return {
'vcodec': 'none',
'acodec': splited_codecs[0],
}
else:
return {
'vcodec': vcodec or 'none',
'acodec': acodec or 'none',
}
return {}
def urlhandle_detect_ext(url_handle):
getheader = url_handle.headers.get
cd = getheader('Content-Disposition')
if cd:
m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
if m:
e = determine_ext(m.group('filename'), default_ext=None)
if e:
return e
return mimetype2ext(getheader('Content-Type'))
def encode_data_uri(data, mime_type):
return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
def age_restricted(content_limit, age_limit):
""" Returns True iff the content should be blocked """
if age_limit is None: # No limit set
return False
if content_limit is None:
return False # Content available for everyone
return age_limit < content_limit
def is_html(first_bytes):
""" Detect whether a file contains HTML by examining its first bytes. """
BOMS = [
(b'\xef\xbb\xbf', 'utf-8'),
(b'\x00\x00\xfe\xff', 'utf-32-be'),
(b'\xff\xfe\x00\x00', 'utf-32-le'),
(b'\xff\xfe', 'utf-16-le'),
(b'\xfe\xff', 'utf-16-be'),
]
for bom, enc in BOMS:
if first_bytes.startswith(bom):
s = first_bytes[len(bom):].decode(enc, 'replace')
break
else:
s = first_bytes.decode('utf-8', 'replace')
return re.match(r'^\s*<', s)
def determine_protocol(info_dict):
protocol = info_dict.get('protocol')
if protocol is not None:
return protocol
url = info_dict['url']
if url.startswith('rtmp'):
return 'rtmp'
elif url.startswith('mms'):
return 'mms'
elif url.startswith('rtsp'):
return 'rtsp'
ext = determine_ext(url)
if ext == 'm3u8':
return 'm3u8'
elif ext == 'f4m':
return 'f4m'
return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data):
""" Render a list of rows, each as a list of values """
table = [header_row] + data
max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
return '\n'.join(format_str % tuple(row) for row in table)
def _match_one(filter_part, dct):
COMPARISON_OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>[a-z_]+)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?:
(?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
(?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)|
(?P<strval>(?![0-9.])[a-z0-9A-Z]*)
)
\s*$
''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = COMPARISON_OPERATORS[m.group('op')]
actual_value = dct.get(m.group('key'))
if (m.group('quotedstrval') is not None or
m.group('strval') is not None or
# If the original field is a string and the matching comparison value is
# a number, we should respect the origin of the original field
# and process the comparison value as a string (see
# https://github.com/rg3/youtube-dl/issues/11082).
actual_value is not None and m.group('intval') is not None and
isinstance(actual_value, compat_str)):
if m.group('op') not in ('=', '!='):
raise ValueError(
'Operator %s does not support string values!' % m.group('op'))
comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
quote = m.group('quote')
if quote is not None:
comparison_value = comparison_value.replace(r'\%s' % quote, quote)
else:
try:
comparison_value = int(m.group('intval'))
except ValueError:
comparison_value = parse_filesize(m.group('intval'))
if comparison_value is None:
comparison_value = parse_filesize(m.group('intval') + 'B')
if comparison_value is None:
raise ValueError(
'Invalid integer value %r in filter part %r' % (
m.group('intval'), filter_part))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
UNARY_OPERATORS = {
'': lambda v: v is not None,
'!': lambda v: v is None,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<op>%s)\s*(?P<key>[a-z_]+)
\s*$
''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = UNARY_OPERATORS[m.group('op')]
actual_value = dct.get(m.group('key'))
return op(actual_value)
raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct):
""" Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
return all(
_match_one(filter_part, dct) for filter_part in filter_str.split('&'))
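# Sketch of the filter mini-language (dict contents are assumptions for
# illustration):
#   match_str('like_count > 100 & dislike_count <? 50', {'like_count': 190})
#     ->  True  (the '?' makes a missing field pass the comparison)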
def match_filter_func(filter_str):
def _match_func(info_dict):
if match_str(filter_str, info_dict):
return None
else:
video_title = info_dict.get('title', info_dict.get('id', 'video'))
return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
return _match_func
def parse_dfxp_time_expr(time_expr):
if not time_expr:
return
mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
if mobj:
return float(mobj.group('time_offset'))
mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
if mobj:
return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
def srt_subtitles_timecode(seconds):
return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
def dfxp2srt(dfxp_data):
LEGACY_NAMESPACES = (
('http://www.w3.org/ns/ttml', [
'http://www.w3.org/2004/11/ttaf1',
'http://www.w3.org/2006/04/ttaf1',
'http://www.w3.org/2006/10/ttaf1',
]),
('http://www.w3.org/ns/ttml#styling', [
'http://www.w3.org/ns/ttml#style',
]),
)
SUPPORTED_STYLING = [
'color',
'fontFamily',
'fontSize',
'fontStyle',
'fontWeight',
'textDecoration'
]
_x = functools.partial(xpath_with_ns, ns_map={
'ttml': 'http://www.w3.org/ns/ttml',
'tts': 'http://www.w3.org/ns/ttml#styling',
})
styles = {}
default_style = {}
class TTMLPElementParser(object):
_out = ''
_unclosed_elements = []
_applied_styles = []
def start(self, tag, attrib):
if tag in (_x('ttml:br'), 'br'):
self._out += '\n'
else:
unclosed_elements = []
style = {}
element_style_id = attrib.get('style')
if default_style:
style.update(default_style)
if element_style_id:
style.update(styles.get(element_style_id, {}))
for prop in SUPPORTED_STYLING:
prop_val = attrib.get(_x('tts:' + prop))
if prop_val:
style[prop] = prop_val
if style:
font = ''
for k, v in sorted(style.items()):
if self._applied_styles and self._applied_styles[-1].get(k) == v:
continue
if k == 'color':
font += ' color="%s"' % v
elif k == 'fontSize':
font += ' size="%s"' % v
elif k == 'fontFamily':
font += ' face="%s"' % v
elif k == 'fontWeight' and v == 'bold':
self._out += '<b>'
unclosed_elements.append('b')
elif k == 'fontStyle' and v == 'italic':
self._out += '<i>'
unclosed_elements.append('i')
elif k == 'textDecoration' and v == 'underline':
self._out += '<u>'
unclosed_elements.append('u')
if font:
self._out += '<font' + font + '>'
unclosed_elements.append('font')
applied_style = {}
if self._applied_styles:
applied_style.update(self._applied_styles[-1])
applied_style.update(style)
self._applied_styles.append(applied_style)
self._unclosed_elements.append(unclosed_elements)
def end(self, tag):
if tag not in (_x('ttml:br'), 'br'):
unclosed_elements = self._unclosed_elements.pop()
for element in reversed(unclosed_elements):
self._out += '</%s>' % element
if unclosed_elements and self._applied_styles:
self._applied_styles.pop()
def data(self, data):
self._out += data
def close(self):
return self._out.strip()
def parse_node(node):
target = TTMLPElementParser()
parser = xml.etree.ElementTree.XMLParser(target=target)
parser.feed(xml.etree.ElementTree.tostring(node))
return parser.close()
for k, v in LEGACY_NAMESPACES:
for ns in v:
dfxp_data = dfxp_data.replace(ns, k)
dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
out = []
paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
if not paras:
raise ValueError('Invalid dfxp/TTML subtitle')
repeat = False
while True:
for style in dfxp.findall(_x('.//ttml:style')):
style_id = style.get('id')
parent_style_id = style.get('style')
if parent_style_id:
if parent_style_id not in styles:
repeat = True
continue
styles[style_id] = styles[parent_style_id].copy()
for prop in SUPPORTED_STYLING:
prop_val = style.get(_x('tts:' + prop))
if prop_val:
styles.setdefault(style_id, {})[prop] = prop_val
if repeat:
repeat = False
else:
break
for p in ('body', 'div'):
ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
if ele is None:
continue
style = styles.get(ele.get('style'))
if not style:
continue
default_style.update(style)
for para, index in zip(paras, itertools.count(1)):
begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
end_time = parse_dfxp_time_expr(para.attrib.get('end'))
dur = parse_dfxp_time_expr(para.attrib.get('dur'))
if begin_time is None:
continue
if not end_time:
if not dur:
continue
end_time = begin_time + dur
out.append('%d\n%s --> %s\n%s\n\n' % (
index,
srt_subtitles_timecode(begin_time),
srt_subtitles_timecode(end_time),
parse_node(para)))
return ''.join(out)
def cli_option(params, command_option, param):
param = params.get(param)
if param:
param = compat_str(param)
return [command_option, param] if param is not None else []
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
param = params.get(param)
assert isinstance(param, bool)
if separator:
return [command_option + separator + (true_value if param else false_value)]
return [command_option, true_value if param else false_value]
def cli_valueless_option(params, command_option, param, expected_value=True):
param = params.get(param)
return [command_option] if param == expected_value else []
def cli_configuration_args(params, param, default=[]):
ex_args = params.get(param)
if ex_args is None:
return default
assert isinstance(ex_args, list)
return ex_args
class ISO639Utils(object):
# See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
_lang_map = {
'aa': 'aar',
'ab': 'abk',
'ae': 'ave',
'af': 'afr',
'ak': 'aka',
'am': 'amh',
'an': 'arg',
'ar': 'ara',
'as': 'asm',
'av': 'ava',
'ay': 'aym',
'az': 'aze',
'ba': 'bak',
'be': 'bel',
'bg': 'bul',
'bh': 'bih',
'bi': 'bis',
'bm': 'bam',
'bn': 'ben',
'bo': 'bod',
'br': 'bre',
'bs': 'bos',
'ca': 'cat',
'ce': 'che',
'ch': 'cha',
'co': 'cos',
'cr': 'cre',
'cs': 'ces',
'cu': 'chu',
'cv': 'chv',
'cy': 'cym',
'da': 'dan',
'de': 'deu',
'dv': 'div',
'dz': 'dzo',
'ee': 'ewe',
'el': 'ell',
'en': 'eng',
'eo': 'epo',
'es': 'spa',
'et': 'est',
'eu': 'eus',
'fa': 'fas',
'ff': 'ful',
'fi': 'fin',
'fj': 'fij',
'fo': 'fao',
'fr': 'fra',
'fy': 'fry',
'ga': 'gle',
'gd': 'gla',
'gl': 'glg',
'gn': 'grn',
'gu': 'guj',
'gv': 'glv',
'ha': 'hau',
'he': 'heb',
'hi': 'hin',
'ho': 'hmo',
'hr': 'hrv',
'ht': 'hat',
'hu': 'hun',
'hy': 'hye',
'hz': 'her',
'ia': 'ina',
'id': 'ind',
'ie': 'ile',
'ig': 'ibo',
'ii': 'iii',
'ik': 'ipk',
'io': 'ido',
'is': 'isl',
'it': 'ita',
'iu': 'iku',
'ja': 'jpn',
'jv': 'jav',
'ka': 'kat',
'kg': 'kon',
'ki': 'kik',
'kj': 'kua',
'kk': 'kaz',
'kl': 'kal',
'km': 'khm',
'kn': 'kan',
'ko': 'kor',
'kr': 'kau',
'ks': 'kas',
'ku': 'kur',
'kv': 'kom',
'kw': 'cor',
'ky': 'kir',
'la': 'lat',
'lb': 'ltz',
'lg': 'lug',
'li': 'lim',
'ln': 'lin',
'lo': 'lao',
'lt': 'lit',
'lu': 'lub',
'lv': 'lav',
'mg': 'mlg',
'mh': 'mah',
'mi': 'mri',
'mk': 'mkd',
'ml': 'mal',
'mn': 'mon',
'mr': 'mar',
'ms': 'msa',
'mt': 'mlt',
'my': 'mya',
'na': 'nau',
'nb': 'nob',
'nd': 'nde',
'ne': 'nep',
'ng': 'ndo',
'nl': 'nld',
'nn': 'nno',
'no': 'nor',
'nr': 'nbl',
'nv': 'nav',
'ny': 'nya',
'oc': 'oci',
'oj': 'oji',
'om': 'orm',
'or': 'ori',
'os': 'oss',
'pa': 'pan',
'pi': 'pli',
'pl': 'pol',
'ps': 'pus',
'pt': 'por',
'qu': 'que',
'rm': 'roh',
'rn': 'run',
'ro': 'ron',
'ru': 'rus',
'rw': 'kin',
'sa': 'san',
'sc': 'srd',
'sd': 'snd',
'se': 'sme',
'sg': 'sag',
'si': 'sin',
'sk': 'slk',
'sl': 'slv',
'sm': 'smo',
'sn': 'sna',
'so': 'som',
'sq': 'sqi',
'sr': 'srp',
'ss': 'ssw',
'st': 'sot',
'su': 'sun',
'sv': 'swe',
'sw': 'swa',
'ta': 'tam',
'te': 'tel',
'tg': 'tgk',
'th': 'tha',
'ti': 'tir',
'tk': 'tuk',
'tl': 'tgl',
'tn': 'tsn',
'to': 'ton',
'tr': 'tur',
'ts': 'tso',
'tt': 'tat',
'tw': 'twi',
'ty': 'tah',
'ug': 'uig',
'uk': 'ukr',
'ur': 'urd',
'uz': 'uzb',
've': 'ven',
'vi': 'vie',
'vo': 'vol',
'wa': 'wln',
'wo': 'wol',
'xh': 'xho',
'yi': 'yid',
'yo': 'yor',
'za': 'zha',
'zh': 'zho',
'zu': 'zul',
}
@classmethod
def short2long(cls, code):
"""Convert language code from ISO 639-1 to ISO 639-2/T"""
return cls._lang_map.get(code[:2])
@classmethod
def long2short(cls, code):
"""Convert language code from ISO 639-2/T to ISO 639-1"""
for short_name, long_name in cls._lang_map.items():
if long_name == code:
return short_name
class ISO3166Utils(object):
# From http://data.okfn.org/data/core/country-list
_country_map = {
'AF': 'Afghanistan',
'AX': 'Åland Islands',
'AL': 'Albania',
'DZ': 'Algeria',
'AS': 'American Samoa',
'AD': 'Andorra',
'AO': 'Angola',
'AI': 'Anguilla',
'AQ': 'Antarctica',
'AG': 'Antigua and Barbuda',
'AR': 'Argentina',
'AM': 'Armenia',
'AW': 'Aruba',
'AU': 'Australia',
'AT': 'Austria',
'AZ': 'Azerbaijan',
'BS': 'Bahamas',
'BH': 'Bahrain',
'BD': 'Bangladesh',
'BB': 'Barbados',
'BY': 'Belarus',
'BE': 'Belgium',
'BZ': 'Belize',
'BJ': 'Benin',
'BM': 'Bermuda',
'BT': 'Bhutan',
'BO': 'Bolivia, Plurinational State of',
'BQ': 'Bonaire, Sint Eustatius and Saba',
'BA': 'Bosnia and Herzegovina',
'BW': 'Botswana',
'BV': 'Bouvet Island',
'BR': 'Brazil',
'IO': 'British Indian Ocean Territory',
'BN': 'Brunei Darussalam',
'BG': 'Bulgaria',
'BF': 'Burkina Faso',
'BI': 'Burundi',
'KH': 'Cambodia',
'CM': 'Cameroon',
'CA': 'Canada',
'CV': 'Cape Verde',
'KY': 'Cayman Islands',
'CF': 'Central African Republic',
'TD': 'Chad',
'CL': 'Chile',
'CN': 'China',
'CX': 'Christmas Island',
'CC': 'Cocos (Keeling) Islands',
'CO': 'Colombia',
'KM': 'Comoros',
'CG': 'Congo',
'CD': 'Congo, the Democratic Republic of the',
'CK': 'Cook Islands',
'CR': 'Costa Rica',
'CI': 'Côte d\'Ivoire',
'HR': 'Croatia',
'CU': 'Cuba',
'CW': 'Curaçao',
'CY': 'Cyprus',
'CZ': 'Czech Republic',
'DK': 'Denmark',
'DJ': 'Djibouti',
'DM': 'Dominica',
'DO': 'Dominican Republic',
'EC': 'Ecuador',
'EG': 'Egypt',
'SV': 'El Salvador',
'GQ': 'Equatorial Guinea',
'ER': 'Eritrea',
'EE': 'Estonia',
'ET': 'Ethiopia',
'FK': 'Falkland Islands (Malvinas)',
'FO': 'Faroe Islands',
'FJ': 'Fiji',
'FI': 'Finland',
'FR': 'France',
'GF': 'French Guiana',
'PF': 'French Polynesia',
'TF': 'French Southern Territories',
'GA': 'Gabon',
'GM': 'Gambia',
'GE': 'Georgia',
'DE': 'Germany',
'GH': 'Ghana',
'GI': 'Gibraltar',
'GR': 'Greece',
'GL': 'Greenland',
'GD': 'Grenada',
'GP': 'Guadeloupe',
'GU': 'Guam',
'GT': 'Guatemala',
'GG': 'Guernsey',
'GN': 'Guinea',
'GW': 'Guinea-Bissau',
'GY': 'Guyana',
'HT': 'Haiti',
'HM': 'Heard Island and McDonald Islands',
'VA': 'Holy See (Vatican City State)',
'HN': 'Honduras',
'HK': 'Hong Kong',
'HU': 'Hungary',
'IS': 'Iceland',
'IN': 'India',
'ID': 'Indonesia',
'IR': 'Iran, Islamic Republic of',
'IQ': 'Iraq',
'IE': 'Ireland',
'IM': 'Isle of Man',
'IL': 'Israel',
'IT': 'Italy',
'JM': 'Jamaica',
'JP': 'Japan',
'JE': 'Jersey',
'JO': 'Jordan',
'KZ': 'Kazakhstan',
'KE': 'Kenya',
'KI': 'Kiribati',
'KP': 'Korea, Democratic People\'s Republic of',
'KR': 'Korea, Republic of',
'KW': 'Kuwait',
'KG': 'Kyrgyzstan',
'LA': 'Lao People\'s Democratic Republic',
'LV': 'Latvia',
'LB': 'Lebanon',
'LS': 'Lesotho',
'LR': 'Liberia',
'LY': 'Libya',
'LI': 'Liechtenstein',
'LT': 'Lithuania',
'LU': 'Luxembourg',
'MO': 'Macao',
'MK': 'Macedonia, the Former Yugoslav Republic of',
'MG': 'Madagascar',
'MW': 'Malawi',
'MY': 'Malaysia',
'MV': 'Maldives',
'ML': 'Mali',
'MT': 'Malta',
'MH': 'Marshall Islands',
'MQ': 'Martinique',
'MR': 'Mauritania',
'MU': 'Mauritius',
'YT': 'Mayotte',
'MX': 'Mexico',
'FM': 'Micronesia, Federated States of',
'MD': 'Moldova, Republic of',
'MC': 'Monaco',
'MN': 'Mongolia',
'ME': 'Montenegro',
'MS': 'Montserrat',
'MA': 'Morocco',
'MZ': 'Mozambique',
'MM': 'Myanmar',
'NA': 'Namibia',
'NR': 'Nauru',
'NP': 'Nepal',
'NL': 'Netherlands',
'NC': 'New Caledonia',
'NZ': 'New Zealand',
'NI': 'Nicaragua',
'NE': 'Niger',
'NG': 'Nigeria',
'NU': 'Niue',
'NF': 'Norfolk Island',
'MP': 'Northern Mariana Islands',
'NO': 'Norway',
'OM': 'Oman',
'PK': 'Pakistan',
'PW': 'Palau',
'PS': 'Palestine, State of',
'PA': 'Panama',
'PG': 'Papua New Guinea',
'PY': 'Paraguay',
'PE': 'Peru',
'PH': 'Philippines',
'PN': 'Pitcairn',
'PL': 'Poland',
'PT': 'Portugal',
'PR': 'Puerto Rico',
'QA': 'Qatar',
'RE': 'Réunion',
'RO': 'Romania',
'RU': 'Russian Federation',
'RW': 'Rwanda',
'BL': 'Saint Barthélemy',
'SH': 'Saint Helena, Ascension and Tristan da Cunha',
'KN': 'Saint Kitts and Nevis',
'LC': 'Saint Lucia',
'MF': 'Saint Martin (French part)',
'PM': 'Saint Pierre and Miquelon',
'VC': 'Saint Vincent and the Grenadines',
'WS': 'Samoa',
'SM': 'San Marino',
'ST': 'Sao Tome and Principe',
'SA': 'Saudi Arabia',
'SN': 'Senegal',
'RS': 'Serbia',
'SC': 'Seychelles',
'SL': 'Sierra Leone',
'SG': 'Singapore',
'SX': 'Sint Maarten (Dutch part)',
'SK': 'Slovakia',
'SI': 'Slovenia',
'SB': 'Solomon Islands',
'SO': 'Somalia',
'ZA': 'South Africa',
'GS': 'South Georgia and the South Sandwich Islands',
'SS': 'South Sudan',
'ES': 'Spain',
'LK': 'Sri Lanka',
'SD': 'Sudan',
'SR': 'Suriname',
'SJ': 'Svalbard and Jan Mayen',
'SZ': 'Swaziland',
'SE': 'Sweden',
'CH': 'Switzerland',
'SY': 'Syrian Arab Republic',
'TW': 'Taiwan, Province of China',
'TJ': 'Tajikistan',
'TZ': 'Tanzania, United Republic of',
'TH': 'Thailand',
'TL': 'Timor-Leste',
'TG': 'Togo',
'TK': 'Tokelau',
'TO': 'Tonga',
'TT': 'Trinidad and Tobago',
'TN': 'Tunisia',
'TR': 'Turkey',
'TM': 'Turkmenistan',
'TC': 'Turks and Caicos Islands',
'TV': 'Tuvalu',
'UG': 'Uganda',
'UA': 'Ukraine',
'AE': 'United Arab Emirates',
'GB': 'United Kingdom',
'US': 'United States',
'UM': 'United States Minor Outlying Islands',
'UY': 'Uruguay',
'UZ': 'Uzbekistan',
'VU': 'Vanuatu',
'VE': 'Venezuela, Bolivarian Republic of',
'VN': 'Viet Nam',
'VG': 'Virgin Islands, British',
'VI': 'Virgin Islands, U.S.',
'WF': 'Wallis and Futuna',
'EH': 'Western Sahara',
'YE': 'Yemen',
'ZM': 'Zambia',
'ZW': 'Zimbabwe',
}
@classmethod
def short2full(cls, code):
"""Convert an ISO 3166-2 country code to the corresponding full name"""
return cls._country_map.get(code.upper())
class GeoUtils(object):
# Major IPv4 address blocks per country
_country_ip_map = {
'AD': '85.94.160.0/19',
'AE': '94.200.0.0/13',
'AF': '149.54.0.0/17',
'AG': '209.59.64.0/18',
'AI': '204.14.248.0/21',
'AL': '46.99.0.0/16',
'AM': '46.70.0.0/15',
'AO': '105.168.0.0/13',
'AP': '159.117.192.0/21',
'AR': '181.0.0.0/12',
'AS': '202.70.112.0/20',
'AT': '84.112.0.0/13',
'AU': '1.128.0.0/11',
'AW': '181.41.0.0/18',
'AZ': '5.191.0.0/16',
'BA': '31.176.128.0/17',
'BB': '65.48.128.0/17',
'BD': '114.130.0.0/16',
'BE': '57.0.0.0/8',
'BF': '129.45.128.0/17',
'BG': '95.42.0.0/15',
'BH': '37.131.0.0/17',
'BI': '154.117.192.0/18',
'BJ': '137.255.0.0/16',
'BL': '192.131.134.0/24',
'BM': '196.12.64.0/18',
'BN': '156.31.0.0/16',
'BO': '161.56.0.0/16',
'BQ': '161.0.80.0/20',
'BR': '152.240.0.0/12',
'BS': '24.51.64.0/18',
'BT': '119.2.96.0/19',
'BW': '168.167.0.0/16',
'BY': '178.120.0.0/13',
'BZ': '179.42.192.0/18',
'CA': '99.224.0.0/11',
'CD': '41.243.0.0/16',
'CF': '196.32.200.0/21',
'CG': '197.214.128.0/17',
'CH': '85.0.0.0/13',
'CI': '154.232.0.0/14',
'CK': '202.65.32.0/19',
'CL': '152.172.0.0/14',
'CM': '165.210.0.0/15',
'CN': '36.128.0.0/10',
'CO': '181.240.0.0/12',
'CR': '201.192.0.0/12',
'CU': '152.206.0.0/15',
'CV': '165.90.96.0/19',
'CW': '190.88.128.0/17',
'CY': '46.198.0.0/15',
'CZ': '88.100.0.0/14',
'DE': '53.0.0.0/8',
'DJ': '197.241.0.0/17',
'DK': '87.48.0.0/12',
'DM': '192.243.48.0/20',
'DO': '152.166.0.0/15',
'DZ': '41.96.0.0/12',
'EC': '186.68.0.0/15',
'EE': '90.190.0.0/15',
'EG': '156.160.0.0/11',
'ER': '196.200.96.0/20',
'ES': '88.0.0.0/11',
'ET': '196.188.0.0/14',
'EU': '2.16.0.0/13',
'FI': '91.152.0.0/13',
'FJ': '144.120.0.0/16',
'FM': '119.252.112.0/20',
'FO': '88.85.32.0/19',
'FR': '90.0.0.0/9',
'GA': '41.158.0.0/15',
'GB': '25.0.0.0/8',
'GD': '74.122.88.0/21',
'GE': '31.146.0.0/16',
'GF': '161.22.64.0/18',
'GG': '62.68.160.0/19',
'GH': '45.208.0.0/14',
'GI': '85.115.128.0/19',
'GL': '88.83.0.0/19',
'GM': '160.182.0.0/15',
'GN': '197.149.192.0/18',
'GP': '104.250.0.0/19',
'GQ': '105.235.224.0/20',
'GR': '94.64.0.0/13',
'GT': '168.234.0.0/16',
'GU': '168.123.0.0/16',
'GW': '197.214.80.0/20',
'GY': '181.41.64.0/18',
'HK': '113.252.0.0/14',
'HN': '181.210.0.0/16',
'HR': '93.136.0.0/13',
'HT': '148.102.128.0/17',
'HU': '84.0.0.0/14',
'ID': '39.192.0.0/10',
'IE': '87.32.0.0/12',
'IL': '79.176.0.0/13',
'IM': '5.62.80.0/20',
'IN': '117.192.0.0/10',
'IO': '203.83.48.0/21',
'IQ': '37.236.0.0/14',
'IR': '2.176.0.0/12',
'IS': '82.221.0.0/16',
'IT': '79.0.0.0/10',
'JE': '87.244.64.0/18',
'JM': '72.27.0.0/17',
'JO': '176.29.0.0/16',
'JP': '126.0.0.0/8',
'KE': '105.48.0.0/12',
'KG': '158.181.128.0/17',
'KH': '36.37.128.0/17',
'KI': '103.25.140.0/22',
'KM': '197.255.224.0/20',
'KN': '198.32.32.0/19',
'KP': '175.45.176.0/22',
'KR': '175.192.0.0/10',
'KW': '37.36.0.0/14',
'KY': '64.96.0.0/15',
'KZ': '2.72.0.0/13',
'LA': '115.84.64.0/18',
'LB': '178.135.0.0/16',
'LC': '192.147.231.0/24',
'LI': '82.117.0.0/19',
'LK': '112.134.0.0/15',
'LR': '41.86.0.0/19',
'LS': '129.232.0.0/17',
'LT': '78.56.0.0/13',
'LU': '188.42.0.0/16',
'LV': '46.109.0.0/16',
'LY': '41.252.0.0/14',
'MA': '105.128.0.0/11',
'MC': '88.209.64.0/18',
'MD': '37.246.0.0/16',
'ME': '178.175.0.0/17',
'MF': '74.112.232.0/21',
'MG': '154.126.0.0/17',
'MH': '117.103.88.0/21',
'MK': '77.28.0.0/15',
'ML': '154.118.128.0/18',
'MM': '37.111.0.0/17',
'MN': '49.0.128.0/17',
'MO': '60.246.0.0/16',
'MP': '202.88.64.0/20',
'MQ': '109.203.224.0/19',
'MR': '41.188.64.0/18',
'MS': '208.90.112.0/22',
'MT': '46.11.0.0/16',
'MU': '105.16.0.0/12',
'MV': '27.114.128.0/18',
'MW': '105.234.0.0/16',
'MX': '187.192.0.0/11',
'MY': '175.136.0.0/13',
'MZ': '197.218.0.0/15',
'NA': '41.182.0.0/16',
'NC': '101.101.0.0/18',
'NE': '197.214.0.0/18',
'NF': '203.17.240.0/22',
'NG': '105.112.0.0/12',
'NI': '186.76.0.0/15',
'NL': '145.96.0.0/11',
'NO': '84.208.0.0/13',
'NP': '36.252.0.0/15',
'NR': '203.98.224.0/19',
'NU': '49.156.48.0/22',
'NZ': '49.224.0.0/14',
'OM': '5.36.0.0/15',
'PA': '186.72.0.0/15',
'PE': '186.160.0.0/14',
'PF': '123.50.64.0/18',
'PG': '124.240.192.0/19',
'PH': '49.144.0.0/13',
'PK': '39.32.0.0/11',
'PL': '83.0.0.0/11',
'PM': '70.36.0.0/20',
'PR': '66.50.0.0/16',
'PS': '188.161.0.0/16',
'PT': '85.240.0.0/13',
'PW': '202.124.224.0/20',
'PY': '181.120.0.0/14',
'QA': '37.210.0.0/15',
'RE': '139.26.0.0/16',
'RO': '79.112.0.0/13',
'RS': '178.220.0.0/14',
'RU': '5.136.0.0/13',
'RW': '105.178.0.0/15',
'SA': '188.48.0.0/13',
'SB': '202.1.160.0/19',
'SC': '154.192.0.0/11',
'SD': '154.96.0.0/13',
'SE': '78.64.0.0/12',
'SG': '152.56.0.0/14',
'SI': '188.196.0.0/14',
'SK': '78.98.0.0/15',
'SL': '197.215.0.0/17',
'SM': '89.186.32.0/19',
'SN': '41.82.0.0/15',
'SO': '197.220.64.0/19',
'SR': '186.179.128.0/17',
'SS': '105.235.208.0/21',
'ST': '197.159.160.0/19',
'SV': '168.243.0.0/16',
'SX': '190.102.0.0/20',
'SY': '5.0.0.0/16',
'SZ': '41.84.224.0/19',
'TC': '65.255.48.0/20',
'TD': '154.68.128.0/19',
'TG': '196.168.0.0/14',
'TH': '171.96.0.0/13',
'TJ': '85.9.128.0/18',
'TK': '27.96.24.0/21',
'TL': '180.189.160.0/20',
'TM': '95.85.96.0/19',
'TN': '197.0.0.0/11',
'TO': '175.176.144.0/21',
'TR': '78.160.0.0/11',
'TT': '186.44.0.0/15',
'TV': '202.2.96.0/19',
'TW': '120.96.0.0/11',
'TZ': '156.156.0.0/14',
'UA': '93.72.0.0/13',
'UG': '154.224.0.0/13',
'US': '3.0.0.0/8',
'UY': '167.56.0.0/13',
'UZ': '82.215.64.0/18',
'VA': '212.77.0.0/19',
'VC': '24.92.144.0/20',
'VE': '186.88.0.0/13',
'VG': '172.103.64.0/18',
'VI': '146.226.0.0/16',
'VN': '14.160.0.0/11',
'VU': '202.80.32.0/20',
'WF': '117.20.32.0/21',
'WS': '202.4.32.0/19',
'YE': '134.35.0.0/16',
'YT': '41.242.116.0/22',
'ZA': '41.0.0.0/11',
'ZM': '165.56.0.0/13',
'ZW': '41.85.192.0/19',
}
@classmethod
def random_ipv4(cls, code):
block = cls._country_ip_map.get(code.upper())
if not block:
return None
addr, preflen = block.split('/')
addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
addr_max = addr_min | (0xffffffff >> int(preflen))
return compat_str(socket.inet_ntoa(
compat_struct_pack('!L', random.randint(addr_min, addr_max))))
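# Illustrative call (the concrete address is, of course, random):
#   GeoUtils.random_ipv4('DE')  ->  e.g. '53.187.12.4' (inside 53.0.0.0/8)
#   GeoUtils.random_ipv4('XX')  ->  None for unknown country codes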
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
def __init__(self, proxies=None):
# Set default handlers
for type in ('http', 'https'):
setattr(self, '%s_open' % type,
lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
meth(r, proxy, type))
return compat_urllib_request.ProxyHandler.__init__(self, proxies)
def proxy_open(self, req, proxy, type):
req_proxy = req.headers.get('Ytdl-request-proxy')
if req_proxy is not None:
proxy = req_proxy
del req.headers['Ytdl-request-proxy']
if proxy == '__noproxy__':
return None # No Proxy
if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
req.add_header('Ytdl-socks-proxy', proxy)
# youtube-dl's HTTP/HTTPS handlers take care of wrapping the socket with SOCKS
return None
return compat_urllib_request.ProxyHandler.proxy_open(
self, req, proxy, type)
# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
def long_to_bytes(n, blocksize=0):
"""long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize.
"""
# after much testing, this algorithm was deemed to be the fastest
s = b''
n = int(n)
while n > 0:
s = compat_struct_pack('>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros
for i in range(len(s)):
if s[i] != b'\000'[0]:
break
else:
# only happens when n == 0
s = b'\000'
i = 0
s = s[i:]
# add back some pad bytes. this could be done more efficiently w.r.t. the
# de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * b'\000' + s
return s
def bytes_to_long(s):
"""bytes_to_long(string) : long
Convert a byte string to a long integer.
This is (essentially) the inverse of long_to_bytes().
"""
acc = 0
length = len(s)
if length % 4:
extra = (4 - length % 4)
s = b'\000' * extra + s
length = length + extra
for i in range(0, length, 4):
acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
return acc
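# Round-trip sketch (illustrative values):
#   long_to_bytes(65537)               ->  b'\x01\x00\x01'
#   long_to_bytes(65537, blocksize=4)  ->  b'\x00\x01\x00\x01'
#   bytes_to_long(b'\x01\x00\x01')     ->  65537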
def ohdave_rsa_encrypt(data, exponent, modulus):
'''
Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
Input:
data: data to encrypt, bytes-like object
exponent, modulus: parameter e and N of RSA algorithm, both integer
Output: hex string of encrypted data
Limitation: supports one block encryption only
'''
payload = int(binascii.hexlify(data[::-1]), 16)
encrypted = pow(payload, exponent, modulus)
return '%x' % encrypted
def pkcs1pad(data, length):
"""
Padding input data with PKCS#1 scheme
@param {int[]} data input data
@param {int} length target length
@returns {int[]} padded data
"""
if len(data) > length - 11:
raise ValueError('Input data too long for PKCS#1 padding')
pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
return [0, 2] + pseudo_random + [0] + data
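# Illustrative call (values made up): pkcs1pad([1, 2, 3], 16) returns
# [0, 2, r1, ..., r10, 0, 1, 2, 3] with ten pseudo-random filler bytes.
# Note: PKCS#1 v1.5 requires non-zero padding bytes, whereas
# randint(0, 254) above can produce zeros.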
def encode_base_n(num, n, table=None):
FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
if not table:
table = FULL_TABLE[:n]
if n > len(table):
raise ValueError('base %d exceeds table length %d' % (n, len(table)))
if num == 0:
return table[0]
ret = ''
while num:
ret = table[num % n] + ret
num = num // n
return ret
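# Example conversions (illustrative):
#   encode_base_n(255, 16)  ->  'ff'
#   encode_base_n(61, 62)   ->  'Z'   (digits, then a-z, then A-Z)
#   encode_base_n(0, 36)    ->  '0'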
def decode_packed_codes(code):
mobj = re.search(PACKED_CODES_RE, code)
obfuscated_code, base, count, symbols = mobj.groups()
base = int(base)
count = int(count)
symbols = symbols.split('|')
symbol_table = {}
while count:
count -= 1
base_n_count = encode_base_n(count, base)
symbol_table[base_n_count] = symbols[count] or base_n_count
return re.sub(
r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
obfuscated_code)
def parse_m3u8_attributes(attrib):
info = {}
for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
if val.startswith('"'):
val = val[1:-1]
info[key] = val
return info
def urshift(val, n):
return val >> n if val >= 0 else (val + 0x100000000) >> n
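# urshift emulates JavaScript's unsigned right shift (>>>) on 32-bit
# values (illustrative): urshift(-1, 28) -> 15, while Python's -1 >> 28 -> -1.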
# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/rg3/youtube-dl/issues/9706
def decode_png(png_data):
# Reference: https://www.w3.org/TR/PNG/
header = png_data[8:]
if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
raise IOError('Not a valid PNG file.')
int_map = {1: '>B', 2: '>H', 4: '>I'}
unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
chunks = []
while header:
length = unpack_integer(header[:4])
header = header[4:]
chunk_type = header[:4]
header = header[4:]
chunk_data = header[:length]
header = header[length:]
header = header[4:] # Skip CRC
chunks.append({
'type': chunk_type,
'length': length,
'data': chunk_data
})
ihdr = chunks[0]['data']
width = unpack_integer(ihdr[:4])
height = unpack_integer(ihdr[4:8])
idat = b''
for chunk in chunks:
if chunk['type'] == b'IDAT':
idat += chunk['data']
if not idat:
raise IOError('Unable to read PNG data.')
decompressed_data = bytearray(zlib.decompress(idat))
stride = width * 3
pixels = []
def _get_pixel(idx):
x = idx % stride
y = idx // stride
return pixels[y][x]
for y in range(height):
basePos = y * (1 + stride)
filter_type = decompressed_data[basePos]
current_row = []
pixels.append(current_row)
for x in range(stride):
color = decompressed_data[1 + basePos + x]
basex = y * stride + x
left = 0
up = 0
if x > 2:
left = _get_pixel(basex - 3)
if y > 0:
up = _get_pixel(basex - stride)
if filter_type == 1: # Sub
color = (color + left) & 0xff
elif filter_type == 2: # Up
color = (color + up) & 0xff
elif filter_type == 3: # Average
color = (color + ((left + up) >> 1)) & 0xff
elif filter_type == 4: # Paeth
a = left
b = up
c = 0
if x > 2 and y > 0:
c = _get_pixel(basex - stride - 3)
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
color = (color + a) & 0xff
elif pb <= pc:
color = (color + b) & 0xff
else:
color = (color + c) & 0xff
current_row.append(color)
return width, height, pixels
def write_xattr(path, key, value):
# This mess below finds the best xattr tool for the job
try:
# try the pyxattr module...
import xattr
if hasattr(xattr, 'set'): # pyxattr
# Unicode arguments are not supported in python-pyxattr until
# version 0.5.0
# See https://github.com/rg3/youtube-dl/issues/5498
pyxattr_required_version = '0.5.0'
if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
# TODO: fallback to CLI tools
raise XAttrUnavailableError(
'python-pyxattr is detected but is too old. '
'youtube-dl requires %s or above while your version is %s. '
'Falling back to other xattr implementations' % (
pyxattr_required_version, xattr.__version__))
setxattr = xattr.set
else: # xattr
setxattr = xattr.setxattr
try:
setxattr(path, key, value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
except ImportError:
if compat_os_name == 'nt':
# Write xattrs to NTFS Alternate Data Streams:
# http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
assert ':' not in key
assert os.path.exists(path)
ads_fn = path + ':' + key
try:
with open(ads_fn, 'wb') as f:
f.write(value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
else:
user_has_setfattr = check_executable('setfattr', ['--version'])
user_has_xattr = check_executable('xattr', ['-h'])
if user_has_setfattr or user_has_xattr:
value = value.decode('utf-8')
if user_has_setfattr:
executable = 'setfattr'
opts = ['-n', key, '-v', value]
elif user_has_xattr:
executable = 'xattr'
opts = ['-w', key, value]
cmd = ([encodeFilename(executable, True)] +
[encodeArgument(o) for o in opts] +
[encodeFilename(path, True)])
try:
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
stdout, stderr = p.communicate()
stderr = stderr.decode('utf-8', 'replace')
if p.returncode != 0:
raise XAttrMetadataError(p.returncode, stderr)
else:
# On Unix, but we can't find pyxattr, setfattr, or xattr.
if sys.platform.startswith('linux'):
raise XAttrUnavailableError(
"Couldn't find a tool to set the xattrs. "
"Install either the python 'pyxattr' or 'xattr' "
"modules, or the GNU 'attr' package "
"(which contains the 'setfattr' tool).")
else:
raise XAttrUnavailableError(
"Couldn't find a tool to set the xattrs. "
"Install either the python 'xattr' module, "
"or the 'xattr' binary.")
| gpl-2.0 |
gregorio-project/hyphen-la | scripts/test-classical.py | 1 | 1357 | #!/usr/bin/env python3
import pyphen
import sys
import re
hyphenator = pyphen.Pyphen(filename='patterns/hyph_la_classical.dic',left=2,right=2)
seenSegs = {}
line = 0
def comparenoncompletehyphens(original, obtained):
i = 0
for c in obtained:
if c == '-':
if original[i] == '-':
i = i + 1
else:
if original[i] == '-':
return False
else:
i = i + 1
return True
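# Illustrative check (made-up Latin word): every hyphen present in the
# reference must also appear in the generated hyphenation; extra ones are
# allowed:
#   comparenoncompletehyphens('do-minus', 'do-mi-nus')  ->  True
#   comparenoncompletehyphens('dom-inus', 'do-mi-nus')  ->  False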
def printError(wrong, correct, base):
print('%s %% %s (not %s)' % (base, correct, wrong))
def dotest(filename, allhyphens=True):
global hyphenator, seenSegs
print('differences in '+filename+':')
linenum = 0
with open(filename, 'r') as f:
for line in f:
linenum += 1
line = line.strip()
line = re.sub(r'\s*%.*', '', line)
base = line.replace('-', '')
if base in seenSegs and line != seenSegs[base][1]:
print('ERROR: line %d: test \'%s\' differs from test \'%s\' line %d in %s' % (linenum, line, seenSegs[base][1], seenSegs[base][0], seenSegs[base][2]))
else:
seenSegs[base] = (linenum, line, filename)
new = hyphenator.inserted(base)
if allhyphens:
if not line == new:
printError(new, line, base)
else:
if not comparenoncompletehyphens(line, new):
printError(new, line, base)
dotest('tests/nonliturgical/wordlist-classical-italian.txt')
print()
dotest('tests/nonliturgical/wordlist-classical-only.txt')
| mit |
ferchaure/bci | capture.py | 1 | 7686 | import numpy as np #vectores, operaciones matematicas
import time #hora local
from configuration import GENERAL_CONFIG as CONFIG
from configuration import CONFIG_PARSER
#import os
from multiprocess_config import *
#from configuration import DATA_FRAME_CONFIG as COMM
from configuration import FILE_CONFIG
from importlib import import_module
import logging
from os import path
from os import makedirs
if not path.exists('Logs'):
makedirs('Logs')
dateformat = '%Y/%m/%d %I:%M:%S %p'
logging.basicConfig(filename = 'Logs/data_bci.log', level = logging.INFO, datefmt = dateformat)
logging.info("Init data acquisition")
def connect():
"""Retorna el dispositivo o una excepcion si falla"""
from ok import okapi
# find our device
dev = okapi.OpalKelly()
dev.reset()
return dev
def get_data_from_file(com, send_warnings, cola):
"""Lee archivos y los envia al resto de los programas simulando ser online"""
Parser = getattr(import_module("Parsers."+CONFIG["FORMAT"]),"Parser")
save_data = False
reg_files = FileHandle()
parser = Parser(cola,logging,data_in)
comando = 'normal'
extra_data = 0  # extra data needed to fill the packet
pre_time = time.time()
while(comando != EXIT_SIGNAL):
while not com.poll():  # keep reading while no commands are received
extra_data = parser.offline_update()
if extra_data == 0:  # exact fit or surplus data
time.sleep(max(CONFIG['PAQ_USB'] / CONFIG['FS']-(time.time()-pre_time),0))
try:
cola.put(parser.data, timeout = TIMEOUT_PUT)
pre_time = time.time()
except Queue_Full:
try:
send_warnings.put_nowait([SLOW_PROCESS_SIGNAL])
except Queue_Full:
logging.error(Errors_Messages[SLOW_GRAPHICS_SIGNAL])
if save_data:
reg_files.save(parser.new_data)
comando = com.recv()
save_data = (comando == START_SIGNAL)
reg_files.actions(comando)
def get_data(com, send_warnings, dev, cola):
"""lee datos del USB, los reagrupa en una matriz de muestras y los envia por el buffer.
Si se le envia la segnal los guarda"""
Parser = getattr(import_module("Parsers."+CONFIG["FORMAT"]),"Parser")
save_data = False
reg_files = FileHandle()
parser = Parser(cola,logging,data_in)
comando = 'normal'
extra_data = 0
dev.start(int(CONFIG['FS']))
while(comando != EXIT_SIGNAL):
while not com.poll():  # keep reading while no commands are received
if (dev.data_available() >= 1000000):
new_data = parser.get_raw(extra_data)
dev.read_data(new_data)  # should be N larger, if N frames are missing
if save_data:
reg_files.save(new_data)
extra_data = parser.online_update(new_data)
new_data = parser.get_raw(extra_data)
while extra_data <= 0:  # exact fit or surplus data
try:
cola.put(parser.data, timeout = TIMEOUT_PUT)
except Queue_Full:
try:
send_warnings.put_nowait([SLOW_PROCESS_SIGNAL])
except Queue_Full:
logging.error(Errors_Messages[SLOW_GRAPHICS_SIGNAL])
if extra_data < 0:  # there was surplus data
extra_data = parser.online_update(new_data)
new_data = parser.get_raw(extra_data)
else:
break
else :
time.sleep(100 / CONFIG['FS'])
comando = com.recv()
save_data = (comando == START_SIGNAL)
reg_files.actions(comando)
dev.close()
class FileHandle():
"""Simplifica el guardado de archivos"""
def __init__(self):
# header file
self.paqxfile = FILE_CONFIG['MAX_SIZE_FILE']*2**20 /(
CONFIG['#CHANNELS']*CONFIG['PAQ_USB']*np.int16().nbytes
)
self.num_registro = -1
def new(self):
self.part = 1  # part number within the continuous recording
self.file_part = open(FILE_CONFIG['GENERIC_FILE'] +'-'+str(self.num_registro) + '-' +str(self.part), 'wb')
self.paq_in_part = 0
def save(self, data):
if(self.paq_in_part < self.paqxfile):
data.tofile(self.file_part)
self.paq_in_part += 1
else:
self.file_part.close()
self.part += 1
self.file_part = open(FILE_CONFIG['GENERIC_FILE'] + '-' + str(self.num_registro) +'-' +str(self.part),'wb')
data.tofile(self.file_part)
self.paq_in_part = 0
def actions(self, signal):
"""Interfaz para facilitar el envio de segniales"""
self.close()
if signal is START_SIGNAL:
self.num_registro += 1
self.new()
elif signal is EXIT_SIGNAL:
self.close()
def close(self):
"""Cierra el ultimo archivo y guarda configuraciones.
Puede seguir guardando luego, solo que se modifica el numero de registro"""
if self.num_registro >= 0:
from ConfigParser import ConfigParser
config_parser=ConfigParser()
newsection = 'GENERAL'
config_parser.add_section(newsection)
config_parser.set(newsection, 'fs', CONFIG['FS'])
config_parser.set(newsection, 'channels', CONFIG['#CHANNELS'])
config_parser.set(newsection, 'adc_scale', CONFIG['ADC_SCALE'])
config_parser.set(newsection, 'format', CONFIG['FORMAT'])
config_parser.set(newsection, 'online', 'False')
config_parser.set(newsection, 'filtered', CONFIG['FILTERED'])
newsection = "FORMAT_CONFIG"
config_parser.add_section(newsection)
for key in CONFIG_PARSER[newsection].keys():
config_parser.set(newsection, key, CONFIG_PARSER["FORMAT_CONFIG"][key])
# newsection = 'DATA_FRAME'
# config_parser.add_section(newsection)
# config_parser.set(newsection, 'l_frame', COMM['L_FRAME'])
# config_parser.set(newsection, 'channels_pos', COMM['CHANNELS_POS'])
# config_parser.set(newsection, 'counter_pos', COMM['COUNTER_POS'])
# config_parser.set(newsection, 'hash_pos', COMM['HASH_POS'])
# config_parser.set(newsection, 'ampcount', COMM['AMPCOUNT'])
newsection = 'DATA_INFO'
config_parser.add_section(newsection)
config_parser.set(newsection, 'samples4file',CONFIG['PAQ_USB']*self.paqxfile)
config_parser.set(newsection,'samples4lastfile',CONFIG['PAQ_USB']*self.paq_in_part)
config_parser.set(newsection,'files',self.part)
config_parser.set(newsection,'date',time.asctime( time.localtime(time.time())))
file_head = open(FILE_CONFIG['GENERIC_FILE'] +'-'+str(self.num_registro) + '-0','w')
config_parser.write(file_head)
file_head.close()
self.file_part.close()
class data_in():
"""Estructua que se envia al siguiente proceso"""
__slots__ = ['data_loss_cuts','spikes','channels']
def __init__(self):
self.data_loss_cuts = list()
self.spikes = list()
self.channels = np.ndarray([CONFIG['#CHANNELS'], CONFIG['PAQ_USB']],np.uint16)  # they are unsigned!
| mit |
YYWen0o0/python-frame-django | django/contrib/gis/db/backends/mysql/introspection.py | 43 | 1427 | from MySQLdb.constants import FIELD_TYPE
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.mysql.introspection import DatabaseIntrospection
class MySQLIntrospection(DatabaseIntrospection):
# Updating the data_types_reverse dictionary with the appropriate
# type for Geometry fields.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[FIELD_TYPE.GEOMETRY] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# In order to get the specific geometry type of the field,
# we introspect on the table definition using `DESCRIBE`.
cursor.execute('DESCRIBE %s' %
self.connection.ops.quote_name(table_name))
# Increment over description info until we get to the geometry
# column.
for column, typ, null, key, default, extra in cursor.fetchall():
if column == geo_col:
# Using OGRGeomType to convert from OGC name to Django field.
# MySQL does not support 3D or SRIDs, so the field params
# are empty.
field_type = OGRGeomType(typ).django
field_params = {}
break
finally:
cursor.close()
return field_type, field_params
| bsd-3-clause |
sriramsitharaman/sp17-i524 | project/S17-IR-P001/code/TwitterStreaming.py | 20 | 1982 | #Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
OutFile = "C:\\Users\\sriram\OneDrive for Business\\Election2016-"
Count=0
Fileno=1
TweetCount=0
out=open(OutFile+str(Fileno)+".txt","w")
#Variables that contains the user credentials to access Twitter API
access_token = "141817420-ViMO9ic2MuVmjw4u04CACINnCA0MIJEs2uaPbkYX"
access_token_secret = "LWNQKJYkHJnrAjNsH7LnrkKWmnf5qZ9akizidiWjbLhOy"
consumer_key = "SyLwuJP6pzy4FevmLMOmcWdpf"
consumer_secret = "XfZkuRRj5yqVmReRkAVvVFm9t6vaPHVeoXEdg85Iuqb8k524pU"
#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
def on_data(self, data):
#print (data)
global Count
global TweetCount
global Fileno
global out
Count+=1
TweetCount+=1
if Count%10000==0:
#print (Count)
out.close()
Count=0
Fileno+=1
out=open(OutFile+str(Fileno)+".txt","w")
if TweetCount%500==0:
print (TweetCount, " tweets received")
out.write(data)
return True
def on_error(self, status):
print (status)
if __name__ == '__main__':
#This handles Twitter authentication and the connection to the Twitter Streaming API
l = StdOutListener()
OutFile = "C:\\Users\\sriram\OneDrive for Business\\Election2016-"
Count=1
Fileno=1
TweetCount=1
out=open(OutFile+str(Fileno)+".txt","w")
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
#This line filters Twitter Streams to capture data matching the election-related keywords below
stream.filter(languages=["en"],track=['election2016', 'hilary', 'hillary','trump','republican','democrat','clinton']) | apache-2.0 |
roguecoin/roguecoin | SOAPpy/Errors.py | 294 | 3002 | """
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Errors.py 921 2005-02-15 16:32:23Z warnes $'
from version import __version__
import exceptions
################################################################################
# Exceptions
################################################################################
class Error(exceptions.Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "<Error : %s>" % self.msg
__repr__ = __str__
def __call__(self):
return (self.msg,)
class RecursionError(Error):
pass
class UnknownTypeError(Error):
pass
class HTTPError(Error):
# indicates an HTTP protocol error
def __init__(self, code, msg):
self.code = code
self.msg = msg
def __str__(self):
return "<HTTPError %s %s>" % (self.code, self.msg)
__repr__ = __str__
def __call__(self):
return (self.code, self.msg, )
class UnderflowError(exceptions.ArithmeticError):
pass
| gpl-3.0 |
OSSESAC/odoopubarquiluz | addons/l10n_be_hr_payroll/__openerp__.py | 118 | 1817 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgium - Payroll',
'category': 'Localization',
'author': 'OpenERP SA',
'depends': ['hr_payroll'],
'version': '1.0',
'description': """
Belgian Payroll Rules.
======================
* Employee Details
* Employee Contracts
* Passport based Contract
* Allowances/Deductions
* Allow to configure Basic/Gross/Net Salary
* Employee Payslip
* Monthly Payroll Register
* Integrated with Holiday Management
* Salary Maj, ONSS, Withholding Tax, Child Allowance, ...
""",
'auto_install': False,
'demo': ['l10n_be_hr_payroll_demo.xml'],
'data':[
'l10n_be_hr_payroll_view.xml',
'l10n_be_hr_payroll_data.xml',
'data/hr.salary.rule.csv',
],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lcpt/xc | verif/tests/preprocessor/cad/arco_circunf_05.py | 1 | 2685 | # -*- coding: utf-8 -*-
import xc_base
import geom
import xc
import math
import os
from model import predefined_spaces
from materials import typical_materials
__author__= "Luis C. Pérez Tato (LCPT)"
__copyright__= "Copyright 2014, LCPT"
__license__= "GPL"
__version__= "3.0"
__email__= "l.pereztato@gmail.com"
NumDiv= 2
R= 2.0
cos225= math.cos(math.radians(22.5))
sin225= math.sin(math.radians(22.5))
cos45= math.cos(math.radians(45))
sin45= cos45
cos675= math.cos(math.radians(67.5))
sin675= math.sin(math.radians(67.5))
# Problem type
feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor
nodes= preprocessor.getNodeHandler
modelSpace= predefined_spaces.SolidMechanics3D(nodes)
# Define materials
elast= typical_materials.defElasticMaterial(preprocessor, "elast",3000)
nodes.newSeedNode()
seedElemHandler= preprocessor.getElementHandler.seedElemHandler
seedElemHandler.defaultMaterial= "elast"
seedElemHandler.dimElem= 3 # Dimension of element space
seedElemHandler.defaultTag= 1 #Tag for the next element.
truss= seedElemHandler.newElement("Truss",xc.ID([0,0]))
truss.area= 10.0
points= preprocessor.getMultiBlockTopology.getPoints
pt= points.newPntIDPos3d(1,geom.Pos3d(R,0.0,0.0))
pt= points.newPntIDPos3d(2,geom.Pos3d(R*cos225,R*sin225,0.0))
pt= points.newPntIDPos3d(3,geom.Pos3d((R*cos45),(R*sin45),0.0))
pt= points.newPntIDPos3d(4,geom.Pos3d((R*cos675),(R*sin675),0.0))
pt= points.newPntIDPos3d(5,geom.Pos3d(0.0,R,0.0))
lines= preprocessor.getMultiBlockTopology.getLines
lines.defaultTag= 1
l1= lines.newCircleArc(1,2,3)
l1.nDiv= NumDiv
th1= l1.getTheta1()
th2= l1.getTheta2()
long= l1.getLong()
xC= l1.getCenter().x
yC= l1.getCenter().y
zC= l1.getCenter().z
xi= l1.getPInic().x
yi= l1.getPInic().y
zi= l1.getPInic().z
r= l1.getRadius()
l2= lines.newCircleArc(3,4,5)
l2.nDiv= NumDiv
l1= preprocessor.getSets.getSet("l1")
l1.genMesh(xc.meshDir.I)
l2= preprocessor.getSets.getSet("l2")
l2.genMesh(xc.meshDir.I)
nnodes= l1.getNumNodes+l2.getNumNodes-1
nodes= preprocessor.getNodeHandler
nod3= nodes.getNode(3)
x3= nod3.get3dCoo[0]
y3= nod3.get3dCoo[1]
nod5= nodes.getNode(5)
x5= nod5.get3dCoo[0]
y5= nod5.get3dCoo[1]
nnodteor= 2*NumDiv+1
ratio1= (nnodteor/nnodes)
ratio2= (y5-x3)**2+(y3-x5)**2
'''
print "ratio1= ",(ratio1)
print "theta1= ",(rad2deg(th1))
print "theta2= ",(rad2deg(th2))
print "xc= ",(xc)
print "yc= ",(yc)
print "zc= ",(zc)
print "xi= ",(xi)
print "yi= ",(yi)
print "zi= ",(zi)
print "radius= ",(r)
print "ratio2= ",(ratio2)
'''
import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if (abs(ratio1-1.0)<1e-12) & (abs(ratio2)<1e-5):
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
| gpl-3.0 |
citrix-openstack/build-trove | trove/openstack/common/rpc/__init__.py | 3 | 11700 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A remote procedure call (rpc) abstraction.
For some wrappers that add message versioning to rpc, see:
rpc.dispatcher
rpc.proxy
"""
import inspect
from oslo.config import cfg
from trove.openstack.common.gettextutils import _ # noqa
from trove.openstack.common import importutils
from trove.openstack.common import local
from trove.openstack.common import log as logging
LOG = logging.getLogger(__name__)
rpc_opts = [
cfg.StrOpt('rpc_backend',
default='%s.impl_kombu' % __package__,
help="The messaging module to use, defaults to kombu."),
cfg.IntOpt('rpc_thread_pool_size',
default=64,
help='Size of RPC thread pool'),
cfg.IntOpt('rpc_conn_pool_size',
default=30,
help='Size of RPC connection pool'),
cfg.IntOpt('rpc_response_timeout',
default=60,
help='Seconds to wait for a response from call or multicall'),
cfg.IntOpt('rpc_cast_timeout',
default=30,
help='Seconds to wait before a cast expires (TTL). '
'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules',
default=['nova.exception',
'cinder.exception',
'exceptions',
],
                help='Modules of exceptions that are permitted to be recreated '
'upon receiving exception data from an rpc call.'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
cfg.StrOpt('control_exchange',
default='openstack',
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
CONF = cfg.CONF
CONF.register_opts(rpc_opts)
def set_defaults(control_exchange):
cfg.set_defaults(rpc_opts,
control_exchange=control_exchange)
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
For some example usage of creating a connection and some consumers on that
connection, see nova.service.
:param new: Whether or not to create a new connection. A new connection
will be created by default. If new is False, the
implementation is free to return an existing connection from a
pool.
:returns: An instance of openstack.common.rpc.common.Connection
"""
return _get_impl().create_connection(CONF, new=new)
def _check_for_lock():
if not CONF.debug:
return None
if ((hasattr(local.strong_store, 'locks_held')
and local.strong_store.locks_held)):
stack = ' :: '.join([frame[3] for frame in inspect.stack()])
        LOG.warn(_('An RPC call is being made while holding a lock. The locks '
'currently held are %(locks)s. This is probably a bug. '
'Please report it. Include the following: [%(stack)s].'),
{'locks': local.strong_store.locks_held,
'stack': stack})
return True
return False
def call(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:param check_for_lock: if True, a warning is emitted if a RPC call is made
with a lock held.
:returns: A dict from the remote method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
if check_for_lock:
_check_for_lock()
return _get_impl().call(CONF, context, topic, msg, timeout)
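# Illustrative example (topic and method names are hypothetical):
#
#   result = call(context, 'compute',
#                 {'method': 'get_instance_count',
#                  'args': {'project_id': 'demo'}},
#                 timeout=30)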
def cast(context, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast(CONF, context, topic, msg)
def fanout_cast(context, topic, msg):
"""Broadcast a remote method invocation with no return.
This method will get invoked on all consumers that were set up with this
topic name and fanout=True.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=True.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast(CONF, context, topic, msg)
def multicall(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method and get back an iterator.
In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as they come in via
an iterator.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:param check_for_lock: if True, a warning is emitted if a RPC call is made
with a lock held.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value
returned and X is the Nth value that was returned by the remote
method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
if check_for_lock:
_check_for_lock()
return _get_impl().multicall(CONF, context, topic, msg, timeout)
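# Illustrative example (topic and method names are hypothetical); per the
# docstring above, each yielded item is an (N, X) tuple:
#
#   for n, value in multicall(context, 'worker',
#                             {'method': 'stream_results', 'args': {}}):
#       handle(value)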
def notify(context, topic, msg, envelope=False):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
:param envelope: Set to True to enable message envelope for notifications.
:returns: None
"""
return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
def cleanup():
"""Clean up resoruces in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup()
def cast_to_server(context, server_params, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast_to_server(CONF, context, server_params, topic,
msg)
def fanout_cast_to_server(context, server_params, topic, msg):
"""Broadcast to a remote method invocation with no return.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast_to_server(CONF, context, server_params,
topic, msg)
def queue_get_for(context, topic, host):
"""Get a queue name for a given topic + host.
This function only works if this naming convention is followed on the
consumer side, as well. For example, in nova, every instance of the
nova-foo service calls create_consumer() for two topics:
foo
foo.<host>
Messages sent to the 'foo' topic are distributed to exactly one instance of
the nova-foo service. The services are chosen in a round-robin fashion.
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
return '%s.%s' % (topic, host) if host else topic
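# Illustrative example:
#
#   queue_get_for(context, 'foo', 'host1')  # -> 'foo.host1' (one host)
#   queue_get_for(context, 'foo', None)     # -> 'foo' (round-robin topic)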
_RPCIMPL = None
def _get_impl():
"""Delay import of rpc_backend until configuration is loaded."""
global _RPCIMPL
if _RPCIMPL is None:
try:
_RPCIMPL = importutils.import_module(CONF.rpc_backend)
except ImportError:
# For backwards compatibility with older nova config.
impl = CONF.rpc_backend.replace('nova.rpc',
'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
return _RPCIMPL
| apache-2.0 |
Lh4cKg/sl4a | python/src/Mac/Modules/win/winedit.py | 39 | 1612 | # These are inline-routines/defines, so we do them "by hand"
#
f = Method(Boolean, 'IsWindowVisible',
(WindowRef, 'theWindow', InMode),
)
methods.append(f)
f = Method(void, 'GetWindowStructureRgn',
(WindowRef, 'theWindow', InMode),
(RgnHandle, 'r', InMode),
)
methods.append(f)
f = Method(void, 'GetWindowContentRgn',
(WindowRef, 'theWindow', InMode),
(RgnHandle, 'r', InMode),
)
methods.append(f)
f = Method(void, 'GetWindowUpdateRgn',
(WindowRef, 'theWindow', InMode),
(RgnHandle, 'r', InMode),
)
methods.append(f)
f = Method(ExistingWindowPtr, 'GetNextWindow',
(WindowRef, 'theWindow', InMode),
)
methods.append(f)
f = Function(short, 'FindWindow',
(Point, 'thePoint', InMode),
(ExistingWindowPtr, 'theWindow', OutMode),
)
functions.append(f)
f = Method(void, 'MoveWindow',
(WindowPtr, 'theWindow', InMode),
(short, 'hGlobal', InMode),
(short, 'vGlobal', InMode),
(Boolean, 'front', InMode),
)
methods.append(f)
f = Method(void, 'ShowWindow',
(WindowPtr, 'theWindow', InMode),
)
methods.append(f)
#
# A method to set the auto-dispose flag
#
AutoDispose_body = """
int onoff, old = 0;
if (!PyArg_ParseTuple(_args, "i", &onoff))
return NULL;
if ( _self->ob_freeit )
old = 1;
if ( onoff )
_self->ob_freeit = PyMac_AutoDisposeWindow;
else
_self->ob_freeit = NULL;
_res = Py_BuildValue("i", old);
return _res;
"""
f = ManualGenerator("AutoDispose", AutoDispose_body)
f.docstring = lambda: "(int)->int. Automatically DisposeWindow the object on Python object cleanup"
methods.append(f)
| apache-2.0 |
reelai/packstack | packstack/installer/utils/network.py | 8 | 2797 | # -*- coding: utf-8 -*-
import re
import socket
from ..exceptions import NetworkError
from .shell import execute, ScriptRunner
def get_localhost_ip():
"""
Returns IP address of localhost.
"""
# TO-DO: Will probably need to find better way to find out localhost
# address.
# find nameservers
ns_regex = re.compile('nameserver\s*(?P<ns_ip>[\d\.\:]+)')
rc, resolv = execute('cat /etc/resolv.conf | grep nameserver',
can_fail=False, use_shell=True, log=False)
nsrvs = []
for line in resolv.split('\n'):
match = ns_regex.match(line.strip())
if match:
nsrvs.append(match.group('ns_ip'))
# try to connect to nameservers and return own IP address
nsrvs.append('8.8.8.8') # default to google dns
for i in nsrvs:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((i, 0))
loc_ip = s.getsockname()[0]
except socket.error:
continue
else:
return loc_ip
raise NetworkError('Local IP address discovery failed. Please set '
'nameserver correctly.')
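# Usage sketch (illustrative): resolve the local address once, e.g. at
# installer startup; the lookup falls back to Google DNS (8.8.8.8) when
# /etc/resolv.conf lists no nameservers.
#
#   local_ip = get_localhost_ip()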
def host2ip(hostname, allow_localhost=False):
"""
Converts given hostname to IP address. Raises NetworkError
if conversion failed.
"""
try:
ip_list = socket.gethostbyaddr(hostname)[2]
if allow_localhost:
return ip_list[0]
else:
local_ips = ('127.0.0.1', '::1')
for ip in ip_list:
if ip not in local_ips:
break
else:
raise NameError()
return ip
except NameError:
# given hostname is localhost, return appropriate IP address
return get_localhost_ip()
except socket.error:
raise NetworkError('Unknown hostname %s.' % hostname)
except Exception, ex:
raise NetworkError('Unknown error appeared: %s' % repr(ex))
def force_ip(host, allow_localhost=False):
host = host.strip()
ipv4_regex = re.compile('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
ipv6_regex = re.compile('[abcdef\d\:]+')
    if not ipv4_regex.match(host) and not ipv6_regex.match(host):
host = host2ip(host, allow_localhost=allow_localhost)
return host
def device_from_ip(ip):
server = ScriptRunner()
server.append("DEVICE=($(ip -o address show to %s | cut -f 2 -d ' '))"
% ip)
# Ensure that the IP is only assigned to one interface
server.append("if [ ! -z ${DISPLAY[1]} ]; then false; fi")
# Test device, raises an exception if it doesn't exist
server.append("ip link show \"$DEVICE\" > /dev/null")
server.append("echo $DEVICE")
rv, stdout = server.execute()
return stdout.strip()
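# Usage sketch (illustrative host name and device):
#
#   ip = force_ip('controller.example.com')  # hostname -> IP
#   dev = device_from_ip(ip)                 # e.g. 'eth0'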
| apache-2.0 |
pablodiguerero/asterisk.api | models/calendar.py | 1 | 1827 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ~ Author: Pavel Ivanov
from models.base import db
from models.user import User
from modules.sqlalchemy.uuid import GUID
from modules.sqlalchemy.rrule import RRule
class CalendarRubric(db.Model):
""" Рубрики событий календаря
"""
__tablename__ = "crm_calendar_rubrics"
id = db.Column(db.Integer, primary_key=True)
caption = db.Column(db.String(255), nullable=False)
def __init__(self, caption):
"""
:param caption:
"""
self.caption = caption
class CalendarEventRubrics(db.Model):
"""
"""
__tablename__ = "crm_calendar_event_rubrics"
event_id = db.Column(GUID, db.ForeignKey("crm_calendar_event.id", ondelete="CASCADE"), primary_key=True)
rubric_id = db.Column(db.Integer, db.ForeignKey("crm_calendar_rubrics.id", ondelete="CASCADE"), primary_key=True)
class CalendarEvent(db.Model):
""" Событие календаря
"""
__tablename__ = "crm_calendar_event"
id = db.Column(GUID, primary_key=True)
author_id = db.Column(db.Integer, db.ForeignKey("crm_users.id", ondelete="SET NULL"), nullable=False)
caption = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text, nullable=True)
start_date = db.Column(db.DateTime, nullable=False)
rules = db.Column(RRule, nullable=True)
author = db.relationship(User, uselist=False, backref=db.backref("events", uselist=True))
rubrics = db.relationship(CalendarRubric, uselist=True, lazy="joined", secondary="crm_calendar_event_rubrics")
def __init__(self, caption, start_date):
"""
:param caption:
:param start_date:
"""
self.caption = caption
self.start_date = start_date
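# Usage sketch (illustrative; assumes an initialized db session and an
# existing `author` User instance):
#
#   import datetime, uuid
#   event = CalendarEvent(u"Standup", datetime.datetime(2016, 1, 11, 9, 30))
#   event.id = uuid.uuid4()
#   event.author = author
#   event.rubrics.append(CalendarRubric(u"Meetings"))
#   db.session.add(event)
#   db.session.commit()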
| mit |
thurt/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_multibytecodec_support.py | 55 | 12606 | #!/usr/bin/env python
#
# test_multibytecodec_support.py
# Common Unittest Routines for CJK codecs
#
import sys, codecs
import unittest, re
from test import test_support
from StringIO import StringIO
class TestBase:
encoding = '' # codec name
codec = None # codec tuple (with 4 elements)
tstring = '' # string to test StreamReader
codectests = None # must set. codec test tuple
roundtriptest = 1 # set if roundtrip is possible with unicode
has_iso10646 = 0 # set if this encoding contains whole iso10646 map
xmlcharnametest = None # string to test xmlcharrefreplace
unmappedunicode = u'\udeee' # a unicode codepoint that is not mapped.
def setUp(self):
if self.codec is None:
self.codec = codecs.lookup(self.encoding)
self.encode = self.codec.encode
self.decode = self.codec.decode
self.reader = self.codec.streamreader
self.writer = self.codec.streamwriter
self.incrementalencoder = self.codec.incrementalencoder
self.incrementaldecoder = self.codec.incrementaldecoder
def test_chunkcoding(self):
for native, utf8 in zip(*[StringIO(f).readlines()
for f in self.tstring]):
u = self.decode(native)[0]
self.assertEqual(u, utf8.decode('utf-8'))
if self.roundtriptest:
self.assertEqual(native, self.encode(u)[0])
def test_errorhandle(self):
for source, scheme, expected in self.codectests:
if type(source) == type(''):
func = self.decode
else:
func = self.encode
if expected:
result = func(source, scheme)[0]
self.assertEqual(result, expected)
else:
self.assertRaises(UnicodeError, func, source, scheme)
def test_xmlcharrefreplace(self):
if self.has_iso10646:
return
s = u"\u0b13\u0b23\u0b60 nd eggs"
self.assertEqual(
self.encode(s, "xmlcharrefreplace")[0],
"ଓଣୠ nd eggs"
)
def test_customreplace_encode(self):
if self.has_iso10646:
return
from htmlentitydefs import codepoint2name
def xmlcharnamereplace(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
l = []
for c in exc.object[exc.start:exc.end]:
if ord(c) in codepoint2name:
l.append(u"&%s;" % codepoint2name[ord(c)])
else:
l.append(u"&#%d;" % ord(c))
return (u"".join(l), exc.end)
codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace)
if self.xmlcharnametest:
sin, sout = self.xmlcharnametest
else:
sin = u"\xab\u211c\xbb = \u2329\u1234\u232a"
sout = "«ℜ» = ⟨ሴ⟩"
self.assertEqual(self.encode(sin,
"test.xmlcharnamereplace")[0], sout)
def test_callback_wrong_objects(self):
def myreplace(exc):
return (ret, exc.end)
codecs.register_error("test.cjktest", myreplace)
for ret in ([1, 2, 3], [], None, object(), 'string', ''):
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_long_index(self):
def myreplace(exc):
return (u'x', long(exc.end))
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
'test.cjktest'), ('abcdxefgh', 9))
def myreplace(exc):
return (u'x', sys.maxint + 1)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_None_index(self):
def myreplace(exc):
return (u'x', None)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_backward_index(self):
def myreplace(exc):
if myreplace.limit > 0:
myreplace.limit -= 1
return (u'REPLACED', 0)
else:
return (u'TERMINAL', exc.end)
myreplace.limit = 3
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
'test.cjktest'),
('abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9))
def test_callback_forward_index(self):
def myreplace(exc):
return (u'REPLACED', exc.end + 2)
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
'test.cjktest'), ('abcdREPLACEDgh', 9))
def test_callback_index_outofbound(self):
def myreplace(exc):
return (u'TERM', 100)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_incrementalencoder(self):
UTF8Reader = codecs.getreader('utf-8')
for sizehint in [None] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(StringIO(self.tstring[1]))
ostream = StringIO()
encoder = self.incrementalencoder()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
e = encoder.encode(data)
ostream.write(e)
self.assertEqual(ostream.getvalue(), self.tstring[0])
def test_incrementaldecoder(self):
UTF8Writer = codecs.getwriter('utf-8')
for sizehint in [None, -1] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = StringIO(self.tstring[0])
ostream = UTF8Writer(StringIO())
decoder = self.incrementaldecoder()
while 1:
data = istream.read(sizehint)
if not data:
break
else:
u = decoder.decode(data)
ostream.write(u)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_incrementalencoder_error_callback(self):
inv = self.unmappedunicode
e = self.incrementalencoder()
self.assertRaises(UnicodeEncodeError, e.encode, inv, True)
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), '')
e.reset()
def tempreplace(exc):
return (u'called', exc.end)
codecs.register_error('test.incremental_error_callback', tempreplace)
e.errors = 'test.incremental_error_callback'
self.assertEqual(e.encode(inv, True), 'called')
# again
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), '')
def test_streamreader(self):
UTF8Writer = codecs.getwriter('utf-8')
for name in ["read", "readline", "readlines"]:
for sizehint in [None, -1] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = self.reader(StringIO(self.tstring[0]))
ostream = UTF8Writer(StringIO())
func = getattr(istream, name)
while 1:
data = func(sizehint)
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_streamwriter(self):
readfuncs = ('read', 'readline', 'readlines')
UTF8Reader = codecs.getreader('utf-8')
for name in readfuncs:
for sizehint in [None] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(StringIO(self.tstring[1]))
ostream = self.writer(StringIO())
func = getattr(istream, name)
while 1:
if sizehint is not None:
data = func(sizehint)
else:
data = func()
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[0])
if len(u'\U00012345') == 2: # ucs2 build
_unichr = unichr
def unichr(v):
if v >= 0x10000:
return _unichr(0xd800 + ((v - 0x10000) >> 10)) + \
_unichr(0xdc00 + ((v - 0x10000) & 0x3ff))
else:
return _unichr(v)
_ord = ord
def ord(c):
if len(c) == 2:
return 0x10000 + ((_ord(c[0]) - 0xd800) << 10) + \
(ord(c[1]) - 0xdc00)
else:
return _ord(c)
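# Illustrative check of the ucs2 helpers above: unichr(0x12345) yields the
# surrogate pair u'\ud808\udf45', and ord() of that pair recovers 0x12345.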
class TestBase_Mapping(unittest.TestCase):
pass_enctest = []
pass_dectest = []
supmaps = []
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
self.open_mapping_file() # test it to report the error early
def open_mapping_file(self):
return test_support.open_urlresource(self.mapfileurl)
def test_mapping_file(self):
if self.mapfileurl.endswith('.xml'):
self._test_mapping_file_ucm()
else:
self._test_mapping_file_plain()
def _test_mapping_file_plain(self):
unichrs = lambda s: u''.join(map(unichr, map(eval, s.split('+'))))
urt_wa = {}
for line in self.open_mapping_file():
if not line:
break
data = line.split('#')[0].strip().split()
if len(data) != 2:
continue
csetval = eval(data[0])
if csetval <= 0x7F:
csetch = chr(csetval & 0xff)
elif csetval >= 0x1000000:
csetch = chr(csetval >> 24) + chr((csetval >> 16) & 0xff) + \
chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
elif csetval >= 0x10000:
csetch = chr(csetval >> 16) + \
chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
elif csetval >= 0x100:
csetch = chr(csetval >> 8) + chr(csetval & 0xff)
else:
continue
unich = unichrs(data[1])
if ord(unich) == 0xfffd or urt_wa.has_key(unich):
continue
urt_wa[unich] = csetch
self._testpoint(csetch, unich)
def _test_mapping_file_ucm(self):
ucmdata = self.open_mapping_file().read()
uc = re.findall('<a u="([A-F0-9]{4})" b="([0-9A-F ]+)"/>', ucmdata)
for uni, coded in uc:
unich = unichr(int(uni, 16))
codech = ''.join(chr(int(c, 16)) for c in coded.split())
self._testpoint(codech, unich)
def test_mapping_supplemental(self):
for mapping in self.supmaps:
self._testpoint(*mapping)
def _testpoint(self, csetch, unich):
if (csetch, unich) not in self.pass_enctest:
try:
self.assertEqual(unich.encode(self.encoding), csetch)
except UnicodeError, exc:
self.fail('Encoding failed while testing %s -> %s: %s' % (
repr(unich), repr(csetch), exc.reason))
if (csetch, unich) not in self.pass_dectest:
try:
self.assertEqual(csetch.decode(self.encoding), unich)
except UnicodeError, exc:
self.fail('Decoding failed while testing %s -> %s: %s' % (
repr(csetch), repr(unich), exc.reason))
def load_teststring(encoding):
from test import cjkencodings_test
return cjkencodings_test.teststring[encoding]
| apache-2.0 |
ThisWeekInBlenderDev/source | content/conf.py | 1 | 1582 |
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join('..', 'exts')))
extensions = [
# general
'youtube',
'newsfeed',
'extlinks_plus',
'googleanalytics',
# sphinx extensions
'sphinx.ext.githubpages',
# installed with pip
'sphinxjp.themes.basicstrap',
]
extlinks_plus = {
'task': ('http://developer.blender.org/T%s', '%s: (%s)', 'T%s'),
'diff': ('http://developer.blender.org/D%s', '%s: (%s)', 'D%s'),
# Show 9 chars of revision (typically there is a B prefix, so 8 chars or SHA1).
'rev': ('http://developer.blender.org/r%s', '%s: (%s)', 'r%.9s'),
}
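# Illustrative reST usage of the roles above, assuming extlinks_plus behaves
# like sphinx.ext.extlinks (the IDs are made up):
#
#   :task:`12345` -> http://developer.blender.org/T12345
#   :diff:`2791`  -> http://developer.blender.org/D2791
#   :rev:`0123456789abcdef` -> link text truncated to 9 chars via 'r%.9s'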
googleanalytics_enabled = True
googleanalytics_id = 'UA-105840115-1'
# The suffix of source filenames.
source_suffix = '.rst'
# exclude_patterns = ['']
master_doc = 'index'
# General information about the project.
project = 'This Week in Blender Development'
copyright = 'Creative Commons'
# include at end of every file
# rst_epilog = ''
templates_path = ['../resources/templates']
# without this the title defaults to 'documentation', which this is not
html_title = 'This Week in Blender Development'
html_short_title = 'This Week in Blender Dev'
html_static_path = ["../resources/theme"]
# suppress footer items that are just visual noise here
html_show_copyright = False
html_show_sphinx = False
html_copy_source = False
html_show_sourcelink = False
def setup(app):
app.add_stylesheet("theme_overrides.css")
html_theme = 'basicstrap'
html_theme_options = {
'header_inverse': False,
'relbar_inverse': False,
'inner_theme': True,
'inner_theme_name': 'bootswatch-flatly',
}
| apache-2.0 |
daevaorn/kombu | kombu/transport/django/models.py | 15 | 1125 | from __future__ import absolute_import
import django
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .managers import QueueManager, MessageManager
class Queue(models.Model):
name = models.CharField(_('name'), max_length=200, unique=True)
objects = QueueManager()
class Meta:
if django.VERSION >= (1, 7):
app_label = 'kombu_transport_django'
db_table = 'djkombu_queue'
verbose_name = _('queue')
verbose_name_plural = _('queues')
class Message(models.Model):
visible = models.BooleanField(default=True, db_index=True)
sent_at = models.DateTimeField(null=True, blank=True, db_index=True,
auto_now_add=True)
payload = models.TextField(_('payload'), null=False)
queue = models.ForeignKey(Queue, related_name='messages')
objects = MessageManager()
class Meta:
if django.VERSION >= (1, 7):
app_label = 'kombu_transport_django'
db_table = 'djkombu_message'
verbose_name = _('message')
verbose_name_plural = _('messages')
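# Usage sketch (illustrative): with 'kombu.transport.django' added to
# INSTALLED_APPS and its tables migrated, Kombu/Celery can use the Django
# database as a broker, e.g. in a Celery settings module:
#
#   BROKER_URL = 'django://'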
| bsd-3-clause |
markneville/nupic | tests/unit/nupic/encoders/multi_test.py | 18 | 4941 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for multi- encoder"""
import numpy
import tempfile
import unittest2 as unittest
from nupic.encoders.multi import MultiEncoder
from nupic.encoders import ScalarEncoder, SDRCategoryEncoder
from nupic.data.dictutils import DictObj
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.multi_capnp import MultiEncoderProto
class MultiEncoderTest(unittest.TestCase):
"""Unit tests for MultiEncoder class"""
def testMultiEncoder(self):
"""Testing MultiEncoder..."""
e = MultiEncoder()
# should be 7 bits wide
    # use of forced=True is not recommended, but here for readability, see
# scalar.py
e.addEncoder("dow",
ScalarEncoder(w=3, resolution=1, minval=1, maxval=8,
periodic=True, name="day of week", forced=True))
    # should be 14 bits wide
e.addEncoder("myval",
ScalarEncoder(w=5, resolution=1, minval=1, maxval=10,
periodic=False, name="aux", forced=True))
self.assertEqual(e.getWidth(), 21)
self.assertEqual(e.getDescription(), [("day of week", 0), ("aux", 7)])
d = DictObj(dow=3, myval=10)
expected=numpy.array([0,1,1,1,0,0,0] + [0,0,0,0,0,0,0,0,0,1,1,1,1,1],
dtype="uint8")
output = e.encode(d)
self.assertTrue(numpy.array_equal(expected, output))
# Check decoding
decoded = e.decode(output)
self.assertEqual(len(decoded), 2)
(ranges, _) = decoded[0]["aux"]
self.assertEqual(len(ranges), 1)
self.assertTrue(numpy.array_equal(ranges[0], [10, 10]))
(ranges, _) = decoded[0]["day of week"]
self.assertTrue(len(ranges) == 1 and numpy.array_equal(ranges[0], [3, 3]))
e.addEncoder("myCat",
SDRCategoryEncoder(n=7, w=3,
categoryList=["run", "pass","kick"],
forced=True))
d = DictObj(dow=4, myval=6, myCat="pass")
output = e.encode(d)
topDownOut = e.topDownCompute(output)
self.assertAlmostEqual(topDownOut[0].value, 4.5)
self.assertEqual(topDownOut[1].value, 6.0)
self.assertEqual(topDownOut[2].value, "pass")
self.assertEqual(topDownOut[2].scalar, 2)
self.assertEqual(topDownOut[2].encoding.sum(), 3)
@unittest.skipUnless(
capnp, "pycapnp is not installed, skipping serialization test.")
def testReadWrite(self):
original = MultiEncoder()
original.addEncoder("dow",
ScalarEncoder(w=3, resolution=1, minval=1, maxval=8,
periodic=True, name="day of week",
forced=True))
original.addEncoder("myval",
ScalarEncoder(w=5, resolution=1, minval=1, maxval=10,
periodic=False, name="aux", forced=True))
originalValue = DictObj(dow=3, myval=10)
output = original.encode(originalValue)
proto1 = MultiEncoderProto.new_message()
original.write(proto1)
# Write the proto to a temp file and read it back into a new proto
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = MultiEncoderProto.read(f)
encoder = MultiEncoder.read(proto2)
self.assertIsInstance(encoder, MultiEncoder)
self.assertEqual(encoder.name, original.name)
self.assertEqual(encoder.width, original.width)
self.assertTrue(numpy.array_equal(encoder.encode(originalValue), output))
testObj1 = DictObj(dow=4, myval=9)
self.assertEqual(original.decode(encoder.encode(testObj1)),
encoder.decode(original.encode(testObj1)))
# Feed in a new value and ensure the encodings match
testObj2 = DictObj(dow=5, myval=8)
result1 = original.encode(testObj2)
result2 = encoder.encode(testObj2)
self.assertTrue(numpy.array_equal(result1, result2))
if __name__ == "__main__":
unittest.main()
| agpl-3.0 |
MadeiraCloud/opsagent | libs/jinja2/debug.py | 620 | 10980 | # -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from types import TracebackType
from jinja2.utils import missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
from jinja2._compat import iteritems, reraise, code_type
# on pypy we can take advantage of transparent proxies
try:
from __pypy__ import tproxy
except ImportError:
tproxy = None
# how does the raise helper look like?
try:
exec("raise TypeError, 'foo'")
except SyntaxError:
raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
"""Proxies a traceback frame."""
def __init__(self, tb):
self.tb = tb
self._tb_next = None
@property
def tb_next(self):
return self._tb_next
def set_next(self, next):
if tb_set_next is not None:
try:
tb_set_next(self.tb, next and next.tb or None)
except Exception:
# this function can fail due to all the hackery it does
                # on various python implementations.  We just catch those
                # errors here and ignore them if necessary.
pass
self._tb_next = next
@property
def is_jinja_frame(self):
return '__jinja_template__' in self.tb.tb_frame.f_globals
def __getattr__(self, name):
return getattr(self.tb, name)
def make_frame_proxy(frame):
proxy = TracebackFrameProxy(frame)
if tproxy is None:
return proxy
def operation_handler(operation, *args, **kwargs):
if operation in ('__getattribute__', '__getattr__'):
return getattr(proxy, args[0])
elif operation == '__setattr__':
proxy.__setattr__(*args, **kwargs)
else:
return getattr(proxy, operation)(*args, **kwargs)
return tproxy(TracebackType, operation_handler)
class ProcessedTraceback(object):
"""Holds a Jinja preprocessed traceback for printing or reraising."""
def __init__(self, exc_type, exc_value, frames):
assert frames, 'no frames for this traceback?'
self.exc_type = exc_type
self.exc_value = exc_value
self.frames = frames
# newly concatenate the frames (which are proxies)
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.set_next(tb)
prev_tb = tb
prev_tb.set_next(None)
def render_as_text(self, limit=None):
"""Return a string with the traceback."""
lines = traceback.format_exception(self.exc_type, self.exc_value,
self.frames[0], limit=limit)
return ''.join(lines).rstrip()
def render_as_html(self, full=False):
"""Return a unicode string with the traceback as rendered HTML."""
from jinja2.debugrenderer import render_traceback
return u'%s\n\n<!--\n%s\n-->' % (
render_traceback(self, full=full),
self.render_as_text().decode('utf-8', 'replace')
)
@property
def is_template_syntax_error(self):
"""`True` if this is a template syntax error."""
return isinstance(self.exc_value, TemplateSyntaxError)
@property
def exc_info(self):
"""Exception info tuple with a proxy around the frame objects."""
return self.exc_type, self.exc_value, self.frames[0]
@property
def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
tb = self.frames[0]
# the frame will be an actual traceback (or transparent proxy) if
# we are on pypy or a python implementation with support for tproxy
if type(tb) is not TracebackType:
tb = tb.tb
return self.exc_type, self.exc_value, tb
def make_traceback(exc_info, source_hint=None):
"""Creates a processed traceback object from the exc_info."""
exc_type, exc_value, tb = exc_info
if isinstance(exc_value, TemplateSyntaxError):
exc_info = translate_syntax_error(exc_value, source_hint)
initial_skip = 0
else:
initial_skip = 1
return translate_exception(exc_info, initial_skip)
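# Usage sketch (illustrative; `template` is a hypothetical jinja2.Template,
# and this is not how Jinja wires the helper in internally):
#
#   import sys
#   try:
#       template.render()
#   except Exception:
#       tb = make_traceback(sys.exc_info())
#       reraise(*tb.standard_exc_info)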
def translate_syntax_error(error, source=None):
"""Rewrites a syntax error to please traceback systems."""
error.source = source
error.translated = True
exc_info = (error.__class__, error, None)
filename = error.filename
if filename is None:
filename = '<unknown>'
return fake_exc_info(exc_info, filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in range(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(make_frame_proxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to
# reraise it unchanged.
# XXX: can we backup here? when could this happen?
if not frames:
reraise(exc_info[0], exc_info[1], exc_info[2])
return ProcessedTraceback(exc_info[0], exc_info[1], frames)
def fake_exc_info(exc_info, filename, lineno):
"""Helper for `translate_exception`."""
exc_type, exc_value, tb = exc_info
# figure the real context out
if tb is not None:
real_locals = tb.tb_frame.f_locals.copy()
ctx = real_locals.get('context')
if ctx:
locals = ctx.get_all()
else:
locals = {}
for name, value in iteritems(real_locals):
if name.startswith('l_') and value is not missing:
locals[name[2:]] = value
# if there is a local called __jinja_exception__, we get
# rid of it to not break the debug functionality.
locals.pop('__jinja_exception__', None)
else:
locals = {}
    # assemble the fake globals we need
globals = {
'__name__': filename,
'__file__': filename,
'__jinja_exception__': exc_info[:2],
# we don't want to keep the reference to the template around
# to not cause circular dependencies, but we mark it as Jinja
# frame for the ProcessedTraceback
'__jinja_template__': None
}
# and fake the exception
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
# if it's possible, change the name of the code. This won't work
# on some python environments such as google appengine
try:
if tb is None:
location = 'template'
else:
function = tb.tb_frame.f_code.co_name
if function == 'root':
location = 'top-level template code'
elif function.startswith('block_'):
location = 'block "%s"' % function[6:]
else:
location = 'template'
code = code_type(0, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
except:
pass
# execute the code and catch the new traceback
try:
exec(code, globals, locals)
except:
exc_info = sys.exc_info()
new_tb = exc_info[2].tb_next
# return without this frame
return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
any python traceback object. Do not attempt to use this on non cpython
interpreters
"""
import ctypes
from types import TracebackType
    # figure out the size of _Py_ssize_t
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
_Py_ssize_t = ctypes.c_int64
else:
_Py_ssize_t = ctypes.c_int
# regular python
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
# python with trace
if hasattr(sys, 'getobjects'):
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('_ob_next', ctypes.POINTER(_PyObject)),
('_ob_prev', ctypes.POINTER(_PyObject)),
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
class _Traceback(_PyObject):
pass
_Traceback._fields_ = [
('tb_next', ctypes.POINTER(_Traceback)),
('tb_frame', ctypes.POINTER(_PyObject)),
('tb_lasti', ctypes.c_int),
('tb_lineno', ctypes.c_int)
]
def tb_set_next(tb, next):
"""Set the tb_next attribute of a traceback object."""
if not (isinstance(tb, TracebackType) and
(next is None or isinstance(next, TracebackType))):
raise TypeError('tb_set_next arguments must be traceback objects')
obj = _Traceback.from_address(id(tb))
if tb.tb_next is not None:
old = _Traceback.from_address(id(tb.tb_next))
old.ob_refcnt -= 1
if next is None:
obj.tb_next = ctypes.POINTER(_Traceback)()
else:
next = _Traceback.from_address(id(next))
next.ob_refcnt += 1
obj.tb_next = ctypes.pointer(next)
return tb_set_next
# try to get a tb_set_next implementation if we don't have transparent
# proxies.
tb_set_next = None
if tproxy is None:
try:
tb_set_next = _init_ugly_crap()
except:
pass
del _init_ugly_crap
| apache-2.0 |
bavardage/statsmodels | statsmodels/sandbox/tsa/examples/example_var.py | 37 | 1218 | """
Look at some macro plots, then do some VARs and IRFs.
"""
import numpy as np
import statsmodels.api as sm
import scikits.timeseries as ts
import scikits.timeseries.lib.plotlib as tplt
from matplotlib import pyplot as plt
data = sm.datasets.macrodata.load()
data = data.data
### Create Timeseries Representations of a few vars
dates = ts.date_array(start_date=ts.Date('Q', year=1959, quarter=1),
end_date=ts.Date('Q', year=2009, quarter=3))
ts_data = data[['realgdp','realcons','cpi']].view(float).reshape(-1,3)
ts_data = np.column_stack((ts_data, (1 - data['unemp']/100) * data['pop']))
ts_series = ts.time_series(ts_data, dates)
fig = tplt.tsfigure()
fsp = fig.add_tsplot(221)
fsp.tsplot(ts_series[:,0],'-')
fsp.set_title("Real GDP")
fsp = fig.add_tsplot(222)
fsp.tsplot(ts_series[:,1],'r-')
fsp.set_title("Real Consumption")
fsp = fig.add_tsplot(223)
fsp.tsplot(ts_series[:,2],'g-')
fsp.set_title("CPI")
fsp = fig.add_tsplot(224)
fsp.tsplot(ts_series[:,3],'y-')
fsp.set_title("Employment")
# Plot real GDP
#plt.subplot(221)
#plt.plot(data['realgdp'])
#plt.title("Real GDP")
# Plot employment
#plt.subplot(222)
# Plot cpi
#plt.subplot(223)
# Plot real consumption
#plt.subplot(224)
#plt.show()
| bsd-3-clause |
wdv4758h/ZipPy | lib-python/3/encodings/charmap.py | 860 | 2084 | """ Generic Python Character Mapping Codec.
Use this codec directly rather than through the automatic
conversion mechanisms supplied by unicode() and .encode().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.charmap_encode
decode = codecs.charmap_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict', mapping=None):
codecs.IncrementalEncoder.__init__(self, errors)
self.mapping = mapping
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, self.mapping)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict', mapping=None):
codecs.IncrementalDecoder.__init__(self, errors)
self.mapping = mapping
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, self.mapping)[0]
class StreamWriter(Codec,codecs.StreamWriter):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamWriter.__init__(self,stream,errors)
self.mapping = mapping
def encode(self,input,errors='strict'):
return Codec.encode(input,errors,self.mapping)
class StreamReader(Codec,codecs.StreamReader):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamReader.__init__(self,stream,errors)
self.mapping = mapping
def decode(self,input,errors='strict'):
return Codec.decode(input,errors,self.mapping)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='charmap',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
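# Usage sketch (illustrative): the charmap codec decodes bytes through an
# explicit table rather than a named encoding, e.g. with an identity map:
#
#   mapping = {i: chr(i) for i in range(256)}
#   text = codecs.charmap_decode(b'abc', 'strict', mapping)[0]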
| bsd-3-clause |
feroda/gasistafelice | gasistafelice/base/utils.py | 6 | 1951 | from django.contrib.contenttypes.models import ContentType
from django.template.defaultfilters import slugify
import os
import datetime
def get_ctype_from_model_label(label):
"""
This helper function takes a model identifier (a string of the form
'app_label.model_name', where `app_label` is as in Django docs and `model_name`
is the model class' name, and return the ContentType instance associated with
the model class. If the label is malformed or there is no model with that label,
return `None`.
"""
try:
(app_label, model_name) = label.split('.')
# ContenType framework expects lowercased model names
model_name = model_name.lower()
ctype = ContentType.objects.get(app_label=app_label, model=model_name)
return ctype
except:
return None
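# Illustrative example:
#
#   get_ctype_from_model_label('auth.User')  # -> ContentType for User
#   get_ctype_from_model_label('bogus')      # -> None (malformed label)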
#-------------------------------------------------------------------------------
# Icon and file path management for resources
def get_resource_icon_path(instance, filename):
return get_attr_file_path(instance, filename, "icon", base_path="images")
def get_association_act_path(instance, filename):
return get_attr_file_path(instance, filename, "association_act")
def get_intent_act_path(instance, filename):
return get_attr_file_path(instance, filename, "intent_act")
def get_pact_path(instance, filename):
return get_attr_file_path(instance, filename, "pact")
def get_attr_file_path(instance, filename, attr_name, base_path="docs"):
if instance.pk:
old_instance = instance.__class__.objects.get(pk=instance.pk)
old_name = getattr(old_instance, attr_name).name
if old_name:
return old_name
ext = filename.split('.')[-1]
d = datetime.datetime.today()
filename = "%s-%s-%s.%s" % (d.strftime("%Y-%m-%s"), slugify(instance.name), attr_name, ext)
return '%s/%s/%s' % (base_path, instance.resource_type, filename)
| agpl-3.0 |
edx/ansible | plugins/inventory/zabbix.py | 119 | 3956 | #!/usr/bin/env python
# (c) 2013, Greg Buehler
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Zabbix Server external inventory script.
========================================
Returns hosts and hostgroups from Zabbix Server.
Configuration is read from `zabbix.ini`.
Tested with Zabbix Server 2.0.6.
"""
import os, sys
import argparse
import ConfigParser
try:
from zabbix_api import ZabbixAPI
except:
print >> sys.stderr, "Error: Zabbix API library must be installed: pip install zabbix-api."
sys.exit(1)
try:
import json
except:
import simplejson as json
class ZabbixInventory(object):
def read_settings(self):
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini')
# server
if config.has_option('zabbix', 'server'):
self.zabbix_server = config.get('zabbix', 'server')
# login
if config.has_option('zabbix', 'username'):
self.zabbix_username = config.get('zabbix', 'username')
if config.has_option('zabbix', 'password'):
self.zabbix_password = config.get('zabbix', 'password')
def read_cli(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
self.options = parser.parse_args()
def hoststub(self):
return {
'hosts': []
}
def get_host(self, api, name):
data = {}
return data
def get_list(self, api):
hostsData = api.host.get({'output': 'extend', 'selectGroups': 'extend'})
data = {}
data[self.defaultgroup] = self.hoststub()
for host in hostsData:
hostname = host['name']
data[self.defaultgroup]['hosts'].append(hostname)
for group in host['groups']:
groupname = group['name']
if not groupname in data:
data[groupname] = self.hoststub()
data[groupname]['hosts'].append(hostname)
return data
def __init__(self):
self.defaultgroup = 'group_all'
self.zabbix_server = None
self.zabbix_username = None
self.zabbix_password = None
self.read_settings()
self.read_cli()
if self.zabbix_server and self.zabbix_username:
try:
api = ZabbixAPI(server=self.zabbix_server)
api.login(user=self.zabbix_username, password=self.zabbix_password)
except BaseException, e:
print >> sys.stderr, "Error: Could not login to Zabbix server. Check your zabbix.ini."
sys.exit(1)
if self.options.host:
data = self.get_host(api, self.options.host)
print json.dumps(data, indent=2)
elif self.options.list:
data = self.get_list(api)
print json.dumps(data, indent=2)
else:
print >> sys.stderr, "usage: --list ..OR.. --host <hostname>"
sys.exit(1)
else:
print >> sys.stderr, "Error: Configuration of server and credentials are required. See zabbix.ini."
sys.exit(1)
ZabbixInventory()
| gpl-3.0 |
sabi0/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/tests/test_geoforms.py | 94 | 2671 | from django.forms import ValidationError
from django.contrib.gis import forms
from django.contrib.gis.geos import GEOSGeometry
from django.utils import unittest
class GeometryFieldTest(unittest.TestCase):
def test00_init(self):
"Testing GeometryField initialization with defaults."
fld = forms.GeometryField()
for bad_default in ('blah', 3, 'FoO', None, 0):
self.assertRaises(ValidationError, fld.clean, bad_default)
def test01_srid(self):
"Testing GeometryField with a SRID set."
# Input that doesn't specify the SRID is assumed to be in the SRID
# of the input field.
fld = forms.GeometryField(srid=4326)
geom = fld.clean('POINT(5 23)')
self.assertEqual(4326, geom.srid)
# Making the field in a different SRID from that of the geometry, and
# asserting it transforms.
fld = forms.GeometryField(srid=32140)
tol = 0.0000001
xform_geom = GEOSGeometry('POINT (951640.547328465 4219369.26171664)', srid=32140)
# The cleaned geometry should be transformed to 32140.
cleaned_geom = fld.clean('SRID=4326;POINT (-95.363151 29.763374)')
self.failUnless(xform_geom.equals_exact(cleaned_geom, tol))
def test02_null(self):
"Testing GeometryField's handling of null (None) geometries."
# Form fields, by default, are required (`required=True`)
fld = forms.GeometryField()
self.assertRaises(forms.ValidationError, fld.clean, None)
# Still not allowed if `null=False`.
fld = forms.GeometryField(required=False, null=False)
self.assertRaises(forms.ValidationError, fld.clean, None)
# This will clean None as a geometry (See #10660).
fld = forms.GeometryField(required=False)
self.assertEqual(None, fld.clean(None))
def test03_geom_type(self):
"Testing GeometryField's handling of different geometry types."
# By default, all geometry types are allowed.
fld = forms.GeometryField()
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.clean(wkt))
pnt_fld = forms.GeometryField(geom_type='POINT')
self.assertEqual(GEOSGeometry('POINT(5 23)'), pnt_fld.clean('POINT(5 23)'))
self.assertRaises(forms.ValidationError, pnt_fld.clean, 'LINESTRING(0 0, 1 1)')
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GeometryFieldTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__=="__main__":
run()
| apache-2.0 |