repo_name | ref | path | copies | content |
|---|---|---|---|---|
dvberkel/servo | refs/heads/master | python/mach/mach/test/test_entry_point.py | 121 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import imp
import os
import sys
from mach.base import MachError
from mach.test.common import TestBase
from mock import patch
from mozunit import main
here = os.path.abspath(os.path.dirname(__file__))
class Entry():
"""Stub replacement for pkg_resources.EntryPoint"""
def __init__(self, providers):
self.providers = providers
def load(self):
def _providers():
return self.providers
return _providers
class TestEntryPoints(TestBase):
"""Test integrating with setuptools entry points"""
provider_dir = os.path.join(here, 'providers')
def _run_mach(self):
return TestBase._run_mach(self, ['help'], entry_point='mach.providers')
@patch('pkg_resources.iter_entry_points')
def test_load_entry_point_from_directory(self, mock):
# Ensure parent module is present otherwise we'll (likely) get
# an error due to unknown parent.
if b'mach.commands' not in sys.modules:
mod = imp.new_module(b'mach.commands')
sys.modules[b'mach.commands'] = mod
mock.return_value = [Entry(['providers'])]
# Mach error raised due to conditions_invalid.py
with self.assertRaises(MachError):
self._run_mach()
@patch('pkg_resources.iter_entry_points')
def test_load_entry_point_from_file(self, mock):
mock.return_value = [Entry([os.path.join('providers', 'basic.py')])]
result, stdout, stderr = self._run_mach()
self.assertIsNone(result)
self.assertIn('cmd_foo', stdout)
# Not enabled in automation because tests are failing.
#if __name__ == '__main__':
# main()
|
thanhphat11/Kernel-Stock-A900-SLK | refs/heads/master | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    # no trailing comma: a trailing comma here would make str a 1-tuple
    str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
    return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
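# Example of add_stats bookkeeping (an illustrative sketch, not part of the
# original script). Note that the stored avg is a smoothed value biased
# toward recent samples, not a true running mean:
#   d = {}
#   add_stats(d, 'sys_read', 1200)
#   add_stats(d, 'sys_read', 800)
#   d['sys_read']   # -> (800, 1200, 1000, 2), i.e. (min, max, avg, count)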
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
piosz/test-infra | refs/heads/master | gubernator/third_party/cloudstorage/errors.py | 147 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Google Cloud Storage specific Files API calls."""
__all__ = ['AuthorizationError',
'check_status',
'Error',
'FatalError',
'FileClosedError',
'ForbiddenError',
'InvalidRange',
'NotFoundError',
'ServerError',
'TimeoutError',
'TransientError',
]
import httplib
class Error(Exception):
"""Base error for all gcs operations.
An error can happen on the GAE side or on the GCS server side.
For details on a particular GCS HTTP response code, see
https://developers.google.com/storage/docs/reference-status#standardcodes
"""
class TransientError(Error):
"""TransientError could be retried."""
class TimeoutError(TransientError):
"""HTTP 408 timeout."""
class FatalError(Error):
"""FatalError shouldn't be retried."""
class FileClosedError(FatalError):
"""File is already closed.
This can happen when the upload has finished but 'write' is called on
a stale upload handle.
"""
class NotFoundError(FatalError):
"""HTTP 404 resource not found."""
class ForbiddenError(FatalError):
"""HTTP 403 Forbidden.
While GCS replies with a 403 error for many reasons, the most common
one is that bucket permissions are not set up correctly for your app.
"""
class AuthorizationError(FatalError):
"""HTTP 401 authentication required.
Unauthorized request has been received by GCS.
This error is mostly handled by GCS client. GCS client will request
a new access token and retry the request.
"""
class InvalidRange(FatalError):
"""HTTP 416 RequestRangeNotSatifiable."""
class ServerError(TransientError):
"""HTTP >= 500 server side error."""
def check_status(status, expected, path, headers=None,
resp_headers=None, body=None, extras=None):
"""Check HTTP response status is expected.
Args:
status: HTTP response status. int.
expected: a list of expected statuses. A list of ints.
path: filename or a path prefix.
headers: HTTP request headers.
resp_headers: HTTP response headers.
body: HTTP response body.
extras: extra info to be logged verbatim if error occurs.
Raises:
AuthorizationError: if authorization failed.
NotFoundError: if an object that's expected to exist doesn't.
TimeoutError: if HTTP request timed out.
ServerError: if server experienced some errors.
FatalError: if any other unexpected errors occurred.
"""
if status in expected:
return
msg = ('Expected status %r from Google Storage, but got status %d.\n'
'Path: %r.\n'
'Request headers: %r.\n'
'Response headers: %r.\n'
'Body: %r.\n'
'Extra info: %r.\n' %
(expected, status, path, headers, resp_headers, body, extras))
if status == httplib.UNAUTHORIZED:
raise AuthorizationError(msg)
elif status == httplib.FORBIDDEN:
raise ForbiddenError(msg)
elif status == httplib.NOT_FOUND:
raise NotFoundError(msg)
elif status == httplib.REQUEST_TIMEOUT:
raise TimeoutError(msg)
elif status == httplib.REQUESTED_RANGE_NOT_SATISFIABLE:
raise InvalidRange(msg)
elif (status == httplib.OK and 308 in expected and
httplib.OK not in expected):
raise FileClosedError(msg)
elif status >= 500:
raise ServerError(msg)
else:
raise FatalError(msg)
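# Example (an illustrative sketch): a 404 reply to a request that expected
# 200 surfaces as the non-retriable NotFoundError.
#   try:
#     check_status(404, [200], '/bucket/missing-object')
#   except NotFoundError:
#     pass  # object genuinely absent; FatalError subclasses are not retried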
|
hrjn/scikit-learn | refs/heads/master | sklearn/decomposition/dict_learning.py | 19 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000, check_input=True, verbose=0):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix.
dictionary : array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram : None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov : array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init : array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter : int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov : boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
check_input : boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose : int
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code : array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=verbose, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
if init is not None:
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
# TODO: Should verbose argument be passed to this?
new_code = orthogonal_mp_gram(
Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
tol=None, norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars", '
'"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1, check_input=True, verbose=0):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix
dictionary : array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram : array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov : array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs : int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init : array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter : int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov : boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs : int, optional
Number of parallel jobs to run.
check_input : boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose : int, optional
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code : array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if check_input:
if algorithm == 'lasso_cd':
dictionary = check_array(dictionary, order='C', dtype='float64')
X = check_array(X, order='C', dtype='float64')
else:
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter,
check_input=False,
verbose=verbose)
# This ensures that the dimensionality of code is always 2,
# consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
check_input=False)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
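# Example usage of sparse_encode (a minimal illustrative sketch, not part of
# the original source; shapes follow the docstring above):
#   rng = np.random.RandomState(0)
#   D = rng.randn(8, 10)                                # (n_components, n_features)
#   D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]   # unit-norm rows for omp
#   X = rng.randn(5, 10)                                # (n_samples, n_features)
#   code = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=3)
#   code.shape                                          # -> (5, 8)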
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary : array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y : array of shape (n_features, n_samples)
Data matrix.
code : array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2 : bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
dictionary : array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : int,
Sparsity controlling parameter.
max_iter : int,
Maximum number of iterations to perform.
tol : float,
Tolerance for the stopping condition.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init : array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
verbose :
Degree of output the procedure will print.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary : array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors : array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
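# Example usage of dict_learning (a minimal illustrative sketch, not part of
# the original source):
#   X = np.random.RandomState(0).randn(20, 10)
#   code, dictionary, errors = dict_learning(X, n_components=5, alpha=1,
#                                            random_state=0)
#   code.shape, dictionary.shape    # -> ((20, 5), (5, 10))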
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
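# Example usage of dict_learning_online (a minimal illustrative sketch, not
# part of the original source):
#   X = np.random.RandomState(0).randn(20, 10)
#   code, dictionary = dict_learning_online(X, n_components=5, alpha=1,
#                                           n_iter=10, random_state=0)
#   code.shape, dictionary.shape    # -> ((20, 5), (5, 10))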
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
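# Example usage of SparseCoder (a minimal illustrative sketch; X stands for
# an assumed array of shape (n_samples, 10)):
#   D = np.eye(10)                  # fixed dictionary; rows already unit-norm
#   coder = SparseCoder(dictionary=D, transform_algorithm='omp',
#                       transform_n_nonzero_coefs=2)
#   codes = coder.fit(X).transform(X)   # fit() is a no-op; codes: (n_samples, 10)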
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
.. versionadded:: 0.17
*cd* coordinate descent method to improve speed.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
.. versionadded:: 0.17
*lasso_cd* coordinate descent method to improve speed.
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
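# Example usage of DictionaryLearning (a minimal illustrative sketch, not
# part of the original source):
#   X = np.random.RandomState(0).randn(20, 10)
#   dl = DictionaryLearning(n_components=5, alpha=1, max_iter=20,
#                           random_state=0).fit(X)
#   dl.components_.shape             # -> (5, 10)
#   dl.transform(X).shape            # -> (20, 5)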
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset : integer, optional
The number of iteration on data batches that has been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
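# Example of online fitting with partial_fit (a minimal illustrative sketch;
# X stands for an assumed data array):
#   mbdl = MiniBatchDictionaryLearning(n_components=5, alpha=1, n_iter=10,
#                                      random_state=0)
#   for batch in np.array_split(X, 4):   # stream the data in mini-batches
#       mbdl.partial_fit(batch)
#   codes = mbdl.transform(X)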
|
gsmaxwell/phase_offset_rx | refs/heads/master | gnuradio-core/src/lib/filter/generate_gr_fir_util.py | 17 | #!/usr/bin/env python
#
# Copyright 2003,2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from generate_utils import *
def make_info_struct (out, sig):
out.write (
'''
struct GR_CORE_API gr_fir_%s_info {
const char *name; // implementation name, e.g., "generic", "SSE", "3DNow!"
gr_fir_%s *(*create)(const std::vector<%s> &taps);
};
''' % (sig, sig, tap_type(sig)))
def make_create (out, sig):
out.write (''' static gr_fir_%s *create_gr_fir_%s (const std::vector<%s> &taps);
''' % (sig, sig, tap_type (sig)))
def make_info (out, sig):
out.write (''' static void get_gr_fir_%s_info (std::vector<gr_fir_%s_info> *info);
''' % (sig, sig))
# ----------------------------------------------------------------
def make_gr_fir_util_h ():
out = open_and_log_name ('gr_fir_util.h', 'w')
if not out:
return
out.write (copyright)
out.write (
'''
/*
* WARNING: This file is automatically generated by
* generate_gr_fir_util.py.
*
* Any changes made to this file will be overwritten.
*/
#ifndef INCLUDED_GR_FIR_UTIL_H
#define INCLUDED_GR_FIR_UTIL_H
/*!
* \\brief routines to create gr_fir_XXX's
*
* This class handles selecting the fastest version of the finite
* impulse response filter available for your platform. This
* interface should be used by the rest of the system for creating
* gr_fir_XXX's.
*
* The trailing suffix has the form _IOT where I codes the input type,
* O codes the output type, and T codes the tap type.
* I,O,T are elements of the set 's' (short), 'f' (float), 'c' (gr_complex),
* 'i' (int)
*/
#include <gr_core_api.h>
#include <gr_types.h>
''')
for sig in fir_signatures:
out.write ('class gr_fir_%s;\n' % sig);
out.write ('\n// structures returned by get_gr_fir_XXX_info methods\n\n')
for sig in fir_signatures:
make_info_struct (out, sig)
out.write ('''
struct GR_CORE_API gr_fir_util {
// create a fast version of gr_fir_XXX.
''')
for sig in fir_signatures:
make_create (out, sig)
out.write ('''
// Get information about all gr_fir_XXX implementations.
// This is useful for benchmarking, testing, etc without having to
// know a priori what's linked into this image
//
// The caller must pass in a valid pointer to a vector.
// The vector will be filled with structs describing the
// available implementations.
''')
for sig in fir_signatures:
make_info (out, sig)
out.write ('''
};
#endif /* INCLUDED_GR_FIR_UTIL_H */
''')
out.close ()
# ----------------------------------------------------------------
def make_constructor_cc (out, sig):
out.write (
'''
gr_fir_%s *
gr_fir_util::create_gr_fir_%s (const std::vector<%s> &taps)
{
return gr_fir_sysconfig_singleton()->create_gr_fir_%s (taps);
}
''' % (sig, sig, tap_type (sig), sig))
def make_info_cc (out, sig):
out.write (
'''
void
gr_fir_util::get_gr_fir_%s_info (std::vector<gr_fir_%s_info> *info)
{
gr_fir_sysconfig_singleton()->get_gr_fir_%s_info (info);
}
''' % (sig, sig, sig))
def make_gr_fir_util_cc ():
out = open_and_log_name ('gr_fir_util.cc', 'w')
if not out:
return
out.write (copyright)
out.write ('''
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <gr_fir_util.h>
#include <gr_fir_sysconfig.h>
//
// There's no problem that can't be solved by the addition of
// another layer of indirection...
//
// --- constructors ---
''')
for sig in fir_signatures:
make_constructor_cc (out, sig)
out.write ('''
// --- info gatherers ---
''')
for sig in fir_signatures:
make_info_cc (out, sig)
out.close ()
# ----------------------------------------------------------------
def generate ():
make_gr_fir_util_h ()
make_gr_fir_util_cc ()
if __name__ == '__main__':
generate ()
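# Running this generator directly (python generate_gr_fir_util.py) emits
# gr_fir_util.h and gr_fir_util.cc via the make_gr_fir_util_h() and
# make_gr_fir_util_cc() calls above.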
|
JonathanSeguin/Mariana | refs/heads/master | Mariana/network.py | 1 | from collections import OrderedDict
from wrappers import TheanoFunction
import Mariana.settings as MSET
import types
__all__= ["Network", "OutputMap"]
# TYPE_INPUT_LAYER = "input"
# TYPE_OUTPUT_LAYER = "output"
# TYPE_HIDDEN_LAYER = "hidden"
def loadModel(filename) :
"""Shorthand for Network.load"""
return Network.load(filename)
class OutputMap(object):
"""
Encapsulates outputs as well as their theano functions.
The role of an output map object is to apply a function such as theano_train to the set of outputs it belongs to
"""
def __init__(self, name, network):
self.name = name
self.network = network
self.outputFcts = {}
def printGraph(self, outputLayer) :
"""Print the theano graph of the function associated with a given output"""
if type(outputLayer) is types.StringType :
ol = self.network[outputLayer]
else :
ol = outputLayer
self.outputFcts[ol].printGraph()
def addOutput(self, outputLayer, fct) :
self.outputFcts[outputLayer] = fct
def callTheanoFct(self, outputLayer, **kwargs) :
if type(outputLayer) is types.StringType :
ol = self.network.layers[outputLayer]
else :
ol = outputLayer
return self.outputFcts[ol](**kwargs)
def __call__(self, outputLayer, **kwargs) :
return self.callTheanoFct(outputLayer, **kwargs)
def __repr__(self) :
os = []
for o, v in self.outputFcts.iteritems() :
os.append(o.name)
os = ', '.join(os)
return "<theano fct '%s' for layers: '%s'>" % (self.name, os)
class Network(object) :
"""All theano functions of all layers are accessible through the network interface **network.x(...)**."""
def __init__(self) :
self.inputs = OrderedDict()
self.layers = OrderedDict()
self.outputs = OrderedDict()
self.layerAppelidos = {}
self.edges = OrderedDict()
self.outConnections = {}
self.inConnections = {}
self.parameters = []
self._mustInit = True
self.outputMaps = {}
self.log = []
def logEvent(self, entity, message, parameters = {}) :
"Adds a log event to self.log. Entity can be anything hashable, Message should be a string and parameters and dict: param_name => value"
import time, types
assert type(message) is types.StringType
assert type(parameters) is types.DictType
entry = {
"date": time.ctime(),
"timestamp": time.time(),
"message": message,
"parameters": parameters,
"entity": entity
}
self.log.append(entry)
def logNetworkEvent(self, message, parameters = {}) :
self.logEvent("Network", message, parameters)
def logLayerEvent(self, layer, message, parameters = {}) :
"Adds a log event to self.log. Message should be a string and parameters and dict: param_name => value"
import time, types
assert type(message) is types.StringType
assert type(parameters) is types.DictType
self.logEvent(layer.name, message, parameters)
def printLog(self) :
"Print a very pretty version of self.log. The log should contain all meaningful events in a chronological order"
errMsg = ""
try :
self.init()
except Exception as e :
errMsg = "----OUCH----\nUnable to initialize network: %s\n------------" % e
t = " The story of how it all began "
t = "="*len(t) + "\n" + t + "\n" + "="*len(t)
es = []
for e in self.log :
ps = []
for param, value in e["parameters"].iteritems() :
ps.append( " -%s: %s" % (param, value) )
ps = '\n' + '\n'.join(ps)
# es.append("-[%s]@%s(%s), %s.\n%s" % (e["entity"], e["timestamp"], e["date"], e["message"], ps))
es.append("-@%s(%s):\n %s -> %s.%s" % (e["timestamp"], e["date"], e["entity"], e["message"], ps))
es = '\n'.join(es)
print "\n" + t + "\n\n" + es + "\n" + errMsg + "\n"
def _addEdge(self, layer1Name, layer2Name) :
"""Add a connection between two layers"""
layer1 = self.layers[layer1Name]
layer2 = self.layers[layer2Name]
self.edges[ (layer1.name, layer2.name) ] = (layer1, layer2)
try :
self.outConnections[layer1].add(layer2)
except :
self.outConnections[layer1] = set([layer2])
try :
self.inConnections[layer2].add(layer1)
except :
self.inConnections[layer2] = set([layer1])
self.logNetworkEvent("New edge %s > %s" % (layer1.name, layer2.name))
def _addLayer(self, h) :
"""adds a layer to the network"""
try :
if self.layerAppelidos[h.name] != h.appelido :
raise ValueError("There's already a layer by the name of '%s'" % (h.name))
except KeyError :
self.layerAppelidos[h.name] = h.appelido
self.layers[h.name] = h
try :
self.inConnections[h] = self.inConnections[h].union(h.network.inConnections[h])
self.outConnections[h] = self.outConnections[h].union(h.network.outConnections[h])
except KeyError :
try :
self.inConnections[h] = h.network.inConnections[h]
self.outConnections[h] = h.network.outConnections[h]
except KeyError :
self.inConnections[h] = set()
self.outConnections[h] = set()
# if MSET.TYPE_INPUT_LAYER in h.types:
# self.inputs[h.name] = h
# self.logNetworkEvent("New Input layer %s" % (h.name))
# elif MSET.TYPE_OUTPUT_LAYER in h.types :
# self.outputs[h.name] = h
# self.logNetworkEvent("New Output layer %s" % (h.name))
# else :
# self.logNetworkEvent("New Hidden layer %s" % (h.name))
def merge(self, fromLayer, toLayer) :
"""Merges the networks of two layers together. fromLayer must be part of the self"""
self.logNetworkEvent("Merging nets: %s and %s" % (fromLayer.name, toLayer.name))
if fromLayer.name not in self.layers :
raise ValueError("from layer '%s' is not part of this network" % fromLayer.name)
newLayers = toLayer.network.layers.values()
for l in newLayers :
self._addLayer(l)
for e in toLayer.network.edges.iterkeys() :
self._addEdge(e[0], e[1])
self._addEdge(fromLayer.name, toLayer.name)
for l in newLayers :
l.network = self
self.inputs = OrderedDict()
self.outputs = OrderedDict()
for name, layer in self.layers.iteritems() :
if MSET.TYPE_INPUT_LAYER in layer.types:
self.inputs[layer.name] = layer
self.logNetworkEvent("Registering Input layer %s" % (layer.name))
if MSET.TYPE_OUTPUT_LAYER in layer.types :
self.outputs[layer.name] = layer
self.logNetworkEvent("Registering Output layer %s" % (layer.name))
if MSET.TYPE_HIDDEN_LAYER in layer.types :
self.logNetworkEvent("Registering Hidden layer %s" % (layer.name))
def initParameters(self, forceReset = False) :
"""Initializes the parameters of all layers but does nothing else.
Call this before tying parameters together::
model = i > h > o
model.initParameters()
h.W = o.W.T
model.train(...)
"""
for l in self.layers.itervalues() :
l._initParameters(forceReset)
def init(self, forceInit=False) :
"Initialiases the network by initialising every layer."
if self._mustInit or forceInit :
self.logNetworkEvent("Initialization begins!")
print("\n" + MSET.OMICRON_SIGNATURE)
if len(self.inputs) < 1 :
raise ValueError("Network has no inputs")
self.initParameters(forceReset=False)
for inp in self.inputs.itervalues() :
inp._initA()
for l in self.layers.itervalues() :
l._initB()
self.parameters.extend(l.getParameters())
for o in self.layers.itervalues() :
for k, v in o.__dict__.iteritems() :
if ( v.__class__ is TheanoFunction ) or issubclass(v.__class__, TheanoFunction) :
if k not in self.outputMaps :
self.outputMaps[k] = OutputMap(k, self)
self.outputMaps[k].addOutput(o, v)
self._mustInit = False
def help(self) :
"""prints the list of available model functions, such as train, test,..."""
self.init()
os = []
for o in self.outputMaps.itervalues() :
os.append(repr(o))
os = '\n\t'.join(os)
print "Available model functions:\n%s\n" % os
@classmethod
def isLayer(cls, obj) :
try :
return obj.isLayer
except AttributeError :
return False
def save(self, filename) :
import cPickle, pickle
self.init()
ext = '.mar.mdl.pkl'
if filename.find(ext) < 0 :
fn = filename + ext
else :
fn = filename
res = {
"edges": self.edges.keys(),
"log": self.log,
"layers": {}
}
for l in self.layers.itervalues() :
summary = {
"class": l.__class__,
"arguments": {
"args": [],
"kwargs": {}
},
"parameters": {},
"needs": set()
}
for v in l.creationArguments["args"] :
if self.isLayer(v) :
if v.name not in self.layers :
raise ValueError("Unable to save, layer '%s' is an argument to layer '%s' but is not part of the network" % (v.name, l.name))
summary["arguments"]["args"].append("MARLAYER.%s" % v.name)
summary["needs"].add(v.name)
else :
summary["arguments"]["args"].append(v)
for k, v in l.creationArguments["kwargs"].iteritems() :
if self.isLayer(v) :
if v.name not in self.layers :
raise ValueError("Unable to save, layer '%s' is an argument to layer '%s' but is not part of the network" % (v.name, l.name))
summary["arguments"]["kwargs"][k] = "MARLAYER.%s" % v.name
summary["needs"].add(v.name)
else :
summary["arguments"]["kwargs"][k] = v
for k, v in l.getParameterDict().iteritems() :
summary["parameters"][k] = v
res["layers"][l.name] = summary
f = open(fn, 'wb')
cPickle.dump(res, f, pickle.HIGHEST_PROTOCOL)
f.close()
@classmethod
def load(cls, filename) :
"""Loads a model from disk"""
import cPickle
ext = '.mar.mdl.pkl'
if not filename.endswith(ext) :
fn = filename + ext
else :
fn = filename
f = open(fn, 'rb')
model = cPickle.load(f)
expandedLayers = {}
while len(expandedLayers) < len(model["layers"]) :
for name, stuff in model["layers"].iteritems() :
if name not in expandedLayers :
if len(stuff["needs"]) == 0 :
expandedLayers[name] = stuff["class"](*stuff["arguments"]["args"], **stuff["arguments"]["kwargs"])
else :
if len(stuff["needs"] - set(expandedLayers.keys())) == 0 :
for i, v in enumerate(stuff["arguments"]["args"]) :
if type(v) == types.StringType and v.find("MARLAYER") == 0 :
stuff["arguments"]["args"][i] = expandedLayers[v.split(".")[1]]
for k, v in stuff["arguments"]["kwargs"].iteritems() :
if type(v) == types.StringType and v.find("MARLAYER") == 0 :
stuff["arguments"]["kwargs"][k] = expandedLayers[v.split(".")[1]]
expandedLayers[name] = stuff["class"](*stuff["arguments"]["args"], **stuff["arguments"]["kwargs"])
for l in expandedLayers.itervalues() :
for k, v in model["layers"][l.name]["parameters"].iteritems() :
# print l, k, v.get_value()
try:
l.updateParameter(k, v)
except :
l.initParameter(k, v)
l._mustReset = False
for l1, l2 in model["edges"] :
network = expandedLayers[l1] > expandedLayers[l2]
return network
def toDOT(self, name, forceInit = True) :
"""returns a string representing the network in the DOT language.
If forceInit, the network will first try to initialize each layer
before constructing the graph"""
import time
if forceInit :
self.init()
com = "//Mariana network DOT representation generated on %s" % time.ctime()
s = '#COM#\ndigraph "%s"{\n#HEAD#;\n\n#GRAPH#;\n}' % name
headers = []
aidi = 0
aidis = {}
for l in self.layers.itervalues() :
aidis[l.name] = "layer%s" % aidi
headers.append("\t" + aidis[l.name] + l._dot_representation())
aidi += 1
g = []
for e in self.edges :
g.append("\t%s -> %s" % (aidis[e[0]], aidis[e[1]]))
s = s.replace("#COM#", com)
s = s.replace("#HEAD#", ';\n'.join(headers))
s = s.replace("#GRAPH#", ';\n'.join(g))
s = s.replace("-", '_')
return s
def saveHTML(self, name, forceInit = True) :
"""Creates an HTML file with the graph representation. Heavily inspired from: http://stackoverflow.com/questions/22595493/reading-dot-files-in-javascript-d3"""
from Mariana.HTML_Templates.aqua import getHTML
import time
temp = getHTML(self.toDOT(name, forceInit), name, time.ctime())
f = open(name + '.mariana.dot.html', 'wb')
f.write(temp)
f.close()
def saveDOT(self, name, forceInit = True) :
"saves the current network as a graph in the DOT format into the file name.mariana.dot"
f = open(name + '.mariana.dot', 'wb')
f.write(self.toDOT(name, forceInit))
f.close()
def __getitem__(self, l) :
"""get a layer by name"""
return self.layers[l]
def __repr__(self) :
return "<Net (%s layers): %s > ... > [%s]>" % (len(self.layers), self.inputs.keys(), self.outputs.keys())
def __getattribute__(self, k) :
"""
All theano functions are accessible through the network interface network.x(). Here x is called a model function.
"""
try :
return object.__getattribute__(self, k)
except AttributeError as e :
# A bit hacky, but it solves the following: pickle asks for attributes that networks don't have,
# which triggers initialization of free outputs; theano then complains that layer.outputs is None and everything crashes miserably.
if k == "__getstate__" or k == "__slots__" :
raise e
outs = object.__getattribute__(self, 'outputs')
init = object.__getattribute__(self, 'init')
init()
maps = object.__getattribute__(self, 'outputMaps')
try :
return maps[k]
except KeyError :
raise e
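# A minimal usage sketch (Input, Hidden and SoftmaxClassifier are assumed
# layer names for illustration; '>' connecting layers into a network, init(),
# save() and load() are taken from the code above):
#
# i = Input(28 * 28, name="inp")
# h = Hidden(300, name="hid")
# o = SoftmaxClassifier(10, name="out")
# model = i > h > o
# model.init()
# model.save("mymodel") # writes mymodel.mar.mdl.pkl
# model2 = model.__class__.load("mymodel")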
|
manastech/de-bee | refs/heads/master | gdata/media/data.py | 132 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Yahoo! Media RSS Extension"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
MEDIA_TEMPLATE = '{http://search.yahoo.com/mrss//}%s'
class MediaCategory(atom.core.XmlElement):
"""Describes a media category."""
_qname = MEDIA_TEMPLATE % 'category'
scheme = 'scheme'
label = 'label'
class MediaCopyright(atom.core.XmlElement):
"""Describes a media copyright."""
_qname = MEDIA_TEMPLATE % 'copyright'
url = 'url'
class MediaCredit(atom.core.XmlElement):
"""Describes a media credit."""
_qname = MEDIA_TEMPLATE % 'credit'
role = 'role'
scheme = 'scheme'
class MediaDescription(atom.core.XmlElement):
"""Describes a media description."""
_qname = MEDIA_TEMPLATE % 'description'
type = 'type'
class MediaHash(atom.core.XmlElement):
"""Describes a media hash."""
_qname = MEDIA_TEMPLATE % 'hash'
algo = 'algo'
class MediaKeywords(atom.core.XmlElement):
"""Describes a media keywords."""
_qname = MEDIA_TEMPLATE % 'keywords'
class MediaPlayer(atom.core.XmlElement):
"""Describes a media player."""
_qname = MEDIA_TEMPLATE % 'player'
height = 'height'
width = 'width'
url = 'url'
class MediaRating(atom.core.XmlElement):
"""Describes a media rating."""
_qname = MEDIA_TEMPLATE % 'rating'
scheme = 'scheme'
class MediaRestriction(atom.core.XmlElement):
"""Describes a media restriction."""
_qname = MEDIA_TEMPLATE % 'restriction'
relationship = 'relationship'
type = 'type'
class MediaText(atom.core.XmlElement):
"""Describes a media text."""
_qname = MEDIA_TEMPLATE % 'text'
end = 'end'
lang = 'lang'
type = 'type'
start = 'start'
class MediaThumbnail(atom.core.XmlElement):
"""Describes a media thumbnail."""
_qname = MEDIA_TEMPLATE % 'thumbnail'
time = 'time'
url = 'url'
width = 'width'
height = 'height'
class MediaTitle(atom.core.XmlElement):
"""Describes a media title."""
_qname = MEDIA_TEMPLATE % 'title'
type = 'type'
class MediaContent(atom.core.XmlElement):
"""Describes a media content."""
_qname = MEDIA_TEMPLATE % 'content'
bitrate = 'bitrate'
is_default = 'isDefault'
medium = 'medium'
height = 'height'
credit = [MediaCredit]
language = 'language'
hash = MediaHash
width = 'width'
player = MediaPlayer
url = 'url'
file_size = 'fileSize'
channels = 'channels'
expression = 'expression'
text = [MediaText]
samplingrate = 'samplingrate'
title = MediaTitle
category = [MediaCategory]
rating = [MediaRating]
type = 'type'
description = MediaDescription
framerate = 'framerate'
thumbnail = [MediaThumbnail]
duration = 'duration'
copyright = MediaCopyright
keywords = MediaKeywords
restriction = [MediaRestriction]
class MediaGroup(atom.core.XmlElement):
"""Describes a media group."""
_qname = MEDIA_TEMPLATE % 'group'
credit = [MediaCredit]
content = [MediaContent]
copyright = MediaCopyright
description = MediaDescription
category = [MediaCategory]
player = MediaPlayer
rating = [MediaRating]
hash = MediaHash
title = MediaTitle
keywords = MediaKeywords
restriction = [MediaRestriction]
thumbnail = [MediaThumbnail]
text = [MediaText]
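# Illustrative only (assumes atom.core.XmlElement's keyword constructor and
# to_string(), which these classes inherit):
#
# thumb = MediaThumbnail(url='http://example.com/thumb.jpg', width='75', height='50')
# group = MediaGroup(title=MediaTitle(text='My album'), thumbnail=[thumb])
# print group.to_string()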
|
moinulkuet/machine-learning | refs/heads/master | Part 3 - Classification/Section 20 - Random Forest Classification/random_forest_classification.py | 6 | # Random Forest Classification
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
# sklearn.cross_validation was removed in scikit-learn 0.20; model_selection is its replacement
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting Random Forest Classification to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
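# cm[i, j] counts samples with true label i and predicted label j, so for
# this binary problem the accuracy can be read off the diagonal:
# accuracy = float(cm[0, 0] + cm[1, 1]) / cm.sum()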
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Random Forest Classification (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Random Forest Classification (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show() |
Lh4cKg/sl4a | refs/heads/master | python/src/Lib/test/fork_wait.py | 58 | """This test case provides support for checking forking and wait behavior.
To test different wait behavior, override the wait_impl method.
We want fork1() semantics -- only the forking thread survives in the
child after a fork().
On some systems (e.g. Solaris without posix threads) we find that all
active threads survive in the child after a fork(); this is an error.
While BeOS doesn't officially support fork and native threading in
the same application, the present example should work just fine. DC
"""
import os, sys, time, thread, unittest
LONGSLEEP = 2
SHORTSLEEP = 0.5
NUM_THREADS = 4
class ForkWait(unittest.TestCase):
def setUp(self):
self.alive = {}
self.stop = 0
def f(self, id):
while not self.stop:
self.alive[id] = os.getpid()
try:
time.sleep(SHORTSLEEP)
except IOError:
pass
def wait_impl(self, cpid):
for i in range(10):
# waitpid() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
spid, status = os.waitpid(cpid, os.WNOHANG)
if spid == cpid:
break
time.sleep(2 * SHORTSLEEP)
self.assertEquals(spid, cpid)
self.assertEquals(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
def test_wait(self):
for i in range(NUM_THREADS):
thread.start_new(self.f, (i,))
time.sleep(LONGSLEEP)
a = self.alive.keys()
a.sort()
self.assertEquals(a, range(NUM_THREADS))
prefork_lives = self.alive.copy()
if sys.platform in ['unixware7']:
cpid = os.fork1()
else:
cpid = os.fork()
if cpid == 0:
# Child
time.sleep(LONGSLEEP)
n = 0
for key in self.alive:
if self.alive[key] != prefork_lives[key]:
n += 1
os._exit(n)
else:
# Parent
self.wait_impl(cpid)
# Tell threads to die
self.stop = 1
time.sleep(2*SHORTSLEEP) # Wait for threads to die
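# A minimal sketch of the override mentioned in the docstring above: a
# hypothetical subclass (not part of the original suite) exercising plain
# blocking waitpid() semantics instead of the WNOHANG polling loop.
class ForkWaitBlocking(ForkWait):
    def wait_impl(self, cpid):
        # block until the child exits
        spid, status = os.waitpid(cpid, 0)
        self.assertEquals(spid, cpid)
        self.assertEquals(status, 0, "cause = %d, exit = %d" % (status & 0xff, status >> 8))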
|
penpen/graphite-web | refs/heads/master | webapp/graphite/render/views.py | 18 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import csv
import math
import pytz
from datetime import datetime
from time import time
from random import shuffle
from httplib import CannotSendRequest
from urllib import urlencode
from urlparse import urlsplit, urlunsplit, parse_qs
from cStringIO import StringIO
try:
import cPickle as pickle
except ImportError:
import pickle
from graphite.compat import HttpResponse
from graphite.util import getProfileByUsername, json, unpickle
from graphite.remote_storage import HTTPConnectionWithTimeout
from graphite.logger import log
from graphite.render.evaluator import evaluateTarget
from graphite.render.attime import parseATTime
from graphite.render.functions import PieFunctions
from graphite.render.hashing import hashRequest, hashData
from graphite.render.glyph import GraphTypes
from django.http import HttpResponseServerError, HttpResponseRedirect
from django.template import Context, loader
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.utils.cache import add_never_cache_headers, patch_response_headers
def renderView(request):
start = time()
(graphOptions, requestOptions) = parseOptions(request)
useCache = 'noCache' not in requestOptions
cacheTimeout = requestOptions['cacheTimeout']
requestContext = {
'startTime' : requestOptions['startTime'],
'endTime' : requestOptions['endTime'],
'localOnly' : requestOptions['localOnly'],
'template' : requestOptions['template'],
'data' : []
}
data = requestContext['data']
# First we check the request cache
if useCache:
requestKey = hashRequest(request)
cachedResponse = cache.get(requestKey)
if cachedResponse:
log.cache('Request-Cache hit [%s]' % requestKey)
log.rendering('Returned cached response in %.6f' % (time() - start))
return cachedResponse
else:
log.cache('Request-Cache miss [%s]' % requestKey)
# Now we prepare the requested data
if requestOptions['graphType'] == 'pie':
for target in requestOptions['targets']:
if target.find(':') >= 0:
try:
name,value = target.split(':',1)
value = float(value)
except:
raise ValueError("Invalid target '%s'" % target)
data.append( (name,value) )
else:
seriesList = evaluateTarget(requestContext, target)
for series in seriesList:
func = PieFunctions[requestOptions['pieMode']]
data.append( (series.name, func(requestContext, series) or 0 ))
elif requestOptions['graphType'] == 'line':
# Let's see if at least our data is cached
if useCache:
targets = requestOptions['targets']
startTime = requestOptions['startTime']
endTime = requestOptions['endTime']
dataKey = hashData(targets, startTime, endTime)
cachedData = cache.get(dataKey)
if cachedData:
log.cache("Data-Cache hit [%s]" % dataKey)
else:
log.cache("Data-Cache miss [%s]" % dataKey)
else:
cachedData = None
if cachedData is not None:
requestContext['data'] = data = cachedData
else: # Have to actually retrieve the data now
for target in requestOptions['targets']:
if not target.strip():
continue
t = time()
seriesList = evaluateTarget(requestContext, target)
log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
data.extend(seriesList)
if useCache:
cache.add(dataKey, data, cacheTimeout)
# If data is all we needed, we're done
format = requestOptions.get('format')
if format == 'csv':
response = HttpResponse(content_type='text/csv')
writer = csv.writer(response, dialect='excel')
for series in data:
for i, value in enumerate(series):
timestamp = datetime.fromtimestamp(series.start + (i * series.step), requestOptions['tzinfo'])
writer.writerow((series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"), value))
return response
if format == 'json':
series_data = []
if 'maxDataPoints' in requestOptions and any(data):
startTime = min([series.start for series in data])
endTime = max([series.end for series in data])
timeRange = endTime - startTime
maxDataPoints = requestOptions['maxDataPoints']
for series in data:
numberOfDataPoints = timeRange/series.step
if maxDataPoints < numberOfDataPoints:
valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
secondsPerPoint = int(valuesPerPoint * series.step)
# Nudge start over a little bit so that the consolidation bands align with each call
# removing 'jitter' seen when refreshing.
nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
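# Worked example: with series.start = 103, series.step = 10 and
# secondsPerPoint = 60, nudge = 60 + (103 % 10) - (103 % 60) = 20, so the
# start moves to 123 and valuesToLose = 20 / 10 = 2; successive refreshes
# then land on the same consolidation band boundaries.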
series.start = series.start + nudge
valuesToLose = int(nudge/series.step)
for r in range(1, valuesToLose):
del series[0]
series.consolidate(valuesPerPoint)
timestamps = range(int(series.start), int(series.end) + 1, int(secondsPerPoint))
else:
timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
datapoints = zip(series, timestamps)
series_data.append(dict(target=series.name, datapoints=datapoints))
else:
for series in data:
timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
datapoints = zip(series, timestamps)
series_data.append(dict(target=series.name, datapoints=datapoints))
if 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
content_type='text/javascript')
else:
response = HttpResponse(content=json.dumps(series_data),
content_type='application/json')
if useCache:
patch_response_headers(response, cache_timeout=cacheTimeout)
else:
add_never_cache_headers(response)
return response
if format == 'raw':
response = HttpResponse(content_type='text/plain')
for series in data:
response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
response.write( ','.join(map(str,series)) )
response.write('\n')
log.rendering('Total rawData rendering time %.6f' % (time() - start))
return response
if format == 'svg':
graphOptions['outputFormat'] = 'svg'
if format == 'pickle':
response = HttpResponse(content_type='application/pickle')
seriesInfo = [series.getInfo() for series in data]
pickle.dump(seriesInfo, response, protocol=-1)
log.rendering('Total pickle rendering time %.6f' % (time() - start))
return response
# We've got the data, now to render it
graphOptions['data'] = data
if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
image = delegateRendering(requestOptions['graphType'], graphOptions)
else:
image = doImageRender(requestOptions['graphClass'], graphOptions)
useSVG = graphOptions.get('outputFormat') == 'svg'
if useSVG and 'jsonp' in requestOptions:
response = HttpResponse(
content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
content_type='text/javascript')
else:
response = buildResponse(image, 'image/svg+xml' if useSVG else 'image/png')
if useCache:
cache.add(requestKey, response, cacheTimeout)
patch_response_headers(response, cache_timeout=cacheTimeout)
else:
add_never_cache_headers(response)
log.rendering('Total rendering time %.6f seconds' % (time() - start))
return response
def parseOptions(request):
queryParams = request.REQUEST
# Start with some defaults
graphOptions = {'width' : 330, 'height' : 250}
requestOptions = {}
graphType = queryParams.get('graphType','line')
assert graphType in GraphTypes, "Invalid graphType '%s', must be one of %s" % (graphType,GraphTypes.keys())
graphClass = GraphTypes[graphType]
# Fill in the requestOptions
requestOptions['graphType'] = graphType
requestOptions['graphClass'] = graphClass
requestOptions['pieMode'] = queryParams.get('pieMode', 'average')
requestOptions['cacheTimeout'] = int( queryParams.get('cacheTimeout', settings.DEFAULT_CACHE_DURATION) )
requestOptions['targets'] = []
# Extract the targets out of the queryParams
mytargets = []
# Normal format: ?target=path.1&target=path.2
if len(queryParams.getlist('target')) > 0:
mytargets = queryParams.getlist('target')
# Rails/PHP/jQuery common practice format: ?target[]=path.1&target[]=path.2
elif len(queryParams.getlist('target[]')) > 0:
mytargets = queryParams.getlist('target[]')
# Collect the targets
for target in mytargets:
requestOptions['targets'].append(target)
template = dict()
for key, val in queryParams.items():
if key.startswith("template["):
template[key[9:-1]] = val
requestOptions['template'] = template
if 'pickle' in queryParams:
requestOptions['format'] = 'pickle'
if 'rawData' in queryParams:
requestOptions['format'] = 'raw'
if 'format' in queryParams:
requestOptions['format'] = queryParams['format']
if 'jsonp' in queryParams:
requestOptions['jsonp'] = queryParams['jsonp']
if 'noCache' in queryParams:
requestOptions['noCache'] = True
if 'maxDataPoints' in queryParams and queryParams['maxDataPoints'].isdigit():
requestOptions['maxDataPoints'] = int(queryParams['maxDataPoints'])
requestOptions['localOnly'] = queryParams.get('local') == '1'
# Fill in the graphOptions
for opt in graphClass.customizable:
if opt in queryParams:
val = queryParams[opt]
if (val.isdigit() or (val.startswith('-') and val[1:].isdigit())) and 'color' not in opt.lower():
val = int(val)
elif '.' in val and (val.replace('.','',1).isdigit() or (val.startswith('-') and val[1:].replace('.','',1).isdigit())):
val = float(val)
elif val.lower() in ('true','false'):
val = val.lower() == 'true'
elif val.lower() == 'default' or val == '':
continue
graphOptions[opt] = val
tzinfo = pytz.timezone(settings.TIME_ZONE)
if 'tz' in queryParams:
try:
tzinfo = pytz.timezone(queryParams['tz'])
except pytz.UnknownTimeZoneError:
pass
requestOptions['tzinfo'] = tzinfo
# Get the time interval for time-oriented graph types
if graphType == 'line' or graphType == 'pie':
if 'until' in queryParams:
untilTime = parseATTime(queryParams['until'], tzinfo)
else:
untilTime = parseATTime('now', tzinfo)
if 'from' in queryParams:
fromTime = parseATTime(queryParams['from'], tzinfo)
else:
fromTime = parseATTime('-1d', tzinfo)
startTime = min(fromTime, untilTime)
endTime = max(fromTime, untilTime)
assert startTime != endTime, "Invalid empty time range"
requestOptions['startTime'] = startTime
requestOptions['endTime'] = endTime
return (graphOptions, requestOptions)
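# For example, a request like
# /render?target=app.requests.count&from=-6h&format=json&maxDataPoints=600
# yields graphType 'line' (the default), one entry in requestOptions['targets'],
# a parsed six-hour time range, and format/maxDataPoints in requestOptions.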
connectionPools = {}
def delegateRendering(graphType, graphOptions):
start = time()
postData = graphType + '\n' + pickle.dumps(graphOptions)
servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
shuffle(servers)
for server in servers:
start2 = time()
try:
# Get a connection
try:
pool = connectionPools[server]
except KeyError: #happens the first time
pool = connectionPools[server] = set()
try:
connection = pool.pop()
except KeyError: #No available connections, have to make a new one
connection = HTTPConnectionWithTimeout(server)
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
# Send the request
try:
connection.request('POST','/render/local/', postData)
except CannotSendRequest:
connection = HTTPConnectionWithTimeout(server) #retry once
connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
connection.request('POST', '/render/local/', postData)
# Read the response
response = connection.getresponse()
assert response.status == 200, "Bad response code %d from %s" % (response.status,server)
contentType = response.getheader('Content-Type')
imageData = response.read()
assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
assert imageData, "Received empty response from %s" % server
# Wrap things up
log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
pool.add(connection)
return imageData
except:
log.exception("Exception while attempting remote rendering request on %s" % server)
log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
continue
def renderLocalView(request):
try:
start = time()
reqParams = StringIO(request.body)
graphType = reqParams.readline().strip()
optionsPickle = reqParams.read()
reqParams.close()
graphClass = GraphTypes[graphType]
options = unpickle.loads(optionsPickle)
image = doImageRender(graphClass, options)
log.rendering("Delegated rendering request took %.6f seconds" % (time() - start))
response = buildResponse(image)
add_never_cache_headers(response)
return response
except:
log.exception("Exception in graphite.render.views.rawrender")
return HttpResponseServerError()
def renderMyGraphView(request,username,graphName):
profile = getProfileByUsername(username)
if not profile:
return errorPage("No such user '%s'" % username)
try:
graph = profile.mygraph_set.get(name=graphName)
except ObjectDoesNotExist:
return errorPage("User %s doesn't have a MyGraph named '%s'" % (username,graphName))
request_params = dict(request.REQUEST.items())
if request_params:
url_parts = urlsplit(graph.url)
query_string = url_parts[3]
if query_string:
url_params = parse_qs(query_string)
# Remove lists so that we can do an update() on the dict
for param, value in url_params.items():
if isinstance(value, list) and param != 'target':
url_params[param] = value[-1]
url_params.update(request_params)
# Handle 'target' being a list - we want duplicate &target params out of it
url_param_pairs = []
for key,val in url_params.items():
if isinstance(val, list):
for v in val:
url_param_pairs.append( (key,v) )
else:
url_param_pairs.append( (key,val) )
query_string = urlencode(url_param_pairs)
url = urlunsplit(url_parts[:3] + (query_string,) + url_parts[4:])
else:
url = graph.url
return HttpResponseRedirect(url)
def doImageRender(graphClass, graphOptions):
pngData = StringIO()
t = time()
img = graphClass(**graphOptions)
img.output(pngData)
log.rendering('Rendered PNG in %.6f seconds' % (time() - t))
imageData = pngData.getvalue()
pngData.close()
return imageData
def buildResponse(imageData, content_type="image/png"):
return HttpResponse(imageData, content_type=content_type)
def errorPage(message):
template = loader.get_template('500.html')
context = Context(dict(message=message))
return HttpResponseServerError( template.render(context) )
|
snakeleon/YouCompleteMe-x64 | refs/heads/master | third_party/ycmd/third_party/jedi_deps/jedi/test/examples/not_in_sys_path/not_in_sys_path_package/__init__.py | 34 | value = 'package'
|
OCA/l10n-italy | refs/heads/12.0 | l10n_it_website_sale_corrispettivi/models/__init__.py | 1 | # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from . import website
|
RydrDojo/Ridr_app | refs/heads/master | pylotVenv/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.py | 497 | from __future__ import absolute_import, division, unicode_literals
import string
EOF = None
E = {
"null-character":
"Null character in input stream, replaced with U+FFFD.",
"invalid-codepoint":
"Invalid codepoint in stream.",
"incorrectly-placed-solidus":
"Solidus (/) incorrectly placed in tag.",
"incorrect-cr-newline-entity":
"Incorrect CR newline entity, replaced with LF.",
"illegal-windows-1252-entity":
"Entity used with illegal number (windows-1252 reference).",
"cant-convert-numeric-entity":
"Numeric entity couldn't be converted to character "
"(codepoint U+%(charAsInt)08x).",
"illegal-codepoint-for-numeric-entity":
"Numeric entity represents an illegal codepoint: "
"U+%(charAsInt)08x.",
"numeric-entity-without-semicolon":
"Numeric entity didn't end with ';'.",
"expected-numeric-entity-but-got-eof":
"Numeric entity expected. Got end of file instead.",
"expected-numeric-entity":
"Numeric entity expected but none found.",
"named-entity-without-semicolon":
"Named entity didn't end with ';'.",
"expected-named-entity":
"Named entity expected. Got none.",
"attributes-in-end-tag":
"End tag contains unexpected attributes.",
'self-closing-flag-on-end-tag':
"End tag contains unexpected self-closing flag.",
"expected-tag-name-but-got-right-bracket":
"Expected tag name. Got '>' instead.",
"expected-tag-name-but-got-question-mark":
"Expected tag name. Got '?' instead. (HTML doesn't "
"support processing instructions.)",
"expected-tag-name":
"Expected tag name. Got something else instead",
"expected-closing-tag-but-got-right-bracket":
"Expected closing tag. Got '>' instead. Ignoring '</>'.",
"expected-closing-tag-but-got-eof":
"Expected closing tag. Unexpected end of file.",
"expected-closing-tag-but-got-char":
"Expected closing tag. Unexpected character '%(data)s' found.",
"eof-in-tag-name":
"Unexpected end of file in the tag name.",
"expected-attribute-name-but-got-eof":
"Unexpected end of file. Expected attribute name instead.",
"eof-in-attribute-name":
"Unexpected end of file in attribute name.",
"invalid-character-in-attribute-name":
"Invalid character in attribute name",
"duplicate-attribute":
"Dropped duplicate attribute on tag.",
"expected-end-of-tag-name-but-got-eof":
"Unexpected end of file. Expected = or end of tag.",
"expected-attribute-value-but-got-eof":
"Unexpected end of file. Expected attribute value.",
"expected-attribute-value-but-got-right-bracket":
"Expected attribute value. Got '>' instead.",
'equals-in-unquoted-attribute-value':
"Unexpected = in unquoted attribute",
'unexpected-character-in-unquoted-attribute-value':
"Unexpected character in unquoted attribute",
"invalid-character-after-attribute-name":
"Unexpected character after attribute name.",
"unexpected-character-after-attribute-value":
"Unexpected character after attribute value.",
"eof-in-attribute-value-double-quote":
"Unexpected end of file in attribute value (\").",
"eof-in-attribute-value-single-quote":
"Unexpected end of file in attribute value (').",
"eof-in-attribute-value-no-quotes":
"Unexpected end of file in attribute value.",
"unexpected-EOF-after-solidus-in-tag":
"Unexpected end of file in tag. Expected >",
"unexpected-character-after-solidus-in-tag":
"Unexpected character after / in tag. Expected >",
"expected-dashes-or-doctype":
"Expected '--' or 'DOCTYPE'. Not found.",
"unexpected-bang-after-double-dash-in-comment":
"Unexpected ! after -- in comment",
"unexpected-space-after-double-dash-in-comment":
"Unexpected space after -- in comment",
"incorrect-comment":
"Incorrect comment.",
"eof-in-comment":
"Unexpected end of file in comment.",
"eof-in-comment-end-dash":
"Unexpected end of file in comment (-)",
"unexpected-dash-after-double-dash-in-comment":
"Unexpected '-' after '--' found in comment.",
"eof-in-comment-double-dash":
"Unexpected end of file in comment (--).",
"eof-in-comment-end-space-state":
"Unexpected end of file in comment.",
"eof-in-comment-end-bang-state":
"Unexpected end of file in comment.",
"unexpected-char-in-comment":
"Unexpected character in comment found.",
"need-space-after-doctype":
"No space after literal string 'DOCTYPE'.",
"expected-doctype-name-but-got-right-bracket":
"Unexpected > character. Expected DOCTYPE name.",
"expected-doctype-name-but-got-eof":
"Unexpected end of file. Expected DOCTYPE name.",
"eof-in-doctype-name":
"Unexpected end of file in DOCTYPE name.",
"eof-in-doctype":
"Unexpected end of file in DOCTYPE.",
"expected-space-or-right-bracket-in-doctype":
"Expected space or '>'. Got '%(data)s'",
"unexpected-end-of-doctype":
"Unexpected end of DOCTYPE.",
"unexpected-char-in-doctype":
"Unexpected character in DOCTYPE.",
"eof-in-innerhtml":
"XXX innerHTML EOF",
"unexpected-doctype":
"Unexpected DOCTYPE. Ignored.",
"non-html-root":
"html needs to be the first start tag.",
"expected-doctype-but-got-eof":
"Unexpected End of file. Expected DOCTYPE.",
"unknown-doctype":
"Erroneous DOCTYPE.",
"expected-doctype-but-got-chars":
"Unexpected non-space characters. Expected DOCTYPE.",
"expected-doctype-but-got-start-tag":
"Unexpected start tag (%(name)s). Expected DOCTYPE.",
"expected-doctype-but-got-end-tag":
"Unexpected end tag (%(name)s). Expected DOCTYPE.",
"end-tag-after-implied-root":
"Unexpected end tag (%(name)s) after the (implied) root element.",
"expected-named-closing-tag-but-got-eof":
"Unexpected end of file. Expected end tag (%(name)s).",
"two-heads-are-not-better-than-one":
"Unexpected start tag head in existing head. Ignored.",
"unexpected-end-tag":
"Unexpected end tag (%(name)s). Ignored.",
"unexpected-start-tag-out-of-my-head":
"Unexpected start tag (%(name)s) that can be in head. Moved.",
"unexpected-start-tag":
"Unexpected start tag (%(name)s).",
"missing-end-tag":
"Missing end tag (%(name)s).",
"missing-end-tags":
"Missing end tags (%(name)s).",
"unexpected-start-tag-implies-end-tag":
"Unexpected start tag (%(startName)s) "
"implies end tag (%(endName)s).",
"unexpected-start-tag-treated-as":
"Unexpected start tag (%(originalName)s). Treated as %(newName)s.",
"deprecated-tag":
"Unexpected start tag %(name)s. Don't use it!",
"unexpected-start-tag-ignored":
"Unexpected start tag %(name)s. Ignored.",
"expected-one-end-tag-but-got-another":
"Unexpected end tag (%(gotName)s). "
"Missing end tag (%(expectedName)s).",
"end-tag-too-early":
"End tag (%(name)s) seen too early. Expected other end tag.",
"end-tag-too-early-named":
"Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).",
"end-tag-too-early-ignored":
"End tag (%(name)s) seen too early. Ignored.",
"adoption-agency-1.1":
"End tag (%(name)s) violates step 1, "
"paragraph 1 of the adoption agency algorithm.",
"adoption-agency-1.2":
"End tag (%(name)s) violates step 1, "
"paragraph 2 of the adoption agency algorithm.",
"adoption-agency-1.3":
"End tag (%(name)s) violates step 1, "
"paragraph 3 of the adoption agency algorithm.",
"adoption-agency-4.4":
"End tag (%(name)s) violates step 4, "
"paragraph 4 of the adoption agency algorithm.",
"unexpected-end-tag-treated-as":
"Unexpected end tag (%(originalName)s). Treated as %(newName)s.",
"no-end-tag":
"This element (%(name)s) has no end tag.",
"unexpected-implied-end-tag-in-table":
"Unexpected implied end tag (%(name)s) in the table phase.",
"unexpected-implied-end-tag-in-table-body":
"Unexpected implied end tag (%(name)s) in the table body phase.",
"unexpected-char-implies-table-voodoo":
"Unexpected non-space characters in "
"table context caused voodoo mode.",
"unexpected-hidden-input-in-table":
"Unexpected input with type hidden in table context.",
"unexpected-form-in-table":
"Unexpected form in table context.",
"unexpected-start-tag-implies-table-voodoo":
"Unexpected start tag (%(name)s) in "
"table context caused voodoo mode.",
"unexpected-end-tag-implies-table-voodoo":
"Unexpected end tag (%(name)s) in "
"table context caused voodoo mode.",
"unexpected-cell-in-table-body":
"Unexpected table cell start tag (%(name)s) "
"in the table body phase.",
"unexpected-cell-end-tag":
"Got table cell end tag (%(name)s) "
"while required end tags are missing.",
"unexpected-end-tag-in-table-body":
"Unexpected end tag (%(name)s) in the table body phase. Ignored.",
"unexpected-implied-end-tag-in-table-row":
"Unexpected implied end tag (%(name)s) in the table row phase.",
"unexpected-end-tag-in-table-row":
"Unexpected end tag (%(name)s) in the table row phase. Ignored.",
"unexpected-select-in-select":
"Unexpected select start tag in the select phase "
"treated as select end tag.",
"unexpected-input-in-select":
"Unexpected input start tag in the select phase.",
"unexpected-start-tag-in-select":
"Unexpected start tag token (%(name)s in the select phase. "
"Ignored.",
"unexpected-end-tag-in-select":
"Unexpected end tag (%(name)s) in the select phase. Ignored.",
"unexpected-table-element-start-tag-in-select-in-table":
"Unexpected table element start tag (%(name)s) in the select in table phase.",
"unexpected-table-element-end-tag-in-select-in-table":
"Unexpected table element end tag (%(name)s) in the select in table phase.",
"unexpected-char-after-body":
"Unexpected non-space characters in the after body phase.",
"unexpected-start-tag-after-body":
"Unexpected start tag token (%(name)s)"
" in the after body phase.",
"unexpected-end-tag-after-body":
"Unexpected end tag token (%(name)s)"
" in the after body phase.",
"unexpected-char-in-frameset":
"Unexpected characters in the frameset phase. Characters ignored.",
"unexpected-start-tag-in-frameset":
"Unexpected start tag token (%(name)s)"
" in the frameset phase. Ignored.",
"unexpected-frameset-in-frameset-innerhtml":
"Unexpected end tag token (frameset) "
"in the frameset phase (innerHTML).",
"unexpected-end-tag-in-frameset":
"Unexpected end tag token (%(name)s)"
" in the frameset phase. Ignored.",
"unexpected-char-after-frameset":
"Unexpected non-space characters in the "
"after frameset phase. Ignored.",
"unexpected-start-tag-after-frameset":
"Unexpected start tag (%(name)s)"
" in the after frameset phase. Ignored.",
"unexpected-end-tag-after-frameset":
"Unexpected end tag (%(name)s)"
" in the after frameset phase. Ignored.",
"unexpected-end-tag-after-body-innerhtml":
"Unexpected end tag after body(innerHtml)",
"expected-eof-but-got-char":
"Unexpected non-space characters. Expected end of file.",
"expected-eof-but-got-start-tag":
"Unexpected start tag (%(name)s)"
". Expected end of file.",
"expected-eof-but-got-end-tag":
"Unexpected end tag (%(name)s)"
". Expected end of file.",
"eof-in-table":
"Unexpected end of file. Expected table content.",
"eof-in-select":
"Unexpected end of file. Expected select content.",
"eof-in-frameset":
"Unexpected end of file. Expected frameset content.",
"eof-in-script-in-script":
"Unexpected end of file. Expected script content.",
"eof-in-foreign-lands":
"Unexpected end of file. Expected foreign content",
"non-void-element-with-trailing-solidus":
"Trailing solidus not allowed on element %(name)s",
"unexpected-html-element-in-foreign-content":
"Element %(name)s not allowed in a non-html context",
"unexpected-end-tag-before-html":
"Unexpected end tag (%(name)s) before html.",
"XXX-undefined-error":
"Undefined error (this sucks and should be fixed)",
}
namespaces = {
"html": "http://www.w3.org/1999/xhtml",
"mathml": "http://www.w3.org/1998/Math/MathML",
"svg": "http://www.w3.org/2000/svg",
"xlink": "http://www.w3.org/1999/xlink",
"xml": "http://www.w3.org/XML/1998/namespace",
"xmlns": "http://www.w3.org/2000/xmlns/"
}
scopingElements = frozenset([
(namespaces["html"], "applet"),
(namespaces["html"], "caption"),
(namespaces["html"], "html"),
(namespaces["html"], "marquee"),
(namespaces["html"], "object"),
(namespaces["html"], "table"),
(namespaces["html"], "td"),
(namespaces["html"], "th"),
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext"),
(namespaces["mathml"], "annotation-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title"),
])
formattingElements = frozenset([
(namespaces["html"], "a"),
(namespaces["html"], "b"),
(namespaces["html"], "big"),
(namespaces["html"], "code"),
(namespaces["html"], "em"),
(namespaces["html"], "font"),
(namespaces["html"], "i"),
(namespaces["html"], "nobr"),
(namespaces["html"], "s"),
(namespaces["html"], "small"),
(namespaces["html"], "strike"),
(namespaces["html"], "strong"),
(namespaces["html"], "tt"),
(namespaces["html"], "u")
])
specialElements = frozenset([
(namespaces["html"], "address"),
(namespaces["html"], "applet"),
(namespaces["html"], "area"),
(namespaces["html"], "article"),
(namespaces["html"], "aside"),
(namespaces["html"], "base"),
(namespaces["html"], "basefont"),
(namespaces["html"], "bgsound"),
(namespaces["html"], "blockquote"),
(namespaces["html"], "body"),
(namespaces["html"], "br"),
(namespaces["html"], "button"),
(namespaces["html"], "caption"),
(namespaces["html"], "center"),
(namespaces["html"], "col"),
(namespaces["html"], "colgroup"),
(namespaces["html"], "command"),
(namespaces["html"], "dd"),
(namespaces["html"], "details"),
(namespaces["html"], "dir"),
(namespaces["html"], "div"),
(namespaces["html"], "dl"),
(namespaces["html"], "dt"),
(namespaces["html"], "embed"),
(namespaces["html"], "fieldset"),
(namespaces["html"], "figure"),
(namespaces["html"], "footer"),
(namespaces["html"], "form"),
(namespaces["html"], "frame"),
(namespaces["html"], "frameset"),
(namespaces["html"], "h1"),
(namespaces["html"], "h2"),
(namespaces["html"], "h3"),
(namespaces["html"], "h4"),
(namespaces["html"], "h5"),
(namespaces["html"], "h6"),
(namespaces["html"], "head"),
(namespaces["html"], "header"),
(namespaces["html"], "hr"),
(namespaces["html"], "html"),
(namespaces["html"], "iframe"),
# Note that image is commented out in the spec as "this isn't an
# element that can end up on the stack, so it doesn't matter,"
(namespaces["html"], "image"),
(namespaces["html"], "img"),
(namespaces["html"], "input"),
(namespaces["html"], "isindex"),
(namespaces["html"], "li"),
(namespaces["html"], "link"),
(namespaces["html"], "listing"),
(namespaces["html"], "marquee"),
(namespaces["html"], "menu"),
(namespaces["html"], "meta"),
(namespaces["html"], "nav"),
(namespaces["html"], "noembed"),
(namespaces["html"], "noframes"),
(namespaces["html"], "noscript"),
(namespaces["html"], "object"),
(namespaces["html"], "ol"),
(namespaces["html"], "p"),
(namespaces["html"], "param"),
(namespaces["html"], "plaintext"),
(namespaces["html"], "pre"),
(namespaces["html"], "script"),
(namespaces["html"], "section"),
(namespaces["html"], "select"),
(namespaces["html"], "style"),
(namespaces["html"], "table"),
(namespaces["html"], "tbody"),
(namespaces["html"], "td"),
(namespaces["html"], "textarea"),
(namespaces["html"], "tfoot"),
(namespaces["html"], "th"),
(namespaces["html"], "thead"),
(namespaces["html"], "title"),
(namespaces["html"], "tr"),
(namespaces["html"], "ul"),
(namespaces["html"], "wbr"),
(namespaces["html"], "xmp"),
(namespaces["svg"], "foreignObject")
])
htmlIntegrationPointElements = frozenset([
(namespaces["mathml"], "annotaion-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title")
])
mathmlTextIntegrationPointElements = frozenset([
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext")
])
adjustForeignAttributes = {
"xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
"xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
"xlink:href": ("xlink", "href", namespaces["xlink"]),
"xlink:role": ("xlink", "role", namespaces["xlink"]),
"xlink:show": ("xlink", "show", namespaces["xlink"]),
"xlink:title": ("xlink", "title", namespaces["xlink"]),
"xlink:type": ("xlink", "type", namespaces["xlink"]),
"xml:base": ("xml", "base", namespaces["xml"]),
"xml:lang": ("xml", "lang", namespaces["xml"]),
"xml:space": ("xml", "space", namespaces["xml"]),
"xmlns": (None, "xmlns", namespaces["xmlns"]),
"xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
}
unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in
adjustForeignAttributes.items()])
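# e.g. unadjustForeignAttributes[(namespaces["xlink"], "href")] == "xlink:href"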
spaceCharacters = frozenset([
"\t",
"\n",
"\u000C",
" ",
"\r"
])
tableInsertModeElements = frozenset([
"table",
"tbody",
"tfoot",
"thead",
"tr"
])
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)
asciiUpper2Lower = dict([(ord(c), ord(c.lower()))
for c in string.ascii_uppercase])
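# Built for unicode.translate(), which maps ordinals to ordinals, e.g.
# u"HTML".translate(asciiUpper2Lower) == u"html"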
# Heading elements need to be ordered
headingElements = (
"h1",
"h2",
"h3",
"h4",
"h5",
"h6"
)
voidElements = frozenset([
"base",
"command",
"event-source",
"link",
"meta",
"hr",
"br",
"img",
"embed",
"param",
"area",
"col",
"input",
"source",
"track"
])
cdataElements = frozenset(['title', 'textarea'])
rcdataElements = frozenset([
'style',
'script',
'xmp',
'iframe',
'noembed',
'noframes',
'noscript'
])
booleanAttributes = {
"": frozenset(["irrelevant"]),
"style": frozenset(["scoped"]),
"img": frozenset(["ismap"]),
"audio": frozenset(["autoplay", "controls"]),
"video": frozenset(["autoplay", "controls"]),
"script": frozenset(["defer", "async"]),
"details": frozenset(["open"]),
"datagrid": frozenset(["multiple", "disabled"]),
"command": frozenset(["hidden", "disabled", "checked", "default"]),
"hr": frozenset(["noshade"]),
"menu": frozenset(["autosubmit"]),
"fieldset": frozenset(["disabled", "readonly"]),
"option": frozenset(["disabled", "readonly", "selected"]),
"optgroup": frozenset(["disabled", "readonly"]),
"button": frozenset(["disabled", "autofocus"]),
"input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]),
"select": frozenset(["disabled", "readonly", "autofocus", "multiple"]),
"output": frozenset(["disabled", "readonly"]),
}
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
entitiesWindows1252 = (
8364, # 0x80 0x20AC EURO SIGN
65533, # 0x81 UNDEFINED
8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
8224, # 0x86 0x2020 DAGGER
8225, # 0x87 0x2021 DOUBLE DAGGER
710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
8240, # 0x89 0x2030 PER MILLE SIGN
352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
65533, # 0x8D UNDEFINED
381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
65533, # 0x8F UNDEFINED
65533, # 0x90 UNDEFINED
8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
8226, # 0x95 0x2022 BULLET
8211, # 0x96 0x2013 EN DASH
8212, # 0x97 0x2014 EM DASH
732, # 0x98 0x02DC SMALL TILDE
8482, # 0x99 0x2122 TRADE MARK SIGN
353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
65533, # 0x9D UNDEFINED
382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
)
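# This is why the index matters: numeric character references in the
# 0x80-0x9F range are remapped through this table, e.g. &#150; (0x96)
# resolves to entitiesWindows1252[0x96 - 0x80] == 8211 (U+2013 EN DASH).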
xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;'])
entities = {
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
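# Replacements for ill-formed numeric character references: NULL, CR, and the
# Windows-1252 C1 range (0x80-0x9F) map to the characters below, following the
# HTML5 numeric character reference rules.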
replacementCharacters = {
0x0: "\uFFFD",
0x0d: "\u000D",
0x80: "\u20AC",
0x81: "\u0081",
0x82: "\u201A",
0x83: "\u0192",
0x84: "\u201E",
0x85: "\u2026",
0x86: "\u2020",
0x87: "\u2021",
0x88: "\u02C6",
0x89: "\u2030",
0x8A: "\u0160",
0x8B: "\u2039",
0x8C: "\u0152",
0x8D: "\u008D",
0x8E: "\u017D",
0x8F: "\u008F",
0x90: "\u0090",
0x91: "\u2018",
0x92: "\u2019",
0x93: "\u201C",
0x94: "\u201D",
0x95: "\u2022",
0x96: "\u2013",
0x97: "\u2014",
0x98: "\u02DC",
0x99: "\u2122",
0x9A: "\u0161",
0x9B: "\u203A",
0x9C: "\u0153",
0x9D: "\u009D",
0x9E: "\u017E",
0x9F: "\u0178",
}
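# Map encoding labels, as they appear (after normalization) in documents and
# transport headers, to the Python codec names used to decode them.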
encodings = {
'437': 'cp437',
'850': 'cp850',
'852': 'cp852',
'855': 'cp855',
'857': 'cp857',
'860': 'cp860',
'861': 'cp861',
'862': 'cp862',
'863': 'cp863',
'865': 'cp865',
'866': 'cp866',
'869': 'cp869',
'ansix341968': 'ascii',
'ansix341986': 'ascii',
'arabic': 'iso8859-6',
'ascii': 'ascii',
'asmo708': 'iso8859-6',
'big5': 'big5',
'big5hkscs': 'big5hkscs',
'chinese': 'gbk',
'cp037': 'cp037',
'cp1026': 'cp1026',
'cp154': 'ptcp154',
'cp367': 'ascii',
'cp424': 'cp424',
'cp437': 'cp437',
'cp500': 'cp500',
'cp775': 'cp775',
'cp819': 'windows-1252',
'cp850': 'cp850',
'cp852': 'cp852',
'cp855': 'cp855',
'cp857': 'cp857',
'cp860': 'cp860',
'cp861': 'cp861',
'cp862': 'cp862',
'cp863': 'cp863',
'cp864': 'cp864',
'cp865': 'cp865',
'cp866': 'cp866',
'cp869': 'cp869',
'cp936': 'gbk',
'cpgr': 'cp869',
'cpis': 'cp861',
'csascii': 'ascii',
'csbig5': 'big5',
'cseuckr': 'cp949',
'cseucpkdfmtjapanese': 'euc_jp',
'csgb2312': 'gbk',
'cshproman8': 'hp-roman8',
'csibm037': 'cp037',
'csibm1026': 'cp1026',
'csibm424': 'cp424',
'csibm500': 'cp500',
'csibm855': 'cp855',
'csibm857': 'cp857',
'csibm860': 'cp860',
'csibm861': 'cp861',
'csibm863': 'cp863',
'csibm864': 'cp864',
'csibm865': 'cp865',
'csibm866': 'cp866',
'csibm869': 'cp869',
'csiso2022jp': 'iso2022_jp',
'csiso2022jp2': 'iso2022_jp_2',
'csiso2022kr': 'iso2022_kr',
'csiso58gb231280': 'gbk',
'csisolatin1': 'windows-1252',
'csisolatin2': 'iso8859-2',
'csisolatin3': 'iso8859-3',
'csisolatin4': 'iso8859-4',
'csisolatin5': 'windows-1254',
'csisolatin6': 'iso8859-10',
'csisolatinarabic': 'iso8859-6',
'csisolatincyrillic': 'iso8859-5',
'csisolatingreek': 'iso8859-7',
'csisolatinhebrew': 'iso8859-8',
'cskoi8r': 'koi8-r',
'csksc56011987': 'cp949',
'cspc775baltic': 'cp775',
'cspc850multilingual': 'cp850',
'cspc862latinhebrew': 'cp862',
'cspc8codepage437': 'cp437',
'cspcp852': 'cp852',
'csptcp154': 'ptcp154',
'csshiftjis': 'shift_jis',
'csunicode11utf7': 'utf-7',
'cyrillic': 'iso8859-5',
'cyrillicasian': 'ptcp154',
'ebcdiccpbe': 'cp500',
'ebcdiccpca': 'cp037',
'ebcdiccpch': 'cp500',
'ebcdiccphe': 'cp424',
'ebcdiccpnl': 'cp037',
'ebcdiccpus': 'cp037',
'ebcdiccpwt': 'cp037',
'ecma114': 'iso8859-6',
'ecma118': 'iso8859-7',
'elot928': 'iso8859-7',
'eucjp': 'euc_jp',
'euckr': 'cp949',
'extendedunixcodepackedformatforjapanese': 'euc_jp',
'gb18030': 'gb18030',
'gb2312': 'gbk',
'gb231280': 'gbk',
'gbk': 'gbk',
'greek': 'iso8859-7',
'greek8': 'iso8859-7',
'hebrew': 'iso8859-8',
'hproman8': 'hp-roman8',
'hzgb2312': 'hz',
'ibm037': 'cp037',
'ibm1026': 'cp1026',
'ibm367': 'ascii',
'ibm424': 'cp424',
'ibm437': 'cp437',
'ibm500': 'cp500',
'ibm775': 'cp775',
'ibm819': 'windows-1252',
'ibm850': 'cp850',
'ibm852': 'cp852',
'ibm855': 'cp855',
'ibm857': 'cp857',
'ibm860': 'cp860',
'ibm861': 'cp861',
'ibm862': 'cp862',
'ibm863': 'cp863',
'ibm864': 'cp864',
'ibm865': 'cp865',
'ibm866': 'cp866',
'ibm869': 'cp869',
'iso2022jp': 'iso2022_jp',
'iso2022jp2': 'iso2022_jp_2',
'iso2022kr': 'iso2022_kr',
'iso646irv1991': 'ascii',
'iso646us': 'ascii',
'iso88591': 'windows-1252',
'iso885910': 'iso8859-10',
'iso8859101992': 'iso8859-10',
'iso885911987': 'windows-1252',
'iso885913': 'iso8859-13',
'iso885914': 'iso8859-14',
'iso8859141998': 'iso8859-14',
'iso885915': 'iso8859-15',
'iso885916': 'iso8859-16',
'iso8859162001': 'iso8859-16',
'iso88592': 'iso8859-2',
'iso885921987': 'iso8859-2',
'iso88593': 'iso8859-3',
'iso885931988': 'iso8859-3',
'iso88594': 'iso8859-4',
'iso885941988': 'iso8859-4',
'iso88595': 'iso8859-5',
'iso885951988': 'iso8859-5',
'iso88596': 'iso8859-6',
'iso885961987': 'iso8859-6',
'iso88597': 'iso8859-7',
'iso885971987': 'iso8859-7',
'iso88598': 'iso8859-8',
'iso885981988': 'iso8859-8',
'iso88599': 'windows-1254',
'iso885991989': 'windows-1254',
'isoceltic': 'iso8859-14',
'isoir100': 'windows-1252',
'isoir101': 'iso8859-2',
'isoir109': 'iso8859-3',
'isoir110': 'iso8859-4',
'isoir126': 'iso8859-7',
'isoir127': 'iso8859-6',
'isoir138': 'iso8859-8',
'isoir144': 'iso8859-5',
'isoir148': 'windows-1254',
'isoir149': 'cp949',
'isoir157': 'iso8859-10',
'isoir199': 'iso8859-14',
'isoir226': 'iso8859-16',
'isoir58': 'gbk',
'isoir6': 'ascii',
'koi8r': 'koi8-r',
'koi8u': 'koi8-u',
'korean': 'cp949',
'ksc5601': 'cp949',
'ksc56011987': 'cp949',
'ksc56011989': 'cp949',
'l1': 'windows-1252',
'l10': 'iso8859-16',
'l2': 'iso8859-2',
'l3': 'iso8859-3',
'l4': 'iso8859-4',
'l5': 'windows-1254',
'l6': 'iso8859-10',
'l8': 'iso8859-14',
'latin1': 'windows-1252',
'latin10': 'iso8859-16',
'latin2': 'iso8859-2',
'latin3': 'iso8859-3',
'latin4': 'iso8859-4',
'latin5': 'windows-1254',
'latin6': 'iso8859-10',
'latin8': 'iso8859-14',
'latin9': 'iso8859-15',
'ms936': 'gbk',
'mskanji': 'shift_jis',
'pt154': 'ptcp154',
'ptcp154': 'ptcp154',
'r8': 'hp-roman8',
'roman8': 'hp-roman8',
'shiftjis': 'shift_jis',
'tis620': 'cp874',
'unicode11utf7': 'utf-7',
'us': 'ascii',
'usascii': 'ascii',
'utf16': 'utf-16',
'utf16be': 'utf-16-be',
'utf16le': 'utf-16-le',
'utf8': 'utf-8',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows936': 'gbk',
'x-x-big5': 'big5'}
tokenTypes = {
"Doctype": 0,
"Characters": 1,
"SpaceCharacters": 2,
"StartTag": 3,
"EndTag": 4,
"EmptyTag": 5,
"Comment": 6,
"ParseError": 7
}
tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]])
prefixes = dict([(v, k) for k, v in namespaces.items()])
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
pass
class ReparseException(Exception):
pass
|
rossella/neutron | refs/heads/master | quantum/tests/unit/__init__.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks
import __builtin__
import os
import unittest
setattr(__builtin__, '_', lambda x: x)
from quantum.openstack.common import cfg
reldir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
absdir = os.path.abspath(reldir)
cfg.CONF.state_path = absdir
# An empty lock path forces lockutils.synchronized to use a temporary
# location for lock files that will be cleaned up automatically.
cfg.CONF.lock_path = ''
class BaseTest(unittest.TestCase):
def setUp(self):
pass
def setUp():
pass
|
vyscond/cocos | refs/heads/master | test/test_rich_label.py | 6 | from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 5, s, t 10.1, s, q"
tags = "RichLabel"
import cocos
from cocos.director import director
from cocos.sprite import Sprite
from cocos.actions import *
from cocos.text import *
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
x,y = director.get_window_size()
self.text = RichLabel(
"Hello {color (255, 0, 0, 255)}World!",
(x//2, y//2))
self.text.element.document.set_style(0, 5, dict(bold = True))
self.text.element.document.set_style(6, 11, dict(italic = True))
self.text.do( Rotate( 360, 10 ) )
self.text.do( ScaleTo( 10, 10 ) )
self.add( self.text )
def main():
director.init()
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
if __name__ == '__main__':
main()
|
peterlauri/django | refs/heads/master | django/contrib/gis/utils/__init__.py | 92 | """
This module contains useful utilities for GeoDjango.
"""
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.utils.wkt import precision_wkt # NOQA
if HAS_GDAL:
from django.contrib.gis.utils.ogrinfo import ogrinfo # NOQA
from django.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA
from django.contrib.gis.utils.srs import add_srs_entry # NOQA
from django.core.exceptions import ImproperlyConfigured
try:
# LayerMapping requires DJANGO_SETTINGS_MODULE to be set,
# so this needs to be in try/except.
from django.contrib.gis.utils.layermapping import LayerMapping, LayerMapError # NOQA
except ImproperlyConfigured:
pass
|
lattwood/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/TestResultServer/handlers/__init__.py | 6014 | # Required for Python to search this directory for module files
|
TalShafir/ansible | refs/heads/devel | lib/ansible/modules/utilities/logic/async_status.py | 19 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: async_status
short_description: Obtain status of asynchronous task
description:
- This module gets the status of an asynchronous task.
- This module is also supported for Windows targets.
version_added: "0.5"
options:
jid:
description:
- Job or task identifier
required: true
mode:
description:
- if C(status), obtain the status; if C(cleanup), clean up the async job cache (by default in C(~/.ansible_async/)) for the specified job I(jid).
choices: [ "status", "cleanup" ]
default: "status"
notes:
- See also U(https://docs.ansible.com/playbooks_async.html)
- This module is also supported for Windows targets.
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
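# Illustrative only: an EXAMPLES block in the usual Ansible convention. It is
# not part of the original module; the task layout and the 'job_result'
# variable are placeholders.
EXAMPLES = '''
- name: Fire and forget a long-running operation
  command: /usr/bin/long_running_operation
  async: 3600
  poll: 0
  register: job_result

- name: Poll the job started above until it finishes
  async_status:
    jid: "{{ job_result.ansible_job_id }}"
  register: job_status
  until: job_status.finished
  retries: 30
'''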
import json
import os
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
def main():
module = AnsibleModule(argument_spec=dict(
jid=dict(required=True),
mode=dict(default='status', choices=['status', 'cleanup']),
# passed in from the async_status action plugin
_async_dir=dict(required=True, type='path'),
))
mode = module.params['mode']
jid = module.params['jid']
async_dir = module.params['_async_dir']
# setup logging directory
logdir = os.path.expanduser(async_dir)
log_path = os.path.join(logdir, jid)
if not os.path.exists(log_path):
module.fail_json(msg="could not find job", ansible_job_id=jid, started=1, finished=1)
if mode == 'cleanup':
os.unlink(log_path)
module.exit_json(ansible_job_id=jid, erased=log_path)
# NOT in cleanup mode, assume regular status mode
# no remote kill mode currently exists, but probably should
# consider log_path + ".pid" file and also unlink that above
data = None
try:
data = open(log_path).read()
data = json.loads(data)
except Exception:
if not data:
# file not written yet? That means it is running
module.exit_json(results_file=log_path, ansible_job_id=jid, started=1, finished=0)
else:
module.fail_json(ansible_job_id=jid, results_file=log_path,
msg="Could not parse job output: %s" % data, started=1, finished=1)
if 'started' not in data:
data['finished'] = 1
data['ansible_job_id'] = jid
elif 'finished' not in data:
data['finished'] = 0
# Fix error: TypeError: exit_json() keywords must be strings
data = dict([(to_native(k), v) for k, v in iteritems(data)])
module.exit_json(**data)
if __name__ == '__main__':
main()
|
pysv/djep | refs/heads/develop | pyconde/search/search_indexes.py | 3 | import re
from django.test.client import RequestFactory
from django.template import RequestContext
from haystack import indexes
from cms import models as cmsmodels
rf = RequestFactory()
HTML_TAG_RE = re.compile(r'<[^>]+>')
def cleanup_content(s):
"""
Removes HTML tags from data and replaces them with spaces.
"""
    return HTML_TAG_RE.subn(' ', s)[0]
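# Usage sketch (not part of the original module): because tags are replaced
# with single spaces, adjacent words stay separated, e.g.
#   cleanup_content('Hello<br/>World')  ->  'Hello World'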
class PageIndex(indexes.SearchIndex, indexes.Indexable):
"""
    Since for now we only offer this site in one language, we can get by
    without doing any language model hacks.
"""
text = indexes.CharField(document=True)
title = indexes.CharField()
url = indexes.CharField()
def get_model(self):
return cmsmodels.Page
def index_queryset(self, using=None):
return self.get_model().objects.filter(published=True)
def prepare(self, obj):
self.prepared_data = super(PageIndex, self).prepare(obj)
request = rf.get('/')
request.session = {}
text = u""
extra_data = []
# Let's extract the title
context = RequestContext(request)
for title in obj.title_set.all():
if title.page_title:
self.prepared_data['title'] = title.page_title
extra_data.append(title.title)
else:
self.prepared_data['title'] = title.title
if title.meta_keywords:
extra_data.append(title.meta_keywords)
if title.meta_description:
extra_data.append(title.meta_description)
if title.menu_title:
extra_data.append(title.menu_title)
for placeholder in obj.placeholders.all():
text += placeholder.render(context, None)
self.prepared_data['text'] = cleanup_content(u' '.join([
self.prepared_data['title'],
text,
] + extra_data))
self.prepared_data['url'] = obj.get_absolute_url()
return self.prepared_data
|
john-soklaski/CoilSnake | refs/heads/master | coilsnake/ui/gui.py | 1 | #! /usr/bin/env python
import tkinter
from functools import partial
import logging
from subprocess import Popen
from threading import Thread
import tkinter.filedialog
import tkinter.messagebox
import tkinter.simpledialog
from traceback import format_exc
import tkinter.ttk
import webbrowser
from tkinter import *
from tkinter.ttk import *
import os
from PIL import ImageTk
from coilsnake.model.common.blocks import Rom
from coilsnake.ui import information, gui_util
from coilsnake.ui.common import decompile_rom, compile_project, upgrade_project, setup_logging, decompile_script, \
patch_rom
from coilsnake.ui.gui_preferences import CoilSnakePreferences
from coilsnake.ui.gui_util import browse_for_patch, browse_for_rom, browse_for_project, open_folder, set_entry_text, \
find_system_java_exe
from coilsnake.ui.information import coilsnake_about
from coilsnake.ui.widgets import ThreadSafeConsole, CoilSnakeGuiProgressBar
from coilsnake.util.common.project import PROJECT_FILENAME
from coilsnake.util.common.assets import asset_path
log = logging.getLogger(__name__)
BUTTON_WIDTH = 15
LABEL_WIDTH = 20
class CoilSnakeGui(object):
def __init__(self):
self.preferences = CoilSnakePreferences()
self.preferences.load()
self.components = []
self.progress_bar = None
# Preferences functions
def refresh_debug_logging(self):
if self.preferences["debug mode"]:
logging.root.setLevel(logging.DEBUG)
else:
logging.root.setLevel(logging.INFO)
def refresh_debug_mode_command_label(self):
# The "Debug Mode" command is the 5th in the Preferences menu (starting counting at 0, including separators)
self.pref_menu.entryconfig(5, label=self.get_debug_mode_command_label())
def get_debug_mode_command_label(self):
return 'Disable Debug Mode' if self.preferences["debug mode"] else 'Enable Debug Mode'
def set_debug_mode(self):
if self.preferences["debug mode"]:
confirm = tkinter.messagebox.askquestion(
"Disable Debug Mode?",
"Would you like to disable Debug mode?",
icon="question"
)
if confirm == "yes":
self.preferences["debug mode"] = False
else:
confirm = tkinter.messagebox.askquestion(
"Enable Debug Mode?",
"Would you like to enable Debug mode? Debug mode will provide you with more detailed output while "
+ "CoilSnake is running.\n\n"
+ "This is generally only needed by advanced users.",
icon="question"
)
if confirm == "yes":
self.preferences["debug mode"] = True
self.preferences.save()
self.refresh_debug_logging()
self.refresh_debug_mode_command_label()
def set_emulator_exe(self):
tkinter.messagebox.showinfo(
"Select the Emulator Executable",
"Select an emulator executable for CoilSnake to use.\n\n"
"Hint: It is probably named either zsnesw.exe, snes9x.exe, or higan-accuracy.exe"
)
emulator_exe = tkinter.filedialog.askopenfilename(
parent=self.root,
initialdir=os.path.expanduser("~"),
title="Select an Emulator Executable")
if emulator_exe:
self.preferences["emulator"] = emulator_exe
self.preferences.save()
def set_ccscript_offset(self):
ccscript_offset_str = tkinter.simpledialog.askstring(
title="Input CCScript Offset",
prompt=("Specify the hexidecimal offset to which CCScript should compile text.\n"
+ "(The default value is F10000)\n\n"
+ "You should leave this setting alone unless if you really know what you are doing."),
initialvalue="{:x}".format(self.preferences.get_ccscript_offset()).upper())
if ccscript_offset_str:
try:
ccscript_offset = int(ccscript_offset_str, 16)
except:
tkinter.messagebox.showerror(parent=self.root,
title="Error",
message="{} is not a valid hexidecimal number.".format(ccscript_offset_str))
return
self.preferences.set_ccscript_offset(ccscript_offset)
self.preferences.save()
def get_java_exe(self):
return self.preferences["java"] or find_system_java_exe()
def set_java_exe(self):
system_java_exe = find_system_java_exe()
if system_java_exe:
confirm = tkinter.messagebox.askquestion(
"Configure Java",
"CoilSnake has detected Java at the following location:\n\n"
+ system_java_exe + "\n\n"
+ "To use this installation of Java, select \"Yes\".\n\n"
+ "To override and instead use a different version of Java, select \"No\".",
icon="question"
)
if confirm == "yes":
self.preferences["java"] = None
self.preferences.save()
return
tkinter.messagebox.showinfo(
"Select the Java Executable",
"Select a Java executable for CoilSnake to use.\n\n"
"On Windows, it might be called \"javaw.exe\" or \"java.exe\"."
)
java_exe = tkinter.filedialog.askopenfilename(
parent=self.root,
title="Select the Java Executable",
initialfile=(self.preferences["java"] or system_java_exe))
if java_exe:
self.preferences["java"] = java_exe
self.preferences.save()
def save_default_tab(self):
tab_number = self.notebook.index(self.notebook.select())
self.preferences.set_default_tab(tab_number)
self.preferences.save()
# GUI update functions
def disable_all_components(self):
for component in self.components:
component["state"] = DISABLED
def enable_all_components(self):
for component in self.components:
component["state"] = NORMAL
# GUI popup functions
def run_rom(self, entry):
rom_filename = entry.get()
if not self.preferences["emulator"]:
tkinter.messagebox.showerror(parent=self.root,
title="Error",
message="""CoilSnake could not find an emulator.
Please configure your emulator in the Settings menu.""")
elif rom_filename:
Popen([self.preferences["emulator"], rom_filename])
def open_ebprojedit(self, entry=None):
if entry:
project_path = entry.get()
else:
project_path = None
java_exe = self.get_java_exe()
if not java_exe:
tkinter.messagebox.showerror(parent=self.root,
title="Error",
message="""CoilSnake could not find Java.
Please configure Java in the Settings menu.""")
return
command = [java_exe, "-jar", asset_path(["bin", "EbProjEdit.jar"])]
if project_path:
command.append(os.path.join(project_path, PROJECT_FILENAME))
Popen(command)
# Actions
def do_decompile(self, rom_entry, project_entry):
rom = rom_entry.get()
project = project_entry.get()
if rom and project:
if os.path.isdir(project):
confirm = tkinter.messagebox.askquestion("Are You Sure?",
"Are you sure you would like to permanently overwrite the "
+ "contents of the selected output directory?",
icon='warning')
if confirm != "yes":
return
self.save_default_tab()
# Update the GUI
self.console.clear()
self.disable_all_components()
self.progress_bar.clear()
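            # Run the decompile in a worker thread so the Tk mainloop stays
            # responsive while CoilSnake works.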
thread = Thread(target=self._do_decompile_help, args=(rom, project))
thread.start()
def _do_decompile_help(self, rom, project):
try:
decompile_rom(rom_filename=rom, project_path=project, progress_bar=self.progress_bar)
except Exception as inst:
log.debug(format_exc())
log.error(inst)
self.progress_bar.clear()
self.enable_all_components()
def do_compile(self, project_entry, base_rom_entry, rom_entry):
base_rom = base_rom_entry.get()
rom = rom_entry.get()
project = project_entry.get()
if base_rom and rom and project:
self.save_default_tab()
base_rom_rom = Rom()
base_rom_rom.from_file(base_rom)
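            # An unexpanded EarthBound ROM is 24 Mbit (0x300000 bytes); CoilSnake
            # needs the extra space of a 32 Mbit (0x400000 byte) expanded ROM,
            # matching the "Expand ROM to 32 MBit" tool in the Tools menu.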
if base_rom_rom.type == "Earthbound" and len(base_rom_rom) == 0x300000:
confirm = tkinter.messagebox.askquestion("Expand Your Base ROM?",
"You are attempting to compile using a base ROM which is "
"unexpanded. It is likely that this will not succeed, as CoilSnake "
"needs the extra space in an expanded ROM to store additional data."
"\n\n"
"Would you like to expand this base ROM before proceeding? This "
"will permanently overwrite your base ROM.",
icon='warning')
if confirm == "yes":
base_rom_rom.expand(0x400000)
base_rom_rom.to_file(base_rom)
del base_rom_rom
# Update the GUI
self.console.clear()
self.disable_all_components()
self.progress_bar.clear()
log.info("Starting compilation...")
thread = Thread(target=self._do_compile_help, args=(project, base_rom, rom))
thread.start()
def _do_compile_help(self, project, base_rom, rom):
try:
compile_project(project, base_rom, rom,
ccscript_offset=self.preferences.get_ccscript_offset(),
progress_bar=self.progress_bar)
except Exception as inst:
log.debug(format_exc())
log.error(inst)
self.progress_bar.clear()
self.enable_all_components()
def do_upgrade(self, rom_entry, project_entry):
rom = rom_entry.get()
project = project_entry.get()
if rom and project:
confirm = tkinter.messagebox.askquestion("Are You Sure?",
"Are you sure you would like to upgrade this project? This operation "
+ "cannot be undone.\n\n"
+ "It is recommended that you backup your project before proceeding.",
icon='warning')
if confirm != "yes":
return
self.save_default_tab()
# Update the GUI
self.console.clear()
self.disable_all_components()
self.progress_bar.clear()
thread = Thread(target=self._do_upgrade_help, args=(rom, project))
thread.start()
def _do_upgrade_help(self, rom, project):
try:
upgrade_project(project_path=project, base_rom_filename=rom, progress_bar=self.progress_bar)
except Exception as inst:
log.debug(format_exc())
log.error(inst)
self.progress_bar.clear()
self.enable_all_components()
def do_decompile_script(self, rom_entry, project_entry):
rom = rom_entry.get()
project = project_entry.get()
if rom and project:
confirm = tkinter.messagebox.askquestion("Are You Sure?",
"Are you sure you would like to decompile the script into this "
"project? This operation cannot be undone.\n\n"
+ "It is recommended that you backup your project before proceeding.",
icon='warning')
if confirm != "yes":
return
self.save_default_tab()
# Update the GUI
self.console.clear()
self.disable_all_components()
self.progress_bar.cycle_animation_start()
thread = Thread(target=self._do_decompile_script_help, args=(rom, project))
thread.start()
def _do_decompile_script_help(self, rom, project):
try:
decompile_script(rom_filename=rom, project_path=project, progress_bar=self.progress_bar)
except Exception as inst:
log.debug(format_exc())
log.error(inst)
self.progress_bar.cycle_animation_stop()
self.enable_all_components()
def do_patch_rom(self, clean_rom_entry, patched_rom_entry, patch_entry, headered_var):
clean_rom = clean_rom_entry.get()
patched_rom = patched_rom_entry.get()
patch = patch_entry.get()
headered = headered_var.get()
if clean_rom and patched_rom and patch:
self.save_default_tab()
# Update the GUI
self.console.clear()
self.disable_all_components()
self.progress_bar.cycle_animation_start()
thread = Thread(target=self._do_patch_rom_help, args=(clean_rom, patched_rom, patch, headered))
thread.start()
def _do_patch_rom_help(self, clean_rom, patched_rom, patch, headered):
try:
patch_rom(clean_rom, patched_rom, patch, headered, progress_bar=self.progress_bar)
except Exception as inst:
log.debug(format_exc())
log.error(inst)
self.progress_bar.cycle_animation_stop()
self.enable_all_components()
def main(self):
self.create_gui()
self.root.mainloop()
def create_gui(self):
self.root = Tk()
self.root.wm_title("CoilSnake " + information.VERSION)
self.icon = ImageTk.PhotoImage(file=asset_path(["images", "icon.png"]))
self.root.tk.call('wm', 'iconphoto', self.root._w, self.icon)
self.create_menubar()
self.notebook = tkinter.ttk.Notebook(self.root)
decompile_frame = self.create_decompile_frame(self.notebook)
self.notebook.add(decompile_frame, text="Decompile")
compile_frame = self.create_compile_frame(self.notebook)
self.notebook.add(compile_frame, text="Compile")
upgrade_frame = self.create_upgrade_frame(self.notebook)
self.notebook.add(upgrade_frame, text="Upgrade")
decompile_script_frame = self.create_decompile_script_frame(self.notebook)
self.notebook.add(decompile_script_frame, text="Decompile Script")
patcher_patch_frame = self.create_apply_patch_frame(self.notebook)
self.notebook.add(patcher_patch_frame, text="Apply Patch")
#patcher_create_frame = self.create_create_patch_frame(self.notebook)
#self.notebook.add(patcher_create_frame, text="Create Patch")
self.notebook.pack(fill=X)
self.notebook.select(self.preferences.get_default_tab())
self.progress_bar = CoilSnakeGuiProgressBar(self.root, orient=HORIZONTAL, mode='determinate')
self.progress_bar.pack(fill=X)
console_frame = Frame(self.root)
scrollbar = Scrollbar(console_frame)
scrollbar.pack(side=RIGHT, fill=Y)
self.console = ThreadSafeConsole(console_frame, width=80, height=8)
self.console.pack(fill=BOTH, expand=1)
scrollbar.config(command=self.console.yview)
self.console.config(yscrollcommand=scrollbar.set)
console_frame.pack(fill=BOTH, expand=1)
def selectall_text(event):
event.widget.tag_add("sel", "1.0", "end")
self.root.bind_class("Text", "<Control-a>", selectall_text)
def selectall_entry(event):
event.widget.selection_range(0, END)
self.root.bind_class("Entry", "<Control-a>", selectall_entry)
def tab_changed(event):
# Do this so some random element in the tab isn't selected upon tab change
self.notebook.focus()
## Recalculate the height of the notebook depending on the contents of the new tab
# Ensure the dimensions of the widgets are up to date
self.notebook.update_idletasks()
# Get the geometry of the window, so we can reset it later
window_geometry = self.root.winfo_geometry()
# Set the notebook height to the selected tab's requested height
tab_window_name = self.notebook.select()
tab = self.notebook.nametowidget(tab_window_name)
tab_height = tab.winfo_reqheight()
self.notebook.configure(height=tab_height)
# Keeps the window from changing size
self.root.geometry(window_geometry)
self.notebook.bind("<<NotebookTabChanged>>", tab_changed)
self.console_stream = self.console
setup_logging(quiet=False, verbose=False, stream=self.console_stream)
self.refresh_debug_logging()
def create_about_window(self):
self.about_menu = Toplevel(self.root, takefocus=True)
self.about_menu.tk.call('wm', 'iconphoto', self.about_menu._w, self.icon)
photo = ImageTk.PhotoImage(file=asset_path(["images", "logo.png"]))
about_label = Label(self.about_menu, image=photo)
about_label.photo = photo
about_label.pack(side=LEFT, expand=1)
about_right_frame = tkinter.ttk.Frame(self.about_menu)
Label(about_right_frame,
text=coilsnake_about(),
font=("Courier", 11),
anchor="w",
justify="left",
borderwidth=5,
relief=GROOVE).pack(fill=BOTH, expand=1, side=TOP)
about_right_frame.pack(side=LEFT, fill=BOTH, expand=1)
self.about_menu.resizable(False, False)
self.about_menu.title("About CoilSnake {}".format(information.VERSION))
self.about_menu.withdraw()
self.about_menu.transient(self.root)
self.about_menu.protocol('WM_DELETE_WINDOW', self.about_menu.withdraw)
def create_menubar(self):
menubar = Menu(self.root)
# Tools pulldown menu
tools_menu = Menu(menubar, tearoff=0)
tools_menu.add_command(label="EB Project Editor",
command=self.open_ebprojedit)
tools_menu.add_separator()
tools_menu.add_command(label="Expand ROM to 32 MBit",
command=partial(gui_util.expand_rom, self.root))
tools_menu.add_command(label="Expand ROM to 48 MBit",
command=partial(gui_util.expand_rom_ex, self.root))
tools_menu.add_separator()
tools_menu.add_command(label="Add Header to ROM",
command=partial(gui_util.add_header_to_rom, self.root))
tools_menu.add_command(label="Remove Header from ROM",
command=partial(gui_util.strip_header_from_rom, self.root))
menubar.add_cascade(label="Tools", menu=tools_menu)
# Preferences pulldown menu
self.pref_menu = Menu(menubar, tearoff=0)
self.pref_menu.add_command(label="Configure Emulator",
command=self.set_emulator_exe)
self.pref_menu.add_command(label="Configure Java",
command=self.set_java_exe)
self.pref_menu.add_separator()
self.pref_menu.add_command(label="Configure CCScript",
command=self.set_ccscript_offset)
self.pref_menu.add_separator()
self.pref_menu.add_command(label=self.get_debug_mode_command_label(),
command=self.set_debug_mode)
menubar.add_cascade(label="Settings", menu=self.pref_menu)
# Help menu
help_menu = Menu(menubar, tearoff=0)
self.create_about_window()
def show_about_window():
self.about_menu.deiconify()
self.about_menu.lift()
def open_coilsnake_website():
webbrowser.open(information.WEBSITE, 2)
help_menu.add_command(label="About CoilSnake", command=show_about_window)
help_menu.add_command(label="CoilSnake Website", command=open_coilsnake_website)
menubar.add_cascade(label="Help", menu=help_menu)
self.root.config(menu=menubar)
def create_decompile_frame(self, notebook):
self.decompile_fields = dict()
decompile_frame = tkinter.ttk.Frame(notebook)
self.add_title_label_to_frame(text="Decompile a ROM to create a new project.", frame=decompile_frame)
profile_selector_init = self.add_profile_selector_to_frame(frame=decompile_frame,
tab="decompile",
fields=self.decompile_fields)
input_rom_entry = self.add_rom_fields_to_frame(name="ROM", frame=decompile_frame)
self.decompile_fields["rom"] = input_rom_entry
project_entry = self.add_project_fields_to_frame(name="Output Directory", frame=decompile_frame)
self.decompile_fields["output_directory"] = project_entry
profile_selector_init()
def decompile_tmp():
self.do_decompile(input_rom_entry, project_entry)
decompile_button = Button(decompile_frame, text="Decompile", command=decompile_tmp)
decompile_button.pack(fill=X, expand=1)
self.components.append(decompile_button)
return decompile_frame
def create_compile_frame(self, notebook):
self.compile_fields = dict()
compile_frame = tkinter.ttk.Frame(notebook)
self.add_title_label_to_frame(text="Compile a project to create a new ROM.", frame=compile_frame)
profile_selector_init = self.add_profile_selector_to_frame(frame=compile_frame,
tab="compile",
fields=self.compile_fields)
base_rom_entry = self.add_rom_fields_to_frame(name="Base ROM", frame=compile_frame)
self.compile_fields["base_rom"] = base_rom_entry
project_entry = self.add_project_fields_to_frame(name="Project", frame=compile_frame)
self.compile_fields["project"] = project_entry
output_rom_entry = self.add_rom_fields_to_frame(name="Output ROM", frame=compile_frame, save=True)
self.compile_fields["output_rom"] = output_rom_entry
profile_selector_init()
def compile_tmp():
self.do_compile(project_entry, base_rom_entry, output_rom_entry)
compile_button = Button(compile_frame, text="Compile", command=compile_tmp)
compile_button.pack(fill=X, expand=1)
self.components.append(compile_button)
return compile_frame
def create_upgrade_frame(self, notebook):
upgrade_frame = tkinter.ttk.Frame(notebook)
self.add_title_label_to_frame(text="Upgrade a project created using an older version of CoilSnake.",
frame=upgrade_frame)
rom_entry = self.add_rom_fields_to_frame(name="Clean ROM", frame=upgrade_frame)
project_entry = self.add_project_fields_to_frame(name="Project", frame=upgrade_frame)
def upgrade_tmp():
self.preferences["default upgrade rom"] = rom_entry.get()
self.preferences.save()
self.do_upgrade(rom_entry, project_entry)
self.upgrade_button = Button(upgrade_frame, text="Upgrade", command=upgrade_tmp)
self.upgrade_button.pack(fill=X, expand=1)
self.components.append(self.upgrade_button)
if self.preferences["default upgrade rom"]:
set_entry_text(entry=rom_entry,
text=self.preferences["default upgrade rom"])
return upgrade_frame
def create_decompile_script_frame(self, notebook):
decompile_script_frame = tkinter.ttk.Frame(notebook)
self.add_title_label_to_frame(text="Decompile a ROM's script to an already existing project.",
frame=decompile_script_frame)
input_rom_entry = self.add_rom_fields_to_frame(name="ROM", frame=decompile_script_frame)
project_entry = self.add_project_fields_to_frame(name="Project", frame=decompile_script_frame)
def decompile_script_tmp():
self.preferences["default decompile script rom"] = input_rom_entry.get()
self.preferences.save()
self.do_decompile_script(input_rom_entry, project_entry)
button = Button(decompile_script_frame, text="Decompile Script", command=decompile_script_tmp)
button.pack(fill=X, expand=1)
self.components.append(button)
if self.preferences["default decompile script rom"]:
set_entry_text(entry=input_rom_entry,
text=self.preferences["default decompile script rom"])
return decompile_script_frame
def create_apply_patch_frame(self, notebook):
patcher_patch_frame = tkinter.ttk.Frame(notebook)
self.add_title_label_to_frame("Apply an EBP or IPS patch to a ROM", patcher_patch_frame)
clean_rom_entry = self.add_rom_fields_to_frame(name="Clean ROM", frame=patcher_patch_frame, padding_buttons=0)
patched_rom_entry = self.add_rom_fields_to_frame(name="Patched ROM", frame=patcher_patch_frame, save=True,
padding_buttons=0)
patch_entry = self.add_patch_fields_to_frame(name="Patch", frame=patcher_patch_frame)
headered_var = self.add_headered_field_to_frame(name="ROM Header (IPS only)", frame=patcher_patch_frame)
def patch_rom_tmp():
self.preferences["default clean rom"] = clean_rom_entry.get()
self.preferences["default patched rom"] = patched_rom_entry.get()
self.preferences["default patch"] = patch_entry.get()
self.preferences.save()
self.do_patch_rom(clean_rom_entry, patched_rom_entry, patch_entry, headered_var)
button = Button(patcher_patch_frame, text="Patch ROM", command=patch_rom_tmp)
button.pack(fill=X, expand=1)
self.components.append(button)
if self.preferences["default clean rom"]:
set_entry_text(entry=clean_rom_entry,
text=self.preferences["default clean rom"])
if self.preferences["default patched rom"]:
set_entry_text(entry=patched_rom_entry,
text=self.preferences["default patched rom"])
if self.preferences["default patch"]:
set_entry_text(entry=patch_entry,
text=self.preferences["default patch"])
return patcher_patch_frame
def create_create_patch_frame(self, notebook):
patcher_create_frame = tkinter.ttk.Frame(notebook)
self.add_title_label_to_frame("Create EBP patch from a ROM", patcher_create_frame)
clean_rom_entry = self.add_rom_fields_to_frame(name="Clean ROM", frame=patcher_create_frame, padding_buttons=0)
modified_rom_entry = self.add_rom_fields_to_frame(name="Modified ROM", frame=patcher_create_frame,
padding_buttons=0)
patch_entry = self.add_patch_fields_to_frame(name="Patch", frame=patcher_create_frame, save=True)
def create_patch_tmp():
self.preferences["default clean rom"] = clean_rom_entry.get()
self.preferences["default modified rom"] = modified_rom_entry.get()
self.preferences.save()
self.do_create_patch(clean_rom_entry, modified_rom_entry, patch_entry)
button = Button(patcher_create_frame, text="Create Patch", command=create_patch_tmp)
button.pack(fill=X, expand=1)
self.components.append(button)
if self.preferences["default clean rom"]:
set_entry_text(entry=clean_rom_entry,
text=self.preferences["default clean rom"])
if self.preferences["default modified rom"]:
set_entry_text(entry=modified_rom_entry,
text=self.preferences["default patched rom"])
return patcher_create_frame
def add_title_label_to_frame(self, text, frame):
Label(frame, text=text, justify=CENTER).pack(fill=BOTH, expand=1)
def add_profile_selector_to_frame(self, frame, tab, fields):
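        # Builds the profile selector row and returns an initializer closure.
        # Callers invoke the closure only after registering all of their entry
        # fields (see create_decompile_frame), so that selecting the default
        # profile can populate every field.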
profile_frame = tkinter.ttk.Frame(frame)
Label(profile_frame, text="Profile:", width=LABEL_WIDTH).pack(side=LEFT)
def tmp_select(profile_name):
for field_id in fields:
set_entry_text(entry=fields[field_id],
text=self.preferences.get_profile_value(tab, profile_name, field_id))
self.preferences.set_default_profile(tab, profile_name)
self.preferences.save()
profile_var = StringVar(profile_frame)
profile = OptionMenu(profile_frame, profile_var, "", command=tmp_select)
profile.pack(side=LEFT, fill=BOTH, expand=1, ipadx=1)
self.components.append(profile)
def tmp_reload_options(selected_profile_name=None):
profile["menu"].delete(0, END)
for profile_name in sorted(self.preferences.get_profiles(tab)):
if not selected_profile_name:
selected_profile_name = profile_name
profile["menu"].add_command(label=profile_name,
command=tkinter._setit(profile_var, profile_name, tmp_select))
profile_var.set(selected_profile_name)
tmp_select(selected_profile_name)
def tmp_new():
profile_name = tkinter.simpledialog.askstring("New Profile Name", "Specify the name of the new profile.")
if profile_name:
profile_name = profile_name.strip()
if self.preferences.has_profile(tab, profile_name):
tkinter.messagebox.showerror(parent=self.root,
title="Error",
message="A profile with that name already exists.")
return
self.preferences.add_profile(tab, profile_name)
tmp_reload_options(profile_name)
self.preferences.save()
def tmp_save():
profile_name = profile_var.get()
for field_id in fields:
self.preferences.set_profile_value(tab, profile_name, field_id, fields[field_id].get())
self.preferences.save()
def tmp_delete():
if self.preferences.count_profiles(tab) <= 1:
tkinter.messagebox.showerror(parent=self.root,
title="Error",
message="Cannot delete the only profile.")
else:
self.preferences.delete_profile(tab, profile_var.get())
tmp_reload_options()
self.preferences.save()
button = Button(profile_frame, text="Save", width=BUTTON_WIDTH, command=tmp_save)
button.pack(side=LEFT)
self.components.append(button)
button = Button(profile_frame, text="Delete", width=BUTTON_WIDTH, command=tmp_delete)
button.pack(side=LEFT)
self.components.append(button)
button = Button(profile_frame, text="New", width=BUTTON_WIDTH, command=tmp_new)
button.pack(side=LEFT)
self.components.append(button)
profile_frame.pack(fill=X, expand=1)
def tmp_reload_options_and_select_default():
tmp_reload_options(selected_profile_name=self.preferences.get_default_profile(tab))
return tmp_reload_options_and_select_default
def add_rom_fields_to_frame(self, name, frame, save=False, padding_buttons=1):
rom_frame = tkinter.ttk.Frame(frame)
Label(rom_frame, text="{}:".format(name), width=LABEL_WIDTH, justify=RIGHT).pack(side=LEFT)
rom_entry = Entry(rom_frame)
rom_entry.pack(side=LEFT, fill=BOTH, expand=1, padx=1)
self.components.append(rom_entry)
def browse_tmp():
browse_for_rom(self.root, rom_entry, save)
def run_tmp():
self.run_rom(rom_entry)
button = Button(rom_frame, text="Browse...", command=browse_tmp, width=BUTTON_WIDTH)
button.pack(side=LEFT)
self.components.append(button)
button = Button(rom_frame, text="Run", command=run_tmp, width=BUTTON_WIDTH)
button.pack(side=LEFT)
self.components.append(button)
for i in range(padding_buttons):
button = Button(rom_frame, text="", width=BUTTON_WIDTH, state=DISABLED, takefocus=False)
button.pack(side=LEFT)
button.lower()
rom_frame.pack(fill=X)
return rom_entry
def add_project_fields_to_frame(self, name, frame):
project_frame = tkinter.ttk.Frame(frame)
Label(project_frame, text="{}:".format(name), width=LABEL_WIDTH, justify=RIGHT).pack(side=LEFT)
project_entry = Entry(project_frame)
project_entry.pack(side=LEFT, fill=BOTH, expand=1, padx=1)
self.components.append(project_entry)
def browse_tmp():
browse_for_project(self.root, project_entry, save=True)
def open_tmp():
open_folder(project_entry)
def edit_tmp():
self.open_ebprojedit(project_entry)
button = Button(project_frame, text="Browse...", command=browse_tmp, width=BUTTON_WIDTH)
button.pack(side=LEFT)
self.components.append(button)
button = Button(project_frame, text="Open", command=open_tmp, width=BUTTON_WIDTH)
button.pack(side=LEFT)
self.components.append(button)
button = Button(project_frame, text="Edit", command=edit_tmp, width=BUTTON_WIDTH)
button.pack(side=LEFT)
self.components.append(button)
project_frame.pack(fill=X, expand=1)
return project_entry
def add_patch_fields_to_frame(self, name, frame, save=False):
patch_frame = tkinter.ttk.Frame(frame)
Label(
patch_frame, text="{}:".format(name), width=LABEL_WIDTH, justify=RIGHT
).pack(side=LEFT)
patch_entry = Entry(patch_frame)
patch_entry.pack(side=LEFT, fill=BOTH, expand=1, padx=1)
self.components.append(patch_entry)
def browse_tmp():
browse_for_patch(self.root, patch_entry, save)
button = Button(patch_frame, text="Browse...", command=browse_tmp, width=BUTTON_WIDTH)
button.pack(side=LEFT)
self.components.append(button)
button = Button(patch_frame, text="", width=BUTTON_WIDTH, state=DISABLED, takefocus=False)
button.pack(side=LEFT)
button.lower()
patch_frame.pack(fill=BOTH, expand=1)
return patch_entry
def add_headered_field_to_frame(self, name, frame):
patch_frame = tkinter.ttk.Frame(frame)
headered_var = BooleanVar()
headered_check = Checkbutton(patch_frame, text=name, variable=headered_var)
headered_check.pack(
side=LEFT, fill=BOTH, expand=1
)
self.components.append(headered_check)
patch_frame.pack(fill=BOTH, expand=1)
return headered_var
def main():
gui = CoilSnakeGui()
sys.exit(gui.main()) |
rs2/bokeh | refs/heads/master | bokeh/sampledata/stocks.py | 2 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide recorded stock data for the following stocks:
AAPL, FB, GOOG, IBM, MSFT
Each series is available as an attribute on the module (e.g., ``stocks.AAPL``)
and the value is a dictionary with the structure:
.. code-block:: python
AAPL['date'] # list of date string
AAPL['open'] # list of float
AAPL['high'] # list of float
AAPL['low'] # list of float
AAPL['close'] # list of float
AAPL['volume'] # list of int
AAPL['adj_close'] # list of float
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
from bokeh.util.api import public, internal ; public, internal
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import csv
# External imports
# Bokeh imports
from ..util.sampledata import external_path, open_csv
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'AAPL',
'FB',
'GOOG',
'IBM',
'MSFT',
)
#-----------------------------------------------------------------------------
# Public API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Internal API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data(name):
    '''Read the CSV of daily prices for ``name`` into a dict of column lists.
    '''
filename = external_path(name+'.csv')
data = {
'date' : [],
'open' : [],
'high' : [],
'low' : [],
'close' : [],
'volume' : [],
'adj_close': [],
}
with open_csv(filename) as f:
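        # Skip the header row of column names before parsing the data rows.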
next(f)
reader = csv.reader(f, delimiter=str(','))
for row in reader:
date, open_price, high, low, close, volume, adj_close = row
data['date'].append(date)
data['open'].append(float(open_price))
data['high'].append(float(high))
data['low'].append(float(low))
data['close'].append(float(close))
data['volume'].append(int(volume))
data['adj_close'].append(float(adj_close))
return data
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
AAPL = _read_data('AAPL')
FB = _read_data('FB')
GOOG = _read_data('GOOG')
IBM = _read_data('IBM')
MSFT = _read_data('MSFT')
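# Illustrative usage sketch (not part of the original module; the variable
# names below are arbitrary):
#
#   from bokeh.sampledata.stocks import AAPL
#   dates = AAPL['date']        # list of date strings
#   closes = AAPL['adj_close']  # list of floats, aligned with `dates`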
|
ychen820/microblog | refs/heads/master | y/google-cloud-sdk/platform/google_appengine/lib/django-1.5/django/conf/locale/de/formats.py | 107 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
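# Illustrative example (an assumption, not part of this settings file): with
# these separators and grouping active, Django renders
#     1234567.89  ->  '1.234.567,89'
# i.e. groups of NUMBER_GROUPING digits joined by THOUSAND_SEPARATOR, with
# DECIMAL_SEPARATOR before the fractional part.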
|
Acehaidrey/incubator-airflow | refs/heads/master | airflow/utils/net.py | 10 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import socket
from airflow.configuration import conf
def get_host_ip_address():
"""Fetch host ip address."""
return socket.gethostbyname(socket.getfqdn())
def get_hostname():
"""
Fetch the hostname using the callable from the config or using
`socket.getfqdn` as a fallback.
"""
return conf.getimport('core', 'hostname_callable', fallback='socket.getfqdn')()
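# Illustrative usage sketch (an assumption, not part of the original module):
#
#   from airflow.utils.net import get_hostname, get_host_ip_address
#   get_hostname()         # e.g. 'worker-1.example.com', via socket.getfqdn
#   get_host_ip_address()  # e.g. '10.0.0.12'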
|
pokemon4ik2008/py-airfoil | refs/heads/master | manage.py | 2 | import os
from ctypes import *
from async import Scheduler
map_expansion_const=50.0
(map_x_size, map_z_size)=(1.0,1.0)
worker=Scheduler(block=False)
global proxy
proxy=None
global server
server=None
global sound_effects
sound_effects=False
global opt
global scale
scale=3.0
global y_terrain_scale
y_terrain_scale=3.0
global vbo
vbo=False
from time import time
global now, delta, iteration
iteration=0
global lookup_colliders
global cterrain
global collider
if os.name == 'nt':
cterrain = cdll.LoadLibrary("bin\cterrain.dll")
collider = cdll.LoadLibrary("bin\collider.dll")
positions = cdll.LoadLibrary("bin\positions.dll")
else:
cterrain = cdll.LoadLibrary("bin/libcterrain.so")
collider = cdll.LoadLibrary("bin/libcollider.so")
positions = cdll.LoadLibrary("bin/libpositions.so")
class Quat(Structure):
_fields_ = [ ("w", c_float),
("x", c_float),
("y", c_float),
("z", c_float) ]
QuatPtr=POINTER(Quat)
cterrain.xSize.argtypes=[]
cterrain.xSize.restype=c_float
cterrain.zSize.argtypes=[]
cterrain.zSize.restype=c_float
positions.newObj.argtypes=[ ]
positions.newObj.restype=c_void_p
positions.updateCorrection.argtypes=[ c_void_p, c_void_p, c_float ]
positions.updateCorrection.restype=None
positions.getCorrection.argtypes=[ c_void_p, c_float ]
positions.getCorrection.restype=QuatPtr
positions.delObj.argtypes=[ c_void_p ]
positions.delObj.restype=None
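# Hypothetical usage sketch for the `positions` bindings above (`obj` is an
# illustrative name): getCorrection returns a QuatPtr, so the quaternion
# fields are reached through `.contents`:
#
#   obj = positions.newObj()
#   q = positions.getCorrection(obj, 0.0)
#   print(q.contents.w, q.contents.x, q.contents.y, q.contents.z)
#   positions.delObj(obj)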
cterrain.checkCollision.argtypes=[ c_void_p, c_void_p, POINTER(c_uint) ]
cterrain.checkCollision.restype=c_bool
collider.checkCollisionCol.argtypes=[ c_void_p, c_void_p,
c_void_p, POINTER(c_uint),
c_void_p, POINTER(c_uint)]
collider.checkCollisionCol.restype=c_bool
collider.checkCollisionPoint.argtypes=[ c_void_p,
c_double, c_double, c_double,
c_double, c_double, c_double,
c_void_p, POINTER(c_uint) ]
collider.checkCollisionPoint.restype=c_bool
collider.allocTransCols.argtypes=[ c_void_p ]
collider.allocTransCols.restype=c_void_p
collider.load.argtypes=[ c_char_p, c_float, c_uint ]
collider.load.restype=c_void_p
collider.updateColliders.argtypes=[ c_void_p,
c_uint,
c_double, c_double, c_double,
c_double, c_double, c_double, c_double ]
collider.updateColliders.restype=None
collider.allocColliders.argtypes=[ c_uint ]
collider.allocColliders.restype=c_void_p
collider.deleteColliders.argtypes=[ c_void_p ]
collider.deleteColliders.restype=None
collider.deleteTransCols.argtypes=[ c_void_p ]
collider.deleteTransCols.restype=None
collider.loadCollider.argtypes=[ c_void_p, c_uint, c_char_p, c_float ]
collider.loadCollider.restype=None
collider.identifyBigCollider.argtypes=[ c_void_p ]
collider.identifyBigCollider.restype=None
collider.getMeshPath.argtypes=[ c_void_p ]
collider.getMeshPath.restype=c_char_p
def updateTime():
global now, delta, iteration
n=time()
delta=n-now
now=n
iteration+=1
now=time()
delta=0.0
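# Illustrative per-frame usage (an assumption about the game loop elsewhere):
#
#   updateTime()                     # refresh `now`, `delta` and `iteration`
#   # position += velocity * delta  # advance the simulation by the frame delta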
|
repotvsupertuga/tvsupertuga.repository | refs/heads/master | script.module.resolveurl/lib/resolveurl/plugins/movierulz.py | 2 | """
resolveurl site plugin
Copyright (C) 2018 gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __resolve_generic__ import ResolveGeneric
class MovieRulzResolver(ResolveGeneric):
name = "movierulz.pro"
domains = ["movierulz.pro"]
    pattern = r'(?://|\.)(movierulz\.pro)/(?:embed-)?([0-9a-zA-Z-]+)'
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template='https://{host}/embed-{media_id}.html')
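    # Illustrative example (not in the original plugin): for media_id
    # 'abcd1234' the template above resolves to
    # 'https://movierulz.pro/embed-abcd1234.html'.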
|
arpitpanwar/zulip | refs/heads/master | zerver/test_hooks.py | 103 | # -*- coding: utf-8 -*-
from zerver.lib.test_helpers import AuthedTestCase
from zerver.lib.test_runner import slow
from zerver.models import Message
import ujson
class JiraHookTests(AuthedTestCase):
def send_jira_message(self, action):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
url = "/api/v1/external/jira?api_key=%s" % (api_key,)
return self.send_json_payload(email,
url,
self.fixture_data('jira', action),
stream_name="jira",
content_type="application/json")
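    # Each test below follows the same pattern: POST a recorded JIRA payload
    # from the fixtures to the webhook URL, then assert on the subject and
    # content of the Zulip message the integration produces.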
def test_unknown(self):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
url = "/api/v1/external/jira?api_key=%s" % (api_key,)
result = self.client.post(url, self.fixture_data('jira', 'unknown'),
stream_name="jira",
content_type="application/json")
self.assert_json_error(result, 'Unknown JIRA event type')
def test_custom_stream(self):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
action = 'created'
url = "/api/v1/external/jira?api_key=%s&stream=jira_custom" % (api_key,)
msg = self.send_json_payload(email, url,
self.fixture_data('jira', action),
stream_name="jira_custom",
content_type="application/json")
self.assertEqual(msg.subject, "BUG-15: New bug with hook")
self.assertEqual(msg.content, """Leo Franchi **created** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) priority Major, assigned to **no one**:
> New bug with hook""")
def test_created(self):
msg = self.send_jira_message('created')
self.assertEqual(msg.subject, "BUG-15: New bug with hook")
self.assertEqual(msg.content, """Leo Franchi **created** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) priority Major, assigned to **no one**:
> New bug with hook""")
def test_created_assignee(self):
msg = self.send_jira_message('created_assignee')
self.assertEqual(msg.subject, "TEST-4: Test Created Assignee")
self.assertEqual(msg.content, """Leonardo Franchi [Administrator] **created** [TEST-4](https://zulipp.atlassian.net/browse/TEST-4) priority Major, assigned to **Leonardo Franchi [Administrator]**:
> Test Created Assignee""")
def test_commented(self):
msg = self.send_jira_message('commented')
self.assertEqual(msg.subject, "BUG-15: New bug with hook")
self.assertEqual(msg.content, """Leo Franchi **updated** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) (assigned to @**Othello, the Moor of Venice**):
Adding a comment. Oh, what a comment it is!
""")
def test_commented_markup(self):
msg = self.send_jira_message('commented_markup')
self.assertEqual(msg.subject, "TEST-7: Testing of rich text")
self.assertEqual(msg.content, """Leonardo Franchi [Administrator] **updated** [TEST-7](https://zulipp.atlassian.net/browse/TEST-7):\n\n\nThis is a comment that likes to **exercise** a lot of _different_ `conventions` that `jira uses`.\r\n\r\n~~~\n\r\nthis code is not highlighted, but monospaced\r\n\n~~~\r\n\r\n~~~\n\r\ndef python():\r\n print "likes to be formatted"\r\n\n~~~\r\n\r\n[http://www.google.com](http://www.google.com) is a bare link, and [Google](http://www.google.com) is given a title.\r\n\r\nThanks!\r\n\r\n~~~ quote\n\r\nSomeone said somewhere\r\n\n~~~\n""")
def test_deleted(self):
msg = self.send_jira_message('deleted')
self.assertEqual(msg.subject, "BUG-15: New bug with hook")
self.assertEqual(msg.content, "Leo Franchi **deleted** [BUG-15](http://lfranchi.com:8080/browse/BUG-15)!")
def test_reassigned(self):
msg = self.send_jira_message('reassigned')
self.assertEqual(msg.subject, "BUG-15: New bug with hook")
self.assertEqual(msg.content, """Leo Franchi **updated** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) (assigned to @**Othello, the Moor of Venice**):
* Changed assignee from **None** to @**Othello, the Moor of Venice**
""")
def test_reopened(self):
msg = self.send_jira_message('reopened')
self.assertEqual(msg.subject, "BUG-7: More cowbell polease")
self.assertEqual(msg.content, """Leo Franchi **updated** [BUG-7](http://lfranchi.com:8080/browse/BUG-7) (assigned to @**Othello, the Moor of Venice**):
* Changed resolution from **Fixed** to **None**
* Changed status from **Resolved** to **Reopened**
Re-opened yeah!
""")
def test_resolved(self):
msg = self.send_jira_message('resolved')
self.assertEqual(msg.subject, "BUG-13: Refreshing the page loses the user's current posi...")
self.assertEqual(msg.content, """Leo Franchi **updated** [BUG-13](http://lfranchi.com:8080/browse/BUG-13) (assigned to @**Othello, the Moor of Venice**):
* Changed status from **Open** to **Resolved**
* Changed assignee from **None** to @**Othello, the Moor of Venice**
* Changed resolution from **None** to **Fixed**
Fixed it, finally!
""")
def test_workflow_postfunction(self):
msg = self.send_jira_message('postfunction_hook')
self.assertEqual(msg.subject, "TEST-5: PostTest")
self.assertEqual(msg.content, """Leo Franchi [Administrator] **transitioned** [TEST-5](https://lfranchi-test.atlassian.net/browse/TEST-5) from Resolved to Reopened""")
def test_workflow_postfunction_started(self):
msg = self.send_jira_message('postfunction_started')
self.assertEqual(msg.subject, "TEST-7: Gluttony of Post Functions")
self.assertEqual(msg.content, """Leo Franchi [Administrator] **transitioned** [TEST-7](https://lfranchi-test.atlassian.net/browse/TEST-7) from Open to Underway""")
def test_workflow_postfunction_resolved(self):
msg = self.send_jira_message('postfunction_resolved')
self.assertEqual(msg.subject, "TEST-7: Gluttony of Post Functions")
self.assertEqual(msg.content, """Leo Franchi [Administrator] **transitioned** [TEST-7](https://lfranchi-test.atlassian.net/browse/TEST-7) from Open to Resolved""")
def test_mention(self):
msg = self.send_jira_message('watch_mention_updated')
self.assertEqual(msg.subject, "TEST-5: Lunch Decision Needed")
self.assertEqual(msg.content, """Leonardo Franchi [Administrator] **updated** [TEST-5](https://zulipp.atlassian.net/browse/TEST-5) (assigned to @**Othello, the Moor of Venice**):
Making a comment, @**Othello, the Moor of Venice** is watching this issue
""")
def test_priority_updated(self):
msg = self.send_jira_message('updated_priority')
self.assertEqual(msg.subject, "TEST-1: Fix That")
self.assertEqual(msg.content, """Leonardo Franchi [Administrator] **updated** [TEST-1](https://zulipp.atlassian.net/browse/TEST-1) (assigned to **leo@zulip.com**):
* Changed priority from **Critical** to **Major**
""")
class BeanstalkHookTests(AuthedTestCase):
def send_beanstalk_message(self, action):
email = "hamlet@zulip.com"
data = {'payload': self.fixture_data('beanstalk', action)}
return self.send_json_payload(email, "/api/v1/external/beanstalk",
data,
stream_name="commits",
**self.api_auth(email))
def test_git_single(self):
msg = self.send_beanstalk_message('git_singlecommit')
self.assertEqual(msg.subject, "work-test")
self.assertEqual(msg.content, """Leo Franchi [pushed](http://lfranchi-svn.beanstalkapp.com/work-test) to branch master
* [e50508d](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/e50508df): add some stuff
""")
@slow(0.20, "lots of queries")
def test_git_multiple(self):
msg = self.send_beanstalk_message('git_multiple')
self.assertEqual(msg.subject, "work-test")
self.assertEqual(msg.content, """Leo Franchi [pushed](http://lfranchi-svn.beanstalkapp.com/work-test) to branch master
* [edf529c](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/edf529c7): Added new file
* [c2a191b](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/c2a191b9): Filled in new file with some stuff
* [2009815](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/20098158): More work to fix some bugs
""")
def test_svn_addremove(self):
msg = self.send_beanstalk_message('svn_addremove')
self.assertEqual(msg.subject, "svn r3")
self.assertEqual(msg.content, """Leo Franchi pushed [revision 3](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/3):
> Removed a file and added another one!""")
def test_svn_changefile(self):
msg = self.send_beanstalk_message('svn_changefile')
self.assertEqual(msg.subject, "svn r2")
self.assertEqual(msg.content, """Leo Franchi pushed [revision 2](http://lfranchi-svn.beanstalkapp.com/work-test/changesets/2):
> Added some code""")
class GithubV1HookTests(AuthedTestCase):
push_content = """zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) to branch master
* [48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e): Add baz
* [06ebe5f](https://github.com/zbenjamin/zulip-test/commit/06ebe5f472a32f6f31fd2a665f0c7442b69cce72): Baz needs to be longer
* [b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8): Final edit to baz, I swear
"""
def test_spam_branch_is_ignored(self):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
stream = 'commits'
data = ujson.loads(self.fixture_data('github', 'v1_push'))
data.update({'email': email,
'api-key': api_key,
'branches': 'dev,staging',
'stream': stream,
'payload': ujson.dumps(data['payload'])})
url = '/api/v1/external/github'
# We subscribe to the stream in this test, even though
# it won't get written, to avoid failing for the wrong
# reason.
self.subscribe_to_stream(email, stream)
prior_count = Message.objects.count()
result = self.client.post(url, data)
self.assert_json_success(result)
after_count = Message.objects.count()
self.assertEqual(prior_count, after_count)
def basic_test(self, fixture_name, stream_name, expected_subject, expected_content, send_stream=False, branches=None):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
data = ujson.loads(self.fixture_data('github', 'v1_' + fixture_name))
data.update({'email': email,
'api-key': api_key,
'payload': ujson.dumps(data['payload'])})
if send_stream:
data['stream'] = stream_name
if branches is not None:
data['branches'] = branches
msg = self.send_json_payload(email, "/api/v1/external/github",
data,
stream_name=stream_name)
self.assertEqual(msg.subject, expected_subject)
self.assertEqual(msg.content, expected_content)
def test_user_specified_branches(self):
self.basic_test('push', 'my_commits', 'zulip-test', self.push_content,
send_stream=True, branches="master,staging")
def test_user_specified_stream(self):
# Around May 2013 the github webhook started to specify the stream.
# Before then, the stream was hard coded to "commits".
self.basic_test('push', 'my_commits', 'zulip-test', self.push_content,
send_stream=True)
def test_legacy_hook(self):
self.basic_test('push', 'commits', 'zulip-test', self.push_content)
def test_issues_opened(self):
self.basic_test('issues_opened', 'issues',
"zulip-test: issue 5: The frobnicator doesn't work",
"zbenjamin opened [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nI tried changing the widgets, but I got:\r\n\r\nPermission denied: widgets are immutable\n~~~")
def test_issue_comment(self):
self.basic_test('issue_comment', 'issues',
"zulip-test: issue 5: The frobnicator doesn't work",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/issues/5#issuecomment-23374280) on [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nWhoops, I did something wrong.\r\n\r\nI'm sorry.\n~~~")
def test_issues_closed(self):
self.basic_test('issues_closed', 'issues',
"zulip-test: issue 5: The frobnicator doesn't work",
"zbenjamin closed [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)")
def test_pull_request_opened(self):
self.basic_test('pull_request_opened', 'commits',
"zulip-test: pull request 7: Counting is hard.",
"lfaraone opened [pull request 7](https://github.com/zbenjamin/zulip-test/pull/7)\n\n~~~ quote\nOmitted something I think?\n~~~")
def test_pull_request_closed(self):
self.basic_test('pull_request_closed', 'commits',
"zulip-test: pull request 7: Counting is hard.",
"zbenjamin closed [pull request 7](https://github.com/zbenjamin/zulip-test/pull/7)")
def test_pull_request_synchronize(self):
self.basic_test('pull_request_synchronize', 'commits',
"zulip-test: pull request 13: Even more cowbell.",
"zbenjamin synchronized [pull request 13](https://github.com/zbenjamin/zulip-test/pull/13)")
def test_pull_request_comment(self):
self.basic_test('pull_request_comment', 'commits',
"zulip-test: pull request 9: Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [pull request 9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~")
def test_pull_request_comment_user_specified_stream(self):
self.basic_test('pull_request_comment', 'my_commits',
"zulip-test: pull request 9: Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [pull request 9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~",
send_stream=True)
def test_commit_comment(self):
self.basic_test('commit_comment', 'commits',
"zulip-test: commit 7c994678d2f98797d299abed852d3ff9d0834533",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252302)\n\n~~~ quote\nAre we sure this is enough cowbell?\n~~~")
def test_commit_comment_line(self):
self.basic_test('commit_comment_line', 'commits',
"zulip-test: commit 7c994678d2f98797d299abed852d3ff9d0834533",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252307) on `cowbell`, line 13\n\n~~~ quote\nThis line adds /unlucky/ cowbell (because of its line number). We should remove it.\n~~~")
class GithubV2HookTests(AuthedTestCase):
push_content = """zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) to branch master
* [48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e): Add baz
* [06ebe5f](https://github.com/zbenjamin/zulip-test/commit/06ebe5f472a32f6f31fd2a665f0c7442b69cce72): Baz needs to be longer
* [b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8): Final edit to baz, I swear
"""
def test_spam_branch_is_ignored(self):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
stream = 'commits'
data = ujson.loads(self.fixture_data('github', 'v2_push'))
data.update({'email': email,
'api-key': api_key,
'branches': 'dev,staging',
'stream': stream,
'payload': ujson.dumps(data['payload'])})
url = '/api/v1/external/github'
# We subscribe to the stream in this test, even though
# it won't get written, to avoid failing for the wrong
# reason.
self.subscribe_to_stream(email, stream)
prior_count = Message.objects.count()
result = self.client.post(url, data)
self.assert_json_success(result)
after_count = Message.objects.count()
self.assertEqual(prior_count, after_count)
def basic_test(self, fixture_name, stream_name, expected_subject, expected_content, send_stream=False, branches=None):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
data = ujson.loads(self.fixture_data('github', 'v2_' + fixture_name))
data.update({'email': email,
'api-key': api_key,
'payload': ujson.dumps(data['payload'])})
if send_stream:
data['stream'] = stream_name
if branches is not None:
data['branches'] = branches
msg = self.send_json_payload(email, "/api/v1/external/github",
data,
stream_name=stream_name)
self.assertEqual(msg.subject, expected_subject)
self.assertEqual(msg.content, expected_content)
def test_user_specified_branches(self):
self.basic_test('push', 'my_commits', 'zulip-test', self.push_content,
send_stream=True, branches="master,staging")
def test_user_specified_stream(self):
# Around May 2013 the github webhook started to specify the stream.
# Before then, the stream was hard coded to "commits".
self.basic_test('push', 'my_commits', 'zulip-test', self.push_content,
send_stream=True)
def test_legacy_hook(self):
self.basic_test('push', 'commits', 'zulip-test', self.push_content)
def test_issues_opened(self):
self.basic_test('issues_opened', 'issues',
"zulip-test: issue 5: The frobnicator doesn't work",
"zbenjamin opened [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nI tried changing the widgets, but I got:\r\n\r\nPermission denied: widgets are immutable\n~~~")
def test_issue_comment(self):
self.basic_test('issue_comment', 'issues',
"zulip-test: issue 5: The frobnicator doesn't work",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/issues/5#issuecomment-23374280) on [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nWhoops, I did something wrong.\r\n\r\nI'm sorry.\n~~~")
def test_issues_closed(self):
self.basic_test('issues_closed', 'issues',
"zulip-test: issue 5: The frobnicator doesn't work",
"zbenjamin closed [issue 5](https://github.com/zbenjamin/zulip-test/issues/5)")
def test_pull_request_opened(self):
self.basic_test('pull_request_opened', 'commits',
"zulip-test: pull request 7: Counting is hard.",
"lfaraone opened [pull request 7](https://github.com/zbenjamin/zulip-test/pull/7)\n\n~~~ quote\nOmitted something I think?\n~~~")
def test_pull_request_closed(self):
self.basic_test('pull_request_closed', 'commits',
"zulip-test: pull request 7: Counting is hard.",
"zbenjamin closed [pull request 7](https://github.com/zbenjamin/zulip-test/pull/7)")
def test_pull_request_synchronize(self):
self.basic_test('pull_request_synchronize', 'commits',
"zulip-test: pull request 13: Even more cowbell.",
"zbenjamin synchronized [pull request 13](https://github.com/zbenjamin/zulip-test/pull/13)")
def test_pull_request_comment(self):
self.basic_test('pull_request_comment', 'commits',
"zulip-test: pull request 9: Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [pull request 9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~")
def test_pull_request_comment_user_specified_stream(self):
self.basic_test('pull_request_comment', 'my_commits',
"zulip-test: pull request 9: Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [pull request 9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~",
send_stream=True)
def test_commit_comment(self):
self.basic_test('commit_comment', 'commits',
"zulip-test: commit 7c994678d2f98797d299abed852d3ff9d0834533",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252302)\n\n~~~ quote\nAre we sure this is enough cowbell?\n~~~")
def test_commit_comment_line(self):
self.basic_test('commit_comment_line', 'commits',
"zulip-test: commit 7c994678d2f98797d299abed852d3ff9d0834533",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252307) on `cowbell`, line 13\n\n~~~ quote\nThis line adds /unlucky/ cowbell (because of its line number). We should remove it.\n~~~")
class PivotalV3HookTests(AuthedTestCase):
def send_pivotal_message(self, name):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
return self.send_json_payload(email, "/api/v1/external/pivotal?api_key=%s&stream=%s" % (api_key,"pivotal"),
self.fixture_data('pivotal', name, file_type='xml'),
stream_name="pivotal",
content_type="application/xml")
def test_accepted(self):
msg = self.send_pivotal_message('accepted')
self.assertEqual(msg.subject, 'My new Feature story')
self.assertEqual(msg.content, 'Leo Franchi accepted "My new Feature story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
def test_commented(self):
msg = self.send_pivotal_message('commented')
self.assertEqual(msg.subject, 'Comment added')
self.assertEqual(msg.content, 'Leo Franchi added comment: "FIX THIS NOW" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
def test_created(self):
msg = self.send_pivotal_message('created')
self.assertEqual(msg.subject, 'My new Feature story')
self.assertEqual(msg.content, 'Leo Franchi added "My new Feature story" \
(unscheduled feature):\n\n~~~ quote\nThis is my long description\n~~~\n\n \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
def test_delivered(self):
msg = self.send_pivotal_message('delivered')
self.assertEqual(msg.subject, 'Another new story')
self.assertEqual(msg.content, 'Leo Franchi delivered "Another new story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
def test_finished(self):
msg = self.send_pivotal_message('finished')
self.assertEqual(msg.subject, 'Another new story')
self.assertEqual(msg.content, 'Leo Franchi finished "Another new story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
def test_moved(self):
msg = self.send_pivotal_message('moved')
self.assertEqual(msg.subject, 'My new Feature story')
self.assertEqual(msg.content, 'Leo Franchi edited "My new Feature story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
def test_rejected(self):
msg = self.send_pivotal_message('rejected')
self.assertEqual(msg.subject, 'Another new story')
self.assertEqual(msg.content, 'Leo Franchi rejected "Another new story" with comments: \
"Not good enough, sorry" [(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
def test_started(self):
msg = self.send_pivotal_message('started')
self.assertEqual(msg.subject, 'Another new story')
self.assertEqual(msg.content, 'Leo Franchi started "Another new story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
def test_created_estimate(self):
msg = self.send_pivotal_message('created_estimate')
self.assertEqual(msg.subject, 'Another new story')
self.assertEqual(msg.content, 'Leo Franchi added "Another new story" \
(unscheduled feature worth 2 story points):\n\n~~~ quote\nSome loong description\n~~~\n\n \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48278289)')
def test_type_changed(self):
msg = self.send_pivotal_message('type_changed')
self.assertEqual(msg.subject, 'My new Feature story')
self.assertEqual(msg.content, 'Leo Franchi edited "My new Feature story" \
[(view)](https://www.pivotaltracker.com/s/projects/807213/stories/48276573)')
class PivotalV5HookTests(AuthedTestCase):
def send_pivotal_message(self, name):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
return self.send_json_payload(email, "/api/v1/external/pivotal?api_key=%s&stream=%s" % (api_key,"pivotal"),
self.fixture_data('pivotal', "v5_" + name, file_type='json'),
stream_name="pivotal",
content_type="application/xml")
def test_accepted(self):
msg = self.send_pivotal_message('accepted')
self.assertEqual(msg.subject, '#63486316: Story of the Year')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* state changed from **unstarted** to **accepted**
""")
def test_commented(self):
msg = self.send_pivotal_message('commented')
self.assertEqual(msg.subject, '#63486316: Story of the Year')
self.assertEqual(msg.content, """Leo Franchi added a comment to [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
~~~quote
A comment on the story
~~~""")
def test_created(self):
msg = self.send_pivotal_message('created')
self.assertEqual(msg.subject, '#63495662: Story that I created')
self.assertEqual(msg.content, """Leo Franchi created bug: [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story that I created](http://www.pivotaltracker.com/story/show/63495662)
* State is **unscheduled**
* Description is
> What a description""")
def test_delivered(self):
msg = self.send_pivotal_message('delivered')
self.assertEqual(msg.subject, '#63486316: Story of the Year')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* state changed from **accepted** to **delivered**
""")
def test_finished(self):
msg = self.send_pivotal_message('finished')
self.assertEqual(msg.subject, '#63486316: Story of the Year')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* state changed from **delivered** to **accepted**
""")
def test_moved(self):
msg = self.send_pivotal_message('moved')
self.assertEqual(msg.subject, '#63496066: Pivotal Test')
self.assertEqual(msg.content, """Leo Franchi moved [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Pivotal Test](http://www.pivotaltracker.com/story/show/63496066) from **unstarted** to **unscheduled**""")
def test_rejected(self):
msg = self.send_pivotal_message('rejected')
self.assertEqual(msg.subject, '#63486316: Story of the Year')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Story of the Year](http://www.pivotaltracker.com/story/show/63486316):
* Comment added:
~~~quote
Try again next time
~~~
* state changed from **delivered** to **rejected**
""")
def test_started(self):
msg = self.send_pivotal_message('started')
self.assertEqual(msg.subject, '#63495972: Fresh Story')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Fresh Story](http://www.pivotaltracker.com/story/show/63495972):
* state changed from **unstarted** to **started**
""")
def test_created_estimate(self):
msg = self.send_pivotal_message('created_estimate')
self.assertEqual(msg.subject, '#63496066: Pivotal Test')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Pivotal Test](http://www.pivotaltracker.com/story/show/63496066):
* estimate is now **3 points**
""")
def test_type_changed(self):
msg = self.send_pivotal_message('type_changed')
self.assertEqual(msg.subject, '#63496066: Pivotal Test')
self.assertEqual(msg.content, """Leo Franchi updated [Hard Code](https://www.pivotaltracker.com/s/projects/807213): [Pivotal Test](http://www.pivotaltracker.com/story/show/63496066):
* estimate changed from 3 to **0 points**
* type changed from **feature** to **bug**
""")
class NewRelicHookTests(AuthedTestCase):
def send_new_relic_message(self, name):
email = "hamlet@zulip.com"
api_key = self.get_api_key(email)
return self.send_json_payload(email, "/api/v1/external/newrelic?api_key=%s&stream=%s" % (api_key,"newrelic"),
self.fixture_data('newrelic', name, file_type='txt'),
stream_name="newrelic",
content_type="application/x-www-form-urlencoded")
def test_alert(self):
msg = self.send_new_relic_message('alert')
self.assertEqual(msg.subject, "Apdex score fell below critical level of 0.90")
self.assertEqual(msg.content, 'Alert opened on [application name]: \
Apdex score fell below critical level of 0.90\n\
[View alert](https://rpm.newrelc.com/accounts/[account_id]/applications/[application_id]/incidents/[incident_id])')
def test_deployment(self):
msg = self.send_new_relic_message('deployment')
self.assertEqual(msg.subject, 'Test App deploy')
self.assertEqual(msg.content, '`1242` deployed by **Zulip Test**\n\
Description sent via curl\n\nChangelog string')
class StashHookTests(AuthedTestCase):
def test_stash_message(self):
"""
Messages are generated by Stash on a `git push`.
The subject describes the repo and Stash "project". The
content describes the commits pushed.
"""
email = "hamlet@zulip.com"
msg = self.send_json_payload(
email, "/api/v1/external/stash?stream=commits",
self.fixture_data("stash", "push", file_type="json"),
stream_name="commits",
content_type="application/x-www-form-urlencoded",
**self.api_auth(email))
self.assertEqual(msg.subject, u"Secret project/Operation unicorn: master")
self.assertEqual(msg.content, """`f259e90` was pushed to **master** in **Secret project/Operation unicorn** with:
* `f259e90`: Updating poms ...""")
class FreshdeskHookTests(AuthedTestCase):
def generate_webhook_response(self, fixture):
"""
Helper function to handle the webhook boilerplate.
"""
email = "hamlet@zulip.com"
return self.send_json_payload(
email, "/api/v1/external/freshdesk?stream=freshdesk",
self.fixture_data("freshdesk", fixture, file_type="json"),
stream_name="freshdesk",
content_type="application/x-www-form-urlencoded",
**self.api_auth(email))
def test_ticket_creation(self):
"""
Messages are generated on ticket creation through Freshdesk's
"Dispatch'r" service.
"""
msg = self.generate_webhook_response("ticket_created")
self.assertEqual(msg.subject, u"#11: Test ticket subject ☃")
self.assertEqual(msg.content, u"""Requester ☃ Bob <requester-bob@example.com> created [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11):
~~~ quote
Test ticket description ☃.
~~~
Type: **Incident**
Priority: **High**
Status: **Pending**""")
def test_status_change(self):
"""
Messages are generated when a ticket's status changes through
Freshdesk's "Observer" service.
"""
msg = self.generate_webhook_response("status_changed")
self.assertEqual(msg.subject, u"#11: Test ticket subject ☃")
self.assertEqual(msg.content, """Requester Bob <requester-bob@example.com> updated [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11):
Status: **Resolved** => **Waiting on Customer**""")
def test_priority_change(self):
"""
Messages are generated when a ticket's priority changes through
Freshdesk's "Observer" service.
"""
msg = self.generate_webhook_response("priority_changed")
self.assertEqual(msg.subject, u"#11: Test ticket subject")
self.assertEqual(msg.content, """Requester Bob <requester-bob@example.com> updated [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11):
Priority: **High** => **Low**""")
def note_change(self, fixture, note_type):
"""
Messages are generated when a note gets added to a ticket through
Freshdesk's "Observer" service.
"""
msg = self.generate_webhook_response(fixture)
self.assertEqual(msg.subject, u"#11: Test ticket subject")
self.assertEqual(msg.content, """Requester Bob <requester-bob@example.com> added a %s note to [ticket #11](http://test1234zzz.freshdesk.com/helpdesk/tickets/11).""" % (note_type,))
def test_private_note_change(self):
self.note_change("private_note", "private")
def test_public_note_change(self):
self.note_change("public_note", "public")
def test_inline_image(self):
"""
Freshdesk sends us descriptions as HTML, so we have to make the
descriptions Zulip markdown-friendly while still doing our best to
preserve links and images.
"""
msg = self.generate_webhook_response("inline_images")
self.assertEqual(msg.subject, u"#12: Not enough ☃ guinea pigs")
self.assertIn("[guinea_pig.png](http://cdn.freshdesk.com/data/helpdesk/attachments/production/12744808/original/guinea_pig.png)", msg.content)
class ZenDeskHookTests(AuthedTestCase):
def generate_webhook_response(self, ticket_title='User can\'t login',
ticket_id=54, message='Message',
stream_name='zendesk'):
data = {
'ticket_title': ticket_title,
'ticket_id': ticket_id,
'message': message,
'stream': stream_name,
}
email = 'hamlet@zulip.com'
self.subscribe_to_stream(email, stream_name)
result = self.client.post('/api/v1/external/zendesk', data,
**self.api_auth(email))
self.assert_json_success(result)
# Check the correct message was sent
msg = Message.objects.filter().order_by('-id')[0]
self.assertEqual(msg.sender.email, email)
return msg
def test_subject(self):
msg = self.generate_webhook_response(ticket_id=4, ticket_title="Test ticket")
self.assertEqual(msg.subject, '#4: Test ticket')
def test_long_subject(self):
msg = self.generate_webhook_response(ticket_id=4, ticket_title="Test ticket" + '!' * 80)
self.assertEqual(msg.subject, '#4: Test ticket' + '!' * 42 + '...')
def test_content(self):
msg = self.generate_webhook_response(message='New comment:\n> It is better\n* here')
self.assertEqual(msg.content, 'New comment:\n> It is better\n* here')
class PagerDutyHookTests(AuthedTestCase):
def send_webhook(self, data, stream_name, topic=None):
email = 'hamlet@zulip.com'
self.subscribe_to_stream(email, stream_name)
api_key = self.get_api_key(email)
if topic:
url = '/api/v1/external/pagerduty?api_key=%s&stream=%s&topic=%s' % (api_key, stream_name, topic)
else:
url = '/api/v1/external/pagerduty?api_key=%s&stream=%s' % (api_key, stream_name)
result = self.client.post(url, ujson.dumps(data), content_type="application/json")
self.assert_json_success(result)
# Check the correct message was sent
msg = Message.objects.filter().order_by('-id')[0]
self.assertEqual(msg.sender.email, email)
return msg
def test_trigger(self):
data = ujson.loads(self.fixture_data('pagerduty', 'trigger'))
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'incident 3')
self.assertEqual(
msg.content,
':unhealthy_heart: Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) triggered by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) and assigned to [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>foo'
)
def test_unacknowledge(self):
data = ujson.loads(self.fixture_data('pagerduty', 'unacknowledge'))
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'incident 3')
self.assertEqual(
msg.content,
':unhealthy_heart: Incident [3](https://zulip-test.pagerduty.com/incidents/P140S4Y) unacknowledged by [Test service](https://zulip-test.pagerduty.com/services/PIL5CUQ) and assigned to [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>foo'
)
def test_resolved(self):
data = ujson.loads(self.fixture_data('pagerduty', 'resolved'))
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'incident 1')
self.assertEqual(
msg.content,
':healthy_heart: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) resolved by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
)
def test_auto_resolved(self):
data = ujson.loads(self.fixture_data('pagerduty', 'auto_resolved'))
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'incident 2')
self.assertEqual(
msg.content,
':healthy_heart: Incident [2](https://zulip-test.pagerduty.com/incidents/PX7K9J2) resolved\n\n>new'
)
def test_acknowledge(self):
data = ujson.loads(self.fixture_data('pagerduty', 'acknowledge'))
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'incident 1')
self.assertEqual(
msg.content,
':average_heart: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
)
def test_no_subject(self):
data = ujson.loads(self.fixture_data('pagerduty', 'mp_fail'))
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'incident 48219')
self.assertEqual(
msg.content,
u':healthy_heart: Incident [48219](https://dropbox.pagerduty.com/incidents/PJKGZF9) resolved\n\n>mp_error_block_down_critical\u2119\u01b4'
)
def test_explicit_subject(self):
data = ujson.loads(self.fixture_data('pagerduty', 'acknowledge'))
msg = self.send_webhook(data, 'pagerduty', topic="my+cool+topic")
self.assertEqual(msg.subject, 'my cool topic')
self.assertEqual(
msg.content,
':average_heart: Incident [1](https://zulip-test.pagerduty.com/incidents/PO1XIJ5) acknowledged by [armooo@](https://zulip-test.pagerduty.com/users/POBCFRJ)\n\n>It is on fire'
)
def test_bad_message(self):
data = {'messages': [{'type': 'incident.triggered'}]}
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'pagerduty')
self.assertEqual(
msg.content,
'Unknown pagerduty message\n``` py\n{u\'type\': u\'incident.triggered\'}\n```'
)
def test_unknown_message_type(self):
data = {'messages': [{'type': 'foo'}]}
msg = self.send_webhook(data, 'pagerduty')
self.assertEqual(msg.subject, 'pagerduty')
self.assertEqual(
msg.content,
'Unknown pagerduty message\n``` py\n{u\'type\': u\'foo\'}\n```'
)
|
efortuna/AndroidSDKClone | refs/heads/master | ndk_experimental/prebuilt/linux-x86_64/lib/python2.7/cgi.py | 64 | #! /usr/local/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# XXX Perhaps there should be a slimmed version that doesn't contain
# all those backwards compatible and debugging classes and functions?
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from operator import attrgetter
import sys
import os
import UserDict
import urlparse
from warnings import filterwarnings, catch_warnings, warn
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
filterwarnings("ignore", ".*rfc822 has been removed",
DeprecationWarning)
import mimetools
import rfc822
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["MiniFieldStorage", "FieldStorage", "FormContentDict",
"SvFormContentDict", "InterpFormContentDict", "FormContent",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global logfp, log
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except IOError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
log = initlog # The current logging function
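# Illustrative usage of the logging hooks (the log path is hypothetical):
#
#     import cgi
#     cgi.logfile = "/tmp/cgi.log"  # must be writable by the web server
#     cgi.log("%s: %s", "a", "b")   # appends "a: b\n" once the file opens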
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError, 'Maximum content length exceeded'
qs = fp.read(clength)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
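# Illustrative usage sketch (not part of the original module); the field
# name 'color' is hypothetical:
#
#     import cgi
#     form_data = cgi.parse()              # reads QUERY_STRING or the POST body
#     colors = form_data.get('color', [])  # every value comes back as a list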
# These query string parsing functions are now thin wrappers around urlparse;
# they are kept here only to maintain backward compatibility.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urlparse.parse_qs instead",
PendingDeprecationWarning, 2)
return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urlparse.parse_qsl instead",
PendingDeprecationWarning, 2)
return urlparse.parse_qsl(qs, keep_blank_values, strict_parsing)
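# Illustrative example of the preferred replacement (assumed input):
#
#     urlparse.parse_qsl('a=1&b=2')  # -> [('a', '1'), ('b', '2')]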
def parse_multipart(fp, pdict):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
header.
XXX This does not parse nested multipart parts -- use FieldStorage for
that.
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
"""
boundary = ""
if 'boundary' in pdict:
boundary = pdict['boundary']
if not valid_boundary(boundary):
raise ValueError, ('Invalid boundary in multipart form: %r'
% (boundary,))
nextpart = "--" + boundary
lastpart = "--" + boundary + "--"
partdict = {}
terminator = ""
while terminator != lastpart:
bytes = -1
data = None
if terminator:
# At start of next part. Read headers first.
headers = mimetools.Message(fp)
clength = headers.getheader('content-length')
if clength:
try:
bytes = int(clength)
except ValueError:
pass
if bytes > 0:
if maxlen and bytes > maxlen:
raise ValueError, 'Maximum content length exceeded'
data = fp.read(bytes)
else:
data = ""
# Read lines until end of part.
lines = []
while 1:
line = fp.readline()
if not line:
terminator = lastpart # End outer loop
break
if line[:2] == "--":
terminator = line.strip()
if terminator in (nextpart, lastpart):
break
lines.append(line)
# Done with part.
if data is None:
continue
if bytes < 0:
if lines:
# Strip final line terminator
line = lines[-1]
if line[-2:] == "\r\n":
line = line[:-2]
elif line[-1:] == "\n":
line = line[:-1]
lines[-1] = line
data = "".join(lines)
line = headers['content-disposition']
if not line:
continue
key, params = parse_header(line)
if key != 'form-data':
continue
if 'name' in params:
name = params['name']
else:
continue
if name in partdict:
partdict[name].append(data)
else:
partdict[name] = [data]
return partdict
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.next()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
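# Illustrative example (assumed input): parse_header() splits the main
# value from its parameters, e.g.
#
#     parse_header('text/html; charset="utf-8"')
#     # -> ('text/html', {'charset': 'utf-8'})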
# Classes for field storage
# =========================
class MiniFieldStorage:
"""Like FieldStorage, for use when no file uploads are possible."""
# Dummy attributes
filename = None
list = None
type = None
file = None
type_options = {}
disposition = None
disposition_options = {}
headers = {}
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
# self.file = StringIO(value)
def __repr__(self):
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
    The items are either a Python list (if there are multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
file: the file(-like) object from which you can read the data;
        None if the data is stored as a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes rfc822.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
"""
def __init__(self, fp=None, headers=None, outerboundary="",
environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin
(not used when the request method is GET)
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
fp = StringIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
self.fp = fp or sys.stdin
self.headers = headers
self.outerboundary = outerboundary
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
self.innerboundary = ""
if 'boundary' in pdict:
self.innerboundary = pdict['boundary']
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError, 'Maximum content length exceeded'
self.length = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
def __iter__(self):
return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError, name
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError, "not indexable"
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError, key
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if type(value) is type([]):
return map(attrgetter('value'), value)
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if type(value) is type([]):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if type(value) is type([]):
return map(attrgetter('value'), value)
else:
return [value.value]
else:
return []
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError, "not indexable"
return list(set(item.name for item in self.list))
def has_key(self, key):
"""Dictionary style has_key() method."""
if self.list is None:
raise TypeError, "not indexable"
return any(item.name == key for item in self.list)
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError, "not indexable"
return any(item.name == key for item in self.list)
def __len__(self):
"""Dictionary style len(x) support."""
return len(self.keys())
def __nonzero__(self):
return bool(self.list)
def read_urlencoded(self):
"""Internal: read data in query string format."""
qs = self.fp.read(self.length)
if self.qs_on_post:
qs += '&' + self.qs_on_post
self.list = list = []
for key, value in urlparse.parse_qsl(qs, self.keep_blank_values,
self.strict_parsing):
list.append(MiniFieldStorage(key, value))
self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError, 'Invalid boundary in multipart form: %r' % (ib,)
self.list = []
if self.qs_on_post:
for key, value in urlparse.parse_qsl(self.qs_on_post,
self.keep_blank_values, self.strict_parsing):
self.list.append(MiniFieldStorage(key, value))
FieldStorageClass = None
klass = self.FieldStorageClass or self.__class__
part = klass(self.fp, {}, ib,
environ, keep_blank_values, strict_parsing)
# Throw first part away
while not part.done:
headers = rfc822.Message(self.fp)
part = klass(self.fp, headers, ib,
environ, keep_blank_values, strict_parsing)
self.list.append(part)
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
def read_binary(self):
"""Internal: read binary data."""
self.file = self.make_file('b')
todo = self.length
if todo >= 0:
while todo > 0:
data = self.fp.read(min(todo, self.bufsize))
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def read_lines(self):
"""Internal: read lines until EOF or outerboundary."""
self.file = self.__file = StringIO()
if self.outerboundary:
self.read_lines_to_outerboundary()
else:
self.read_lines_to_eof()
def __write(self, line):
if self.__file is not None:
if self.__file.tell() + len(line) > 1000:
self.file = self.make_file('')
self.file.write(self.__file.getvalue())
self.__file = None
self.file.write(line)
def read_lines_to_eof(self):
"""Internal: read lines until EOF."""
while 1:
line = self.fp.readline(1<<16)
if not line:
self.done = -1
break
self.__write(line)
def read_lines_to_outerboundary(self):
"""Internal: read lines until outerboundary."""
next = "--" + self.outerboundary
last = next + "--"
delim = ""
last_line_lfend = True
while 1:
line = self.fp.readline(1<<16)
if not line:
self.done = -1
break
if line[:2] == "--" and last_line_lfend:
strippedline = line.strip()
if strippedline == next:
break
if strippedline == last:
self.done = 1
break
odelim = delim
if line[-2:] == "\r\n":
delim = "\r\n"
line = line[:-2]
last_line_lfend = True
elif line[-1] == "\n":
delim = "\n"
line = line[:-1]
last_line_lfend = True
else:
delim = ""
last_line_lfend = False
self.__write(odelim + line)
def skip_lines(self):
"""Internal: skip lines until outer boundary if defined."""
if not self.outerboundary or self.done:
return
next = "--" + self.outerboundary
last = next + "--"
last_line_lfend = True
while 1:
line = self.fp.readline(1<<16)
if not line:
self.done = -1
break
if line[:2] == "--" and last_line_lfend:
strippedline = line.strip()
if strippedline == next:
break
if strippedline == last:
self.done = 1
break
last_line_lfend = line.endswith('\n')
def make_file(self, binary=None):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
The 'binary' argument is unused -- the file is always opened
in binary mode.
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
import tempfile
return tempfile.TemporaryFile("w+b")
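# Illustrative usage sketch for FieldStorage (not part of the module); the
# field names 'upload' and 'color' are hypothetical and assume a form that
# submits each of them at most once:
#
#     form = FieldStorage()
#     if 'upload' in form and form['upload'].filename:
#         data = form['upload'].file.read()     # file upload: stream the data
#     color = form.getfirst('color', 'black')   # plain field with a default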
# Backwards Compatibility Classes
# ===============================
class FormContentDict(UserDict.UserDict):
"""Form content as dictionary with a list of values per field.
form = FormContentDict()
form[key] -> [value, value, ...]
key in form -> Boolean
form.keys() -> [key, key, ...]
form.values() -> [[val, val, ...], [val, val, ...], ...]
form.items() -> [(key, [val, val, ...]), (key, [val, val, ...]), ...]
form.dict == {key: [val, val, ...], ...}
"""
def __init__(self, environ=os.environ, keep_blank_values=0, strict_parsing=0):
self.dict = self.data = parse(environ=environ,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing)
self.query_string = environ['QUERY_STRING']
class SvFormContentDict(FormContentDict):
"""Form content as dictionary expecting a single value per field.
If you only expect a single value for each field, then form[key]
will return that single value. It will raise an IndexError if
that expectation is not true. If you expect a field to have
    possibly multiple values, then you can use form.getlist(key) to
get all of the values. values() and items() are a compromise:
they return single strings where there is a single value, and
lists of strings otherwise.
"""
def __getitem__(self, key):
if len(self.dict[key]) > 1:
raise IndexError, 'expecting a single value'
return self.dict[key][0]
def getlist(self, key):
return self.dict[key]
def values(self):
result = []
for value in self.dict.values():
if len(value) == 1:
result.append(value[0])
else: result.append(value)
return result
def items(self):
result = []
for key, value in self.dict.items():
if len(value) == 1:
result.append((key, value[0]))
else: result.append((key, value))
return result
class InterpFormContentDict(SvFormContentDict):
"""This class is present for backwards compatibility only."""
def __getitem__(self, key):
v = SvFormContentDict.__getitem__(self, key)
if v[0] in '0123456789+-.':
try: return int(v)
except ValueError:
try: return float(v)
except ValueError: pass
return v.strip()
def values(self):
result = []
for key in self.keys():
try:
result.append(self[key])
except IndexError:
result.append(self.dict[key])
return result
def items(self):
result = []
for key in self.keys():
try:
result.append((key, self[key]))
except IndexError:
result.append((key, self.dict[key]))
return result
class FormContent(FormContentDict):
"""This class is present for backwards compatibility only."""
def values(self, key):
        if key in self.dict: return self.dict[key]
else: return None
def indexed_value(self, key, location):
if key in self.dict:
if len(self.dict[key]) > location:
return self.dict[key][location]
else: return None
else: return None
def value(self, key):
if key in self.dict: return self.dict[key][0]
else: return None
def length(self, key):
return len(self.dict[key])
def stripped(self, key):
if key in self.dict: return self.dict[key][0].strip()
else: return None
def pars(self):
return self.dict
# Test/debug code
# ===============
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print "Content-type: text/html"
print
sys.stderr = sys.stdout
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
print_environ_usage()
def f():
exec "testing print_exception() -- <I>italics?</I>"
def g(f=f):
f()
print "<H3>What follows is a test, not an actual exception:</H3>"
g()
except:
print_exception()
print "<H1>Second try with a small maxlen...</H1>"
global maxlen
maxlen = 50
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
except:
print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print
print "<H3>Traceback (most recent call last):</H3>"
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print "<PRE>%s<B>%s</B></PRE>" % (
escape("".join(list[:-1])),
escape(list[-1]),
)
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = environ.keys()
keys.sort()
print
print "<H3>Shell Environment:</H3>"
print "<DL>"
for key in keys:
print "<DT>", escape(key), "<DD>", escape(environ[key])
print "</DL>"
print
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = form.keys()
keys.sort()
print
print "<H3>Form Contents:</H3>"
if not keys:
print "<P>No form fields."
print "<DL>"
for key in keys:
print "<DT>" + escape(key) + ":",
value = form[key]
print "<i>" + escape(repr(type(value))) + "</i>"
print "<DD>" + escape(repr(value))
print "</DL>"
print
def print_directory():
"""Dump the current directory as HTML."""
print
print "<H3>Current Working Directory:</H3>"
try:
pwd = os.getcwd()
except os.error, msg:
print "os.error:", escape(str(msg))
else:
print escape(pwd)
print
def print_arguments():
print
print "<H3>Command Line Arguments:</H3>"
print
print sys.argv
print
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print """
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
"""
# Utilities
# =========
def escape(s, quote=None):
'''Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true, the quotation mark character (")
is also translated.'''
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
return s
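# Illustrative examples (assumed inputs):
#
#     escape('<a href="x">&')         # -> '&lt;a href="x"&gt;&amp;'
#     escape('"quoted"', quote=True)  # -> '&quot;quoted&quot;'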
def valid_boundary(s, _vb_pattern="^[ -~]{0,200}[!-~]$"):
import re
return re.match(_vb_pattern, s)
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
test()
|
vadimtk/chrome4sdp | refs/heads/master | build/toolchain/mac/setup_toolchain.py | 146 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import stat
import sys
def CopyTool(source_path):
"""Copies the given tool to the current directory, including a warning not
to edit it."""
with open(source_path) as source_file:
tool_source = source_file.readlines()
# Add header and write it out to the current directory (which should be the
# root build dir).
out_path = 'gyp-mac-tool'
with open(out_path, 'w') as tool_file:
tool_file.write(''.join([tool_source[0],
'# Generated by setup_toolchain.py do not edit.\n']
+ tool_source[1:]))
st = os.stat(out_path)
os.chmod(out_path, st.st_mode | stat.S_IEXEC)
# Find the tool source, it's the first argument, and copy it.
if len(sys.argv) != 2:
print "Need one argument (mac_tool source path)."
sys.exit(1)
CopyTool(sys.argv[1])
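# Invocation sketch (illustrative; the source path is hypothetical):
#
#     python setup_toolchain.py /path/to/gyp/mac_tool.py
#
# Run from the root build directory, this writes an executable copy named
# 'gyp-mac-tool' next to the build outputs.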
|
CoderBotOrg/coderbot | refs/heads/develop | rotary_encoder_tests/velocity_test2.py | 1 | from time import sleep
from motorencoder import MotorEncoder
import pigpio
pi = pigpio.pi()  # connect to the local pigpio daemon
m = MotorEncoder(pi, 22, 25, 24, 14, 16)  # GPIO pin numbers are board-specific wiring
m.control(power=100.0, time_elapse=5)  # run the motor at full power for 5 seconds
|
obi-two/Rebelion | refs/heads/master | data/scripts/templates/object/tangible/ship/components/shield_generator/shared_shd_incom_mark9.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/shield_generator/shared_shd_incom_mark9.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","shd_incom_mark9_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
jeffery-do/Vizdoombot | refs/heads/master | doom/lib/python3.5/site-packages/matplotlib/tri/trirefine.py | 8 | """
Mesh refinement for triangular grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
import matplotlib.tri.triinterpolate
class TriRefiner(object):
"""
Abstract base class for classes implementing mesh refinement.
A TriRefiner encapsulates a Triangulation object and provides tools for
mesh refinement and interpolation.
Derived classes must implements:
- ``refine_triangulation(return_tri_index=False, **kwargs)`` , where
the optional keyword arguments *kwargs* are defined in each
TriRefiner concrete implementation, and which returns :
- a refined triangulation
- optionally (depending on *return_tri_index*), for each
point of the refined triangulation: the index of
the initial triangulation triangle to which it belongs.
- ``refine_field(z, triinterpolator=None, **kwargs)`` , where:
- *z* array of field values (to refine) defined at the base
triangulation nodes
- *triinterpolator* is a
:class:`~matplotlib.tri.TriInterpolator` (optional)
- the other optional keyword arguments *kwargs* are defined in
each TriRefiner concrete implementation
and which returns (as a tuple) a refined triangular mesh and the
interpolated values of the field at the refined triangulation nodes.
"""
def __init__(self, triangulation):
if not isinstance(triangulation, Triangulation):
raise ValueError("Expected a Triangulation object")
self._triangulation = triangulation
class UniformTriRefiner(TriRefiner):
"""
Uniform mesh refinement by recursive subdivisions.
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation`
The encapsulated triangulation (to be refined)
"""
# See Also
# --------
# :class:`~matplotlib.tri.CubicTriInterpolator` and
# :class:`~matplotlib.tri.TriAnalyzer`.
# """
def __init__(self, triangulation):
TriRefiner.__init__(self, triangulation)
def refine_triangulation(self, return_tri_index=False, subdiv=3):
"""
        Computes a uniformly refined triangulation *refi_triangulation* of
the encapsulated :attr:`triangulation`.
This function refines the encapsulated triangulation by splitting each
father triangle into 4 child sub-triangles built on the edges midside
nodes, recursively (level of recursion *subdiv*).
In the end, each triangle is hence divided into ``4**subdiv``
child triangles.
The default value for *subdiv* is 3 resulting in 64 refined
subtriangles for each triangle of the initial triangulation.
Parameters
----------
return_tri_index : boolean, optional
Boolean indicating whether an index table indicating the father
triangle index of each point will be returned. Default value
False.
subdiv : integer, optional
Recursion level for the subdivision. Defaults value 3.
Each triangle will be divided into ``4**subdiv`` child triangles.
Returns
-------
refi_triangulation : :class:`~matplotlib.tri.Triangulation`
The returned refined triangulation
found_index : array-like of integers
Index of the initial triangulation containing triangle, for each
point of *refi_triangulation*.
Returned only if *return_tri_index* is set to True.
"""
refi_triangulation = self._triangulation
ntri = refi_triangulation.triangles.shape[0]
        # Compute the ancestor triangle numbers in the reference
        # triangulation.
ancestors = np.arange(ntri, dtype=np.int32)
for _ in range(subdiv):
refi_triangulation, ancestors = self._refine_triangulation_once(
refi_triangulation, ancestors)
refi_npts = refi_triangulation.x.shape[0]
refi_triangles = refi_triangulation.triangles
# Now we compute found_index table if needed
if return_tri_index:
# We have to initialize found_index with -1 because some nodes
# may very well belong to no triangle at all, e.g., in case of
# Delaunay Triangulation with DuplicatePointWarning.
found_index = - np.ones(refi_npts, dtype=np.int32)
tri_mask = self._triangulation.mask
if tri_mask is None:
found_index[refi_triangles] = np.repeat(ancestors,
3).reshape(-1, 3)
else:
# There is a subtlety here: we want to avoid whenever possible
# that refined points container is a masked triangle (which
# would result in artifacts in plots).
# So we impose the numbering from masked ancestors first,
# then overwrite it with unmasked ancestor numbers.
ancestor_mask = tri_mask[ancestors]
found_index[refi_triangles[ancestor_mask, :]
] = np.repeat(ancestors[ancestor_mask],
3).reshape(-1, 3)
found_index[refi_triangles[~ancestor_mask, :]
] = np.repeat(ancestors[~ancestor_mask],
3).reshape(-1, 3)
return refi_triangulation, found_index
else:
return refi_triangulation
def refine_field(self, z, triinterpolator=None, subdiv=3):
"""
Refines a field defined on the encapsulated triangulation.
Returns *refi_tri* (refined triangulation), *refi_z* (interpolated
values of the field at the node of the refined triangulation).
Parameters
----------
z : 1d-array-like of length ``n_points``
Values of the field to refine, defined at the nodes of the
encapsulated triangulation. (``n_points`` is the number of points
in the initial triangulation)
triinterpolator : :class:`~matplotlib.tri.TriInterpolator`, optional
Interpolator used for field interpolation. If not specified,
a :class:`~matplotlib.tri.CubicTriInterpolator` will
be used.
subdiv : integer, optional
Recursion level for the subdivision. Defaults to 3.
Each triangle will be divided into ``4**subdiv`` child triangles.
Returns
-------
refi_tri : :class:`~matplotlib.tri.Triangulation` object
The returned refined triangulation
refi_z : 1d array of length: *refi_tri* node count.
The returned interpolated field (at *refi_tri* nodes)
Examples
--------
The main application of this method is to plot high-quality
iso-contours on a coarse triangular grid (e.g., triangulation built
from relatively sparse test data):
.. plot:: mpl_examples/pylab_examples/tricontour_smooth_user.py
"""
if triinterpolator is None:
interp = matplotlib.tri.CubicTriInterpolator(
self._triangulation, z)
else:
if not isinstance(triinterpolator,
matplotlib.tri.TriInterpolator):
raise ValueError("Expected a TriInterpolator object")
interp = triinterpolator
refi_tri, found_index = self.refine_triangulation(
subdiv=subdiv, return_tri_index=True)
refi_z = interp._interpolate_multikeys(
refi_tri.x, refi_tri.y, tri_index=found_index)[0]
return refi_tri, refi_z
@staticmethod
def _refine_triangulation_once(triangulation, ancestors=None):
"""
This function refines a matplotlib.tri *triangulation* by splitting
each triangle into 4 child-masked_triangles built on the edges midside
nodes.
        The masked triangles, if present, are also split, but their children
        are returned masked.
If *ancestors* is not provided, returns only a new triangulation:
child_triangulation.
If the array-like key table *ancestor* is given, it shall be of shape
(ntri,) where ntri is the number of *triangulation* masked_triangles.
In this case, the function returns
(child_triangulation, child_ancestors)
child_ancestors is defined so that the 4 child masked_triangles share
the same index as their father: child_ancestors.shape = (4 * ntri,).
"""
x = triangulation.x
y = triangulation.y
# According to tri.triangulation doc:
# neighbors[i,j] is the triangle that is the neighbor
# to the edge from point index masked_triangles[i,j] to point
# index masked_triangles[i,(j+1)%3].
neighbors = triangulation.neighbors
triangles = triangulation.triangles
npts = np.shape(x)[0]
ntri = np.shape(triangles)[0]
if ancestors is not None:
ancestors = np.asarray(ancestors)
if np.shape(ancestors) != (ntri,):
raise ValueError(
"Incompatible shapes provide for triangulation"
".masked_triangles and ancestors: {0} and {1}".format(
np.shape(triangles), np.shape(ancestors)))
# Initiating tables refi_x and refi_y of the refined triangulation
# points
# hint: each apex is shared by 2 masked_triangles except the borders.
borders = np.sum(neighbors == -1)
added_pts = (3*ntri + borders) // 2
refi_npts = npts + added_pts
refi_x = np.zeros(refi_npts)
refi_y = np.zeros(refi_npts)
# First part of refi_x, refi_y is just the initial points
refi_x[:npts] = x
refi_y[:npts] = y
# Second part contains the edge midside nodes.
# Each edge belongs to 1 triangle (if border edge) or is shared by 2
# masked_triangles (interior edge).
# We first build 2 * ntri arrays of edge starting nodes (edge_elems,
# edge_apexes) ; we then extract only the masters to avoid overlaps.
        # The so-called 'master' is the triangle with the bigger index;
        # the 'slave' is the triangle with the lower index
# (can be -1 if border edge)
# For slave and master we will identify the apex pointing to the edge
# start
edge_elems = np.ravel(np.vstack([np.arange(ntri, dtype=np.int32),
np.arange(ntri, dtype=np.int32),
np.arange(ntri, dtype=np.int32)]))
edge_apexes = np.ravel(np.vstack([np.zeros(ntri, dtype=np.int32),
np.ones(ntri, dtype=np.int32),
np.ones(ntri, dtype=np.int32)*2]))
edge_neighbors = neighbors[edge_elems, edge_apexes]
mask_masters = (edge_elems > edge_neighbors)
# Identifying the "masters" and adding to refi_x, refi_y vec
masters = edge_elems[mask_masters]
apex_masters = edge_apexes[mask_masters]
x_add = (x[triangles[masters, apex_masters]] +
x[triangles[masters, (apex_masters+1) % 3]]) * 0.5
y_add = (y[triangles[masters, apex_masters]] +
y[triangles[masters, (apex_masters+1) % 3]]) * 0.5
refi_x[npts:] = x_add
refi_y[npts:] = y_add
# Building the new masked_triangles ; each old masked_triangles hosts
# 4 new masked_triangles
# there are 6 pts to identify per 'old' triangle, 3 new_pt_corner and
# 3 new_pt_midside
new_pt_corner = triangles
# What is the index in refi_x, refi_y of point at middle of apex iapex
# of elem ielem ?
# If ielem is the apex master: simple count, given the way refi_x was
# built.
# If ielem is the apex slave: yet we do not know ; but we will soon
# using the neighbors table.
new_pt_midside = np.empty([ntri, 3], dtype=np.int32)
cum_sum = npts
for imid in range(3):
mask_st_loc = (imid == apex_masters)
n_masters_loc = np.sum(mask_st_loc)
elem_masters_loc = masters[mask_st_loc]
new_pt_midside[:, imid][elem_masters_loc] = np.arange(
n_masters_loc, dtype=np.int32) + cum_sum
cum_sum += n_masters_loc
# Now dealing with slave elems.
        # For each slave element we identify the master and then the inode;
        # once slave_masters is identified, slave_masters_apex is such that:
# neighbors[slaves_masters, slave_masters_apex] == slaves
mask_slaves = np.logical_not(mask_masters)
slaves = edge_elems[mask_slaves]
slaves_masters = edge_neighbors[mask_slaves]
diff_table = np.abs(neighbors[slaves_masters, :] -
np.outer(slaves, np.ones(3, dtype=np.int32)))
slave_masters_apex = np.argmin(diff_table, axis=1)
slaves_apex = edge_apexes[mask_slaves]
new_pt_midside[slaves, slaves_apex] = new_pt_midside[
slaves_masters, slave_masters_apex]
# Builds the 4 child masked_triangles
child_triangles = np.empty([ntri*4, 3], dtype=np.int32)
child_triangles[0::4, :] = np.vstack([
new_pt_corner[:, 0], new_pt_midside[:, 0],
new_pt_midside[:, 2]]).T
child_triangles[1::4, :] = np.vstack([
new_pt_corner[:, 1], new_pt_midside[:, 1],
new_pt_midside[:, 0]]).T
child_triangles[2::4, :] = np.vstack([
new_pt_corner[:, 2], new_pt_midside[:, 2],
new_pt_midside[:, 1]]).T
child_triangles[3::4, :] = np.vstack([
new_pt_midside[:, 0], new_pt_midside[:, 1],
new_pt_midside[:, 2]]).T
child_triangulation = Triangulation(refi_x, refi_y, child_triangles)
# Builds the child mask
if triangulation.mask is not None:
child_triangulation.set_mask(np.repeat(triangulation.mask, 4))
if ancestors is None:
return child_triangulation
else:
return child_triangulation, np.repeat(ancestors, 4)
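# Illustrative usage sketch (not part of the module); the point and triangle
# arrays below are made up:
#
#     import numpy as np
#     from matplotlib.tri import Triangulation, UniformTriRefiner
#
#     x = np.asarray([0.0, 1.0, 0.0, 1.0])
#     y = np.asarray([0.0, 0.0, 1.0, 1.0])
#     tri = Triangulation(x, y, [[0, 1, 2], [1, 3, 2]])
#     refiner = UniformTriRefiner(tri)
#     fine_tri, index = refiner.refine_triangulation(subdiv=2,
#                                                    return_tri_index=True)
#     # fine_tri now has 4**2 = 16 child triangles per input triangle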
|
adrianschroeter/kiwi | refs/heads/master | kiwi/partitioner/base.py | 2 | # Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
class PartitionerBase(object):
"""
**Base class for partitioners**
:param object disk_provider: Instance of DeviceProvider
:param int start_sector: sector number
"""
def __init__(self, disk_provider, start_sector=None):
self.disk_device = disk_provider.get_device()
self.partition_id = 0
self.start_sector = start_sector
self.flag_map = None
self.post_init()
def post_init(self):
"""
Post initialization method
Implementation in specialized partitioner class
"""
pass
def get_id(self):
"""
Current partition number
Zero indicates no partition has been created so far
:return: partition number
:rtype: int
"""
return self.partition_id
def create(self, name, mbsize, type_name, flags=None):
"""
Create partition
Implementation in specialized partitioner class
:param string name: unused
:param int mbsize: unused
:param string type_name: unused
:param list flags: unused
"""
raise NotImplementedError
def set_flag(self, partition_id, flag_name):
"""
Set partition flag
Implementation in specialized partitioner class
:param int partition_id: unused
:param string flag_name: unused
"""
raise NotImplementedError
def set_hybrid_mbr(self):
"""
Turn partition table into hybrid table if supported
Implementation in specialized partitioner class
"""
raise NotImplementedError
def set_mbr(self):
"""
Turn partition table into MBR (msdos table)
Implementation in specialized partitioner class
"""
raise NotImplementedError
def resize_table(self, entries=None):
"""
Resize partition table
:param int entries: unused
"""
raise NotImplementedError
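# Illustrative sketch of a specialized partitioner (not part of kiwi); the
# class name, flag map and tool invocation are hypothetical:
#
#     class ExamplePartitioner(PartitionerBase):
#         def post_init(self):
#             self.flag_map = {'f.active': 'boot'}
#
#         def create(self, name, mbsize, type_name, flags=None):
#             self.partition_id += 1
#             # ... call the real partitioning tool for self.disk_device ...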
|
jjdmol/LOFAR | refs/heads/master | LCU/checkhardware/lib/general_lib.py | 1 | r"""
General helper routines for the station hardware checks: date/time
formatting, shell command execution and simple file loggers.
"""
from subprocess import (Popen, PIPE)
import traceback
import time
import os
import sys
import logging
general_version = '0913'
logger = logging.getLogger()
def writeMessage(msg):
sendCmd('wall', msg)
return
# Return date string in the following format YYYYMMDD
def getShortDateStr(tm=time.gmtime()):
return (time.strftime("%Y%m%d", tm))
# Return date string in the following format DD-MM-YYYY
def getDateStr(tm=time.gmtime()):
return (time.strftime("%d-%m-%Y", tm))
# Return time string in the following format HH:MM:SS
def getTimeStr(tm=time.gmtime()):
return (time.strftime("%H:%M:%S", tm))
# Return time string in the following format HH:MM:SS
def getDateTimeStr(tm=time.gmtime()):
return (time.strftime("%d-%m-%YT%H:%M:%S", tm))
# Run cmd with args and return response
def sendCmd(cmd='', args=''):
if cmd != '':
try:
args = args.replace(' =','=').replace('= ','=')
cmdList = [cmd] + args.split()
#print cmdList
cmdline = Popen(cmdList, stdout=PIPE, stderr=PIPE )
(so, se) = cmdline.communicate()
if len(so) != 0:
return (so)
else:
return ('Error, %s' % se)
except:
logger.error('Caught %s', str(sys.exc_info()[0]))
logger.error(str(sys.exc_info()[1]))
logger.error('TRACEBACK:\n%s', traceback.format_exc())
return ('Exception Error')
return ('')
# Get Host name
def getHostName():
try:
host = sendCmd('hostname', '-s')
if host == 'Exception Error':
host = 'Unknown'
except:
host = 'Unknown'
return (host.strip())
# file logger
class cLogger:
def __init__(self, logdir, filename, screenPrefix=''):
self.fullFilename = os.path.join(logdir,filename)
self.logfile = open(self.fullFilename, 'w')
self.prefix = screenPrefix
self.starttime = time.time()
def __del__(self):
self.logfile.close()
def getFullFileName(self):
return (self.fullFilename)
def resetStartTime(self, screen=False):
self.starttime = time.time()
self.info("Start time %s" %(time.strftime("%H:%M:%S", time.gmtime(self.starttime))), screen=screen)
def printBusyTime(self, screen=False):
self.info("Time from start %s" %(time.strftime("%H:%M:%S", (time.gmtime(time.time() - self.starttime)))), screen=screen)
def printTimeNow(self, screen=False):
self.info("Time %s" %(time.strftime("%H:%M:%S", time.gmtime(time.time()))), screen=screen)
def info(self, msg, noEnd=False, screen=False):
if len(msg) != 0:
if screen:
print self.prefix+' '+msg
if noEnd == False:
msg += '\n'
self.logfile.write(msg)
self.logfile.flush()
class cTestLogger(cLogger):
def __init__(self, logdir):
filename = getHostName() + "_StationTest" + '.csv'
cLogger.__init__(self, logdir, filename)
def addLine(self, info):
cLogger.info(self, info)
class cStationLogger(cLogger):
    def __init__(self, logdir, filetime=None):
        # a time.gmtime() default would be frozen at import time
        if filetime is None:
            filetime = time.gmtime()
filename = "stationtest_" + getHostName() + '.log'
cLogger.__init__(self, logdir, filename)
cLogger.info(self, "StID >: %s" %(getHostName()))
cLogger.info(self, "Lgfl >: %s" %(os.path.join(logdir,filename)))
testdate = time.strftime("%a, %d %b %Y %H:%M:%S", filetime)
cLogger.info(self, "Time >: %s" %(testdate))
def addLine(self, info):
cLogger.info(self, info)
class cPVSSLogger(cLogger):
def __init__(self, logdir):
filename = getHostName() + "_StationTest_PVSS" + '.log'
cLogger.__init__(self, logdir, filename)
#cLogger.info(self, "# PVSS input file")
def addLine(self, info):
cLogger.info(self, info)
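if __name__ == '__main__':
    # Illustrative usage sketch (assumed, not part of the LOFAR scripts):
    # run a command and write a small log file in the current directory.
    print sendCmd('hostname', '-s')
    log = cLogger(os.getcwd(), 'example.log', screenPrefix='demo')
    log.resetStartTime(screen=True)
    log.info('hello from %s' % getHostName(), screen=True)
    log.printBusyTime(screen=True)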
|
jiaphuan/models | refs/heads/master | research/compression/entropy_coder/lib/blocks_masked_conv2d_test.py | 5 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the 2D masked convolution blocks."""
from __future__ import division
from __future__ import unicode_literals
import numpy as np
from six.moves import xrange
import tensorflow as tf
import blocks_masked_conv2d
class MaskedConv2DTest(tf.test.TestCase):
def testRasterScanKernel(self):
kernel_size = 5
input_depth = 1
output_depth = 1
kernel_shape = [kernel_size, kernel_size, input_depth, output_depth]
# pylint: disable=bad-whitespace
kernel_feed = [[ 1.0, 2.0, 3.0, 4.0, 5.0],
[ 6.0, 7.0, 8.0, 9.0, 10.0],
[11.0, 12.0, 13.0, 14.0, 15.0],
[16.0, 17.0, 18.0, 19.0, 20.0],
[21.0, 22.0, 23.0, 24.0, 25.0]]
kernel_feed = np.reshape(kernel_feed, kernel_shape)
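    # The raster-scan mask should zero the kernel center and every weight
    # after it in raster order (left-to-right, top-to-bottom), so each
    # output pixel depends only on previously generated input pixels.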
kernel_expected = [[ 1.0, 2.0, 3.0, 4.0, 5.0],
[ 6.0, 7.0, 8.0, 9.0, 10.0],
[11.0, 12.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0]]
kernel_expected = np.reshape(kernel_expected, kernel_shape)
# pylint: enable=bad-whitespace
init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s)
masked_conv2d = blocks_masked_conv2d.RasterScanConv2D(
output_depth, [kernel_size] * 2, [1] * 2, 'SAME',
initializer=init_kernel)
x = tf.placeholder(dtype=tf.float32, shape=[10] * 3 + [input_depth])
_ = masked_conv2d(x)
with self.test_session():
tf.global_variables_initializer().run()
kernel_value = masked_conv2d._kernel.eval()
self.assertAllEqual(kernel_expected, kernel_value)
def testDepthOrderKernel(self):
kernel_size = 1
input_depth = 7
output_depth = input_depth
kernel_shape = [kernel_size, kernel_size, input_depth, output_depth]
kernel_feed = np.ones(kernel_shape)
x_shape = [5] * 3 + [input_depth]
x_feed = np.ones(x_shape)
y_expected = np.zeros(x_shape[0:3] + [output_depth])
y_expected[:, :, :] = np.arange(output_depth)
init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s)
masked_conv2d = blocks_masked_conv2d.DepthOrderConv2D(
output_depth, [kernel_size] * 2, [1] * 2, 'SAME',
strict_order=True,
initializer=init_kernel)
x = tf.placeholder(dtype=tf.float32, shape=x_shape)
y = masked_conv2d(x)
with self.test_session():
tf.global_variables_initializer().run()
y_value = y.eval(feed_dict={x: x_feed})
self.assertAllEqual(y_expected, y_value)
def testGroupRasterScanKernel(self):
kernel_size = 3
input_depth = 4
input_group_size = 2
output_depth = 2
output_group_size = 1
kernel_shape = [kernel_size, kernel_size, input_depth, output_depth]
kernel_feed = np.ones(shape=kernel_shape)
height = 5
width = 5
x_shape = [1, height, width, input_depth]
x_feed = np.ones(shape=x_shape)
# pylint: disable=bad-whitespace
y_expected = [
[[ 0, 2], [ 4, 6], [ 4, 6], [ 4, 6], [ 4, 6]],
[[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]],
[[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]],
[[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]],
[[ 8, 10], [16, 18], [16, 18], [16, 18], [12, 14]],
]
y_expected = np.reshape(y_expected, [1, height, width, output_depth])
# pylint: enable=bad-whitespace
init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s)
masked_conv2d = blocks_masked_conv2d.GroupRasterScanConv2D(
output_depth, [kernel_size] * 2, [1] * 2, 'SAME',
strict_order=True,
input_group_size=input_group_size,
output_group_size=output_group_size,
initializer=init_kernel)
x = tf.placeholder(dtype=tf.float32, shape=x_shape)
y = masked_conv2d(x)
with self.test_session():
tf.global_variables_initializer().run()
y_value = y.eval(feed_dict={x: x_feed})
self.assertAllEqual(y_expected, y_value)
def testInFillingKernel(self):
kernel_size = 5
input_depth = 1
output_depth = 1
kernel_shape = [kernel_size, kernel_size, input_depth, output_depth]
# pylint: disable=bad-whitespace
kernel_feed = [[ 1.0, 2.0, 3.0, 4.0, 5.0],
[ 6.0, 7.0, 8.0, 9.0, 10.0],
[11.0, 12.0, 13.0, 14.0, 15.0],
[16.0, 17.0, 18.0, 19.0, 20.0],
[21.0, 22.0, 23.0, 24.0, 25.0]]
kernel_feed = np.reshape(kernel_feed, kernel_shape)
kernel_expected = [[ 1.0, 2.0, 3.0, 4.0, 5.0],
[ 6.0, 7.0, 8.0, 9.0, 10.0],
[11.0, 12.0, 0.0, 14.0, 15.0],
[16.0, 17.0, 18.0, 19.0, 20.0],
[21.0, 22.0, 23.0, 24.0, 25.0]]
kernel_expected = np.reshape(kernel_expected, kernel_shape)
# pylint: enable=bad-whitespace
init_kernel = lambda s, t: tf.constant(kernel_feed, dtype=t, shape=s)
masked_conv2d = blocks_masked_conv2d.InFillingConv2D(
output_depth, [kernel_size] * 2, [1] * 2, 'SAME',
initializer=init_kernel)
x = tf.placeholder(dtype=tf.float32, shape=[10] * 3 + [input_depth])
_ = masked_conv2d(x)
with self.test_session():
tf.global_variables_initializer().run()
kernel_value = masked_conv2d._kernel.eval()
self.assertAllEqual(kernel_expected, kernel_value)
def testConv2DMaskedNumerics(self):
kernel_size = 5
input_shape = [1, 10, 10, 1]
filter_shape = [kernel_size, kernel_size, 1, 1]
strides = [1, 1, 1, 1]
output_shape = [1, 10, 10, 1]
conv = blocks_masked_conv2d.RasterScanConv2D(
depth=filter_shape[-1],
filter_size=filter_shape[0:2],
strides=strides[1:3],
padding='SAME',
initializer=tf.constant_initializer(value=1.0))
x = tf.placeholder(dtype=tf.float32, shape=input_shape)
y = conv(x)
x_feed = - np.ones(input_shape, dtype=float)
y_expected = np.ones(output_shape, dtype=float)
for i in xrange(input_shape[1]):
for j in xrange(input_shape[2]):
x_feed[0, i, j, 0] = 10 * (j + 1) + i
v = 0
ki_start = max(i - kernel_size // 2, 0)
kj_start = max(j - kernel_size // 2, 0)
kj_end = min(j + kernel_size // 2, input_shape[2] - 1)
for ki in range(ki_start, i + 1):
for kj in range(kj_start, kj_end + 1):
if ki > i:
continue
if ki == i and kj >= j:
continue
v += 10 * (kj + 1) + ki
y_expected[0, i, j, 0] = v
with self.test_session():
tf.global_variables_initializer().run()
y_value = y.eval(feed_dict={x: x_feed})
self.assertAllEqual(y_expected, y_value)
if __name__ == '__main__':
tf.test.main()
|
stephane-martin/salt-debian-packaging | refs/heads/master | salt-2016.3.3/salt/utils/nb_popen.py | 2 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
salt.utils.nb_popen
~~~~~~~~~~~~~~~~~~~
Non blocking subprocess Popen.
This functionality has been adapted to work on windows following the recipe
found on:
http://code.activestate.com/recipes/440554/
'''
from __future__ import absolute_import
# Import python libs
import os
import sys
import time
import errno
import select
import logging
import tempfile
import subprocess
mswindows = (sys.platform == "win32")
if mswindows:
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
import msvcrt
else:
import fcntl
log = logging.getLogger(__name__)
class NonBlockingPopen(subprocess.Popen):
#_stdin_logger_name_ = 'salt.utils.nb_popen.STDIN.PID-{pid}'
_stdout_logger_name_ = 'salt.utils.nb_popen.STDOUT.PID-{pid}'
_stderr_logger_name_ = 'salt.utils.nb_popen.STDERR.PID-{pid}'
def __init__(self, *args, **kwargs):
self.stream_stds = kwargs.pop('stream_stds', False)
# Half a megabyte in memory is more than enough to start writing to
# a temporary file.
self.max_size_in_mem = kwargs.pop('max_size_in_mem', 512000)
# Let's configure the std{in, out,err} logging handler names
#self._stdin_logger_name_ = kwargs.pop(
# 'stdin_logger_name', self._stdin_logger_name_
#)
self._stdout_logger_name_ = kwargs.pop(
'stdout_logger_name', self._stdout_logger_name_
)
self._stderr_logger_name_ = kwargs.pop(
'stderr_logger_name', self._stderr_logger_name_
)
logging_command = kwargs.pop('logging_command', None)
stderr = kwargs.get('stderr', None)
super(NonBlockingPopen, self).__init__(*args, **kwargs)
#self._stdin_logger = logging.getLogger(
# self._stdin_logger_name_.format(pid=self.pid)
#)
self.stdout_buff = tempfile.SpooledTemporaryFile(self.max_size_in_mem)
self._stdout_logger = logging.getLogger(
self._stdout_logger_name_.format(pid=self.pid)
)
if stderr is subprocess.STDOUT:
self.stderr_buff = self.stdout_buff
self._stderr_logger = self._stdout_logger
else:
self.stderr_buff = tempfile.SpooledTemporaryFile(
self.max_size_in_mem
)
self._stderr_logger = logging.getLogger(
self._stderr_logger_name_.format(pid=self.pid)
)
if logging_command is None:
log.info(
'Running command under pid {0}: {1!r}'.format(
self.pid,
*args
)
)
else:
log.info(
'Running command under pid {0}: {1!r}'.format(
self.pid,
logging_command
)
)
def recv(self, maxsize=None):
return self._recv('stdout', maxsize)
def recv_err(self, maxsize=None):
return self._recv('stderr', maxsize)
def send_recv(self, input='', maxsize=None):
return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
def get_conn_maxsize(self, which, maxsize):
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return getattr(self, which), maxsize
def _close(self, which):
getattr(self, which).close()
setattr(self, which, None)
if mswindows:
def send(self, input):
if not self.stdin:
return None
try:
x = msvcrt.get_osfhandle(self.stdin.fileno())
(errCode, written) = WriteFile(x, input)
#self._stdin_logger.debug(input.rstrip())
except ValueError:
return self._close('stdin')
except (subprocess.pywintypes.error, Exception) as why:
if why.args[0] in (109, errno.ESHUTDOWN):
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
x = msvcrt.get_osfhandle(conn.fileno())
(read, nAvail, nMessage) = PeekNamedPipe(x, 0)
if maxsize < nAvail:
nAvail = maxsize
if nAvail > 0:
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
except (subprocess.pywintypes.error, Exception) as why:
if why.args[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
getattr(self, '{0}_buff'.format(which)).write(read)
getattr(self, '_{0}_logger'.format(which)).debug(read.rstrip())
if self.stream_stds:
getattr(sys, which).write(read)
if self.universal_newlines:
read = self._translate_newlines(read)
return read
else:
def send(self, input):
if not self.stdin:
return None
if not select.select([], [self.stdin], [], 0)[1]:
return 0
try:
written = os.write(self.stdin.fileno(), input)
#self._stdin_logger.debug(input.rstrip())
except OSError as why:
if why.args[0] == errno.EPIPE: # broken pipe
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
try:
if not select.select([conn], [], [], 0)[0]:
return ''
buff = conn.read(maxsize)
if not buff:
return self._close(which)
if self.universal_newlines:
buff = self._translate_newlines(buff)
getattr(self, '{0}_buff'.format(which)).write(buff)
getattr(self, '_{0}_logger'.format(which)).debug(buff.rstrip())
if self.stream_stds:
getattr(sys, which).write(buff)
return buff
finally:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
def poll_and_read_until_finish(self, interval=0.01):
silent_iterations = 0
while self.poll() is None:
if self.stdout is not None:
silent_iterations = 0
self.recv()
if self.stderr is not None:
silent_iterations = 0
self.recv_err()
silent_iterations += 1
if silent_iterations > 100:
silent_iterations = 0
(stdoutdata, stderrdata) = self.communicate()
if stdoutdata:
log.debug(stdoutdata)
if stderrdata:
log.error(stderrdata)
time.sleep(interval)
def communicate(self, input=None):
super(NonBlockingPopen, self).communicate(input)
self.stdout_buff.flush()
self.stdout_buff.seek(0)
self.stderr_buff.flush()
self.stderr_buff.seek(0)
return self.stdout_buff.read(), self.stderr_buff.read()
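if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of salt): stream the
    # output of a short command without blocking on either pipe.
    proc = NonBlockingPopen(
        ['echo', 'hello'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stream_stds=True,
    )
    proc.poll_and_read_until_finish()
    stdout, stderr = proc.communicate()
    print('collected %d bytes of stdout' % len(stdout))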
|
vladmm/intellij-community | refs/heads/master | python/testData/refactoring/extractmethod/Nonlocal.before.py | 83 | def foo():
x = 1
def bar():
nonlocal x
<selection>x = 2</selection>
print(x)
bar()
foo() |
Ballz0fSteel/Umeko | refs/heads/master | lib/youtube_dl/extractor/mit.py | 77 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
clean_html,
ExtractorError,
get_element_by_id,
)
class TechTVMITIE(InfoExtractor):
IE_NAME = 'techtv.mit.edu'
_VALID_URL = r'https?://techtv\.mit\.edu/(?:videos|embeds)/(?P<id>\d+)'
_TEST = {
'url': 'http://techtv.mit.edu/videos/25418-mit-dna-learning-center-set',
'md5': '00a3a27ee20d44bcaa0933ccec4a2cf7',
'info_dict': {
'id': '25418',
'ext': 'mp4',
'title': 'MIT DNA and Protein Sets',
'description': 'md5:46f5c69ce434f0a97e7c628cc142802d',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
raw_page = self._download_webpage(
'http://techtv.mit.edu/videos/%s' % video_id, video_id)
clean_page = re.compile(r'<!--.*?-->', re.S).sub('', raw_page)
base_url = self._proto_relative_url(self._search_regex(
r'ipadUrl: \'(.+?cloudfront.net/)', raw_page, 'base url'), 'http:')
formats_json = self._search_regex(
r'bitrates: (\[.+?\])', raw_page, 'video formats')
formats_mit = json.loads(formats_json)
formats = [
{
'format_id': f['label'],
'url': base_url + f['url'].partition(':')[2],
'ext': f['url'].partition(':')[0],
'format': f['label'],
'width': f['width'],
'vbr': f['bitrate'],
}
for f in formats_mit
]
title = get_element_by_id('edit-title', clean_page)
description = clean_html(get_element_by_id('edit-description', clean_page))
thumbnail = self._search_regex(
r'playlist:.*?url: \'(.+?)\'',
raw_page, 'thumbnail', flags=re.DOTALL)
return {
'id': video_id,
'title': title,
'formats': formats,
'description': description,
'thumbnail': thumbnail,
}
class MITIE(TechTVMITIE):
IE_NAME = 'video.mit.edu'
_VALID_URL = r'https?://video\.mit\.edu/watch/(?P<title>[^/]+)'
_TEST = {
'url': 'http://video.mit.edu/watch/the-government-is-profiling-you-13222/',
'md5': '7db01d5ccc1895fc5010e9c9e13648da',
'info_dict': {
'id': '21783',
'ext': 'mp4',
'title': 'The Government is Profiling You',
'description': 'md5:ad5795fe1e1623b73620dbfd47df9afd',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_title = mobj.group('title')
webpage = self._download_webpage(url, page_title)
embed_url = self._search_regex(
r'<iframe .*?src="(.+?)"', webpage, 'embed url')
return self.url_result(embed_url)
class OCWMITIE(InfoExtractor):
IE_NAME = 'ocw.mit.edu'
_VALID_URL = r'^https?://ocw\.mit\.edu/courses/(?P<topic>[a-z0-9\-]+)'
_BASE_URL = 'http://ocw.mit.edu/'
_TESTS = [
{
'url': 'http://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-041-probabilistic-systems-analysis-and-applied-probability-fall-2010/video-lectures/lecture-7-multiple-variables-expectations-independence/',
'info_dict': {
'id': 'EObHWIEKGjA',
'ext': 'webm',
'title': 'Lecture 7: Multiple Discrete Random Variables: Expectations, Conditioning, Independence',
'description': 'In this lecture, the professor discussed multiple random variables, expectations, and binomial distribution.',
'upload_date': '20121109',
'uploader_id': 'MIT',
'uploader': 'MIT OpenCourseWare',
}
},
{
'url': 'http://ocw.mit.edu/courses/mathematics/18-01sc-single-variable-calculus-fall-2010/1.-differentiation/part-a-definition-and-basic-rules/session-1-introduction-to-derivatives/',
'info_dict': {
'id': '7K1sB05pE0A',
'ext': 'mp4',
'title': 'Session 1: Introduction to Derivatives',
'upload_date': '20090818',
'uploader_id': 'MIT',
'uploader': 'MIT OpenCourseWare',
'description': 'This section contains lecture video excerpts, lecture notes, an interactive mathlet with supporting documents, and problem solving videos.',
}
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
topic = mobj.group('topic')
webpage = self._download_webpage(url, topic)
title = self._html_search_meta('WT.cg_s', webpage)
description = self._html_search_meta('Description', webpage)
# search for call to ocw_embed_chapter_media(container_id, media_url, provider, page_url, image_url, start, stop, captions_file)
embed_chapter_media = re.search(r'ocw_embed_chapter_media\((.+?)\)', webpage)
if embed_chapter_media:
metadata = re.sub(r'[\'"]', '', embed_chapter_media.group(1))
metadata = re.split(r', ?', metadata)
yt = metadata[1]
else:
            # search for call to ocw_embed_media(container_id, media_url, provider, page_url, image_url, captions_file)
embed_media = re.search(r'ocw_embed_media\((.+?)\)', webpage)
if embed_media:
metadata = re.sub(r'[\'"]', '', embed_media.group(1))
metadata = re.split(r', ?', metadata)
yt = metadata[1]
else:
raise ExtractorError('Unable to find embedded YouTube video.')
video_id = YoutubeIE.extract_id(yt)
return {
'_type': 'url_transparent',
'id': video_id,
'title': title,
'description': description,
'url': yt,
'ie_key': 'Youtube',
}
|
BigGillyStyle/exercism | refs/heads/master | python/hello-world/hello_world.py | 12 | #
# Skeleton file for the Python "Hello World" exercise.
#
def hello(name=''):
return
|
mosaic-cloud/mosaic-distribution-dependencies | refs/heads/development | dependencies/otp/17.1/lib/asn1/test/asn1_SUITE_data/SeqSetLib.py | 97 | SeqSetLib DEFINITIONS IMPLICIT TAGS ::=
BEGIN
EXPORTS Seq1,Set1;
Seq1 ::= SEQUENCE
{
bool1 BOOLEAN,
int1 INTEGER,
seq1 SeqIn
}
Set1 ::= SET
{
bool1 BOOLEAN,
int1 INTEGER,
set1 SetIn
}
SetIn ::= SET
{
boolIn BOOLEAN,
intIn INTEGER
}
SeqIn ::= SEQUENCE
{
boolIn BOOLEAN,
intIn INTEGER
}
END
|
JordanReiter/django-notification | refs/heads/master | notification/lockfile.py | 39 |
"""
lockfile.py - Platform-independent advisory file locks.
Requires Python 2.5 unless you apply 2.4.diff
Locking is done on a per-thread basis instead of a per-process basis.
Usage:
>>> lock = FileLock('somefile')
>>> try:
... lock.acquire()
... except AlreadyLocked:
... print 'somefile', 'is locked already.'
... except LockFailed:
... print 'somefile', 'can\\'t be locked.'
... else:
... print 'got lock'
got lock
>>> print lock.is_locked()
True
>>> lock.release()
>>> lock = FileLock('somefile')
>>> print lock.is_locked()
False
>>> with lock:
... print lock.is_locked()
True
>>> print lock.is_locked()
False
>>> # It is okay to lock twice from the same thread...
>>> with lock:
... lock.acquire()
...
>>> # Though no counter is kept, so you can't unlock multiple times...
>>> print lock.is_locked()
False
Exceptions:
Error - base class for other exceptions
LockError - base class for all locking exceptions
AlreadyLocked - Another thread or process already holds the lock
LockFailed - Lock failed for some other reason
UnlockError - base class for all unlocking exceptions
AlreadyUnlocked - File was not locked.
NotMyLock - File was locked but not by the current thread/process
"""
from __future__ import division
import sys
import socket
import os
import threading
import time
import errno
# Work with PEP8 and non-PEP8 versions of threading module.
try:
threading.current_thread
except AttributeError:
threading.current_thread = threading.currentThread
try:
# python 2.6 has threading.current_thread so we need to do this separately.
threading.Thread.get_name
except AttributeError:
threading.Thread.get_name = threading.Thread.getName
__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked',
'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock',
'LinkFileLock', 'MkdirFileLock', 'SQLiteFileLock']
class Error(Exception):
"""
Base class for other exceptions.
>>> try:
... raise Error
... except Exception:
... pass
"""
pass
class LockError(Error):
"""
Base class for error arising from attempts to acquire the lock.
>>> try:
... raise LockError
... except Error:
... pass
"""
pass
class LockTimeout(LockError):
"""Raised when lock creation fails within a user-defined period of time.
>>> try:
... raise LockTimeout
... except LockError:
... pass
"""
pass
class AlreadyLocked(LockError):
"""Some other thread/process is locking the file.
>>> try:
... raise AlreadyLocked
... except LockError:
... pass
"""
pass
class LockFailed(LockError):
"""Lock file creation failed for some other reason.
>>> try:
... raise LockFailed
... except LockError:
... pass
"""
pass
class UnlockError(Error):
"""
Base class for errors arising from attempts to release the lock.
>>> try:
... raise UnlockError
... except Error:
... pass
"""
pass
class NotLocked(UnlockError):
"""Raised when an attempt is made to unlock an unlocked file.
>>> try:
... raise NotLocked
... except UnlockError:
... pass
"""
pass
class NotMyLock(UnlockError):
"""Raised when an attempt is made to unlock a file someone else locked.
>>> try:
... raise NotMyLock
... except UnlockError:
... pass
"""
pass
class LockBase:
"""Base class for platform-specific lock classes."""
def __init__(self, path, threaded=True):
"""
>>> lock = LockBase('somefile')
>>> lock = LockBase('somefile', threaded=False)
"""
self.path = path
self.lock_file = os.path.abspath(path) + ".lock"
self.hostname = socket.gethostname()
self.pid = os.getpid()
if threaded:
tname = "%s-" % threading.current_thread().get_name()
else:
tname = ""
dirname = os.path.dirname(self.lock_file)
self.unique_name = os.path.join(dirname,
"%s.%s%s" % (self.hostname,
tname,
self.pid))
def acquire(self, timeout=None):
"""
Acquire the lock.
* If timeout is omitted (or None), wait forever trying to lock the
file.
* If timeout > 0, try to acquire the lock for that many seconds. If
the lock period expires and the file is still locked, raise
LockTimeout.
* If timeout <= 0, raise AlreadyLocked immediately if the file is
already locked.
"""
        # NotImplemented is a value, not an exception; raising it is a TypeError
        raise NotImplementedError("implement in subclass")
def release(self):
"""
Release the lock.
If the file is not locked, raise NotLocked.
"""
        raise NotImplementedError("implement in subclass")
def is_locked(self):
"""
Tell whether or not the file is locked.
"""
        raise NotImplementedError("implement in subclass")
def i_am_locking(self):
"""
Return True if this object is locking the file.
"""
        raise NotImplementedError("implement in subclass")
def break_lock(self):
"""
Remove a lock. Useful if a locking thread failed to unlock.
"""
        raise NotImplementedError("implement in subclass")
def __enter__(self):
"""
Context manager support.
"""
self.acquire()
return self
def __exit__(self, *_exc):
"""
Context manager support.
"""
self.release()
class LinkFileLock(LockBase):
"""Lock access to a file using atomic property of link(2)."""
def acquire(self, timeout=None):
try:
open(self.unique_name, "wb").close()
except IOError:
raise LockFailed
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try and create a hard link to it.
try:
os.link(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
nlinks = os.stat(self.unique_name).st_nlink
if nlinks == 2:
# The original link plus the one I created == 2. We're
# good to go.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
os.unlink(self.unique_name)
if timeout > 0:
raise LockTimeout
else:
raise AlreadyLocked
time.sleep(timeout is not None and timeout/10 or 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked
elif not os.path.exists(self.unique_name):
raise NotMyLock
os.unlink(self.unique_name)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name) and
os.stat(self.unique_name).st_nlink == 2)
def break_lock(self):
if os.path.exists(self.lock_file):
os.unlink(self.lock_file)
class MkdirFileLock(LockBase):
"""Lock file by creating a directory."""
def __init__(self, path, threaded=True):
"""
>>> lock = MkdirFileLock('somefile')
>>> lock = MkdirFileLock('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded)
if threaded:
tname = "%x-" % thread.get_ident()
else:
tname = ""
# Lock file itself is a directory. Place the unique file name into
# it.
self.unique_name = os.path.join(self.lock_file,
"%s.%s%s" % (self.hostname,
tname,
self.pid))
def acquire(self, timeout=None):
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
else:
wait = max(0, timeout / 10)
while True:
try:
os.mkdir(self.lock_file)
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.EEXIST:
# Already locked.
if os.path.exists(self.unique_name):
# Already locked by me.
return
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout
else:
# Someone else has the lock.
raise AlreadyLocked
time.sleep(wait)
else:
# Couldn't create the lock for some other reason
raise LockFailed
else:
open(self.unique_name, "wb").close()
return
def release(self):
if not self.is_locked():
raise NotLocked
elif not os.path.exists(self.unique_name):
raise NotMyLock
os.unlink(self.unique_name)
os.rmdir(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name))
def break_lock(self):
if os.path.exists(self.lock_file):
for name in os.listdir(self.lock_file):
os.unlink(os.path.join(self.lock_file, name))
os.rmdir(self.lock_file)
class SQLiteFileLock(LockBase):
"Demonstration of using same SQL-based locking."
import tempfile
_fd, testdb = tempfile.mkstemp()
os.close(_fd)
os.unlink(testdb)
del _fd, tempfile
def __init__(self, path, threaded=True):
LockBase.__init__(self, path, threaded)
self.lock_file = unicode(self.lock_file)
self.unique_name = unicode(self.unique_name)
import sqlite3
self.connection = sqlite3.connect(SQLiteFileLock.testdb)
c = self.connection.cursor()
try:
c.execute("create table locks"
"("
" lock_file varchar(32),"
" unique_name varchar(32)"
")")
except sqlite3.OperationalError:
pass
else:
self.connection.commit()
import atexit
atexit.register(os.unlink, SQLiteFileLock.testdb)
def acquire(self, timeout=None):
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
elif timeout <= 0:
wait = 0
else:
wait = timeout / 10
cursor = self.connection.cursor()
while True:
if not self.is_locked():
# Not locked. Try to lock it.
cursor.execute("insert into locks"
" (lock_file, unique_name)"
" values"
" (?, ?)",
(self.lock_file, self.unique_name))
self.connection.commit()
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) > 1:
# Nope. Someone else got there. Remove our lock.
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
else:
# Yup. We're done, so go home.
return
else:
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) == 1:
# We're the locker, so go home.
return
# Maybe we should wait a bit longer.
if timeout is not None and time.time() > end_time:
if timeout > 0:
# No more waiting.
raise LockTimeout
else:
# Someone else has the lock and we are impatient..
raise AlreadyLocked
# Well, okay. We'll give it a bit longer.
time.sleep(wait)
def release(self):
if not self.is_locked():
raise NotLocked
if not self.i_am_locking():
raise NotMyLock((self._who_is_locking(), self.unique_name))
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
def _who_is_locking(self):
cursor = self.connection.cursor()
cursor.execute("select unique_name from locks"
" where lock_file = ?",
(self.lock_file,))
return cursor.fetchone()[0]
def is_locked(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?",
(self.lock_file,))
rows = cursor.fetchall()
return not not rows
def i_am_locking(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?"
" and unique_name = ?",
(self.lock_file, self.unique_name))
return not not cursor.fetchall()
def break_lock(self):
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where lock_file = ?",
(self.lock_file,))
self.connection.commit()
if hasattr(os, "link"):
FileLock = LinkFileLock
else:
FileLock = MkdirFileLock
|
pschwartz/ansible | refs/heads/devel | v1/ansible/runner/connection_plugins/accelerate.py | 109 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import base64
import socket
import struct
import time
from ansible.callbacks import vvv, vvvv
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.runner.connection_plugins.ssh import Connection as SSHConnection
from ansible.runner.connection_plugins.paramiko_ssh import Connection as ParamikoConnection
from ansible import utils
from ansible import constants
# the chunk size to read and send, assuming mtu 1500 and
# leaving room for base64 (+33%) encoding and header (8 bytes)
# ((1400-8)/4)*3) = 1044
# which leaves room for the TCP/IP header. We set this to a
# multiple of the value to speed up file reads.
CHUNK_SIZE=1044*20
class Connection(object):
''' raw socket accelerated connection '''
def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
self.runner = runner
self.host = host
self.context = None
self.conn = None
self.user = user
self.key = utils.key_for_hostname(host)
self.port = port[0]
self.accport = port[1]
self.is_connected = False
self.has_pipelining = False
self.become_methods_supported=['sudo']
if not self.port:
self.port = constants.DEFAULT_REMOTE_PORT
elif not isinstance(self.port, int):
self.port = int(self.port)
if not self.accport:
self.accport = constants.ACCELERATE_PORT
elif not isinstance(self.accport, int):
self.accport = int(self.accport)
if self.runner.original_transport == "paramiko":
self.ssh = ParamikoConnection(
runner=self.runner,
host=self.host,
port=self.port,
user=self.user,
password=password,
private_key_file=private_key_file
)
else:
self.ssh = SSHConnection(
runner=self.runner,
host=self.host,
port=self.port,
user=self.user,
password=password,
private_key_file=private_key_file
)
if not getattr(self.ssh, 'shell', None):
self.ssh.shell = utils.plugins.shell_loader.get('sh')
# attempt to work around shared-memory funness
if getattr(self.runner, 'aes_keys', None):
utils.AES_KEYS = self.runner.aes_keys
def _execute_accelerate_module(self):
args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % (
base64.b64encode(self.key.__str__()),
str(self.accport),
constants.ACCELERATE_DAEMON_TIMEOUT,
int(utils.VERBOSITY),
self.runner.accelerate_ipv6,
)
if constants.ACCELERATE_MULTI_KEY:
args += " multi_key=yes"
inject = dict(password=self.key)
if getattr(self.runner, 'accelerate_inventory_host', False):
inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.runner.accelerate_inventory_host))
else:
inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
vvvv("attempting to start up the accelerate daemon...")
self.ssh.connect()
tmp_path = self.runner._make_tmp_path(self.ssh)
return self.runner._execute_module(self.ssh, tmp_path, 'accelerate', args, inject=inject)
def connect(self, allow_ssh=True):
''' activates the connection object '''
try:
if not self.is_connected:
wrong_user = False
tries = 3
self.conn = socket.socket()
self.conn.settimeout(constants.ACCELERATE_CONNECT_TIMEOUT)
vvvv("attempting connection to %s via the accelerated port %d" % (self.host,self.accport))
while tries > 0:
try:
self.conn.connect((self.host,self.accport))
break
except socket.error:
vvvv("connection to %s failed, retrying..." % self.host)
time.sleep(0.1)
tries -= 1
if tries == 0:
vvv("Could not connect via the accelerated connection, exceeded # of tries")
raise AnsibleError("FAILED")
elif wrong_user:
vvv("Restarting daemon with a different remote_user")
raise AnsibleError("WRONG_USER")
self.conn.settimeout(constants.ACCELERATE_TIMEOUT)
if not self.validate_user():
# the accelerated daemon was started with a
# different remote_user. The above command
# should have caused the accelerate daemon to
# shutdown, so we'll reconnect.
wrong_user = True
except AnsibleError, e:
if allow_ssh:
if "WRONG_USER" in e:
vvv("Switching users, waiting for the daemon on %s to shutdown completely..." % self.host)
time.sleep(5)
vvv("Falling back to ssh to startup accelerated mode")
res = self._execute_accelerate_module()
if not res.is_successful():
raise AnsibleError("Failed to launch the accelerated daemon on %s (reason: %s)" % (self.host,res.result.get('msg')))
return self.connect(allow_ssh=False)
else:
raise AnsibleError("Failed to connect to %s:%s" % (self.host,self.accport))
self.is_connected = True
return self
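    # Wire format shared by send_data()/recv_data(): an 8-byte network-order
    # unsigned length header (struct '!Q') followed by that many bytes of
    # AES-encrypted JSON payload; keepalive packets ({"pong": "true"}) use
    # the same framing.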
def send_data(self, data):
packed_len = struct.pack('!Q',len(data))
return self.conn.sendall(packed_len + data)
def recv_data(self):
header_len = 8 # size of a packed unsigned long long
data = b""
try:
vvvv("%s: in recv_data(), waiting for the header" % self.host)
while len(data) < header_len:
d = self.conn.recv(header_len - len(data))
if not d:
vvvv("%s: received nothing, bailing out" % self.host)
return None
data += d
vvvv("%s: got the header, unpacking" % self.host)
data_len = struct.unpack('!Q',data[:header_len])[0]
data = data[header_len:]
vvvv("%s: data received so far (expecting %d): %d" % (self.host,data_len,len(data)))
while len(data) < data_len:
d = self.conn.recv(data_len - len(data))
if not d:
vvvv("%s: received nothing, bailing out" % self.host)
return None
vvvv("%s: received %d bytes" % (self.host, len(d)))
data += d
vvvv("%s: received all of the data, returning" % self.host)
return data
except socket.timeout:
raise AnsibleError("timed out while waiting to receive data")
def validate_user(self):
'''
Checks the remote uid of the accelerated daemon vs. the
one specified for this play and will cause the accel
daemon to exit if they don't match
'''
vvvv("%s: sending request for validate_user" % self.host)
data = dict(
mode='validate_user',
username=self.user,
)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("Failed to send command to %s" % self.host)
vvvv("%s: waiting for validate_user response" % self.host)
while True:
# we loop here while waiting for the response, because a
# long running command may cause us to receive keepalive packets
# ({"pong":"true"}) rather than the response we want.
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if "pong" in response:
# it's a keepalive, go back to waiting
vvvv("%s: received a keepalive packet" % self.host)
continue
else:
vvvv("%s: received the validate_user response: %s" % (self.host, response))
break
if response.get('failed'):
return False
else:
return response.get('rc') == 0
def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the remote host '''
if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
            # 'errors' is never imported here; AnsibleError is imported directly
            raise AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
if executable == "":
executable = constants.DEFAULT_EXECUTABLE
if self.runner.become and sudoable:
cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
vvv("EXEC COMMAND %s" % cmd)
data = dict(
mode='command',
cmd=cmd,
tmp_path=tmp_path,
executable=executable,
)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("Failed to send command to %s" % self.host)
while True:
# we loop here while waiting for the response, because a
# long running command may cause us to receive keepalive packets
# ({"pong":"true"}) rather than the response we want.
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if "pong" in response:
# it's a keepalive, go back to waiting
vvvv("%s: received a keepalive packet" % self.host)
continue
else:
vvvv("%s: received the response" % self.host)
break
return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
fd = file(in_path, 'rb')
fstat = os.stat(in_path)
try:
vvv("PUT file is %d bytes" % fstat.st_size)
last = False
while fd.tell() <= fstat.st_size and not last:
vvvv("file position currently %ld, file size is %ld" % (fd.tell(), fstat.st_size))
data = fd.read(CHUNK_SIZE)
if fd.tell() >= fstat.st_size:
last = True
data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last)
if self.runner.become:
data['user'] = self.runner.become_user
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("failed to send the file to %s" % self.host)
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if response.get('failed',False):
raise AnsibleError("failed to put the file in the requested location")
finally:
fd.close()
vvvv("waiting for final response after PUT")
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if response.get('failed',False):
raise AnsibleError("failed to put the file in the requested location")
def fetch_file(self, in_path, out_path):
''' save a remote file to the specified path '''
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
data = dict(mode='fetch', in_path=in_path)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("failed to initiate the file fetch with %s" % self.host)
fh = open(out_path, "w")
try:
bytes = 0
while True:
response = self.recv_data()
if not response:
raise AnsibleError("Failed to get a response from %s" % self.host)
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
if response.get('failed', False):
raise AnsibleError("Error during file fetch, aborting")
out = base64.b64decode(response['data'])
fh.write(out)
bytes += len(out)
# send an empty response back to signify we
# received the last chunk without errors
data = utils.jsonify(dict())
data = utils.encrypt(self.key, data)
if self.send_data(data):
raise AnsibleError("failed to send ack during file fetch")
if response.get('last', False):
break
finally:
# we don't currently care about this final response,
# we just receive it and drop it. It may be used at some
# point in the future or we may just have the put/fetch
# operations not send back a final response at all
response = self.recv_data()
vvv("FETCH wrote %d bytes to %s" % (bytes, out_path))
fh.close()
def close(self):
''' terminate the connection '''
# Be a good citizen
try:
self.conn.close()
except:
pass
|
takaaptech/sky_engine | refs/heads/master | build/android/gyp/jar.py | 36 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import optparse
import os
import sys
from util import build_utils
from util import md5_check
def Jar(class_files, classes_dir, jar_path, manifest_file=None):
jar_path = os.path.abspath(jar_path)
# The paths of the files in the jar will be the same as they are passed in to
# the command. Because of this, the command should be run in
# options.classes_dir so the .class file paths in the jar are correct.
jar_cwd = classes_dir
class_files_rel = [os.path.relpath(f, jar_cwd) for f in class_files]
jar_cmd = ['jar', 'cf0', jar_path]
if manifest_file:
jar_cmd[1] += 'm'
jar_cmd.append(os.path.abspath(manifest_file))
jar_cmd.extend(class_files_rel)
with build_utils.TempDir() as temp_dir:
empty_file = os.path.join(temp_dir, '.empty')
build_utils.Touch(empty_file)
jar_cmd.append(os.path.relpath(empty_file, jar_cwd))
record_path = '%s.md5.stamp' % jar_path
md5_check.CallAndRecordIfStale(
lambda: build_utils.CheckOutput(jar_cmd, cwd=jar_cwd),
record_path=record_path,
input_paths=class_files,
input_strings=jar_cmd,
force=not os.path.exists(jar_path),
)
build_utils.Touch(jar_path, fail_if_missing=True)
def JarDirectory(classes_dir, excluded_classes, jar_path, manifest_file=None):
class_files = build_utils.FindInDirectory(classes_dir, '*.class')
for exclude in excluded_classes:
class_files = filter(
lambda f: not fnmatch.fnmatch(f, exclude), class_files)
Jar(class_files, classes_dir, jar_path, manifest_file=manifest_file)
def main():
parser = optparse.OptionParser()
parser.add_option('--classes-dir', help='Directory containing .class files.')
parser.add_option('--jar-path', help='Jar output path.')
parser.add_option('--excluded-classes',
help='List of .class file patterns to exclude from the jar.')
parser.add_option('--stamp', help='Path to touch on success.')
options, _ = parser.parse_args()
if options.excluded_classes:
excluded_classes = build_utils.ParseGypList(options.excluded_classes)
else:
excluded_classes = []
JarDirectory(options.classes_dir,
excluded_classes,
options.jar_path)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main())
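# Example invocation (illustrative; the paths and patterns are hypothetical):
#   python jar.py --classes-dir=out/classes --jar-path=out/foo.jar \
#       --excluded-classes='["*R.class", "*R$*.class"]' --stamp=out/foo.jar.stamp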
|
ajyoon/brown | refs/heads/master | doc/generator.py | 1 | import os
import shutil
import sys
from jinja2 import Environment, PackageLoader
from scss.compiler import Compiler as ScssCompiler
import doc.doc_config as doc_config
from doc.utils import (ensure_path_exists,
module_path_to_import_name,
package_path_to_import_name)
from doc.package_doc import PackageDoc
from doc.module_doc import ModuleDoc
from doc.class_doc import ClassDoc
def find_in_set_by_name(search_set, name):
return next((item for item in search_set if item.name == name),
None)
def parse_dir(top):
packages = set()
modules = set()
global_index = set()
# Discover packages and modules
for root, dirs, files in os.walk(top):
if root.endswith('__pycache__') or '__init__.py' not in files:
continue
package_name = package_path_to_import_name(root)
packages.add(PackageDoc(root, package_name, global_index))
for file in files:
if file.endswith('.py') and file != '__init__.py':
module_name = module_path_to_import_name(root, file)
modules.add(ModuleDoc(os.path.join(root, file),
module_name, global_index))
# Link subpackages to parent packages
for package in packages:
if '.' in package.name:
parent_package_name = package.name.rsplit('.', 1)[0]
parent_package = find_in_set_by_name(packages, parent_package_name)
if parent_package:
parent_package.subpackages.add(package)
package.parent_package = parent_package
# Link modules to packages
for module in modules:
if '.' in module.name:
parent_package_name = module.name.rsplit('.', 1)[0]
parent_package = find_in_set_by_name(packages, parent_package_name)
if parent_package:
parent_package.modules.add(module)
module.package = parent_package
for class_doc in (item for item in global_index
if isinstance(item, ClassDoc)):
class_doc.resolve_superclasses()
for doc_item in global_index:
doc_item.resolve_names_and_parse_html()
return packages, modules, global_index
def make_custom_pages(build_dir):
"""Copy the `custom_pages` tree into the build root."""
custom_pages_source_dir = os.path.join(os.path.dirname(__file__),
'custom_pages')
for item in os.listdir(custom_pages_source_dir):
source = os.path.join(custom_pages_source_dir, item)
target = os.path.join(build_dir, item)
if os.path.isdir(source):
shutil.copytree(source, target)
else:
shutil.copy2(source, target)
def url_to_path(build_root, url):
return os.path.join(build_root, url[1:].replace('/', os.sep))
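# e.g. url_to_path('_build', '/api/foo.html') -> '_build/api/foo.html'
# (with os.sep == '/'): the leading '/' is stripped before joining.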
def generate(build_dir):
build_root = build_dir
static_source_dir = os.path.join(os.path.dirname(__file__), 'static')
js_source_dir = os.path.join(static_source_dir, 'js')
scss_source_dir = os.path.join(static_source_dir, 'scss')
static_build_dir = os.path.join(build_root, 'static')
js_build_dir = os.path.join(static_build_dir, 'js')
css_build_dir = os.path.join(static_build_dir, 'css')
if os.path.exists(build_root):
shutil.rmtree(build_root)
if os.path.exists(js_source_dir):
shutil.copytree(js_source_dir, js_build_dir)
os.makedirs(os.path.join(build_root, 'static', 'css'))
scss_compiler = ScssCompiler()
for scss_file_name in os.listdir(scss_source_dir):
scss_file_path = os.path.join(scss_source_dir, scss_file_name)
with open(scss_file_path, 'r') as source:
compiled = scss_compiler.compile_string(source.read())
target_path = os.path.join(css_build_dir,
os.path.split(scss_file_path)[1])
if not target_path.endswith('.css'):
target_path = os.path.splitext(target_path)[0] + '.css'
with open(target_path, 'w') as target:
target.write(compiled)
packages, modules, global_index = parse_dir(doc_config.PACKAGE_TO_DOC)
env = Environment(
loader=PackageLoader('doc', 'templates'),
autoescape=None
)
module_template = env.get_template('module.html')
package_template = env.get_template('package.html')
packages = [item for item in global_index
if type(item).__name__ == 'PackageDoc']
for item in global_index:
if type(item).__name__ == 'ModuleDoc':
path = url_to_path(build_root, item.url)
ensure_path_exists(path)
with open(path, 'w') as file:
file.write(module_template.render(
module=item,
global_index=item.global_index,
packages=packages,
domain=doc_config.DOMAIN,
page_url=item.url))
elif type(item).__name__ == 'PackageDoc':
path = url_to_path(build_root, item.url)
ensure_path_exists(path)
with open(path, 'w') as file:
file.write(package_template.render(
package=item,
global_index=item.global_index,
packages=packages,
domain=doc_config.DOMAIN,
page_url=item.url))
make_custom_pages(build_dir)
def print_help():
print("generator.py\n"
"USAGE: python doc/generator.py [out_dir]")
if __name__ == '__main__':
if len(sys.argv) == 1:
build_dir = doc_config.DEFAULT_BUILD_DIR
elif len(sys.argv) == 2:
if sys.argv[1] in ['-h', '--h', '-help', '--help']:
print_help()
sys.exit(0)
build_dir = sys.argv[1]
else:
print_help()
sys.exit(1)
generate(build_dir)
|
bitcoinknots/bitcoin | refs/heads/0.19.x-knots | test/functional/feature_notifications.py | 1 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -alertnotify, -blocknotify and -walletnotify options."""
import os
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
wait_until,
connect_nodes,
)
# Linux allows all characters other than \x00
# Windows disallows control characters (0-31) and /\?%:|"<>
FILE_CHAR_START = 43 if os.name == 'nt' else 1
FILE_CHAR_END = 128
FILE_CHAR_BLACKLIST = '/:;<>?@[\\]^`{|}' if os.name == 'nt' else '/'
class NotificationsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.wallet = 'wallet' + ''.join(chr(i) for i in range(FILE_CHAR_START, FILE_CHAR_END) if chr(i) not in FILE_CHAR_BLACKLIST)
        print(self.wallet)
self.alertnotify_dir = os.path.join(self.options.tmpdir, "alertnotify")
self.blocknotify_dir = os.path.join(self.options.tmpdir, "blocknotify")
self.walletnotify_dir = os.path.join(self.options.tmpdir, "walletnotify")
os.mkdir(self.alertnotify_dir)
os.mkdir(self.blocknotify_dir)
os.mkdir(self.walletnotify_dir)
# -alertnotify and -blocknotify on node0, walletnotify on node1
self.extra_args = [[
"-alertnotify=echo > {}".format(os.path.join(self.alertnotify_dir, '%s')),
"-blocknotify=echo > {}".format(os.path.join(self.blocknotify_dir, '%s'))],
["-blockversion=211",
"-rescan",
"-wallet={}".format(self.wallet),
"-walletnotify=echo > {}".format(os.path.join(self.walletnotify_dir, '%w_%s'))]]
super().setup_network()
def run_test(self):
self.log.info("test -blocknotify")
block_count = 10
blocks = self.nodes[1].generatetoaddress(block_count, self.nodes[1].getnewaddress() if self.is_wallet_compiled() else ADDRESS_BCRT1_UNSPENDABLE)
# wait at most 10 seconds for expected number of files before reading the content
wait_until(lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10)
# directory content should equal the generated blocks hashes
assert_equal(sorted(blocks), sorted(os.listdir(self.blocknotify_dir)))
if self.is_wallet_compiled():
self.log.info("test -walletnotify")
# wait at most 10 seconds for expected number of files before reading the content
wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: '{}_{}'.format(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
self.stop_node(1)
for tx_file in os.listdir(self.walletnotify_dir):
os.remove(os.path.join(self.walletnotify_dir, tx_file))
self.log.info("test -walletnotify after rescan")
# restart node to rescan to force wallet notifications
self.start_node(1)
connect_nodes(self.nodes[0], 1)
wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: '{}_{}'.format(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
# TODO: add test for `-alertnotify` large fork notifications
# Mine another 41 up-version blocks. -alertnotify should trigger on the 51st.
self.log.info("test -alertnotify")
self.nodes[1].generatetoaddress(41, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_all()
# Give bitcoind 10 seconds to write the alert notification
wait_until(lambda: len(os.listdir(self.alertnotify_dir)), timeout=10)
for notify_file in os.listdir(self.alertnotify_dir):
os.remove(os.path.join(self.alertnotify_dir, notify_file))
# Mine more up-version blocks, should not get more alerts:
self.nodes[1].generatetoaddress(2, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_all()
self.log.info("-alertnotify should not continue notifying for more unknown version blocks")
assert_equal(len(os.listdir(self.alertnotify_dir)), 0)
if __name__ == '__main__':
NotificationsTest().main()
|
digitalreasoning/vowpal_wabbit | refs/heads/master | python/test_search.py | 29 | import sys
import pyvw
class SequenceLabeler(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
# you must must must initialize the parent class
# this will automatically store self.sch <- sch, self.vw <- vw
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
# you can test program options with sch.po_exists
# and get their values with sch.po_get -> string and
# sch.po_get_int -> int
if sch.po_exists('search'):
print 'found --search'
print '--search value =', sch.po_get('search'), ', type =', type(sch.po_get('search'))
# set whatever options you want
sch.set_options( sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES )
def _run(self, sentence): # it's called _run to remind you that you shouldn't call it directly!
output = []
for n in range(len(sentence)):
pos,word = sentence[n]
# use "with...as..." to guarantee that the example is finished properly
with self.vw.example({'w': [word]}) as ex:
pred = self.sch.predict(examples=ex, my_tag=n+1, oracle=pos, condition=(n,'p'))
output.append(pred)
return output
# wow! your data can be ANY type you want... does NOT have to be VW examples
DET = 1
NOUN = 2
VERB = 3
ADJ = 4
my_dataset = [ [(DET , 'the'),
(NOUN, 'monster'),
(VERB, 'ate'),
(DET , 'a'),
(ADJ , 'big'),
(NOUN, 'sandwich')],
[(DET , 'the'),
(NOUN, 'sandwich'),
(VERB, 'was'),
(ADJ , 'tasty')],
[(NOUN, 'it'),
(VERB, 'ate'),
(NOUN, 'it'),
(ADJ , 'all')] ]
# initialize VW as usual, but use 'hook' as the search_task
vw = pyvw.vw("--search 4 --quiet --search_task hook --ring_size 1024")
# tell VW to construct your search task object
sequenceLabeler = vw.init_search_task(SequenceLabeler)
# train it on the above dataset ten times; the my_dataset.__iter__ feeds into _run above
print >>sys.stderr, 'training!'
for i in xrange(10):
sequenceLabeler.learn(my_dataset)
# now see the predictions on a test sentence
print >>sys.stderr, 'predicting!'
print sequenceLabeler.predict( [(0,w) for w in "the sandwich ate a monster".split()] )
print 'should have printed: [1, 2, 3, 1, 2]'
|
kenshay/ImageScript | refs/heads/master | ProgramData/SystemFiles/Python/Lib/site-packages/win32comext/shell/__init__.py | 186 | # See if we have a special directory for the binaries (for developers)
import win32com
win32com.__PackageSupportBuildPath__(__path__)
|
d40223223/w16b_test | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/_io.py | 629 | """
Python implementation of the io module.
"""
import os
import abc
import codecs
import errno
# Import _thread instead of threading to reduce startup cost
try:
from _thread import allocate_lock as Lock
except ImportError:
from _dummy_thread import allocate_lock as Lock
import io
#brython fix me
#from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
SEEK_SET=0
SEEK_CUR=1
SEEK_END=2
valid_seek_flags = {0, 1, 2} # Hardwired values
if hasattr(os, 'SEEK_HOLE') :
valid_seek_flags.add(os.SEEK_HOLE)
valid_seek_flags.add(os.SEEK_DATA)
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
# Rebind for compatibility
BlockingIOError = BlockingIOError
def __open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
r"""Open file and return a stream. Raise IOError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file is
opened. It defaults to 'r' which means open for reading in text mode. Other
common values are 'w' for writing (truncating the file if it already
exists), 'x' for exclusive creation of a new file, and 'a' for appending
(which on some Unix systems, means that all writes append to the end of the
file regardless of the current seek position). In text mode, if encoding is
not specified the encoding used is platform dependent. (For reading and
writing raw bytes use binary mode and leave encoding unspecified.) The
available modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation. The 'x' mode implies 'w' and
raises a `FileExistsError` if the file already exists.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
A custom opener can be used by passing a callable as *opener*. The
underlying file descriptor for the file object is then obtained by calling
*opener* with (*file*, *flags*). *opener* must return an open file
descriptor (passing os.open as *opener* results in functionality similar to
passing None).
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("axrwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
creating = "x" in modes
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if creating or writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if creating + reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (creating or reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(creating and "x" or "") +
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd, opener=opener)
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (os.error, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return raw
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif creating or writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
if binary:
return buffer
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
text.mode = mode
return text
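# --- Editor's illustrative sketch, not part of the original module ---
# The docstring above maps modes to return types; this demo (defined but never
# called at import time) checks that mapping with the built-in open(), which
# this module's __open mirrors. 'demo.bin' is an assumed scratch filename.
def _demo_open_modes(path='demo.bin'):
    import io
    with open(path, 'wb') as f:               # write binary -> BufferedWriter
        assert isinstance(f, io.BufferedWriter)
        f.write(b'abc')
    with open(path, 'rb', buffering=0) as f:  # unbuffered binary -> raw FileIO
        assert isinstance(f, io.FileIO)
    with open(path, 'r+b') as f:              # read/write binary -> BufferedRandom
        assert isinstance(f, io.BufferedRandom)
    with open(path, 'r') as f:                # text mode -> TextIOWrapper
        assert isinstance(f, io.TextIOWrapper)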
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pythonrun.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
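# --- Editor's illustrative sketch, not part of the original module ---
# The binding problem OpenWrapper avoids: a Python-level function stored as a
# class attribute becomes a bound method, so the instance is passed as the
# 'file' argument. A class attribute is not bound, so calling it still invokes
# open() with the arguments you actually gave. Names here are hypothetical.
def _demo_open_wrapper():
    def py_open(file, mode='r'):
        return open(file, mode)
    class Broken:
        _open = py_open          # descriptor protocol binds this as a method
    class Fixed:
        _open = OpenWrapper      # a class; attribute access does not bind it
    try:
        Broken()._open('demo.txt')   # self arrives as 'file' -> TypeError
    except TypeError:
        pass
    # Fixed()._open('demo.txt', 'w') calls open('demo.txt', 'w') as intended.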
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
class UnsupportedOperation(ValueError, IOError):
pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. bytearrays are accepted too, and in some cases (such as
readinto) needed. Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise IOError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
with open('spam.txt', 'w') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an IOError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset pos. Argument pos is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Some operating systems / file systems could provide additional values.
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
finally:
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise UnsupportedOperation.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise UnsupportedOperation.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise UnsupportedOperation.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise an ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An IOError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, limit=-1):
r"""Read and return a line of bytes from the stream.
If limit is specified, at most limit bytes will be read.
Limit should be an int.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if limit >= 0:
n = min(n, limit)
return n
else:
def nreadahead():
return 1
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
res = bytearray()
while limit < 0 or len(res) < limit:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
#fix me brython
#io.IOBase.register(IOBase)
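# --- Editor's illustrative sketch, not part of the original module ---
# IOBase's iterator and context-manager protocols in action: iteration yields
# lines, and __exit__ closes the stream. 'demo.txt' is an assumed filename.
def _demo_iobase_protocols(path='demo.txt'):
    with open(path, 'w') as fp:
        fp.writelines(['one\n', 'two\n'])
    with open(path) as fp:
        lines = [line.rstrip('\n') for line in fp]   # __iter__/__next__
    assert lines == ['one', 'two']
    assert fp.closed                                  # closed by __exit__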
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, n=-1):
"""Read and return up to n bytes, where n is an int.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if n is None:
n = -1
if n < 0:
return self.readall()
b = bytearray(n.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Returns an int representing the number of bytes read (0 for EOF), or
None if the object is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than len(b).
"""
self._unsupported("write")
#io.RawIOBase.register(RawIOBase)
#fix me brython
#from _io import FileIO
#RawIOBase.register(FileIO)
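# --- Editor's illustrative sketch, not part of the original module ---
# As the comments above note, a RawIOBase subclass only has to supply
# readinto(); read() and readall() are derived from it. A minimal in-memory
# raw stream (hypothetical, for demonstration only):
class _DemoRawBytes(RawIOBase):
    def __init__(self, data):
        self._data = bytearray(data)
    def readable(self):
        return True
    def readinto(self, b):
        n = min(len(b), len(self._data))   # copy out up to len(b) bytes
        b[:n] = self._data[:n]
        del self._data[:n]
        return n                           # 0 signals EOF to read()/readall()
# _DemoRawBytes(b'abcdef').read(4) == b'abcd'; readall() drains the rest.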
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, n=None):
"""Read and return up to n bytes, where n is an int.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, n=None):
"""Read up to n bytes with at most one read() system call,
where n is an int.
"""
self._unsupported("read1")
def readinto(self, b):
"""Read up to len(b) bytes into bytearray b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
# XXX This ought to work with anything that supports the buffer API
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array('b', data)
return n
def write(self, b):
"""Write the given bytes buffer to the IO stream.
Return the number of bytes written, which is never less than
len(b).
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
#fix me brython
#io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise IOError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise IOError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush of closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
def readable(self):
return self.raw.readable()
def writable(self):
return self.raw.writable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __getstate__(self):
raise TypeError("can not serialize a '{0}' object"
.format(self.__class__.__name__))
def __repr__(self):
clsname = self.__class__.__name__
try:
name = self.name
except AttributeError:
return "<_io.{0}>".format(clsname)
else:
return "<_io.{0} name={1!r}>".format(clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def getbuffer(self):
"""Return a readable and writable view of the buffer.
"""
return memoryview(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""This is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("unsupported whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def writable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
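# --- Editor's illustrative sketch, not part of the original module ---
# BytesIO keeps the stream in a bytearray; as write() above shows, seeking
# past the end and writing pads the gap with null bytes.
def _demo_bytesio():
    b = BytesIO(b'hello')
    b.seek(7)
    b.write(b'!')                          # pads positions 5..6 with b'\x00'
    assert b.getvalue() == b'hello\x00\x00!'
    b.seek(0)
    assert b.read(5) == b'hello'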
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
A buffer for a readable, sequential BaseRawIO object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise IOError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, n=None):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If n is negative, read until EOF or until read() would
block.
"""
if n is not None and n < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(n)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
if hasattr(self.raw, 'readall'):
chunk = self.raw.readall()
if chunk is None:
return buf[pos:] or None
else:
return buf[pos:] + chunk
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
try:
chunk = self.raw.read()
except InterruptedError:
continue
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
try:
chunk = self.raw.read(wanted)
except InterruptedError:
continue
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
# n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, n=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(n)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
while True:
try:
current = self.raw.read(to_read)
except InterruptedError:
continue
break
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, n):
"""Reads up to n bytes, with at most one read() system call."""
# Returns up to n bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if n < 0:
raise ValueError("number of bytes to read must be positive")
if n == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(n, len(self._read_buf) - self._read_pos))
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
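# --- Editor's illustrative sketch, not part of the original module ---
# peek() performs at most one raw read and never advances the position, while
# read1() returns only already-buffered bytes. BytesIO stands in for a raw
# stream here since it is readable and supports read().
def _demo_buffered_reader():
    r = BufferedReader(BytesIO(b'abcdef'), buffer_size=4)
    assert r.peek(1)[:1] == b'a'       # one raw read fills the 4-byte buffer
    assert r.read1(100) == b'abcd'     # only what the buffer already holds
    assert r.read() == b'ef'           # further raw reads as needed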
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
if not raw.writable():
raise IOError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer. (This may
# raise BlockingIOError with characters_written == 0.)
self._flush_unlocked()
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except InterruptedError:
continue
except BlockingIOError:
raise RuntimeError("self.raw should implement RawIOBase: it "
"should not raise BlockingIOError")
if n is None:
raise BlockingIOError(
errno.EAGAIN,
"write could not complete without blocking", 0)
if n > len(self._write_buf) or n < 0:
raise IOError("write() returned incorrect number of bytes")
del self._write_buf[:n]
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
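# --- Editor's illustrative sketch, not part of the original module ---
# Writes accumulate in _write_buf and only reach the raw stream once the
# buffer overflows or flush()/close() runs. BytesIO stands in for a raw
# stream since it is writable.
def _demo_buffered_writer():
    raw = BytesIO()
    w = BufferedWriter(raw, buffer_size=8)
    w.write(b'abc')
    assert raw.getvalue() == b''       # still sitting in the write buffer
    w.flush()
    assert raw.getvalue() == b'abc'    # now pushed through to the raw stream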
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE):
"""Constructor.
The arguments are two RawIO instances.
"""
if not reader.readable():
raise IOError('"reader" argument must be readable.')
if not writer.writable():
raise IOError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, n=None):
if n is None:
n = -1
return self.reader.read(n)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, n=0):
return self.reader.peek(n)
def read1(self, n):
return self.reader.read1(n)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
self.writer.close()
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise IOError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, n=None):
if n is None:
n = -1
self.flush()
return BufferedReader.read(self, n)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, n=0):
self.flush()
return BufferedReader.peek(self, n)
def read1(self, n):
self.flush()
return BufferedReader.read1(self, n)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
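# --- Editor's illustrative sketch, not part of the original module ---
# BufferedRandom layers both buffers over one seekable stream: reads flush
# pending writes first, and write() undoes any readahead, as above. Uses the
# built-in open(), which returns a BufferedRandom for 'w+b'; 'demo.bin' is an
# assumed scratch filename.
def _demo_buffered_random(path='demo.bin'):
    with open(path, 'w+b') as f:
        f.write(b'abcdef')
        f.seek(0)
        assert f.read(3) == b'abc'
        f.write(b'XYZ')                # overwrites positions 3..5
        f.seek(0)
        assert f.read() == b'abcXYZ'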
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, n=-1):
"""Read at most n characters from stream, where n is an int.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
Returns a string.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream and returning an int."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
#fix me brython
#io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with the eventual \r from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
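# --- Editor's illustrative sketch, not part of the original module ---
# The decoder withholds a trailing '\r' (pendingcr) so a '\r\n' split across
# two chunks is still translated as a single newline, and seennl records the
# newline styles encountered.
def _demo_newline_decoder():
    dec = IncrementalNewlineDecoder(decoder=None, translate=True)
    out = dec.decode('line one\r')     # trailing '\r' is held back
    out += dec.decode('\nline two')    # rejoined here into one '\n'
    assert out == 'line one\nline two'
    assert dec.newlines == '\r\n'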
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding(False).
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
enabled. With this enabled, on input, the line endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
default line separator, os.linesep. If newline is any of the other
legal values, input lines are terminated only by that string and the
line ending is returned to the caller untranslated. On output, '\n'
is translated to the given newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
# The write_through argument has no effect here since this
# implementation always writes through. The argument is present only
# so that the signature can match the signature of the C version.
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
if newline is not None and not isinstance(newline, str):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding(False)
if not isinstance(encoding, str):
raise ValueError("invalid encoding: %r" % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
self._has_read1 = hasattr(self.buffer, 'read1')
self._b2cratio = 0.0
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
result = "<_io.TextIOWrapper"
try:
name = self.name
except AttributeError:
pass
else:
result += " name={0!r}".format(name)
try:
mode = self.mode
except AttributeError:
pass
else:
result += " mode={0!r}".format(mode)
return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def buffer(self):
return self._buffer
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
try:
self.flush()
finally:
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
'Write data, where s is a str'
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, str):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
if self._has_read1:
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
else:
input_chunk = self.buffer.read(self._CHUNK_SIZE)
eof = not input_chunk
decoded_chars = self._decoder.decode(input_chunk, eof)
self._set_decoded_chars(decoded_chars)
if decoded_chars:
self._b2cratio = len(input_chunk) / len(self._decoded_chars)
else:
self._b2cratio = 0.0
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
def tell(self):
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if not self._telling:
raise IOError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Fast search for an acceptable start point, close to our
# current pos.
# Rationale: calling decoder.decode() has a large overhead
# regardless of chunk size; we want the number of such calls to
# be O(1) in most situations (common decoders, non-crazy input).
# Actually, it will be exactly 1 for fixed-size codecs (all
# 8-bit codecs, also UTF-16 and UTF-32).
skip_bytes = int(self._b2cratio * chars_to_skip)
skip_back = 1
assert skip_bytes <= len(next_input)
while skip_bytes > 0:
decoder.setstate((b'', dec_flags))
# Decode up to tentative start point
n = len(decoder.decode(next_input[:skip_bytes]))
if n <= chars_to_skip:
b, d = decoder.getstate()
if not b:
# Before pos and no bytes buffered in decoder => OK
dec_flags = d
chars_to_skip -= n
break
# Skip back by buffered amount and reset heuristic
skip_bytes -= len(b)
skip_back = 1
else:
# We're too far ahead, skip back a bit
skip_bytes -= skip_back
skip_back = skip_back * 2
else:
skip_bytes = 0
decoder.setstate((b'', dec_flags))
# Note our initial start point.
start_pos = position + skip_bytes
start_flags = dec_flags
if chars_to_skip == 0:
# We haven't moved from the start point.
return self._pack_cookie(start_pos, start_flags)
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
bytes_fed = 0
need_eof = 0
# Chars decoded since `start_pos`
chars_decoded = 0
for i in range(skip_bytes, len(next_input)):
bytes_fed += 1
chars_decoded += len(decoder.decode(next_input[i:i+1]))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise IOError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise UnsupportedOperation("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return position
if whence != 0:
raise ValueError("unsupported whence (%r)" % (whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise IOError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
# Finally, reset the encoder (merely useful for proper BOM handling)
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if cookie != 0:
encoder.setstate(0)
else:
encoder.reset()
return cookie
def read(self, n=None):
self._checkReadable()
if n is None:
n = -1
decoder = self._decoder or self._get_decoder()
try:
n.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if n < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have n characters to return.
eof = False
result = self._get_decoded_chars(n)
while len(result) < n and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(n - len(result))
return result
def __next__(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, limit=None):
if self.closed:
raise ValueError("read from closed file")
if limit is None:
limit = -1
elif not isinstance(limit, int):
raise TypeError("limit must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if limit >= 0 and len(line) >= limit:
endpos = limit # reached length limit
break
# No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if limit >= 0 and endpos > limit:
endpos = limit # don't exceed limit
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
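# --- Editor's illustrative sketch, not part of the original module ---
# Universal-newline translation on input, plus tell() cookies that seek()
# replays exactly from the nearest safe start point, as described above.
def _demo_text_wrapper():
    t = TextIOWrapper(BytesIO(b'a\r\nb\nc'), encoding='ascii')
    assert t.read() == 'a\nb\nc'       # '\r\n' and '\n' both become '\n'
    t.seek(0)
    assert t.read(2) == 'a\n'
    cookie = t.tell()                  # opaque cookie, not a byte offset
    assert t.read() == 'b\nc'
    t.seek(cookie)
    assert t.read() == 'b\nc'          # decoder state restored and replayed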
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
The initial_value argument sets the initial value of the object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="strict",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value is not None:
if not isinstance(initial_value, str):
raise TypeError("initial_value must be str or None, not {0}"
.format(type(initial_value).__name__))
initial_value = str(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
# that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
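# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Exercises the StringIO defined above: text round-trips through the
# in-memory BytesIO buffer via the TextIOWrapper machinery.
if __name__ == '__main__':
    s = StringIO('hello\n')
    s.seek(0, 2)                       # move to the end before appending
    s.write('world\n')
    s.seek(0)
    assert s.readline() == 'hello\n'
    assert s.getvalue() == 'hello\nworld\n'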
|
awkspace/ansible | refs/heads/devel | test/units/contrib/inventory/test_vmware_inventory.py | 70 | import json
import os
import pytest
import sys
BASIC_INVENTORY = {
'all': {
'hosts': ['foo', 'bar']
},
'_meta': {
'hostvars': {
'foo': {'hostname': 'foo'},
'bar': {'hostname': 'bar'}
}
}
}
class FakeArgs(object):
debug = False
write_dumpfile = None
load_dumpfile = None
host = False
list = True
@pytest.fixture
def vmware_inventory():
inventory_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', 'contrib', 'inventory'))
sys.path.append(inventory_dir)
try:
import vmware_inventory
except BaseException as ex:
        pytest.skip(str(ex))  # pytest.skip expects a string message
finally:
sys.path.remove(inventory_dir)
return vmware_inventory
def test_host_info_returns_single_host(vmware_inventory):
vmw = vmware_inventory.VMWareInventory(load=False)
vmw.inventory = BASIC_INVENTORY
foo = vmw.get_host_info('foo')
bar = vmw.get_host_info('bar')
assert foo == {'hostname': 'foo'}
assert bar == {'hostname': 'bar'}
def test_show_returns_serializable_data(vmware_inventory):
fakeargs = FakeArgs()
vmw = vmware_inventory.VMWareInventory(load=False)
vmw.args = fakeargs
vmw.inventory = BASIC_INVENTORY
showdata = vmw.show()
json.loads(showdata)
def test_show_list_returns_serializable_data(vmware_inventory):
fakeargs = FakeArgs()
vmw = vmware_inventory.VMWareInventory(load=False)
vmw.args = fakeargs
vmw.args.list = True
vmw.inventory = BASIC_INVENTORY
showdata = vmw.show()
json.loads(showdata)
def test_show_list_returns_all_data(vmware_inventory):
fakeargs = FakeArgs()
vmw = vmware_inventory.VMWareInventory(load=False)
vmw.args = fakeargs
vmw.args.list = True
vmw.inventory = BASIC_INVENTORY
showdata = vmw.show()
expected = json.dumps(BASIC_INVENTORY, indent=2)
assert showdata == expected
def test_show_host_returns_serializable_data(vmware_inventory):
fakeargs = FakeArgs()
vmw = vmware_inventory.VMWareInventory(load=False)
vmw.args = fakeargs
vmw.args.host = 'foo'
vmw.inventory = BASIC_INVENTORY
showdata = vmw.show()
json.loads(showdata)
def test_show_host_returns_just_host(vmware_inventory):
fakeargs = FakeArgs()
vmw = vmware_inventory.VMWareInventory(load=False)
vmw.args = fakeargs
vmw.args.list = False
vmw.args.host = 'foo'
vmw.inventory = BASIC_INVENTORY
showdata = vmw.show()
expected = BASIC_INVENTORY['_meta']['hostvars']['foo']
expected = json.dumps(expected, indent=2)
assert showdata == expected
|
amwelch/a10sdk-python | refs/heads/master | a10sdk/core/interface/interface_ve_ip_stateful_firewall.py | 2 | from a10sdk.common.A10BaseClass import A10BaseClass
class StatefulFirewall(A10BaseClass):
"""Class Description::
Configure Stateful Firewall direction.
Class stateful-firewall supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param access_list: {"default": 0, "optional": true, "type": "number", "description": "Access-list for traffic from the outside", "format": "flag"}
:param inside: {"default": 0, "optional": true, "type": "number", "description": "Inside (private) interface for stateful firewall", "format": "flag"}
:param acl_id: {"description": "ACL id", "format": "number", "type": "number", "maximum": 199, "minimum": 1, "optional": true}
:param outside: {"default": 0, "optional": true, "type": "number", "description": "Outside (public) interface for stateful firewall", "format": "flag"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/interface/ve/{ifnum}/ip/stateful-firewall`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "stateful-firewall"
self.a10_url="/axapi/v3/interface/ve/{ifnum}/ip/stateful-firewall"
self.DeviceProxy = ""
self.access_list = ""
self.inside = ""
self.acl_id = ""
self.outside = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
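# --- Hedged sanity sketch (not part of the generated SDK file; assumes ---
# --- A10BaseClass does not customise object construction) ---
if __name__ == '__main__':
    fw = StatefulFirewall(inside=1, acl_id=100)   # values are illustrative
    assert fw.inside == 1 and fw.acl_id == 100    # kwargs became attributes
    assert fw.a10_url == "/axapi/v3/interface/ve/{ifnum}/ip/stateful-firewall"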
|
google/NeuroNER-CSPMC | refs/heads/master | neuroner/utils_tf.py | 2 | import tensorflow as tf
def variable_summaries(var):
'''
Attach a lot of summaries to a Tensor (for TensorBoard visualization).
From https://www.tensorflow.org/get_started/summaries_and_tensorboard
'''
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def resize_tensor_variable(sess, tensor_variable, shape):
sess.run(tf.assign(tensor_variable, tf.zeros(shape), validate_shape=False))
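# --- Hedged usage sketch (assumes TensorFlow 1.x graph mode, as above) ---
if __name__ == '__main__':
    w = tf.Variable(tf.truncated_normal([10, 10]), name='weights')
    variable_summaries(w)           # attaches mean/stddev/min/max/histogram
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        resize_tensor_variable(sess, w, [5, 5])   # zero-fills with a new shape
        print(sess.run(tf.shape(w)))              # -> [5 5]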
|
cpennington/edx-platform | refs/heads/master | openedx/core/djangoapps/catalog/management/commands/create_catalog_integrations.py | 4 | '''CatalogIntegration management command'''
from django.core.management import BaseCommand, CommandError
from openedx.core.djangoapps.catalog.models import CatalogIntegration
class Command(BaseCommand):
"""Management command used to create catalog integrations."""
help = "Create catalog integration record in LMS"
def add_arguments(self, parser):
parser.add_argument(
'--enabled',
dest='enabled',
action='store_true',
help='Enable the catalog integration'
)
parser.add_argument(
'--internal_api_url',
help='The url of the internal api',
required=True
)
parser.add_argument(
'--service_username',
help='The username for the service',
required=True
)
parser.add_argument(
'--cache_ttl',
type=int,
default=0,
help='Enable caching of API responses by setting this to a value greater than 0 (seconds)'
)
parser.add_argument(
'--long_term_cache_ttl',
type=int,
default=86400,
help='Enable long term caching of API responses by setting this to a value greater than 0 (seconds)'
)
parser.add_argument(
'--page_size',
type=int,
default=100,
help='Maximum number of records in paginated response of a single request to catalog service'
)
def handle(self, *args, **options):
enabled = options.get('enabled')
internal_api_url = options.get('internal_api_url')
service_username = options.get('service_username')
cache_ttl = options.get('cache_ttl')
long_term_cache_ttl = options.get('long_term_cache_ttl')
page_size = options.get('page_size')
try:
catalog_integration = CatalogIntegration.objects.create(
enabled=enabled,
internal_api_url=internal_api_url,
service_username=service_username,
cache_ttl=cache_ttl,
long_term_cache_ttl=long_term_cache_ttl,
page_size=page_size
)
except Exception as err:
raise CommandError(u'Error creating CatalogIntegration: {}'.format(err))
self.stdout.write(self.style.SUCCESS(
u'Successfully created CatalogIntegration enabled={} url={} service_username={}').format(
catalog_integration.enabled,
catalog_integration.internal_api_url,
catalog_integration.service_username
))
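# Hedged invocation example (service name and URL are placeholders; the
# command name comes from this file's name, per Django's convention):
#   ./manage.py lms create_catalog_integrations \
#       --enabled \
#       --internal_api_url=https://catalog.example.com/api/v1/ \
#       --service_username=catalog_service_worker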
|
julian-seward1/servo | refs/heads/master | tests/wpt/web-platform-tests/webdriver/actions/mouse.py | 24 | from support.refine import get_events, filter_dict
def test_click_at_coordinates(session, test_actions_page, mouse_chain):
div_point = {
"x": 82,
"y": 187,
}
button = 0
mouse_chain \
.pointer_move(div_point["x"], div_point["y"], duration=1000) \
.pointer_down(button) \
.pointer_up(button) \
.perform()
events = get_events(session)
assert len(events) == 4
for e in events:
if e["type"] != "mousemove":
assert e["pageX"] == div_point["x"]
assert e["pageY"] == div_point["y"]
assert e["target"] == "outer"
if e["type"] != "mousedown":
assert e["buttons"] == 0
assert e["button"] == button
expected = [
{"type": "mousedown", "buttons": 1},
{"type": "mouseup", "buttons": 0},
{"type": "click", "buttons": 0},
]
filtered_events = [filter_dict(e, expected[0]) for e in events]
assert expected == filtered_events[1:]
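# Hedged gloss (not in the original test): the four recorded events are the
# trailing mousemove from pointer_move plus mousedown/mouseup/click, which is
# why `expected` holds three entries and is compared against filtered_events[1:].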
|
farlandliu/zinnia | refs/heads/master | migrations/0006_entry_template.py | 4 | from south.db import db
from south.v2 import SchemaMigration
from zinnia.migrations import user_name
from zinnia.migrations import user_table
from zinnia.migrations import user_orm_label
from zinnia.migrations import user_model_label
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Entry.template'
db.add_column('zinnia_entry', 'template', self.gf('django.db.models.fields.CharField')(default='zinnia/entry_detail.html', max_length=250), keep_default=False)
def backwards(self, orm):
# Deleting field 'Entry.template'
db.delete_column('zinnia_entry', 'template')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_name, 'db_table': "'%s'" % user_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'zinnia.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['zinnia.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'zinnia.entry': {
'Meta': {'object_name': 'Entry'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['%s']" % user_orm_label, 'symmetrical': 'False', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['zinnia.Category']", 'symmetrical': 'False'}),
'comment_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'end_publication': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2042, 3, 15, 0, 0)'}),
'excerpt': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'pingback_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_rel_+'", 'null': 'True', 'to': "orm['zinnia.Entry']"}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'start_publication': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('tagging.fields.TagField', [], {}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['zinnia']
|
iseppi/zookeepr | refs/heads/master | zk/model/proposal.py | 4 | """The application's model objects"""
import sqlalchemy as sa
from meta import Base
from pylons.controllers.util import abort
from meta import Session
from person import Person
from person_proposal_map import person_proposal_map
from attachment import Attachment
from review import Review
from stream import Stream
class ProposalStatus(Base):
"""Stores both account login details and personal information.
"""
__tablename__ = 'proposal_status'
id = sa.Column(sa.types.Integer, primary_key=True)
    # name of the status
name = sa.Column(sa.types.String(40), unique=True, nullable=False)
def __init__(self, **kwargs):
# remove the args that should never be set via creation
super(ProposalStatus, self).__init__(**kwargs)
@classmethod
def find_by_id(cls, id):
return Session.query(ProposalStatus).filter_by(id=id).first()
@classmethod
def find_by_name(cls, name):
return Session.query(ProposalStatus).filter_by(name=name).first()
@classmethod
def find_all(cls):
return Session.query(ProposalStatus).order_by(ProposalStatus.name).all()
class ProposalType(Base):
"""Stores both account login details and personal information.
"""
__tablename__ = 'proposal_type'
id = sa.Column(sa.types.Integer, primary_key=True)
    # name of the proposal type
name = sa.Column(sa.types.String(40), unique=True, nullable=False)
notify_email = sa.Column(sa.types.Text, nullable=True)
def __init__(self, **kwargs):
# remove the args that should never be set via creation
super(ProposalType, self).__init__(**kwargs)
@classmethod
def find_by_id(cls, id):
return Session.query(ProposalType).filter_by(id=id).first()
@classmethod
def find_by_name(cls, name):
return Session.query(ProposalType).filter_by(name=name).first()
@classmethod
def find_all(cls):
return Session.query(ProposalType).order_by(ProposalType.name).all()
class TravelAssistanceType(Base):
__tablename__ = 'travel_assistance_type'
id = sa.Column(sa.types.Integer, primary_key=True)
name = sa.Column(sa.types.String(60), unique=True, nullable=False)
def __init__(self, **kwargs):
# remove the args that should never be set via creation
super(TravelAssistanceType, self).__init__(**kwargs)
@classmethod
def find_by_id(cls, id):
return Session.query(TravelAssistanceType).filter_by(id=id).first()
@classmethod
def find_by_name(cls, name):
return Session.query(TravelAssistanceType).filter_by(name=name).first()
@classmethod
def find_all(cls):
return Session.query(TravelAssistanceType).order_by(TravelAssistanceType.name).all()
class TargetAudience(Base):
__tablename__ = 'target_audience'
id = sa.Column(sa.types.Integer, primary_key=True)
name = sa.Column(sa.types.String(40), unique=True, nullable=False)
def __init__(self, **kwargs):
# remove the args that should never be set via creation
super(TargetAudience, self).__init__(**kwargs)
@classmethod
def find_by_id(cls, id):
return Session.query(TargetAudience).filter_by(id=id).first()
@classmethod
def find_by_name(cls, name):
return Session.query(TargetAudience).filter_by(name=name).first()
@classmethod
def find_all(cls):
return Session.query(TargetAudience).order_by(TargetAudience.name).all()
class AccommodationAssistanceType(Base):
__tablename__ = 'accommodation_assistance_type'
id = sa.Column(sa.types.Integer, primary_key=True)
    # name of the assistance type
name = sa.Column(sa.types.String(120), unique=True, nullable=False)
def __init__(self, **kwargs):
# remove the args that should never be set via creation
super(AccommodationAssistanceType, self).__init__(**kwargs)
@classmethod
def find_by_id(cls, id):
return Session.query(AccommodationAssistanceType).filter_by(id=id).first()
@classmethod
def find_by_name(cls, name):
return Session.query(AccommodationAssistanceType).filter_by(name=name).first()
@classmethod
def find_all(cls):
return Session.query(AccommodationAssistanceType).order_by(AccommodationAssistanceType.name).all()
class Proposal(Base):
"""Stores both account login details and personal information.
"""
__tablename__ = 'proposal'
id = sa.Column(sa.types.Integer, primary_key=True)
# title of proposal
title = sa.Column(sa.types.Text, nullable=False)
# abstract or description
abstract = sa.Column(sa.types.Text, nullable=False)
private_abstract = sa.Column(sa.types.Text, nullable=False)
technical_requirements = sa.Column(sa.types.Text, nullable=False)
# type, enumerated in the proposal_type table
proposal_type_id = sa.Column(sa.types.Integer, sa.ForeignKey('proposal_type.id'), nullable=False)
# allocated stream of talk
stream_id = sa.Column(sa.types.Integer, sa.ForeignKey('stream.id'))
# type, enumerated in the assistance_type table
travel_assistance_type_id = sa.Column(sa.types.Integer, sa.ForeignKey('travel_assistance_type.id'), nullable=False)
accommodation_assistance_type_id = sa.Column(sa.types.Integer, sa.ForeignKey('accommodation_assistance_type.id'), nullable=False)
status_id = sa.Column(sa.types.Integer, sa.ForeignKey('proposal_status.id'), nullable=False)
target_audience_id = sa.Column(sa.types.Integer, sa.ForeignKey('target_audience.id'), nullable=False)
video_release = sa.Column(sa.types.Boolean, nullable=False)
slides_release = sa.Column(sa.types.Boolean, nullable=False)
# name and url of the project
project = sa.Column(sa.types.Text, nullable=False)
url = sa.Column(sa.types.Text, nullable=True)
# url to a short video
abstract_video_url = sa.Column(sa.types.Text, nullable=True)
creation_timestamp = sa.Column(sa.types.DateTime, nullable=False, default=sa.func.current_timestamp())
last_modification_timestamp = sa.Column(sa.types.DateTime, nullable=False, default=sa.func.current_timestamp(), onupdate=sa.func.current_timestamp())
# relations
type = sa.orm.relation(ProposalType, backref='proposals')
stream = sa.orm.relation(Stream)
accommodation_assistance = sa.orm.relation(AccommodationAssistanceType)
travel_assistance = sa.orm.relation(TravelAssistanceType)
status = sa.orm.relation(ProposalStatus, backref='proposals')
audience = sa.orm.relation(TargetAudience)
people = sa.orm.relation(Person, secondary=person_proposal_map, backref='proposals')
attachments = sa.orm.relation(Attachment, cascade='all, delete-orphan')
reviews = sa.orm.relation(Review, backref='proposal', cascade='all, delete-orphan')
def __init__(self, **kwargs):
# remove the args that should never be set via creation
super(Proposal, self).__init__(**kwargs)
stream_id = None
def __repr__(self):
return '<Proposal id="%r" title="%s">' % (self.id, self.title)
def _get_accepted(self):
return self.status.name == 'Accepted'
accepted = property(_get_accepted)
def _get_offered(self):
return 'Offered' in self.status.name
offered = property(_get_offered)
def _get_withdrawn(self):
return self.status.name == 'Withdrawn'
withdrawn = property(_get_withdrawn)
def _get_declined(self):
return self.status.name == 'Declined'
declined = property(_get_declined)
def _get_proposer_status(self):
if self.accepted or self.withdrawn or self.declined or self.offered:
return self.status.name
else:
return "Under Review"
proposer_status = property(_get_proposer_status)
@classmethod
def find_by_id(cls, id, abort_404 = True):
result = Session.query(Proposal).filter_by(id=id).first()
if result is None and abort_404:
abort(404, "No such proposal object")
return result
@classmethod
def find_by_title(cls, title):
return Session.query(Proposal).filter_by(title=title).order_by(Proposal.title).all()
@classmethod
def find_all(cls):
return Session.query(Proposal).order_by(Proposal.id).all()
@classmethod
def find_all_by_accommodation_assistance_type_id(cls, id, abort_404 = True):
result = Session.query(Proposal).filter_by(accommodation_assistance_type_id=id).all()
if result is None and abort_404:
abort(404, "No such proposal object")
return result
@classmethod
def find_all_by_travel_assistance_type_id(cls, id, abort_404 = True):
result = Session.query(Proposal).filter_by(travel_assistance_type_id=id).all()
if result is None and abort_404:
abort(404, "No such proposal object")
return result
# TODO: add an optional filter for removing the signed in user's proposals
@classmethod
def find_all_by_proposal_type_id(cls, id, abort_404 = True, include_withdrawn=True):
result = Session.query(Proposal).filter_by(proposal_type_id=id)
if not include_withdrawn:
withdrawn = ProposalStatus.find_by_name('Withdrawn')
result = result.filter(Proposal.status_id != withdrawn.id)
result = result.all()
if result is None and abort_404:
abort(404, "No such proposal object")
return result
@classmethod
def find_all_accepted(cls):
        # join via the status relation so the name filter constrains this
        # proposal's own status rather than producing a cross join
        return Session.query(Proposal).join(Proposal.status).filter(ProposalStatus.name == 'Accepted')
@classmethod
def find_all_accepted_without_event(cls):
status = ProposalStatus.find_by_name('Accepted')
return Session.query(Proposal).filter_by(status=status).filter_by(event=None).all()
@classmethod
def find_accepted_by_id(cls, id):
#status = ProposalStatus.find_by_name('Accepted')
#result = Session.query(Proposal).filter_by(id=id,status_id=status.id)
# Optimisation: assume that ProposalStatus of ID=1 is Accepted
result = Session.query(Proposal).filter_by(id=id,status_id=1).one()
return result
# TODO: add an optional filter for removing the signed in user's proposals
@classmethod
def find_next_proposal(cls, id, type_id, signed_in_person_id):
withdrawn = ProposalStatus.find_by_name('Withdrawn')
next = Session.query(Proposal).from_statement("""
SELECT
p.id
FROM
(SELECT id
FROM proposal
WHERE id <> %d
AND status_id <> %d
AND proposal_type_id = %d
EXCEPT
SELECT proposal_id AS id
FROM review
WHERE review.reviewer_id = %d) AS p
LEFT JOIN
review AS r
ON(p.id=r.proposal_id)
GROUP BY
p.id
ORDER BY COUNT(r.reviewer_id), RANDOM()
LIMIT 1
""" % (id, withdrawn.id, type_id, signed_in_person_id))
next = next.first()
if next is not None:
return next.id
else:
# looks like you've reviewed everything!
return None
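    # Hedged gloss on the raw SQL above: among proposals of the requested
    # type that are not withdrawn, not the current one, and not yet reviewed
    # by the signed-in reviewer, it picks the one with the fewest reviews,
    # breaking ties at random.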
@classmethod
def find_review_summary(cls):
from review import Review
return Review.stats_query().join(cls).add_entity(cls).group_by(cls)
|
Sensis/pyaem | refs/heads/master | test/test_webconsole.py | 1 | from mock import MagicMock
import pyaem
from pyaem import bagofrequests as bag
import unittest
from .util import HandlersMatcher
class TestWebConsole(unittest.TestCase):
def setUp(self):
self.web_console = pyaem.webconsole.WebConsole('http://localhost:4502', debug=True)
bag.request = MagicMock()
bag.upload_file = MagicMock()
def test_init(self):
self.assertEqual(self.web_console.url, 'http://localhost:4502')
self.assertEqual(self.web_console.kwargs['debug'], True)
self.assertTrue(401 in self.web_console.handlers)
self.assertTrue(404 in self.web_console.handlers)
self.assertTrue(405 in self.web_console.handlers)
def test_init_bundle_not_found(self):
handler = self.web_console.handlers[404]
response = None
result = handler(response, bundle_name='some_bundle_name')
self.assertEquals(result.is_failure(), True)
self.assertEquals(result.message, 'Bundle some_bundle_name not found')
self.assertEquals(result.response, response)
def test_start_bundle(self):
_self = self
class StartBundleHandlerMatcher(HandlersMatcher):
def __eq__(self, handlers):
response = None
result = handlers[200](response, bundle_name='mybundle')
_self.assertEquals(result.is_success(), True)
_self.assertEquals(result.message, 'Bundle mybundle started')
_self.assertEquals(result.response, response)
response = None
result = handlers[201](response, bundle_name='mybundle')
_self.assertEquals(result.is_success(), True)
_self.assertEquals(result.message, 'Bundle mybundle started')
_self.assertEquals(result.response, response)
return super(StartBundleHandlerMatcher, self).__eq__(handlers)
self.web_console.start_bundle('mybundle', foo='bar')
bag.request.assert_called_once_with(
'post',
'http://localhost:4502/system/console/bundles/mybundle',
{'action': 'start',
'foo': 'bar'},
StartBundleHandlerMatcher([200, 201, 401, 404, 405]),
bundle_name='mybundle',
debug=True)
def test_stop_bundle(self):
_self = self
class StopBundleHandlerMatcher(HandlersMatcher):
def __eq__(self, handlers):
response = None
result = handlers[200](response, bundle_name='mybundle')
_self.assertEquals(result.is_success(), True)
_self.assertEquals(result.message, 'Bundle mybundle stopped')
_self.assertEquals(result.response, response)
response = None
result = handlers[201](response, bundle_name='mybundle')
_self.assertEquals(result.is_success(), True)
_self.assertEquals(result.message, 'Bundle mybundle stopped')
_self.assertEquals(result.response, response)
return super(StopBundleHandlerMatcher, self).__eq__(handlers)
self.web_console.stop_bundle('mybundle', foo='bar')
bag.request.assert_called_once_with(
'post',
'http://localhost:4502/system/console/bundles/mybundle',
{'action': 'stop',
'foo': 'bar'},
StopBundleHandlerMatcher([200, 401, 404, 405]),
bundle_name='mybundle',
debug=True)
def test_install_bundle(self):
_self = self
class InstallBundleHandlerMatcher(HandlersMatcher):
def __eq__(self, handlers):
response = None
result = handlers[200](response, bundle_name='mybundle')
_self.assertEquals(result.is_success(), True)
_self.assertEquals(result.message, 'Bundle mybundle installed')
_self.assertEquals(result.response, response)
response = None
result = handlers[201](response, bundle_name='mybundle')
_self.assertEquals(result.is_success(), True)
_self.assertEquals(result.message, 'Bundle mybundle installed')
_self.assertEquals(result.response, response)
return super(InstallBundleHandlerMatcher, self).__eq__(handlers)
self.web_console.install_bundle('mybundle', '1.2.3', '/mnt/ephemeral0/', foo='bar')
bag.upload_file.assert_called_once_with(
'http://localhost:4502/system/console/bundles',
{'action': 'install',
'bundlefile': (10, '/mnt/ephemeral0/mybundle-1.2.3.jar'),
'foo': 'bar'},
InstallBundleHandlerMatcher([200, 201, 401, 404, 405]),
bundle_name='mybundle',
debug=True)
if __name__ == '__main__':
unittest.main()
|
lucastanure/GoGo-Real-Software | refs/heads/master | qtcreator/update.py | 1 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2012 Lucas Aníbal Tanure Alves - ME
# Contact Lucas Tanure [ltanure@gmail.com]
#
# This file is part of GoGo Real.
#
# GoGo Real is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GoGo Real is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GoGo Real. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import os.path
import time
def replace_motor_class(*args, **kwargs):
line = kwargs['line']
for group in range(4):
for motor in range(4):
if 'self.motor_group' + str(group) + '_motor' + str(motor) + '_icon_label = QtGui.QLabel(' in line:
new_line = line.replace('QtGui.QLabel(', 'MotorLabel(')[:-1] + ', ' + str(group)
if group == motor:
return new_line + ", '" + chr(motor + 64) + "')"
return new_line + ')'
return None
def replace_line_edit_class(*args, **kwargs):
line = kwargs['line']
if 'self.send_line_edit =' in line:
return line.replace('QtGui.QLineEdit', 'SerialLineEdit')
return None
def change_pyside_lines(*args, **kwargs):
line = kwargs['line']
class_name = kwargs['class_name']
if line != '' and line[0] != '#':
line = line.replace(class_name + '.', 'self.')\
.replace('(' + class_name + ')', '(self)')\
.replace('self.retranslateUi(self)', 'self.retranslateUi()')
if ' | ' not in line:
return line.replace('|', ' | ')
return None
def change_pyside_size_policy_lines(*args, **kwargs):
line = kwargs['line']
return line.replace(' sizePolicy = ', ' size_policy = ')\
.replace('(sizePolicy)', '(size_policy)')\
.replace(' sizePolicy.', ' size_policy.')
def change_pyside_spacer_item_lines(*args, **kwargs):
line = kwargs['line']
return line.replace(' spacerItem', ' spacer_item')\
.replace('(spacerItem', '(spacer_item')
def change_pyside_utf8_lines(*args, **kwargs):
line = kwargs['line']
def_line = 'def retranslateUi(self):'
if def_line in line:
return [line,
' '*(line.index(def_line)+4) + 'unicode_utf8 = QtGui.QApplication.UnicodeUTF8',
' '*(line.index(def_line)+4) + 'translate = QtGui.QApplication.translate']
if 'unicode_utf8 = QtGui.QApplication.UnicodeUTF8' in line or 'translate = QtGui.QApplication.translate' in line:
return None
return line.replace('QtGui.QApplication.UnicodeUTF8', 'unicode_utf8')\
.replace('QtGui.QApplication.translate', 'translate')
def method_name(def_line):
return def_line[def_line.find(' def ') + len(' def '):def_line.find('(')].replace(' ', '')
def get_class_name(def_line):
return def_line[def_line.find('class ') + len('class '):def_line.find('(')].replace(' ', '')
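# Hedged illustration of the two helpers above (inputs are examples only):
#   method_name('    def setupUi(self, Form):')   -> 'setupUi'
#   get_class_name('class GoGoRealGui(object):')  -> 'GoGoRealGui'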
def get_methods(file_lines):
methods = {}
last_def_found = None
for line in file_lines:
if ' def ' in line:
methods[method_name(line)] = []
last_def_found = method_name(line)
elif last_def_found is not None:
if 'import ' not in line: # No imports inside def
methods[last_def_found].append(line)
return methods
def merge(new_class, class_name, file_name, replacing_methods):
new_methods = get_methods(new_class)
old_lines = open(file_name, 'r').read().split('\n')
class_found = False
def_found = False
new_lines = []
for line in old_lines:
if len(line) > 0 and line[0] != ' ':
def_found = False
if def_found is False or ' def ' in line:
new_lines.append(line)
if line.find('class ') == 0:
if class_name == get_class_name(line):
class_found = True
else:
class_found = False
def_found = False
continue
if class_found:
if ' def ' in line:
if method_name(line) in new_methods.keys():
def_found = True
for oldLine in new_methods[method_name(line)]:
new_lines.append(oldLine)
else:
def_found = False
if new_lines[-1] == '':
del (new_lines[-1])
class_found = False
for each_method in replacing_methods:
new_lines2 = []
for class_line in new_lines:
if class_line.find('class ') == 0:
if class_name == get_class_name(class_line):
class_found = True
else:
class_found = False
if class_found:
new_class_line = each_method(line=class_line, class_name=class_name)
if new_class_line is None:
new_lines2.append(class_line)
elif isinstance(new_class_line, list):
for each_line in new_class_line:
new_lines2.append(each_line)
else:
new_lines2.append(new_class_line)
else:
new_lines2.append(class_line)
new_lines = new_lines2
class_file = open(file_name, 'w')
for each in new_lines:
if each == '\n':
class_file.write(each)
else:
class_file.write(each + '\n')
if __name__ == "__main__":
if time.time() - os.path.getmtime('files.qrc') < 3 * 60:
subprocess.Popen('pyside-rcc files.qrc -o ../gui/files_rc.py', shell=True)
out, _ = subprocess.Popen('pyside-uic firmwareDownloader.ui', shell=True, stdout=subprocess.PIPE).communicate()
merge(out.split('\n'), 'FirmwareDownloader', '../gui/dialogs.py', [change_pyside_lines,
change_pyside_utf8_lines])
out, _ = subprocess.Popen('pyside-uic newPort.ui', shell=True, stdout=subprocess.PIPE).communicate()
merge(out.split('\n'), 'NewPort', '../gui/dialogs.py', [change_pyside_lines,
change_pyside_utf8_lines])
out, _ = subprocess.Popen('pyside-uic about.ui', shell=True, stdout=subprocess.PIPE).communicate()
merge(out.split('\n'), 'About', '../gui/dialogs.py', [change_pyside_lines,
change_pyside_utf8_lines,
change_pyside_spacer_item_lines])
out, _ = subprocess.Popen('pyside-uic console.ui', shell=True, stdout=subprocess.PIPE).communicate()
merge(out.split('\n'), 'Console', '../gui/dialogs.py', [replace_line_edit_class,
change_pyside_lines,
change_pyside_spacer_item_lines,
change_pyside_utf8_lines])
out, _ = subprocess.Popen('pyside-uic real.ui', shell=True, stdout=subprocess.PIPE).communicate()
merge(out.split('\n'), 'GoGoRealGui', '../gui/real.py', [replace_motor_class,
change_pyside_lines,
change_pyside_utf8_lines,
change_pyside_size_policy_lines,
change_pyside_spacer_item_lines])
|
stopstalk/stopstalk-deployment | refs/heads/master | aws_lambda/spoj_aws_lambda_function/lambda_code/pip/_internal/resolve.py | 8 | """Dependency Resolution
The dependency resolution in pip is performed as follows:
for top-level requirements:
a. only one spec allowed per project, regardless of conflicts or not.
otherwise a "double requirement" exception is raised
b. they override sub-dependency requirements.
for sub-dependencies
a. "first found, wins" (where the order is breadth first)
"""
import logging
from collections import defaultdict
from itertools import chain
from pip._internal.exceptions import (
BestVersionAlreadyInstalled, DistributionNotFound, HashError, HashErrors,
UnsupportedPythonVersion,
)
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import dist_in_usersite, ensure_dir
from pip._internal.utils.packaging import check_dist_requires_python
logger = logging.getLogger(__name__)
class Resolver(object):
"""Resolves which packages need to be installed/uninstalled to perform \
the requested operation without breaking the requirements of any package.
"""
_allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
def __init__(self, preparer, session, finder, wheel_cache, use_user_site,
ignore_dependencies, ignore_installed, ignore_requires_python,
force_reinstall, isolated, upgrade_strategy):
super(Resolver, self).__init__()
assert upgrade_strategy in self._allowed_strategies
self.preparer = preparer
self.finder = finder
self.session = session
# NOTE: This would eventually be replaced with a cache that can give
# information about both sdist and wheels transparently.
self.wheel_cache = wheel_cache
self.require_hashes = None # This is set in resolve
self.upgrade_strategy = upgrade_strategy
self.force_reinstall = force_reinstall
self.isolated = isolated
self.ignore_dependencies = ignore_dependencies
self.ignore_installed = ignore_installed
self.ignore_requires_python = ignore_requires_python
self.use_user_site = use_user_site
self._discovered_dependencies = defaultdict(list)
def resolve(self, requirement_set):
"""Resolve what operations need to be done
As a side-effect of this method, the packages (and their dependencies)
are downloaded, unpacked and prepared for installation. This
preparation is done by ``pip.operations.prepare``.
Once PyPI has static dependency metadata available, it would be
possible to move the preparation to become a step separated from
dependency resolution.
"""
# make the wheelhouse
if self.preparer.wheel_download_dir:
ensure_dir(self.preparer.wheel_download_dir)
# If any top-level requirement has a hash specified, enter
# hash-checking mode, which requires hashes from all.
root_reqs = (
requirement_set.unnamed_requirements +
list(requirement_set.requirements.values())
)
self.require_hashes = (
requirement_set.require_hashes or
any(req.has_hash_options for req in root_reqs)
)
# Display where finder is looking for packages
locations = self.finder.get_formatted_locations()
if locations:
logger.info(locations)
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# req.populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = []
hash_errors = HashErrors()
for req in chain(root_reqs, discovered_reqs):
try:
discovered_reqs.extend(
self._resolve_one(requirement_set, req)
)
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors
def _is_upgrade_allowed(self, req):
if self.upgrade_strategy == "to-satisfy-only":
return False
elif self.upgrade_strategy == "eager":
return True
else:
assert self.upgrade_strategy == "only-if-needed"
return req.is_direct
def _set_req_to_reinstall(self, req):
"""
Set a requirement to be installed.
"""
# Don't uninstall the conflict if doing a user install and the
# conflict is not a user install.
if not self.use_user_site or dist_in_usersite(req.satisfied_by):
req.conflicts_with = req.satisfied_by
req.satisfied_by = None
# XXX: Stop passing requirement_set for options
def _check_skip_installed(self, req_to_install):
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
if self.ignore_installed:
return None
req_to_install.check_if_exists(self.use_user_site)
if not req_to_install.satisfied_by:
return None
if self.force_reinstall:
self._set_req_to_reinstall(req_to_install)
return None
if not self._is_upgrade_allowed(req_to_install):
if self.upgrade_strategy == "only-if-needed":
return 'already satisfied, skipping upgrade'
return 'already satisfied'
# Check for the possibility of an upgrade. For link-based
# requirements we have to pull the tree down and inspect to assess
# the version #, so it's handled way down.
if not req_to_install.link:
try:
self.finder.find_requirement(req_to_install, upgrade=True)
except BestVersionAlreadyInstalled:
# Then the best version is installed.
return 'already up-to-date'
except DistributionNotFound:
# No distribution found, so we squash the error. It will
# be raised later when we re-try later to do the install.
# Why don't we just raise here?
pass
self._set_req_to_reinstall(req_to_install)
return None
def _get_abstract_dist_for(self, req):
"""Takes a InstallRequirement and returns a single AbstractDist \
representing a prepared variant of the same.
"""
assert self.require_hashes is not None, (
"require_hashes should have been set in Resolver.resolve()"
)
if req.editable:
return self.preparer.prepare_editable_requirement(
req, self.require_hashes, self.use_user_site, self.finder,
)
# satisfied_by is only evaluated by calling _check_skip_installed,
# so it must be None here.
assert req.satisfied_by is None
skip_reason = self._check_skip_installed(req)
if req.satisfied_by:
return self.preparer.prepare_installed_requirement(
req, self.require_hashes, skip_reason
)
upgrade_allowed = self._is_upgrade_allowed(req)
abstract_dist = self.preparer.prepare_linked_requirement(
req, self.session, self.finder, upgrade_allowed,
self.require_hashes
)
# NOTE
# The following portion is for determining if a certain package is
# going to be re-installed/upgraded or not and reporting to the user.
# This should probably get cleaned up in a future refactor.
# req.req is only avail after unpack for URL
# pkgs repeat check_if_exists to uninstall-on-upgrade
# (#14)
if not self.ignore_installed:
req.check_if_exists(self.use_user_site)
if req.satisfied_by:
should_modify = (
self.upgrade_strategy != "to-satisfy-only" or
self.force_reinstall or
self.ignore_installed or
req.link.scheme == 'file'
)
if should_modify:
self._set_req_to_reinstall(req)
else:
logger.info(
'Requirement already satisfied (use --upgrade to upgrade):'
' %s', req,
)
return abstract_dist
def _resolve_one(self, requirement_set, req_to_install):
"""Prepare a single requirements file.
:return: A list of additional InstallRequirements to also install.
"""
# Tell user what we are doing for this requirement:
# obtain (editable), skipping, processing (local url), collecting
# (remote url or package name)
if req_to_install.constraint or req_to_install.prepared:
return []
req_to_install.prepared = True
# register tmp src for cleanup in case something goes wrong
requirement_set.reqs_to_cleanup.append(req_to_install)
abstract_dist = self._get_abstract_dist_for(req_to_install)
# Parse and return dependencies
dist = abstract_dist.dist(self.finder)
try:
check_dist_requires_python(dist)
except UnsupportedPythonVersion as err:
if self.ignore_requires_python:
logger.warning(err.args[0])
else:
raise
more_reqs = []
def add_req(subreq, extras_requested):
sub_install_req = InstallRequirement.from_req(
str(subreq),
req_to_install,
isolated=self.isolated,
wheel_cache=self.wheel_cache,
)
parent_req_name = req_to_install.name
to_scan_again, add_to_parent = requirement_set.add_requirement(
sub_install_req,
parent_req_name=parent_req_name,
extras_requested=extras_requested,
)
if parent_req_name and add_to_parent:
self._discovered_dependencies[parent_req_name].append(
add_to_parent
)
more_reqs.extend(to_scan_again)
with indent_log():
# We add req_to_install before its dependencies, so that we
# can refer to it when adding dependencies.
if not requirement_set.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
req_to_install.is_direct = True
requirement_set.add_requirement(
req_to_install, parent_req_name=None,
)
if not self.ignore_dependencies:
if req_to_install.extras:
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
missing_requested = sorted(
set(req_to_install.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
for subreq in dist.requires(available_requested):
add_req(subreq, extras_requested=available_requested)
if not req_to_install.editable and not req_to_install.satisfied_by:
# XXX: --no-install leads this to report 'Successfully
# downloaded' for only non-editable reqs, even though we took
# action on them.
requirement_set.successfully_downloaded.append(req_to_install)
return more_reqs
def get_installation_order(self, req_set):
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set()
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._discovered_dependencies[req.name]:
schedule(dep)
order.append(req)
for install_req in req_set.requirements.values():
schedule(install_req)
return order
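# --- Hedged toy model (not part of pip) of the ordering above ---
# schedule() in get_installation_order is a post-order DFS, so dependencies
# always precede their dependents; this standalone sketch mirrors that logic.
def _toy_install_order(roots, deps):
    order, seen = [], set()
    def schedule(name):
        if name in seen:
            return
        seen.add(name)
        for dep in deps.get(name, []):
            schedule(dep)
        order.append(name)
    for name in roots:
        schedule(name)
    return order
assert _toy_install_order(['A'], {'A': ['B'], 'B': ['C']}) == ['C', 'B', 'A']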
|
alkalait/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | refs/heads/master | ExamplesFromChapters/Chapter2/FreqOfCheaters.py | 97 | import pymc as pm
p = pm.Uniform("freq_cheating", 0, 1)
@pm.deterministic
def p_skewed(p=p):
return 0.5 * p + 0.25
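# Hedged gloss on the constant above: with probability 0.5 a respondent
# answers truthfully (yes w.p. p) and with probability 0.5 answers at
# random (yes w.p. 0.5), so P(yes) = 0.5*p + 0.5*0.5 = 0.5*p + 0.25.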
yes_responses = pm.Binomial(
"number_cheaters", 100, p_skewed, value=35, observed=True)
model = pm.Model([yes_responses, p_skewed, p])
# To Be Explained in Chapter 3!
mcmc = pm.MCMC(model)
mcmc.sample(50000, 25000)
|
Edeleon4/PoolShark | refs/heads/master | Python279/Tools/i18n/makelocalealias.py | 24 | #!/usr/bin/env python
"""
Convert the X11 locale.alias file into a mapping dictionary suitable
for locale.py.
Written by Marc-Andre Lemburg <mal@genix.com>, 2004-12-10.
"""
import locale
# Location of the alias file
LOCALE_ALIAS = '/usr/share/X11/locale/locale.alias'
def parse(filename):
f = open(filename)
lines = f.read().splitlines()
data = {}
for line in lines:
line = line.strip()
if not line:
continue
if line[:1] == '#':
continue
locale, alias = line.split()
# Fix non-standard locale names, e.g. ks_IN@devanagari.UTF-8
if '@' in alias:
alias_lang, _, alias_mod = alias.partition('@')
if '.' in alias_mod:
alias_mod, _, alias_enc = alias_mod.partition('.')
alias = alias_lang + '.' + alias_enc + '@' + alias_mod
# Strip ':'
if locale[-1] == ':':
locale = locale[:-1]
# Lower-case locale
locale = locale.lower()
# Ignore one letter locale mappings (except for 'c')
if len(locale) == 1 and locale != 'c':
continue
# Normalize encoding, if given
if '.' in locale:
lang, encoding = locale.split('.')[:2]
encoding = encoding.replace('-', '')
encoding = encoding.replace('_', '')
locale = lang + '.' + encoding
if encoding.lower() == 'utf8':
# Ignore UTF-8 mappings - this encoding should be
# available for all locales
continue
data[locale] = alias
return data
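# Hedged worked example of the normalization above (input line invented):
#   "en_US.ISO-8859-1:  en_US.ISO8859-1"
# splits into locale 'en_US.ISO-8859-1:' and alias 'en_US.ISO8859-1';
# the trailing ':' is stripped, the locale is lower-cased and its encoding
# de-hyphenated, yielding data['en_us.iso88591'] = 'en_US.ISO8859-1'.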
def pprint(data):
items = data.items()
items.sort()
for k,v in items:
print ' %-40s%r,' % ('%r:' % k, v)
def print_differences(data, olddata):
items = olddata.items()
items.sort()
for k, v in items:
if not data.has_key(k):
print '# removed %r' % k
elif olddata[k] != data[k]:
print '# updated %r -> %r to %r' % \
(k, olddata[k], data[k])
# Additions are not mentioned
if __name__ == '__main__':
data = locale.locale_alias.copy()
data.update(parse(LOCALE_ALIAS))
print_differences(data, locale.locale_alias)
print
print 'locale_alias = {'
pprint(data)
print '}'
|
AlmogCohen/flask-admin | refs/heads/master | examples/quickstart/app2.py | 42 | from flask import Flask
from flask_admin import Admin, BaseView, expose
class MyView(BaseView):
@expose('/')
def index(self):
return self.render('index.html')
app = Flask(__name__)
app.debug = True
admin = Admin(app, name="Example: Quickstart2")
admin.add_view(MyView(name='Hello'))
if __name__ == '__main__':
# Start app
app.run()
|
Immortalin/python-for-android | refs/heads/master | python3-alpha/extra_modules/oauth2/clients/imap.py | 885 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import imaplib
class IMAP4_SSL(imaplib.IMAP4_SSL):
"""IMAP wrapper for imaplib.IMAP4_SSL that implements XOAUTH."""
def authenticate(self, url, consumer, token):
if consumer is not None and not isinstance(consumer, oauth2.Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, oauth2.Token):
raise ValueError("Invalid token.")
imaplib.IMAP4_SSL.authenticate(self, 'XOAUTH',
lambda x: oauth2.build_xoauth_string(url, consumer, token))
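# Hedged usage sketch (host, URL and credentials are placeholders):
#   conn = IMAP4_SSL('imap.googlemail.com')
#   conn.authenticate('https://mail.google.com/mail/b/user@example.com/imap/',
#                     consumer, token)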
|
ROMFactory/android_external_chromium_org | refs/heads/kitkat | chrome/common/extensions/docs/examples/apps/hello-python/oauth2/clients/imap.py | 885 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import imaplib
class IMAP4_SSL(imaplib.IMAP4_SSL):
"""IMAP wrapper for imaplib.IMAP4_SSL that implements XOAUTH."""
def authenticate(self, url, consumer, token):
if consumer is not None and not isinstance(consumer, oauth2.Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, oauth2.Token):
raise ValueError("Invalid token.")
imaplib.IMAP4_SSL.authenticate(self, 'XOAUTH',
lambda x: oauth2.build_xoauth_string(url, consumer, token))
|
emiliop/webservice | refs/heads/master | vendor/psy/psysh/test/tools/gen_unvis_fixtures.py | 536 | #! /usr/bin/env python3
import sys
from os.path import abspath, expanduser, dirname, join
from itertools import chain
import json
import argparse
from vis import vis, unvis, VIS_WHITE
__dir__ = dirname(abspath(__file__))
OUTPUT_FILE = join(__dir__, '..', 'fixtures', 'unvis_fixtures.json')
# Add custom fixtures here
CUSTOM_FIXTURES = [
# test long multibyte string
''.join(chr(cp) for cp in range(1024)),
'foo bar',
'foo\nbar',
"$bar = 'baz';",
r'$foo = "\x20\\x20\\\x20\\\\x20"',
'$foo = function($bar) use($baz) {\n\treturn $baz->getFoo()\n};'
]
RANGES = {
# All valid codepoints in the BMP
'bmp': chain(range(0x0000, 0xD800), range(0xE000, 0xFFFF)),
# Smaller set of pertinent? codepoints inside BMP
# see: http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
'small': chain(
# latin blocks
range(0x0000, 0x0250),
# Greek, Cyrillic
range(0x0370, 0x0530),
# Hebrew, Arabic
range(0x590, 0x0700),
# CJK radicals
range(0x2E80, 0x2F00),
# Hiragana, Katakana
range(0x3040, 0x3100)
)
}
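# Hedged invocation examples (flags defined by the argparse setup below):
#   ./gen_unvis_fixtures.py --range=small --format-output
#   ./gen_unvis_fixtures.py --all -o /tmp/unvis_fixtures.json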
if __name__ == '__main__':
argp = argparse.ArgumentParser(
description='Generates test data for Psy\\Test\\Util\\StrTest')
argp.add_argument('-f', '--format-output', action='store_true',
help='Indent JSON output to ease debugging')
argp.add_argument('-a', '--all', action='store_true',
help="""Generates test data for all codepoints of the BMP.
(same as --range=bmp). WARNING: You will need quite
a lot of RAM to run the testsuite !
""")
argp.add_argument('-r', '--range',
help="""Choose the range of codepoints used to generate
test data.""",
choices=list(RANGES.keys()),
default='small')
argp.add_argument('-o', '--output-file',
help="""Write test data to OUTPUT_FILE
(defaults to PSYSH_DIR/test/fixtures)""")
args = argp.parse_args()
cp_range = RANGES['bmp'] if args.all else RANGES[args.range]
indent = 2 if args.format_output else None
if args.output_file:
OUTPUT_FILE = abspath(expanduser(args.output_file))
fixtures = []
    # use the 'small' range by default, it should be enough.
    # use the 'bmp' range for a more complete smoke test
for codepoint in cp_range:
char = chr(codepoint)
encoded = vis(char, VIS_WHITE)
decoded = unvis(encoded)
fixtures.append((encoded, decoded))
# Add our own custom fixtures at the end,
# since they would fail anyway if one of the previous did.
for fixture in CUSTOM_FIXTURES:
encoded = vis(fixture, VIS_WHITE)
decoded = unvis(encoded)
fixtures.append((encoded, decoded))
with open(OUTPUT_FILE, 'w') as fp:
        # dump as JSON to avoid a backslashing and quoting nightmare
# between php and python
json.dump(fixtures, fp, indent=indent)
sys.exit(0)
|
indautgrp/erpnext | refs/heads/develop | erpnext/schools/doctype/program_fee/program_fee.py | 54 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ProgramFee(Document):
pass
|
hellwen/ivy_3dprint_site | refs/heads/master | list/lib/python3.3/site-packages/pip/vendor/html5lib/inputstream.py | 79 | from __future__ import absolute_import, division, unicode_literals
from pip.vendor.six import text_type
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream:
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1, 0] # chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos <= self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return b"".join(rv)
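# A minimal usage sketch (not part of html5lib): BufferedStream lets the
# pre-parser rewind a non-seekable source. The BytesIO below merely stands
# in for such a stream.
def _buffered_stream_demo():
    from io import BytesIO
    stream = BufferedStream(BytesIO(b"abcdef"))
    first = stream.read(3)        # b"abc": read from the source and buffered
    stream.seek(0)                # rewind within the buffer, no source seek
    return first, stream.read(3)  # the second b"abc" is served from the buffer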
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
if hasattr(source, "read"):
isUnicode = isinstance(source.read(0), text_type)
else:
isUnicode = isinstance(source, text_type)
if isUnicode:
if encoding is not None:
raise TypeError("Cannot explicitly set an encoding with a unicode string")
return HTMLUnicodeInputStream(source)
else:
return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
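# Illustrative dispatch of the factory above (text vs. bytes input):
#
#   HTMLInputStream("<p>hi</p>")           -> HTMLUnicodeInputStream
#   HTMLInputStream(b"<p>hi</p>")          -> HTMLBinaryInputStream
#   HTMLInputStream("<p>hi</p>", "utf-8")  -> TypeError (encoding + unicode)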
class HTMLUnicodeInputStream:
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Craziness
if len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = ("utf-8", "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub("\ufffd", data)
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for i in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
skip = False; continue  # only skip the trail half of the pair just handled
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
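# A minimal sketch (not from the original module) exercising the
# character-level API defined above on an in-memory string.
def _unicode_stream_demo():
    stream = HTMLUnicodeInputStream("ab\ncd")
    head = stream.charsUntil(frozenset("\n"))  # "ab"
    newline = stream.char()                    # "\n"
    stream.unget(newline)                      # push it back onto the chunk
    return head, stream.char(), stream.position()  # ("ab", "\n", (2, 0))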
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
self.charEncoding = (codecName(encoding), "certain")
# Encoding Information
# Number of bytes to use when looking for a meta element with
# encoding information
self.numBytesMeta = 512
# Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
# Encoding to use if no other information can be found
self.defaultEncoding = "windows-1252"
# Detect encoding iff no explicit "transport level" encoding is supplied
if (self.charEncoding[0] is None):
self.charEncoding = self.detectEncoding(parseMeta, chardet)
# Call superclass
self.reset()
def reset(self):
self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
'replace')
HTMLUnicodeInputStream.reset(self)
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except:
stream = BufferedStream(stream)
return stream
def detectEncoding(self, parseMeta=True, chardet=True):
# First look for a BOM
# This will also read past the BOM if present
encoding = self.detectBOM()
confidence = "certain"
# If there is no BOM need to look for meta elements with encoding
# information
if encoding is None and parseMeta:
encoding = self.detectEncodingMeta()
confidence = "tentative"
# Guess with chardet, if available
if encoding is None and chardet:
confidence = "tentative"
try:
try:
from charade.universaldetector import UniversalDetector
except ImportError:
from chardet.universaldetector import UniversalDetector
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, bytes)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = detector.result['encoding']
self.rawStream.seek(0)
except ImportError:
pass
# If all else fails use the default encoding
if encoding is None:
confidence = "tentative"
encoding = self.defaultEncoding
# Substitute for equivalent encodings:
encodingSub = {"iso-8859-1": "windows-1252"}
if encoding.lower() in encodingSub:
encoding = encodingSub[encoding.lower()]
return encoding, confidence
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != "certain"
newEncoding = codecName(newEncoding)
if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
newEncoding = "utf-8"
if newEncoding is None:
return
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.reset()
self.charEncoding = (newEncoding, "certain")
raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
def detectBOM(self):
"""Attempts to detect a BOM at the start of the stream. If
an encoding can be determined from the BOM, return the name of the
encoding; otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, bytes)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
self.rawStream.seek(encoding and seek or 0)
return encoding
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
encoding = "utf-8"
return encoding
class EncodingBytes(bytes):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
assert isinstance(value, bytes)
return bytes.__new__(self, value.lower())
def __init__(self, value):
self._position = -1
def __iter__(self):
return self
def __next__(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p + 1]
def next(self):
# Py2 compat
return self.__next__()
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p:p + 1]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position:self.position + 1]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p:p + 1]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p:p + 1]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p + len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes) - 1)
return True
else:
raise StopIteration
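# A minimal sketch (not from the original module) of EncodingBytes
# positioning; note that __new__ lowercases the input.
def _encoding_bytes_demo():
    data = EncodingBytes(b"KEY=VALUE")
    first = next(data)                  # b"k", the lowercased first byte
    data.skipUntil(frozenset([b"="]))   # advance to the separator
    return first, data.currentByte      # (b"k", b"=")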
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
(b"<!--", self.handleComment),
(b"<meta", self.handleMeta),
(b"</", self.handlePossibleEndTag),
(b"<!", self.handleOther),
(b"<?", self.handleOther),
(b"<", self.handlePossibleStartTag))
for byte in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing = False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo(b"-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
# <meta is not followed by a space, so just keep going
return True
# We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
# Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == b"http-equiv":
hasPragma = attr[1] == b"content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == b"charset":
tentativeEncoding = attr[1]
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == b"content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = codecName(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
next(self.data)
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
# If the next byte is not an ascii letter either ignore this
# fragment (possible start tag case) or treat it according to
# handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == b"<":
# return to the first step in the overall "two step" algorithm
# reprocessing the < byte
data.previous()
else:
# Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(b">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
assert c is None or len(c) == 1
# Step 2
if c in (b">", None):
return None
# Step 3
attrName = []
attrValue = []
# Step 4 attribute name
while True:
if c == b"=" and attrName:
break
elif c in spaceCharactersBytes:
# Step 6!
c = data.skip()
break
elif c in (b"/", b">"):
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c is None:
return None
else:
attrName.append(c)
# Step 5
c = next(data)
# Step 7
if c != b"=":
data.previous()
return b"".join(attrName), b""
# Step 8
next(data)
# Step 9
c = data.skip()
# Step 10
if c in (b"'", b'"'):
# 10.1
quoteChar = c
while True:
# 10.2
c = next(data)
# 10.3
if c == quoteChar:
next(data)
return b"".join(attrName), b"".join(attrValue)
# 10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
# 10.5
else:
attrValue.append(c)
elif c == b">":
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = next(data)
if c in spacesAngleBrackets:
return b"".join(attrName), b"".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
class ContentAttrParser(object):
def __init__(self, data):
assert isinstance(data, bytes)
self.data = data
def parse(self):
try:
# Check if the attr name is charset
# otherwise return
self.data.jumpTo(b"charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == b"=":
# If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
# Look for an encoding between matching quote marks
if self.data.currentByte in (b'"', b"'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
# Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
# Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
def codecName(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, bytes):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding:
canonicalName = ascii_punctuation_re.sub("", encoding).lower()
return encodings.get(canonicalName, None)
else:
return None
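# A minimal end-to-end sketch of the pre-parser above; the sample markup is
# illustrative, not taken from the original module.
def _encoding_parser_demo():
    parser = EncodingParser(b'<meta charset="UTF-8">')
    return parser.getEncoding()  # 'utf-8', canonicalised via codecName()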
|
andy928/xpenology | refs/heads/master | arch/ia64/scripts/unwcheck.py | 13143 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
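# Illustrative invocation (the READELF environment variable is honoured,
# per the os.getenv call above):
#   READELF=/usr/bin/readelf ./unwcheck.py vmlinux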
|
joongh/robotframework | refs/heads/master | atest/testresources/listeners/ListenAll.py | 27 | import os
import time
class ListenAll:
ROBOT_LISTENER_API_VERSION = '2'
def __init__(self, *path):
path = ':'.join(path) if path else self._get_default_path()
self.outfile = open(path, 'w')
def _get_default_path(self):
return os.path.join(os.getenv('TEMPDIR'), 'listen_all.txt')
def start_suite(self, name, attrs):
metastr = ' '.join('%s: %s' % (k, v) for k, v in attrs['metadata'].items())
self.outfile.write("SUITE START: %s (%s) '%s' [%s]\n"
% (name, attrs['id'], attrs['doc'], metastr))
def start_test(self, name, attrs):
tags = [str(tag) for tag in attrs['tags']]
self.outfile.write("TEST START: %s (%s) '%s' %s crit: %s\n"
% (name, attrs['id'], attrs['doc'], tags, attrs['critical']))
def start_keyword(self, name, attrs):
args = [str(arg) for arg in attrs['args']]
if attrs['assign']:
assign = '%s = ' % ', '.join(attrs['assign'])
else:
assign = ''
self.outfile.write("KW START: %s%s %s\n" % (assign, name, args))
def log_message(self, message):
msg, level = self._check_message_validity(message)
if level != 'TRACE' and 'Traceback' not in msg:
self.outfile.write('LOG MESSAGE: [%s] %s\n' % (level, msg))
def message(self, message):
msg, level = self._check_message_validity(message)
if 'Settings' in msg:
self.outfile.write('Got settings on level: %s\n' % level)
def _check_message_validity(self, message):
if message['html'] not in ['yes', 'no']:
self.outfile.write('Log message has invalid `html` attribute %s' %
message['html'])
if not message['timestamp'].startswith(str(time.localtime()[0])):
self.outfile.write('Log message has invalid timestamp %s' %
message['timestamp'])
return message['message'], message['level']
def end_keyword(self, name, attrs):
self.outfile.write("KW END: %s\n" % (attrs['status']))
def end_test(self, name, attrs):
if attrs['status'] == 'PASS':
self.outfile.write('TEST END: PASS crit: %s\n' % attrs['critical'])
else:
self.outfile.write("TEST END: %s %s crit: %s\n"
% (attrs['status'], attrs['message'], attrs['critical']))
def end_suite(self, name, attrs):
self.outfile.write('SUITE END: %s %s\n'
% (attrs['status'], attrs['statistics']))
def output_file(self, path):
self._out_file('Output', path)
def report_file(self, path):
self._out_file('Report', path)
def log_file(self, path):
self._out_file('Log', path)
def debug_file(self, path):
self._out_file('Debug', path)
def _out_file(self, name, path):
assert os.path.isabs(path)
self.outfile.write('%s: %s\n' % (name, os.path.basename(path)))
def close(self):
self.outfile.write('Closing...\n')
self.outfile.close()
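# Illustrative registration via Robot Framework's --listener option:
#   robot --listener /path/to/ListenAll.py tests/
# With no listener argument the output goes to $TEMPDIR/listen_all.txt
# (see _get_default_path above).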
|
allenta/varnish-bans-manager | refs/heads/master | varnish_bans_manager/core/views/user.py | 1 | # -*- coding: utf-8 -*-
'''
:copyright: (c) 2012 by Allenta Consulting, see AUTHORS.txt for more details.
:license: GPL, see LICENSE.txt for more details.
'''
from __future__ import absolute_import
import urlparse
from abc import ABCMeta
from django.contrib import auth, messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.tokens import default_token_generator
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.http import base36_to_int
from django.utils.translation import ugettext as _
from django.utils.decorators import method_decorator
from django.views.generic import View
from varnish_bans_manager.core.helpers import commands, DEFAULT_SUCCESS_MESSAGE, DEFAULT_FORM_ERROR_MESSAGE
from varnish_bans_manager.core.helpers.http import HttpResponseAjax
from varnish_bans_manager.core.helpers.views import ajaxify
from varnish_bans_manager.core.forms.user import LoginForm, PasswordResetForm, PasswordResetConfirmationForm, ProfilePreferencesForm, PasswordChangeForm
from varnish_bans_manager.core.models import User
class Base(View):
__metaclass__ = ABCMeta
@method_decorator(ajaxify)
def dispatch(self, *args, **kwargs):
return super(Base, self).dispatch(*args, **kwargs)
class AnonymousBase(Base):
__metaclass__ = ABCMeta
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated():
return super(AnonymousBase, self).dispatch(
request, *args, **kwargs)
else:
return HttpResponseRedirect(reverse('home'))
class AuthenticatedBase(Base):
__metaclass__ = ABCMeta
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(AuthenticatedBase, self).dispatch(
request, *args, **kwargs)
class Login(AnonymousBase):
def get(self, request):
# Destination?
destination = request.GET.get(
auth.REDIRECT_FIELD_NAME, reverse('home'))
# Don't allow redirection to a different host.
netloc = urlparse.urlparse(destination)[1]
if netloc and netloc != request.get_host():
destination = reverse('home')
# Done!
form = LoginForm(initial={'destination': destination})
return self._render(form)
def post(self, request):
form = LoginForm(data=request.POST)
if form.is_valid():
# Log user in.
auth.login(request, form.user)
# Done!
messages.info(request, _('Welcome back!'))
return HttpResponseAjax([
commands.redirect(form.cleaned_data.get('destination')),
], request)
else:
return self._render(form)
def _render(self, form):
return {
'template': 'varnish-bans-manager/core/user/login.html',
'context': {
'form': form,
},
}
class Logout(Base):
def get(self, request):
auth.logout(request)
messages.success(request, _(
'You have been disconnected. See you soon!'))
return HttpResponseRedirect(reverse('index'))
class PasswordReset(AnonymousBase):
def get(self, request):
form = PasswordResetForm()
return self._render(form)
def post(self, request):
form = PasswordResetForm(data=request.POST)
if form.is_valid():
# Save.
form.save(request)
# Done!
messages.success(request, _(
'An e-mail with password reset instructions has been '
'delivered to %(email)s. Please, check your inbox and '
'follow the instructions.') % {
'email': form.user.email
})
return HttpResponseAjax([
commands.navigate(reverse('user-login')),
], request)
else:
return self._render(form)
def _render(self, form):
return {
'template': 'varnish-bans-manager/core/user/password_reset.html',
'context': {
'form': form,
},
}
class PasswordResetConfirm(AnonymousBase):
def dispatch(self, request, uidb36=None, token=None, *args, **kwargs):
# Check input.
assert uidb36 is not None and token is not None
# Fetch user.
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
except (ValueError, User.DoesNotExist):
user = None
# Valid link?
if user is not None and \
default_token_generator.check_token(user, token):
kwargs['user'] = user
return super(PasswordResetConfirm, self).dispatch(
request, *args, **kwargs)
else:
messages.error(request, _(
'The password reset link is not valid anymore. Please, '
'request a new one.'))
return HttpResponseRedirect(reverse('user-password-reset'))
def get(self, request, user):
form = PasswordResetConfirmationForm(user)
return self._render(form, request)
def post(self, request, user):
form = PasswordResetConfirmationForm(user, data=request.POST)
if form.is_valid():
# Save.
form.save()
# Done!
messages.success(request, _('Your password has been updated.'))
return HttpResponseAjax([
commands.navigate(reverse('user-login')),
], request)
else:
return self._render(form, request)
def _render(self, form, request):
return {
'template':
'varnish-bans-manager/core/user/password_reset_confirm.html',
'context': {
'form': form,
'url': request.path,
},
}
class Profile(AuthenticatedBase):
def get(self, request):
form = ProfilePreferencesForm(request.user)
return self._render(form)
def post(self, request):
form = ProfilePreferencesForm(
request.user, data=request.POST, files=request.FILES)
if form.is_valid():
form.save()
messages.success(request, DEFAULT_SUCCESS_MESSAGE)
return HttpResponseAjax([
commands.reload(request),
], request)
else:
messages.error(request, DEFAULT_FORM_ERROR_MESSAGE)
return self._render(form)
def _render(self, form):
return {
'template': 'varnish-bans-manager/core/user/profile.html',
'context': {
'form': form,
},
}
class Password(AuthenticatedBase):
def get(self, request):
form = PasswordChangeForm(request.user)
return self._render(form)
def post(self, request):
form = PasswordChangeForm(request.user, data=request.POST)
if form.is_valid():
form.save()
messages.success(request, _('Your password has been updated.'))
return HttpResponseAjax([
commands.reload(request),
], request)
else:
messages.error(request, DEFAULT_FORM_ERROR_MESSAGE)
return self._render(form)
def _render(self, form):
return {
'template': 'varnish-bans-manager/core/user/password.html',
'context': {
'form': form,
},
}
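# Dispatch guards recap: AnonymousBase redirects already-authenticated users
# to 'home', while AuthenticatedBase wraps dispatch in login_required, so
# every concrete get()/post() handler above can assume the expected
# authentication state.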
|
Nowheresly/odoo | refs/heads/8.0 | addons/base_gengo/wizard/base_gengo_translations.py | 296 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import uuid
import logging
import re
import time
from openerp.osv import osv, fields
from openerp import tools, SUPERUSER_ID
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
try:
from gengo import Gengo
except ImportError:
_logger.warning('Gengo library not found, Gengo features disabled. If you plan to use it, please install the gengo library from http://pypi.python.org/pypi/gengo')
GENGO_DEFAULT_LIMIT = 20
class base_gengo_translations(osv.osv_memory):
GENGO_KEY = "Gengo.UUID"
GROUPS = ['base.group_system']
_name = 'base.gengo.translations'
_columns = {
'sync_type': fields.selection([('send', 'Send New Terms'),
('receive', 'Receive Translation'),
('both', 'Both')], "Sync Type"),
'lang_id': fields.many2one('res.lang', 'Language', required=True),
'sync_limit': fields.integer("No. of terms to sync"),
}
_defaults = {
'sync_type': 'both',
'sync_limit': 20
}
def init(self, cr):
icp = self.pool['ir.config_parameter']
if not icp.get_param(cr, SUPERUSER_ID, self.GENGO_KEY, default=None):
icp.set_param(cr, SUPERUSER_ID, self.GENGO_KEY, str(uuid.uuid4()), groups=self.GROUPS)
def get_gengo_key(self, cr):
icp = self.pool['ir.config_parameter']
return icp.get_param(cr, SUPERUSER_ID, self.GENGO_KEY, default="Undefined")
def gengo_authentication(self, cr, uid, context=None):
'''
This method tries to open a connection with Gengo. For that, it uses the Public and Private
keys that are linked to the company (given by Gengo on subscription). It returns a tuple with
* as first element: a boolean indicating whether the authentication was a success
* as second element: the connection, if it was a success, or the error message returned by
Gengo when the connection failed.
This error message can either be displayed in the server logs (if the authentication was called
by the cron) or in a dialog box (if requested by the user), thus it's important to return it
translated.
'''
user = self.pool.get('res.users').browse(cr, 1, uid, context=context)
if not user.company_id.gengo_public_key or not user.company_id.gengo_private_key:
return (False, _("Gengo `Public Key` or `Private Key` are missing. Enter your Gengo authentication parameters under `Settings > Companies > Gengo Parameters`."))
try:
gengo = Gengo(
public_key=user.company_id.gengo_public_key.encode('ascii'),
private_key=user.company_id.gengo_private_key.encode('ascii'),
sandbox=user.company_id.gengo_sandbox,
)
gengo.getAccountStats()
return (True, gengo)
except Exception, e:
_logger.exception('Gengo connection failed')
return (False, _("Gengo connection failed with this message:\n``%s``") % e)
def act_update(self, cr, uid, ids, context=None):
'''
Function called by the wizard.
'''
if context is None:
context = {}
flag, gengo = self.gengo_authentication(cr, uid, context=context)
if not flag:
raise osv.except_osv(_('Gengo Authentication Error'), gengo)
for wizard in self.browse(cr, uid, ids, context=context):
supported_langs = self.pool.get('ir.translation')._get_all_supported_languages(cr, uid, context=context)
language = self.pool.get('ir.translation')._get_gengo_corresponding_language(wizard.lang_id.code)
if language not in supported_langs:
raise osv.except_osv(_("Warning"), _('This language is not supported by the Gengo translation services.'))
ctx = context.copy()
ctx['gengo_language'] = wizard.lang_id.id
if wizard.sync_limit > 200 or wizard.sync_limit < 1:
raise osv.except_osv(_("Warning"), _('Sync limit should be between 1 and 200 for the Gengo translation services.'))
if wizard.sync_type in ['send', 'both']:
self._sync_request(cr, uid, wizard.sync_limit, context=ctx)
if wizard.sync_type in ['receive', 'both']:
self._sync_response(cr, uid, wizard.sync_limit, context=ctx)
return {'type': 'ir.actions.act_window_close'}
def _sync_response(self, cr, uid, limit=GENGO_DEFAULT_LIMIT, context=None):
"""
This method will be called by cron services to get translations from
Gengo. It will read translated terms and comments from Gengo and will
update respective ir.translation in Odoo.
"""
translation_pool = self.pool.get('ir.translation')
flag, gengo = self.gengo_authentication(cr, uid, context=context)
if not flag:
_logger.warning("%s", gengo)
else:
offset = 0
all_translation_ids = translation_pool.search(cr, uid, [('state', '=', 'inprogress'), ('gengo_translation', 'in', ('machine', 'standard', 'pro', 'ultra')), ('order_id', "!=", False)], context=context)
while True:
translation_ids = all_translation_ids[offset:offset + limit]
offset += limit
if not translation_ids:
break
terms_progress = {
'gengo_order_ids': set(),
'ir_translation_ids': set(),
}
translation_terms = translation_pool.browse(cr, uid, translation_ids, context=context)
for term in translation_terms:
terms_progress['gengo_order_ids'].add(term.order_id)
terms_progress['ir_translation_ids'].add(tools.ustr(term.id))
for order_id in terms_progress['gengo_order_ids']:
order_response = gengo.getTranslationOrderJobs(id=order_id)
jobs_approved = order_response.get('response', []).get('order', []).get('jobs_approved', [])
gengo_ids = ','.join(jobs_approved)
if gengo_ids:  # guard needed: with an empty id list the url has no ids and getTranslationJobBatch would fall through to getTranslationJobs
try:
job_response = gengo.getTranslationJobBatch(id=gengo_ids)
except:
continue
if job_response['opstat'] == 'ok':
for job in job_response['response'].get('jobs', []):
if job.get('custom_data') in terms_progress['ir_translation_ids']:
self._update_terms_job(cr, uid, job, context=context)
return True
def _update_terms_job(self, cr, uid, job, context=None):
translation_pool = self.pool.get('ir.translation')
tid = int(job['custom_data'])
vals = {}
if job.get('status', False) in ('queued', 'available', 'pending', 'reviewable'):
vals['state'] = 'inprogress'
if job.get('body_tgt', False) and job.get('status', False) == 'approved':
vals['value'] = job['body_tgt']
if job.get('status', False) in ('approved', 'canceled'):
vals['state'] = 'translated'
if vals:
translation_pool.write(cr, uid, [tid], vals, context=context)
def _update_terms(self, cr, uid, response, term_ids, context=None):
"""
Update the terms after their translation were requested to Gengo
"""
translation_pool = self.pool.get('ir.translation')
vals = {
'order_id': response.get('order_id', ''),
'state': 'inprogress'
}
translation_pool.write(cr, uid, term_ids, vals, context=context)
jobs = response.get('jobs', [])
if jobs:
for t_id, res in jobs.items():
self._update_terms_job(cr, uid, res, context=context)
return
def pack_jobs_request(self, cr, uid, term_ids, context=None):
'''Prepare the terms that will be requested to Gengo and return them in a dictionary with the following format
{'jobs': {
'term1.id': {...}
'term2.id': {...}
}
}'''
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
translation_pool = self.pool.get('ir.translation')
jobs = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
auto_approve = 1 if user.company_id.gengo_auto_approve else 0
for term in translation_pool.browse(cr, uid, term_ids, context=context):
if re.search(r"\w", term.src or ""):
comment = user.company_id.gengo_comment or ''
if term.gengo_comment:
comment += '\n' + term.gengo_comment
jobs[time.strftime('%Y%m%d%H%M%S') + '-' + str(term.id)] = {
'type': 'text',
'slug': 'Single :: English to ' + term.lang,
'tier': tools.ustr(term.gengo_translation),
'custom_data': str(term.id),
'body_src': term.src,
'lc_src': 'en',
'lc_tgt': translation_pool._get_gengo_corresponding_language(term.lang),
'auto_approve': auto_approve,
'comment': comment,
'callback_url': "%s/website/gengo_callback?pgk=%s&db=%s" % (base_url, self.get_gengo_key(cr), cr.dbname)
}
return {'jobs': jobs, 'as_group': 0}
def _send_translation_terms(self, cr, uid, term_ids, context=None):
"""
Send a request to Gengo with all the term_ids in a different job, get the response and update the terms in
database accordingly.
"""
flag, gengo = self.gengo_authentication(cr, uid, context=context)
if flag:
request = self.pack_jobs_request(cr, uid, term_ids, context=context)
if request['jobs']:
result = gengo.postTranslationJobs(jobs=request)
if result['opstat'] == 'ok':
self._update_terms(cr, uid, result['response'], term_ids, context=context)
else:
_logger.error(gengo)
return True
def _sync_request(self, cr, uid, limit=GENGO_DEFAULT_LIMIT, context=None):
"""
This scheduler sends a job request to Gengo for the terms that are
waiting to be translated and for which gengo_translation is enabled.
A special key 'gengo_language' can be passed in the context in order to
request translations of that language only. Its value is the language
ID in Odoo.
"""
if context is None:
context = {}
language_pool = self.pool.get('res.lang')
translation_pool = self.pool.get('ir.translation')
domain = [('state', '=', 'to_translate'), ('gengo_translation', 'in', ('machine', 'standard', 'pro', 'ultra')), ('order_id', "=", False)]
if context.get('gengo_language', False):
lc = language_pool.browse(cr, uid, context['gengo_language'], context=context).code
domain.append(('lang', '=', lc))
all_term_ids = translation_pool.search(cr, uid, domain, context=context)
try:
offset = 0
while True:
# search for the first n terms to translate
term_ids = all_term_ids[offset:offset + limit]
if term_ids:
offset += limit
self._send_translation_terms(cr, uid, term_ids, context=context)
_logger.info("%s Translation terms have been posted to Gengo successfully", len(term_ids))
if not len(term_ids) == limit:
break
except Exception, e:
_logger.error("%s", e)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
NESCent/dplace | refs/heads/master | dplace_app/tests/test_serializers.py | 4 | from django.test import TestCase
from dplace_app.serializers import Legend
# much of the testing of serializers is done in test_api; this module catches
# the remainder of the unexposed/untested code.
class LegendTestCase(TestCase):
"""Tests the Legend"""
def test_name(self):
assert Legend('name', 'svg').name == 'name'
def test_svg(self):
assert Legend('name', 'svg').svg == 'svg'
|
BasuCert/DefaceDetector | refs/heads/master | app/utils.py | 1 | import os
import yaml
def get_parent_path(file_path):
curfilePath = os.path.abspath(file_path)
curDir = os.path.abspath(os.path.join(curfilePath, os.pardir))
return os.path.abspath(os.path.join(curDir, os.pardir))
def get_config(term, section="general"):
try:
conf_file = get_parent_path(__file__) + '/config.yaml'
with open(conf_file, 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)  # safe_load: plain config data, no arbitrary python objects
except FileNotFoundError:
print("file not found")
return {}
try:
return cfg.get(section, {}).get(term, {})
except KeyError:
print("key not found")
return {}
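# Illustrative usage, assuming a hypothetical config.yaml such as:
#
#   general:
#     interval: 60
#   notifier:
#     email: admin@example.com
#
# get_config('interval')                  -> 60
# get_config('email', section='notifier') -> 'admin@example.com'
# get_config('missing')                   -> {} (default and error fallback)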
|
DataDog/integrations-core | refs/heads/master | envoy/tests/test_metrics.py | 1 | from datadog_checks.envoy.metrics import METRIC_PREFIX, METRIC_TREE, METRICS
from datadog_checks.envoy.utils import make_metric_tree
def test_metric_prefix():
assert METRIC_PREFIX == 'envoy.'
def test_metric_tags_sequence_type():
for metric in METRICS:
for tag in METRICS[metric]['tags']:
assert isinstance(tag, tuple)
def test_metric_tags_length():
for metric in METRICS:
assert len(metric.split('.')) == len(METRICS[metric]['tags'])
def test_metric_tree():
assert METRIC_TREE == make_metric_tree(METRICS)
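# The invariant above relies on make_metric_tree nesting dotted metric names;
# illustratively (not the helper's exact output), {'a.b': x, 'a.c': y}
# becomes {'a': {'b': x, 'c': y}}.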
|
tntnatbry/tensorflow | refs/heads/master | tensorflow/contrib/slim/python/slim/nets/overfeat.py | 164 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Usage:
with slim.arg_scope(overfeat.overfeat_arg_scope()):
outputs, end_points = overfeat.overfeat(inputs)
@@overfeat
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def overfeat_arg_scope(weight_decay=0.0005):
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
activation_fn=nn_ops.relu,
weights_regularizer=regularizers.l2_regularizer(weight_decay),
biases_initializer=init_ops.zeros_initializer()):
with arg_scope([layers.conv2d], padding='SAME'):
with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def overfeat(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='overfeat'):
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 231x231. To use in fully
convolutional mode, set spatial_squeeze to false.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not the spatial dimensions of the outputs
should be squeezed. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
Returns:
the last op containing the log predictions and end_points dict.
"""
with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d
with arg_scope(
[layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
outputs_collections=end_points_collection):
net = layers.conv2d(
inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
net = layers.conv2d(net, 512, [3, 3], scope='conv3')
net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
net = layers.conv2d(net, 1024, [3, 3], scope='conv5')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
with arg_scope(
[layers.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=init_ops.constant_initializer(0.1)):
# Use conv2d instead of fully_connected layers.
net = layers.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout6')
net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout7')
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=init_ops.zeros_initializer(),
scope='fc8')
# Convert end_points_collection into a end_point dict.
end_points = utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
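# Illustrative use (TF1-era contrib API, mirroring the module docstring):
#
#   import tensorflow as tf
#   inputs = tf.random_normal([1, 231, 231, 3])
#   with arg_scope(overfeat_arg_scope()):
#       logits, end_points = overfeat(inputs, num_classes=1000)
#   # logits: [1, 1000] once spatial_squeeze removes the 1x1 spatial dims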
|
k2wl/evolution_i9082 | refs/heads/cm11exp | tools/perf/scripts/python/netdev-times.py | 11271 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # all tracepoint events related to this script are inserted here
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
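# e.g. diff_msec(1000000, 4000000) == 3.0, i.e. three milliseconds between
# two nanosecond timestamps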
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
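# Illustrative sketch, not part of the original script: the dict shape that a
# transmitted skb passes through in the handlers above. The concrete values
# are made up; only the key names come from this file.
def _example_tx_record():
    skb = {'dev': 'eth0', 'skbaddr': 0xffff880012345678,
           'len': 1514, 'queue_t': 1000}  # built by handle_net_dev_queue
    skb['xmit_t'] = 2000                  # stamped by handle_net_dev_xmit
    skb['free_t'] = 3000                  # stamped by handle_consume_skb/kfree_skb
    return skb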
|
rwecho/fflask_blog | refs/heads/master | five8/five8/mydb/db_context.py | 1 | from sqlalchemy import create_engine, func, cast, desc, distinct, exists, Column, String, INT, DATETIME, TEXT
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime
import pytz
import five8.config as config
Base = declarative_base()
connection = config.SQLALCHEMY_DATABASE_URI
ENGINE = create_engine(connection,
connect_args={'connect_timeout': 20}, echo=False)
DBSession = sessionmaker(bind=ENGINE)
class Five8HouseUrl(Base):
__tablename__ = 'five8_houseurl'
Url = Column('Url', String(200), primary_key=True)
Rdate = Column('Rdate', DATETIME)
Ldate = Column('Ldate', DATETIME)
Status = Column('Status', INT)
def __init__(self):
self.Rdate = beijing_now()
self.Ldate = beijing_now()
self.Status = -1
def exists_five8_houseurl(url):
session = DBSession()
try:
found = session.query(exists().where(Five8HouseUrl.Url == url)).scalar()
if not found:
m = Five8HouseUrl()
m.Url = url
session.add(m)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
return found
class Five8HouseInfo(Base):
__tablename__ = 'five8_houseinfo'
Phone = Column(String(20), primary_key=True)
PhoneBelong = Column(String(20))
Expense = Column(String(2000))
Situation = Column(String(2000))
Description = Column(String(2000))
Owner = Column(String(50))
District = Column(String(50))
SubDistrict = Column(String(50))
Rdate = Column('Rdate', DATETIME)
Ldate = Column('Ldate', DATETIME)
Status = Column('Status', INT)
def __init__(self, phone=None, phone_belong=None,\
expense=None, situation=None, \
description=None, owner=None,\
district=None, sub_district=None):
self.Phone = phone
self.PhoneBelong = phone_belong
self.Expense = expense
self.Situation = situation
self.Description = description
self.Owner = owner
self.District = district
self.SubDistrict = sub_district
self.Rdate = beijing_now()
self.Ldate = beijing_now()
self.Status = -1
def save_five8_houseinfo(houseinfo):
""" save houseinfo to database """
session = DBSession()
try:
existing = session.query(Five8HouseInfo).filter(Five8HouseInfo.Phone == houseinfo['phone']).first()
if not existing:
session.add(Five8HouseInfo(**houseinfo))
elif not existing.District:
existing.District = houseinfo['district']
existing.SubDistrict = houseinfo['sub_district']
existing.Ldate = beijing_now()
print('update district:', houseinfo['phone'])
session.commit()
except:
print('save houseinfo error:')
session.rollback()
raise
finally:
session.close()
class ProxyIp(Base):
__tablename__ = 'proxy_ip'
Ip = Column('Ip', String(15), primary_key=True)
Port = Column('Port', INT)
Anonymous = Column('Anonymous', String(10))
ProxyType = Column('ProxyType', String(10))
Location = Column('Location', String(50))
Speed = Column('Speed', String(10))
LastVerify = Column('LastVerify', DATETIME)
Rdate = Column('Rdate', DATETIME)
Ldate = Column('Ldate', DATETIME)
Status = Column('Status', INT)
def __init__(self, Ip=None, Port=None, Anonymous=None, ProxyType=None, Location=None, Speed=None, LastVerify=None):
self.Ip = Ip
self.Port = Port
self.Anonymous = Anonymous
self.ProxyType = ProxyType
self.Location = Location
self.Speed = Speed
self.LastVerify = LastVerify
self.Rdate = beijing_now()
self.Ldate = beijing_now()
self.Status = -1
def __repr__(self):
return "<ProxyIp %r %r>" % (self.Ip, self.Port)
def url(self):
if self.ProxyType == 'HTTP':
return "http://%s:%s" % (self.Ip, self.Port)
elif self.ProxyType == 'HTTPS':
return "https://%s:%s" % (self.Ip, self.Ip)
return None
def get_proxyips():
""" get all """
session = DBSession()
models = session.query(ProxyIp).filter(ProxyIp.Anonymous == None).all()
session.close()
return models
def save_proxyip(proxyip):
""" save proxyip to database """
session = DBSession()
try:
found = session.query(exists().where(ProxyIp.Ip == proxyip['Ip'])).scalar()
if not found:
session.add(ProxyIp(**proxyip))
session.commit()
except:
session.rollback()
raise
finally:
session.close()
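# The save helpers above repeat the same session lifecycle (create, commit,
# roll back on error, close). A minimal sketch, not part of the original
# module, of how that pattern could be factored out; 'session_scope' is a
# hypothetical name.
from contextlib import contextmanager

@contextmanager
def session_scope():
    """Yield a session, committing on success and rolling back on error."""
    session = DBSession()
    try:
        yield session
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        session.close()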
def beijing_now():
"""
turn local time to beijing time
"""
return datetime.now(pytz.timezone('Etc/GMT-8'))
def main():
Base.metadata.create_all(ENGINE)
models = get_proxyips()
for m in models:
print(m.url())
if __name__ == '__main__':
main()
|
Juliusz/ntlmaps | refs/heads/master | ntlmaps/http_header.py | 5 | # This file is part of 'NTLM Authorization Proxy Server'
# Copyright 2001 Dmitry A. Rozmanov <dima@xenon.spb.ru>
#
# NTLM APS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# NTLM APS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the software; see the file COPYING. If not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
import string, urlparse
http_debug_file_name = 'http.debug'
#-----------------------------------------------------------------------
# tests client's header for correctness
def test_client_http_header(header_str):
request = string.split(header_str, '\012')[0]
parts = string.split(request)
# we have to have at least 3 words in the request
# poor check
if len(parts) < 3:
return 0
else:
return 1
#-----------------------------------------------------------------------
# tests server's response header for correctness
def test_server_http_header(header_str):
response = string.split(header_str, '\012')[0]
parts = string.split(response)
# we have to have at least 2 words in the response
# poor check
if len(parts) < 2:
return 0
else:
return 1
#-----------------------------------------------------------------------
def extract_http_header_str(buffer):
# let's remove possible leading newlines
t = string.lstrip(buffer)
# searching for the RFC header's end
delimiter = '\015\012\015\012'
header_end = string.find(t, delimiter)
if header_end < 0:
# may be it is defective header made by junkbuster
delimiter = '\012\012'
header_end = string.find(t, delimiter)
if header_end >= 0:
# we have found it, possibly
ld = len(delimiter)
header_str = t[0:header_end + ld]
# Let's check if it is a proper header
if test_server_http_header(header_str) or test_client_http_header(header_str):
# if yes then let's do our work
if (header_end + ld) >= len(t):
rest_str = ''
else:
rest_str = t[header_end + ld:]
else:
# if not then let's leave the buffer as it is
# NOTE: if there is junk before a valid header we will never find
# that header and will stall until timeout. Not a good solution.
header_str = ''
rest_str = buffer
else:
# there is no complete header in the buffer
header_str = ''
rest_str = buffer
return (header_str, rest_str)
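#-----------------------------------------------------------------------
# An illustrative sketch, not part of the original module: splitting a raw
# buffer into header and body. The request below is made up.
def _example_extract_http_header_str():
    buf = 'GET http://host/ HTTP/1.0\015\012Host: host\015\012\015\012body'
    header_str, rest_str = extract_http_header_str(buf)
    assert header_str.endswith('\015\012\015\012')
    assert rest_str == 'body'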
#-----------------------------------------------------------------------
def extract_server_header(buffer):
header_str, rest_str = extract_http_header_str(buffer)
if header_str:
header_obj = HTTP_SERVER_HEAD(header_str)
else:
header_obj = None
return (header_obj, rest_str)
#-----------------------------------------------------------------------
def extract_client_header(buffer):
header_str, rest_str = extract_http_header_str(buffer)
if header_str:
header_obj = HTTP_CLIENT_HEAD(header_str)
else:
header_obj = None
return (header_obj, rest_str)
#-----------------------------------------------------------------------
def capitalize_value_name(name):
# e.g. 'proxy-authorization' -> 'Proxy-Authorization'
tl = string.split(name, '-')
for i in range(len(tl)):
tl[i] = string.capitalize(tl[i])
return string.join(tl, '-')
#-----------------------------------------------------------------------
# some helper classes
#-----------------------------------------------------------------------
class HTTP_HEAD:
#-------------------------------
def __init__(self, head_str):
self.head_source = ''
self.params = None
self.fields = None
self.order_list = []
self.head_source = head_str
head_str = string.strip(head_str)
records = string.split(head_str, '\012')
# Dealing with response line
#fields = string.split(records[0], ' ', 2)
t = string.split(string.strip(records[0]))
fields = t[:2] + [string.join(t[2:])]
self.fields = []
for i in fields:
self.fields.append(string.strip(i))
# Dealing with params
params = {}
order_list = []
for i in records[1:]:
parts = string.split(string.strip(i), ':', 1)
pname = string.lower(string.strip(parts[0]))
if not params.has_key(pname):
params[pname] = []
order_list.append(string.lower(pname))
try:
params[pname].append(string.strip(parts[1]))
except:
msg = "ERROR: Exception in head parsing. ValueName: '%s'" % pname
#print msg
self.debug(msg)
self.params = params
self.order_list = order_list
#-------------------------------
def debug(self, message):
try:
f = open(http_debug_file_name, 'a')
f.write(message)
f.write('\n=====\n')
f.write(self.head_source)
f.close()
except IOError:
pass
# Yes, yes, I know, this is just sweeping it under the rug...
# TODO: implement a persistent filehandle for logging debug messages to.
#-------------------------------
def copy(self):
import copy
return copy.deepcopy(self)
#-------------------------------
def get_param_values(self, param_name):
param_name = string.lower(param_name)
if self.params.has_key(param_name):
return self.params[param_name]
else:
return []
#-------------------------------
def del_param(self, param_name):
param_name = string.lower(param_name)
if self.params.has_key(param_name): del self.params[param_name]
#-------------------------------
def has_param(self, param_name):
param_name = string.lower(param_name)
return self.params.has_key(param_name)
#-------------------------------
def add_param_value(self, param_name, value):
param_name = string.lower(param_name)
if not self.params.has_key(param_name):
self.params[param_name] = []
if param_name not in self.order_list:
self.order_list.append(param_name)
self.params[param_name].append(value)
#-------------------------------
def replace_param_value(self, param_name, value):
self.del_param(param_name)
self.add_param_value(param_name, value)
#-------------------------------
def __repr__(self, delimiter='\n'):
# Build the header text using the requested line delimiter.
cookies = ''
res = string.join(self.fields, ' ') + delimiter
for i in self.order_list:
if self.params.has_key(i):
if i == 'cookie':
for k in self.params[i]:
cookies = cookies + capitalize_value_name(i) + ': ' + k + delimiter
else:
for k in self.params[i]:
res = res + capitalize_value_name(i) + ': ' + k + delimiter
res = res + cookies
res = res + delimiter
return res
#-------------------------------
def send(self, socket):
#"""
res = ''
cookies = ''
res = string.join(self.fields, ' ') + '\015\012'
for i in self.order_list:
if self.params.has_key(i):
if i == 'cookie':
for k in self.params[i]:
cookies = cookies + capitalize_value_name(i) + ': ' + k + '\015\012'
else:
for k in self.params[i]:
res = res + capitalize_value_name(i) + ': ' + k + '\015\012'
res = res + cookies
res = res + '\015\012'
#"""
#res = self.__repr__('\015\012')
# NOTE!!! 0.9.1 worked, 0.9.5 and 0.9.7 did not with MSN Messenger.
# We had a problem here that prevented MSN Messenger from working.
# Some work is needed to make __repr__ work instead of the current code.
try:
#socket.send(self.head_source)
socket.send(res)
# self.debug(res)
return 1
except:
return 0
#-----------------------------------------------------------------------
class HTTP_SERVER_HEAD(HTTP_HEAD):
#-------------------------------
def get_http_version(self):
return self.fields[0]
#-------------------------------
def get_http_code(self):
return self.fields[1]
#-------------------------------
def get_http_message(self):
return self.fields[2]
#-----------------------------------------------------------------------
class HTTP_CLIENT_HEAD(HTTP_HEAD):
#-------------------------------
def get_http_version(self):
return self.fields[2]
#-------------------------------
def get_http_method(self):
return self.fields[0]
#-------------------------------
def get_http_url(self):
return self.fields[1]
#-------------------------------
def set_http_url(self, new_url):
self.fields[1] = new_url
#-------------------------------
# There is a problem with the www request header:
# not all servers are willing to answer requests with a full URL in the
# request line; they expect the net location in the 'Host' value and
# only the path in the URL.
def make_right_header(self):
if self.get_http_method() == 'CONNECT':
net_location = self.get_http_url()
else:
url_tuple = urlparse.urlparse(self.get_http_url())
net_location = url_tuple[1]
self.replace_param_value('Host', net_location)
if self.get_http_method() != 'CONNECT':
path = urlparse.urlunparse(tuple(['', ''] + list(url_tuple[2:])))
self.set_http_url(path)
#-------------------------------
def get_http_server(self):
# trying to get host from url
if self.get_http_method() == 'CONNECT':
net_location = self.get_http_url()
else:
url_tuple = urlparse.urlparse(self.get_http_url())
net_location = url_tuple[1]
# if there was no host in url then get it from 'Host' value
if not net_location:
net_location = self.get_param_values('Host')[0]
if not net_location:
net_location = 'localhost'
# trying to parse user:passwd@www.some.domain:8080
# is it needed?
if '@' in net_location:
cred, net_location = string.split(net_location, '@')
if ':' in net_location:
server, port = string.split(net_location, ':')
port = int(port)
else:
server = net_location
port = 80
return server, port
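#-----------------------------------------------------------------------
# An illustrative sketch, not part of the original module: parsing a proxied
# request line into (server, port). The URL is made up.
def _example_get_http_server():
    head, rest = extract_client_header(
        'GET http://user:pw@www.example.com:8080/x.html HTTP/1.0\015\012\015\012')
    assert head.get_http_server() == ('www.example.com', 8080)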
|
khwang/plum | refs/heads/master | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/msvs_emulation.py | 388 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
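# A minimal illustrative helper, not part of gyp and never called: the command
# word stays unquoted while later arguments are quoted. Arguments are made up.
def _ExampleEncodeRspFileList():
  assert EncodeRspFileList(['cl.exe', '/Fo out dir']) == 'cl.exe "/Fo out dir"'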
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
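# A minimal illustrative helper, not part of gyp and never called: walking a
# settings tree with _GenericRetrieve. The tree below is made up.
def _ExampleGenericRetrieve():
  tree = {'VCCLCompilerTool': {'WarningLevel': '4'}}
  assert _GenericRetrieve(tree, None,
                          ['VCCLCompilerTool', 'WarningLevel']) == '4'
  assert _GenericRetrieve(tree, 'dflt', ['VCLinkerTool', 'SubSystem']) == 'dflt'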
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
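  # Example with made-up inputs: AdjustLibraries(['-lws2_32', 'winmm.lib'])
  # returns ['ws2_32.lib', 'winmm.lib'].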
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There's two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
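  # Example with made-up configs: with msvs_target_platform 'x64', the global
  # config 'Debug' remaps to 'Debug_x64'; with an x86 override, 'Release_x64'
  # maps back to 'Release'.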
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc', '2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e', '2015'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
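# Example: _LanguageMatchesForPch('.cc', '.cpp') is True (both C++), while
# _LanguageMatchesForPch('.c', '.cc') is False (C source vs. C++ pch).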
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
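# A minimal illustrative helper, not part of gyp and never called: expanding
# VS-style macros per the dict contract above. The expansion dict is made up.
def _ExampleExpandMacros():
  env = {'$(OutDir)\\': 'out\\Release\\', '$(ProjectName)': 'base'}
  assert ExpandMacros('$(OutDir)\\$(ProjectName).dll', env) == \
      'out\\Release\\base.dll'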
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
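# A minimal illustrative helper, not part of gyp and never called: a fake
# 'set' dump showing that variables outside the allow-list above are dropped.
def _ExampleExtractImportantEnvironment():
  fake = 'SystemRoot=C:\\Windows\nTEMP=C:\\t\nTMP=C:\\t\nFOO=bar'
  env = _ExtractImportantEnvironment(fake)
  assert env['SYSTEMROOT'] == 'C:\\Windows'
  assert 'FOO' not in env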
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
If the following procedure for generating environment files does not
meet your requirements (e.g. for custom toolchains), you can pass
"-G ninja_use_custom_environment_files" to gyp to suppress file
generation and use custom environment files you have prepared yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
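# Illustrative sketch (not part of the original file): ExpandMacros does plain
# string replacement, so the expansions dict maps literal '$(Name)' tokens to
# their values. The macro names and paths below are hypothetical.
if __name__ == '__main__':
  _demo_expansions = {'$(OutDir)': 'out\\Release', '$(TargetName)': 'foo'}
  print ExpandMacros('$(OutDir)\\$(TargetName).exe', _demo_expansions)
  # -> out\Release\foo.exe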
|
googleapis/python-translate | refs/heads/master | google/cloud/__init__.py | 120 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
|
crcox/NEXT | refs/heads/master | next/database/database_lib.py | 1 | #!/usr/bin/python
"""
Helper methods for database backup and restore.
"""
import shutil
import tempfile
import subprocess
import next.constants as constants
def make_mongodump(name,exp_uid_list=[]):
tmp_dir = tempfile.mkdtemp()
if len(exp_uid_list)==0:
subprocess.call(('/usr/bin/mongodump -vvvvv --host {hostname}:{port} '
'--out {path}').format(hostname=constants.MONGODB_HOST,
port=constants.MONGODB_PORT,
path=tmp_dir),
shell=True)
else:
exp_uid_list_str = '["'+'","'.join(exp_uid_list)+'"]'
query_str = '\'{ $or: [ {"exp_uid":{$in:%s}}, {"object_id":{$in:%s}} ] }\'' % (exp_uid_list_str,exp_uid_list_str)
subprocess.call(('/usr/bin/mongodump -vvvvv --host {hostname}:{port} '
'--out {path} --query {query_str}').format(hostname=constants.MONGODB_HOST,
port=constants.MONGODB_PORT,
path=tmp_dir,
query_str=query_str),
shell=True)
subprocess.call('mkdir -p /dump',shell=True)
subprocess.call(('tar czf /dump/{name} '
'{path}').format(name=name,path=tmp_dir),
shell=True)
shutil.rmtree(tmp_dir)
return '/dump/{}'.format(name)
def remove_mongodump(name):
subprocess.call(('rm /dump/{name}').format(name=name),
shell=True)
def restore_mongodump(src_filename):
tmp_dir = tempfile.mkdtemp()
subprocess.call(('tar -xvf {src_filename} -C {dst_path}').format(src_filename=src_filename,dst_path=tmp_dir),shell=True)
subprocess.call(('/usr/bin/mongorestore --host {hostname} --port {port} '
'{path}').format(hostname=constants.MONGODB_HOST,
port=constants.MONGODB_PORT,
path=tmp_dir),
shell=True)
shutil.rmtree(tmp_dir)
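# Illustrative note (not part of the original file): for
# exp_uid_list=['a1','b2'], the query_str built in make_mongodump expands to
# '{ $or: [ {"exp_uid":{$in:["a1","b2"]}}, {"object_id":{$in:["a1","b2"]}} ] }'
# (single-quoted for the shell), restricting the dump to those experiments.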
# import next.database.database_lib as db_lib
# >>> db_lib.make_mongodump('test_619') |
denovator/myfriki | refs/heads/master | lib/jinja2/jinja2/testsuite/lexnparse.py | 402 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.lexnparse
~~~~~~~~~~~~~~~~~~~~~~~~~~
All the unittests regarding lexing, parsing and syntax.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, Template, TemplateSyntaxError, \
UndefinedError, nodes
from jinja2._compat import next, iteritems, text_type, PY2
from jinja2.lexer import Token, TokenStream, TOKEN_EOF, \
TOKEN_BLOCK_BEGIN, TOKEN_BLOCK_END
env = Environment()
# how does a string look like in jinja syntax?
if PY2:
def jinja_string_repr(string):
return repr(string)[1:]
else:
jinja_string_repr = repr
class TokenStreamTestCase(JinjaTestCase):
test_tokens = [Token(1, TOKEN_BLOCK_BEGIN, ''),
Token(2, TOKEN_BLOCK_END, ''),
]
def test_simple(self):
ts = TokenStream(self.test_tokens, "foo", "bar")
assert ts.current.type is TOKEN_BLOCK_BEGIN
assert bool(ts)
assert not bool(ts.eos)
next(ts)
assert ts.current.type is TOKEN_BLOCK_END
assert bool(ts)
assert not bool(ts.eos)
next(ts)
assert ts.current.type is TOKEN_EOF
assert not bool(ts)
assert bool(ts.eos)
def test_iter(self):
token_types = [t.type for t in TokenStream(self.test_tokens, "foo", "bar")]
assert token_types == ['block_begin', 'block_end', ]
class LexerTestCase(JinjaTestCase):
def test_raw1(self):
tmpl = env.from_string('{% raw %}foo{% endraw %}|'
'{%raw%}{{ bar }}|{% baz %}{% endraw %}')
assert tmpl.render() == 'foo|{{ bar }}|{% baz %}'
def test_raw2(self):
tmpl = env.from_string('1 {%- raw -%} 2 {%- endraw -%} 3')
assert tmpl.render() == '123'
def test_balancing(self):
env = Environment('{%', '%}', '${', '}')
tmpl = env.from_string('''{% for item in seq
%}${{'foo': item}|upper}{% endfor %}''')
assert tmpl.render(seq=list(range(3))) == "{'FOO': 0}{'FOO': 1}{'FOO': 2}"
def test_comments(self):
env = Environment('<!--', '-->', '{', '}')
tmpl = env.from_string('''\
<ul>
<!--- for item in seq -->
<li>{item}</li>
<!--- endfor -->
</ul>''')
assert tmpl.render(seq=list(range(3))) == ("<ul>\n <li>0</li>\n "
"<li>1</li>\n <li>2</li>\n</ul>")
def test_string_escapes(self):
for char in u'\0', u'\u2668', u'\xe4', u'\t', u'\r', u'\n':
tmpl = env.from_string('{{ %s }}' % jinja_string_repr(char))
assert tmpl.render() == char
assert env.from_string('{{ "\N{HOT SPRINGS}" }}').render() == u'\u2668'
def test_bytefallback(self):
from pprint import pformat
tmpl = env.from_string(u'''{{ 'foo'|pprint }}|{{ 'bär'|pprint }}''')
assert tmpl.render() == pformat('foo') + '|' + pformat(u'bär')
def test_operators(self):
from jinja2.lexer import operators
for test, expect in iteritems(operators):
if test in '([{}])':
continue
stream = env.lexer.tokenize('{{ %s }}' % test)
next(stream)
assert stream.current.type == expect
def test_normalizing(self):
for seq in '\r', '\r\n', '\n':
env = Environment(newline_sequence=seq)
tmpl = env.from_string('1\n2\r\n3\n4\n')
result = tmpl.render()
assert result.replace(seq, 'X') == '1X2X3X4'
def test_trailing_newline(self):
for keep in [True, False]:
env = Environment(keep_trailing_newline=keep)
for template,expected in [
('', {}),
('no\nnewline', {}),
('with\nnewline\n', {False: 'with\nnewline'}),
('with\nseveral\n\n\n', {False: 'with\nseveral\n\n'}),
]:
tmpl = env.from_string(template)
expect = expected.get(keep, template)
result = tmpl.render()
assert result == expect, (keep, template, result, expect)
class ParserTestCase(JinjaTestCase):
def test_php_syntax(self):
env = Environment('<?', '?>', '<?=', '?>', '<!--', '-->')
tmpl = env.from_string('''\
<!-- I'm a comment, I'm not interesting -->\
<? for item in seq -?>
<?= item ?>
<?- endfor ?>''')
assert tmpl.render(seq=list(range(5))) == '01234'
def test_erb_syntax(self):
env = Environment('<%', '%>', '<%=', '%>', '<%#', '%>')
tmpl = env.from_string('''\
<%# I'm a comment, I'm not interesting %>\
<% for item in seq -%>
<%= item %>
<%- endfor %>''')
assert tmpl.render(seq=list(range(5))) == '01234'
def test_comment_syntax(self):
env = Environment('<!--', '-->', '${', '}', '<!--#', '-->')
tmpl = env.from_string('''\
<!--# I'm a comment, I'm not interesting -->\
<!-- for item in seq --->
${item}
<!--- endfor -->''')
assert tmpl.render(seq=list(range(5))) == '01234'
def test_balancing(self):
tmpl = env.from_string('''{{{'foo':'bar'}.foo}}''')
assert tmpl.render() == 'bar'
def test_start_comment(self):
tmpl = env.from_string('''{# foo comment
and bar comment #}
{% macro blub() %}foo{% endmacro %}
{{ blub() }}''')
assert tmpl.render().strip() == 'foo'
def test_line_syntax(self):
env = Environment('<%', '%>', '${', '}', '<%#', '%>', '%')
tmpl = env.from_string('''\
<%# regular comment %>
% for item in seq:
${item}
% endfor''')
assert [int(x.strip()) for x in tmpl.render(seq=list(range(5))).split()] == \
list(range(5))
env = Environment('<%', '%>', '${', '}', '<%#', '%>', '%', '##')
tmpl = env.from_string('''\
<%# regular comment %>
% for item in seq:
${item} ## the rest of the stuff
% endfor''')
assert [int(x.strip()) for x in tmpl.render(seq=list(range(5))).split()] == \
list(range(5))
def test_line_syntax_priority(self):
# XXX: why is the whitespace there in front of the newline?
env = Environment('{%', '%}', '${', '}', '/*', '*/', '##', '#')
tmpl = env.from_string('''\
/* ignore me.
I'm a multiline comment */
## for item in seq:
* ${item} # this is just extra stuff
## endfor''')
assert tmpl.render(seq=[1, 2]).strip() == '* 1\n* 2'
env = Environment('{%', '%}', '${', '}', '/*', '*/', '#', '##')
tmpl = env.from_string('''\
/* ignore me.
I'm a multiline comment */
# for item in seq:
* ${item} ## this is just extra stuff
## extra stuff i just want to ignore
# endfor''')
assert tmpl.render(seq=[1, 2]).strip() == '* 1\n\n* 2'
def test_error_messages(self):
def assert_error(code, expected):
try:
Template(code)
except TemplateSyntaxError as e:
assert str(e) == expected, 'unexpected error message'
else:
assert False, 'that was supposed to be an error'
assert_error('{% for item in seq %}...{% endif %}',
"Encountered unknown tag 'endif'. Jinja was looking "
"for the following tags: 'endfor' or 'else'. The "
"innermost block that needs to be closed is 'for'.")
assert_error('{% if foo %}{% for item in seq %}...{% endfor %}{% endfor %}',
"Encountered unknown tag 'endfor'. Jinja was looking for "
"the following tags: 'elif' or 'else' or 'endif'. The "
"innermost block that needs to be closed is 'if'.")
assert_error('{% if foo %}',
"Unexpected end of template. Jinja was looking for the "
"following tags: 'elif' or 'else' or 'endif'. The "
"innermost block that needs to be closed is 'if'.")
assert_error('{% for item in seq %}',
"Unexpected end of template. Jinja was looking for the "
"following tags: 'endfor' or 'else'. The innermost block "
"that needs to be closed is 'for'.")
assert_error('{% block foo-bar-baz %}',
"Block names in Jinja have to be valid Python identifiers "
"and may not contain hyphens, use an underscore instead.")
assert_error('{% unknown_tag %}',
"Encountered unknown tag 'unknown_tag'.")
class SyntaxTestCase(JinjaTestCase):
def test_call(self):
env = Environment()
env.globals['foo'] = lambda a, b, c, e, g: a + b + c + e + g
tmpl = env.from_string("{{ foo('a', c='d', e='f', *['b'], **{'g': 'h'}) }}")
assert tmpl.render() == 'abdfh'
def test_slicing(self):
tmpl = env.from_string('{{ [1, 2, 3][:] }}|{{ [1, 2, 3][::-1] }}')
assert tmpl.render() == '[1, 2, 3]|[3, 2, 1]'
def test_attr(self):
tmpl = env.from_string("{{ foo.bar }}|{{ foo['bar'] }}")
assert tmpl.render(foo={'bar': 42}) == '42|42'
def test_subscript(self):
tmpl = env.from_string("{{ foo[0] }}|{{ foo[-1] }}")
assert tmpl.render(foo=[0, 1, 2]) == '0|2'
def test_tuple(self):
tmpl = env.from_string('{{ () }}|{{ (1,) }}|{{ (1, 2) }}')
assert tmpl.render() == '()|(1,)|(1, 2)'
def test_math(self):
tmpl = env.from_string('{{ (1 + 1 * 2) - 3 / 2 }}|{{ 2**3 }}')
assert tmpl.render() == '1.5|8'
def test_div(self):
tmpl = env.from_string('{{ 3 // 2 }}|{{ 3 / 2 }}|{{ 3 % 2 }}')
assert tmpl.render() == '1|1.5|1'
def test_unary(self):
tmpl = env.from_string('{{ +3 }}|{{ -3 }}')
assert tmpl.render() == '3|-3'
def test_concat(self):
tmpl = env.from_string("{{ [1, 2] ~ 'foo' }}")
assert tmpl.render() == '[1, 2]foo'
def test_compare(self):
tmpl = env.from_string('{{ 1 > 0 }}|{{ 1 >= 1 }}|{{ 2 < 3 }}|'
'{{ 2 == 2 }}|{{ 1 <= 1 }}')
assert tmpl.render() == 'True|True|True|True|True'
def test_inop(self):
tmpl = env.from_string('{{ 1 in [1, 2, 3] }}|{{ 1 not in [1, 2, 3] }}')
assert tmpl.render() == 'True|False'
def test_literals(self):
tmpl = env.from_string('{{ [] }}|{{ {} }}|{{ () }}')
assert tmpl.render().lower() == '[]|{}|()'
def test_bool(self):
tmpl = env.from_string('{{ true and false }}|{{ false '
'or true }}|{{ not false }}')
assert tmpl.render() == 'False|True|True'
def test_grouping(self):
tmpl = env.from_string('{{ (true and false) or (false and true) and not false }}')
assert tmpl.render() == 'False'
def test_django_attr(self):
tmpl = env.from_string('{{ [1, 2, 3].0 }}|{{ [[1]].0.0 }}')
assert tmpl.render() == '1|1'
def test_conditional_expression(self):
tmpl = env.from_string('''{{ 0 if true else 1 }}''')
assert tmpl.render() == '0'
def test_short_conditional_expression(self):
tmpl = env.from_string('<{{ 1 if false }}>')
assert tmpl.render() == '<>'
tmpl = env.from_string('<{{ (1 if false).bar }}>')
self.assert_raises(UndefinedError, tmpl.render)
def test_filter_priority(self):
tmpl = env.from_string('{{ "foo"|upper + "bar"|upper }}')
assert tmpl.render() == 'FOOBAR'
def test_function_calls(self):
tests = [
(True, '*foo, bar'),
(True, '*foo, *bar'),
(True, '*foo, bar=42'),
(True, '**foo, *bar'),
(True, '**foo, bar'),
(False, 'foo, bar'),
(False, 'foo, bar=42'),
(False, 'foo, bar=23, *args'),
(False, 'a, b=c, *d, **e'),
(False, '*foo, **bar')
]
for should_fail, sig in tests:
if should_fail:
self.assert_raises(TemplateSyntaxError,
env.from_string, '{{ foo(%s) }}' % sig)
else:
env.from_string('foo(%s)' % sig)
def test_tuple_expr(self):
for tmpl in [
'{{ () }}',
'{{ (1, 2) }}',
'{{ (1, 2,) }}',
'{{ 1, }}',
'{{ 1, 2 }}',
'{% for foo, bar in seq %}...{% endfor %}',
'{% for x in foo, bar %}...{% endfor %}',
'{% for x in foo, %}...{% endfor %}'
]:
assert env.from_string(tmpl)
def test_trailing_comma(self):
tmpl = env.from_string('{{ (1, 2,) }}|{{ [1, 2,] }}|{{ {1: 2,} }}')
assert tmpl.render().lower() == '(1, 2)|[1, 2]|{1: 2}'
def test_block_end_name(self):
env.from_string('{% block foo %}...{% endblock foo %}')
self.assert_raises(TemplateSyntaxError, env.from_string,
'{% block x %}{% endblock y %}')
def test_constant_casing(self):
for const in True, False, None:
tmpl = env.from_string('{{ %s }}|{{ %s }}|{{ %s }}' % (
str(const), str(const).lower(), str(const).upper()
))
assert tmpl.render() == '%s|%s|' % (const, const)
def test_test_chaining(self):
self.assert_raises(TemplateSyntaxError, env.from_string,
'{{ foo is string is sequence }}')
assert env.from_string('{{ 42 is string or 42 is number }}'
).render() == 'True'
def test_string_concatenation(self):
tmpl = env.from_string('{{ "foo" "bar" "baz" }}')
assert tmpl.render() == 'foobarbaz'
def test_notin(self):
bar = range(100)
tmpl = env.from_string('''{{ not 42 in bar }}''')
assert tmpl.render(bar=bar) == text_type(not 42 in bar)
def test_implicit_subscribed_tuple(self):
class Foo(object):
def __getitem__(self, x):
return x
t = env.from_string('{{ foo[1, 2] }}')
assert t.render(foo=Foo()) == u'(1, 2)'
def test_raw2(self):
tmpl = env.from_string('{% raw %}{{ FOO }} and {% BAR %}{% endraw %}')
assert tmpl.render() == '{{ FOO }} and {% BAR %}'
def test_const(self):
tmpl = env.from_string('{{ true }}|{{ false }}|{{ none }}|'
'{{ none is defined }}|{{ missing is defined }}')
assert tmpl.render() == 'True|False|None|True|False'
def test_neg_filter_priority(self):
node = env.parse('{{ -1|foo }}')
assert isinstance(node.body[0].nodes[0], nodes.Filter)
assert isinstance(node.body[0].nodes[0].node, nodes.Neg)
def test_const_assign(self):
constass1 = '''{% set true = 42 %}'''
constass2 = '''{% for none in seq %}{% endfor %}'''
for tmpl in constass1, constass2:
self.assert_raises(TemplateSyntaxError, env.from_string, tmpl)
def test_localset(self):
tmpl = env.from_string('''{% set foo = 0 %}\
{% for item in [1, 2] %}{% set foo = 1 %}{% endfor %}\
{{ foo }}''')
assert tmpl.render() == '0'
def test_parse_unary(self):
tmpl = env.from_string('{{ -foo["bar"] }}')
assert tmpl.render(foo={'bar': 42}) == '-42'
tmpl = env.from_string('{{ -foo["bar"]|abs }}')
assert tmpl.render(foo={'bar': 42}) == '42'
class LstripBlocksTestCase(JinjaTestCase):
def test_lstrip(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(''' {% if True %}\n {% endif %}''')
assert tmpl.render() == "\n"
def test_lstrip_trim(self):
env = Environment(lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string(''' {% if True %}\n {% endif %}''')
assert tmpl.render() == ""
def test_no_lstrip(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(''' {%+ if True %}\n {%+ endif %}''')
assert tmpl.render() == " \n "
def test_lstrip_endline(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(''' hello{% if True %}\n goodbye{% endif %}''')
assert tmpl.render() == " hello\n goodbye"
def test_lstrip_inline(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(''' {% if True %}hello {% endif %}''')
assert tmpl.render() == 'hello '
def test_lstrip_nested(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(''' {% if True %}a {% if True %}b {% endif %}c {% endif %}''')
assert tmpl.render() == 'a b c '
def test_lstrip_left_chars(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(''' abc {% if True %}
hello{% endif %}''')
assert tmpl.render() == ' abc \n hello'
def test_lstrip_embeded_strings(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(''' {% set x = " {% str %} " %}{{ x }}''')
assert tmpl.render() == ' {% str %} '
def test_lstrip_preserve_leading_newlines(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string('''\n\n\n{% set hello = 1 %}''')
assert tmpl.render() == '\n\n\n'
def test_lstrip_comment(self):
env = Environment(lstrip_blocks=True, trim_blocks=False)
tmpl = env.from_string(''' {# if True #}
hello
{#endif#}''')
assert tmpl.render() == '\nhello\n'
def test_lstrip_angle_bracket_simple(self):
env = Environment('<%', '%>', '${', '}', '<%#', '%>', '%', '##',
lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string(''' <% if True %>hello <% endif %>''')
assert tmpl.render() == 'hello '
def test_lstrip_angle_bracket_comment(self):
env = Environment('<%', '%>', '${', '}', '<%#', '%>', '%', '##',
lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string(''' <%# if True %>hello <%# endif %>''')
assert tmpl.render() == 'hello '
def test_lstrip_angle_bracket(self):
env = Environment('<%', '%>', '${', '}', '<%#', '%>', '%', '##',
lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string('''\
<%# regular comment %>
<% for item in seq %>
${item} ## the rest of the stuff
<% endfor %>''')
assert tmpl.render(seq=range(5)) == \
''.join('%s\n' % x for x in range(5))
def test_lstrip_angle_bracket_compact(self):
env = Environment('<%', '%>', '${', '}', '<%#', '%>', '%', '##',
lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string('''\
<%#regular comment%>
<%for item in seq%>
${item} ## the rest of the stuff
<%endfor%>''')
assert tmpl.render(seq=range(5)) == \
''.join('%s\n' % x for x in range(5))
def test_php_syntax_with_manual(self):
env = Environment('<?', '?>', '<?=', '?>', '<!--', '-->',
lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string('''\
<!-- I'm a comment, I'm not interesting -->
<? for item in seq -?>
<?= item ?>
<?- endfor ?>''')
assert tmpl.render(seq=range(5)) == '01234'
def test_php_syntax(self):
env = Environment('<?', '?>', '<?=', '?>', '<!--', '-->',
lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string('''\
<!-- I'm a comment, I'm not interesting -->
<? for item in seq ?>
<?= item ?>
<? endfor ?>''')
assert tmpl.render(seq=range(5)) == ''.join(' %s\n' % x for x in range(5))
def test_php_syntax_compact(self):
env = Environment('<?', '?>', '<?=', '?>', '<!--', '-->',
lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string('''\
<!-- I'm a comment, I'm not interesting -->
<?for item in seq?>
<?=item?>
<?endfor?>''')
assert tmpl.render(seq=range(5)) == ''.join(' %s\n' % x for x in range(5))
def test_erb_syntax(self):
env = Environment('<%', '%>', '<%=', '%>', '<%#', '%>',
lstrip_blocks=True, trim_blocks=True)
#env.from_string('')
#for n,r in env.lexer.rules.iteritems():
# print n
#print env.lexer.rules['root'][0][0].pattern
#print "'%s'" % tmpl.render(seq=range(5))
tmpl = env.from_string('''\
<%# I'm a comment, I'm not interesting %>
<% for item in seq %>
<%= item %>
<% endfor %>
''')
assert tmpl.render(seq=range(5)) == ''.join(' %s\n' % x for x in range(5))
def test_erb_syntax_with_manual(self):
env = Environment('<%', '%>', '<%=', '%>', '<%#', '%>',
lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string('''\
<%# I'm a comment, I'm not interesting %>
<% for item in seq -%>
<%= item %>
<%- endfor %>''')
assert tmpl.render(seq=range(5)) == '01234'
def test_erb_syntax_no_lstrip(self):
env = Environment('<%', '%>', '<%=', '%>', '<%#', '%>',
lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string('''\
<%# I'm a comment, I'm not interesting %>
<%+ for item in seq -%>
<%= item %>
<%- endfor %>''')
assert tmpl.render(seq=range(5)) == ' 01234'
def test_comment_syntax(self):
env = Environment('<!--', '-->', '${', '}', '<!--#', '-->',
lstrip_blocks=True, trim_blocks=True)
tmpl = env.from_string('''\
<!--# I'm a comment, I'm not interesting -->\
<!-- for item in seq --->
${item}
<!--- endfor -->''')
assert tmpl.render(seq=range(5)) == '01234'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TokenStreamTestCase))
suite.addTest(unittest.makeSuite(LexerTestCase))
suite.addTest(unittest.makeSuite(ParserTestCase))
suite.addTest(unittest.makeSuite(SyntaxTestCase))
suite.addTest(unittest.makeSuite(LstripBlocksTestCase))
return suite
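# Illustrative sketch (not part of the original test suite): the positional
# Environment arguments exercised above are, in order, the block, variable and
# comment delimiters, optionally followed by the line-statement and
# line-comment prefixes.
if __name__ == '__main__':
    demo_env = Environment('<%', '%>', '${', '}', '<%#', '%>')
    print(demo_env.from_string('<% for i in seq %>${i}<% endfor %>').render(seq=[0, 1, 2]))
    # -> 012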
|
helinb/Tickeys-linux | refs/heads/master | tickeys/kivy/core/image/img_tex.py | 54 | '''
Tex: Compressed texture
'''
__all__ = ('ImageLoaderTex', )
import json
from struct import unpack
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
class ImageLoaderTex(ImageLoaderBase):
@staticmethod
def extensions():
return ('tex', )
def load(self, filename):
try:
fd = open(filename, 'rb')
if fd.read(4) != 'KTEX':
raise Exception('Invalid tex identifier')
headersize = unpack('I', fd.read(4))[0]
header = fd.read(headersize)
if len(header) != headersize:
raise Exception('Truncated tex header')
info = json.loads(header)
data = fd.read()
if len(data) != info['datalen']:
raise Exception('Truncated tex data')
except:
Logger.warning('Image: Image <%s> is corrupted' % filename)
raise
width, height = info['image_size']
tw, th = info['texture_size']
images = [data]
im = ImageData(width, height, str(info['format']), images[0],
source=filename)
'''
if len(dds.images) > 1:
images = dds.images
images_size = dds.images_size
for index in range(1, len(dds.images)):
w, h = images_size[index]
data = images[index]
im.add_mipmap(index, w, h, data)
'''
return [im]
# register
ImageLoader.register(ImageLoaderTex)
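# Illustrative sketch (not part of the original file): a minimal writer for the
# container parsed by load() above -- the 'KTEX' magic, a uint32 header length,
# then a JSON header whose keys mirror those read in load(), then the raw texel
# data. The helper name and default values are hypothetical; Python 2 str
# semantics are assumed, matching the reader.
def _write_tex_sketch(filename, data, fmt='rgba', size=(2, 2)):
    from struct import pack
    header = json.dumps({'image_size': size, 'texture_size': size,
                         'format': fmt, 'datalen': len(data)})
    with open(filename, 'wb') as fd:
        fd.write('KTEX')               # magic checked by load()
        fd.write(pack('I', len(header)))  # header length, same 'I' layout
        fd.write(header)
        fd.write(data)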
|
JohnHowland/kivy | refs/heads/master | kivy/core/image/img_tex.py | 54 | '''
Tex: Compressed texture
'''
__all__ = ('ImageLoaderTex', )
import json
from struct import unpack
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
class ImageLoaderTex(ImageLoaderBase):
@staticmethod
def extensions():
return ('tex', )
def load(self, filename):
try:
fd = open(filename, 'rb')
if fd.read(4) != 'KTEX':
raise Exception('Invalid tex identifier')
headersize = unpack('I', fd.read(4))[0]
header = fd.read(headersize)
if len(header) != headersize:
raise Exception('Truncated tex header')
info = json.loads(header)
data = fd.read()
if len(data) != info['datalen']:
raise Exception('Truncated tex data')
except:
Logger.warning('Image: Image <%s> is corrupted' % filename)
raise
width, height = info['image_size']
tw, th = info['texture_size']
images = [data]
im = ImageData(width, height, str(info['format']), images[0],
source=filename)
'''
if len(dds.images) > 1:
images = dds.images
images_size = dds.images_size
for index in range(1, len(dds.images)):
w, h = images_size[index]
data = images[index]
im.add_mipmap(index, w, h, data)
'''
return [im]
# register
ImageLoader.register(ImageLoaderTex)
|
cafecivet/django_girls_tutorial | refs/heads/master | Lib/site-packages/django/db/backends/mysql/client.py | 84 | import os
import sys
from django.db.backends import BaseDatabaseClient
class DatabaseClient(BaseDatabaseClient):
executable_name = 'mysql'
def runshell(self):
settings_dict = self.connection.settings_dict
args = [self.executable_name]
db = settings_dict['OPTIONS'].get('db', settings_dict['NAME'])
user = settings_dict['OPTIONS'].get('user', settings_dict['USER'])
passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD'])
host = settings_dict['OPTIONS'].get('host', settings_dict['HOST'])
port = settings_dict['OPTIONS'].get('port', settings_dict['PORT'])
defaults_file = settings_dict['OPTIONS'].get('read_default_file')
# Seems to be no good way to set sql_mode with CLI.
if defaults_file:
args += ["--defaults-file=%s" % defaults_file]
if user:
args += ["--user=%s" % user]
if passwd:
args += ["--password=%s" % passwd]
if host:
if '/' in host:
args += ["--socket=%s" % host]
else:
args += ["--host=%s" % host]
if port:
args += ["--port=%s" % port]
if db:
args += [db]
if os.name == 'nt':
sys.exit(os.system(" ".join(args)))
else:
os.execvp(self.executable_name, args)
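# Illustrative note (not part of the original file): with hypothetical settings
# NAME='mydb', USER='u', PASSWORD='s3cret', HOST='db.example', PORT='3306',
# runshell() ends up exec'ing roughly:
#   mysql --user=u --password=s3cret --host=db.example --port=3306 mydb
# A UNIX socket (--socket=...) is used instead of --host when HOST contains '/'.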
|
partofthething/home-assistant | refs/heads/dev | homeassistant/components/hikvisioncam/switch.py | 14 | """Support turning on/off motion detection on Hikvision cameras."""
import logging
import hikvision.api
from hikvision.error import HikvisionError, MissingParamError
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
# This is the last working version, please test before updating
_LOGGING = logging.getLogger(__name__)
DEFAULT_NAME = "Hikvision Camera Motion Detection"
DEFAULT_PASSWORD = "12345"
DEFAULT_PORT = 80
DEFAULT_USERNAME = "admin"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Hikvision camera."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
try:
hikvision_cam = hikvision.api.CreateDevice(
host, port=port, username=username, password=password, is_https=False
)
except MissingParamError as param_err:
_LOGGING.error("Missing required param: %s", param_err)
return False
except HikvisionError as conn_err:
_LOGGING.error("Unable to connect: %s", conn_err)
return False
add_entities([HikvisionMotionSwitch(name, hikvision_cam)])
class HikvisionMotionSwitch(SwitchEntity):
"""Representation of a switch to toggle on/off motion detection."""
def __init__(self, name, hikvision_cam):
"""Initialize the switch."""
self._name = name
self._hikvision_cam = hikvision_cam
self._state = STATE_OFF
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def state(self):
"""Return the state of the device if any."""
return self._state
@property
def is_on(self):
"""Return true if device is on."""
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the device on."""
_LOGGING.info("Turning on Motion Detection ")
self._hikvision_cam.enable_motion_detection()
def turn_off(self, **kwargs):
"""Turn the device off."""
_LOGGING.info("Turning off Motion Detection ")
self._hikvision_cam.disable_motion_detection()
def update(self):
"""Update Motion Detection state."""
enabled = self._hikvision_cam.is_motion_detection_enabled()
_LOGGING.info("enabled: %s", enabled)
self._state = STATE_ON if enabled else STATE_OFF
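# Illustrative note (not part of the original file): PLATFORM_SCHEMA above can
# be exercised directly; the host here is hypothetical.
#   PLATFORM_SCHEMA({'platform': 'hikvisioncam', 'host': '192.168.1.64'})
# should return the validated dict with the DEFAULT_* values filled in for the
# optional name, username and password keys.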
|
timy/dm_spec | refs/heads/exp | ana/seidner/plot_ppar_2d.py | 1 | import numpy as np
import sys
sys.path.append( '../../pkg' )
import pymod_indx, pymod_data, pymod_plot
dat_idx = [[0, '[-1, 1, 1] in z-axis']]
func = np.abs
time = np.loadtxt( "res/time.dat" )
grid = np.loadtxt( "res/grid_2d_0.dat")
i_dir = 40
# data = np.loadtxt( "res/ppar_2d_dpl0_coo2_%d_0.dat" % i_dir )
# for i in range(1, 4):
# data += np.loadtxt( "res/ppar_2d_dpl%d_coo2_%d_0.dat" % (i, i_dir) )
data = np.loadtxt( "res/ppar_2d_dpl0_coo2_%d_0.dat" % i_dir )
# import matplotlib.pyplot as plt
# plt.plot( data[0:500,0] )
# plt.show()
# sys.exit(-1)
ax = pymod_plot.plot_1d_init()
# w_shift = [-16000, +16000] means the signal is in the fourth quadrant
# because for t, we use -16000 to move the signal to frequency origin
# and for tau, we use +16000 to move the signal to frequency origin
# so the signal has the frequency ~(+16000, -16000)
pymod_data.plot_signal_2d( time, grid, data, func, ax, padding=[0.0, 0.0],
w_shift=[-16000.0, +16000.0], obj='p', t0=[0,0] )
pymod_plot.plot_show( ax,
xlabel=r"Freq. for detection $t$ (cm$^{-1}$)",
ylabel=r"Freq. for coherence $\tau$ (cm$^{-1}$)",
title=r"2-dimensional Spectrum of Dimer System")
#xlim=[15000, 17000], ylim=[15400, 16600] )
#xlim=[0, 400], ylim=[0, 400] )
# import matplotlib.pyplot as plt
# import numpy as np
# import sys
# n_tau = 40
# n_t = 2000
# data = np.loadtxt("res/ppar_2d0_ 3_ 0.dat")
# tau_lower, tau_upper = 0.0, 500.0
# t_lower, t_upper = -700, 500.0
# font = {
# 'family' : 'serif',
# 'weight' : 'normal',
# 'size' : 20,}
# font_label = {
# 'weight' : 'normal',
# 'size' : 16,}
# tau, t = data[::n_t,0], data[:n_t,1]
# #idx_tau, = np.nonzero( (tau >= tau_lower) & (tau <= tau_upper))
# #idx_t, = np.nonzero( (t >= t_lower) & (t <= t_upper))
# #idx_data = np.nonzero( (data[:,0] >= tau_lower) & \
# # (data[:,0] <= tau_upper) & \
# # (data[:,1] >= t_lower) & (data[:,1] <= t_upper) )
# dat = data[:, 2]
# #dat = np.sqrt(data[idx_data,2]**2 + data[idx_data,3]**2)
# #n_tau, n_t = np.size(idx_tau), np.size(idx_t)
# #print( "Number of elements used for display: ", np.size(idx_data) )
# print( "Number of tau: ", n_tau )
# print( "Number of t: ", n_t )
# spec = np.reshape(dat, (n_tau, n_t))
# #spec = np.log(spec)
# #extent = ( t[idx_t[0]], t[idx_t[n_t-1]], \
# #tau[idx_tau[0]], tau[idx_tau[n_tau-1]] )
# # tau[idx_tau[n_tau-1]], tau[idx_tau[0]] )
# origin = 'upper'
# norm = plt.cm.colors.Normalize(vmax=spec.max(), vmin=spec.min())
# im = plt.imshow(spec, cmap=plt.cm.summer, norm=norm,
# origin=origin)#, extent=extent)
# plt.contour(spec, 5, hold='on', colors = 'k', origin=origin)#,
# #extent=extent)
# plt.colorbar(im)
# plt.title(r'2-dimensional polarization', fontdict=font)
# plt.xlabel(r'$t$', fontdict=font_label)
# plt.ylabel(r'$\tau$', fontdict=font_label)
# plt.grid(True)
# plt.savefig("fig/spec.svg")
# plt.show()
|
AOSP-S4-KK/platform_external_chromium_org | refs/heads/kk-4.4 | net/tools/testserver/testserver_base.py | 63 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import errno
import json
import optparse
import os
import re
import socket
import SocketServer
import struct
import sys
import warnings
# Ignore deprecation warnings, they make our output more cluttered.
warnings.filterwarnings("ignore", category=DeprecationWarning)
if sys.platform == 'win32':
import msvcrt
# Using debug() seems to cause hangs on XP: see http://crbug.com/64515.
debug_output = sys.stderr
def debug(string):
debug_output.write(string + "\n")
debug_output.flush()
class Error(Exception):
"""Error class for this module."""
class OptionError(Error):
"""Error for bad command line options."""
class FileMultiplexer(object):
def __init__(self, fd1, fd2) :
self.__fd1 = fd1
self.__fd2 = fd2
def __del__(self) :
if self.__fd1 != sys.stdout and self.__fd1 != sys.stderr:
self.__fd1.close()
if self.__fd2 != sys.stdout and self.__fd2 != sys.stderr:
self.__fd2.close()
def write(self, text) :
self.__fd1.write(text)
self.__fd2.write(text)
def flush(self) :
self.__fd1.flush()
self.__fd2.flush()
class ClientRestrictingServerMixIn:
"""Implements verify_request to limit connections to our configured IP
address."""
def verify_request(self, _request, client_address):
return client_address[0] == self.server_address[0]
class BrokenPipeHandlerMixIn:
"""Allows the server to deal with "broken pipe" errors (which happen if the
browser quits with outstanding requests, like for the favicon). This mix-in
requires the class to derive from SocketServer.BaseServer and not override its
handle_error() method. """
def handle_error(self, request, client_address):
value = sys.exc_info()[1]
if isinstance(value, socket.error):
err = value.args[0]
if sys.platform in ('win32', 'cygwin'):
# "An established connection was aborted by the software in your host."
pipe_err = 10053
else:
pipe_err = errno.EPIPE
if err == pipe_err:
print "testserver.py: Broken pipe"
return
SocketServer.BaseServer.handle_error(self, request, client_address)
class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
"""This is a specialization of BaseHTTPServer to allow it
to be exited cleanly (by setting its "stop" member to True)."""
def serve_forever(self):
self.stop = False
self.nonce_time = None
while not self.stop:
self.handle_request()
self.socket.close()
def MultiplexerHack(std_fd, log_fd):
"""Creates a FileMultiplexer that will write to both specified files.
When running on Windows XP bots, stdout and stderr will be invalid file
handles, so log_fd will be returned directly. (This does not occur if you
run the test suite directly from a console, but only if the output of the
test executable is redirected.)
"""
if std_fd.fileno() <= 0:
return log_fd
return FileMultiplexer(std_fd, log_fd)
class BasePageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, request, client_address, socket_server,
connect_handlers, get_handlers, head_handlers, post_handlers,
put_handlers):
self._connect_handlers = connect_handlers
self._get_handlers = get_handlers
self._head_handlers = head_handlers
self._post_handlers = post_handlers
self._put_handlers = put_handlers
BaseHTTPServer.BaseHTTPRequestHandler.__init__(
self, request, client_address, socket_server)
def log_request(self, *args, **kwargs):
# Disable request logging to declutter test log output.
pass
def _ShouldHandleRequest(self, handler_name):
"""Determines if the path can be handled by the handler.
We consider a handler valid if the path begins with the
handler name. It can optionally be followed by "?*", "/*".
"""
pattern = re.compile('%s($|\?|/).*' % handler_name)
return pattern.match(self.path)
def do_CONNECT(self):
for handler in self._connect_handlers:
if handler():
return
def do_GET(self):
for handler in self._get_handlers:
if handler():
return
def do_HEAD(self):
for handler in self._head_handlers:
if handler():
return
def do_POST(self):
for handler in self._post_handlers:
if handler():
return
def do_PUT(self):
for handler in self._put_handlers:
if handler():
return
class TestServerRunner(object):
"""Runs a test server and communicates with the controlling C++ test code.
Subclasses should override the create_server method to create their server
object, and the add_options method to add their own options.
"""
def __init__(self):
self.option_parser = optparse.OptionParser()
self.add_options()
def main(self):
self.options, self.args = self.option_parser.parse_args()
logfile = open(self.options.log_file, 'w')
sys.stderr = MultiplexerHack(sys.stderr, logfile)
if self.options.log_to_console:
sys.stdout = MultiplexerHack(sys.stdout, logfile)
else:
sys.stdout = logfile
server_data = {
'host': self.options.host,
}
self.server = self.create_server(server_data)
self._notify_startup_complete(server_data)
self.run_server()
def create_server(self, server_data):
"""Creates a server object and returns it.
Must populate server_data['port'], and can set additional server_data
elements if desired."""
raise NotImplementedError()
def run_server(self):
try:
self.server.serve_forever()
except KeyboardInterrupt:
print 'shutting down server'
self.server.stop = True
def add_options(self):
self.option_parser.add_option('--startup-pipe', type='int',
dest='startup_pipe',
help='File handle of pipe to parent process')
self.option_parser.add_option('--log-to-console', action='store_const',
const=True, default=False,
dest='log_to_console',
help='Enables or disables sys.stdout logging '
'to the console.')
self.option_parser.add_option('--log-file', default='testserver.log',
dest='log_file',
help='The name of the server log file.')
self.option_parser.add_option('--port', default=0, type='int',
help='Port used by the server. If '
'unspecified, the server will listen on an '
'ephemeral port.')
self.option_parser.add_option('--host', default='127.0.0.1',
dest='host',
help='Hostname or IP upon which the server '
'will listen. Client connections will also '
'only be allowed from this address.')
self.option_parser.add_option('--data-dir', dest='data_dir',
help='Directory from which to read the '
'files.')
def _notify_startup_complete(self, server_data):
# Notify the parent that we've started. (BaseServer subclasses
# bind their sockets on construction.)
if self.options.startup_pipe is not None:
server_data_json = json.dumps(server_data)
server_data_len = len(server_data_json)
print 'sending server_data: %s (%d bytes)' % (
server_data_json, server_data_len)
if sys.platform == 'win32':
fd = msvcrt.open_osfhandle(self.options.startup_pipe, 0)
else:
fd = self.options.startup_pipe
startup_pipe = os.fdopen(fd, "w")
# First write the data length as an unsigned 4-byte value. This
# is _not_ using network byte ordering since the other end of the
# pipe is on the same machine.
startup_pipe.write(struct.pack('=L', server_data_len))
startup_pipe.write(server_data_json)
startup_pipe.close()
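# Illustrative note (not part of the original file): the parent end of the
# startup pipe reads the handshake symmetrically -- a native-endian uint32
# length followed by that many bytes of JSON, e.g. (read_fd is hypothetical):
#   (data_len,) = struct.unpack('=L', os.read(read_fd, 4))
#   server_data = json.loads(os.read(read_fd, data_len))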
|
auready/django | refs/heads/master | tests/migrations/test_migrations_squashed_complex_multi_apps/app1/3_auto.py | 133 | from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("app1", "2_auto"), ("app2", "2_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
ccrouch/tuskar | refs/heads/master | tuskar/openstack/common/timeutils.py | 39 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import iso8601
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format"""
if not at:
at = utcnow()
st = at.strftime(_ISO8601_TIME_FORMAT
if not subsecond
else _ISO8601_TIME_FORMAT_SUBSECOND)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
st += ('Z' if tz == 'UTC' else tz)
return st
def parse_isotime(timestr):
"""Parse time from ISO 8601 format"""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(e.message)
except TypeError as e:
raise ValueError(e.message)
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
"""Returns formatted utcnow."""
if not at:
at = utcnow()
return at.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
"""Turn a formatted time back into a datetime."""
return datetime.datetime.strptime(timestr, fmt)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC naive object"""
offset = timestamp.utcoffset()
if offset is None:
return timestamp
return timestamp.replace(tzinfo=None) - offset
def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
if isinstance(before, basestring):
before = parse_strtime(before).replace(tzinfo=None)
return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
"""Return True if after is newer than seconds."""
if isinstance(after, basestring):
after = parse_strtime(after).replace(tzinfo=None)
return after - utcnow() > datetime.timedelta(seconds=seconds)
def utcnow_ts():
"""Timestamp version of our utcnow function."""
return calendar.timegm(utcnow().timetuple())
def utcnow():
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
try:
return utcnow.override_time.pop(0)
except AttributeError:
return utcnow.override_time
return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp):
"""Returns a iso8601 formated date from timestamp"""
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
utcnow.override_time = None
def set_time_override(override_time=datetime.datetime.utcnow()):
"""
Override utils.utcnow to return a constant time or a list thereof,
one at a time.
"""
utcnow.override_time = override_time
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
assert(not utcnow.override_time is None)
try:
for dt in utcnow.override_time:
dt += timedelta
except TypeError:
utcnow.override_time += timedelta
def advance_time_seconds(seconds):
"""Advance overridden time by seconds."""
advance_time_delta(datetime.timedelta(0, seconds))
def clear_time_override():
"""Remove the overridden time."""
utcnow.override_time = None
def marshall_now(now=None):
"""Make an rpc-safe datetime with microseconds.
Note: tzinfo is stripped, but not required for relative times."""
if not now:
now = utcnow()
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
minute=now.minute, second=now.second,
microsecond=now.microsecond)
def unmarshall_time(tyme):
"""Unmarshall a datetime dict."""
return datetime.datetime(day=tyme['day'],
month=tyme['month'],
year=tyme['year'],
hour=tyme['hour'],
minute=tyme['minute'],
second=tyme['second'],
microsecond=tyme['microsecond'])
def delta_seconds(before, after):
"""
Compute the difference in seconds between two date, time, or
datetime objects (as a float, to microsecond resolution).
"""
delta = after - before
try:
return delta.total_seconds()
except AttributeError:
return ((delta.days * 24 * 3600) + delta.seconds +
float(delta.microseconds) / (10 ** 6))
def is_soon(dt, window):
"""
Determines if time is going to happen in the next window seconds.
:params dt: the time
:params window: minimum seconds to remain to consider the time not soon
:return: True if expiration is within the given duration
"""
soon = (utcnow() + datetime.timedelta(seconds=window))
return normalize_time(dt) <= soon
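# Illustrative sketch (not part of the original file): the override helpers are
# typically used in tests to freeze and then advance this module's clock.
#   set_time_override(datetime.datetime(2013, 1, 1))
#   advance_time_seconds(30)
#   utcnow()                # -> datetime.datetime(2013, 1, 1, 0, 0, 30)
#   clear_time_override()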
|
alfayez/gnuradio | refs/heads/master | gr-analog/python/qa_dpll.py | 3 | #!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import analog_swig as analog
import math
class test_dpll_bb(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_dpll_bb_001(self):
# Test set/gets
period = 1.0
gain = 0.1
op = analog.dpll_bb(period, gain)
op.set_gain(0.2)
g = op.gain()
self.assertAlmostEqual(g, 0.2)
f = op.freq()
self.assertEqual(1/period, f)
d0 = 1.0 - 0.5*f;
d1 = op.decision_threshold()
self.assertAlmostEqual(d0, d1)
p = op.phase()
self.assertEqual(0, p)
def test_dpll_bb_002(self):
period = 4
gain = 0.1
src_data = 10*((period-1)*[0,] + [1,])
expected_result = src_data
src = gr.vector_source_b(src_data)
op = analog.dpll_bb(period, gain)
dst = gr.vector_sink_b()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_result, result_data, 4)
if __name__ == '__main__':
gr_unittest.run(test_dpll_bb, "test_dpll_bb.xml")
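# Illustrative note (not part of the original file): with period=4 the source
# pattern 10*((period-1)*[0,] + [1,]) is forty samples of 0,0,0,1 repeated --
# one pulse every `period` samples -- which a locked DPLL reproduces exactly,
# hence expected_result == src_data.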
|
PFCal-dev/HGCanalysis | refs/heads/master | test/recoStep_cfg.py | 2 | # Auto generated configuration file
# using:
# Revision: 1.20
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step3_diPhotonP10 --conditions auto:upgradePLS3 -n 100 --eventcontent FEVTDEBUGHLT -s RAW2DIGI,L1Reco,RECO --datatier GEN-SIM-RECO --customise SLHCUpgradeSimulations/Configuration/combinedCustoms.cust_2023HGCalMuon --geometry Extended2023HGCalMuon,Extended2023HGCalMuonReco --magField 38T_PostLS1 --filein file:input.root --fileout file:Events.root --no_exec
import FWCore.ParameterSet.Config as cms
process = cms.Process('RECO')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.Geometry.GeometryExtended2023HGCalMuonReco_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
import os,sys
if(len(sys.argv)<5):
print '\ncmsRun recoStep_cfg.py SampleName First_File Step_File [outputDir] [Skip_NEvents] [Process_NEvents]'
sys.exit()
preFix = sys.argv[2]
ffile = int(sys.argv[3])
step = int(sys.argv[4])
outputDir='./'
if len(sys.argv)>5 : outputDir=sys.argv[5]
skipThese=0
if len(sys.argv)>6 : skipThese=int(sys.argv[6])
processThese=-1
if len(sys.argv)>7 : processThese=int(sys.argv[7])
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
secondaryFileNames = cms.untracked.vstring(),
fileNames = cms.untracked.vstring()
)
from UserCode.HGCanalysis.storeTools_cff import fillFromStore
process.source = cms.Source("PoolSource",
secondaryFileNames = cms.untracked.vstring(),
fileNames = cms.untracked.vstring()
)
if preFix.find('/store')>=0:
process.source.fileNames=fillFromStore(preFix,ffile,step)
else:
process.source.fileNames=fillFromStore('/store/cmst3/group/hgcal/CMSSW/%s'%preFix,ffile,step)
process.source.skipEvents = cms.untracked.uint32(skipThese)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(processThese) )
print 'Will process %d events from '%processThese
print process.source.fileNames
print ' ....starting at %d'%skipThese
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.20 $'),
annotation = cms.untracked.string('step3_diPhotonP10 nevts:100'),
name = cms.untracked.string('Applications')
)
# Output definition
process.FEVTDEBUGHLToutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.FEVTDEBUGHLTEventContent.outputCommands,
fileName = cms.untracked.string('file:%s/Events_%d_%d_%d.root'%(outputDir,ffile,skipThese,processThese)),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('GEN-SIM-RECO')
)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:upgradePLS3', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1Reco_step = cms.Path(process.L1Reco)
process.reconstruction_step = cms.Path(process.reconstruction)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGHLToutput_step = cms.EndPath(process.FEVTDEBUGHLToutput)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.endjob_step,process.FEVTDEBUGHLToutput_step)
# customisation of the process.
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.combinedCustoms
from SLHCUpgradeSimulations.Configuration.combinedCustoms import cust_2023HGCalMuon
#call to customisation function cust_2023HGCalMuon imported from SLHCUpgradeSimulations.Configuration.combinedCustoms
process = cust_2023HGCalMuon(process)
# End of customisation functions
|
yxtj/Neuron | refs/heads/master | dataAnalysis/method_base.py | 2 | import math
def _check_length(l1,l2):
if l1!=l2:
raise ValueError('length of line1 ('+str(l1)+') do not match length of line2 ('+str(l2)+')!')
#Calculate Pearson coefficient for two vectors of the SAME LENGTH
def pearson_lines(line1,line2,param=None):
l=len(line1)
_check_length(l,len(line2))
c=0; s1=[0,0]; s2=[0,0];
for i in range(l):
a=line1[i]; b=line2[i]
s1[0]+=a; s1[1]+=b;
s2[0]+=a**2; s2[1]+=b**2;
c+=a*b
n_var1=l*s2[0]-s1[0]**2;
n_var2=l*s2[1]-s1[1]**2;
return (l*c-s1[0]*s1[1])/math.sqrt(n_var1*n_var2)
#Calculate Dot-Product (sum of XOR on elements) for two vectors of the same length
def dot_product(line1,line2,param=None):
l=len(line1)
_check_length(l,len(line2))
return sum(line1[i]^line2[i] for i in range(l))
#Calculate Norm_1 (sum of absolute differences) for two vectors of the same length
def norm1(line1,line2,param=None):
l=len(line1)
_check_length(l,len(line2))
return sum(abs(line1[i]-line2[i]) for i in range(l))
#Calculate Norm_2 (Euclidean distance) for two vectors of the same length
def norm2(line1,line2,param=None):
l=len(line1)
_check_length(l,len(line2))
return math.sqrt(sum((line1[i]-line2[i])**2 for i in range(l)))
#Calculate Norm_n for two vectors of the same length
def norm(line1,line2,param=None):
n=param['n']
l=len(line1)
_check_length(l,len(line2))
if n==1:
return norm1(line1,line2)
elif n==2:
return norm2(line1,line2)
return math.pow(sum(abs(line1[i]-line2[i])**n for i in range(l)),1.0/n)  # abs() keeps odd n well-defined
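# Illustrative sketch (not part of the original file): all of the helpers share
# the signature (line1, line2, param), so they can be swapped interchangeably.
if __name__ == '__main__':
    a, b = [1, 2, 5], [1, 2, 3]
    print(pearson_lines([1, 2, 3], [2, 4, 6]))  # 1.0 (perfectly correlated)
    print(norm1(a, b))                          # 2
    print(norm2(a, b))                          # 2.0
    print(norm(a, b, {'n': 3}))                 # 2.0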
|
unless/-Wind_iproj_JB_kernel_temp | refs/heads/master | Documentation/target/tcm_mod_builder.py | 4981 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
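# Example invocation (the module name below is hypothetical):
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
# This generates drivers/target/tcm_nab5000/ containing tcm_nab5000_base.h,
# tcm_nab5000_fabric.{c,h}, tcm_nab5000_configfs.c, plus Makefile and Kconfig
# stubs, and optionally wires them into drivers/target/Makefile and Kconfig.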
|
himikof/aiohttp_debugtoolbar | refs/heads/master | aiohttp_debugtoolbar/panels/performance.py | 1 | try:
import cProfile as profile
except ImportError: # pragma: no cover
try:
import profile
except ImportError: # pragma: no cover
profile = None
try:
import resource
except ImportError: # pragma: no cover
resource = None # Will fail on Win32 systems
try:
import pstats
except ImportError: # pragma: no cover
pstats = None # will fail on braindead Debian systems that package pstats
# separately from python for god-knows-what-reason
import asyncio
import time
from .base import DebugPanel
from ..utils import format_fname
__all__ = ['PerformanceDebugPanel']
class PerformanceDebugPanel(DebugPanel):
"""
Panel that looks at the performance of a request.
It will display the time a request took and, optionally, the
cProfile output.
"""
name = 'Performance'
user_activate = True
stats = None
function_calls = None
has_resource = bool(resource)
has_content = bool(pstats and profile)
template = 'performance.jinja2'
title = 'Performance'
nav_title = title
def __init__(self, request):
super().__init__(request)
if profile is not None:
self.profiler = profile.Profile()
def _wrap_timer_handler(self, handler):
if self.has_resource:
def resource_timer_handler(request):
_start_time = time.time()
self._start_rusage = resource.getrusage(resource.RUSAGE_SELF)
try:
result = yield from handler(request)
except:
raise
finally:
self._end_rusage = resource.getrusage(resource.RUSAGE_SELF)
self.total_time = (time.time() - _start_time) * 1000
return result
return resource_timer_handler
def noresource_timer_handler(request):
_start_time = time.time()
try:
result = yield from handler(request)
except:
raise
finally:
self.total_time = (time.time() - _start_time) * 1000
return result
return noresource_timer_handler
def _wrap_profile_handler(self, handler):
if not self.is_active:
return handler
@asyncio.coroutine
def profile_handler(request):
try:
self.profiler.enable()
try:
result = yield from handler(request)
finally:
self.profiler.disable()
except:
raise
finally:
stats = pstats.Stats(self.profiler)
function_calls = []
flist = stats.sort_stats('cumulative').fcn_list
for func in flist:
current = {}
info = stats.stats[func]
# Number of calls
if info[0] != info[1]:
current['ncalls'] = '%d/%d' % (info[1], info[0])
else:
current['ncalls'] = info[1]
# Total time
current['tottime'] = info[2] * 1000
# Quotient of total time divided by number of calls
if info[1]:
current['percall'] = info[2] * 1000 / info[1]
else:
current['percall'] = 0
# Cumulative time
current['cumtime'] = info[3] * 1000
                # Quotient of the cumulative time divided by the number
# of primitive calls.
if info[0]:
current['percall_cum'] = info[3] * 1000 / info[0]
else:
current['percall_cum'] = 0
# Filename
filename = pstats.func_std_string(func)
current['filename_long'] = filename
current['filename'] = format_fname(filename)
function_calls.append(current)
self.stats = stats
self.function_calls = function_calls
return result
return profile_handler
def wrap_handler(self, handler, context_switcher):
handler = self._wrap_profile_handler(handler)
handler = self._wrap_timer_handler(handler)
return handler
@property
def nav_subtitle(self):
return '%0.2fms' % (self.total_time)
def _elapsed_ru(self, name):
return getattr(self._end_rusage, name) - getattr(self._start_rusage,
name)
@asyncio.coroutine
def process_response(self, response):
vars = {'timing_rows': None, 'stats': None, 'function_calls': []}
if self.has_resource:
utime = 1000 * self._elapsed_ru('ru_utime')
stime = 1000 * self._elapsed_ru('ru_stime')
vcsw = self._elapsed_ru('ru_nvcsw')
ivcsw = self._elapsed_ru('ru_nivcsw')
# minflt = self._elapsed_ru('ru_minflt')
# majflt = self._elapsed_ru('ru_majflt')
# these are documented as not meaningful under Linux. If you're
        # running BSD, feel free to enable them, and add any others that
# I hadn't gotten to before I noticed that I was getting nothing
# but zeroes and that the docs agreed. :-(
#
# blkin = self._elapsed_ru('ru_inblock')
# blkout = self._elapsed_ru('ru_oublock')
# swap = self._elapsed_ru('ru_nswap')
# rss = self._end_rusage.ru_maxrss
# srss = self._end_rusage.ru_ixrss
# urss = self._end_rusage.ru_idrss
# usrss = self._end_rusage.ru_isrss
# TODO l10n on values
rows = (
('User CPU time', '%0.3f msec' % utime),
('System CPU time', '%0.3f msec' % stime),
('Total CPU time', '%0.3f msec' % (utime + stime)),
('Elapsed time', '%0.3f msec' % self.total_time),
('Context switches', '%d voluntary, %d involuntary' % (
vcsw, ivcsw)),
# (_('Memory use'), '%d max RSS, %d shared, %d unshared' % (
# rss, srss, urss + usrss)),
# (_('Page faults'), '%d no i/o, %d requiring i/o' % (
# minflt, majflt)),
# (_('Disk operations'), '%d in, %d out, %d swapout' % (
# blkin, blkout, swap)),
)
vars['timing_rows'] = rows
if self.is_active:
vars['stats'] = self.stats
vars['function_calls'] = self.function_calls
self.data = vars
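# A minimal usage sketch (assumes the package's standard setup() entry point;
# exact wiring may differ between aiohttp_debugtoolbar versions):
#
#   import aiohttp_debugtoolbar
#   from aiohttp import web
#
#   app = web.Application()
#   aiohttp_debugtoolbar.setup(app)
#
# Timing rows are collected for every request; the cProfile table additionally
# requires activating this panel in the toolbar UI (user_activate = True above).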
|
DMLoy/ECommerceBasic | refs/heads/master | lib/python2.7/site-packages/django/contrib/localflavor/mk/forms.py | 109 | from __future__ import absolute_import, unicode_literals
import datetime
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
from django.contrib.localflavor.mk.mk_choices import MK_MUNICIPALITIES
class MKIdentityCardNumberField(RegexField):
"""
A Macedonian ID card number. Accepts both old and new format.
"""
default_error_messages = {
'invalid': _('Identity card numbers must contain'
' either 4 to 7 digits or an uppercase letter and 7 digits.'),
}
def __init__(self, *args, **kwargs):
kwargs['min_length'] = None
kwargs['max_length'] = 8
regex = r'(^[A-Z]{1}\d{7}$)|(^\d{4,7}$)'
super(MKIdentityCardNumberField, self).__init__(regex, *args, **kwargs)
class MKMunicipalitySelect(Select):
"""
A form ``Select`` widget that uses a list of Macedonian municipalities as
choices. The label is the name of the municipality and the value
is a 2 character code for the municipality.
"""
def __init__(self, attrs=None):
super(MKMunicipalitySelect, self).__init__(attrs, choices = MK_MUNICIPALITIES)
class UMCNField(RegexField):
"""
A form field that validates input as a unique master citizen
number.
The format of the unique master citizen number has been kept the same from
    Yugoslavia. It is still in use in other countries as well, not solely
    in Macedonia. For more information see:
https://secure.wikimedia.org/wikipedia/en/wiki/Unique_Master_Citizen_Number
A value will pass validation if it complies to the following rules:
* Consists of exactly 13 digits
* The first 7 digits represent a valid past date in the format DDMMYYY
* The last digit of the UMCN passes a checksum test
"""
default_error_messages = {
'invalid': _('This field should contain exactly 13 digits.'),
'date': _('The first 7 digits of the UMCN must represent a valid past date.'),
'checksum': _('The UMCN is not valid.'),
}
def __init__(self, *args, **kwargs):
kwargs['min_length'] = None
kwargs['max_length'] = 13
super(UMCNField, self).__init__(r'^\d{13}$', *args, **kwargs)
def clean(self, value):
value = super(UMCNField, self).clean(value)
if value in EMPTY_VALUES:
return ''
if not self._validate_date_part(value):
raise ValidationError(self.error_messages['date'])
if self._validate_checksum(value):
return value
else:
raise ValidationError(self.error_messages['checksum'])
def _validate_checksum(self, value):
a,b,c,d,e,f,g,h,i,j,k,l,K = [int(digit) for digit in value]
m = 11 - (( 7*(a+g) + 6*(b+h) + 5*(c+i) + 4*(d+j) + 3*(e+k) + 2*(f+l)) % 11)
if (m >= 1 and m <= 9) and K == m:
return True
elif m == 11 and K == 0:
return True
else:
return False
def _validate_date_part(self, value):
daypart, monthpart, yearpart = int(value[:2]), int(value[2:4]), int(value[4:7])
if yearpart >= 800:
yearpart += 1000
else:
yearpart += 2000
try:
date = datetime.datetime(year = yearpart, month = monthpart, day = daypart).date()
except ValueError:
return False
if date >= datetime.datetime.now().date():
return False
return True
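# A worked restatement of the checksum rule implemented in
# UMCNField._validate_checksum() above (digits a..l plus control digit K):
#
#   m = 11 - ((7*(a+g) + 6*(b+h) + 5*(c+i) + 4*(d+j) + 3*(e+k) + 2*(f+l)) % 11)
#
# The UMCN passes when 1 <= m <= 9 and K == m, or when m == 11 and K == 0;
# every other combination raises the 'checksum' ValidationError.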
|
dyrock/trafficserver | refs/heads/master | doc/conf.py | 2 | # -*- coding: utf-8 -*-
#
# Apache Traffic Server documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 4 06:23:15 2013.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from datetime import date
from sphinx import version_info
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('ext'))
sys.path.insert(0, os.path.abspath('.'))
from manpages import man_pages
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.graphviz',
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxcontrib.plantuml',
'traffic-server',
]
# Contains values that are dependent on configure.ac.
execfile('ext/local-config.py')
if version_info >= (1, 4):
extensions.append('sphinx.ext.imgmath')
else:
extensions.append('sphinx.ext.pngmath')
# XXX Disabling docxygen for now, since it make RTD documentation builds time
# out, eg. https://readthedocs.org/projects/trafficserver/builds/3525976/
# extensions += [
# 'doxygen',
# ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Apache Traffic Server'
copyright = u'{}, dev@trafficserver.apache.org'.format(date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Extract the version from the configure.ac file with regex so as to
# work identically when building with Autotools (e.g. $ make html)
# and without (e.g. on Read the Docs)
import re
contents = open('../configure.ac').read()
match = re.compile('m4_define\(\[TS_VERSION_S],\[(.*?)]\)').search(contents)
# The full version, including alpha/beta/rc tags.
release = match.group(1)
# The short X.Y version.
version = '.'.join(release.split('.', 2)[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
locale_dirs = ['locale/']
gettext_compact = False
# HACK for Read-the-Docs
# Generate .mo files just in time
if os.environ.get('READTHEDOCS') == 'True':
import polib
print "Generating .mo files",
for locale_dir in locale_dirs:
for path, dummy, filenames in os.walk(locale_dir):
for filename in filenames:
po_file = os.path.join(path, filename)
base, ext = os.path.splitext(po_file)
if ext == ".po":
mo_file = base + ".mo"
po = polib.pofile(po_file)
po.save_as_mofile(fpath=mo_file)
print "done"
else:
# On RedHat-based distributions, install the python-sphinx_rtd_theme package
    # to get an end result that looks more like readthedocs.org.
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except:
pass
# End of HACK
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
nitpicky = True
nitpick_ignore = [ ('c:type', 'int64_t')
, ('c:type', 'bool')
, ('c:type', 'sockaddr')
, ('cpp:identifier', 'T') # template arg
, ('cpp:identifier', 'F') # template arg
, ('cpp:identifier', 'Args') # variadic template arg
, ('cpp:identifier', 'Rest') # variadic template arg
]
# Autolink issue references.
# See Customizing the Parser in the docutils.parsers.rst module.
from docutils import nodes
from docutils.parsers.rst import states
from docutils.utils import punctuation_chars
from docutils.utils import unescape
# Customize parser.inliner in the only way that Sphinx supports.
# docutils.parsers.rst.Parser takes an instance of states.Inliner or a
# subclass, but Sphinx initializes the parser without any arguments,
# in SphinxStandaloneReader.set_parser('restructuredtext'),
# which is called from Publisher.set_components().
# states.Inliner isn't a new-style class, so super() isn't an option.
BaseInliner = states.Inliner
class Inliner(states.Inliner):
def init_customizations(self, settings):
self.__class__ = BaseInliner
BaseInliner.init_customizations(self, settings)
self.__class__ = Inliner
# Copied from states.Inliner.init_customizations().
# In Docutils 0.13 these are locals.
if not hasattr(self, 'start_string_prefix'):
self.start_string_prefix = (u'(^|(?<=\\s|[%s%s]))' %
(punctuation_chars.openers,
punctuation_chars.delimiters))
if not hasattr(self, 'end_string_suffix'):
self.end_string_suffix = (u'($|(?=\\s|[\x00%s%s%s]))' %
(punctuation_chars.closing_delimiters,
punctuation_chars.delimiters,
punctuation_chars.closers))
issue = re.compile(
ur'''
{start_string_prefix}
TS-\d+
{end_string_suffix}'''.format(
start_string_prefix=self.start_string_prefix,
end_string_suffix=self.end_string_suffix),
re.VERBOSE | re.UNICODE)
self.implicit_dispatch.append((issue, self.issue_reference))
def issue_reference(self, match, lineno):
text = match.group(0)
rawsource = unescape(text, True)
text = unescape(text, False)
refuri = 'https://issues.apache.org/jira/browse/' + text
return [nodes.reference(rawsource, text, refuri=refuri)]
states.Inliner = Inliner
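# Net effect of the customization above: a bare issue key such as TS-1234 in
# any reStructuredText source is rendered as a hyperlink to
# https://issues.apache.org/jira/browse/TS-1234 (see issue_reference() above).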
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'agogo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'static/images/trans_logo_tm_380x69.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'static/images/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Include a stylesheet that overrides default table styling, to provide
# content wrapping.
html_context = {
'css_files': [
'_static/override.css'
]
}
if os.environ.get('READTHEDOCS', None) == 'True':
html_context = {
'css_files': [
'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/override.css'
]
}
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ApacheTrafficServerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
if tags.has('latex_a4'):
latex_elements['papersize'] = 'a4paper'
elif tags.has('latex_paper'):
    latex_elements['papersize'] = 'letterpaper'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ApacheTrafficServer.tex', u'Apache Traffic Server Documentation',
u'dev@trafficserver.apache.org', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# The global "man_pages" is imported from ts/manpages.py
# If true, show URL addresses after external links.
#man_show_urls = False
# Get the manual page description from the reStructuredText document.
# This keeps the list of manual pages consistent with the source
# documents and includes the same brief description in both the HTML
# and manual page outputs. (A standalone sketch of the extraction rule
# follows this file.)
from docutils import nodes
from docutils.transforms import frontmatter
from sphinx.writers import manpage
# Override ManualPageWriter and ManualPageTranslator in the only way
# that Sphinx supports
BaseWriter = manpage.ManualPageWriter
class ManualPageWriter(BaseWriter):
def translate(self):
transform = frontmatter.DocTitle(self.document)
section, index = transform.candidate_index(self.document)
if index:
# A sentence after the title is the manual page description
if len(section) > 1 and isinstance(section[1], nodes.paragraph):
description = section.pop(1).astext()
description = description[:1].lower() + description[1:]
description = description.rstrip('.')
self.document.settings.subtitle = description
# Instead of section_level = -1, use the standard Docutils
# DocTitle transform to hide the top level title
transform.promote_title(self.document)
# The title is the manual page name
transform.set_metadata()
BaseWriter.translate(self)
manpage.ManualPageWriter = ManualPageWriter
BaseTranslator = manpage.ManualPageTranslator
class ManualPageTranslator(BaseTranslator):
def __init__(self, builder, *args, **kwds):
BaseTranslator.__init__(self, builder, *args, **kwds)
# Instead of section_level = -1, use the standard Docutils
# DocTitle transform to hide the top level title
self.section_level = 0
manpage.ManualPageTranslator = ManualPageTranslator
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ApacheTrafficServer', u'Apache Traffic Server Documentation',
u'dev@trafficserver.apache.org', 'ApacheTrafficServer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Apache Traffic Server'
epub_author = u'dev@trafficserver.apache.org'
epub_publisher = u'dev@trafficserver.apache.org'
epub_copyright = u'2013, dev@trafficserver.apache.org'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
mathjax_path = 'https://docs.trafficserver.apache.org/__RTD/MathJax.js'
# Enable marking bit fields as 'bitfield_N'.
# Currently parameterized fields don't work. When they do, we should change to
# 'bitfield(N)'.
cpp_id_attributes = [ 'bitfield_1', 'bitfield_3', 'bitfield_24' ]
|
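The ManualPageWriter override above derives each manual page's description from the first paragraph after the document title. Below is a minimal standalone sketch of that extraction rule using plain docutils; it is not part of the conf.py, and the RST source in it is hypothetical.

# Standalone sketch: derive a man page description the way the
# ManualPageWriter override above does. The RST source here is made up.
from docutils import nodes
from docutils.core import publish_doctree

source = """\
traffic_ctl
===========

Control a running Apache Traffic Server instance.
"""

# publish_doctree applies the standard DocTitle transform, so the lone
# top-level section title is promoted to the document title and the first
# remaining node is the candidate description paragraph.
doctree = publish_doctree(source)
description = doctree.next_node(nodes.paragraph).astext()
description = description[:1].lower() + description[1:]  # lowercase the first letter
description = description.rstrip('.')                    # drop the trailing period
print(description)  # control a running Apache Traffic Server instance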
mzupan/Diamond | refs/heads/master | src/collectors/aurora/aurora.py | 48 | import diamond.collector
import urllib2
class AuroraCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(AuroraCollector,
self).get_default_config_help()
config_help.update({
'host': 'Scheduler Hostname',
'port': 'Scheduler HTTP Metrics Port',
'path': 'Collector path. Defaults to "aurora"',
            'scheme': 'URL scheme to use when connecting. Defaults to "http"'
})
return config_help
def get_default_config(self):
config = super(AuroraCollector, self).get_default_config()
config.update({
'path': 'aurora',
'host': 'localhost',
'port': 8081,
'scheme': 'http'
})
return config
def collect(self):
url = "%s://%s:%s/vars" % (self.config['scheme'],
self.config['host'],
self.config['port'])
response = urllib2.urlopen(url)
for line in response.readlines():
properties = line.split()
            # Not all lines returned will have a numeric metric.
            # To account for this, we attempt to cast the 'value'
            # portion as a float. If that's not possible, NBD, we
            # just move on. (A standalone sketch of this rule follows this file.)
try:
if len(properties) > 1:
subpath = properties[0].replace('/', '.').replace('_', '.')
value = float(properties[1])
self.publish(subpath, value)
except ValueError:
continue
|
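A standalone sketch of the parsing rule used in AuroraCollector.collect() above, run against hypothetical /vars output; the metric names are illustrative, not taken from a real scheduler.

# Each whitespace-separated "name value" line is published only when the
# value parses as a float; '/' and '_' in the name become '.' separators.
sample_vars = """\
jvm_uptime_secs 88737
framework_registered 1
build_git_revision 4e29ac3
"""
for line in sample_vars.splitlines():
    properties = line.split()
    try:
        if len(properties) > 1:
            subpath = properties[0].replace('/', '.').replace('_', '.')
            value = float(properties[1])
            print("%s %s" % (subpath, value))  # e.g. jvm.uptime.secs 88737.0
    except ValueError:
        continue  # build.git.revision has no numeric value, so it is skipped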
wangmiao1981/spark | refs/heads/master | examples/src/main/python/sql/streaming/structured_network_wordcount_windowed.py | 27 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
r"""
Counts words in UTF8 encoded, '\n' delimited text received from the network over a
sliding window of configurable duration. Each line from the network is tagged
with a timestamp that is used to determine the windows into which it falls.
Usage: structured_network_wordcount_windowed.py <hostname> <port> <window duration>
[<slide duration>]
<hostname> and <port> describe the TCP server that Structured Streaming
would connect to receive data.
<window duration> gives the size of the window, specified as an integer number of seconds
<slide duration> gives the amount of time successive windows are offset from one another,
given in the same units as above. <slide duration> should be less than or equal to
<window duration>. If the two are equal, successive windows have no overlap. If
<slide duration> is not provided, it defaults to <window duration>. (A worked example
of the window arithmetic follows this file.)
To run this on your local machine, you need to first run a Netcat server
`$ nc -lk 9999`
and then run the example
`$ bin/spark-submit
examples/src/main/python/sql/streaming/structured_network_wordcount_windowed.py
localhost 9999 <window duration> [<slide duration>]`
One recommended <window duration>, <slide duration> pair is 10, 5
"""
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
from pyspark.sql.functions import window
if __name__ == "__main__":
if len(sys.argv) != 5 and len(sys.argv) != 4:
msg = ("Usage: structured_network_wordcount_windowed.py <hostname> <port> "
"<window duration in seconds> [<slide duration in seconds>]")
print(msg, file=sys.stderr)
sys.exit(-1)
host = sys.argv[1]
port = int(sys.argv[2])
windowSize = int(sys.argv[3])
slideSize = int(sys.argv[4]) if (len(sys.argv) == 5) else windowSize
if slideSize > windowSize:
print("<slide duration> must be less than or equal to <window duration>", file=sys.stderr)
windowDuration = '{} seconds'.format(windowSize)
slideDuration = '{} seconds'.format(slideSize)
spark = SparkSession\
.builder\
.appName("StructuredNetworkWordCountWindowed")\
.getOrCreate()
# Create DataFrame representing the stream of input lines from connection to host:port
lines = spark\
.readStream\
.format('socket')\
.option('host', host)\
.option('port', port)\
.option('includeTimestamp', 'true')\
.load()
# Split the lines into words, retaining timestamps
# split() splits each line into an array, and explode() turns the array into multiple rows
words = lines.select(
explode(split(lines.value, ' ')).alias('word'),
lines.timestamp
)
# Group the data by window and word and compute the count of each group
windowedCounts = words.groupBy(
window(words.timestamp, windowDuration, slideDuration),
words.word
).count().orderBy('window')
# Start running the query that prints the windowed word counts to the console
query = windowedCounts\
.writeStream\
.outputMode('complete')\
.format('console')\
.option('truncate', 'false')\
.start()
query.awaitTermination()
|
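A worked example of the window arithmetic described in the docstring above, in pure Python with no Spark dependency. Window start times fall on multiples of the slide duration, and an event belongs to every half-open window [start, start + window) that contains its timestamp. The function name and sample values are illustrative only.

def windows_for(event_secs, window_secs, slide_secs):
    # Walk slide boundaries backwards from the last boundary at or before
    # the event until the windows no longer cover the event's timestamp.
    start = (event_secs // slide_secs) * slide_secs
    covering = []
    while start > event_secs - window_secs:
        covering.append((start, start + window_secs))
        start -= slide_secs
    return sorted(covering)

# With the recommended 10-second window and 5-second slide, an event at
# t=12s lands in two overlapping windows:
print(windows_for(12, 10, 5))  # [(5, 15), (10, 20)]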
huangzonghao/ycmd | refs/heads/master | examples/.ycm_extra_conf.py | 37 | #!/usr/bin/env python
#
# Copyright (C) 2014 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/c++/v1',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
  # The compile_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
# This is the entry point; this function is called by ycmd to produce flags for
# a file. (A hypothetical usage sketch follows this file.)
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
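A hypothetical usage sketch for the file above, showing how MakeRelativePathsInFlagsAbsolute rewrites path-taking flags while leaving absolute paths untouched; the project directory and input flags are made up.

flags_in = [ '-I', 'include', '-isystem', '/usr/include', '--sysroot=build/sysroot' ]
print( MakeRelativePathsInFlagsAbsolute( flags_in, '/home/dev/project' ) )
# ['-I', '/home/dev/project/include', '-isystem', '/usr/include',
#  '--sysroot=/home/dev/project/build/sysroot']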
malcolmw/seismic-python | refs/heads/master | seispy/pandas/__init__.py | 3 | from . import io
from . import catalog
from . import time
|
miguelparaiso/OdooAccessible | refs/heads/master | addons/l10n_fr/wizard/fr_report_bilan.py | 374 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
class account_bilan_report(osv.osv_memory):
_name = 'account.bilan.report'
_description = 'Account Bilan Report'
def _get_default_fiscalyear(self, cr, uid, context=None):
fiscalyear_id = self.pool.get('account.fiscalyear').find(cr, uid)
return fiscalyear_id
_columns = {
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True),
}
_defaults = {
'fiscalyear_id': _get_default_fiscalyear
}
def print_bilan_report(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids', [])
data = {}
data['form'] = {}
data['ids'] = active_ids
data['form']['fiscalyear_id'] = self.browse(cr, uid, ids)[0].fiscalyear_id.id
return self.pool['report'].get_action(
cr, uid, ids, 'l10n_fr.report_l10nfrbilan', data=data, context=context
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|