| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars; ⌀ = null) |
|---|---|---|---|---|
timothycrosley/webelements_site
|
refs/heads/master
|
pygments/styles/manni.py
|
26
|
# -*- coding: utf-8 -*-
"""
pygments.styles.manni
~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
This is a port of the style used in the `php port`_ of pygments
by Manni. The style is called 'default' there.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class ManniStyle(Style):
"""
A colorful style, inspired by the terminal highlighting style.
"""
background_color = '#f0f3f3'
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #0099FF',
Comment.Preproc: 'noitalic #009999',
Comment.Special: 'bold',
Keyword: 'bold #006699',
Keyword.Pseudo: 'nobold',
Keyword.Type: '#007788',
Operator: '#555555',
Operator.Word: 'bold #000000',
Name.Builtin: '#336666',
Name.Function: '#CC00FF',
Name.Class: 'bold #00AA88',
Name.Namespace: 'bold #00CCFF',
Name.Exception: 'bold #CC0000',
Name.Variable: '#003333',
Name.Constant: '#336600',
Name.Label: '#9999FF',
Name.Entity: 'bold #999999',
Name.Attribute: '#330099',
Name.Tag: 'bold #330099',
Name.Decorator: '#9999FF',
String: '#CC3300',
String.Doc: 'italic',
String.Interpol: '#AA0000',
String.Escape: 'bold #CC3300',
String.Regex: '#33AAAA',
String.Symbol: '#FFCC33',
String.Other: '#CC3300',
Number: '#FF6600',
Generic.Heading: 'bold #003300',
Generic.Subheading: 'bold #003300',
Generic.Deleted: 'border:#CC0000 bg:#FFCCCC',
Generic.Inserted: 'border:#00CC00 bg:#CCFFCC',
Generic.Error: '#FF0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: 'bold #000099',
Generic.Output: '#AAAAAA',
Generic.Traceback: '#99CC66',
Error: 'bg:#FFAAAA #AA0000'
}
|
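A minimal usage sketch for the style above, assuming Pygments is installed and the style is registered under the name 'manni' as in the upstream distribution:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# Styles are looked up by name; 'manni' resolves to the ManniStyle class above.
formatter = HtmlFormatter(style='manni')
print(highlight('print("hello")', PythonLexer(), formatter))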
jamesblunt/gunicorn
|
refs/heads/master
|
examples/longpoll.py
|
22
|
# -*- coding: utf-8 -*-
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import sys
import time
class TestIter(object):
def __iter__(self):
lines = ['line 1\n', 'line 2\n']
for line in lines:
yield line
time.sleep(20)
def app(environ, start_response):
"""Application which cooperatively pauses 20 seconds (needed to surpass normal timeouts) before responding"""
data = b'Hello, World!\n'
status = '200 OK'
response_headers = [
('Content-type', 'text/plain'),
('Transfer-Encoding', "chunked"),
]
sys.stdout.write('request received')
sys.stdout.flush()
start_response(status, response_headers)
return TestIter()
|
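A usage note for the long-poll example above: each iteration sleeps 20 seconds, so the worker timeout must exceed that or gunicorn will kill the worker mid-response. A plausible invocation, assuming the file is saved as longpoll.py:

gunicorn --timeout 60 longpoll:app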
kunaaljain/gnome-news
|
refs/heads/master
|
gnomenews/post.py
|
1
|
# Copyright (C) 2015 Felipe Borges <felipeborges@gnome.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import WebKit2, GObject, GLib
import hashlib
import os.path
import re
from gnomenews import log
import logging
logger = logging.getLogger(__name__)
THUMBNAIL_WIDTH = 256
THUMBNAIL_HEIGHT = 256
# FIXME: Remove duplication with application.py
CACHE_PATH = "~/.cache/gnome-news"
NAME_REGEX = re.compile(r"\(([^\)]+)\)")
class Post(GObject.GObject):
__gsignals__ = {
'info-updated': (GObject.SignalFlags.RUN_FIRST, None, (GObject.GObject,)),
}
@log
def __init__(self, cursor):
GObject.GObject.__init__(self)
self.cursor = cursor
self.title = cursor['title']
self.content = cursor['content']
self.url = cursor['url']
self.author = self.sanitize_author(cursor['fullname'])
self.author_email = cursor['author_email']
self.author_homepage = cursor['author_homepage']
# Check cache first
hashed_url = hashlib.md5(cursor['url'].encode()).hexdigest()
self.cached_thumbnail_path = os.path.join(os.path.expanduser(CACHE_PATH), '%s.png' % hashed_url)
GLib.idle_add(self.try_to_load_image_from_cache)
@staticmethod
@log
def sanitize_author(author):
"""Separates Name from Email in an author string
RSS 2.0 from https://planet.gnome.org/rss20.xml returns
"me@example.com - (Firstname Lastname)" as author fiels. This is a
workaround for it.
TODO: fix this hack in pluggable methods or in libgrss
Args:
author (str): an author string extracted from a rss feed
"""
try:
return re.findall(NAME_REGEX, author)[0]
except:
return author
@log
def try_to_load_image_from_cache(self):
if os.path.isfile(self.cached_thumbnail_path):
self.thumbnail = self.cached_thumbnail_path
self.emit('info-updated', self)
else:
self.webview = WebKit2.WebView(sensitive=False)
self.webview.connect('load-changed', self._draw_thumbnail)
self._generate_thumbnail()
@log
def _generate_thumbnail(self):
self.webview.load_html("""
<div style="width: 250px">
<h3 style="margin-bottom: 2px">%s</h3>
<small style="color: #333">%s</small>
<small style="color: #9F9F9F">%s</small>
</div>""" % (self.title, self.author, self.content))
@log
def _draw_thumbnail(self, webview, event):
if event == WebKit2.LoadEvent.FINISHED:
self.webview.get_snapshot(WebKit2.SnapshotRegion.FULL_DOCUMENT,
WebKit2.SnapshotOptions.NONE,
None, self._save_thumbnail, None)
@log
def _save_thumbnail(self, webview, res, data):
try:
original_surface = self.webview.get_snapshot_finish(res)
import cairo
new_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, THUMBNAIL_WIDTH, THUMBNAIL_HEIGHT)
ctx = cairo.Context(new_surface)
ctx.set_source_surface(original_surface, 0, 0)
ctx.paint()
new_surface.write_to_png(self.cached_thumbnail_path)
self.thumbnail = self.cached_thumbnail_path
self.emit('info-updated', self)
except Exception as e:
logger.error("Could not draw thumbnail for %s: %s" % (self.title, str(e)))
|
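A self-contained sketch of what sanitize_author does with the planet.gnome.org author format described in its docstring (outside the GObject machinery):

import re

NAME_REGEX = re.compile(r"\(([^\)]+)\)")
author = "me@example.com - (Firstname Lastname)"
# findall returns the parenthesized captures; [0] picks the first one
print(re.findall(NAME_REGEX, author)[0])  # Firstname Lastname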
amyvmiwei/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/ctypes/test/test_repr.py
|
170
|
from ctypes import *
import unittest
subclasses = []
for base in [c_byte, c_short, c_int, c_long, c_longlong,
c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong,
c_float, c_double, c_longdouble, c_bool]:
class X(base):
pass
subclasses.append(X)
class X(c_char):
pass
# This test checks if the __repr__ is correct for subclasses of simple types
class ReprTest(unittest.TestCase):
def test_numbers(self):
for typ in subclasses:
base = typ.__bases__[0]
self.assertTrue(repr(base(42)).startswith(base.__name__))
self.assertEqual("<X object at", repr(typ(42))[:12])
def test_char(self):
self.assertEqual("c_char(b'x')", repr(c_char(b'x')))
self.assertEqual("<X object at", repr(X(b'x'))[:12])
if __name__ == "__main__":
unittest.main()
|
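A quick standalone check of the behavior the test asserts: ctypes simple types get a value-style repr, while user subclasses fall back to the default object repr (addresses will vary):

from ctypes import c_int

class X(c_int):
    pass

print(repr(c_int(42)))  # c_int(42)
print(repr(X(42)))      # <X object at 0x...>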
zaccoz/odoo
|
refs/heads/8.0
|
addons/procurement/wizard/schedulers_all.py
|
306
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import threading
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import osv
from openerp.api import Environment
_logger = logging.getLogger(__name__)
class procurement_compute_all(osv.osv_memory):
_name = 'procurement.order.compute.all'
_description = 'Compute all schedulers'
def _procure_calculation_all(self, cr, uid, ids, context=None):
"""
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
"""
with Environment.manage():
proc_obj = self.pool.get('procurement.order')
# As this function runs in a new thread, we need to open a new cursor, because the old one may be closed
new_cr = self.pool.cursor()
scheduler_cron_id = self.pool['ir.model.data'].get_object_reference(new_cr, SUPERUSER_ID, 'procurement', 'ir_cron_scheduler_action')[1]
# Avoid running the scheduler multiple times at the same time
try:
with tools.mute_logger('openerp.sql_db'):
new_cr.execute("SELECT id FROM ir_cron WHERE id = %s FOR UPDATE NOWAIT", (scheduler_cron_id,))
except Exception:
_logger.info('Attempt to run procurement scheduler aborted, as already running')
new_cr.rollback()
new_cr.close()
return {}
user = self.pool.get('res.users').browse(new_cr, uid, uid, context=context)
comps = [x.id for x in user.company_ids]
for comp in comps:
proc_obj.run_scheduler(new_cr, uid, use_new_cursor=new_cr.dbname, company_id = comp, context=context)
#close the new cursor
new_cr.close()
return {}
def procure_calculation(self, cr, uid, ids, context=None):
"""
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
"""
threaded_calculation = threading.Thread(target=self._procure_calculation_all, args=(cr, uid, ids, context))
threaded_calculation.start()
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
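The single-run guard above hinges on SELECT ... FOR UPDATE NOWAIT: a second transaction touching the same ir_cron row errors out immediately instead of blocking. A minimal sketch of the same pattern with plain psycopg2 (hypothetical table and DSN, not Odoo API):

import psycopg2

conn = psycopg2.connect('dbname=test')  # hypothetical DSN
cur = conn.cursor()
try:
    # Raises OperationalError at once if another transaction holds the row lock.
    cur.execute('SELECT id FROM jobs WHERE id = %s FOR UPDATE NOWAIT', (1,))
except psycopg2.OperationalError:
    conn.rollback()  # another worker is already running; bail out
else:
    pass  # safe to do the work while the row lock is held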
ppanczyk/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/indexed_items.py
|
57
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: indexed_items
author: Michael DeHaan <michael.dehaan@gmail.com>
version_added: "1.3"
short_description: rewrites lists to return 'indexed items'
description:
- use this lookup if you want to loop over an array and also get the numeric index of where you are in the array as you go
- any list given will be transformed, with each resulting element having its position in item.0 and its value in item.1
options:
_terms:
description: list of items
required: True
"""
EXAMPLES = """
- name: indexed loop demo
debug:
msg: "at array position {{ item.0 }} there is a value {{ item.1 }}"
with_indexed_items:
- "{{ some_list }}"
"""
RETURN = """
_raw:
description:
- list with each item.0 giving you the position and item.1 the value
type: list
"""
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, variables, **kwargs):
if not isinstance(terms, list):
raise AnsibleError("with_indexed_items expects a list")
items = self._flatten(terms)
return list(zip(range(len(items)), items))
|
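The run() method above is equivalent to Python's built-in enumerate applied to the flattened terms; a quick check:

items = ['a', 'b', 'c']
print(list(zip(range(len(items)), items)))  # [(0, 'a'), (1, 'b'), (2, 'c')]
print(list(enumerate(items)))               # same pairs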
siosio/intellij-community
|
refs/heads/master
|
python/testData/refactoring/inlineFunction/localClassUse/main.py
|
12
|
def MyClass():
def __init__(self): # init is necessary to check resolve target
pass
def my_function():
res = MyClass() # should not be renamed
return res
x = my_fun<caret>ction()
|
nathanaevitas/odoo
|
refs/heads/master
|
openerp/addons/l10n_gr/__openerp__.py
|
260
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 P. Christeas <p_christ@hol.gr>. All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Greece - Accounting',
'version': '0.2',
'author': 'P. Christeas, OpenERP SA.',
'website': 'http://openerp.hellug.gr/',
'category': 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Greece.
==================================================================
Greek accounting chart and localization.
""",
'depends': ['base', 'account', 'base_iban', 'base_vat', 'account_chart'],
'demo': [],
'data': [ 'account_types.xml',
'account_chart.xml',
'account_full_chart.xml',
'account_tax.xml',
'account_tax_vat.xml',
'l10n_gr_wizard.xml'
],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
floh1695/projecteuler
|
refs/heads/master
|
python/projecteuler/problem_0.py
|
1
|
#!/usr/bin/python2
"""
"""
from __future__ import print_function
def run():
return 0, None
if __name__ == '__main__':
from timeit import timeit
timeit(run)
|
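A caution on the stub above: timeit(run) executes the callable 1,000,000 times by default and returns the total seconds, so a slow solution should pass number explicitly. A sketch:

from timeit import timeit

def run():
    return 0, None

# One timed execution instead of the default one million.
print(timeit(run, number=1))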
jamestwebber/scipy
|
refs/heads/master
|
scipy/optimize/_linprog_rs.py
|
2
|
"""Revised simplex method for linear programming
The *revised simplex* method uses the method described in [1]_, except
that a factorization [2]_ of the basis matrix, rather than its inverse,
is efficiently maintained and used to solve the linear systems at each
iteration of the algorithm.
.. versionadded:: 1.3.0
References
----------
.. [1] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [2] Bartels, Richard H. "A stabilization of the simplex method."
Numerische Mathematik 16.5 (1971): 414-434.
"""
# Author: Matt Haberland
from __future__ import division, absolute_import, print_function
import numpy as np
from scipy.linalg import solve
from .optimize import _check_unknown_options
from ._bglu_dense import LU
from ._bglu_dense import BGLU as BGLU
from scipy.linalg import LinAlgError
from numpy.linalg.linalg import LinAlgError as LinAlgError2
from ._linprog_util import _postsolve
from .optimize import OptimizeResult
def _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp,
maxupdate, mast, pivot):
"""
The purpose of phase one is to find an initial basic feasible solution
(BFS) to the original problem.
Generates an auxiliary problem with a trivial BFS and an objective that
minimizes infeasibility of the original problem. Solves the auxiliary
problem using the main simplex routine (phase two). This either yields
a BFS to the original problem or determines that the original problem is
infeasible. If feasible, phase one detects redundant rows in the original
constraint matrix and removes them, then chooses additional indices as
necessary to complete a basis/BFS for the original problem.
"""
m, n = A.shape
status = 0
# generate auxiliary problem to get initial BFS
A, b, c, basis, x, status = _generate_auxiliary_problem(A, b, x0, tol)
if status == 6:
residual = c.dot(x)
iter_k = 0
return x, basis, A, b, residual, status, iter_k
# solve auxiliary problem
phase_one_n = n
iter_k = 0
x, basis, status, iter_k = _phase_two(c, A, x, basis, callback,
postsolve_args,
maxiter, tol, disp,
maxupdate, mast, pivot,
iter_k, phase_one_n)
# check for infeasibility
residual = c.dot(x)
if status == 0 and residual > tol:
status = 2
# drive artificial variables out of basis
# TODO: test redundant row removal better
# TODO: make solve more efficient with BGLU? This could take a while.
keep_rows = np.ones(m, dtype=bool)
for basis_column in basis[basis >= n]:
B = A[:, basis]
try:
basis_finder = np.abs(solve(B, A)) # inefficient
pertinent_row = np.argmax(basis_finder[:, basis_column])
eligible_columns = np.ones(n, dtype=bool)
eligible_columns[basis[basis < n]] = 0
eligible_column_indices = np.where(eligible_columns)[0]
index = np.argmax(basis_finder[:, :n]
[pertinent_row, eligible_columns])
new_basis_column = eligible_column_indices[index]
if basis_finder[pertinent_row, new_basis_column] < tol:
keep_rows[pertinent_row] = False
else:
basis[basis == basis_column] = new_basis_column
except (LinAlgError, LinAlgError2):
status = 4
# form solution to original problem
A = A[keep_rows, :n]
basis = basis[keep_rows]
x = x[:n]
m = A.shape[0]
return x, basis, A, b, residual, status, iter_k
def _get_more_basis_columns(A, basis):
"""
Called when the auxiliary problem terminates with artificial columns in
the basis, which must be removed and replaced with non-artificial
columns. Finds additional columns that do not make the matrix singular.
"""
m, n = A.shape
# options for inclusion are those that aren't already in the basis
a = np.arange(m+n)
bl = np.zeros(len(a), dtype=bool)
bl[basis] = 1
options = a[~bl]
options = options[options < n] # and they have to be non-artificial
# form basis matrix
B = np.zeros((m, m))
B[:, 0:len(basis)] = A[:, basis]
if (basis.size > 0 and
np.linalg.matrix_rank(B[:, :len(basis)]) < len(basis)):
raise Exception("Basis has dependent columns")
rank = 0 # just enter the loop
for i in range(n): # somewhat arbitrary, but we need another way out
# permute the options, and take as many as needed
new_basis = np.random.permutation(options)[:m-len(basis)]
B[:, len(basis):] = A[:, new_basis] # update the basis matrix
rank = np.linalg.matrix_rank(B) # check the rank
if rank == m:
break
return np.concatenate((basis, new_basis))
def _generate_auxiliary_problem(A, b, x0, tol):
"""
Modifies original problem to create an auxiliary problem with a trivial
initial basic feasible solution and an objective that minimizes
infeasibility in the original problem.
Conceptually, this is done by stacking an identity matrix on the right of
the original constraint matrix, adding artificial variables to correspond
with each of these new columns, and generating a cost vector that is all
zeros except for ones corresponding with each of the new variables.
An initial basic feasible solution is trivial: all variables are zero
except for the artificial variables, which are set equal to the
corresponding element of the right hand side `b`.
Running the simplex method on this auxiliary problem drives all of the
artificial variables - and thus the cost - to zero if the original problem
is feasible. The original problem is declared infeasible otherwise.
Much of the complexity below is to improve efficiency by using singleton
columns in the original problem where possible, thus generating artificial
variables only as necessary, and using an initial 'guess' basic feasible
solution.
"""
status = 0
m, n = A.shape
if x0 is not None:
x = x0
else:
x = np.zeros(n)
r = b - A@x # residual; this must be all zeros for feasibility
A[r < 0] = -A[r < 0] # express problem with RHS positive for trivial BFS
b[r < 0] = -b[r < 0] # to the auxiliary problem
r[r < 0] *= -1
# Rows which we will need to find a trivial way to zero.
# This should just be the rows where there is a nonzero residual.
# But then we would not necessarily have a column singleton in every row.
# This makes it difficult to find an initial basis.
if x0 is None:
nonzero_constraints = np.arange(m)
else:
nonzero_constraints = np.where(r > tol)[0]
# these are (at least some of) the initial basis columns
basis = np.where(np.abs(x) > tol)[0]
if len(nonzero_constraints) == 0 and len(basis) <= m: # already a BFS
c = np.zeros(n)
basis = _get_more_basis_columns(A, basis)
return A, b, c, basis, x, status
elif (len(nonzero_constraints) > m - len(basis) or
np.any(x < 0)): # can't get trivial BFS
c = np.zeros(n)
status = 6
return A, b, c, basis, x, status
# chooses existing columns appropriate for inclusion in initial basis
cols, rows = _select_singleton_columns(A, r)
# find the rows we need to zero that we _can_ zero with column singletons
i_tofix = np.isin(rows, nonzero_constraints)
# these columns can't already be in the basis, though
# we are going to add them to the basis and change the corresponding x val
i_notinbasis = np.logical_not(np.isin(cols, basis))
i_fix_without_aux = np.logical_and(i_tofix, i_notinbasis)
rows = rows[i_fix_without_aux]
cols = cols[i_fix_without_aux]
# indices of the rows we can only zero with auxiliary variable
# these rows will get a one in each auxiliary column
arows = nonzero_constraints[np.logical_not(
np.isin(nonzero_constraints, rows))]
n_aux = len(arows)
acols = n + np.arange(n_aux) # indices of auxiliary columns
basis_ng = np.concatenate((cols, acols)) # basis columns not from guess
basis_ng_rows = np.concatenate((rows, arows)) # rows we need to zero
# add auxiliary singleton columns
A = np.hstack((A, np.zeros((m, n_aux))))
A[arows, acols] = 1
# generate initial BFS
x = np.concatenate((x, np.zeros(n_aux)))
x[basis_ng] = r[basis_ng_rows]/A[basis_ng_rows, basis_ng]
# generate costs to minimize infeasibility
c = np.zeros(n_aux + n)
c[acols] = 1
# basis columns correspond with nonzeros in guess, those with column
# singletons we used to zero remaining constraints, and any additional
# columns to get a full set (m columns)
basis = np.concatenate((basis, basis_ng))
basis = _get_more_basis_columns(A, basis) # add columns as needed
return A, b, c, basis, x, status
def _select_singleton_columns(A, b):
"""
Finds singleton columns for which the singleton entry is of the same sign
as the right-hand side; these columns are eligible for inclusion in an
initial basis. Determines the rows in which the singleton entries are
located. For each of these rows, returns the indices of the one singleton
column and its corresponding row.
"""
# find indices of all singleton columns and corresponding row indices
column_indices = np.nonzero(np.sum(np.abs(A) != 0, axis=0) == 1)[0]
columns = A[:, column_indices] # array of singleton columns
row_indices = np.zeros(len(column_indices), dtype=int)
nonzero_rows, nonzero_columns = np.nonzero(columns)
row_indices[nonzero_columns] = nonzero_rows # corresponding row indices
# keep only singletons with entries that have same sign as RHS
# this is necessary because all elements of BFS must be non-negative
same_sign = A[row_indices, column_indices]*b[row_indices] >= 0
column_indices = column_indices[same_sign][::-1]
row_indices = row_indices[same_sign][::-1]
# Reversing the order so that steps below select rightmost columns
# for initial basis, which will tend to be slack variables. (If the
# guess corresponds with a basic feasible solution but a constraint
# is not satisfied with the corresponding slack variable zero, the slack
# variable must be basic.)
# for each row, keep rightmost singleton column with an entry in that row
unique_row_indices, first_columns = np.unique(row_indices,
return_index=True)
return column_indices[first_columns], unique_row_indices
def _find_nonzero_rows(A, tol):
"""
Returns logical array indicating the locations of rows with at least
one nonzero element.
"""
return np.any(np.abs(A) > tol, axis=1)
def _select_enter_pivot(c_hat, bl, a, rule="bland", tol=1e-12):
"""
Selects a pivot to enter the basis. Currently Bland's rule - the smallest
index that has a negative reduced cost - is the default.
"""
if rule.lower() == "mrc": # index with minimum reduced cost
return a[~bl][np.argmin(c_hat)]
else: # smallest index w/ negative reduced cost
return a[~bl][c_hat < -tol][0]
def _display_iter(phase, iteration, slack, con, fun):
"""
Print indicators of optimization status to the console.
"""
header = iteration % 20 == 0
if header:
print("Phase",
"Iteration",
"Minimum Slack ",
"Constraint Residual",
"Objective ")
# :<X.Y left aligns Y digits in X digit spaces
fmt = '{0:<6}{1:<10}{2:<20.13}{3:<20.13}{4:<20.13}'
try:
slack = np.min(slack)
except ValueError:
slack = "NA"
print(fmt.format(phase, iteration, slack, np.linalg.norm(con), fun))
def _phase_two(c, A, x, b, callback, postsolve_args, maxiter, tol, disp,
maxupdate, mast, pivot, iteration=0, phase_one_n=None):
"""
The heart of the simplex method. Beginning with a basic feasible solution,
moves to adjacent basic feasible solutions with successively lower reduced cost.
Terminates when there are no basic feasible solutions with lower reduced
cost or if the problem is determined to be unbounded.
This implementation follows the revised simplex method based on LU
decomposition. Rather than maintaining a tableau or an inverse of the
basis matrix, we keep a factorization of the basis matrix that allows
efficient solution of linear systems while avoiding stability issues
associated with inverted matrices.
"""
m, n = A.shape
status = 0
a = np.arange(n) # indices of columns of A
ab = np.arange(m) # indices of columns of B
if maxupdate:
# basis matrix factorization object; similar to B = A[:, b]
B = BGLU(A, b, maxupdate, mast)
else:
B = LU(A, b)
for iteration in range(iteration, iteration + maxiter):
if disp or callback is not None:
if phase_one_n is not None:
phase = 1
x_postsolve = x[:phase_one_n]
else:
phase = 2
x_postsolve = x
x_o, fun, slack, con, _, _ = _postsolve(x_postsolve,
postsolve_args,
tol=tol, copy=True)
if callback is not None:
res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
'con': con, 'nit': iteration,
'phase': phase, 'complete': False,
'status': 0, 'message': "",
'success': False})
callback(res)
else:
_display_iter(phase, iteration, slack, con, fun)
bl = np.zeros(len(a), dtype=bool)
bl[b] = 1
xb = x[b] # basic variables
cb = c[b] # basic costs
try:
v = B.solve(cb, transposed=True) # similar to v = solve(B.T, cb)
except LinAlgError:
status = 4
break
# TODO: cythonize?
c_hat = c - v.dot(A) # reduced cost
c_hat = c_hat[~bl]
# Above is much faster than:
# N = A[:, ~bl] # slow!
# c_hat = c[~bl] - v.T.dot(N)
# Can we perform the multiplication only on the nonbasic columns?
if np.all(c_hat >= -tol): # all reduced costs positive -> terminate
break
j = _select_enter_pivot(c_hat, bl, a, rule=pivot, tol=tol)
u = B.solve(A[:, j]) # similar to u = solve(B, A[:, j])
i = u > tol # if none of the u are positive, unbounded
if not np.any(i):
status = 3
break
th = xb[i]/u[i]
l = np.argmin(th) # implicitly selects smallest subscript
th_star = th[l] # step size
x[b] = x[b] - th_star*u # take step
x[j] = th_star
B.update(ab[i][l], j) # modify basis
b = B.b # similar to b[ab[i][l]] = j
else:
status = 1
return x, b, status, iteration
def _linprog_rs(c, c0, A, b, x0, callback, postsolve_args,
maxiter=5000, tol=1e-12, disp=False,
maxupdate=10, mast=False, pivot="mrc",
**unknown_options):
"""
Solve the following linear programming problem via a two-phase
revised simplex algorithm.::
minimize: c @ x
subject to: A @ x == b
0 <= x < oo
Parameters
----------
c : 1-D array
Coefficients of the linear objective function to be minimized.
c0 : float
Constant term in objective function due to fixed (and eliminated)
variables. (Currently unused.)
A : 2-D array
2-D array which, when matrix-multiplied by ``x``, gives the values of
the equality constraints at ``x``.
b : 1-D array
1-D array of values representing the RHS of each equality constraint
(row) in ``A_eq``.
x0 : 1-D array, optional
Starting values of the independent variables, which will be refined by
the optimization algorithm. For the revised simplex method, these must
correspond with a basic feasible solution.
callback : callable, optional
If a callback function is provided, it will be called within each
iteration of the algorithm. The callback function must accept a single
`scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
Current solution vector.
fun : float
Current value of the objective function ``c @ x``.
success : bool
True only when an algorithm has completed successfully,
so this is always False as the callback function is called
only while the algorithm is still iterating.
slack : 1-D array
The values of the slack variables. Each slack variable
corresponds to an inequality constraint. If the slack is zero,
the corresponding constraint is active.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
that is, ``b - A_eq @ x``.
phase : int
The phase of the algorithm being executed.
status : int
For revised simplex, this is always 0 because if a different
status is detected, the algorithm terminates.
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
postsolve_args : tuple
Data needed by _postsolve to convert the solution to the standard-form
problem into the solution to the original problem.
Options
-------
maxiter : int
The maximum number of iterations to perform in either phase.
tol : float
The tolerance which determines when a solution is "close enough" to
zero in Phase 1 to be considered a basic feasible solution or close
enough to positive to serve as an optimal solution.
disp : bool
Set to ``True`` if indicators of optimization status are to be printed
to the console each iteration.
maxupdate : int
The maximum number of updates performed on the LU factorization.
After this many updates is reached, the basis matrix is factorized
from scratch.
mast : bool
Minimize Amortized Solve Time. If enabled, the average time to solve
a linear system using the basis factorization is measured. Typically,
the average solve time will decrease with each successive solve after
initial factorization, as factorization takes much more time than the
solve operation (and updates). Eventually, however, the updated
factorization becomes sufficiently complex that the average solve time
begins to increase. When this is detected, the basis is refactorized
from scratch. Enable this option to maximize speed at the risk of
nondeterministic behavior. Ignored if ``maxupdate`` is 0.
pivot : "mrc" or "bland"
Pivot rule: Minimum Reduced Cost (default) or Bland's rule. Choose
Bland's rule if iteration limit is reached and cycling is suspected.
unknown_options : dict
Optional arguments not used by this particular solver. If
`unknown_options` is non-empty a warning is issued listing all
unused options.
Returns
-------
x : 1-D array
Solution vector.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Numerical difficulties encountered
5 : No constraints; turn presolve on
6 : Guess x0 cannot be converted to a basic feasible solution
message : str
A string descriptor of the exit status of the optimization.
iteration : int
The number of iterations taken to solve the problem.
"""
_check_unknown_options(unknown_options)
messages = ["Optimization terminated successfully.",
"Iteration limit reached.",
"The problem appears infeasible, as the phase one auxiliary "
"problem terminated successfully with a residual of {0:.1e}, "
"greater than the tolerance {1} required for the solution to "
"be considered feasible. Consider increasing the tolerance to "
"be greater than {0:.1e}. If this tolerance is unnaceptably "
"large, the problem is likely infeasible.",
"The problem is unbounded, as the simplex algorithm found "
"a basic feasible solution from which there is a direction "
"with negative reduced cost in which all decision variables "
"increase.",
"Numerical difficulties encountered; consider trying "
"method='interior-point'.",
"Problems with no constraints are trivially solved; please "
"turn presolve on.",
"The guess x0 cannot be converted to a basic feasible "
"solution. "
]
if A.size == 0: # address test_unbounded_below_no_presolve_corrected
return np.zeros(c.shape), 5, messages[5], 0
x, basis, A, b, residual, status, iteration = (
_phase_one(A, b, x0, callback, postsolve_args,
maxiter, tol, disp, maxupdate, mast, pivot))
if status == 0:
x, basis, status, iteration = _phase_two(c, A, x, basis, callback,
postsolve_args,
maxiter, tol, disp,
maxupdate, mast, pivot,
iteration)
return x, status, messages[status].format(residual, tol), iteration
|
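A usage sketch for the solver above through its public entry point (SciPy >= 1.3, where method='revised simplex' dispatches to _linprog_rs; default bounds keep x >= 0):

import numpy as np
from scipy.optimize import linprog

# minimize x0 + 2*x1  subject to  x0 + x1 == 1,  x >= 0
c = np.array([1.0, 2.0])
A_eq = np.array([[1.0, 1.0]])
b_eq = np.array([1.0])
res = linprog(c, A_eq=A_eq, b_eq=b_eq, method='revised simplex')
print(res.status, res.x, res.fun)  # expect 0, [1. 0.], 1.0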
garbas/mozilla-releng-services
|
refs/heads/master
|
lib/backend_common/backend_common/cors.py
|
1
|
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import cli_common.log
import flask_cors
logger = cli_common.log.get_logger(__name__)
cors = flask_cors.CORS()
def init_app(app):
origins = app.config.get('CORS_ORIGINS', '*').split(' ')
cors.init_app(app, origins=origins)
return cors
|
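A minimal sketch of driving init_app above (assuming Flask and flask-cors are installed; CORS_ORIGINS as a space-separated string is this project's convention, not a flask-cors built-in):

import flask
from backend_common.cors import init_app

app = flask.Flask(__name__)
app.config['CORS_ORIGINS'] = 'https://a.example https://b.example'
init_app(app)  # responses now carry CORS headers for the two origins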
halmd-org/h5md-tools
|
refs/heads/master
|
h5mdtools/_plot/__init__.py
|
1
|
__all__=["msv", "pdf", "ssf", "tcf", ]
|
priyom/priyomdb
|
refs/heads/master
|
PriyomHTTP/Server/Selectors/CatchDisconnect.py
|
1
|
# encoding=utf-8
"""
File name: CatchDisconnect.py
This file is part of: priyomdb
LICENSE
The contents of this file are subject to the Mozilla Public License
Version 1.1 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS IS"
basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
License for the specific language governing rights and limitations under
the License.
Alternatively, the contents of this file may be used under the terms of
the GNU General Public license (the "GPL License"), in which case the
provisions of GPL License are applicable instead of those above.
FEEDBACK & QUESTIONS
For feedback and questions about priyomdb please e-mail one of the
authors:
Jonas Wielicki <j.wielicki@sotecware.net>
"""
from WebStack.Generic import EndOfResponse, ContentType
from storm.exceptions import DisconnectionError
from _mysql_exceptions import ProgrammingError
class CatchDisconnectSelector(object):
maxNesting = 10
def __init__(self, resource, store):
self.resource = resource
self.store = store
self.nesting = 0
def catch(self, trans, error):
self.store.rollback()
trans.rollback()
self.nesting += 1
if self.nesting >= CatchDisconnectSelector.maxNesting:
trans.rollback()
trans.set_response_code(503)
trans.set_content_type(ContentType("text/plain", "utf-8"))
print >>trans.get_response_stream(), error.encode("utf-8")
else:
self.handle(trans)
def respond(self, trans):
self.nesting = 0
return self.handle(trans)
def handle(self, trans):
try:
return self.resource.respond(trans)
except DisconnectionError:
# In that case, we perform a reconnect, flush the cache,
# write a message into the errorlog and try to re-execute the
# resource.
print("[{0}]: Attempting to reconnect after receiving a DisconnectionError".format(type(self).__name__))
self.catch(trans, u"Every attempt to get a database connection working failed")
except ProgrammingError as e:
# This may be a 'commands out of sync' error. Let's check for that;
# otherwise we just pass it on
if "commands out of sync" in str(e).lower():
print("[{0}]: Caught a {1}".format(type(self).__name__, str(e)))
self.catch(trans, u"Keeping to receive a commands out of sync from mysql.")
else:
raise
|
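The nesting counter above is a bounded-retry guard: roll back, re-run the resource up to maxNesting times, then give up with a 503. A framework-free sketch of the same idea:

def retry(fn, exceptions, attempts=10):
    # Re-run fn after a recoverable error, at most `attempts` times.
    for i in range(attempts):
        try:
            return fn()
        except exceptions:
            if i == attempts - 1:
                raise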
csalom/puput
|
refs/heads/master
|
puput/migrations/0001_initial.py
|
2
|
from __future__ import unicode_literals
from django.db import models, migrations
import wagtail.core.fields
import puput.routes
import datetime
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
('wagtailimages', '0006_add_verbose_names'),
]
operations = [
migrations.CreateModel(
name='BlogPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page', on_delete=models.CASCADE)),
('description', models.CharField(help_text='The page description that will appear under the title.', max_length=255, verbose_name='Description', blank=True)),
('display_comments', models.BooleanField(default=False, verbose_name='Display comments')),
('display_categories', models.BooleanField(default=True, verbose_name='Display categories')),
('display_tags', models.BooleanField(default=True, verbose_name='Display tags')),
('display_popular_entries', models.BooleanField(default=True, verbose_name='Display popular entries')),
('display_last_entries', models.BooleanField(default=True, verbose_name='Display last entries')),
('display_archive', models.BooleanField(default=True, verbose_name='Display archive')),
('disqus_api_secret', models.TextField(blank=True)),
('disqus_shortname', models.CharField(max_length=128, blank=True)),
('num_entries_page', models.IntegerField(default=5, verbose_name='Entries per page')),
('num_last_entries', models.IntegerField(default=3, verbose_name='Last entries limit')),
('num_popular_entries', models.IntegerField(default=3, verbose_name='Popular entries limit')),
('num_tags_entry_header', models.IntegerField(default=5, verbose_name='Tags limit entry header')),
('header_image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, verbose_name='Header image', blank=True, to='wagtailimages.Image', null=True)),
],
options={
'verbose_name': 'Blog',
},
bases=(puput.routes.BlogRoutes, 'wagtailcore.page'),
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=80, verbose_name='Category Name')),
('slug', models.SlugField(unique=True, max_length=80)),
('description', models.CharField(max_length=500, blank=True)),
('parent', models.ForeignKey(related_name='children', blank=True, to='puput.Category', null=True, on_delete=models.SET_NULL)),
],
options={
'ordering': ['name'],
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CategoryEntryPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('category', models.ForeignKey(related_name='+', verbose_name='Category', to='puput.Category', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EntryPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page', on_delete=models.CASCADE)),
('body', wagtail.core.fields.RichTextField(verbose_name='body')),
('date', models.DateTimeField(default=datetime.datetime.today, verbose_name='Post date')),
('excerpt', wagtail.core.fields.RichTextField(help_text='Used to display on puput pages list. If this field is not filled, a truncate version of body text will be used.', verbose_name='excerpt', blank=True)),
('num_comments', models.IntegerField(default=0, editable=False)),
('categories', models.ManyToManyField(to='puput.Category', through='puput.CategoryEntryPage', blank=True)),
('header_image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, verbose_name='Header image', blank=True, to='wagtailimages.Image', null=True)),
],
options={
'verbose_name': 'Entry',
'verbose_name_plural': 'Entries',
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EntryPageRelated',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('entrypage_from', modelcluster.fields.ParentalKey(related_name='related_entrypage_from', verbose_name='Entry', to='puput.EntryPage')),
('entrypage_to', modelcluster.fields.ParentalKey(related_name='related_entrypage_to', verbose_name='Entry', to='puput.EntryPage')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TagEntryPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content_object', modelcluster.fields.ParentalKey(related_name='entry_tags', to='puput.EntryPage')),
('tag', models.ForeignKey(related_name='puput_tagentrypage_items', to='taggit.Tag', on_delete=models.CASCADE)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.AddField(
model_name='entrypage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(to='taggit.Tag', through='puput.TagEntryPage', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'),
preserve_default=True,
),
migrations.AddField(
model_name='categoryentrypage',
name='page',
field=modelcluster.fields.ParentalKey(related_name='entry_categories', to='puput.EntryPage'),
preserve_default=True,
),
migrations.CreateModel(
name='Tag',
fields=[
],
options={
'proxy': True,
},
bases=('taggit.tag',),
),
]
|
vhaupert/mitmproxy
|
refs/heads/master
|
test/mitmproxy/net/test_wsgi.py
|
11
|
from io import BytesIO
import sys
from mitmproxy.net import wsgi
from mitmproxy.net.http import Headers
def tflow():
headers = Headers(test=b"value")
req = wsgi.Request("http", "GET", "/", "HTTP/1.1", headers, "")
return wsgi.Flow(("127.0.0.1", 8888), req)
class ExampleApp:
def __init__(self):
self.called = False
def __call__(self, environ, start_response):
self.called = True
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
return [b'Hello', b' world!\n']
class TestWSGI:
def test_make_environ(self):
w = wsgi.WSGIAdaptor(None, "foo", 80, "version")
tf = tflow()
assert w.make_environ(tf, None)
tf.request.path = "/foo?bar=voing"
r = w.make_environ(tf, None)
assert r["QUERY_STRING"] == "bar=voing"
def test_serve(self):
ta = ExampleApp()
w = wsgi.WSGIAdaptor(ta, "foo", 80, "version")
f = tflow()
f.request.host = "foo"
f.request.port = 80
wfile = BytesIO()
err = w.serve(f, wfile)
assert ta.called
assert not err
val = wfile.getvalue()
assert b"Hello world" in val
assert b"Server:" in val
def _serve(self, app):
w = wsgi.WSGIAdaptor(app, "foo", 80, "version")
f = tflow()
f.request.host = "foo"
f.request.port = 80
wfile = BytesIO()
w.serve(f, wfile)
return wfile.getvalue()
def test_serve_empty_body(self):
def app(environ, start_response):
status = '200 OK'
response_headers = [('Foo', 'bar')]
start_response(status, response_headers)
return []
assert self._serve(app)
def test_serve_double_start(self):
def app(environ, start_response):
try:
raise ValueError("foo")
except:
sys.exc_info()
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
start_response(status, response_headers)
assert b"Internal Server Error" in self._serve(app)
def test_serve_single_err(self):
def app(environ, start_response):
try:
raise ValueError("foo")
except:
ei = sys.exc_info()
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers, ei)
yield b""
assert b"Internal Server Error" in self._serve(app)
def test_serve_double_err(self):
def app(environ, start_response):
try:
raise ValueError("foo")
except:
ei = sys.exc_info()
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
yield b"aaa"
start_response(status, response_headers, ei)
yield b"bbb"
assert b"Internal Server Error" in self._serve(app)
|
LimitedCoin/LimitedCoin
|
refs/heads/master
|
share/qt/make_spinner.py
|
4415
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
pkimber/compose
|
refs/heads/master
|
example_compose/dev_greg.py
|
13
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from .base import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'temp.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
|
striglia/ipyimport
|
refs/heads/master
|
ipyimport/__init__.py
|
1
|
from gather import find_failed_imports
from gather import find_failed_imports_by_directory
_silence_pyflakes = [find_failed_imports, find_failed_imports_by_directory]
|
n0m4dz/odoo
|
refs/heads/8.0
|
openerp/report/print_fnc.py
|
458
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
functions = {
'today': lambda x: time.strftime('%d/%m/%Y', time.localtime()).decode('latin1')
}
#
# TODO: call an object internal function too
#
def print_fnc(fnc, arg):
if fnc in functions:
return functions[fnc](arg)
return ''
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
joelddiaz/openshift-tools
|
refs/heads/prod
|
openshift/installer/vendored/openshift-ansible-3.7.42-1/roles/openshift_health_checker/openshift_checks/ovs_version.py
|
6
|
"""
Ansible module for determining if an installed version of Open vSwitch is incompatible with the
currently installed version of OpenShift.
"""
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
from openshift_checks.mixins import NotContainerizedMixin
class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
"""Check that packages in a package_list are installed on the host
and are the correct version as determined by an OpenShift installation.
"""
name = "ovs_version"
tags = ["health"]
openshift_to_ovs_version = {
"3.7": ["2.6", "2.7", "2.8", "2.9"],
"3.6": ["2.6", "2.7", "2.8", "2.9"],
"3.5": ["2.6", "2.7"],
"3.4": "2.4",
}
def is_active(self):
"""Skip hosts that do not have package requirements."""
group_names = self.get_var("group_names", default=[])
master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names
return super(OvsVersion, self).is_active() and master_or_node
def run(self):
args = {
"package_list": [
{
"name": "openvswitch",
"version": self.get_required_ovs_version(),
},
],
}
return self.execute_module("rpm_version", args)
def get_required_ovs_version(self):
"""Return the correct Open vSwitch version for the current OpenShift version"""
openshift_version_tuple = self.get_major_minor_version(self.get_var("openshift_image_tag"))
if openshift_version_tuple < (3, 5):
return self.openshift_to_ovs_version["3.4"]
openshift_version = ".".join(str(x) for x in openshift_version_tuple)
ovs_version = self.openshift_to_ovs_version.get(openshift_version)
if ovs_version:
return self.openshift_to_ovs_version[openshift_version]
msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
raise OpenShiftCheckException(msg.format(openshift_version))
|
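A standalone sketch of the lookup get_required_ovs_version performs, with values copied from the mapping above:

openshift_to_ovs_version = {
    "3.7": ["2.6", "2.7", "2.8", "2.9"],
    "3.4": "2.4",
}
version_tuple = (3, 7)
if version_tuple < (3, 5):
    print(openshift_to_ovs_version["3.4"])
else:
    key = ".".join(str(x) for x in version_tuple)
    print(openshift_to_ovs_version.get(key))  # ['2.6', '2.7', '2.8', '2.9']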
PaoloP74/Arduino
|
refs/heads/master
|
arduino-core/src/processing/app/i18n/python/requests/__init__.py
|
184
|
# -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('http://python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload)
>>> print r.text
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2013 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '1.1.0'
__build__ = 0x010100
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Kenneth Reitz'
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
|
saradbowman/osf.io
|
refs/heads/develop
|
addons/twofactor/__init__.py
|
32
|
default_app_config = 'addons.twofactor.apps.TwoFactorAddonAppConfig'
|
igemsoftware/SYSU-Software2013
|
refs/heads/master
|
project/Python27/Lib/site-packages/werkzeug/testsuite/wrappers.py
|
66
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for the response and request objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
import pickle
from StringIO import StringIO
from datetime import datetime
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import wrappers
from werkzeug.datastructures import MultiDict, ImmutableOrderedMultiDict, \
ImmutableList, ImmutableTypeConversionDict, CharsetAccept, \
CombinedMultiDict
from werkzeug.test import Client, create_environ, run_wsgi_app
class RequestTestResponse(wrappers.BaseResponse):
"""Subclass of the normal response class we use to test response
and base classes. Has some methods to test if things in the
response match.
"""
def __init__(self, response, status, headers):
wrappers.BaseResponse.__init__(self, response, status, headers)
self.body_data = pickle.loads(self.data)
def __getitem__(self, key):
return self.body_data[key]
def request_demo_app(environ, start_response):
request = wrappers.BaseRequest(environ)
assert 'werkzeug.request' in environ
start_response('200 OK', [('Content-Type', 'text/plain')])
return [pickle.dumps({
'args': request.args,
'args_as_list': request.args.lists(),
'form': request.form,
'form_as_list': request.form.lists(),
'environ': prepare_environ_pickle(request.environ),
'data': request.data
})]
def prepare_environ_pickle(environ):
result = {}
for key, value in environ.iteritems():
try:
pickle.dumps((key, value))
except Exception:
continue
result[key] = value
return result
class WrappersTestCase(WerkzeugTestCase):
def assert_environ(self, environ, method):
assert environ['REQUEST_METHOD'] == method
assert environ['PATH_INFO'] == '/'
assert environ['SCRIPT_NAME'] == ''
assert environ['SERVER_NAME'] == 'localhost'
assert environ['wsgi.version'] == (1, 0)
assert environ['wsgi.url_scheme'] == 'http'
def test_base_request(self):
client = Client(request_demo_app, RequestTestResponse)
# get requests
response = client.get('/?foo=bar&foo=hehe')
assert response['args'] == MultiDict([('foo', 'bar'), ('foo', 'hehe')])
assert response['args_as_list'] == [('foo', ['bar', 'hehe'])]
assert response['form'] == MultiDict()
assert response['form_as_list'] == []
assert response['data'] == ''
self.assert_environ(response['environ'], 'GET')
# post requests with form data
response = client.post('/?blub=blah', data='foo=blub+hehe&blah=42',
content_type='application/x-www-form-urlencoded')
assert response['args'] == MultiDict([('blub', 'blah')])
assert response['args_as_list'] == [('blub', ['blah'])]
assert response['form'] == MultiDict([('foo', 'blub hehe'), ('blah', '42')])
assert response['data'] == ''
# currently we do not guarantee that the values are ordered correctly
# for post data.
## assert response['form_as_list'] == [('foo', ['blub hehe']), ('blah', ['42'])]
self.assert_environ(response['environ'], 'POST')
# patch requests with form data
response = client.patch('/?blub=blah', data='foo=blub+hehe&blah=42',
content_type='application/x-www-form-urlencoded')
assert response['args'] == MultiDict([('blub', 'blah')])
assert response['args_as_list'] == [('blub', ['blah'])]
assert response['form'] == MultiDict([('foo', 'blub hehe'), ('blah', '42')])
assert response['data'] == ''
self.assert_environ(response['environ'], 'PATCH')
# post requests with json data
json = '{"foo": "bar", "blub": "blah"}'
response = client.post('/?a=b', data=json, content_type='application/json')
assert response['data'] == json
assert response['args'] == MultiDict([('a', 'b')])
assert response['form'] == MultiDict()
def test_access_route(self):
req = wrappers.Request.from_values(headers={
'X-Forwarded-For': '192.168.1.2, 192.168.1.1'
})
req.environ['REMOTE_ADDR'] = '192.168.1.3'
assert req.access_route == ['192.168.1.2', '192.168.1.1']
assert req.remote_addr == '192.168.1.3'
req = wrappers.Request.from_values()
req.environ['REMOTE_ADDR'] = '192.168.1.3'
assert req.access_route == ['192.168.1.3']
def test_url_request_descriptors(self):
req = wrappers.Request.from_values('/bar?foo=baz', 'http://example.com/test')
assert req.path == u'/bar'
assert req.script_root == u'/test'
assert req.url == 'http://example.com/test/bar?foo=baz'
assert req.base_url == 'http://example.com/test/bar'
assert req.url_root == 'http://example.com/test/'
assert req.host_url == 'http://example.com/'
assert req.host == 'example.com'
assert req.scheme == 'http'
req = wrappers.Request.from_values('/bar?foo=baz', 'https://example.com/test')
assert req.scheme == 'https'
def test_authorization_mixin(self):
request = wrappers.Request.from_values(headers={
'Authorization': 'Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=='
})
a = request.authorization
assert a.type == 'basic'
assert a.username == 'Aladdin'
assert a.password == 'open sesame'
def test_base_response(self):
# unicode
response = wrappers.BaseResponse(u'öäü')
assert response.data == 'öäü'
# writing
response = wrappers.Response('foo')
response.stream.write('bar')
assert response.data == 'foobar'
# set cookie
response = wrappers.BaseResponse()
response.set_cookie('foo', 'bar', 60, 0, '/blub', 'example.org', False)
assert response.headers.to_list() == [
('Content-Type', 'text/plain; charset=utf-8'),
('Set-Cookie', 'foo=bar; Domain=example.org; expires=Thu, '
'01-Jan-1970 00:00:00 GMT; Max-Age=60; Path=/blub')
]
# delete cookie
response = wrappers.BaseResponse()
response.delete_cookie('foo')
assert response.headers.to_list() == [
('Content-Type', 'text/plain; charset=utf-8'),
('Set-Cookie', 'foo=; expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/')
]
# close call forwarding
closed = []
class Iterable(object):
def next(self):
raise StopIteration()
def __iter__(self):
return self
def close(self):
closed.append(True)
response = wrappers.BaseResponse(Iterable())
response.call_on_close(lambda: closed.append(True))
app_iter, status, headers = run_wsgi_app(response,
create_environ(),
buffered=True)
assert status == '200 OK'
assert ''.join(app_iter) == ''
assert len(closed) == 2
def test_response_status_codes(self):
response = wrappers.BaseResponse()
response.status_code = 404
assert response.status == '404 NOT FOUND'
response.status = '200 OK'
assert response.status_code == 200
response.status = '999 WTF'
assert response.status_code == 999
response.status_code = 588
assert response.status_code == 588
assert response.status == '588 UNKNOWN'
response.status = 'wtf'
assert response.status_code == 0
def test_type_forcing(self):
def wsgi_application(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
return ['Hello World!']
base_response = wrappers.BaseResponse('Hello World!', content_type='text/html')
class SpecialResponse(wrappers.Response):
def foo(self):
return 42
# good enough for this simple application, but don't ever use this
# in real-world code!
fake_env = {}
for orig_resp in wsgi_application, base_response:
response = SpecialResponse.force_type(orig_resp, fake_env)
assert response.__class__ is SpecialResponse
assert response.foo() == 42
assert response.data == 'Hello World!'
assert response.content_type == 'text/html'
# without env, no arbitrary conversion
self.assert_raises(TypeError, SpecialResponse.force_type, wsgi_application)
def test_accept_mixin(self):
request = wrappers.Request({
'HTTP_ACCEPT': 'text/xml,application/xml,application/xhtml+xml,'
'text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-us,en;q=0.5'
})
assert request.accept_mimetypes == CharsetAccept([
('text/xml', 1), ('image/png', 1), ('application/xml', 1),
('application/xhtml+xml', 1), ('text/html', 0.9),
('text/plain', 0.8), ('*/*', 0.5)
])
assert request.accept_charsets == CharsetAccept([
('ISO-8859-1', 1), ('utf-8', 0.7), ('*', 0.7)
])
assert request.accept_encodings == CharsetAccept([('gzip', 1), ('deflate', 1)])
assert request.accept_languages == CharsetAccept([('en-us', 1), ('en', 0.5)])
request = wrappers.Request({'HTTP_ACCEPT': ''})
assert request.accept_mimetypes == CharsetAccept()
def test_etag_request_mixin(self):
request = wrappers.Request({
'HTTP_CACHE_CONTROL': 'no-store, no-cache',
'HTTP_IF_MATCH': 'w/"foo", bar, "baz"',
'HTTP_IF_NONE_MATCH': 'w/"foo", bar, "baz"',
'HTTP_IF_MODIFIED_SINCE': 'Tue, 22 Jan 2008 11:18:44 GMT',
'HTTP_IF_UNMODIFIED_SINCE': 'Tue, 22 Jan 2008 11:18:44 GMT'
})
assert request.cache_control.no_store
assert request.cache_control.no_cache
for etags in request.if_match, request.if_none_match:
assert etags('bar')
assert etags.contains_raw('w/"foo"')
assert etags.contains_weak('foo')
assert not etags.contains('foo')
assert request.if_modified_since == datetime(2008, 1, 22, 11, 18, 44)
assert request.if_unmodified_since == datetime(2008, 1, 22, 11, 18, 44)
def test_user_agent_mixin(self):
user_agents = [
('Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US; rv:1.8.1.11) '
'Gecko/20071127 Firefox/2.0.0.11', 'firefox', 'macos', '2.0.0.11',
'en-US'),
('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; de-DE) Opera 8.54',
'opera', 'windows', '8.54', 'de-DE'),
('Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420 '
'(KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3',
'safari', 'iphone', '419.3', 'en'),
('Bot Googlebot/2.1 ( http://www.googlebot.com/bot.html)',
'google', None, '2.1', None)
]
for ua, browser, platform, version, lang in user_agents:
request = wrappers.Request({'HTTP_USER_AGENT': ua})
assert request.user_agent.browser == browser
assert request.user_agent.platform == platform
assert request.user_agent.version == version
assert request.user_agent.language == lang
assert bool(request.user_agent)
assert request.user_agent.to_header() == ua
assert str(request.user_agent) == ua
request = wrappers.Request({'HTTP_USER_AGENT': 'foo'})
assert not request.user_agent
def test_etag_response_mixin(self):
response = wrappers.Response('Hello World')
assert response.get_etag() == (None, None)
response.add_etag()
assert response.get_etag() == ('b10a8db164e0754105b7a99be72e3fe5', False)
assert not response.cache_control
response.cache_control.must_revalidate = True
response.cache_control.max_age = 60
response.headers['Content-Length'] = len(response.data)
assert response.headers['Cache-Control'] == 'must-revalidate, max-age=60'
assert 'date' not in response.headers
env = create_environ()
env.update({
'REQUEST_METHOD': 'GET',
'HTTP_IF_NONE_MATCH': response.get_etag()[0]
})
response.make_conditional(env)
assert 'date' in response.headers
# after the response is invoked by the server as a WSGI application
# (we're emulating this here), there must not be any entity
# headers left and the status code has to be 304
resp = wrappers.Response.from_app(response, env)
assert resp.status_code == 304
assert not 'content-length' in resp.headers
# make sure the date is not overridden
response = wrappers.Response('Hello World')
response.date = 1337
d = response.date
response.make_conditional(env)
assert response.date == d
# make sure content length is only set if missing
response = wrappers.Response('Hello World')
response.content_length = 999
response.make_conditional(env)
self.assert_equal(response.content_length, 999)
def test_etag_response_mixin_freezing(self):
class WithFreeze(wrappers.ETagResponseMixin, wrappers.BaseResponse):
pass
class WithoutFreeze(wrappers.BaseResponse, wrappers.ETagResponseMixin):
pass
response = WithFreeze('Hello World')
response.freeze()
assert response.get_etag() == (wrappers.generate_etag('Hello World'), False)
response = WithoutFreeze('Hello World')
response.freeze()
assert response.get_etag() == (None, None)
response = wrappers.Response('Hello World')
response.freeze()
assert response.get_etag() == (None, None)
def test_authenticate_mixin(self):
resp = wrappers.Response()
resp.www_authenticate.type = 'basic'
resp.www_authenticate.realm = 'Testing'
assert resp.headers['WWW-Authenticate'] == 'Basic realm="Testing"'
resp.www_authenticate.realm = None
resp.www_authenticate.type = None
assert 'WWW-Authenticate' not in resp.headers
def test_response_stream_mixin(self):
response = wrappers.Response()
response.stream.write('Hello ')
response.stream.write('World!')
assert response.response == ['Hello ', 'World!']
assert response.data == 'Hello World!'
def test_common_response_descriptors_mixin(self):
response = wrappers.Response()
response.mimetype = 'text/html'
assert response.mimetype == 'text/html'
assert response.content_type == 'text/html; charset=utf-8'
assert response.mimetype_params == {'charset': 'utf-8'}
response.mimetype_params['x-foo'] = 'yep'
del response.mimetype_params['charset']
assert response.content_type == 'text/html; x-foo=yep'
now = datetime.utcnow().replace(microsecond=0)
assert response.content_length is None
response.content_length = '42'
assert response.content_length == 42
for attr in 'date', 'age', 'expires':
assert getattr(response, attr) is None
setattr(response, attr, now)
assert getattr(response, attr) == now
assert response.retry_after is None
response.retry_after = now
assert response.retry_after == now
assert not response.vary
response.vary.add('Cookie')
response.vary.add('Content-Language')
assert 'cookie' in response.vary
assert response.vary.to_header() == 'Cookie, Content-Language'
response.headers['Vary'] = 'Content-Encoding'
assert response.vary.as_set() == set(['content-encoding'])
response.allow.update(['GET', 'POST'])
assert response.headers['Allow'] == 'GET, POST'
response.content_language.add('en-US')
response.content_language.add('fr')
assert response.headers['Content-Language'] == 'en-US, fr'
def test_common_request_descriptors_mixin(self):
request = wrappers.Request.from_values(content_type='text/html; charset=utf-8',
content_length='23',
headers={
'Referer': 'http://www.example.com/',
'Date': 'Sat, 28 Feb 2009 19:04:35 GMT',
'Max-Forwards': '10',
'Pragma': 'no-cache'
})
assert request.content_type == 'text/html; charset=utf-8'
assert request.mimetype == 'text/html'
assert request.mimetype_params == {'charset': 'utf-8'}
assert request.content_length == 23
assert request.referrer == 'http://www.example.com/'
assert request.date == datetime(2009, 2, 28, 19, 4, 35)
assert request.max_forwards == 10
assert 'no-cache' in request.pragma
def test_shallow_mode(self):
"""Request object shallow mode"""
request = wrappers.Request({'QUERY_STRING': 'foo=bar'}, shallow=True)
assert request.args['foo'] == 'bar'
self.assert_raises(RuntimeError, lambda: request.form['foo'])
def test_form_parsing_failed(self):
data = (
'--blah\r\n'
)
data = wrappers.Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
assert not data.files
assert not data.form
def test_url_charset_reflection(self):
req = wrappers.Request.from_values()
req.charset = 'utf-7'
assert req.url_charset == 'utf-7'
def test_response_streamed(self):
r = wrappers.Response()
assert not r.is_streamed
r = wrappers.Response("Hello World")
assert not r.is_streamed
r = wrappers.Response(["foo", "bar"])
assert not r.is_streamed
def gen():
if 0:
yield None
r = wrappers.Response(gen())
assert r.is_streamed
def test_response_freeze(self):
def generate():
yield "foo"
yield "bar"
resp = wrappers.Response(generate())
resp.freeze()
assert resp.response == ['foo', 'bar']
assert resp.headers['content-length'] == '6'
def test_other_method_payload(self):
data = 'Hello World'
req = wrappers.Request.from_values(input_stream=StringIO(data),
content_length=len(data),
content_type='text/plain',
method='WHAT_THE_FUCK')
assert req.data == data
assert isinstance(req.stream, wrappers.LimitedStream)
def test_urlfication(self):
resp = wrappers.Response()
resp.headers['Location'] = u'http://üser:pässword@☃.net/påth'
resp.headers['Content-Location'] = u'http://☃.net/'
headers = resp.get_wsgi_headers(create_environ())
assert headers['location'] == \
'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'
assert headers['content-location'] == 'http://xn--n3h.net/'
def test_new_response_iterator_behavior(self):
req = wrappers.Request.from_values()
resp = wrappers.Response(u'Hello Wörld!')
def get_content_length(resp):
headers = wrappers.Headers.linked(resp.get_wsgi_headers(req.environ))
return headers.get('content-length', type=int)
def generate_items():
yield "Hello "
yield u"Wörld!"
# werkzeug encodes when set to `data` now, which happens
# if a string is passed to the response object.
assert resp.response == [u'Hello Wörld!'.encode('utf-8')]
assert resp.data == u'Hello Wörld!'.encode('utf-8')
assert get_content_length(resp) == 13
assert not resp.is_streamed
assert resp.is_sequence
# try the same for manual assignment
resp.data = u'Wörd'
assert resp.response == [u'Wörd'.encode('utf-8')]
assert resp.data == u'Wörd'.encode('utf-8')
assert get_content_length(resp) == 5
assert not resp.is_streamed
assert resp.is_sequence
# automatic generator sequence conversion
resp.response = generate_items()
assert resp.is_streamed
assert not resp.is_sequence
assert resp.data == u'Hello Wörld!'.encode('utf-8')
assert resp.response == ['Hello ', u'Wörld!'.encode('utf-8')]
assert not resp.is_streamed
assert resp.is_sequence
# generator again, but with implicit sequence conversion disabled
resp.response = generate_items()
resp.implicit_sequence_conversion = False
assert resp.is_streamed
assert not resp.is_sequence
self.assert_raises(RuntimeError, lambda: resp.data)
resp.make_sequence()
assert resp.data == u'Hello Wörld!'.encode('utf-8')
assert resp.response == ['Hello ', u'Wörld!'.encode('utf-8')]
assert not resp.is_streamed
assert resp.is_sequence
# stream makes it a list no matter how the conversion is set
for val in True, False:
resp.implicit_sequence_conversion = val
resp.response = ("foo", "bar")
assert resp.is_sequence
resp.stream.write('baz')
assert resp.response == ['foo', 'bar', 'baz']
def test_form_data_ordering(self):
class MyRequest(wrappers.Request):
parameter_storage_class = ImmutableOrderedMultiDict
req = MyRequest.from_values('/?foo=1&bar=0&foo=3')
assert list(req.args) == ['foo', 'bar']
assert req.args.items(multi=True) == [
('foo', '1'),
('bar', '0'),
('foo', '3')
]
assert isinstance(req.args, ImmutableOrderedMultiDict)
assert isinstance(req.values, CombinedMultiDict)
assert req.values['foo'] == '1'
assert req.values.getlist('foo') == ['1', '3']
def test_storage_classes(self):
class MyRequest(wrappers.Request):
dict_storage_class = dict
list_storage_class = list
parameter_storage_class = dict
req = MyRequest.from_values('/?foo=baz', headers={
'Cookie': 'foo=bar'
})
assert type(req.cookies) is dict
assert req.cookies == {'foo': 'bar'}
assert type(req.access_route) is list
assert type(req.args) is dict
assert type(req.values) is CombinedMultiDict
assert req.values['foo'] == 'baz'
req = wrappers.Request.from_values(headers={
'Cookie': 'foo=bar'
})
assert type(req.cookies) is ImmutableTypeConversionDict
assert req.cookies == {'foo': 'bar'}
assert type(req.access_route) is ImmutableList
MyRequest.list_storage_class = tuple
req = MyRequest.from_values()
assert type(req.access_route) is tuple
def test_response_headers_passthrough(self):
headers = wrappers.Headers()
resp = wrappers.Response(headers=headers)
assert resp.headers is headers
def test_response_304_no_content_length(self):
resp = wrappers.Response('Test', status=304)
env = create_environ()
assert 'content-length' not in resp.get_wsgi_headers(env)
def test_ranges(self):
# basic Range header parsing
req = wrappers.Request.from_values()
assert req.range is None
req = wrappers.Request.from_values(headers={'Range': 'bytes=0-499'})
assert req.range.ranges == [(0, 500)]
resp = wrappers.Response()
resp.content_range = req.range.make_content_range(1000)
assert resp.content_range.units == 'bytes'
assert resp.content_range.start == 0
assert resp.content_range.stop == 500
assert resp.content_range.length == 1000
assert resp.headers['Content-Range'] == 'bytes 0-499/1000'
resp.content_range.unset()
assert 'Content-Range' not in resp.headers
resp.headers['Content-Range'] = 'bytes 0-499/1000'
assert resp.content_range.units == 'bytes'
assert resp.content_range.start == 0
assert resp.content_range.stop == 500
assert resp.content_range.length == 1000
def test_auto_content_length(self):
resp = wrappers.Response('Hello World!')
assert resp.content_length == 12
resp = wrappers.Response(['Hello World!'])
assert resp.content_length is None
assert resp.get_wsgi_headers({})['Content-Length'] == '12'
def test_disabled_auto_content_length(self):
class MyResponse(wrappers.Response):
automatically_set_content_length = False
resp = MyResponse('Hello World!')
self.assert_(resp.content_length is None)
resp = MyResponse(['Hello World!'])
self.assert_(resp.content_length is None)
self.assert_('Content-Length' not in resp.get_wsgi_headers({}))
def test_location_header_autocorrect(self):
env = create_environ()
class MyResponse(wrappers.Response):
autocorrect_location_header = False
resp = MyResponse('Hello World!')
resp.headers['Location'] = '/test'
self.assert_equal(resp.get_wsgi_headers(env)['Location'], '/test')
resp = wrappers.Response('Hello World!')
resp.headers['Location'] = '/test'
self.assert_equal(resp.get_wsgi_headers(env)['Location'], 'http://localhost/test')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WrappersTestCase))
return suite
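# A minimal sketch (not part of the original module) of running this suite
# directly with the standard unittest runner:
if __name__ == '__main__':
    unittest.main(defaultTest='suite')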
|
achernakov/flugegeheimen
|
refs/heads/master
|
vendor/gtest/test/gtest_break_on_failure_unittest.py
|
2140
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
|
aabbox/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/encodings/cp863.py
|
272
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp863',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00b6, # PILCROW SIGN
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x2017, # DOUBLE LOW LINE
0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x008f: 0x00a7, # SECTION SIGN
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00a4, # CURRENCY SIGN
0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00a6, # BROKEN BAR
0x00a1: 0x00b4, # ACUTE ACCENT
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00a8, # DIAERESIS
0x00a5: 0x00b8, # CEDILLA
0x00a6: 0x00b3, # SUPERSCRIPT THREE
0x00a7: 0x00af, # MACRON
0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xc2' # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xb6' # 0x0086 -> PILCROW SIGN
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u2017' # 0x008d -> DOUBLE LOW LINE
'\xc0' # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE
'\xa7' # 0x008f -> SECTION SIGN
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xc8' # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xca' # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xcb' # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcf' # 0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
'\xa4' # 0x0098 -> CURRENCY SIGN
'\xd4' # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xa2' # 0x009b -> CENT SIGN
'\xa3' # 0x009c -> POUND SIGN
'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
'\xdb' # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xa6' # 0x00a0 -> BROKEN BAR
'\xb4' # 0x00a1 -> ACUTE ACCENT
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xa8' # 0x00a4 -> DIAERESIS
'\xb8' # 0x00a5 -> CEDILLA
'\xb3' # 0x00a6 -> SUPERSCRIPT THREE
'\xaf' # 0x00a7 -> MACRON
'\xce' # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xbe' # 0x00ad -> VULGAR FRACTION THREE QUARTERS
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x0098, # CURRENCY SIGN
0x00a6: 0x00a0, # BROKEN BAR
0x00a7: 0x008f, # SECTION SIGN
0x00a8: 0x00a4, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00af: 0x00a7, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00a6, # SUPERSCRIPT THREE
0x00b4: 0x00a1, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x0086, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00a5, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00ad, # VULGAR FRACTION THREE QUARTERS
0x00c0: 0x008e, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c2: 0x0084, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x0091, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x0092, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x0094, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00ce: 0x00a8, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x0095, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d4: 0x0099, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
0x00db: 0x009e, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x2017: 0x008d, # DOUBLE LOW LINE
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
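# A minimal round-trip sketch (not part of the generated codec); the sample
# characters are illustrative:
if __name__ == '__main__':
    sample = '\xe9\u2502'  # E WITH ACUTE, BOX DRAWINGS LIGHT VERTICAL
    encoded = codecs.charmap_encode(sample, 'strict', encoding_map)[0]
    assert encoded == b'\x82\xb3'
    assert codecs.charmap_decode(encoded, 'strict', decoding_table)[0] == sample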
|
rebost/django
|
refs/heads/master
|
django/contrib/gis/gdal/layer.py
|
401
|
# Needed ctypes routines
from ctypes import c_double, byref
# Other GDAL imports.
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.feature import Feature
from django.contrib.gis.gdal.field import OGRFieldTypes
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.geometries import OGRGeometry
from django.contrib.gis.gdal.srs import SpatialReference
# GDAL ctypes function prototypes.
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api, srs as srs_api
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_L_* routines are relevant here.
class Layer(GDALBase):
"A class that wraps an OGR Layer, needs to be instantiated from a DataSource object."
#### Python 'magic' routines ####
def __init__(self, layer_ptr, ds):
"""
Initializes on an OGR C pointer to the Layer and the `DataSource` object
that owns this layer. The `DataSource` object is required so that a
reference to it is kept with this Layer. This prevents garbage
collection of the `DataSource` while this Layer is still active.
"""
if not layer_ptr:
raise OGRException('Cannot create Layer, invalid pointer given')
self.ptr = layer_ptr
self._ds = ds
self._ldefn = capi.get_layer_defn(self._ptr)
# Does the Layer support random reading?
self._random_read = self.test_capability('RandomRead')
def __getitem__(self, index):
"Gets the Feature at the specified index."
if isinstance(index, (int, long)):
# An integer index was given -- we cannot do a check based on the
# number of features because the beginning and ending feature IDs
# are not guaranteed to be 0 and len(layer)-1, respectively.
if index < 0: raise OGRIndexError('Negative indices are not allowed on OGR Layers.')
return self._make_feature(index)
elif isinstance(index, slice):
# A slice was given
start, stop, stride = index.indices(self.num_feat)
return [self._make_feature(fid) for fid in xrange(start, stop, stride)]
else:
raise TypeError('Integers and slices may only be used when indexing OGR Layers.')
def __iter__(self):
"Iterates over each Feature in the Layer."
# ResetReading() must be called before iteration begins.
capi.reset_reading(self._ptr)
for i in xrange(self.num_feat):
yield Feature(capi.get_next_feature(self._ptr), self._ldefn)
def __len__(self):
"The length is the number of features."
return self.num_feat
def __str__(self):
"The string name of the layer."
return self.name
def _make_feature(self, feat_id):
"""
Helper routine for __getitem__ that constructs a Feature from the given
Feature ID. If the OGR Layer does not support random-access reading,
then the layer's features are iterated through until a Feature
matching the given feature ID is found.
"""
if self._random_read:
# If the Layer supports random reading, return.
try:
return Feature(capi.get_feature(self.ptr, feat_id), self._ldefn)
except OGRException:
pass
else:
# Random access isn't supported, have to increment through
# each feature until the given feature ID is encountered.
for feat in self:
if feat.fid == feat_id: return feat
# Should have returned a Feature, raise an OGRIndexError.
raise OGRIndexError('Invalid feature id: %s.' % feat_id)
#### Layer properties ####
@property
def extent(self):
"Returns the extent (an Envelope) of this layer."
env = OGREnvelope()
capi.get_extent(self.ptr, byref(env), 1)
return Envelope(env)
@property
def name(self):
"Returns the name of this layer in the Data Source."
return capi.get_fd_name(self._ldefn)
@property
def num_feat(self, force=1):
"Returns the number of features in the Layer."
return capi.get_feature_count(self.ptr, force)
@property
def num_fields(self):
"Returns the number of fields in the Layer."
return capi.get_field_count(self._ldefn)
@property
def geom_type(self):
"Returns the geometry type (OGRGeomType) of the Layer."
return OGRGeomType(capi.get_fd_geom_type(self._ldefn))
@property
def srs(self):
"Returns the Spatial Reference used in this Layer."
try:
ptr = capi.get_layer_srs(self.ptr)
return SpatialReference(srs_api.clone_srs(ptr))
except SRSException:
return None
@property
def fields(self):
"""
Returns a list of string names corresponding to each of the Fields
available in this Layer.
"""
return [capi.get_field_name(capi.get_field_defn(self._ldefn, i))
for i in xrange(self.num_fields)]
@property
def field_types(self):
"""
Returns a list of the types of fields in this Layer. For example,
the list [OFTInteger, OFTReal, OFTString] would be returned for
an OGR layer that has an integer field, a floating-point field,
and a string field.
"""
return [OGRFieldTypes[capi.get_field_type(capi.get_field_defn(self._ldefn, i))]
for i in xrange(self.num_fields)]
@property
def field_widths(self):
"Returns a list of the maximum field widths for the features."
return [capi.get_field_width(capi.get_field_defn(self._ldefn, i))
for i in xrange(self.num_fields)]
@property
def field_precisions(self):
"Returns the field precisions for the features."
return [capi.get_field_precision(capi.get_field_defn(self._ldefn, i))
for i in xrange(self.num_fields)]
def _get_spatial_filter(self):
try:
return OGRGeometry(geom_api.clone_geom(capi.get_spatial_filter(self.ptr)))
except OGRException:
return None
def _set_spatial_filter(self, filter):
if isinstance(filter, OGRGeometry):
capi.set_spatial_filter(self.ptr, filter.ptr)
elif isinstance(filter, (tuple, list)):
if len(filter) != 4:
raise ValueError('Spatial filter list/tuple must have 4 elements.')
# Map c_double onto params -- if a bad type is passed in it
# will be caught here.
xmin, ymin, xmax, ymax = map(c_double, filter)
capi.set_spatial_filter_rect(self.ptr, xmin, ymin, xmax, ymax)
elif filter is None:
capi.set_spatial_filter(self.ptr, None)
else:
raise TypeError('Spatial filter must be either an OGRGeometry instance, a 4-tuple, or None.')
spatial_filter = property(_get_spatial_filter, _set_spatial_filter)
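# Example (illustrative coordinates only): constrain the layer to a
# bounding box, then clear the filter again:
#
#   layer.spatial_filter = (-5.0, 40.0, 5.0, 50.0)  # xmin, ymin, xmax, ymax
#   layer.spatial_filter = None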
#### Layer Methods ####
def get_fields(self, field_name):
"""
Returns a list containing the given field name for every Feature
in the Layer.
"""
if field_name not in self.fields:
raise OGRException('Invalid field name: %s' % field_name)
return [feat.get(field_name) for feat in self]
def get_geoms(self, geos=False):
"""
Returns a list containing the OGRGeometry for every Feature in
the Layer.
"""
if geos:
from django.contrib.gis.geos import GEOSGeometry
return [GEOSGeometry(feat.geom.wkb) for feat in self]
else:
return [feat.geom for feat in self]
def test_capability(self, capability):
"""
        Returns a bool indicating whether this Layer supports the given
capability (a string). Valid capability strings include:
'RandomRead', 'SequentialWrite', 'RandomWrite', 'FastSpatialFilter',
'FastFeatureCount', 'FastGetExtent', 'CreateField', 'Transactions',
'DeleteFeature', and 'FastSetNextByIndex'.
"""
return bool(capi.test_capability(self.ptr, capability))
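# Usage sketch (illustrative, not part of the original module; the
# shapefile path and the 'NAME' field are hypothetical):
#
#   from django.contrib.gis.gdal import DataSource
#   ds = DataSource('/data/cities.shp')
#   layer = ds[0]
#   print layer.name, layer.num_feat, layer.fields
#   layer.spatial_filter = (-10.0, 35.0, 5.0, 45.0)  # xmin, ymin, xmax, ymax
#   names = layer.get_fields('NAME')
#   layer.spatial_filter = None  # clear the filter again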
|
alfa-addon/addon
|
refs/heads/master
|
plugin.video.alfa/lib/python_libtorrent/linux_x86_64/1.1.0/__init__.py
|
362
|
#-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
|
perkinslr/pypyjs
|
refs/heads/master
|
addedLibraries/twisted/conch/insults/__init__.py
|
138
|
"""
Insults: a replacement for Curses/S-Lang.
Very basic at the moment."""
from twisted.python import deprecate, versions
deprecate.deprecatedModuleAttribute(
versions.Version("Twisted", 10, 1, 0),
"Please use twisted.conch.insults.helper instead.",
__name__, "colors")
deprecate.deprecatedModuleAttribute(
versions.Version("Twisted", 10, 1, 0),
"Please use twisted.conch.insults.insults instead.",
__name__, "client")
|
gauribhoite/personfinder
|
refs/heads/master
|
env/site-packages/django/conf/locale/fy/formats.py
|
852
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# DATE_FORMAT =
# TIME_FORMAT =
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
# SHORT_DATE_FORMAT =
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
chienlieu2017/it_management
|
refs/heads/master
|
odoo/addons/website_crm/__init__.py
|
1023
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import controllers
import models
|
toontownfunserver/Panda3D-1.9.0
|
refs/heads/master
|
python/Lib/site-packages/pip/_vendor/requests/packages/chardet/langbulgarianmodel.py
|
2964
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified based on win1251BulgarianCharToOrderMap, so
# only numbers < 64 are known to be valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences: 3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
Latin5BulgarianModel = {
'charToOrderMap': Latin5_BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
Win1251BulgarianModel = {
'charToOrderMap': win1251BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
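# Rough consumption sketch (simplified; in chardet the real consumer is
# SingleByteCharSetProber): each input byte is mapped to a frequency order
# via 'charToOrderMap', and every pair of consecutive orders below 64
# indexes the 64x64 'precedenceMatrix', whose 0-3 scores drive the
# confidence estimate.
def _demo_score_pair(model, prev_byte, cur_byte):
    prev_order = model['charToOrderMap'][prev_byte]
    cur_order = model['charToOrderMap'][cur_byte]
    if prev_order < 64 and cur_order < 64:
        return model['precedenceMatrix'][prev_order * 64 + cur_order]
    return None  # control characters, digits and symbols are not scored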
# flake8: noqa
|
unreal666/outwiker
|
refs/heads/master
|
src/outwiker/pages/wiki/actions/updatehtml.py
|
3
|
# -*- coding: utf-8 -*-
from outwiker.core.events import PageUpdateNeededParams
from outwiker.gui.baseaction import BaseAction
class WikiUpdateHtmlAction (BaseAction):
"""
    Update (recreate) the HTML code
"""
stringId = u"WikiUpdateHtml"
def __init__(self, application):
self._application = application
@property
def title(self):
return _(u"Update HTML Code")
@property
def description(self):
return _(u"Update HTML code for wiki page")
def run(self, params):
selectedPage = self._application.selectedPage
assert selectedPage is not None
assert self._application.mainWindow is not None
assert self._application.mainWindow.pagePanel is not None
self._application.onPageUpdateNeeded(selectedPage,
PageUpdateNeededParams(False))
self._application.mainWindow.pagePanel.pageView.updateHtml()
|
suyashphadtare/test
|
refs/heads/develop
|
erpnext/accounts/report/profit_and_loss_statement/profit_and_loss_statement.py
|
39
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
from erpnext.accounts.report.financial_statements import (get_period_list, get_columns, get_data)
def execute(filters=None):
period_list = get_period_list(filters.fiscal_year, filters.periodicity)
income = get_data(filters.company, "Income", "Credit", period_list, ignore_closing_entries=True)
expense = get_data(filters.company, "Expense", "Debit", period_list, ignore_closing_entries=True)
net_profit_loss = get_net_profit_loss(income, expense, period_list)
data = []
data.extend(income or [])
data.extend(expense or [])
if net_profit_loss:
data.append(net_profit_loss)
columns = get_columns(period_list)
return columns, data
def get_net_profit_loss(income, expense, period_list):
if income and expense:
net_profit_loss = {
"account_name": _("Net Profit / Loss"),
"account": None,
"warn_if_negative": True
}
for period in period_list:
net_profit_loss[period.key] = flt(income[-2][period.key] - expense[-2][period.key], 3)
return net_profit_loss
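# Note (a reading of the code above, not verified against get_data): each
# of income and expense ends with a totals row followed by a trailing
# spacer entry, which is why the section totals are read from index -2;
# the net figure is their difference rounded to 3 decimal places by flt().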
|
defionscode/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/acme.py
|
8
|
# -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael Gruener <michael.gruener@chaosmoon.net>, 2016
#
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import base64
import binascii
import copy
import datetime
import hashlib
import json
import os
import re
import shutil
import sys
import tempfile
import traceback
from ansible.module_utils._text import to_native, to_text, to_bytes
from ansible.module_utils.urls import fetch_url
try:
import cryptography
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.serialization
import cryptography.hazmat.primitives.asymmetric.rsa
import cryptography.hazmat.primitives.asymmetric.ec
import cryptography.hazmat.primitives.asymmetric.padding
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.primitives.asymmetric.utils
import cryptography.x509
import cryptography.x509.oid
from distutils.version import LooseVersion
CRYPTOGRAPHY_VERSION = cryptography.__version__
HAS_CURRENT_CRYPTOGRAPHY = (LooseVersion(CRYPTOGRAPHY_VERSION) >= LooseVersion('1.5'))
if HAS_CURRENT_CRYPTOGRAPHY:
_cryptography_backend = cryptography.hazmat.backends.default_backend()
except Exception as _:
HAS_CURRENT_CRYPTOGRAPHY = False
class ModuleFailException(Exception):
'''
If raised, module.fail_json() will be called with the given parameters after cleanup.
'''
def __init__(self, msg, **args):
super(ModuleFailException, self).__init__(self, msg)
self.msg = msg
self.module_fail_args = args
def do_fail(self, module):
module.fail_json(msg=self.msg, other=self.module_fail_args)
def nopad_b64(data):
return base64.urlsafe_b64encode(data).decode('utf8').replace("=", "")
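# Illustrative helper (not part of the original module): nopad_b64 yields
# the base64url form without '=' padding, as JWS (RFC 7515) requires;
# decoding must therefore restore the padding first.
def _demo_nopad_b64():
    encoded = nopad_b64(b'\x01\x02\x03')
    assert encoded == 'AQID'
    padded = encoded + '=' * (-len(encoded) % 4)  # re-add stripped padding
    assert base64.urlsafe_b64decode(padded) == b'\x01\x02\x03'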
def read_file(fn, mode='b'):
try:
with open(fn, 'r' + mode) as f:
return f.read()
except Exception as e:
raise ModuleFailException('Error while reading file "{0}": {1}'.format(fn, e))
# function source: network/basics/uri.py
def write_file(module, dest, content):
'''
Write content to destination file dest, only if the content
has changed.
'''
changed = False
# create a tempfile
fd, tmpsrc = tempfile.mkstemp(text=False)
f = os.fdopen(fd, 'wb')
try:
f.write(content)
except Exception as err:
try:
f.close()
except Exception as e:
pass
os.remove(tmpsrc)
raise ModuleFailException("failed to create temporary content file: %s" % to_native(err), exception=traceback.format_exc())
f.close()
checksum_src = None
checksum_dest = None
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
try:
os.remove(tmpsrc)
except Exception as e:
pass
raise ModuleFailException("Source %s does not exist" % (tmpsrc))
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
raise ModuleFailException("Source %s not readable" % (tmpsrc))
checksum_src = module.sha1(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
# raise an error if copy has no permission on dest
if not os.access(dest, os.W_OK):
os.remove(tmpsrc)
raise ModuleFailException("Destination %s not writable" % (dest))
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
raise ModuleFailException("Destination %s not readable" % (dest))
checksum_dest = module.sha1(dest)
else:
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
raise ModuleFailException("Destination dir %s not writable" % (os.path.dirname(dest)))
if checksum_src != checksum_dest:
try:
shutil.copyfile(tmpsrc, dest)
changed = True
except Exception as err:
os.remove(tmpsrc)
raise ModuleFailException("failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(err)), exception=traceback.format_exc())
os.remove(tmpsrc)
return changed
def pem_to_der(pem_filename):
'''
Load PEM file, and convert to DER.
If PEM contains multiple entities, the first entity will be used.
'''
certificate_lines = []
try:
with open(pem_filename, "rt") as f:
header_line_count = 0
for line in f:
if line.startswith('-----'):
header_line_count += 1
if header_line_count == 2:
# If certificate file contains other certs appended
# (like intermediate certificates), ignore these.
break
continue
certificate_lines.append(line.strip())
except Exception as err:
raise ModuleFailException("cannot load PEM file {0}: {1}".format(pem_filename, to_native(err)), exception=traceback.format_exc())
return base64.b64decode(''.join(certificate_lines))
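# Usage sketch (hypothetical path): ACME v2 expects the raw DER bytes,
# base64url-encoded, e.g. in a certificate revocation payload:
#   der = pem_to_der('/etc/ssl/cert.pem')
#   payload = {'certificate': nopad_b64(der)}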
def _parse_key_openssl(openssl_binary, module, key_file=None, key_content=None):
'''
Parses an RSA or Elliptic Curve key file in PEM format and returns a pair
(error, key_data).
'''
# If key_file isn't given, but key_content, write that to a temporary file
if key_file is None:
fd, tmpsrc = tempfile.mkstemp()
module.add_cleanup_file(tmpsrc) # Ansible will delete the file on exit
f = os.fdopen(fd, 'wb')
try:
f.write(key_content.encode('utf-8'))
key_file = tmpsrc
except Exception as err:
try:
f.close()
except Exception as e:
pass
raise ModuleFailException("failed to create temporary content file: %s" % to_native(err), exception=traceback.format_exc())
f.close()
# Parse key
account_key_type = None
with open(key_file, "rt") as f:
for line in f:
m = re.match(r"^\s*-{5,}BEGIN\s+(EC|RSA)\s+PRIVATE\s+KEY-{5,}\s*$", line)
if m is not None:
account_key_type = m.group(1).lower()
break
if account_key_type is None:
# This happens for example if openssl_privatekey created this key
# (as opposed to the OpenSSL binary). For now, we assume this is
# an RSA key.
# FIXME: add some kind of auto-detection
account_key_type = "rsa"
if account_key_type not in ("rsa", "ec"):
return 'unknown key type "%s"' % account_key_type, {}
openssl_keydump_cmd = [openssl_binary, account_key_type, "-in", key_file, "-noout", "-text"]
dummy, out, dummy = module.run_command(openssl_keydump_cmd, check_rc=True)
if account_key_type == 'rsa':
pub_hex, pub_exp = re.search(
r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
to_text(out, errors='surrogate_or_strict'), re.MULTILINE | re.DOTALL).groups()
pub_exp = "{0:x}".format(int(pub_exp))
if len(pub_exp) % 2:
pub_exp = "0{0}".format(pub_exp)
return None, {
'key_file': key_file,
'type': 'rsa',
'alg': 'RS256',
'jwk': {
"kty": "RSA",
"e": nopad_b64(binascii.unhexlify(pub_exp.encode("utf-8"))),
"n": nopad_b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))),
},
'hash': 'sha256',
}
elif account_key_type == 'ec':
pub_data = re.search(
r"pub:\s*\n\s+04:([a-f0-9\:\s]+?)\nASN1 OID: (\S+)(?:\nNIST CURVE: (\S+))?",
to_text(out, errors='surrogate_or_strict'), re.MULTILINE | re.DOTALL)
if pub_data is None:
return 'cannot parse elliptic curve key', {}
pub_hex = binascii.unhexlify(re.sub(r"(\s|:)", "", pub_data.group(1)).encode("utf-8"))
asn1_oid_curve = pub_data.group(2).lower()
nist_curve = pub_data.group(3).lower() if pub_data.group(3) else None
if asn1_oid_curve == 'prime256v1' or nist_curve == 'p-256':
bits = 256
alg = 'ES256'
hash = 'sha256'
point_size = 32
curve = 'P-256'
elif asn1_oid_curve == 'secp384r1' or nist_curve == 'p-384':
bits = 384
alg = 'ES384'
hash = 'sha384'
point_size = 48
curve = 'P-384'
elif asn1_oid_curve == 'secp521r1' or nist_curve == 'p-521':
# Not yet supported on Let's Encrypt side, see
# https://github.com/letsencrypt/boulder/issues/2217
bits = 521
alg = 'ES512'
hash = 'sha512'
point_size = 66
curve = 'P-521'
else:
return 'unknown elliptic curve: %s / %s' % (asn1_oid_curve, nist_curve), {}
bytes = (bits + 7) // 8
if len(pub_hex) != 2 * bytes:
return 'bad elliptic curve point (%s / %s)' % (asn1_oid_curve, nist_curve), {}
return None, {
'key_file': key_file,
'type': 'ec',
'alg': alg,
'jwk': {
"kty": "EC",
"crv": curve,
"x": nopad_b64(pub_hex[:bytes]),
"y": nopad_b64(pub_hex[bytes:]),
},
'hash': hash,
'point_size': point_size,
}
def _sign_request_openssl(openssl_binary, module, payload64, protected64, key_data):
openssl_sign_cmd = [openssl_binary, "dgst", "-{0}".format(key_data['hash']), "-sign", key_data['key_file']]
sign_payload = "{0}.{1}".format(protected64, payload64).encode('utf8')
dummy, out, dummy = module.run_command(openssl_sign_cmd, data=sign_payload, check_rc=True, binary_data=True)
if key_data['type'] == 'ec':
dummy, der_out, dummy = module.run_command(
[openssl_binary, "asn1parse", "-inform", "DER"],
data=out, binary_data=True)
expected_len = 2 * key_data['point_size']
sig = re.findall(
r"prim:\s+INTEGER\s+:([0-9A-F]{1,%s})\n" % expected_len,
to_text(der_out, errors='surrogate_or_strict'))
if len(sig) != 2:
raise ModuleFailException(
"failed to generate Elliptic Curve signature; cannot parse DER output: {0}".format(
to_text(der_out, errors='surrogate_or_strict')))
sig[0] = (expected_len - len(sig[0])) * '0' + sig[0]
sig[1] = (expected_len - len(sig[1])) * '0' + sig[1]
out = binascii.unhexlify(sig[0]) + binascii.unhexlify(sig[1])
return {
"protected": protected64,
"payload": payload64,
"signature": nopad_b64(to_bytes(out)),
}
if sys.version_info[0] >= 3:
# Python 3 (and newer)
def _count_bytes(n):
return (n.bit_length() + 7) // 8 if n > 0 else 0
def _convert_int_to_bytes(count, no):
return no.to_bytes(count, byteorder='big')
def _pad_hex(n, digits):
res = hex(n)[2:]
if len(res) < digits:
res = '0' * (digits - len(res)) + res
return res
else:
# Python 2
def _count_bytes(n):
if n <= 0:
return 0
h = '%x' % n
return (len(h) + 1) // 2
def _convert_int_to_bytes(count, n):
h = '%x' % n
if len(h) > 2 * count:
raise Exception('Number {1} needs more than {0} bytes!'.format(count, n))
return ('0' * (2 * count - len(h)) + h).decode('hex')
def _pad_hex(n, digits):
h = '%x' % n
if len(h) < digits:
h = '0' * (digits - len(h)) + h
return h
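# Illustrative helper (not part of the original module): the shims above
# provide a version-independent big-endian int <-> bytes conversion, e.g.
# for the common RSA public exponent 65537 (0x010001).
def _demo_int_shims():
    assert _count_bytes(65537) == 3
    assert _convert_int_to_bytes(3, 65537) == b'\x01\x00\x01'
    assert _pad_hex(255, 4) == '00ff'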
def _parse_key_cryptography(module, key_file=None, key_content=None):
'''
Parses an RSA or Elliptic Curve key file in PEM format and returns a pair
(error, key_data).
'''
# If key_content isn't given, read key_file
if key_content is None:
key_content = read_file(key_file)
else:
key_content = to_bytes(key_content)
# Parse key
try:
key = cryptography.hazmat.primitives.serialization.load_pem_private_key(key_content, password=None, backend=_cryptography_backend)
except Exception as e:
return 'error while loading key: {0}'.format(e), None
if isinstance(key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
pk = key.public_key().public_numbers()
return None, {
'key_obj': key,
'type': 'rsa',
'alg': 'RS256',
'jwk': {
"kty": "RSA",
"e": nopad_b64(_convert_int_to_bytes(_count_bytes(pk.e), pk.e)),
"n": nopad_b64(_convert_int_to_bytes(_count_bytes(pk.n), pk.n)),
},
'hash': 'sha256',
}
elif isinstance(key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
pk = key.public_key().public_numbers()
if pk.curve.name == 'secp256r1':
bits = 256
alg = 'ES256'
hash = 'sha256'
point_size = 32
curve = 'P-256'
elif pk.curve.name == 'secp384r1':
bits = 384
alg = 'ES384'
hash = 'sha384'
point_size = 48
curve = 'P-384'
elif pk.curve.name == 'secp521r1':
# Not yet supported on Let's Encrypt side, see
# https://github.com/letsencrypt/boulder/issues/2217
bits = 521
alg = 'ES512'
hash = 'sha512'
point_size = 66
curve = 'P-521'
else:
return 'unknown elliptic curve: {0}'.format(pk.curve.name), {}
bytes = (bits + 7) // 8
return None, {
'key_obj': key,
'type': 'ec',
'alg': alg,
'jwk': {
"kty": "EC",
"crv": curve,
"x": nopad_b64(_convert_int_to_bytes(bytes, pk.x)),
"y": nopad_b64(_convert_int_to_bytes(bytes, pk.y)),
},
'hash': hash,
'point_size': point_size,
}
else:
return 'unknown key type "{0}"'.format(type(key)), {}
def _sign_request_cryptography(module, payload64, protected64, key_data):
sign_payload = "{0}.{1}".format(protected64, payload64).encode('utf8')
if isinstance(key_data['key_obj'], cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
padding = cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15()
hash = cryptography.hazmat.primitives.hashes.SHA256()
signature = key_data['key_obj'].sign(sign_payload, padding, hash)
elif isinstance(key_data['key_obj'], cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
if key_data['hash'] == 'sha256':
hash = cryptography.hazmat.primitives.hashes.SHA256
elif key_data['hash'] == 'sha384':
hash = cryptography.hazmat.primitives.hashes.SHA384
elif key_data['hash'] == 'sha512':
hash = cryptography.hazmat.primitives.hashes.SHA512
ecdsa = cryptography.hazmat.primitives.asymmetric.ec.ECDSA(hash())
r, s = cryptography.hazmat.primitives.asymmetric.utils.decode_dss_signature(key_data['key_obj'].sign(sign_payload, ecdsa))
rr = _pad_hex(r, 2 * key_data['point_size'])
ss = _pad_hex(s, 2 * key_data['point_size'])
signature = binascii.unhexlify(rr) + binascii.unhexlify(ss)
return {
"protected": protected64,
"payload": payload64,
"signature": nopad_b64(signature),
}
class ACMEDirectory(object):
'''
The ACME server directory. Gives access to the available resources,
    and allows obtaining a Replay-Nonce. The acme_directory URL
needs to support unauthenticated GET requests; ACME endpoints
requiring authentication are not supported.
https://tools.ietf.org/html/draft-ietf-acme-acme-14#section-7.1.1
'''
def __init__(self, module, account):
self.module = module
self.directory_root = module.params['acme_directory']
self.version = module.params['acme_version']
self.directory, dummy = account.get_request(self.directory_root)
# Check whether self.version matches what we expect
if self.version == 1:
for key in ('new-reg', 'new-authz', 'new-cert'):
if key not in self.directory:
raise ModuleFailException("ACME directory does not seem to follow protocol ACME v1")
if self.version == 2:
for key in ('newNonce', 'newAccount', 'newOrder'):
if key not in self.directory:
raise ModuleFailException("ACME directory does not seem to follow protocol ACME v2")
def __getitem__(self, key):
return self.directory[key]
def get_nonce(self, resource=None):
url = self.directory_root if self.version == 1 else self.directory['newNonce']
if resource is not None:
url = resource
dummy, info = fetch_url(self.module, url, method='HEAD')
if info['status'] not in (200, 204):
raise ModuleFailException("Failed to get replay-nonce, got status {0}".format(info['status']))
return info['replay-nonce']
class ACMEAccount(object):
'''
ACME account object. Handles the authorized communication with the
    ACME server. Provides access to account-bound information like
the currently active authorizations and valid certificates
'''
def __init__(self, module):
self.module = module
self.version = module.params['acme_version']
# account_key path and content are mutually exclusive
self.key = module.params['account_key_src']
self.key_content = module.params['account_key_content']
# Grab account URI from module parameters.
# Make sure empty string is treated as None.
self.uri = module.params.get('account_uri') or None
self._openssl_bin = module.get_bin_path('openssl', True)
if self.key is not None or self.key_content is not None:
error, self.key_data = self.parse_key(self.key, self.key_content)
if error:
raise ModuleFailException("error while parsing account key: %s" % error)
self.jwk = self.key_data['jwk']
self.jws_header = {
"alg": self.key_data['alg'],
"jwk": self.jwk,
}
if self.uri:
# Make sure self.jws_header is updated
self.set_account_uri(self.uri)
self.directory = ACMEDirectory(module, self)
def get_keyauthorization(self, token):
'''
Returns the key authorization for the given token
https://tools.ietf.org/html/draft-ietf-acme-acme-14#section-8.1
'''
accountkey_json = json.dumps(self.jwk, sort_keys=True, separators=(',', ':'))
thumbprint = nopad_b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())
return "{0}.{1}".format(token, thumbprint)
def parse_key(self, key_file=None, key_content=None):
'''
Parses an RSA or Elliptic Curve key file in PEM format and returns a pair
(error, key_data).
'''
if key_file is None and key_content is None:
raise AssertionError('One of key_file and key_content must be specified!')
if HAS_CURRENT_CRYPTOGRAPHY:
return _parse_key_cryptography(self.module, key_file, key_content)
else:
return _parse_key_openssl(self._openssl_bin, self.module, key_file, key_content)
def sign_request(self, protected, payload, key_data):
try:
payload64 = nopad_b64(self.module.jsonify(payload).encode('utf8'))
protected64 = nopad_b64(self.module.jsonify(protected).encode('utf8'))
except Exception as e:
raise ModuleFailException("Failed to encode payload / headers as JSON: {0}".format(e))
if HAS_CURRENT_CRYPTOGRAPHY:
return _sign_request_cryptography(self.module, payload64, protected64, key_data)
else:
return _sign_request_openssl(self._openssl_bin, self.module, payload64, protected64, key_data)
def send_signed_request(self, url, payload, key_data=None, jws_header=None, parse_json_result=True):
'''
Sends a JWS signed HTTP POST request to the ACME server and returns
        the response as a dictionary
https://tools.ietf.org/html/draft-ietf-acme-acme-14#section-6.2
'''
key_data = key_data or self.key_data
jws_header = jws_header or self.jws_header
failed_tries = 0
while True:
protected = copy.deepcopy(jws_header)
protected["nonce"] = self.directory.get_nonce()
if self.version != 1:
protected["url"] = url
data = self.sign_request(protected, payload, key_data)
if self.version == 1:
data["header"] = jws_header
data = self.module.jsonify(data)
headers = {
'Content-Type': 'application/jose+json',
}
resp, info = fetch_url(self.module, url, data=data, headers=headers, method='POST')
result = {}
try:
content = resp.read()
except AttributeError:
content = info.get('body')
if content or not parse_json_result:
if (parse_json_result and info['content-type'].startswith('application/json')) or 400 <= info['status'] < 600:
try:
decoded_result = self.module.from_json(content.decode('utf8'))
# In case of badNonce error, try again (up to 5 times)
# (https://tools.ietf.org/html/draft-ietf-acme-acme-14#section-6.6)
if (400 <= info['status'] < 600 and
decoded_result.get('type') == 'urn:ietf:params:acme:error:badNonce' and
failed_tries <= 5):
failed_tries += 1
continue
if parse_json_result:
result = decoded_result
except ValueError:
raise ModuleFailException("Failed to parse the ACME response: {0} {1}".format(url, content))
else:
result = content
return result, info
def get_request(self, uri, parse_json_result=True, headers=None):
resp, info = fetch_url(self.module, uri, method='GET', headers=headers)
try:
content = resp.read()
except AttributeError:
content = info.get('body')
if parse_json_result:
result = {}
if content:
if info['content-type'].startswith('application/json'):
try:
result = self.module.from_json(content.decode('utf8'))
except ValueError:
raise ModuleFailException("Failed to parse the ACME response: {0} {1}".format(uri, content))
else:
result = content
else:
result = content
if info['status'] >= 400:
raise ModuleFailException("ACME request failed: CODE: {0} RESULT: {1}".format(info['status'], result))
return result, info
def set_account_uri(self, uri):
'''
        Set account URI. For ACME v2, it needs to be used when sending signed
requests.
'''
self.uri = uri
if self.version != 1:
self.jws_header.pop('jwk')
self.jws_header['kid'] = self.uri
def _new_reg(self, contact=None, agreement=None, terms_agreed=False, allow_creation=True):
'''
Registers a new ACME account. Returns True if the account was
created and False if it already existed (e.g. it was not newly
created).
https://tools.ietf.org/html/draft-ietf-acme-acme-14#section-7.3
'''
contact = [] if contact is None else contact
if self.version == 1:
new_reg = {
'resource': 'new-reg',
'contact': contact
}
if agreement:
new_reg['agreement'] = agreement
else:
new_reg['agreement'] = self.directory['meta']['terms-of-service']
url = self.directory['new-reg']
else:
new_reg = {
'contact': contact
}
if not allow_creation:
new_reg['onlyReturnExisting'] = True
if terms_agreed:
new_reg['termsOfServiceAgreed'] = True
url = self.directory['newAccount']
result, info = self.send_signed_request(url, new_reg)
if 'location' in info:
self.set_account_uri(info['location'])
if info['status'] in ([200, 201] if self.version == 1 else [201]):
# Account did not exist
return True
elif info['status'] == (409 if self.version == 1 else 200):
# Account did exist
return False
elif info['status'] == 400 and result['type'] == 'urn:ietf:params:acme:error:accountDoesNotExist' and not allow_creation:
# Account does not exist (and we didn't try to create it)
return False
else:
raise ModuleFailException("Error registering: {0} {1}".format(info['status'], result))
def get_account_data(self):
'''
Retrieve account information. Can only be called when the account
URI is already known (such as after calling init_account).
Return None if the account was deactivated, or a dict otherwise.
'''
if self.uri is None:
raise ModuleFailException("Account URI unknown")
data = {}
if self.version == 1:
data['resource'] = 'reg'
result, info = self.send_signed_request(self.uri, data)
if info['status'] in (400, 403) and result.get('type') == 'urn:ietf:params:acme:error:unauthorized':
# Returned when account is deactivated
return None
if info['status'] in (400, 404) and result.get('type') == 'urn:ietf:params:acme:error:accountDoesNotExist':
# Returned when account does not exist
return None
if info['status'] < 200 or info['status'] >= 300:
raise ModuleFailException("Error getting account data from {2}: {0} {1}".format(info['status'], result, self.uri))
return result
def init_account(self, contact, agreement=None, terms_agreed=False, allow_creation=True, update_contact=True, remove_account_uri_if_not_exists=False):
'''
Create or update an account on the ACME server. For ACME v1,
as the only way (without knowing an account URI) to test if an
account exists is to try and create one with the provided account
key, this method will always result in an account being present
(except on error situations). For ACME v2, a new account will
only be created if allow_creation is set to True.
For ACME v2, check_mode is fully respected. For ACME v1, the account
might be created if it does not yet exist.
If the account already exists and if update_contact is set to
True, this method will update the contact information.
Return True in case something changed (account was created, contact
info updated) or would be changed (check_mode). The account URI
will be stored in self.uri; if it is None, the account does not
exist.
https://tools.ietf.org/html/draft-ietf-acme-acme-14#section-7.3
'''
new_account = True
changed = False
if self.uri is not None:
new_account = False
if not update_contact:
# Verify that the account key belongs to the URI.
# (If update_contact is True, this will be done below.)
if self.get_account_data() is None:
if remove_account_uri_if_not_exists and not allow_creation:
self.uri = None
return False
raise ModuleFailException("Account is deactivated or does not exist!")
else:
new_account = self._new_reg(
contact,
agreement=agreement,
terms_agreed=terms_agreed,
allow_creation=allow_creation and not self.module.check_mode
)
if self.module.check_mode and self.uri is None and allow_creation:
return True
if not new_account and self.uri and update_contact:
result = self.get_account_data()
if result is None:
if not allow_creation:
self.uri = None
return False
raise ModuleFailException("Account is deactivated or does not exist!")
# ...and check if update is necessary
if result.get('contact', []) != contact:
if not self.module.check_mode:
upd_reg = result
upd_reg['contact'] = contact
result, dummy = self.send_signed_request(self.uri, upd_reg)
changed = True
return new_account or changed
def cryptography_get_csr_domains(module, csr_filename):
'''
Return a set of requested domains (CN and SANs) for the CSR.
'''
domains = set([])
csr = cryptography.x509.load_pem_x509_csr(read_file(csr_filename), _cryptography_backend)
for sub in csr.subject:
if sub.oid == cryptography.x509.oid.NameOID.COMMON_NAME:
domains.add(sub.value)
for extension in csr.extensions:
if extension.oid == cryptography.x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME:
for name in extension.value:
if isinstance(name, cryptography.x509.DNSName):
domains.add(name.value)
return domains
def cryptography_get_cert_days(module, cert_file):
'''
Return the days the certificate in cert_file remains valid and -1
if the file was not found. If cert_file contains more than one
certificate, only the first one will be considered.
'''
if not os.path.exists(cert_file):
return -1
try:
cert = cryptography.x509.load_pem_x509_certificate(read_file(cert_file), _cryptography_backend)
except Exception as e:
raise ModuleFailException('Cannot parse certificate {0}: {1}'.format(cert_file, e))
now = datetime.datetime.now()
return (cert.not_valid_after - now).days
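# Usage sketch (illustrative; path and threshold are hypothetical): typical
# renewal logic built on top of the helper above.
def _demo_needs_renewal(module, cert_file='/etc/ssl/my.pem', min_days=30):
    days = cryptography_get_cert_days(module, cert_file)
    return days < min_days  # -1 (missing file) also triggers issuance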
def set_crypto_backend(module):
'''
Sets which crypto backend to use (default: auto detection).
    Does not care whether a new enough cryptography is available or not. Must
be called before any real stuff is done which might evaluate
``HAS_CURRENT_CRYPTOGRAPHY``.
'''
global HAS_CURRENT_CRYPTOGRAPHY
# Choose backend
backend = module.params['select_crypto_backend']
if backend == 'auto':
pass
elif backend == 'openssl':
HAS_CURRENT_CRYPTOGRAPHY = False
elif backend == 'cryptography':
try:
cryptography.__version__
except Exception as _:
module.fail_json(msg='Cannot find cryptography module!')
HAS_CURRENT_CRYPTOGRAPHY = True
else:
module.fail_json(msg='Unknown crypto backend "{0}"!'.format(backend))
# Inform about choices
if HAS_CURRENT_CRYPTOGRAPHY:
module.debug('Using cryptography backend (library version {0})'.format(CRYPTOGRAPHY_VERSION))
else:
module.debug('Using OpenSSL binary backend')
|
arnavd96/Cinemiezer
|
refs/heads/master
|
myvenv/lib/python3.4/site-packages/provider/views.py
|
6
|
import json
import urlparse
from django.http import HttpResponse
from django.http import HttpResponseRedirect, QueryDict
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView
from django.core.exceptions import ObjectDoesNotExist
from oauth2.models import Client
from . import constants, scope
class OAuthError(Exception):
"""
Exception to throw inside any views defined in :attr:`provider.views`.
Any :attr:`OAuthError` thrown will be signalled to the API consumer.
:attr:`OAuthError` expects a dictionary as its first argument outlining the
    type of error that occurred.
:example:
::
raise OAuthError({'error': 'invalid_request'})
The different types of errors are outlined in :rfc:`4.2.2.1` and
:rfc:`5.2`.
"""
class OAuthView(TemplateView):
"""
Base class for any view dealing with the OAuth flow. This class overrides
the dispatch method of :attr:`TemplateView` to add no-caching headers to
every response as outlined in :rfc:`5.1`.
"""
def dispatch(self, request, *args, **kwargs):
response = super(OAuthView, self).dispatch(request, *args, **kwargs)
response['Cache-Control'] = 'no-store'
response['Pragma'] = 'no-cache'
return response
class Mixin(object):
"""
Mixin providing common methods required in the OAuth view defined in
:attr:`provider.views`.
"""
def get_data(self, request, key='params'):
"""
Return stored data from the session store.
:param key: `str` The key under which the data was stored.
"""
return request.session.get('%s:%s' % (constants.SESSION_KEY, key))
def cache_data(self, request, data, key='params'):
"""
Cache data in the session store.
:param request: :attr:`django.http.HttpRequest`
:param data: Arbitrary data to store.
:param key: `str` The key under which to store the data.
"""
request.session['%s:%s' % (constants.SESSION_KEY, key)] = data
def clear_data(self, request):
"""
Clear all OAuth related data from the session store.
"""
for key in request.session.keys():
if key.startswith(constants.SESSION_KEY):
del request.session[key]
def authenticate(self, request):
"""
Authenticate a client against all the backends configured in
:attr:`authentication`.
"""
for backend in self.authentication:
client = backend().authenticate(request)
if client is not None:
return client
return None
class Capture(OAuthView, Mixin):
"""
As stated in section :rfc:`3.1.2.5` this view captures all the request
parameters and redirects to another URL to avoid any leakage of request
    parameters to potentially harmful JavaScript.
This application assumes that whatever web-server is used as front-end will
handle SSL transport.
If you want strict enforcement of secure communication at application
level, set :attr:`settings.OAUTH_ENFORCE_SECURE` to ``True``.
The actual implementation is required to override :meth:`get_redirect_url`.
"""
template_name = 'provider/authorize.html'
def get_redirect_url(self, request):
"""
Return a redirect to a URL where the resource owner (see :rfc:`1`)
authorizes the client (also :rfc:`1`).
:return: :class:`django.http.HttpResponseRedirect`
"""
raise NotImplementedError
def handle(self, request, data):
self.cache_data(request, data)
if constants.ENFORCE_SECURE and not request.is_secure():
return self.render_to_response({'error': 'access_denied',
'error_description': _("A secure connection is required."),
'next': None},
status=400)
return HttpResponseRedirect(self.get_redirect_url(request))
def get(self, request):
return self.handle(request, request.GET)
def post(self, request):
return self.handle(request, request.POST)
class Authorize(OAuthView, Mixin):
"""
View to handle the client authorization as outlined in :rfc:`4`.
Implementation must override a set of methods:
* :attr:`get_redirect_url`
* :attr:`get_request_form`
* :attr:`get_authorization_form`
* :attr:`get_client`
* :attr:`save_authorization`
:attr:`Authorize` renders the ``provider/authorize.html`` template to
display the authorization form.
On successful authorization, it redirects the user back to the defined
client callback as defined in :rfc:`4.1.2`.
On authorization fail :attr:`Authorize` displays an error message to the
user with a modified redirect URL to the callback including the error
and possibly description of the error as defined in :rfc:`4.1.2.1`.
"""
template_name = 'provider/authorize.html'
def get_redirect_url(self, request):
"""
:return: ``str`` - The client URL to display in the template after
authorization succeeded or failed.
"""
raise NotImplementedError
def get_request_form(self, client, data):
"""
Return a form that is capable of validating the request data captured
by the :class:`Capture` view.
The form must accept a keyword argument ``client``.
"""
raise NotImplementedError
def get_authorization_form(self, request, client, data, client_data):
"""
Return a form that is capable of authorizing the client to the resource
owner.
:return: :attr:`django.forms.Form`
"""
raise NotImplementedError
def get_client(self, client_id):
"""
Return a client object from a given client identifier. Return ``None``
if no client is found. An error will be displayed to the resource owner
and presented to the client upon the final redirect.
"""
raise NotImplementedError
def save_authorization(self, request, client, form, client_data):
"""
Save the authorization that the user granted to the client, involving
the creation of a time limited authorization code as outlined in
:rfc:`4.1.2`.
Should return ``None`` in case authorization is not granted.
Should return a string representing the authorization code grant.
:return: ``None``, ``str``
"""
raise NotImplementedError
def _validate_client(self, request, data):
"""
:return: ``tuple`` - ``(client or False, data or error)``
"""
client = self.get_client(data.get('client_id'))
if client is None:
raise OAuthError({
'error': 'unauthorized_client',
'error_description': _("An unauthorized client tried to access"
" your resources.")
})
form = self.get_request_form(client, data)
if not form.is_valid():
raise OAuthError(form.errors)
return client, form.cleaned_data
def error_response(self, request, error, **kwargs):
"""
Return an error to be displayed to the resource owner if anything goes
awry. Errors can include invalid clients, authorization denials and
other edge cases such as a wrong ``redirect_uri`` in the authorization
request.
:param request: :attr:`django.http.HttpRequest`
:param error: ``dict``
The different types of errors are outlined in :rfc:`4.2.2.1`
"""
ctx = {}
ctx.update(error)
# If we got a malicious redirect_uri or client_id, remove all the
# cached data and tell the resource owner. We will *not* redirect back
# to the URL.
if error['error'] in ['redirect_uri', 'unauthorized_client']:
ctx.update(next='/')
return self.render_to_response(ctx, **kwargs)
ctx.update(next=self.get_redirect_url(request))
return self.render_to_response(ctx, **kwargs)
def handle(self, request, post_data=None):
data = self.get_data(request)
if data is None:
return self.error_response(request, {
'error': 'expired_authorization',
'error_description': _('Authorization session has expired.')})
try:
client, data = self._validate_client(request, data)
        except OAuthError as e:
return self.error_response(request, e.args[0], status=400)
authorization_form = self.get_authorization_form(request, client,
post_data, data)
if not authorization_form.is_bound or not authorization_form.is_valid():
return self.render_to_response({
'client': client,
'form': authorization_form,
'oauth_data': data, })
code = self.save_authorization(request, client,
authorization_form, data)
# be sure to serialize any objects that aren't natively json
# serializable because these values are stored as session data
self.cache_data(request, data)
self.cache_data(request, code, "code")
self.cache_data(request, client.serialize(), "client")
return HttpResponseRedirect(self.get_redirect_url(request))
def get(self, request):
return self.handle(request, None)
def post(self, request):
return self.handle(request, request.POST)
class Redirect(OAuthView, Mixin):
"""
Redirect the user back to the client with the right query parameters set.
This can be either parameters indicating success or parameters indicating
an error.
"""
def error_response(self, error, mimetype='application/json', status=400,
**kwargs):
"""
Return an error response to the client with default status code of
*400* stating the error as outlined in :rfc:`5.2`.
"""
return HttpResponse(json.dumps(error), mimetype=mimetype,
status=status, **kwargs)
def get(self, request):
data = self.get_data(request)
code = self.get_data(request, "code")
error = self.get_data(request, "error")
client = self.get_data(request, "client")
# client must be properly deserialized to become a valid instance
client = Client.deserialize(client)
# this is an edge case that is caused by making a request with no data
# it should only happen if this view is called manually, out of the
# normal capture-authorize-redirect flow.
if data is None or client is None:
return self.error_response({
'error': 'invalid_data',
'error_description': _('Data has not been captured')})
redirect_uri = data.get('redirect_uri', None) or client.redirect_uri
parsed = urlparse.urlparse(redirect_uri)
query = QueryDict('', mutable=True)
if 'state' in data:
query['state'] = data['state']
if error is not None:
query.update(error)
elif code is None:
query['error'] = 'access_denied'
else:
query['code'] = code
parsed = parsed[:4] + (query.urlencode(), '')
redirect_uri = urlparse.ParseResult(*parsed).geturl()
self.clear_data(request)
return HttpResponseRedirect(redirect_uri)
class AccessToken(OAuthView, Mixin):
"""
:attr:`AccessToken` handles creation and refreshing of access tokens.
Implementations must implement a number of methods:
* :attr:`get_authorization_code_grant`
* :attr:`get_refresh_token_grant`
* :attr:`get_password_grant`
* :attr:`get_access_token`
* :attr:`create_access_token`
* :attr:`create_refresh_token`
* :attr:`invalidate_grant`
* :attr:`invalidate_access_token`
* :attr:`invalidate_refresh_token`
The default implementation supports the grant types defined in
:attr:`grant_types`.
According to :rfc:`4.4.2` this endpoint too must support secure
communication. For strict enforcement of secure communication at
application level set :attr:`settings.OAUTH_ENFORCE_SECURE` to ``True``.
According to :rfc:`3.2` we can only accept POST requests.
Returns with a status code of *400* in case of errors. *200* in case of
success.
"""
authentication = ()
"""
Authentication backends used to authenticate a particular client.
"""
grant_types = ['authorization_code', 'refresh_token', 'password']
"""
The default grant types supported by this view.
"""
def get_authorization_code_grant(self, request, data, client):
"""
Return the grant associated with this request or an error dict.
:return: ``tuple`` - ``(True or False, grant or error_dict)``
"""
raise NotImplementedError
def get_refresh_token_grant(self, request, data, client):
"""
Return the refresh token associated with this request or an error dict.
:return: ``tuple`` - ``(True or False, token or error_dict)``
"""
raise NotImplementedError
def get_password_grant(self, request, data, client):
"""
Return a user associated with this request or an error dict.
:return: ``tuple`` - ``(True or False, user or error_dict)``
"""
raise NotImplementedError
def get_access_token(self, request, user, scope, client):
"""
Override to handle fetching of an existing access token.
:return: ``object`` - Access token
"""
raise NotImplementedError
def create_access_token(self, request, user, scope, client):
"""
Override to handle access token creation.
:return: ``object`` - Access token
"""
raise NotImplementedError
def create_refresh_token(self, request, user, scope, access_token, client):
"""
Override to handle refresh token creation.
:return: ``object`` - Refresh token
"""
raise NotImplementedError
def invalidate_grant(self, grant):
"""
Override to handle grant invalidation. A grant is invalidated right
after creating an access token from it.
:return None:
"""
raise NotImplementedError
def invalidate_refresh_token(self, refresh_token):
"""
Override to handle refresh token invalidation. When requesting a new
access token from a refresh token, the old one is *always* invalidated.
:return None:
"""
raise NotImplementedError
def invalidate_access_token(self, access_token):
"""
Override to handle access token invalidation. When a new access token
is created from a refresh token, the old one is *always* invalidated.
:return None:
"""
raise NotImplementedError
def error_response(self, error, mimetype='application/json', status=400,
**kwargs):
"""
Return an error response to the client with default status code of
*400* stating the error as outlined in :rfc:`5.2`.
"""
return HttpResponse(json.dumps(error), mimetype=mimetype,
status=status, **kwargs)
def access_token_response(self, access_token):
"""
Returns a successful response after creating the access token
as defined in :rfc:`5.1`.
"""
response_data = {
'access_token': access_token.token,
'token_type': constants.TOKEN_TYPE,
'expires_in': access_token.get_expire_delta(),
'scope': ' '.join(scope.names(access_token.scope)),
}
# Not all access_tokens are given a refresh_token
# (for example, public clients doing password auth)
try:
rt = access_token.refresh_token
response_data['refresh_token'] = rt.token
except ObjectDoesNotExist:
pass
return HttpResponse(
json.dumps(response_data), mimetype='application/json'
)
def authorization_code(self, request, data, client):
"""
Handle ``grant_type=authorization_code`` requests as defined in
:rfc:`4.1.3`.
"""
grant = self.get_authorization_code_grant(request, request.POST,
client)
if constants.SINGLE_ACCESS_TOKEN:
at = self.get_access_token(request, grant.user, grant.scope, client)
else:
at = self.create_access_token(request, grant.user, grant.scope, client)
rt = self.create_refresh_token(request, grant.user, grant.scope, at,
client)
self.invalidate_grant(grant)
return self.access_token_response(at)
def refresh_token(self, request, data, client):
"""
Handle ``grant_type=refresh_token`` requests as defined in :rfc:`6`.
"""
rt = self.get_refresh_token_grant(request, data, client)
# this must be called first in case we need to purge expired tokens
self.invalidate_refresh_token(rt)
self.invalidate_access_token(rt.access_token)
at = self.create_access_token(request, rt.user, rt.access_token.scope,
client)
rt = self.create_refresh_token(request, at.user, at.scope, at, client)
return self.access_token_response(at)
def password(self, request, data, client):
"""
Handle ``grant_type=password`` requests as defined in :rfc:`4.3`.
"""
data = self.get_password_grant(request, data, client)
user = data.get('user')
scope = data.get('scope')
if constants.SINGLE_ACCESS_TOKEN:
at = self.get_access_token(request, user, scope, client)
else:
at = self.create_access_token(request, user, scope, client)
# Public clients don't get refresh tokens
if client.client_type != 1:
rt = self.create_refresh_token(request, user, scope, at, client)
return self.access_token_response(at)
def get_handler(self, grant_type):
"""
Return a function or method that is capable handling the ``grant_type``
requested by the client or return ``None`` to indicate that this type
of grant type is not supported, resulting in an error response.
"""
if grant_type == 'authorization_code':
return self.authorization_code
elif grant_type == 'refresh_token':
return self.refresh_token
elif grant_type == 'password':
return self.password
return None
def get(self, request):
"""
As per :rfc:`3.2` the token endpoint *only* supports POST requests.
Returns an error response.
"""
return self.error_response({
'error': 'invalid_request',
'error_description': _("Only POST requests allowed.")})
def post(self, request):
"""
As per :rfc:`3.2` the token endpoint *only* supports POST requests.
"""
if constants.ENFORCE_SECURE and not request.is_secure():
return self.error_response({
'error': 'invalid_request',
'error_description': _("A secure connection is required.")})
if 'grant_type' not in request.POST:
return self.error_response({
'error': 'invalid_request',
'error_description': _("No 'grant_type' included in the "
"request.")})
grant_type = request.POST['grant_type']
if grant_type not in self.grant_types:
return self.error_response({'error': 'unsupported_grant_type'})
client = self.authenticate(request)
if client is None:
return self.error_response({'error': 'invalid_client'})
handler = self.get_handler(grant_type)
try:
return handler(request, request.POST, client)
except OAuthError, e:
return self.error_response(e.args[0])
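# --- Illustrative sketch, not part of the original module ---
# A minimal concrete AccessToken subclass showing how the abstract hooks
# above fit together at the call sites in authorization_code()/password().
# ``Grant``, ``Token`` and ``RefreshToken`` are hypothetical datastore
# models used purely for illustration; a real provider would query its own
# models and implement the remaining hooks as well.
class ExampleAccessToken(AccessToken):
    def get_authorization_code_grant(self, request, data, client):
        try:
            return Grant.objects.get(code=data.get('code'), client=client)
        except Grant.DoesNotExist:
            raise OAuthError({'error': 'invalid_grant'})
    def create_access_token(self, request, user, scope, client):
        return Token.objects.create(user=user, scope=scope, client=client)
    def create_refresh_token(self, request, user, scope, access_token, client):
        return RefreshToken.objects.create(user=user, client=client,
                                           access_token=access_token)
    def invalidate_grant(self, grant):
        grant.delete()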
|
DavisDevelopment/app_engine_demo
|
refs/heads/master
|
lib/flask/testsuite/test_apps/config_package_app/__init__.py
|
1257
|
import os
import flask
here = os.path.abspath(os.path.dirname(__file__))
app = flask.Flask(__name__)
|
jdobes/spacewalk
|
refs/heads/master
|
client/debian/packages-already-in-debian/rhn-client-tools/test/testrhnregGui.py
|
36
|
#!/usr/bin/python
import settestpath
from up2date_client import up2dateErrors
##from up2date_client import config
import unittest
#Import the modules you need to test...
from up2date_client import rhnregGui
from up2date_client.rhnregGui import callAndFilterExceptions
class MyError(Exception):
pass
class TestcallAndFilterExceptions(unittest.TestCase):
def storeMessage(self, message, dummy):
self.message = message
def returnsNumber(self):
return 6
def raiseError(self):
raise MyError("I am a banana!")
def testCallableDoesntRaiseException(self):
value = callAndFilterExceptions(self.returnsNumber, [], "Error",
self.storeMessage)
self.assertEqual(value, 6)
def testCallableRaisesValidException(self):
self.assertRaises(MyError,
callAndFilterExceptions, self.raiseError,
[MyError], "Error",
self.storeMessage)
def testCallableRaisesInvalidException(self):
value = callAndFilterExceptions(self.raiseError, [], "Error3",
self.storeMessage)
self.assertEqual(value, None)
self.assertEqual(self.message, "Error3")
#TODO: things we don't test yet but need to
# authToken timeout (need some server magic)
# verifying that the authTokens we get actually work
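# Illustrative helper, not part of the original test file: a compact
# restatement of the behaviour the tests above assert, kept here purely as
# documentation of callAndFilterExceptions' contract.
def _demo_call_and_filter(report=lambda message, dummy: None):
    # Success: the callable's return value is passed through unchanged.
    assert callAndFilterExceptions(lambda: 6, [], "Error", report) == 6
    # An exception type listed as valid propagates to the caller; any other
    # exception is swallowed, `report` receives the message, and None is
    # returned (see testCallableRaisesInvalidException above).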
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestcallAndFilterExceptions))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite")
|
pescobar/easybuild-framework
|
refs/heads/master
|
test/framework/variables.py
|
2
|
# #
# Copyright 2012-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Unit tests for tools/variables.py.
@author: Kenneth Hoste (Ghent University)
@author: Stijn De Weirdt (Ghent University)
"""
import sys
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered
from unittest import TextTestRunner
from easybuild.tools.variables import CommaList, StrList, Variables
from easybuild.tools.toolchain.variables import CommandFlagList
class VariablesTest(EnhancedTestCase):
""" Baseclass for easyblock testcases """
def test_variables(self):
class TestVariables(Variables):
MAP_CLASS = {'FOO': CommaList}
v = TestVariables()
self.assertEqual(str(v), "{}")
# DEFAULTCLASS is StrList
v['BAR'] = range(3)
self.assertEqual(str(v), "{'BAR': [[0, 1, 2]]}")
self.assertEqual(str(v['BAR']), "0 1 2")
v['BAR'].append(StrList(range(10, 12)))
self.assertEqual(str(v['BAR']), "0 1 2 10 11")
v.nappend('BAR', 20)
self.assertEqual(str(v['BAR']), "0 1 2 10 11 20")
v.nappend_el('BAR', 30, idx=-2)
self.assertEqual(str(v), "{'BAR': [[0, 1, 2], [10, 11, 30], [20]]}")
self.assertEqual(str(v['BAR']), '0 1 2 10 11 30 20')
v['FOO'] = range(3)
self.assertEqual(str(v['FOO']), "0,1,2")
v['BARSTR'] = 'XYZ'
self.assertEqual(v['BARSTR'].__repr__(), "[['XYZ']]")
v['BARINT'] = 0
self.assertEqual(v['BARINT'].__repr__(), "[[0]]")
v.join('BAR2', 'FOO', 'BARINT')
self.assertEqual(str(v['BAR2']), "0,1,2 0")
self.assertErrorRegex(Exception, 'not found in self', v.join, 'BAZ', 'DOESNOTEXIST')
cmd = CommandFlagList(["gcc", "bar", "baz"])
self.assertEqual(str(cmd), "gcc -bar -baz")
def test_empty_variables(self):
"""Test playing around with empty variables."""
v = Variables()
v.nappend('FOO', [])
self.assertEqual(v['FOO'], [])
v.join('BAR', 'FOO')
self.assertEqual(v['BAR'], [])
v.join('FOOBAR', 'BAR')
self.assertEqual(v['FOOBAR'], [])
def suite():
""" return all the tests"""
return TestLoaderFiltered().loadTestsFromTestCase(VariablesTest, sys.argv[1:])
if __name__ == '__main__':
res = TextTestRunner(verbosity=1).run(suite())
sys.exit(len(res.failures))
|
arximboldi/pigeoncide
|
refs/heads/master
|
src/core/task.py
|
1
|
#
# Copyright (C) 2009 Juan Pedro Bolivar Puente, Alberto Villegas Erce
#
# This file is part of Pigeoncide.
#
# Pigeoncide is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Pigeoncide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from base.util import *
from weakref import proxy
from error import *
from base.log import *
from base import util
import math
from functools import partial
_log = get_log (__name__)
class TaskError (CoreError):
pass
killed = 0
running = 1
paused = 2
class Task (object):
def __init__ (self, *a, **k):
super (Task, self).__init__ (*a, **k)
self._state = running
self._next = []
self._task_manager = None
def do_update (self, timer):
pass
def do_restart (self):
pass
def add_next (self, task):
self._next.append (task)
return task
def update (self, timer):
if self._state == running:
self.do_update (timer)
return self._state
def pause (self):
if self._state != killed:
self._state = paused
def resume (self):
if self._state != killed:
self._state = running
def toggle_pause (self):
if self._state != killed:
self._state = running if self._state == paused else paused
def restart (self):
self.do_restart ()
self._state = running
def kill (self):
self._state = killed
if self._task_manager:
for task in self._next:
self._task_manager.add (task)
def is_paused (self):
return self._state == paused
def is_killed (self):
return self._state == killed
@property
def state (self):
return self._state
@property
def parent_task (self):
return self._task_manager
def _set_parent (self, manager):
if self._task_manager and manager:
raise TaskError ("Already attached to: " + str (self._task_manager))
self._task_manager = manager
class WrapperTask (Task):
def __init__ (self, wrapped_task = None, *a, **k):
super (WrapperTask, self).__init__ (*a, **k)
self.wrapped_task = wrapped_task
def update (self, timer):
super (WrapperTask, self).update (timer)
self.wrapped_task.update (timer)
def _set_parent (self, manager):
super (WrapperTask, self)._set_parent (manager)
self.wrapped_task._set_parent (manager)
class FuncTask (Task):
def __init__ (self, func = None, *a, **k):
assert func is not None
super (FuncTask, self).__init__ (*a, **k)
self._set_func (func)
def _set_func (self, func):
if func.func_code.co_argcount < 1:
self._orig = func
self._func = lambda t: func ()
else:
self._orig = self._func = func
def _get_func (self):
return self._orig
def do_update (self, timer):
super (FuncTask, self).do_update (timer)
action = self._func (timer)
if not action or action == killed:
self.kill ()
elif action == paused:
self.pause ()
func = property (_get_func, _set_func)
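# Illustrative sketch, not part of the original module: FuncTask treats the
# wrapped callable's return value as its next state -- a falsy result kills
# the task, `paused` pauses it, anything truthy keeps it running.
class _FakeTimer (object):
    delta = 0.1
def _demo_func_task ():
    t = FuncTask (func = lambda timer: running)
    t.update (_FakeTimer ())
    assert not t.is_killed ()
    t2 = FuncTask (func = lambda timer: None)
    t2.update (_FakeTimer ())
    assert t2.is_killed ()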
def totask (task):
if not isinstance (task, Task):
if not callable (task):
raise TaskError ('You can add either tasks or callables. ' +
str(task))
task = FuncTask (func = task)
return task
class TaskGroup (Task):
auto_kill = True
auto_remove = True
loop = False
def __init__ (self,
tasks = [],
auto_kill = None,
auto_remove = None,
loop = None,
*a, **k):
super (TaskGroup, self).__init__ (*a, **k)
if auto_kill is not None:
self.auto_kill = auto_kill
if auto_remove is not None:
self.auto_remove = auto_remove
if loop is not None:
self.loop = loop
self._tasks = []
for task in tasks:
self.add (task)
def __del__ (self):
self.clear ()
def clear (self):
for t in self._tasks:
t._set_parent (None)
self._tasks = []
def do_update (self, timer):
super (TaskGroup, self).do_update (timer)
for task in self._tasks:
task.update (timer)
if self.auto_remove:
self._tasks = remove_if (Task.is_killed, self._tasks)
all_killed = len (filter (Task.is_killed, self._tasks)) == self.count
if self.auto_kill and all_killed:
self.kill ()
elif self.loop and all_killed:
self.restart ()
def add (self, task):
task = totask (task)
task._set_parent (self)
self._tasks.append (task)
return task
def remove (self, task):
self._tasks.remove (task)
task._set_parent (None)
def find (self, func):
for task in self._tasks:
if isinstance (task, FuncTask) and \
task.func == func:
return task
def restart (self):
super (TaskGroup, self).restart ()
for x in self._tasks:
x.restart ()
@property
def count (self):
return len (self._tasks)
class WaitTask (Task):
duration = 1.
def __init__ (self, duration = None, *a, **k):
super (WaitTask, self).__init__ (*a, **k)
if duration is not None:
self.duration = duration
self.remaining = self.duration
def do_update (self, timer):
super (WaitTask, self).do_update (timer)
self.remaining -= timer.delta
if self.remaining <= 0:
self.kill ()
self.remaining = 0
def do_restart (self):
self.remaining = self.duration
class DelayTask (Task):
duration = 1
def __init__ (self, duration = None, *a, **k):
super (DelayTask, self).__init__ (*a, **k)
if duration is not None:
self.duration = duration
self.remaining = self.duration
def do_restart (self):
self.remaining = self.duration
def do_update (self, timer):
super (DelayTask, self).do_update (timer)
self.remaining -= 1
if self.remaining < 0:
self.kill ()
self.remaining = 0
class TimerTask (WaitTask):
def do_update (self, timer):
super (TimerTask, self).do_update (timer)
if self.remaining == 0:
self.on_finish ()
else:
self.on_tick ()
def on_tick (self):
pass
def on_finish (self):
pass
class FadeTask (Task):
def __init__ (self,
func = nop,
duration = 1.0,
loop = False,
init = False,
*a, **k):
super (FadeTask, self).__init__ (*a, **k)
self.func = func
self.curr = 0.
self.loop = loop
self.duration = duration
if init:
func (0.0)
def do_update (self, timer):
    super (FadeTask, self).do_update (timer)
    # curr counts elapsed seconds; func always receives a value in [0, 1].
    self.func (self.curr / self.duration)
    self.curr += timer.delta
    if self.curr >= self.duration:
        if self.loop:
            self.curr -= self.duration
        else:
            self.curr = self.duration
            self.kill ()
            self.func (1.0)
class FuncWaitTask (WaitTask):
def __init__ (self, wait_func = None, *a, **k):
super (FuncWaitTask, self).__init__ (duration = wait_func (), *a, **k)
self.wait_func = wait_func
def do_restart (self):
self.duration = self.wait_func ()
self.remaining = self.duration
def sequence (fst, *tasks):
task = totask (fst)
tasks = map (totask, tasks)
for nxt in tasks:
task = task.add_next (nxt)
return fst
def parallel (*tasks):
return TaskGroup (tasks = tasks,
auto_remove = False)
def loop (*tasks):
return TaskGroup (tasks = tasks,
auto_remove = False,
auto_kill = False,
loop = True)
func_wait = FuncWaitTask
wait = WaitTask
fade = FadeTask
delay = DelayTask
def invfade (f, *a, **k):
return fade (lambda x: f (1.0 - x), *a, **k)
def linear (f, min, max, *a, **k):
return fade (lambda x: f (util.linear (min, max, x)), *a, **k)
def sinusoid (f, min = 0.0, max = 1.0, *a, **k):
return fade (
lambda x: f (min + (max - min) * math.sin (x * math.pi / 2.)), *a, **k)
def run (func):
    # Run the callable once, then kill the task (FuncTask kills on a falsy result).
    return FuncTask (lambda t: None if func () else None)
def repeat (task):
task = totask (task)
task.kill = task.restart
return WrapperTask (task)
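# Illustrative usage sketch, not part of the original module. Assumes a
# timer object exposing a `delta` attribute in seconds, as used throughout
# this file: fade something over two seconds, then log once, while a
# one-second wait runs in parallel.
def _demo_combinators ():
    fading = sequence (
        fade (lambda x: None, duration = 2.0),
        lambda: _log.debug ("fade finished"))
    return parallel (fading, wait (1.0))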
|
timj/scons
|
refs/heads/master
|
src/engine/SCons/Tool/__init__.py
|
1
|
"""SCons.Tool
SCons tool selection.
This looks for modules that define a callable object that can modify
a construction environment as appropriate for a given tool (or tool
chain).
Note that because this subsystem just *selects* a callable that can
modify a construction environment, it's possible for people to define
their own "tool specification" in an arbitrary callable function. No
one needs to use or tie in to this subsystem in order to roll their own
tool definition.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import imp
import importlib
import sys
import re
import os
import shutil
import SCons.Builder
import SCons.Errors
import SCons.Node.FS
import SCons.Scanner
import SCons.Scanner.C
import SCons.Scanner.D
import SCons.Scanner.LaTeX
import SCons.Scanner.Prog
import SCons.Scanner.SWIG
import collections
DefaultToolpath=[]
CScanner = SCons.Scanner.C.CScanner()
DScanner = SCons.Scanner.D.DScanner()
LaTeXScanner = SCons.Scanner.LaTeX.LaTeXScanner()
PDFLaTeXScanner = SCons.Scanner.LaTeX.PDFLaTeXScanner()
ProgramScanner = SCons.Scanner.Prog.ProgramScanner()
SourceFileScanner = SCons.Scanner.Base({}, name='SourceFileScanner')
SWIGScanner = SCons.Scanner.SWIG.SWIGScanner()
CSuffixes = [".c", ".C", ".cxx", ".cpp", ".c++", ".cc",
".h", ".H", ".hxx", ".hpp", ".hh",
".F", ".fpp", ".FPP",
".m", ".mm",
".S", ".spp", ".SPP", ".sx"]
DSuffixes = ['.d']
IDLSuffixes = [".idl", ".IDL"]
LaTeXSuffixes = [".tex", ".ltx", ".latex"]
SWIGSuffixes = ['.i']
for suffix in CSuffixes:
SourceFileScanner.add_scanner(suffix, CScanner)
for suffix in DSuffixes:
SourceFileScanner.add_scanner(suffix, DScanner)
for suffix in SWIGSuffixes:
SourceFileScanner.add_scanner(suffix, SWIGScanner)
# FIXME: what should be done here? Two scanners scan the same extensions,
# but look for different files, e.g., "picture.eps" vs. "picture.pdf".
# The builders for DVI and PDF explicitly reference their scanners
# I think that means this is not needed???
for suffix in LaTeXSuffixes:
SourceFileScanner.add_scanner(suffix, LaTeXScanner)
SourceFileScanner.add_scanner(suffix, PDFLaTeXScanner)
class Tool(object):
def __init__(self, name, toolpath=[], **kw):
self.name = name
self.toolpath = toolpath + DefaultToolpath
# remember these so we can merge them into the call
self.init_kw = kw
module = self._tool_module()
self.generate = module.generate
self.exists = module.exists
if hasattr(module, 'options'):
self.options = module.options
def _tool_module(self):
# TODO: Interchange zipimport with normal initialization for better error reporting
oldpythonpath = sys.path
sys.path = self.toolpath + sys.path
if sys.version_info[0] < 3:
# Py 2 code
try:
try:
file, path, desc = imp.find_module(self.name, self.toolpath)
try:
return imp.load_module(self.name, file, path, desc)
finally:
if file:
file.close()
except ImportError as e:
if str(e)!="No module named %s"%self.name:
raise SCons.Errors.EnvironmentError(e)
try:
import zipimport
except ImportError:
pass
else:
for aPath in self.toolpath:
try:
importer = zipimport.zipimporter(aPath)
return importer.load_module(self.name)
except ImportError as e:
pass
finally:
sys.path = oldpythonpath
else:
# Py 3 code
try:
# Try site_tools first
return importlib.import_module(self.name)
except ImportError as e:
# Then try modules in main distribution
try:
return importlib.import_module('SCons.Tool.'+self.name)
except ImportError as e:
if str(e) != "No module named %s" % self.name:
raise SCons.Errors.EnvironmentError(e)
try:
import zipimport
except ImportError:
pass
else:
for aPath in self.toolpath:
try:
importer = zipimport.zipimporter(aPath)
return importer.load_module(self.name)
except ImportError as e:
pass
finally:
sys.path = oldpythonpath
full_name = 'SCons.Tool.' + self.name
try:
return sys.modules[full_name]
except KeyError:
try:
smpath = sys.modules['SCons.Tool'].__path__
try:
file, path, desc = imp.find_module(self.name, smpath)
module = imp.load_module(full_name, file, path, desc)
setattr(SCons.Tool, self.name, module)
if file:
file.close()
return module
except ImportError as e:
if str(e)!="No module named %s"%self.name:
raise SCons.Errors.EnvironmentError(e)
try:
import zipimport
importer = zipimport.zipimporter( sys.modules['SCons.Tool'].__path__[0] )
module = importer.load_module(full_name)
setattr(SCons.Tool, self.name, module)
return module
except ImportError as e:
m = "No tool named '%s': %s" % (self.name, e)
raise SCons.Errors.EnvironmentError(m)
except ImportError as e:
m = "No tool named '%s': %s" % (self.name, e)
raise SCons.Errors.EnvironmentError(m)
def __call__(self, env, *args, **kw):
if self.init_kw is not None:
# Merge call kws into init kws;
# but don't bash self.init_kw.
if kw is not None:
call_kw = kw
kw = self.init_kw.copy()
kw.update(call_kw)
else:
kw = self.init_kw
env.Append(TOOLS = [ self.name ])
if hasattr(self, 'options'):
import SCons.Variables
if 'options' not in env:
from SCons.Script import ARGUMENTS
env['options']=SCons.Variables.Variables(args=ARGUMENTS)
opts=env['options']
self.options(opts)
opts.Update(env)
self.generate(env, *args, **kw)
def __str__(self):
return self.name
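# Illustrative sketch, not part of the original module: the interface a
# tool module must export for Tool() above to load it. A real tool lives in
# its own file on the toolpath and names these functions generate() and
# exists(); the 'mytool' program and variables here are hypothetical.
def _example_tool_generate(env):
    # Mutate the construction environment: variables, builders, etc.
    env['MYTOOL'] = 'mytool'
    env['MYTOOLCOM'] = '$MYTOOL $SOURCES > $TARGET'
def _example_tool_exists(env):
    # Report whether the underlying program can be found.
    return env.Detect('mytool')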
##########################################################################
# Create common executable program / library / object builders
def createProgBuilder(env):
"""This is a utility function that creates the Program
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
program = env['BUILDERS']['Program']
except KeyError:
import SCons.Defaults
program = SCons.Builder.Builder(action = SCons.Defaults.LinkAction,
emitter = '$PROGEMITTER',
prefix = '$PROGPREFIX',
suffix = '$PROGSUFFIX',
src_suffix = '$OBJSUFFIX',
src_builder = 'Object',
target_scanner = ProgramScanner)
env['BUILDERS']['Program'] = program
return program
def createStaticLibBuilder(env):
"""This is a utility function that creates the StaticLibrary
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
static_lib = env['BUILDERS']['StaticLibrary']
except KeyError:
action_list = [ SCons.Action.Action("$ARCOM", "$ARCOMSTR") ]
if env.Detect('ranlib'):
ranlib_action = SCons.Action.Action("$RANLIBCOM", "$RANLIBCOMSTR")
action_list.append(ranlib_action)
static_lib = SCons.Builder.Builder(action = action_list,
emitter = '$LIBEMITTER',
prefix = '$LIBPREFIX',
suffix = '$LIBSUFFIX',
src_suffix = '$OBJSUFFIX',
src_builder = 'StaticObject')
env['BUILDERS']['StaticLibrary'] = static_lib
env['BUILDERS']['Library'] = static_lib
return static_lib
def _call_linker_cb(env, callback, args, result = None):
"""Returns the result of env['LINKCALLBACKS'][callback](*args)
if env['LINKCALLBACKS'] is a dictionary and env['LINKCALLBACKS'][callback]
is callable. If these conditions are not met, return the value provided as
the *result* argument. This function is mainly used for generating library
info such as versioned suffixes, symlink maps, sonames etc. by delegating
the core job to callbacks configured by current linker tool"""
Verbose = False
if Verbose:
print('_call_linker_cb: args=%r' % args)
print('_call_linker_cb: callback=%r' % callback)
try:
cbfun = env['LINKCALLBACKS'][callback]
except (KeyError, TypeError):
if Verbose:
print('_call_linker_cb: env["LINKCALLBACKS"][%r] not found or can not be used' % callback)
pass
else:
if Verbose:
print('_call_linker_cb: env["LINKCALLBACKS"][%r] found' % callback)
print('_call_linker_cb: env["LINKCALLBACKS"][%r]=%r' % (callback, cbfun))
if(isinstance(cbfun, collections.Callable)):
if Verbose:
print('_call_linker_cb: env["LINKCALLBACKS"][%r] is callable' % callback)
result = cbfun(env, *args)
return result
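# Illustrative sketch, not part of the original module: the shape of the
# env['LINKCALLBACKS'] mapping consumed above. A linker tool installs
# callables keyed by names such as 'VersionedShLibSuffix' (see
# get_versioned_lib_info_generator below); this entry is a simplified
# stand-in, not the real gnulink implementation.
def _example_versioned_shlib_suffix(env, suffix, version):
    # e.g. '.so' + '0.1.2' -> '.so.0.1.2'
    return suffix + '.' + version
_example_linkcallbacks = {'VersionedShLibSuffix': _example_versioned_shlib_suffix}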
def _call_env_subst(env, string, *args, **kw):
kw2 = {}
for k in ('raw', 'target', 'source', 'conv', 'executor'):
try: kw2[k] = kw[k]
except KeyError: pass
return env.subst(string, *args, **kw2)
class _ShLibInfoSupport(object):
def get_libtype(self):
return 'ShLib'
def get_lib_prefix(self, env, *args, **kw):
return _call_env_subst(env,'$SHLIBPREFIX', *args, **kw)
def get_lib_suffix(self, env, *args, **kw):
return _call_env_subst(env,'$SHLIBSUFFIX', *args, **kw)
def get_lib_version(self, env, *args, **kw):
return _call_env_subst(env,'$SHLIBVERSION', *args, **kw)
def get_lib_noversionsymlinks(self, env, *args, **kw):
return _call_env_subst(env,'$SHLIBNOVERSIONSYMLINKS', *args, **kw)
class _LdModInfoSupport(object):
def get_libtype(self):
return 'LdMod'
def get_lib_prefix(self, env, *args, **kw):
return _call_env_subst(env,'$LDMODULEPREFIX', *args, **kw)
def get_lib_suffix(self, env, *args, **kw):
return _call_env_subst(env,'$LDMODULESUFFIX', *args, **kw)
def get_lib_version(self, env, *args, **kw):
return _call_env_subst(env,'$LDMODULEVERSION', *args, **kw)
def get_lib_noversionsymlinks(self, env, *args, **kw):
return _call_env_subst(env,'$LDMODULENOVERSIONSYMLINKS', *args, **kw)
class _ImpLibInfoSupport(object):
def get_libtype(self):
return 'ImpLib'
def get_lib_prefix(self, env, *args, **kw):
return _call_env_subst(env,'$IMPLIBPREFIX', *args, **kw)
def get_lib_suffix(self, env, *args, **kw):
return _call_env_subst(env,'$IMPLIBSUFFIX', *args, **kw)
def get_lib_version(self, env, *args, **kw):
version = _call_env_subst(env,'$IMPLIBVERSION', *args, **kw)
if not version:
try: lt = kw['implib_libtype']
except KeyError: pass
else:
if lt == 'ShLib':
version = _call_env_subst(env,'$SHLIBVERSION', *args, **kw)
elif lt == 'LdMod':
version = _call_env_subst(env,'$LDMODULEVERSION', *args, **kw)
return version
def get_lib_noversionsymlinks(self, env, *args, **kw):
disable = None
try: env['IMPLIBNOVERSIONSYMLINKS']
except KeyError:
try: lt = kw['implib_libtype']
except KeyError: pass
else:
if lt == 'ShLib':
disable = _call_env_subst(env,'$SHLIBNOVERSIONSYMLINKS', *args, **kw)
elif lt == 'LdMod':
disable = _call_env_subst(env,'$LDMODULENOVERSIONSYMLINKS', *args, **kw)
else:
disable = _call_env_subst(env,'$IMPLIBNOVERSIONSYMLINKS', *args, **kw)
return disable
class _LibInfoGeneratorBase(object):
"""Generator base class for library-related info such as suffixes for
versioned libraries, symlink maps, sonames etc. It handles commonities
of SharedLibrary and LoadableModule
"""
_support_classes = { 'ShLib' : _ShLibInfoSupport,
'LdMod' : _LdModInfoSupport,
'ImpLib' : _ImpLibInfoSupport }
def __init__(self, libtype, infoname):
self.set_libtype(libtype)
self.set_infoname(infoname)
def set_libtype(self, libtype):
try:
support_class = self._support_classes[libtype]
except KeyError:
raise ValueError('unsupported libtype %r' % libtype)
self._support = support_class()
def get_libtype(self):
return self._support.get_libtype()
def set_infoname(self, infoname):
self.infoname = infoname
def get_infoname(self):
return self.infoname
def get_lib_prefix(self, env, *args, **kw):
return self._support.get_lib_prefix(env,*args,**kw)
def get_lib_suffix(self, env, *args, **kw):
return self._support.get_lib_suffix(env,*args,**kw)
def get_lib_version(self, env, *args, **kw):
return self._support.get_lib_version(env,*args,**kw)
def get_lib_noversionsymlinks(self, env, *args, **kw):
return self._support.get_lib_noversionsymlinks(env,*args,**kw)
# Returns the name of the linker callback that shall be used to generate
# our info for a versioned library. For example, if our libtype is 'ShLib'
# and infoname is 'Prefix', it returns 'VersionedShLibPrefix'.
def get_versioned_lib_info_generator(self, **kw):
try: libtype = kw['generator_libtype']
except KeyError: libtype = self.get_libtype()
infoname = self.get_infoname()
return 'Versioned%s%s' % (libtype, infoname)
def generate_versioned_lib_info(self, env, args, result = None, **kw):
callback = self.get_versioned_lib_info_generator(**kw)
return _call_linker_cb(env, callback, args, result)
class _LibPrefixGenerator(_LibInfoGeneratorBase):
"""Library prefix generator, used as target_prefix in SharedLibrary and
LoadableModule builders"""
def __init__(self, libtype):
super(_LibPrefixGenerator, self).__init__(libtype, 'Prefix')
def __call__(self, env, sources = None, **kw):
Verbose = False
if sources and 'source' not in kw:
kw2 = kw.copy()
kw2['source'] = sources
else:
kw2 = kw
prefix = self.get_lib_prefix(env,**kw2)
if Verbose:
print("_LibPrefixGenerator: input prefix=%r" % prefix)
version = self.get_lib_version(env, **kw2)
if Verbose:
print("_LibPrefixGenerator: version=%r" % version)
if version:
prefix = self.generate_versioned_lib_info(env, [prefix, version], prefix, **kw2)
if Verbose:
print("_LibPrefixGenerator: return prefix=%r" % prefix)
return prefix
ShLibPrefixGenerator = _LibPrefixGenerator('ShLib')
LdModPrefixGenerator = _LibPrefixGenerator('LdMod')
ImpLibPrefixGenerator = _LibPrefixGenerator('ImpLib')
class _LibSuffixGenerator(_LibInfoGeneratorBase):
"""Library suffix generator, used as target_suffix in SharedLibrary and
LoadableModule builders"""
def __init__(self, libtype):
super(_LibSuffixGenerator, self).__init__(libtype, 'Suffix')
def __call__(self, env, sources = None, **kw):
Verbose = False
if sources and 'source' not in kw:
kw2 = kw.copy()
kw2['source'] = sources
else:
kw2 = kw
suffix = self.get_lib_suffix(env, **kw2)
if Verbose:
print("_LibSuffixGenerator: input suffix=%r" % suffix)
version = self.get_lib_version(env, **kw2)
if Verbose:
print("_LibSuffixGenerator: version=%r" % version)
if version:
suffix = self.generate_versioned_lib_info(env, [suffix, version], suffix, **kw2)
if Verbose:
print("_LibSuffixGenerator: return suffix=%r" % suffix)
return suffix
ShLibSuffixGenerator = _LibSuffixGenerator('ShLib')
LdModSuffixGenerator = _LibSuffixGenerator('LdMod')
ImpLibSuffixGenerator = _LibSuffixGenerator('ImpLib')
class _LibSymlinkGenerator(_LibInfoGeneratorBase):
"""Library symlink map generator. It generates a list of symlinks that
should be created by SharedLibrary or LoadableModule builders"""
def __init__(self, libtype):
super(_LibSymlinkGenerator, self).__init__(libtype, 'Symlinks')
def __call__(self, env, libnode, **kw):
Verbose = False
if libnode and 'target' not in kw:
kw2 = kw.copy()
kw2['target'] = libnode
else:
kw2 = kw
if Verbose:
print("_LibSymLinkGenerator: libnode=%r" % libnode.get_path())
symlinks = None
version = self.get_lib_version(env, **kw2)
disable = self.get_lib_noversionsymlinks(env, **kw2)
if Verbose:
print('_LibSymlinkGenerator: version=%r' % version)
print('_LibSymlinkGenerator: disable=%r' % disable)
if version and not disable:
prefix = self.get_lib_prefix(env,**kw2)
suffix = self.get_lib_suffix(env,**kw2)
symlinks = self.generate_versioned_lib_info(env, [libnode, version, prefix, suffix], **kw2)
if Verbose:
print('_LibSymlinkGenerator: return symlinks=%r' % StringizeLibSymlinks(symlinks))
return symlinks
ShLibSymlinkGenerator = _LibSymlinkGenerator('ShLib')
LdModSymlinkGenerator = _LibSymlinkGenerator('LdMod')
ImpLibSymlinkGenerator = _LibSymlinkGenerator('ImpLib')
class _LibNameGenerator(_LibInfoGeneratorBase):
"""Generates "unmangled" library name from a library file node.
Generally, it's thought to revert modifications done by prefix/suffix
generators (_LibPrefixGenerator/_LibSuffixGenerator) used by a library
builder. For example, on gnulink the suffix generator used by SharedLibrary
builder appends $SHLIBVERSION to $SHLIBSUFFIX producing node name which
ends with "$SHLIBSUFFIX.$SHLIBVERSION". Correspondingly, the implementation
of _LibNameGenerator replaces "$SHLIBSUFFIX.$SHLIBVERSION" with
"$SHLIBSUFFIX" in the node's basename. So that, if $SHLIBSUFFIX is ".so",
$SHLIBVERSION is "0.1.2" and the node path is "/foo/bar/libfoo.so.0.1.2",
the _LibNameGenerator shall return "libfoo.so". Other link tools may
implement it's own way of library name unmangling.
"""
def __init__(self, libtype):
super(_LibNameGenerator, self).__init__(libtype, 'Name')
def __call__(self, env, libnode, **kw):
"""Returns "demangled" library name"""
Verbose = False
if libnode and 'target' not in kw:
kw2 = kw.copy()
kw2['target'] = libnode
else:
kw2 = kw
if Verbose:
print("_LibNameGenerator: libnode=%r" % libnode.get_path())
version = self.get_lib_version(env, **kw2)
if Verbose:
print('_LibNameGenerator: version=%r' % version)
name = None
if version:
prefix = self.get_lib_prefix(env,**kw2)
suffix = self.get_lib_suffix(env,**kw2)
name = self.generate_versioned_lib_info(env, [libnode, version, prefix, suffix], **kw2)
if not name:
name = os.path.basename(libnode.get_path())
if Verbose:
print('_LibNameGenerator: return name=%r' % name)
return name
ShLibNameGenerator = _LibNameGenerator('ShLib')
LdModNameGenerator = _LibNameGenerator('LdMod')
ImpLibNameGenerator = _LibNameGenerator('ImpLib')
class _LibSonameGenerator(_LibInfoGeneratorBase):
"""Library soname generator. Returns library soname (e.g. libfoo.so.0) for
a given node (e.g. /foo/bar/libfoo.so.0.1.2)"""
def __init__(self, libtype):
super(_LibSonameGenerator, self).__init__(libtype, 'Soname')
def __call__(self, env, libnode, **kw):
"""Returns a SONAME based on a shared library's node path"""
Verbose = False
if libnode and 'target' not in kw:
kw2 = kw.copy()
kw2['target'] = libnode
else:
kw2 = kw
if Verbose:
print("_LibSonameGenerator: libnode=%r" % libnode.get_path())
soname = _call_env_subst(env, '$SONAME', **kw2)
if not soname:
version = self.get_lib_version(env,**kw2)
if Verbose:
print("_LibSonameGenerator: version=%r" % version)
if version:
prefix = self.get_lib_prefix(env,**kw2)
suffix = self.get_lib_suffix(env,**kw2)
soname = self.generate_versioned_lib_info(env, [libnode, version, prefix, suffix], **kw2)
if not soname:
# fallback to library name (as returned by appropriate _LibNameGenerator)
soname = _LibNameGenerator(self.get_libtype())(env, libnode)
if Verbose:
print("_LibSonameGenerator: FALLBACK: soname=%r" % soname)
if Verbose:
print("_LibSonameGenerator: return soname=%r" % soname)
return soname
ShLibSonameGenerator = _LibSonameGenerator('ShLib')
LdModSonameGenerator = _LibSonameGenerator('LdMod')
def StringizeLibSymlinks(symlinks):
"""Converts list with pairs of nodes to list with pairs of node paths
(strings). Used mainly for debugging."""
if SCons.Util.is_List(symlinks):
try:
return [ (k.get_path(), v.get_path()) for k,v in symlinks ]
except (TypeError, ValueError):
return symlinks
else:
return symlinks
def EmitLibSymlinks(env, symlinks, libnode, **kw):
"""Used by emitters to handle (shared/versioned) library symlinks"""
Verbose = False
# nodes involved in process... all symlinks + library
nodes = list(set([ x for x,y in symlinks ] + [libnode]))
clean_targets = kw.get('clean_targets', [])
if not SCons.Util.is_List(clean_targets):
clean_targets = [ clean_targets ]
for link, linktgt in symlinks:
env.SideEffect(link, linktgt)
if(Verbose):
print("EmitLibSymlinks: SideEffect(%r,%r)" % (link.get_path(), linktgt.get_path()))
clean_list = [x for x in nodes if x != linktgt]
env.Clean(list(set([linktgt] + clean_targets)), clean_list)
if(Verbose):
print("EmitLibSymlinks: Clean(%r,%r)" % (linktgt.get_path(), [x.get_path() for x in clean_list]))
def CreateLibSymlinks(env, symlinks):
"""Physically creates symlinks. The symlinks argument must be a list in
form [ (link, linktarget), ... ], where link and linktarget are SCons
nodes.
"""
Verbose = False
for link, linktgt in symlinks:
linktgt = link.get_dir().rel_path(linktgt)
link = link.get_path()
if(Verbose):
print("CreateLibSymlinks: preparing to add symlink %r -> %r" % (link, linktgt))
# Delete the (previously created) symlink if it exists. Only symlinks
# are deleted, to prevent accidental deletion of source files...
if env.fs.islink(link):
env.fs.unlink(link)
if(Verbose):
print("CreateLibSymlinks: removed old symlink %r" % link)
# If a file or directory exists with the same name as link, an OSError
# will be thrown, which should be enough, I think.
env.fs.symlink(linktgt, link)
if(Verbose):
print("CreateLibSymlinks: add symlink %r -> %r" % (link, linktgt))
return 0
def LibSymlinksActionFunction(target, source, env):
for tgt in target:
symlinks = getattr(getattr(tgt,'attributes', None), 'shliblinks', None)
if symlinks:
CreateLibSymlinks(env, symlinks)
return 0
def LibSymlinksStrFun(target, source, env, *args):
cmd = None
for tgt in target:
symlinks = getattr(getattr(tgt,'attributes', None), 'shliblinks', None)
if symlinks:
if cmd is None: cmd = ""
if cmd: cmd += "\n"
cmd += "Create symlinks for: %r" % tgt.get_path()
try:
linkstr = ', '.join([ "%r->%r" %(k,v) for k,v in StringizeLibSymlinks(symlinks)])
except (KeyError, ValueError):
pass
else:
cmd += ": %s" % linkstr
return cmd
LibSymlinksAction = SCons.Action.Action(LibSymlinksActionFunction, LibSymlinksStrFun)
def createSharedLibBuilder(env):
"""This is a utility function that creates the SharedLibrary
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
shared_lib = env['BUILDERS']['SharedLibrary']
except KeyError:
import SCons.Defaults
action_list = [ SCons.Defaults.SharedCheck,
SCons.Defaults.ShLinkAction,
LibSymlinksAction ]
shared_lib = SCons.Builder.Builder(action = action_list,
emitter = "$SHLIBEMITTER",
prefix = ShLibPrefixGenerator,
suffix = ShLibSuffixGenerator,
target_scanner = ProgramScanner,
src_suffix = '$SHOBJSUFFIX',
src_builder = 'SharedObject')
env['BUILDERS']['SharedLibrary'] = shared_lib
return shared_lib
def createLoadableModuleBuilder(env):
"""This is a utility function that creates the LoadableModule
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
ld_module = env['BUILDERS']['LoadableModule']
except KeyError:
import SCons.Defaults
action_list = [ SCons.Defaults.SharedCheck,
SCons.Defaults.LdModuleLinkAction,
LibSymlinksAction ]
ld_module = SCons.Builder.Builder(action = action_list,
emitter = "$LDMODULEEMITTER",
prefix = LdModPrefixGenerator,
suffix = LdModSuffixGenerator,
target_scanner = ProgramScanner,
src_suffix = '$SHOBJSUFFIX',
src_builder = 'SharedObject')
env['BUILDERS']['LoadableModule'] = ld_module
return ld_module
def createObjBuilders(env):
"""This is a utility function that creates the StaticObject
and SharedObject Builders in an Environment if they
are not there already.
If they are there already, we return the existing ones.
This is a separate function because soooo many Tools
use this functionality.
The return is a 2-tuple of (StaticObject, SharedObject)
"""
try:
static_obj = env['BUILDERS']['StaticObject']
except KeyError:
static_obj = SCons.Builder.Builder(action = {},
emitter = {},
prefix = '$OBJPREFIX',
suffix = '$OBJSUFFIX',
src_builder = ['CFile', 'CXXFile'],
source_scanner = SourceFileScanner,
single_source = 1)
env['BUILDERS']['StaticObject'] = static_obj
env['BUILDERS']['Object'] = static_obj
try:
shared_obj = env['BUILDERS']['SharedObject']
except KeyError:
shared_obj = SCons.Builder.Builder(action = {},
emitter = {},
prefix = '$SHOBJPREFIX',
suffix = '$SHOBJSUFFIX',
src_builder = ['CFile', 'CXXFile'],
source_scanner = SourceFileScanner,
single_source = 1)
env['BUILDERS']['SharedObject'] = shared_obj
return (static_obj, shared_obj)
def createCFileBuilders(env):
"""This is a utility function that creates the CFile/CXXFile
Builders in an Environment if they
are not there already.
If they are there already, we return the existing ones.
This is a separate function because soooo many Tools
use this functionality.
The return is a 2-tuple of (CFile, CXXFile)
"""
try:
c_file = env['BUILDERS']['CFile']
except KeyError:
c_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$CFILESUFFIX'})
env['BUILDERS']['CFile'] = c_file
env.SetDefault(CFILESUFFIX = '.c')
try:
cxx_file = env['BUILDERS']['CXXFile']
except KeyError:
cxx_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$CXXFILESUFFIX'})
env['BUILDERS']['CXXFile'] = cxx_file
env.SetDefault(CXXFILESUFFIX = '.cc')
return (c_file, cxx_file)
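# Illustrative sketch, not part of the original module: how a compiler
# tool's generate() typically wires its action into the builders created by
# createObjBuilders() above. The '.my' suffix and the '$MYCCCOM' /
# '$SHMYCCCOM' variables are hypothetical.
def _example_compiler_generate(env):
    import SCons.Defaults
    static_obj, shared_obj = createObjBuilders(env)
    static_obj.add_action('.my', SCons.Action.Action('$MYCCCOM'))
    shared_obj.add_action('.my', SCons.Action.Action('$SHMYCCCOM'))
    static_obj.add_emitter('.my', SCons.Defaults.StaticObjectEmitter)
    shared_obj.add_emitter('.my', SCons.Defaults.SharedObjectEmitter)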
##########################################################################
# Create common Java builders
def CreateJarBuilder(env):
try:
java_jar = env['BUILDERS']['Jar']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
jar_com = SCons.Action.Action('$JARCOM', '$JARCOMSTR')
java_jar = SCons.Builder.Builder(action = jar_com,
suffix = '$JARSUFFIX',
                                     src_suffix = '$JAVACLASSSUFFIX',
src_builder = 'JavaClassFile',
source_factory = fs.Entry)
env['BUILDERS']['Jar'] = java_jar
return java_jar
def CreateJavaHBuilder(env):
try:
java_javah = env['BUILDERS']['JavaH']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
java_javah_com = SCons.Action.Action('$JAVAHCOM', '$JAVAHCOMSTR')
java_javah = SCons.Builder.Builder(action = java_javah_com,
src_suffix = '$JAVACLASSSUFFIX',
target_factory = fs.Entry,
source_factory = fs.File,
src_builder = 'JavaClassFile')
env['BUILDERS']['JavaH'] = java_javah
return java_javah
def CreateJavaClassFileBuilder(env):
try:
java_class_file = env['BUILDERS']['JavaClassFile']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
javac_com = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
java_class_file = SCons.Builder.Builder(action = javac_com,
emitter = {},
#suffix = '$JAVACLASSSUFFIX',
src_suffix = '$JAVASUFFIX',
src_builder = ['JavaFile'],
target_factory = fs.Entry,
source_factory = fs.File)
env['BUILDERS']['JavaClassFile'] = java_class_file
return java_class_file
def CreateJavaClassDirBuilder(env):
try:
java_class_dir = env['BUILDERS']['JavaClassDir']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
javac_com = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
java_class_dir = SCons.Builder.Builder(action = javac_com,
emitter = {},
target_factory = fs.Dir,
source_factory = fs.Dir)
env['BUILDERS']['JavaClassDir'] = java_class_dir
return java_class_dir
def CreateJavaFileBuilder(env):
try:
java_file = env['BUILDERS']['JavaFile']
except KeyError:
java_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$JAVASUFFIX'})
env['BUILDERS']['JavaFile'] = java_file
env['JAVASUFFIX'] = '.java'
return java_file
class ToolInitializerMethod(object):
"""
This is added to a construction environment in place of the method(s)
normally called for a Builder (env.Object, env.StaticObject,
etc.). When called, it has its associated ToolInitializer
object search the specified list of tools and apply the first
one that exists to the construction environment. It then calls
whatever builder was (presumably) added to the construction
environment in place of this particular instance.
def __init__(self, name, initializer):
"""
Note: we store the tool name as __name__ so it can be used by
the class that attaches this to a construction environment.
"""
self.__name__ = name
self.initializer = initializer
def get_builder(self, env):
"""
Returns the appropriate real Builder for this method name
after having the associated ToolInitializer object apply
the appropriate Tool module.
"""
builder = getattr(env, self.__name__)
self.initializer.apply_tools(env)
builder = getattr(env, self.__name__)
if builder is self:
# There was no Builder added, which means no valid Tool
# for this name was found (or possibly there's a mismatch
# between the name we were called by and the Builder name
# added by the Tool module).
return None
self.initializer.remove_methods(env)
return builder
def __call__(self, env, *args, **kw):
"""
"""
builder = self.get_builder(env)
if builder is None:
return [], []
return builder(*args, **kw)
class ToolInitializer(object):
"""
A class for delayed initialization of Tools modules.
Instances of this class associate a list of Tool modules with
a list of Builder method names that will be added by those Tool
modules. As part of instantiating this object for a particular
construction environment, we also add the appropriate
ToolInitializerMethod objects for the various Builder methods
that we want to use to delay Tool searches until necessary.
"""
def __init__(self, env, tools, names):
if not SCons.Util.is_List(tools):
tools = [tools]
if not SCons.Util.is_List(names):
names = [names]
self.env = env
self.tools = tools
self.names = names
self.methods = {}
for name in names:
method = ToolInitializerMethod(name, self)
self.methods[name] = method
env.AddMethod(method)
def remove_methods(self, env):
"""
Removes the methods that were added by the tool initialization
so we no longer copy and re-bind them when the construction
environment gets cloned.
"""
for method in list(self.methods.values()):
env.RemoveMethod(method)
def apply_tools(self, env):
"""
Searches the list of associated Tool modules for one that
exists, and applies that to the construction environment.
"""
for t in self.tools:
tool = SCons.Tool.Tool(t)
if tool.exists(env):
env.Tool(tool)
return
# If we fall through here, there was no tool module found.
# This is where we can put an informative error message
# about the inability to find the tool. We'll start doing
# this as we cut over more pre-defined Builder+Tools to use
# the ToolInitializer class.
def Initializers(env):
ToolInitializer(env, ['install'], ['_InternalInstall', '_InternalInstallAs', '_InternalInstallVersionedLib'])
def Install(self, *args, **kw):
return self._InternalInstall(*args, **kw)
def InstallAs(self, *args, **kw):
return self._InternalInstallAs(*args, **kw)
def InstallVersionedLib(self, *args, **kw):
return self._InternalInstallVersionedLib(*args, **kw)
env.AddMethod(Install)
env.AddMethod(InstallAs)
env.AddMethod(InstallVersionedLib)
def FindTool(tools, env):
for tool in tools:
t = Tool(tool)
if t.exists(env):
return tool
return None
def FindAllTools(tools, env):
def ToolExists(tool, env=env):
return Tool(tool).exists(env)
return list(filter (ToolExists, tools))
def tool_list(platform, env):
other_plat_tools=[]
# XXX this logic about what tool to prefer on which platform
# should be moved into either the platform files or
# the tool files themselves.
# The search orders here are described in the man page. If you
# change these search orders, update the man page as well.
if str(platform) == 'win32':
"prefer Microsoft tools on Windows"
linkers = ['mslink', 'gnulink', 'ilink', 'linkloc', 'ilink32' ]
c_compilers = ['msvc', 'mingw', 'gcc', 'intelc', 'icl', 'icc', 'cc', 'bcc32' ]
cxx_compilers = ['msvc', 'intelc', 'icc', 'g++', 'c++', 'bcc32' ]
assemblers = ['masm', 'nasm', 'gas', '386asm' ]
fortran_compilers = ['gfortran', 'g77', 'ifl', 'cvf', 'f95', 'f90', 'fortran']
ars = ['mslib', 'ar', 'tlib']
other_plat_tools = ['msvs', 'midl']
elif str(platform) == 'os2':
"prefer IBM tools on OS/2"
linkers = ['ilink', 'gnulink', ]#'mslink']
c_compilers = ['icc', 'gcc',]# 'msvc', 'cc']
cxx_compilers = ['icc', 'g++',]# 'msvc', 'c++']
assemblers = ['nasm',]# 'masm', 'gas']
fortran_compilers = ['ifl', 'g77']
ars = ['ar',]# 'mslib']
elif str(platform) == 'irix':
"prefer MIPSPro on IRIX"
linkers = ['sgilink', 'gnulink']
c_compilers = ['sgicc', 'gcc', 'cc']
cxx_compilers = ['sgic++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
ars = ['sgiar']
elif str(platform) == 'sunos':
"prefer Forte tools on SunOS"
linkers = ['sunlink', 'gnulink']
c_compilers = ['suncc', 'gcc', 'cc']
cxx_compilers = ['sunc++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['sunf95', 'sunf90', 'sunf77', 'f95', 'f90', 'f77',
'gfortran', 'g77', 'fortran']
ars = ['sunar']
elif str(platform) == 'hpux':
"prefer aCC tools on HP-UX"
linkers = ['hplink', 'gnulink']
c_compilers = ['hpcc', 'gcc', 'cc']
cxx_compilers = ['hpc++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
ars = ['ar']
elif str(platform) == 'aix':
"prefer AIX Visual Age tools on AIX"
linkers = ['aixlink', 'gnulink']
c_compilers = ['aixcc', 'gcc', 'cc']
cxx_compilers = ['aixc++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['f95', 'f90', 'aixf77', 'g77', 'fortran']
ars = ['ar']
elif str(platform) == 'darwin':
"prefer GNU tools on Mac OS X, except for some linkers and IBM tools"
linkers = ['applelink', 'gnulink']
c_compilers = ['gcc', 'cc']
cxx_compilers = ['g++', 'c++']
assemblers = ['as']
fortran_compilers = ['gfortran', 'f95', 'f90', 'g77']
ars = ['ar']
elif str(platform) == 'cygwin':
"prefer GNU tools on Cygwin, except for a platform-specific linker"
linkers = ['cyglink', 'mslink', 'ilink']
c_compilers = ['gcc', 'msvc', 'intelc', 'icc', 'cc']
cxx_compilers = ['g++', 'msvc', 'intelc', 'icc', 'c++']
assemblers = ['gas', 'nasm', 'masm']
fortran_compilers = ['gfortran', 'g77', 'ifort', 'ifl', 'f95', 'f90', 'f77']
ars = ['ar', 'mslib']
else:
"prefer GNU tools on all other platforms"
linkers = ['gnulink', 'mslink', 'ilink']
c_compilers = ['gcc', 'msvc', 'intelc', 'icc', 'cc']
cxx_compilers = ['g++', 'msvc', 'intelc', 'icc', 'c++']
assemblers = ['gas', 'nasm', 'masm']
fortran_compilers = ['gfortran', 'g77', 'ifort', 'ifl', 'f95', 'f90', 'f77']
ars = ['ar', 'mslib']
if not str(platform) == 'win32':
other_plat_tools += ['m4', 'rpm']
c_compiler = FindTool(c_compilers, env) or c_compilers[0]
# XXX this logic about what tool provides what should somehow be
# moved into the tool files themselves.
if c_compiler and c_compiler == 'mingw':
# MinGW contains a linker, C compiler, C++ compiler,
# Fortran compiler, archiver and assembler:
cxx_compiler = None
linker = None
assembler = None
fortran_compiler = None
ar = None
else:
# Don't use g++ if the C compiler has built-in C++ support:
if c_compiler in ('msvc', 'intelc', 'icc'):
cxx_compiler = None
else:
cxx_compiler = FindTool(cxx_compilers, env) or cxx_compilers[0]
linker = FindTool(linkers, env) or linkers[0]
assembler = FindTool(assemblers, env) or assemblers[0]
fortran_compiler = FindTool(fortran_compilers, env) or fortran_compilers[0]
ar = FindTool(ars, env) or ars[0]
d_compilers = ['dmd', 'gdc', 'ldc']
d_compiler = FindTool(d_compilers, env) or d_compilers[0]
other_tools = FindAllTools(other_plat_tools + [
#TODO: merge 'install' into 'filesystem' and
# make 'filesystem' the default
'filesystem',
'wix', #'midl', 'msvs',
# Parser generators
'lex', 'yacc',
# Foreign function interface
'rpcgen', 'swig',
# Java
'jar', 'javac', 'javah', 'rmic',
# TeX
'dvipdf', 'dvips', 'gs',
'tex', 'latex', 'pdflatex', 'pdftex',
# Archivers
'tar', 'zip',
# SourceCode factories
'BitKeeper', 'CVS', 'Perforce',
'RCS', 'SCCS', # 'Subversion',
], env)
tools = ([linker, c_compiler, cxx_compiler,
fortran_compiler, assembler, ar, d_compiler]
+ other_tools)
return [x for x in tools if x]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
sabi0/intellij-community
|
refs/heads/master
|
python/testData/resolve/multiFile/fromImportSubModuleDunderAll/pkg1/__init__.py
|
148
|
__all__ = ['m1']
|
jgeewax/googlepersonfinder
|
refs/heads/master
|
app/multiview.py
|
1
|
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from model import *
from utils import *
import prefix
import pfif
import reveal
import sys
# Fields to show for side-by-side comparison.
# Alas, we cannot use pfif.PFIF_1_2.fields exactly, because we store
# home_postal_code as home_zip in the datastore.
COMPARE_FIELDS = pfif.PFIF_1_2.fields['person'][:] # Make a copy.
COMPARE_FIELDS[COMPARE_FIELDS.index('home_postal_code')] = 'home_zip'
class MultiView(Handler):
def get(self):
# To handle multiple persons, we create a single object where each property
# is a list of values, one for each person. This makes page rendering
# easier.
person = dict([(prop, []) for prop in COMPARE_FIELDS])
any = dict([(prop, None) for prop in COMPARE_FIELDS])
# Get all persons from db.
# TODO: Can later optimize to use fewer DB calls.
for num in range(1,10):
id = self.request.get('id%d' % num)
if not id:
break
p = Person.get_by_person_record_id(id)
for prop in COMPARE_FIELDS:
val = getattr(p, prop)
person[prop].append(val)
any[prop] = any[prop] or val
# Check if private info should be revealed.
content_id = 'multiview:' + ','.join(person['person_record_id'])
reveal_url = reveal.make_reveal_url(self.request.url, content_id)
show_private_info = reveal.verify(content_id, self.params.signature)
# TODO: Handle no persons found.
# Add a calculated full name property - used in the title.
person['full_name'] = [fname + ' ' + lname for fname, lname in
zip(person['first_name'], person['last_name']) ]
standalone = self.request.get('standalone')
# Note: we're not showing notes and linked persons information here at the
# moment.
self.render('templates/multiview.html', params=self.params,
person=person, any=any, standalone=standalone,
cols=len(person['first_name']) + 1,
onload_function="view_page_loaded()", markdup=True,
show_private_info=show_private_info, reveal_url=reveal_url)
def post(self):
if not self.params.text:
return self.render('templates/error.html',
message=_('Message is required. Please go back and try again.'))
if not self.params.author_name:
return self.render('templates/error.html',
message=_('Your name is required in the "About you" section. Please go back and try again.'))
# TODO: To reduce possible abuse, we currently limit matching to 3
# persons. We could guard using e.g. an XSRF token, which I don't know
# how to build in GAE.
ids = set()
for ind in range(1,4):
id = getattr(self.params, 'id%d' % ind)
if not id:
break
ids.add(id)
if len(ids) > 1:
notes = []
for person_id in ids:
for other_id in ids - set([person_id]):
note = Note(
person_record_id=person_id,
linked_person_record_id=other_id,
text=self.params.text,
author_name=self.params.author_name,
author_phone=self.params.author_phone,
author_email=self.params.author_email,
source_date=datetime.now())
notes.append(note)
db.put(notes)
self.redirect('/view', id=self.params.id1)
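# Illustrative sketch, not part of the original handler: the dict-of-lists
# merge performed in MultiView.get above, shown on plain dictionaries.
def _demo_merge(records, fields):
    merged = dict([(f, [r.get(f) for r in records]) for f in fields])
    any_value = dict([(f, reduce(lambda a, b: a or b, merged[f], None))
                      for f in fields])
    return merged, any_value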
if __name__ == '__main__':
run([('/multiview', MultiView)], debug=False)
|
drednout/letspython
|
refs/heads/master
|
lesson5/epic_datetime_bug.py
|
1
|
import datetime
def get_midnight_datetime(dt=datetime.datetime.now()):
    # Bug (the point of this lesson file): the default argument is evaluated
    # once, at import time, so later calls without an argument reuse that
    # stale timestamp instead of "now".
    return datetime.datetime(dt.year, dt.month, dt.day)
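# A minimal fixed variant (illustrative, not part of the original lesson):
# default to None and resolve "now" at call time instead of import time.
def get_midnight_datetime_fixed(dt=None):
    if dt is None:
        dt = datetime.datetime.now()
    return datetime.datetime(dt.year, dt.month, dt.day)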
if __name__ == "__main__":
print(get_midnight_datetime())
|
OshynSong/scikit-learn
|
refs/heads/master
|
sklearn/tests/test_random_projection.py
|
79
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non-zero entries and
# Gaussian-distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
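# Hedged illustration (not an original test): johnson_lindenstrauss_min_dim
# returns the smallest embedding dimension that preserves pairwise distances
# within a (1 +/- eps) factor. The value below matches the expected error
# message in test_too_many_samples_to_find_a_safe_embedding further down.
def _example_jl_min_dim():
    return johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.1)  # == 5920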
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
    # All random matrices should produce a transformation matrix
    # with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
    # Check some statistical properties of the Gaussian random matrix.
    # Check that the random matrix follows the proper distribution:
    # each element a_ij of A is drawn from
    # a_ij ~ N(0.0, 1 / n_components).
    #
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statistical properties of the sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
        # Check that the random matrix follows the proper distribution:
        # each element a_ij of A is drawn from
        #
        # - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
        # -  0                            with probability 1 - 1 / s
        # - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
        #
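        # (Each indicator A == v is a Bernoulli(p) variable, so the mean
        # checks below use p and the variance checks use p * (1 - p).)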
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
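# Hedged usage sketch (not an original test): the transformers exercised
# above follow the standard fit/transform estimator API.
def _example_usage():
    rp = GaussianRandomProjection(n_components=10, random_state=0)
    return rp.fit_transform(data)  # shape: (n_samples, 10)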
|
RAtechntukan/Sick-Beard
|
refs/heads/development
|
sickbeard/webserve.py
|
4
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import time
import urllib
import re
import threading
import datetime
import random
import locale
from Cheetah.Template import Template
import cherrypy.lib
import sickbeard
from sickbeard import config, sab
from sickbeard import clients
from sickbeard import history, notifiers, processTV
from sickbeard import ui
from sickbeard import logger, helpers, exceptions, classes, db
from sickbeard import encodingKludge as ek
from sickbeard import search_queue
from sickbeard import image_cache
from sickbeard import scene_exceptions
from sickbeard import naming
from sickbeard import subtitles
from sickbeard.providers import newznab
from sickbeard.common import Quality, Overview, statusStrings
from sickbeard.common import SNATCHED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED
from sickbeard.exceptions import ex
from sickbeard.webapi import Api
from lib.tvdb_api import tvdb_api
from lib.dateutil import tz
import network_timezones
import subliminal
try:
import json
except ImportError:
from lib import simplejson as json
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from sickbeard import browser
class PageTemplate (Template):
def __init__(self, *args, **KWs):
KWs['file'] = os.path.join(sickbeard.PROG_DIR, "data/interfaces/default/", KWs['file'])
super(PageTemplate, self).__init__(*args, **KWs)
self.sbRoot = sickbeard.WEB_ROOT
self.sbHttpPort = sickbeard.WEB_PORT
self.sbHttpsPort = sickbeard.WEB_PORT
self.sbHttpsEnabled = sickbeard.ENABLE_HTTPS
if cherrypy.request.headers['Host'][0] == '[':
self.sbHost = re.match("^\[.*\]", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0)
else:
self.sbHost = re.match("^[^:]+", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0)
self.projectHomePage = "http://code.google.com/p/sickbeard/"
if sickbeard.NZBS and sickbeard.NZBS_UID and sickbeard.NZBS_HASH:
logger.log(u"NZBs.org has been replaced, please check the config to configure the new provider!", logger.ERROR)
ui.notifications.error("NZBs.org Config Update", "NZBs.org has a new site. Please <a href=\""+sickbeard.WEB_ROOT+"/config/providers\">update your config</a> with the api key from <a href=\"http://nzbs.org/login\">http://nzbs.org</a> and then disable the old NZBs.org provider.")
if "X-Forwarded-Host" in cherrypy.request.headers:
self.sbHost = cherrypy.request.headers['X-Forwarded-Host']
if "X-Forwarded-Port" in cherrypy.request.headers:
self.sbHttpPort = cherrypy.request.headers['X-Forwarded-Port']
self.sbHttpsPort = self.sbHttpPort
if "X-Forwarded-Proto" in cherrypy.request.headers:
self.sbHttpsEnabled = True if cherrypy.request.headers['X-Forwarded-Proto'] == 'https' else False
logPageTitle = 'Logs & Errors'
if len(classes.ErrorViewer.errors):
logPageTitle += ' ('+str(len(classes.ErrorViewer.errors))+')'
self.logPageTitle = logPageTitle
self.sbPID = str(sickbeard.PID)
self.menu = [
{ 'title': 'Home', 'key': 'home' },
{ 'title': 'Coming Episodes', 'key': 'comingEpisodes' },
{ 'title': 'History', 'key': 'history' },
{ 'title': 'Manage', 'key': 'manage' },
{ 'title': 'Config', 'key': 'config' },
{ 'title': logPageTitle, 'key': 'errorlogs' },
]
def redirect(abspath, *args, **KWs):
assert abspath[0] == '/'
raise cherrypy.HTTPRedirect(sickbeard.WEB_ROOT + abspath, *args, **KWs)
class TVDBWebUI:
def __init__(self, config, log=None):
self.config = config
self.log = log
def selectSeries(self, allSeries):
searchList = ",".join([x['id'] for x in allSeries])
showDirList = ""
for curShowDir in self.config['_showDir']:
showDirList += "showDir="+curShowDir+"&"
redirect("/home/addShows/addShow?" + showDirList + "seriesList=" + searchList)
def _munge(string):
return unicode(string).encode('utf-8', 'xmlcharrefreplace')
def _genericMessage(subject, message):
t = PageTemplate(file="genericMessage.tmpl")
t.submenu = HomeMenu()
t.subject = subject
t.message = message
return _munge(t)
def _getEpisode(show, season, episode):
if show == None or season == None or episode == None:
return "Invalid parameters"
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return "Show not in show list"
epObj = showObj.getEpisode(int(season), int(episode))
if epObj == None:
return "Episode couldn't be retrieved"
return epObj
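# _getEpisode returns either a TVEpisode object or a plain error string;
# callers are expected to check which of the two they received.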
ManageMenu = [
{ 'title': 'Backlog Overview', 'path': 'manage/backlogOverview' },
{ 'title': 'Manage Searches', 'path': 'manage/manageSearches' },
{ 'title': 'Episode Status Management', 'path': 'manage/episodeStatuses' },
]
if sickbeard.USE_SUBTITLES:
ManageMenu.append({ 'title': 'Missed Subtitle Management', 'path': 'manage/subtitleMissed' })
class ManageSearches:
@cherrypy.expose
def index(self):
t = PageTemplate(file="manage_manageSearches.tmpl")
#t.backlogPI = sickbeard.backlogSearchScheduler.action.getProgressIndicator()
t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused() #@UndefinedVariable
t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() #@UndefinedVariable
t.searchStatus = sickbeard.currentSearchScheduler.action.amActive #@UndefinedVariable
t.submenu = ManageMenu
return _munge(t)
@cherrypy.expose
def forceSearch(self):
# force it to run the next time it looks
result = sickbeard.currentSearchScheduler.forceRun()
if result:
logger.log(u"Search forced")
ui.notifications.message('Episode search started',
'Note: RSS feeds may not be updated if retrieved recently')
redirect("/manage/manageSearches")
@cherrypy.expose
def pauseBacklog(self, paused=None):
if paused == "1":
sickbeard.searchQueueScheduler.action.pause_backlog() #@UndefinedVariable
else:
sickbeard.searchQueueScheduler.action.unpause_backlog() #@UndefinedVariable
redirect("/manage/manageSearches")
@cherrypy.expose
def forceVersionCheck(self):
# force a check to see if there is a new version
result = sickbeard.versionCheckScheduler.action.check_for_new_version(force=True) #@UndefinedVariable
if result:
logger.log(u"Forcing version check")
redirect("/manage/manageSearches")
class Manage:
manageSearches = ManageSearches()
@cherrypy.expose
def index(self):
t = PageTemplate(file="manage.tmpl")
t.submenu = ManageMenu
return _munge(t)
@cherrypy.expose
def showEpisodeStatuses(self, tvdb_id, whichStatus):
myDB = db.DBConnection()
status_list = [int(whichStatus)]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH
cur_show_results = myDB.select("SELECT season, episode, name FROM tv_episodes WHERE showid = ? AND season != 0 AND status IN ("+','.join(['?']*len(status_list))+")", [int(tvdb_id)] + status_list)
result = {}
for cur_result in cur_show_results:
cur_season = int(cur_result["season"])
cur_episode = int(cur_result["episode"])
if cur_season not in result:
result[cur_season] = {}
result[cur_season][cur_episode] = cur_result["name"]
return json.dumps(result)
@cherrypy.expose
def episodeStatuses(self, whichStatus=None):
if whichStatus:
whichStatus = int(whichStatus)
status_list = [whichStatus]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH
else:
status_list = []
t = PageTemplate(file="manage_episodeStatuses.tmpl")
t.submenu = ManageMenu
t.whichStatus = whichStatus
# if we have no status then this is as far as we need to go
if not status_list:
return _munge(t)
myDB = db.DBConnection()
status_results = myDB.select("SELECT show_name, tv_shows.tvdb_id as tvdb_id FROM tv_episodes, tv_shows WHERE tv_episodes.status IN ("+','.join(['?']*len(status_list))+") AND season != 0 AND tv_episodes.showid = tv_shows.tvdb_id ORDER BY show_name", status_list)
ep_counts = {}
show_names = {}
sorted_show_ids = []
for cur_status_result in status_results:
cur_tvdb_id = int(cur_status_result["tvdb_id"])
if cur_tvdb_id not in ep_counts:
ep_counts[cur_tvdb_id] = 1
else:
ep_counts[cur_tvdb_id] += 1
show_names[cur_tvdb_id] = cur_status_result["show_name"]
if cur_tvdb_id not in sorted_show_ids:
sorted_show_ids.append(cur_tvdb_id)
t.show_names = show_names
t.ep_counts = ep_counts
t.sorted_show_ids = sorted_show_ids
return _munge(t)
@cherrypy.expose
def changeEpisodeStatuses(self, oldStatus, newStatus, *args, **kwargs):
status_list = [int(oldStatus)]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH
to_change = {}
# make a list of all shows and their associated args
for arg in kwargs:
tvdb_id, what = arg.split('-')
# we don't care about unchecked checkboxes
if kwargs[arg] != 'on':
continue
if tvdb_id not in to_change:
to_change[tvdb_id] = []
to_change[tvdb_id].append(what)
myDB = db.DBConnection()
for cur_tvdb_id in to_change:
# get a list of all the eps we want to change if they just said "all"
if 'all' in to_change[cur_tvdb_id]:
all_eps_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE status IN ("+','.join(['?']*len(status_list))+") AND season != 0 AND showid = ?", status_list + [cur_tvdb_id])
all_eps = [str(x["season"])+'x'+str(x["episode"]) for x in all_eps_results]
to_change[cur_tvdb_id] = all_eps
Home().setStatus(cur_tvdb_id, '|'.join(to_change[cur_tvdb_id]), newStatus, direct=True)
redirect('/manage/episodeStatuses')
@cherrypy.expose
def showSubtitleMissed(self, tvdb_id, whichSubs):
myDB = db.DBConnection()
cur_show_results = myDB.select("SELECT season, episode, name, subtitles FROM tv_episodes WHERE showid = ? AND season != 0 AND status LIKE '%4'", [int(tvdb_id)])
result = {}
for cur_result in cur_show_results:
if whichSubs == 'all':
if len(set(cur_result["subtitles"].split(',')).intersection(set(subtitles.wantedLanguages()))) >= len(subtitles.wantedLanguages()):
continue
elif whichSubs in cur_result["subtitles"].split(','):
continue
cur_season = int(cur_result["season"])
cur_episode = int(cur_result["episode"])
if cur_season not in result:
result[cur_season] = {}
if cur_episode not in result[cur_season]:
result[cur_season][cur_episode] = {}
result[cur_season][cur_episode]["name"] = cur_result["name"]
result[cur_season][cur_episode]["subtitles"] = ",".join(subliminal.language.Language(subtitle).alpha2 for subtitle in cur_result["subtitles"].split(',')) if not cur_result["subtitles"] == '' else ''
return json.dumps(result)
@cherrypy.expose
def subtitleMissed(self, whichSubs=None):
t = PageTemplate(file="manage_subtitleMissed.tmpl")
t.submenu = ManageMenu
t.whichSubs = whichSubs
if not whichSubs:
return _munge(t)
myDB = db.DBConnection()
status_results = myDB.select("SELECT show_name, tv_shows.tvdb_id as tvdb_id, tv_episodes.subtitles subtitles FROM tv_episodes, tv_shows WHERE tv_shows.subtitles = 1 AND tv_episodes.status LIKE '%4' AND tv_episodes.season != 0 AND tv_episodes.showid = tv_shows.tvdb_id ORDER BY show_name")
ep_counts = {}
show_names = {}
sorted_show_ids = []
for cur_status_result in status_results:
if whichSubs == 'all':
if len(set(cur_status_result["subtitles"].split(',')).intersection(set(subtitles.wantedLanguages()))) >= len(subtitles.wantedLanguages()):
continue
elif whichSubs in cur_status_result["subtitles"].split(','):
continue
cur_tvdb_id = int(cur_status_result["tvdb_id"])
if cur_tvdb_id not in ep_counts:
ep_counts[cur_tvdb_id] = 1
else:
ep_counts[cur_tvdb_id] += 1
show_names[cur_tvdb_id] = cur_status_result["show_name"]
if cur_tvdb_id not in sorted_show_ids:
sorted_show_ids.append(cur_tvdb_id)
t.show_names = show_names
t.ep_counts = ep_counts
t.sorted_show_ids = sorted_show_ids
return _munge(t)
@cherrypy.expose
def downloadSubtitleMissed(self, *args, **kwargs):
to_download = {}
# make a list of all shows and their associated args
for arg in kwargs:
tvdb_id, what = arg.split('-')
# we don't care about unchecked checkboxes
if kwargs[arg] != 'on':
continue
if tvdb_id not in to_download:
to_download[tvdb_id] = []
to_download[tvdb_id].append(what)
for cur_tvdb_id in to_download:
# get a list of all the eps we want to download subtitles if they just said "all"
if 'all' in to_download[cur_tvdb_id]:
myDB = db.DBConnection()
all_eps_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE status LIKE '%4' AND season != 0 AND showid = ?", [cur_tvdb_id])
to_download[cur_tvdb_id] = [str(x["season"])+'x'+str(x["episode"]) for x in all_eps_results]
for epResult in to_download[cur_tvdb_id]:
                season, episode = epResult.split('x')
show = sickbeard.helpers.findCertainShow(sickbeard.showList, int(cur_tvdb_id))
subtitles = show.getEpisode(int(season), int(episode)).downloadSubtitles()
redirect('/manage/subtitleMissed')
@cherrypy.expose
def backlogShow(self, tvdb_id):
show_obj = helpers.findCertainShow(sickbeard.showList, int(tvdb_id))
if show_obj:
sickbeard.backlogSearchScheduler.action.searchBacklog([show_obj]) #@UndefinedVariable
redirect("/manage/backlogOverview")
@cherrypy.expose
def backlogOverview(self):
t = PageTemplate(file="manage_backlogOverview.tmpl")
t.submenu = ManageMenu
myDB = db.DBConnection()
showCounts = {}
showCats = {}
showSQLResults = {}
for curShow in sickbeard.showList:
epCounts = {}
epCats = {}
epCounts[Overview.SKIPPED] = 0
epCounts[Overview.WANTED] = 0
epCounts[Overview.QUAL] = 0
epCounts[Overview.GOOD] = 0
epCounts[Overview.UNAIRED] = 0
epCounts[Overview.SNATCHED] = 0
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC", [curShow.tvdbid])
for curResult in sqlResults:
curEpCat = curShow.getOverview(int(curResult["status"]))
epCats[str(curResult["season"]) + "x" + str(curResult["episode"])] = curEpCat
epCounts[curEpCat] += 1
showCounts[curShow.tvdbid] = epCounts
showCats[curShow.tvdbid] = epCats
showSQLResults[curShow.tvdbid] = sqlResults
t.showCounts = showCounts
t.showCats = showCats
t.showSQLResults = showSQLResults
return _munge(t)
@cherrypy.expose
def massEdit(self, toEdit=None):
t = PageTemplate(file="manage_massEdit.tmpl")
t.submenu = ManageMenu
if not toEdit:
redirect("/manage")
showIDs = toEdit.split("|")
showList = []
for curID in showIDs:
curID = int(curID)
showObj = helpers.findCertainShow(sickbeard.showList, curID)
if showObj:
showList.append(showObj)
flatten_folders_all_same = True
last_flatten_folders = None
paused_all_same = True
last_paused = None
frenched_all_same = True
last_frenched = None
quality_all_same = True
last_quality = None
subtitles_all_same = True
last_subtitles = None
lang_all_same = True
last_lang_metadata= None
lang_audio_all_same = True
last_lang_audio = None
root_dir_list = []
for curShow in showList:
cur_root_dir = ek.ek(os.path.dirname, curShow._location)
if cur_root_dir not in root_dir_list:
root_dir_list.append(cur_root_dir)
# if we know they're not all the same then no point even bothering
if paused_all_same:
# if we had a value already and this value is different then they're not all the same
if last_paused not in (curShow.paused, None):
paused_all_same = False
else:
last_paused = curShow.paused
if frenched_all_same:
# if we had a value already and this value is different then they're not all the same
if last_frenched not in (curShow.frenchsearch, None):
frenched_all_same = False
else:
last_frenched = curShow.frenchsearch
if flatten_folders_all_same:
if last_flatten_folders not in (None, curShow.flatten_folders):
flatten_folders_all_same = False
else:
last_flatten_folders = curShow.flatten_folders
if quality_all_same:
if last_quality not in (None, curShow.quality):
quality_all_same = False
else:
last_quality = curShow.quality
if subtitles_all_same:
if last_subtitles not in (None, curShow.subtitles):
subtitles_all_same = False
else:
last_subtitles = curShow.subtitles
if lang_all_same:
if last_lang_metadata not in (None, curShow.lang):
lang_all_same = False
else:
last_lang_metadata = curShow.lang
if lang_audio_all_same:
if last_lang_audio not in (None, curShow.audio_lang):
lang_audio_all_same = False
else:
last_lang_audio = curShow.audio_lang
t.showList = toEdit
t.paused_value = last_paused if paused_all_same else None
t.frenched_value = last_frenched if frenched_all_same else None
t.flatten_folders_value = last_flatten_folders if flatten_folders_all_same else None
t.quality_value = last_quality if quality_all_same else None
t.subtitles_value = last_subtitles if subtitles_all_same else None
t.root_dir_list = root_dir_list
t.lang_value = last_lang_metadata if lang_all_same else None
t.audio_value = last_lang_audio if lang_audio_all_same else None
return _munge(t)
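    # Hedged refactoring sketch (hypothetical, not part of Sick Beard): the
    # eight "all the same?" blocks in massEdit above share one shape and
    # could collapse to a helper such as
    #
    #   def _common_value(values):
    #       first = values[0]
    #       return first if all(v == first for v in values) else None
    #
    # with e.g. paused_value = _common_value([s.paused for s in showList]).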
@cherrypy.expose
def massEditSubmit(self, paused=None, frenched=None, flatten_folders=None, quality_preset=False, subtitles=None,
                       anyQualities=[], bestQualities=[], tvdbLang=None, audioLang=None, toEdit=None, *args, **kwargs):
dir_map = {}
for cur_arg in kwargs:
if not cur_arg.startswith('orig_root_dir_'):
continue
which_index = cur_arg.replace('orig_root_dir_', '')
end_dir = kwargs['new_root_dir_'+which_index]
dir_map[kwargs[cur_arg]] = end_dir
showIDs = toEdit.split("|")
errors = []
for curShow in showIDs:
curErrors = []
showObj = helpers.findCertainShow(sickbeard.showList, int(curShow))
if not showObj:
continue
cur_root_dir = ek.ek(os.path.dirname, showObj._location)
cur_show_dir = ek.ek(os.path.basename, showObj._location)
if cur_root_dir in dir_map and cur_root_dir != dir_map[cur_root_dir]:
new_show_dir = ek.ek(os.path.join, dir_map[cur_root_dir], cur_show_dir)
logger.log(u"For show "+showObj.name+" changing dir from "+showObj._location+" to "+new_show_dir)
else:
new_show_dir = showObj._location
if paused == 'keep':
new_paused = showObj.paused
else:
new_paused = True if paused == 'enable' else False
new_paused = 'on' if new_paused else 'off'
if frenched == 'keep':
new_frenched = showObj.frenchsearch
else:
new_frenched = True if frenched == 'enable' else False
new_frenched = 'on' if new_frenched else 'off'
if flatten_folders == 'keep':
new_flatten_folders = showObj.flatten_folders
else:
new_flatten_folders = True if flatten_folders == 'enable' else False
new_flatten_folders = 'on' if new_flatten_folders else 'off'
if subtitles == 'keep':
new_subtitles = showObj.subtitles
else:
new_subtitles = True if subtitles == 'enable' else False
new_subtitles = 'on' if new_subtitles else 'off'
if quality_preset == 'keep':
anyQualities, bestQualities = Quality.splitQuality(showObj.quality)
if tvdbLang == 'None':
new_lang = 'en'
else:
new_lang = tvdbLang
if audioLang == 'keep':
                new_audio_lang = showObj.audio_lang
else:
new_audio_lang = audioLang
exceptions_list = []
curErrors += Home().editShow(curShow, new_show_dir, anyQualities, bestQualities, exceptions_list, new_flatten_folders, new_paused, new_frenched, subtitles=new_subtitles, tvdbLang=new_lang, audio_lang=new_audio_lang, directCall=True)
if curErrors:
logger.log(u"Errors: "+str(curErrors), logger.ERROR)
errors.append('<b>%s:</b>\n<ul>' % showObj.name + ' '.join(['<li>%s</li>' % error for error in curErrors]) + "</ul>")
if len(errors) > 0:
ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"),
" ".join(errors))
redirect("/manage")
@cherrypy.expose
def massUpdate(self, toUpdate=None, toRefresh=None, toRename=None, toDelete=None, toMetadata=None, toSubtitle=None):
if toUpdate != None:
toUpdate = toUpdate.split('|')
else:
toUpdate = []
if toRefresh != None:
toRefresh = toRefresh.split('|')
else:
toRefresh = []
if toRename != None:
toRename = toRename.split('|')
else:
toRename = []
if toSubtitle != None:
toSubtitle = toSubtitle.split('|')
else:
toSubtitle = []
if toDelete != None:
toDelete = toDelete.split('|')
else:
toDelete = []
if toMetadata != None:
toMetadata = toMetadata.split('|')
else:
toMetadata = []
errors = []
refreshes = []
updates = []
renames = []
subtitles = []
for curShowID in set(toUpdate+toRefresh+toRename+toSubtitle+toDelete+toMetadata):
if curShowID == '':
continue
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(curShowID))
if showObj == None:
continue
if curShowID in toDelete:
showObj.deleteShow()
# don't do anything else if it's being deleted
continue
if curShowID in toUpdate:
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, True) #@UndefinedVariable
updates.append(showObj.name)
except exceptions.CantUpdateException, e:
errors.append("Unable to update show "+showObj.name+": "+ex(e))
# don't bother refreshing shows that were updated anyway
if curShowID in toRefresh and curShowID not in toUpdate:
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
refreshes.append(showObj.name)
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh show "+showObj.name+": "+ex(e))
if curShowID in toRename:
sickbeard.showQueueScheduler.action.renameShowEpisodes(showObj) #@UndefinedVariable
renames.append(showObj.name)
if curShowID in toSubtitle:
sickbeard.showQueueScheduler.action.downloadSubtitles(showObj) #@UndefinedVariable
subtitles.append(showObj.name)
if len(errors) > 0:
ui.notifications.error("Errors encountered",
                    '<br />\n'.join(errors))
messageDetail = ""
if len(updates) > 0:
messageDetail += "<br /><b>Updates</b><br /><ul><li>"
messageDetail += "</li><li>".join(updates)
messageDetail += "</li></ul>"
if len(refreshes) > 0:
messageDetail += "<br /><b>Refreshes</b><br /><ul><li>"
messageDetail += "</li><li>".join(refreshes)
messageDetail += "</li></ul>"
if len(renames) > 0:
messageDetail += "<br /><b>Renames</b><br /><ul><li>"
messageDetail += "</li><li>".join(renames)
messageDetail += "</li></ul>"
if len(subtitles) > 0:
messageDetail += "<br /><b>Subtitles</b><br /><ul><li>"
messageDetail += "</li><li>".join(subtitles)
messageDetail += "</li></ul>"
if len(updates+refreshes+renames+subtitles) > 0:
ui.notifications.message("The following actions were queued:",
messageDetail)
redirect("/manage")
class History:
@cherrypy.expose
def index(self, limit=100):
myDB = db.DBConnection()
# sqlResults = myDB.select("SELECT h.*, show_name, name FROM history h, tv_shows s, tv_episodes e WHERE h.showid=s.tvdb_id AND h.showid=e.showid AND h.season=e.season AND h.episode=e.episode ORDER BY date DESC LIMIT "+str(numPerPage*(p-1))+", "+str(numPerPage))
if limit == "0":
sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id ORDER BY date DESC")
else:
sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id ORDER BY date DESC LIMIT ?", [limit])
t = PageTemplate(file="history.tmpl")
t.historyResults = sqlResults
t.limit = limit
t.submenu = [
{ 'title': 'Clear History', 'path': 'history/clearHistory' },
{ 'title': 'Trim History', 'path': 'history/trimHistory' },
            { 'title': 'Truncate Episode Links', 'path': 'history/truncEplinks' },
            { 'title': 'Truncate Processed Files List', 'path': 'history/truncEpListProc' },
]
return _munge(t)
@cherrypy.expose
def clearHistory(self):
myDB = db.DBConnection()
myDB.action("DELETE FROM history WHERE 1=1")
ui.notifications.message('History cleared')
redirect("/history")
@cherrypy.expose
def trimHistory(self):
myDB = db.DBConnection()
myDB.action("DELETE FROM history WHERE date < "+str((datetime.datetime.today()-datetime.timedelta(days=30)).strftime(history.dateFormat)))
        ui.notifications.message('Removed history entries older than 30 days')
redirect("/history")
@cherrypy.expose
def truncEplinks(self):
myDB = db.DBConnection()
nbep=myDB.select("SELECT count(*) from episode_links")
myDB.action("DELETE FROM episode_links WHERE 1=1")
messnum = str(nbep[0][0]) + ' history links deleted'
ui.notifications.message('All Episode Links Removed', messnum)
redirect("/history")
@cherrypy.expose
def truncEpListProc(self):
myDB = db.DBConnection()
nbep=myDB.select("SELECT count(*) from processed_files")
myDB.action("DELETE FROM processed_files WHERE 1=1")
messnum = str(nbep[0][0]) + ' record for file processed delete'
ui.notifications.message('Clear list of file processed', messnum)
redirect("/history")
ConfigMenu = [
{ 'title': 'General', 'path': 'config/general/' },
{ 'title': 'Search Settings', 'path': 'config/search/' },
{ 'title': 'Search Providers', 'path': 'config/providers/' },
{ 'title': 'Subtitles Settings','path': 'config/subtitles/' },
{ 'title': 'Post Processing', 'path': 'config/postProcessing/' },
{ 'title': 'Notifications', 'path': 'config/notifications/' },
]
class ConfigGeneral:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_general.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveRootDirs(self, rootDirString=None):
sickbeard.ROOT_DIRS = rootDirString
sickbeard.save_config()
@cherrypy.expose
def saveAddShowDefaults(self, defaultFlattenFolders, defaultStatus, anyQualities, bestQualities, audio_lang, subtitles=None):
if anyQualities:
anyQualities = anyQualities.split(',')
else:
anyQualities = []
if bestQualities:
bestQualities = bestQualities.split(',')
else:
bestQualities = []
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
sickbeard.STATUS_DEFAULT = int(defaultStatus)
sickbeard.QUALITY_DEFAULT = int(newQuality)
sickbeard.AUDIO_SHOW_DEFAULT = str(audio_lang)
if defaultFlattenFolders == "true":
defaultFlattenFolders = 1
else:
defaultFlattenFolders = 0
sickbeard.FLATTEN_FOLDERS_DEFAULT = int(defaultFlattenFolders)
if subtitles == "true":
subtitles = 1
else:
subtitles = 0
sickbeard.SUBTITLES_DEFAULT = int(subtitles)
sickbeard.save_config()
@cherrypy.expose
def generateKey(self):
""" Return a new randomized API_KEY
"""
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Create some values to seed md5
t = str(time.time())
r = str(random.random())
# Create the md5 instance and give it the current time
m = md5(t)
# Update the md5 instance with the random variable
m.update(r)
# Return a hex digest of the md5, eg 49f68a5c8493ec2c0bf489821c21fc3b
logger.log(u"New API generated")
return m.hexdigest()
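        # Hedged aside (not part of Sick Beard): time() + random() is a weak
        # seed for a key; given an extra `import binascii`, something like
        # binascii.hexlify(os.urandom(16)) would yield 32 hex characters of
        # OS-grade randomness instead.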
@cherrypy.expose
def saveGeneral(self, log_dir=None, web_port=None, web_log=None, web_ipv6=None,
update_shows_on_start=None,launch_browser=None, web_username=None, use_api=None, api_key=None,
web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None, sort_article=None, french_column=None):
results = []
if web_ipv6 == "on":
web_ipv6 = 1
else:
web_ipv6 = 0
if web_log == "on":
web_log = 1
else:
web_log = 0
if launch_browser == "on":
launch_browser = 1
else:
launch_browser = 0
if update_shows_on_start == "on":
update_shows_on_start = 1
else:
update_shows_on_start = 0
if sort_article == "on":
sort_article = 1
else:
sort_article = 0
if french_column == "on":
french_column = 1
else:
french_column= 0
if version_notify == "on":
version_notify = 1
else:
version_notify = 0
if not config.change_LOG_DIR(log_dir):
results += ["Unable to create directory " + os.path.normpath(log_dir) + ", log dir not changed."]
sickbeard.UPDATE_SHOWS_ON_START = update_shows_on_start
sickbeard.LAUNCH_BROWSER = launch_browser
sickbeard.SORT_ARTICLE = sort_article
sickbeard.FRENCH_COLUMN = french_column
sickbeard.WEB_PORT = int(web_port)
sickbeard.WEB_IPV6 = web_ipv6
sickbeard.WEB_LOG = web_log
sickbeard.WEB_USERNAME = web_username
sickbeard.WEB_PASSWORD = web_password
if use_api == "on":
use_api = 1
else:
use_api = 0
sickbeard.USE_API = use_api
sickbeard.API_KEY = api_key
if enable_https == "on":
enable_https = 1
else:
enable_https = 0
sickbeard.ENABLE_HTTPS = enable_https
if not config.change_HTTPS_CERT(https_cert):
results += ["Unable to create directory " + os.path.normpath(https_cert) + ", https cert dir not changed."]
if not config.change_HTTPS_KEY(https_key):
results += ["Unable to create directory " + os.path.normpath(https_key) + ", https key dir not changed."]
config.change_VERSION_NOTIFY(version_notify)
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/general/")
class ConfigSearch:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_search.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveSearch(self, use_nzbs=None, use_torrents=None, nzb_dir=None, sab_username=None, sab_password=None,
sab_apikey=None, sab_category=None, sab_host=None, nzbget_password=None, nzbget_category=None, nzbget_host=None,
torrent_dir=None,torrent_method=None, nzb_method=None, usenet_retention=None, search_frequency=None, french_delay=None,
download_propers=None, download_french=None, torrent_username=None, torrent_password=None, torrent_host=None,
torrent_label=None, torrent_path=None, torrent_custom_url=None, torrent_ratio=None, torrent_paused=None, ignore_words=None,
                   prefered_method=None, torrent_use_ftp=None, ftp_host=None, ftp_port=None, ftp_timeout=None, ftp_passive=None, ftp_login=None,
ftp_password=None, ftp_remotedir=None):
results = []
if not config.change_NZB_DIR(nzb_dir):
results += ["Unable to create directory " + os.path.normpath(nzb_dir) + ", dir not changed."]
if not config.change_TORRENT_DIR(torrent_dir):
results += ["Unable to create directory " + os.path.normpath(torrent_dir) + ", dir not changed."]
config.change_SEARCH_FREQUENCY(search_frequency)
if download_propers == "on":
download_propers = 1
else:
download_propers = 0
if download_french == "on":
download_french = 1
else:
download_french = 0
if use_nzbs == "on":
use_nzbs = 1
else:
use_nzbs = 0
if use_torrents == "on":
use_torrents = 1
else:
use_torrents = 0
if usenet_retention == None:
usenet_retention = 200
if french_delay == None:
french_delay = 120
if ignore_words == None:
ignore_words = ""
if ftp_port == None:
ftp_port = 21
if ftp_timeout == None:
ftp_timeout = 120
sickbeard.USE_NZBS = use_nzbs
sickbeard.USE_TORRENTS = use_torrents
sickbeard.NZB_METHOD = nzb_method
sickbeard.PREFERED_METHOD = prefered_method
sickbeard.TORRENT_METHOD = torrent_method
sickbeard.USENET_RETENTION = int(usenet_retention)
sickbeard.FRENCH_DELAY = int(french_delay)
sickbeard.IGNORE_WORDS = ignore_words
sickbeard.DOWNLOAD_PROPERS = download_propers
sickbeard.DOWNLOAD_FRENCH = download_french
sickbeard.SAB_USERNAME = sab_username
sickbeard.SAB_PASSWORD = sab_password
sickbeard.SAB_APIKEY = sab_apikey.strip()
sickbeard.SAB_CATEGORY = sab_category
if sab_host and not re.match('https?://.*', sab_host):
sab_host = 'http://' + sab_host
if not sab_host.endswith('/'):
sab_host = sab_host + '/'
sickbeard.SAB_HOST = sab_host
sickbeard.NZBGET_PASSWORD = nzbget_password
sickbeard.NZBGET_CATEGORY = nzbget_category
sickbeard.NZBGET_HOST = nzbget_host
sickbeard.TORRENT_USERNAME = torrent_username
sickbeard.TORRENT_PASSWORD = torrent_password
sickbeard.TORRENT_LABEL = torrent_label
sickbeard.TORRENT_PATH = torrent_path
if torrent_custom_url == "on":
torrent_custom_url = 1
else:
torrent_custom_url = 0
sickbeard.TORRENT_CUSTOM_URL = torrent_custom_url
sickbeard.TORRENT_RATIO = torrent_ratio
if torrent_paused == "on":
torrent_paused = 1
else:
torrent_paused = 0
sickbeard.TORRENT_PAUSED = torrent_paused
if torrent_host and not re.match('https?://.*', torrent_host):
torrent_host = 'http://' + torrent_host
if not torrent_host.endswith('/'):
torrent_host = torrent_host + '/'
sickbeard.TORRENT_HOST = torrent_host
if torrent_use_ftp == "on":
torrent_use_ftp = 1
else:
torrent_use_ftp = 0
sickbeard.USE_TORRENT_FTP = torrent_use_ftp
sickbeard.FTP_HOST = ftp_host
sickbeard.FTP_PORT = ftp_port
sickbeard.FTP_TIMEOUT = ftp_timeout
if ftp_passive == "on":
ftp_passive = 1
else:
ftp_passive = 0
sickbeard.FTP_PASSIVE = ftp_passive
sickbeard.FTP_LOGIN = ftp_login
sickbeard.FTP_PASSWORD = ftp_password
sickbeard.FTP_DIR = ftp_remotedir
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/search/")
class ConfigPostProcessing:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_postProcessing.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def savePostProcessing(self, naming_pattern=None, naming_multi_ep=None,
xbmc_data=None, xbmc__frodo__data=None, mediabrowser_data=None, synology_data=None, sony_ps3_data=None, wdtv_data=None, tivo_data=None,
use_banner=None, keep_processed_dir=None, process_method=None, process_automatically=None, process_automatically_torrent=None, rename_episodes=None,
move_associated_files=None, tv_download_dir=None, torrent_download_dir=None, naming_custom_abd=None, naming_abd_pattern=None):
results = []
if not config.change_TV_DOWNLOAD_DIR(tv_download_dir):
results += ["Unable to create directory " + os.path.normpath(tv_download_dir) + ", dir not changed."]
if not config.change_TORRENT_DOWNLOAD_DIR(torrent_download_dir):
results += ["Unable to create directory " + os.path.normpath(torrent_download_dir) + ", dir not changed."]
if use_banner == "on":
use_banner = 1
else:
use_banner = 0
if process_automatically == "on":
process_automatically = 1
else:
process_automatically = 0
if process_automatically_torrent == "on":
process_automatically_torrent = 1
else:
process_automatically_torrent = 0
if rename_episodes == "on":
rename_episodes = 1
else:
rename_episodes = 0
if keep_processed_dir == "on":
keep_processed_dir = 1
else:
keep_processed_dir = 0
if move_associated_files == "on":
move_associated_files = 1
else:
move_associated_files = 0
if naming_custom_abd == "on":
naming_custom_abd = 1
else:
naming_custom_abd = 0
sickbeard.PROCESS_AUTOMATICALLY = process_automatically
sickbeard.PROCESS_AUTOMATICALLY_TORRENT = process_automatically_torrent
sickbeard.KEEP_PROCESSED_DIR = keep_processed_dir
sickbeard.PROCESS_METHOD = process_method
sickbeard.RENAME_EPISODES = rename_episodes
sickbeard.MOVE_ASSOCIATED_FILES = move_associated_files
sickbeard.NAMING_CUSTOM_ABD = naming_custom_abd
sickbeard.metadata_provider_dict['XBMC'].set_config(xbmc_data)
sickbeard.metadata_provider_dict['XBMC (Frodo)'].set_config(xbmc__frodo__data)
sickbeard.metadata_provider_dict['MediaBrowser'].set_config(mediabrowser_data)
sickbeard.metadata_provider_dict['Synology'].set_config(synology_data)
sickbeard.metadata_provider_dict['Sony PS3'].set_config(sony_ps3_data)
sickbeard.metadata_provider_dict['WDTV'].set_config(wdtv_data)
sickbeard.metadata_provider_dict['TIVO'].set_config(tivo_data)
if self.isNamingValid(naming_pattern, naming_multi_ep) != "invalid":
sickbeard.NAMING_PATTERN = naming_pattern
sickbeard.NAMING_MULTI_EP = int(naming_multi_ep)
sickbeard.NAMING_FORCE_FOLDERS = naming.check_force_season_folders()
else:
results.append("You tried saving an invalid naming config, not saving your naming settings")
if self.isNamingValid(naming_abd_pattern, None, True) != "invalid":
sickbeard.NAMING_ABD_PATTERN = naming_abd_pattern
elif naming_custom_abd:
results.append("You tried saving an invalid air-by-date naming config, not saving your air-by-date settings")
sickbeard.USE_BANNER = use_banner
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/postProcessing/")
@cherrypy.expose
def testNaming(self, pattern=None, multi=None, abd=False):
if multi != None:
multi = int(multi)
result = naming.test_name(pattern, multi, abd)
result = ek.ek(os.path.join, result['dir'], result['name'])
return result
@cherrypy.expose
def isNamingValid(self, pattern=None, multi=None, abd=False):
if pattern == None:
return "invalid"
# air by date shows just need one check, we don't need to worry about season folders
if abd:
is_valid = naming.check_valid_abd_naming(pattern)
require_season_folders = False
else:
# check validity of single and multi ep cases for the whole path
is_valid = naming.check_valid_naming(pattern, multi)
# check validity of single and multi ep cases for only the file name
require_season_folders = naming.check_force_season_folders(pattern, multi)
if is_valid and not require_season_folders:
return "valid"
elif is_valid and require_season_folders:
return "seasonfolders"
else:
return "invalid"
class ConfigProviders:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_providers.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def canAddNewznabProvider(self, name):
if not name:
return json.dumps({'error': 'Invalid name specified'})
providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
tempProvider = newznab.NewznabProvider(name, '')
if tempProvider.getID() in providerDict:
return json.dumps({'error': 'Exists as '+providerDict[tempProvider.getID()].name})
else:
return json.dumps({'success': tempProvider.getID()})
@cherrypy.expose
def saveNewznabProvider(self, name, url, key=''):
if not name or not url:
return '0'
if not url.endswith('/'):
url = url + '/'
providerDict = dict(zip([x.name for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
if name in providerDict:
if not providerDict[name].default:
providerDict[name].name = name
providerDict[name].url = url
providerDict[name].key = key
return providerDict[name].getID() + '|' + providerDict[name].configStr()
else:
newProvider = newznab.NewznabProvider(name, url, key)
sickbeard.newznabProviderList.append(newProvider)
return newProvider.getID() + '|' + newProvider.configStr()
@cherrypy.expose
def deleteNewznabProvider(self, id):
providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
if id not in providerDict or providerDict[id].default:
return '0'
# delete it from the list
sickbeard.newznabProviderList.remove(providerDict[id])
if id in sickbeard.PROVIDER_ORDER:
sickbeard.PROVIDER_ORDER.remove(id)
return '1'
@cherrypy.expose
def saveProviders(self, nzbmatrix_username=None, nzbmatrix_apikey=None,
nzbs_r_us_uid=None, nzbs_r_us_hash=None, newznab_string='',
omgwtfnzbs_uid=None, omgwtfnzbs_key=None,
tvtorrents_digest=None, tvtorrents_hash=None,
torrentleech_key=None,
btn_api_key=None,
                      newzbin_username=None, newzbin_password=None,
                      t411_username=None, t411_password=None,
                      ftdb_username=None, ftdb_password=None,
                      tpi_username=None, tpi_password=None,
                      addict_username=None, addict_password=None,
                      fnt_username=None, fnt_password=None,
                      xthor_username=None, xthor_password=None,
                      thinkgeek_username=None, thinkgeek_password=None,
gks_key=None,
ethor_key=None,
provider_order=None):
results = []
provider_str_list = provider_order.split()
provider_list = []
newznabProviderDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
finishedNames = []
# add all the newznab info we got into our list
for curNewznabProviderStr in newznab_string.split('!!!'):
if not curNewznabProviderStr:
continue
curName, curURL, curKey = curNewznabProviderStr.split('|')
newProvider = newznab.NewznabProvider(curName, curURL, curKey)
curID = newProvider.getID()
# if it already exists then update it
if curID in newznabProviderDict:
newznabProviderDict[curID].name = curName
newznabProviderDict[curID].url = curURL
newznabProviderDict[curID].key = curKey
else:
sickbeard.newznabProviderList.append(newProvider)
finishedNames.append(curID)
# delete anything that is missing
for curProvider in sickbeard.newznabProviderList:
if curProvider.getID() not in finishedNames:
sickbeard.newznabProviderList.remove(curProvider)
# do the enable/disable
for curProviderStr in provider_str_list:
curProvider, curEnabled = curProviderStr.split(':')
curEnabled = int(curEnabled)
provider_list.append(curProvider)
if curProvider == 'nzbs_r_us':
sickbeard.NZBSRUS = curEnabled
elif curProvider == 'nzbs_org_old':
sickbeard.NZBS = curEnabled
elif curProvider == 'nzbmatrix':
sickbeard.NZBMATRIX = curEnabled
elif curProvider == 'newzbin':
sickbeard.NEWZBIN = curEnabled
elif curProvider == 'bin_req':
sickbeard.BINREQ = curEnabled
elif curProvider == 'womble_s_index':
sickbeard.WOMBLE = curEnabled
elif curProvider == 'nzbx':
sickbeard.NZBX = curEnabled
elif curProvider == 'omgwtfnzbs':
sickbeard.OMGWTFNZBS = curEnabled
elif curProvider == 'ezrss':
sickbeard.EZRSS = curEnabled
elif curProvider == 'tvtorrents':
sickbeard.TVTORRENTS = curEnabled
elif curProvider == 'torrentleech':
sickbeard.TORRENTLEECH = curEnabled
elif curProvider == 'btn':
sickbeard.BTN = curEnabled
elif curProvider == 'binnewz':
sickbeard.BINNEWZ = curEnabled
elif curProvider == 't411':
sickbeard.T411 = curEnabled
elif curProvider == 'ftdb':
sickbeard.FTDB = curEnabled
elif curProvider == 'tpi':
sickbeard.TPI = curEnabled
elif curProvider == 'addict':
sickbeard.ADDICT = curEnabled
elif curProvider == 'fnt':
sickbeard.FNT = curEnabled
elif curProvider == 'xthor':
sickbeard.XTHOR = curEnabled
elif curProvider == 'thinkgeek':
sickbeard.THINKGEEK = curEnabled
elif curProvider == 'cpasbien':
sickbeard.Cpasbien = curEnabled
elif curProvider == 'kat':
sickbeard.kat = curEnabled
elif curProvider == 'piratebay':
sickbeard.THEPIRATEBAY = curEnabled
elif curProvider == 'gks':
sickbeard.GKS = curEnabled
elif curProvider == 'ethor':
sickbeard.ETHOR = curEnabled
elif curProvider in newznabProviderDict:
newznabProviderDict[curProvider].enabled = bool(curEnabled)
else:
logger.log(u"don't know what " + curProvider + " is, skipping")
sickbeard.TVTORRENTS_DIGEST = tvtorrents_digest.strip()
sickbeard.TVTORRENTS_HASH = tvtorrents_hash.strip()
sickbeard.TORRENTLEECH_KEY = torrentleech_key.strip()
sickbeard.ETHOR_KEY = ethor_key.strip()
sickbeard.BTN_API_KEY = btn_api_key.strip()
sickbeard.T411_USERNAME = t411_username
sickbeard.T411_PASSWORD = t411_password
sickbeard.FTDB_USERNAME = ftdb_username
sickbeard.FTDB_PASSWORD = ftdb_password
sickbeard.TPI_USERNAME = tpi_username
sickbeard.TPI_PASSWORD = tpi_password
sickbeard.ADDICT_USERNAME = addict_username
sickbeard.ADDICT_PASSWORD = addict_password
sickbeard.FNT_USERNAME = fnt_username
sickbeard.FNT_PASSWORD = fnt_password
sickbeard.XTHOR_USERNAME = xthor_username
sickbeard.XTHOR_PASSWORD = xthor_password
sickbeard.THINKGEEK_USERNAME = thinkgeek_username
sickbeard.THINKGEEK_PASSWORD = thinkgeek_password
sickbeard.NZBSRUS_UID = nzbs_r_us_uid.strip()
sickbeard.NZBSRUS_HASH = nzbs_r_us_hash.strip()
sickbeard.OMGWTFNZBS_UID = omgwtfnzbs_uid.strip()
sickbeard.OMGWTFNZBS_KEY = omgwtfnzbs_key.strip()
sickbeard.GKS_KEY = gks_key.strip()
sickbeard.PROVIDER_ORDER = provider_list
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/providers/")
class ConfigNotifications:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_notifications.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveNotifications(self, use_xbmc=None, xbmc_notify_onsnatch=None, xbmc_notify_ondownload=None, xbmc_update_onlyfirst=None, xbmc_notify_onsubtitledownload=None,
xbmc_update_library=None, xbmc_update_full=None, xbmc_host=None, xbmc_username=None, xbmc_password=None,
use_plex=None, plex_notify_onsnatch=None, plex_notify_ondownload=None, plex_notify_onsubtitledownload=None, plex_update_library=None,
plex_server_host=None, plex_host=None, plex_username=None, plex_password=None,
use_growl=None, growl_notify_onsnatch=None, growl_notify_ondownload=None, growl_notify_onsubtitledownload=None, growl_host=None, growl_password=None,
use_prowl=None, prowl_notify_onsnatch=None, prowl_notify_ondownload=None, prowl_notify_onsubtitledownload=None, prowl_api=None, prowl_priority=0,
use_twitter=None, twitter_notify_onsnatch=None, twitter_notify_ondownload=None, twitter_notify_onsubtitledownload=None,
use_boxcar=None, boxcar_notify_onsnatch=None, boxcar_notify_ondownload=None, boxcar_notify_onsubtitledownload=None, boxcar_username=None,
use_pushover=None, pushover_notify_onsnatch=None, pushover_notify_ondownload=None, pushover_notify_onsubtitledownload=None, pushover_userkey=None,
use_libnotify=None, libnotify_notify_onsnatch=None, libnotify_notify_ondownload=None, libnotify_notify_onsubtitledownload=None,
use_nmj=None, nmj_host=None, nmj_database=None, nmj_mount=None, use_synoindex=None,
use_nmjv2=None, nmjv2_host=None, nmjv2_dbloc=None, nmjv2_database=None,
use_trakt=None, trakt_username=None, trakt_password=None, trakt_api=None,trakt_remove_watchlist=None,trakt_use_watchlist=None,trakt_start_paused=None,trakt_method_add=None,
use_synologynotifier=None, synologynotifier_notify_onsnatch=None, synologynotifier_notify_ondownload=None, synologynotifier_notify_onsubtitledownload=None,
use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None, pytivo_notify_onsubtitledownload=None, pytivo_update_library=None,
pytivo_host=None, pytivo_share_name=None, pytivo_tivo_name=None,
use_nma=None, nma_notify_onsnatch=None, nma_notify_ondownload=None, nma_notify_onsubtitledownload=None, nma_api=None, nma_priority=0,
use_pushalot=None, pushalot_notify_onsnatch=None, pushalot_notify_ondownload=None, pushalot_notify_onsubtitledownload=None, pushalot_authorizationtoken=None,
use_pushbullet=None, pushbullet_notify_onsnatch=None, pushbullet_notify_ondownload=None, pushbullet_notify_onsubtitledownload=None, pushbullet_api=None, pushbullet_device=None, pushbullet_device_list=None,
use_mail=None, mail_username=None, mail_password=None, mail_server=None, mail_ssl=None, mail_from=None, mail_to=None, mail_notify_onsnatch=None ):
results = []
        # collapse the repeated four-line 'if option == "on"' blocks into calls
        # to the _checkbox_to_value() helper defined at module level above
        xbmc_notify_onsnatch = _checkbox_to_value(xbmc_notify_onsnatch)
        xbmc_notify_ondownload = _checkbox_to_value(xbmc_notify_ondownload)
        xbmc_notify_onsubtitledownload = _checkbox_to_value(xbmc_notify_onsubtitledownload)
        xbmc_update_library = _checkbox_to_value(xbmc_update_library)
        xbmc_update_full = _checkbox_to_value(xbmc_update_full)
        xbmc_update_onlyfirst = _checkbox_to_value(xbmc_update_onlyfirst)
        use_xbmc = _checkbox_to_value(use_xbmc)
        plex_update_library = _checkbox_to_value(plex_update_library)
        plex_notify_onsnatch = _checkbox_to_value(plex_notify_onsnatch)
        plex_notify_ondownload = _checkbox_to_value(plex_notify_ondownload)
        plex_notify_onsubtitledownload = _checkbox_to_value(plex_notify_onsubtitledownload)
        use_plex = _checkbox_to_value(use_plex)
        growl_notify_onsnatch = _checkbox_to_value(growl_notify_onsnatch)
        growl_notify_ondownload = _checkbox_to_value(growl_notify_ondownload)
        growl_notify_onsubtitledownload = _checkbox_to_value(growl_notify_onsubtitledownload)
        use_growl = _checkbox_to_value(use_growl)
        prowl_notify_onsnatch = _checkbox_to_value(prowl_notify_onsnatch)
        prowl_notify_ondownload = _checkbox_to_value(prowl_notify_ondownload)
        prowl_notify_onsubtitledownload = _checkbox_to_value(prowl_notify_onsubtitledownload)
        use_prowl = _checkbox_to_value(use_prowl)
        twitter_notify_onsnatch = _checkbox_to_value(twitter_notify_onsnatch)
        twitter_notify_ondownload = _checkbox_to_value(twitter_notify_ondownload)
        twitter_notify_onsubtitledownload = _checkbox_to_value(twitter_notify_onsubtitledownload)
        use_twitter = _checkbox_to_value(use_twitter)
        boxcar_notify_onsnatch = _checkbox_to_value(boxcar_notify_onsnatch)
        boxcar_notify_ondownload = _checkbox_to_value(boxcar_notify_ondownload)
        boxcar_notify_onsubtitledownload = _checkbox_to_value(boxcar_notify_onsubtitledownload)
        use_boxcar = _checkbox_to_value(use_boxcar)
        pushover_notify_onsnatch = _checkbox_to_value(pushover_notify_onsnatch)
        pushover_notify_ondownload = _checkbox_to_value(pushover_notify_ondownload)
        pushover_notify_onsubtitledownload = _checkbox_to_value(pushover_notify_onsubtitledownload)
        use_pushover = _checkbox_to_value(use_pushover)
        use_nmj = _checkbox_to_value(use_nmj)
        use_synoindex = _checkbox_to_value(use_synoindex)
        use_synologynotifier = _checkbox_to_value(use_synologynotifier)
        synologynotifier_notify_onsnatch = _checkbox_to_value(synologynotifier_notify_onsnatch)
        synologynotifier_notify_ondownload = _checkbox_to_value(synologynotifier_notify_ondownload)
        synologynotifier_notify_onsubtitledownload = _checkbox_to_value(synologynotifier_notify_onsubtitledownload)
        use_nmjv2 = _checkbox_to_value(use_nmjv2)
        use_trakt = _checkbox_to_value(use_trakt)
        trakt_remove_watchlist = _checkbox_to_value(trakt_remove_watchlist)
        trakt_use_watchlist = _checkbox_to_value(trakt_use_watchlist)
        trakt_start_paused = _checkbox_to_value(trakt_start_paused)
        use_pytivo = _checkbox_to_value(use_pytivo)
        pytivo_notify_onsnatch = _checkbox_to_value(pytivo_notify_onsnatch)
        pytivo_notify_ondownload = _checkbox_to_value(pytivo_notify_ondownload)
        pytivo_notify_onsubtitledownload = _checkbox_to_value(pytivo_notify_onsubtitledownload)
        pytivo_update_library = _checkbox_to_value(pytivo_update_library)
        use_nma = _checkbox_to_value(use_nma)
        nma_notify_onsnatch = _checkbox_to_value(nma_notify_onsnatch)
        nma_notify_ondownload = _checkbox_to_value(nma_notify_ondownload)
        nma_notify_onsubtitledownload = _checkbox_to_value(nma_notify_onsubtitledownload)
        use_mail = _checkbox_to_value(use_mail)
        mail_ssl = _checkbox_to_value(mail_ssl)
        mail_notify_onsnatch = _checkbox_to_value(mail_notify_onsnatch)
        use_pushalot = _checkbox_to_value(use_pushalot)
        pushalot_notify_onsnatch = _checkbox_to_value(pushalot_notify_onsnatch)
        pushalot_notify_ondownload = _checkbox_to_value(pushalot_notify_ondownload)
        pushalot_notify_onsubtitledownload = _checkbox_to_value(pushalot_notify_onsubtitledownload)
        use_pushbullet = _checkbox_to_value(use_pushbullet)
        pushbullet_notify_onsnatch = _checkbox_to_value(pushbullet_notify_onsnatch)
        pushbullet_notify_ondownload = _checkbox_to_value(pushbullet_notify_ondownload)
        pushbullet_notify_onsubtitledownload = _checkbox_to_value(pushbullet_notify_onsubtitledownload)
sickbeard.USE_XBMC = use_xbmc
sickbeard.XBMC_NOTIFY_ONSNATCH = xbmc_notify_onsnatch
sickbeard.XBMC_NOTIFY_ONDOWNLOAD = xbmc_notify_ondownload
sickbeard.XBMC_NOTIFY_ONSUBTITLEDOWNLOAD = xbmc_notify_onsubtitledownload
sickbeard.XBMC_UPDATE_LIBRARY = xbmc_update_library
sickbeard.XBMC_UPDATE_FULL = xbmc_update_full
sickbeard.XBMC_UPDATE_ONLYFIRST = xbmc_update_onlyfirst
sickbeard.XBMC_HOST = xbmc_host
sickbeard.XBMC_USERNAME = xbmc_username
sickbeard.XBMC_PASSWORD = xbmc_password
sickbeard.USE_PLEX = use_plex
sickbeard.PLEX_NOTIFY_ONSNATCH = plex_notify_onsnatch
sickbeard.PLEX_NOTIFY_ONDOWNLOAD = plex_notify_ondownload
sickbeard.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD = plex_notify_onsubtitledownload
sickbeard.PLEX_UPDATE_LIBRARY = plex_update_library
sickbeard.PLEX_HOST = plex_host
sickbeard.PLEX_SERVER_HOST = plex_server_host
sickbeard.PLEX_USERNAME = plex_username
sickbeard.PLEX_PASSWORD = plex_password
sickbeard.USE_GROWL = use_growl
sickbeard.GROWL_NOTIFY_ONSNATCH = growl_notify_onsnatch
sickbeard.GROWL_NOTIFY_ONDOWNLOAD = growl_notify_ondownload
sickbeard.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD = growl_notify_onsubtitledownload
sickbeard.GROWL_HOST = growl_host
sickbeard.GROWL_PASSWORD = growl_password
sickbeard.USE_PROWL = use_prowl
sickbeard.PROWL_NOTIFY_ONSNATCH = prowl_notify_onsnatch
sickbeard.PROWL_NOTIFY_ONDOWNLOAD = prowl_notify_ondownload
sickbeard.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD = prowl_notify_onsubtitledownload
sickbeard.PROWL_API = prowl_api
sickbeard.PROWL_PRIORITY = prowl_priority
sickbeard.USE_TWITTER = use_twitter
sickbeard.TWITTER_NOTIFY_ONSNATCH = twitter_notify_onsnatch
sickbeard.TWITTER_NOTIFY_ONDOWNLOAD = twitter_notify_ondownload
sickbeard.TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD = twitter_notify_onsubtitledownload
sickbeard.USE_BOXCAR = use_boxcar
sickbeard.BOXCAR_NOTIFY_ONSNATCH = boxcar_notify_onsnatch
sickbeard.BOXCAR_NOTIFY_ONDOWNLOAD = boxcar_notify_ondownload
sickbeard.BOXCAR_NOTIFY_ONSUBTITLEDOWNLOAD = boxcar_notify_onsubtitledownload
sickbeard.BOXCAR_USERNAME = boxcar_username
sickbeard.USE_PUSHOVER = use_pushover
sickbeard.PUSHOVER_NOTIFY_ONSNATCH = pushover_notify_onsnatch
sickbeard.PUSHOVER_NOTIFY_ONDOWNLOAD = pushover_notify_ondownload
sickbeard.PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = pushover_notify_onsubtitledownload
sickbeard.PUSHOVER_USERKEY = pushover_userkey
sickbeard.USE_LIBNOTIFY = use_libnotify == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH = libnotify_notify_onsnatch == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD = libnotify_notify_ondownload == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD = libnotify_notify_onsubtitledownload == "on"
sickbeard.USE_NMJ = use_nmj
sickbeard.NMJ_HOST = nmj_host
sickbeard.NMJ_DATABASE = nmj_database
sickbeard.NMJ_MOUNT = nmj_mount
sickbeard.USE_SYNOINDEX = use_synoindex
sickbeard.USE_SYNOLOGYNOTIFIER = use_synologynotifier
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH = synologynotifier_notify_onsnatch
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD = synologynotifier_notify_ondownload
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = synologynotifier_notify_onsubtitledownload
sickbeard.USE_NMJv2 = use_nmjv2
sickbeard.NMJv2_HOST = nmjv2_host
sickbeard.NMJv2_DATABASE = nmjv2_database
sickbeard.NMJv2_DBLOC = nmjv2_dbloc
sickbeard.USE_TRAKT = use_trakt
sickbeard.TRAKT_USERNAME = trakt_username
sickbeard.TRAKT_PASSWORD = trakt_password
sickbeard.TRAKT_API = trakt_api
sickbeard.TRAKT_REMOVE_WATCHLIST = trakt_remove_watchlist
sickbeard.TRAKT_USE_WATCHLIST = trakt_use_watchlist
sickbeard.TRAKT_METHOD_ADD = trakt_method_add
sickbeard.TRAKT_START_PAUSED = trakt_start_paused
sickbeard.USE_PYTIVO = use_pytivo
        # these values were already converted to 0/1 above; the old '== "off"'
        # comparison always evaluated to False and silently disabled pyTivo notices
        sickbeard.PYTIVO_NOTIFY_ONSNATCH = pytivo_notify_onsnatch
        sickbeard.PYTIVO_NOTIFY_ONDOWNLOAD = pytivo_notify_ondownload
        sickbeard.PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD = pytivo_notify_onsubtitledownload
sickbeard.PYTIVO_UPDATE_LIBRARY = pytivo_update_library
sickbeard.PYTIVO_HOST = pytivo_host
sickbeard.PYTIVO_SHARE_NAME = pytivo_share_name
sickbeard.PYTIVO_TIVO_NAME = pytivo_tivo_name
sickbeard.USE_NMA = use_nma
sickbeard.NMA_NOTIFY_ONSNATCH = nma_notify_onsnatch
sickbeard.NMA_NOTIFY_ONDOWNLOAD = nma_notify_ondownload
sickbeard.NMA_NOTIFY_ONSUBTITLEDOWNLOAD = nma_notify_onsubtitledownload
sickbeard.NMA_API = nma_api
sickbeard.NMA_PRIORITY = nma_priority
sickbeard.USE_MAIL = use_mail
sickbeard.MAIL_USERNAME = mail_username
sickbeard.MAIL_PASSWORD = mail_password
sickbeard.MAIL_SERVER = mail_server
sickbeard.MAIL_SSL = mail_ssl
sickbeard.MAIL_FROM = mail_from
sickbeard.MAIL_TO = mail_to
sickbeard.MAIL_NOTIFY_ONSNATCH = mail_notify_onsnatch
sickbeard.USE_PUSHALOT = use_pushalot
sickbeard.PUSHALOT_NOTIFY_ONSNATCH = pushalot_notify_onsnatch
sickbeard.PUSHALOT_NOTIFY_ONDOWNLOAD = pushalot_notify_ondownload
sickbeard.PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD = pushalot_notify_onsubtitledownload
sickbeard.PUSHALOT_AUTHORIZATIONTOKEN = pushalot_authorizationtoken
sickbeard.USE_PUSHBULLET = use_pushbullet
sickbeard.PUSHBULLET_NOTIFY_ONSNATCH = pushbullet_notify_onsnatch
sickbeard.PUSHBULLET_NOTIFY_ONDOWNLOAD = pushbullet_notify_ondownload
sickbeard.PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD = pushbullet_notify_onsubtitledownload
sickbeard.PUSHBULLET_API = pushbullet_api
sickbeard.PUSHBULLET_DEVICE = pushbullet_device_list
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/notifications/")
class ConfigSubtitles:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_subtitles.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveSubtitles(self, use_subtitles=None, subsnewasold=None, subtitles_plugins=None, subtitles_languages=None, subtitles_dir=None, subtitles_dir_sub=None, subsnolang = None, service_order=None, subtitles_history=None, subtitles_clean_hi=None, subtitles_clean_team=None, subtitles_clean_music=None, subtitles_clean_punc=None):
results = []
if use_subtitles == "on":
use_subtitles = 1
if sickbeard.subtitlesFinderScheduler.thread == None or not sickbeard.subtitlesFinderScheduler.thread.isAlive():
sickbeard.subtitlesFinderScheduler.initThread()
else:
use_subtitles = 0
sickbeard.subtitlesFinderScheduler.abort = True
logger.log(u"Waiting for the SUBTITLESFINDER thread to exit")
try:
sickbeard.subtitlesFinderScheduler.thread.join(5)
except:
pass
        subtitles_history = _checkbox_to_value(subtitles_history)
        subtitles_dir_sub = _checkbox_to_value(subtitles_dir_sub)
        subsnewasold = _checkbox_to_value(subsnewasold)
        subsnolang = _checkbox_to_value(subsnolang)
sickbeard.USE_SUBTITLES = use_subtitles
sickbeard.SUBSNEWASOLD = subsnewasold
sickbeard.SUBTITLES_LANGUAGES = [lang.alpha2 for lang in subtitles.isValidLanguage(subtitles_languages.replace(' ', '').split(','))] if subtitles_languages != '' else ''
sickbeard.SUBTITLES_DIR = subtitles_dir
sickbeard.SUBTITLES_DIR_SUB = subtitles_dir_sub
sickbeard.SUBSNOLANG = subsnolang
sickbeard.SUBTITLES_HISTORY = subtitles_history
# Subtitles services
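        # service_order arrives as whitespace-separated "name:enabled" pairs,
        # e.g. (illustrative) "opensubtitles:1 subswiki:0", which we split into
        # a list of service names and a parallel list of 0/1 enabled flags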
services_str_list = service_order.split()
subtitles_services_list = []
subtitles_services_enabled = []
for curServiceStr in services_str_list:
curService, curEnabled = curServiceStr.split(':')
subtitles_services_list.append(curService)
subtitles_services_enabled.append(int(curEnabled))
sickbeard.SUBTITLES_SERVICES_LIST = subtitles_services_list
sickbeard.SUBTITLES_SERVICES_ENABLED = subtitles_services_enabled
        # subtitles cleansing options
        subtitles_clean_hi = _checkbox_to_value(subtitles_clean_hi)
        subtitles_clean_team = _checkbox_to_value(subtitles_clean_team)
        subtitles_clean_music = _checkbox_to_value(subtitles_clean_music)
        subtitles_clean_punc = _checkbox_to_value(subtitles_clean_punc)
sickbeard.SUBTITLES_CLEAN_HI = subtitles_clean_hi
sickbeard.SUBTITLES_CLEAN_TEAM = subtitles_clean_team
sickbeard.SUBTITLES_CLEAN_MUSIC = subtitles_clean_music
sickbeard.SUBTITLES_CLEAN_PUNC = subtitles_clean_punc
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/subtitles/")
class Config:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config.tmpl")
t.submenu = ConfigMenu
return _munge(t)
general = ConfigGeneral()
search = ConfigSearch()
postProcessing = ConfigPostProcessing()
providers = ConfigProviders()
notifications = ConfigNotifications()
subtitles = ConfigSubtitles()
def haveXBMC():
return sickbeard.USE_XBMC and sickbeard.XBMC_UPDATE_LIBRARY
def havePLEX():
return sickbeard.USE_PLEX and sickbeard.PLEX_UPDATE_LIBRARY
def HomeMenu():
return [
{ 'title': 'Add Shows', 'path': 'home/addShows/', },
{ 'title': 'Manual Post-Processing', 'path': 'home/postprocess/' },
{ 'title': 'Update XBMC', 'path': 'home/updateXBMC/', 'requires': haveXBMC },
{ 'title': 'Update Plex', 'path': 'home/updatePLEX/', 'requires': havePLEX },
{ 'title': 'Update', 'path': 'manage/manageSearches/forceVersionCheck', 'confirm': True},
{ 'title': 'Restart', 'path': 'home/restart/?pid='+str(sickbeard.PID), 'confirm': True },
{ 'title': 'Shutdown', 'path': 'home/shutdown/?pid='+str(sickbeard.PID), 'confirm': True },
]
class HomePostProcess:
@cherrypy.expose
def index(self):
t = PageTemplate(file="home_postprocess.tmpl")
t.submenu = HomeMenu()
return _munge(t)
@cherrypy.expose
def processEpisode(self, dir=None, nzbName=None, jobName=None, quiet=None):
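        """
        Manually post-process a folder. dir is the folder to process and
        nzbName the original nzb name if known; quiet=1 returns the raw result
        text instead of wrapping it in an HTML page.
        """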
if not dir:
redirect("/home/postprocess")
else:
result = processTV.processDir(dir, nzbName)
if quiet != None and int(quiet) == 1:
return result
result = result.replace("\n","<br />\n")
return _genericMessage("Postprocessing results", result)
class NewHomeAddShows:
@cherrypy.expose
def index(self):
t = PageTemplate(file="home_addShows.tmpl")
t.submenu = HomeMenu()
return _munge(t)
@cherrypy.expose
def getTVDBLanguages(self):
result = tvdb_api.Tvdb().config['valid_languages']
# Make sure list is sorted alphabetically but 'fr' is in front
if 'fr' in result:
del result[result.index('fr')]
result.sort()
result.insert(0, 'fr')
return json.dumps({'results': result})
@cherrypy.expose
def sanitizeFileName(self, name):
return helpers.sanitizeFileName(name)
@cherrypy.expose
def searchTVDBForShowName(self, name, lang="fr"):
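        """
        Query thetvdb.com's GetSeries API with the show name (and each word of
        it as a fallback) and return JSON holding (tvdb_id, series name, first
        aired) tuples plus the numeric TVDB language id. Defaults to 'fr'.
        """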
if not lang or lang == 'null':
lang = "fr"
baseURL = "http://thetvdb.com/api/GetSeries.php?"
nameUTF8 = name.encode('utf-8')
logger.log(u"Trying to find Show on thetvdb.com with: " + nameUTF8.decode('utf-8'), logger.DEBUG)
# Use each word in the show's name as a possible search term
keywords = nameUTF8.split(' ')
# Insert the whole show's name as the first search term so best results are first
# ex: keywords = ['Some Show Name', 'Some', 'Show', 'Name']
if len(keywords) > 1:
keywords.insert(0, nameUTF8)
# Query the TVDB for each search term and build the list of results
results = []
for searchTerm in keywords:
params = {'seriesname': searchTerm,
'language': lang}
finalURL = baseURL + urllib.urlencode(params)
logger.log(u"Searching for Show with searchterm: \'" + searchTerm.decode('utf-8') + u"\' on URL " + finalURL, logger.DEBUG)
urlData = helpers.getURL(finalURL)
if urlData is None:
# When urlData is None, trouble connecting to TVDB, don't try the rest of the keywords
logger.log(u"Unable to get URL: " + finalURL, logger.ERROR)
break
else:
try:
seriesXML = etree.ElementTree(etree.XML(urlData))
series = seriesXML.getiterator('Series')
except Exception, e:
# use finalURL in log, because urlData can be too much information
logger.log(u"Unable to parse XML for some reason: " + ex(e) + " from XML: " + finalURL, logger.ERROR)
series = ''
# add each result to our list
for curSeries in series:
tvdb_id = int(curSeries.findtext('seriesid'))
# don't add duplicates
if tvdb_id in [x[0] for x in results]:
continue
results.append((tvdb_id, curSeries.findtext('SeriesName'), curSeries.findtext('FirstAired')))
lang_id = tvdb_api.Tvdb().config['langabbv_to_id'][lang]
return json.dumps({'results': results, 'langid': lang_id})
@cherrypy.expose
def massAddTable(self, rootDir=None):
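        """
        Build the mass-add table: scan each selected root dir for show
        folders, mark the ones already in the database, and try to recover a
        TVDB id and show name from any existing metadata files.
        """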
t = PageTemplate(file="home_massAddTable.tmpl")
t.submenu = HomeMenu()
myDB = db.DBConnection()
if not rootDir:
return "No folders selected."
elif type(rootDir) != list:
root_dirs = [rootDir]
else:
root_dirs = rootDir
root_dirs = [urllib.unquote_plus(x) for x in root_dirs]
default_index = int(sickbeard.ROOT_DIRS.split('|')[0])
if len(root_dirs) > default_index:
tmp = root_dirs[default_index]
if tmp in root_dirs:
root_dirs.remove(tmp)
root_dirs = [tmp]+root_dirs
dir_list = []
for root_dir in root_dirs:
try:
file_list = ek.ek(os.listdir, root_dir)
except:
continue
for cur_file in file_list:
cur_path = ek.ek(os.path.normpath, ek.ek(os.path.join, root_dir, cur_file))
if not ek.ek(os.path.isdir, cur_path):
continue
cur_dir = {
'dir': cur_path,
'display_dir': '<b>'+ek.ek(os.path.dirname, cur_path)+os.sep+'</b>'+ek.ek(os.path.basename, cur_path),
}
# see if the folder is in XBMC already
dirResults = myDB.select("SELECT * FROM tv_shows WHERE location = ?", [cur_path])
if dirResults:
cur_dir['added_already'] = True
else:
cur_dir['added_already'] = False
dir_list.append(cur_dir)
tvdb_id = ''
show_name = ''
for cur_provider in sickbeard.metadata_provider_dict.values():
(tvdb_id, show_name) = cur_provider.retrieveShowMetadata(cur_path)
if tvdb_id and show_name:
break
cur_dir['existing_info'] = (tvdb_id, show_name)
if tvdb_id and helpers.findCertainShow(sickbeard.showList, tvdb_id):
cur_dir['added_already'] = True
t.dirList = dir_list
return _munge(t)
@cherrypy.expose
def newShow(self, show_to_add=None, other_shows=None):
"""
Display the new show page which collects a tvdb id, folder, and extra options and
posts them to addNewShow
"""
t = PageTemplate(file="home_newShow.tmpl")
t.submenu = HomeMenu()
show_dir, tvdb_id, show_name = self.split_extra_show(show_to_add)
if tvdb_id and show_name:
use_provided_info = True
else:
use_provided_info = False
# tell the template whether we're giving it show name & TVDB ID
t.use_provided_info = use_provided_info
# use the given show_dir for the tvdb search if available
if not show_dir:
t.default_show_name = ''
elif not show_name:
t.default_show_name = ek.ek(os.path.basename, ek.ek(os.path.normpath, show_dir)).replace('.',' ')
else:
t.default_show_name = show_name
# carry a list of other dirs if given
if not other_shows:
other_shows = []
elif type(other_shows) != list:
other_shows = [other_shows]
if use_provided_info:
t.provided_tvdb_id = tvdb_id
t.provided_tvdb_name = show_name
t.provided_show_dir = show_dir
t.other_shows = other_shows
return _munge(t)
@cherrypy.expose
def addNewShow(self, whichSeries=None, tvdbLang="fr", rootDir=None, defaultStatus=None,
anyQualities=None, bestQualities=None, flatten_folders=None, subtitles=None, fullShowPath=None,
other_shows=None, skipShow=None, audio_lang=None):
"""
Receive tvdb id, dir, and other options and create a show from them. If extra show dirs are
provided then it forwards back to newShow, if not it goes to /home.
"""
# grab our list of other dirs if given
if not other_shows:
other_shows = []
elif type(other_shows) != list:
other_shows = [other_shows]
def finishAddShow():
# if there are no extra shows then go home
if not other_shows:
redirect('/home')
# peel off the next one
next_show_dir = other_shows[0]
rest_of_show_dirs = other_shows[1:]
# go to add the next show
return self.newShow(next_show_dir, rest_of_show_dirs)
# if we're skipping then behave accordingly
if skipShow:
return finishAddShow()
# sanity check on our inputs
if (not rootDir and not fullShowPath) or not whichSeries:
return "Missing params, no tvdb id or folder:"+repr(whichSeries)+" and "+repr(rootDir)+"/"+repr(fullShowPath)
# figure out what show we're adding and where
        series_pieces = whichSeries.partition('|')
        # str.partition always returns a 3-tuple, so check that the separator
        # was actually found rather than testing the tuple length
        if not series_pieces[1]:
            return "Error with show selection."
tvdb_id = int(series_pieces[0])
show_name = series_pieces[2]
# use the whole path if it's given, or else append the show name to the root dir to get the full show path
if fullShowPath:
show_dir = ek.ek(os.path.normpath, fullShowPath)
else:
show_dir = ek.ek(os.path.join, rootDir, helpers.sanitizeFileName(show_name))
# blanket policy - if the dir exists you should have used "add existing show" numbnuts
if ek.ek(os.path.isdir, show_dir) and not fullShowPath:
ui.notifications.error("Unable to add show", "Folder "+show_dir+" exists already")
redirect('/home/addShows/existingShows')
# don't create show dir if config says not to
if sickbeard.ADD_SHOWS_WO_DIR:
logger.log(u"Skipping initial creation of "+show_dir+" due to config.ini setting")
else:
dir_exists = helpers.makeDir(show_dir)
if not dir_exists:
logger.log(u"Unable to create the folder "+show_dir+", can't add the show", logger.ERROR)
ui.notifications.error("Unable to add show", "Unable to create the folder "+show_dir+", can't add the show")
redirect("/home")
else:
helpers.chmodAsParent(show_dir)
# prepare the inputs for passing along
        flatten_folders = _checkbox_to_value(flatten_folders)
        subtitles = _checkbox_to_value(subtitles)
if not anyQualities:
anyQualities = []
if not bestQualities:
bestQualities = []
if type(anyQualities) != list:
anyQualities = [anyQualities]
if type(bestQualities) != list:
bestQualities = [bestQualities]
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
# add the show
sickbeard.showQueueScheduler.action.addShow(tvdb_id, show_dir, int(defaultStatus), newQuality, flatten_folders, tvdbLang, subtitles, audio_lang) #@UndefinedVariable
ui.notifications.message('Show added', 'Adding the specified show into '+show_dir)
return finishAddShow()
@cherrypy.expose
def existingShows(self):
"""
Prints out the page to add existing shows from a root dir
"""
t = PageTemplate(file="home_addExistingShow.tmpl")
t.submenu = HomeMenu()
return _munge(t)
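    # Extra shows are passed around the add-show flow as a single string of
    # the form "<show_dir>|<tvdb_id>|<show name>", for example (illustrative
    # only): "/tv/Some Show|12345|Some Show". split_extra_show() undoes that
    # encoding, tolerating '|' characters inside the show name itself.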
def split_extra_show(self, extra_show):
if not extra_show:
return (None, None, None)
split_vals = extra_show.split('|')
if len(split_vals) < 3:
return (extra_show, None, None)
show_dir = split_vals[0]
tvdb_id = split_vals[1]
show_name = '|'.join(split_vals[2:])
return (show_dir, tvdb_id, show_name)
@cherrypy.expose
def addExistingShows(self, shows_to_add=None, promptForSettings=None):
"""
Receives a dir list and add them. Adds the ones with given TVDB IDs first, then forwards
along to the newShow page.
"""
# grab a list of other shows to add, if provided
if not shows_to_add:
shows_to_add = []
elif type(shows_to_add) != list:
shows_to_add = [shows_to_add]
shows_to_add = [urllib.unquote_plus(x) for x in shows_to_add]
        promptForSettings = _checkbox_to_value(promptForSettings)
tvdb_id_given = []
dirs_only = []
# separate all the ones with TVDB IDs
for cur_dir in shows_to_add:
if not '|' in cur_dir:
dirs_only.append(cur_dir)
else:
show_dir, tvdb_id, show_name = self.split_extra_show(cur_dir)
if not show_dir or not tvdb_id or not show_name:
continue
tvdb_id_given.append((show_dir, int(tvdb_id), show_name))
# if they want me to prompt for settings then I will just carry on to the newShow page
if promptForSettings and shows_to_add:
return self.newShow(shows_to_add[0], shows_to_add[1:])
# if they don't want me to prompt for settings then I can just add all the nfo shows now
num_added = 0
for cur_show in tvdb_id_given:
show_dir, tvdb_id, show_name = cur_show
# add the show
sickbeard.showQueueScheduler.action.addShow(tvdb_id, show_dir, int(sickbeard.STATUS_DEFAULT), sickbeard.QUALITY_DEFAULT, sickbeard.FLATTEN_FOLDERS_DEFAULT,"fr", sickbeard.SUBTITLES_DEFAULT, sickbeard.AUDIO_SHOW_DEFAULT) #@UndefinedVariable
num_added += 1
if num_added:
ui.notifications.message("Shows Added", "Automatically added "+str(num_added)+" from their existing metadata files")
# if we're done then go home
if not dirs_only:
redirect('/home')
# for the remaining shows we need to prompt for each one, so forward this on to the newShow page
return self.newShow(dirs_only[0], dirs_only[1:])
ErrorLogsMenu = [
{ 'title': 'Clear Errors', 'path': 'errorlogs/clearerrors' },
#{ 'title': 'View Log', 'path': 'errorlogs/viewlog' },
]
class ErrorLogs:
@cherrypy.expose
def index(self):
t = PageTemplate(file="errorlogs.tmpl")
t.submenu = ErrorLogsMenu
return _munge(t)
@cherrypy.expose
def clearerrors(self):
classes.ErrorViewer.clear()
redirect("/errorlogs")
@cherrypy.expose
def viewlog(self, minLevel=logger.MESSAGE, maxLines=500):
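        """
        Render up to maxLines lines of the log file, newest first, keeping
        only entries at or above minLevel (continuation lines are kept with
        their parent entry).
        """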
t = PageTemplate(file="viewlogs.tmpl")
t.submenu = ErrorLogsMenu
minLevel = int(minLevel)
data = []
if os.path.isfile(logger.sb_log_instance.log_file):
f = open(logger.sb_log_instance.log_file)
data = f.readlines()
f.close()
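        # each log line is expected to look roughly like (illustrative):
        #   Mar-14 06:28:04 INFO     Some log message
        # i.e. month-day, a timestamp, an upper-case level name, then the text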
        regex = r"^(\w+).?\-(\d\d)\s+(\d\d)\:(\d\d):(\d\d)\s+([A-Z]+)\s+(.*)$"
finalData = []
numLines = 0
lastLine = False
numToShow = min(maxLines, len(data))
for x in reversed(data):
x = x.decode('utf-8')
match = re.match(regex, x)
if match:
level = match.group(6)
if level not in logger.reverseNames:
lastLine = False
continue
if logger.reverseNames[level] >= minLevel:
lastLine = True
finalData.append(x)
else:
lastLine = False
continue
elif lastLine:
finalData.append("AA"+x)
numLines += 1
if numLines >= numToShow:
break
result = "".join(finalData)
t.logLines = result
t.minLevel = minLevel
return _munge(t)
class Home:
@cherrypy.expose
def is_alive(self, *args, **kwargs):
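        """
        JSONP liveness check used by the restart page. Requires 'callback' and
        '_' in the query string; answers callback({"msg": "<PID>"}); while the
        server is up, or {"msg": "nope"} when it has not finished starting.
        """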
if 'callback' in kwargs and '_' in kwargs:
callback, _ = kwargs['callback'], kwargs['_']
else:
return "Error: Unsupported Request. Send jsonp request with 'callback' variable in the query stiring."
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
cherrypy.response.headers['Content-Type'] = 'text/javascript'
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
cherrypy.response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
if sickbeard.started:
return callback+'('+json.dumps({"msg": str(sickbeard.PID)})+');'
else:
return callback+'('+json.dumps({"msg": "nope"})+');'
@cherrypy.expose
def index(self):
t = PageTemplate(file="home.tmpl")
t.submenu = HomeMenu()
return _munge(t)
addShows = NewHomeAddShows()
postprocess = HomePostProcess()
@cherrypy.expose
def testSABnzbd(self, host=None, username=None, password=None, apikey=None):
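        """
        Check connectivity and authentication against the given SABnzbd host
        and return a human-readable result string for the config page.
        """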
if not host.endswith("/"):
host = host + "/"
connection, accesMsg = sab.getSabAccesMethod(host, username, password, apikey)
if connection:
authed, authMsg = sab.testAuthentication(host, username, password, apikey) #@UnusedVariable
if authed:
return "Success. Connected and authenticated"
else:
return "Authentication failed. SABnzbd expects '"+accesMsg+"' as authentication method"
else:
return "Unable to connect to host"
@cherrypy.expose
def testTorrent(self, torrent_method=None, host=None, username=None, password=None):
if not host.endswith("/"):
host = host + "/"
client = clients.getClientIstance(torrent_method)
connection, accesMsg = client(host, username, password).testAuthentication()
return accesMsg
@cherrypy.expose
def testGrowl(self, host=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.growl_notifier.test_notify(host, password)
if password==None or password=='':
pw_append = ''
else:
pw_append = " with password: " + password
if result:
return "Registered and Tested growl successfully "+urllib.unquote_plus(host)+pw_append
else:
return "Registration and Testing of growl failed "+urllib.unquote_plus(host)+pw_append
@cherrypy.expose
def testProwl(self, prowl_api=None, prowl_priority=0):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.prowl_notifier.test_notify(prowl_api, prowl_priority)
if result:
return "Test prowl notice sent successfully"
else:
return "Test prowl notice failed"
@cherrypy.expose
def testBoxcar(self, username=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.boxcar_notifier.test_notify(username)
if result:
return "Boxcar notification succeeded. Check your Boxcar clients to make sure it worked"
else:
return "Error sending Boxcar notification"
@cherrypy.expose
def testPushover(self, userKey=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushover_notifier.test_notify(userKey)
if result:
return "Pushover notification succeeded. Check your Pushover clients to make sure it worked"
else:
return "Error sending Pushover notification"
@cherrypy.expose
def twitterStep1(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
return notifiers.twitter_notifier._get_authorization()
@cherrypy.expose
def twitterStep2(self, key):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.twitter_notifier._get_credentials(key)
logger.log(u"result: "+str(result))
if result:
return "Key verification successful"
else:
return "Unable to verify key"
@cherrypy.expose
def testTwitter(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.twitter_notifier.test_notify()
if result:
return "Tweet successful, check your twitter to make sure it worked"
else:
return "Error sending tweet"
@cherrypy.expose
def testXBMC(self, host=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
finalResult = ''
for curHost in [x.strip() for x in host.split(",")]:
curResult = notifiers.xbmc_notifier.test_notify(urllib.unquote_plus(curHost), username, password)
if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]:
finalResult += "Test XBMC notice sent successfully to " + urllib.unquote_plus(curHost)
else:
finalResult += "Test XBMC notice failed to " + urllib.unquote_plus(curHost)
finalResult += "<br />\n"
return finalResult
@cherrypy.expose
def testPLEX(self, host=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
finalResult = ''
for curHost in [x.strip() for x in host.split(",")]:
curResult = notifiers.plex_notifier.test_notify(urllib.unquote_plus(curHost), username, password)
if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]:
finalResult += "Test Plex notice sent successfully to " + urllib.unquote_plus(curHost)
else:
finalResult += "Test Plex notice failed to " + urllib.unquote_plus(curHost)
finalResult += "<br />\n"
return finalResult
@cherrypy.expose
def testLibnotify(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
if notifiers.libnotify_notifier.test_notify():
return "Tried sending desktop notification via libnotify"
else:
return notifiers.libnotify.diagnose()
@cherrypy.expose
def testNMJ(self, host=None, database=None, mount=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmj_notifier.test_notify(urllib.unquote_plus(host), database, mount)
if result:
return "Successfull started the scan update"
else:
return "Test failed to start the scan update"
@cherrypy.expose
def settingsNMJ(self, host=None):
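        """
        Ask the NMJ notifier to detect the database and mount settings for the
        given Popcorn Hour host and return them as a JSON string for the
        notifications config page.
        """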
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmj_notifier.notify_settings(urllib.unquote_plus(host))
if result:
return '{"message": "Got settings from %(host)s", "database": "%(database)s", "mount": "%(mount)s"}' % {"host": host, "database": sickbeard.NMJ_DATABASE, "mount": sickbeard.NMJ_MOUNT}
else:
return '{"message": "Failed! Make sure your Popcorn is on and NMJ is running. (see Log & Errors -> Debug for detailed info)", "database": "", "mount": ""}'
@cherrypy.expose
def testNMJv2(self, host=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmjv2_notifier.test_notify(urllib.unquote_plus(host))
if result:
return "Test notice sent successfully to " + urllib.unquote_plus(host)
else:
return "Test notice failed to " + urllib.unquote_plus(host)
@cherrypy.expose
def settingsNMJv2(self, host=None, dbloc=None, instance=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmjv2_notifier.notify_settings(urllib.unquote_plus(host), dbloc, instance)
if result:
return '{"message": "NMJ Database found at: %(host)s", "database": "%(database)s"}' % {"host": host, "database": sickbeard.NMJv2_DATABASE}
else:
return '{"message": "Unable to find NMJ Database at location: %(dbloc)s. Is the right location selected and PCH running?", "database": ""}' % {"dbloc": dbloc}
@cherrypy.expose
def testTrakt(self, api=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.trakt_notifier.test_notify(api, username, password)
if result:
return "Test notice sent successfully to Trakt"
else:
return "Test notice failed to Trakt"
@cherrypy.expose
def testMail(self, mail_from=None, mail_to=None, mail_server=None, mail_ssl=None, mail_user=None, mail_password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.mail_notifier.test_notify(mail_from, mail_to, mail_server, mail_ssl, mail_user, mail_password)
if result:
return "Mail sent"
else:
return "Can't sent mail."
@cherrypy.expose
def testNMA(self, nma_api=None, nma_priority=0):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nma_notifier.test_notify(nma_api, nma_priority)
if result:
return "Test NMA notice sent successfully"
else:
return "Test NMA notice failed"
@cherrypy.expose
def testPushalot(self, authorizationToken=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushalot_notifier.test_notify(authorizationToken)
if result:
return "Pushalot notification succeeded. Check your Pushalot clients to make sure it worked"
else:
return "Error sending Pushalot notification"
@cherrypy.expose
def testPushbullet(self, api=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushbullet_notifier.test_notify(api)
if result:
return "Pushbullet notification succeeded. Check your device to make sure it worked"
else:
return "Error sending Pushbullet notification"
@cherrypy.expose
def getPushbulletDevices(self, api=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushbullet_notifier.get_devices(api)
if result:
return result
else:
return "Error sending Pushbullet notification"
@cherrypy.expose
def shutdown(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
threading.Timer(2, sickbeard.invoke_shutdown).start()
title = "Shutting down"
message = "Sick Beard is shutting down..."
return _genericMessage(title, message)
@cherrypy.expose
def restart(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
t = PageTemplate(file="restart.tmpl")
t.submenu = HomeMenu()
# do a soft restart
threading.Timer(2, sickbeard.invoke_restart, [False]).start()
return _munge(t)
@cherrypy.expose
def update(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
updated = sickbeard.versionCheckScheduler.action.update() #@UndefinedVariable
if updated:
# do a hard restart
threading.Timer(2, sickbeard.invoke_restart, [False]).start()
t = PageTemplate(file="restart_bare.tmpl")
return _munge(t)
else:
return _genericMessage("Update Failed","Update wasn't successful, not restarting. Check your log for more information.")
@cherrypy.expose
def displayShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
else:
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.tvdbid)
myDB = db.DBConnection()
seasonResults = myDB.select(
"SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season desc",
[showObj.tvdbid]
)
sqlResults = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC",
[showObj.tvdbid]
)
t = PageTemplate(file="displayShow.tmpl")
t.submenu = [ { 'title': 'Edit', 'path': 'home/editShow?show=%d'%showObj.tvdbid } ]
try:
t.showLoc = (showObj.location, True)
except sickbeard.exceptions.ShowDirNotFoundException:
t.showLoc = (showObj._location, False)
show_message = ''
if sickbeard.showQueueScheduler.action.isBeingAdded(showObj): #@UndefinedVariable
show_message = 'This show is in the process of being downloaded from theTVDB.com - the info below is incomplete.'
elif sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
show_message = 'The information below is in the process of being updated.'
elif sickbeard.showQueueScheduler.action.isBeingRefreshed(showObj): #@UndefinedVariable
show_message = 'The episodes below are currently being refreshed from disk'
elif sickbeard.showQueueScheduler.action.isBeingSubtitled(showObj): #@UndefinedVariable
show_message = 'Currently downloading subtitles for this show'
elif sickbeard.showQueueScheduler.action.isBeingCleanedSubtitle(showObj): #@UndefinedVariable
show_message = 'Currently cleaning subtitles for this show'
elif sickbeard.showQueueScheduler.action.isInRefreshQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued to be refreshed.'
elif sickbeard.showQueueScheduler.action.isInUpdateQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued and awaiting an update.'
elif sickbeard.showQueueScheduler.action.isInSubtitleQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued and awaiting subtitles download.'
if not sickbeard.showQueueScheduler.action.isBeingAdded(showObj): #@UndefinedVariable
if not sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
t.submenu.append({ 'title': 'Delete', 'path': 'home/deleteShow?show=%d'%showObj.tvdbid, 'confirm': True })
t.submenu.append({ 'title': 'Re-scan files', 'path': 'home/refreshShow?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'Force Full Update', 'path': 'home/updateShow?show=%d&force=1'%showObj.tvdbid })
t.submenu.append({ 'title': 'Update show in XBMC', 'path': 'home/updateXBMC?showName=%s'%urllib.quote_plus(showObj.name.encode('utf-8')), 'requires': haveXBMC })
t.submenu.append({ 'title': 'Preview Rename', 'path': 'home/testRename?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'French Search', 'path': 'home/frenchSearch?show=%d'%showObj.tvdbid })
if sickbeard.USE_SUBTITLES and not sickbeard.showQueueScheduler.action.isBeingSubtitled(showObj) and not sickbeard.showQueueScheduler.action.isBeingCleanedSubtitle(showObj) and showObj.subtitles:
t.submenu.append({ 'title': 'Download Subtitles', 'path': 'home/subtitleShow?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'Clean Subtitles', 'path': 'home/subtitleShowClean?show=%d'%showObj.tvdbid })
t.show = showObj
t.sqlResults = sqlResults
t.seasonResults = seasonResults
t.show_message = show_message
epCounts = {}
epCats = {}
epCounts[Overview.SKIPPED] = 0
epCounts[Overview.WANTED] = 0
epCounts[Overview.QUAL] = 0
epCounts[Overview.GOOD] = 0
epCounts[Overview.UNAIRED] = 0
epCounts[Overview.SNATCHED] = 0
showSceneNumberColum = False
for curResult in sqlResults:
if not showSceneNumberColum and (isinstance(curResult["scene_season"], int) and isinstance(curResult["scene_episode"], int)):
showSceneNumberColum = True
curEpCat = showObj.getOverview(int(curResult["status"]))
epCats[str(curResult["season"])+"x"+str(curResult["episode"])] = curEpCat
epCounts[curEpCat] += 1
t.showSceneNumberColum = showSceneNumberColum
def titler(x):
if not x:
return x
if x.lower().startswith('a '):
x = x[2:]
elif x.lower().startswith('the '):
x = x[4:]
return x
t.sortedShowList = sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name)))
t.epCounts = epCounts
t.epCats = epCats
return _munge(t)
@cherrypy.expose
def plotDetails(self, show, season, episode):
result = db.DBConnection().action("SELECT description FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", (show, season, episode)).fetchone()
return result['description'] if result else 'Episode not found.'
@cherrypy.expose
def editShow(self, show=None, location=None, anyQualities=[], bestQualities=[], exceptions_list=[], flatten_folders=None, paused=None, frenchsearch=None, directCall=False, air_by_date=None, tvdbLang=None, audio_lang=None, subtitles=None):
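        """
        With only a show id, render the edit-show form. Otherwise apply the
        submitted settings (qualities, location, language, flags, scene
        exceptions) to the show. directCall=True is used by the mass edit
        update and returns a list of error strings instead of HTML.
        """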
if show == None:
errString = "Invalid show ID: "+str(show)
if directCall:
return [errString]
else:
return _genericMessage("Error", errString)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
errString = "Unable to find the specified show: "+str(show)
if directCall:
return [errString]
else:
return _genericMessage("Error", errString)
showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.tvdbid)
if not location and not anyQualities and not bestQualities and not flatten_folders:
t = PageTemplate(file="editShow.tmpl")
t.submenu = HomeMenu()
with showObj.lock:
t.show = showObj
return _munge(t)
        flatten_folders = _checkbox_to_value(flatten_folders)
        logger.log(u"flatten folders: " + str(flatten_folders))
        paused = _checkbox_to_value(paused)
        frenchsearch = _checkbox_to_value(frenchsearch)
        air_by_date = _checkbox_to_value(air_by_date)
        subtitles = _checkbox_to_value(subtitles)
if tvdbLang and tvdbLang in tvdb_api.Tvdb().config['valid_languages']:
tvdb_lang = tvdbLang
else:
tvdb_lang = showObj.lang
# if we changed the language then kick off an update
if tvdb_lang == showObj.lang:
do_update = False
else:
do_update = True
if type(anyQualities) != list:
anyQualities = [anyQualities]
if type(bestQualities) != list:
bestQualities = [bestQualities]
if type(exceptions_list) != list:
exceptions_list = [exceptions_list]
        # when called via directCall (mass edit update) skip scene exception handling
if directCall:
do_update_exceptions = False
else:
if set(exceptions_list) == set(showObj.exceptions):
do_update_exceptions = False
else:
do_update_exceptions = True
errors = []
with showObj.lock:
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
showObj.quality = newQuality
# reversed for now
if bool(showObj.flatten_folders) != bool(flatten_folders):
showObj.flatten_folders = flatten_folders
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh this show: "+ex(e))
showObj.paused = paused
showObj.air_by_date = air_by_date
showObj.subtitles = subtitles
showObj.frenchsearch = frenchsearch
showObj.lang = tvdb_lang
showObj.audio_lang = audio_lang
# if we change location clear the db of episodes, change it, write to db, and rescan
if os.path.normpath(showObj._location) != os.path.normpath(location):
logger.log(os.path.normpath(showObj._location)+" != "+os.path.normpath(location), logger.DEBUG)
if not ek.ek(os.path.isdir, location):
errors.append("New location <tt>%s</tt> does not exist" % location)
# don't bother if we're going to update anyway
elif not do_update:
# change it
try:
showObj.location = location
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh this show:"+ex(e))
# grab updated info from TVDB
#showObj.loadEpisodesFromTVDB()
# rescan the episodes in the new folder
except exceptions.NoNFOException:
errors.append("The folder at <tt>%s</tt> doesn't contain a tvshow.nfo - copy your files to that folder before you change the directory in Sick Beard." % location)
# save it to the DB
showObj.saveToDB()
# force the update
if do_update:
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, True) #@UndefinedVariable
time.sleep(1)
except exceptions.CantUpdateException, e:
errors.append("Unable to force an update on the show.")
if do_update_exceptions:
try:
scene_exceptions.update_scene_exceptions(showObj.tvdbid, exceptions_list) #@UndefinedVariable
time.sleep(1)
except exceptions.CantUpdateException, e:
errors.append("Unable to force an update on scene exceptions of the show.")
if directCall:
return errors
if len(errors) > 0:
ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"),
'<ul>' + '\n'.join(['<li>%s</li>' % error for error in errors]) + "</ul>")
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def deleteShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
if sickbeard.showQueueScheduler.action.isBeingAdded(showObj) or sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
return _genericMessage("Error", "Shows can't be deleted while they're being added or updated.")
showObj.deleteShow()
ui.notifications.message('<b>%s</b> has been deleted' % showObj.name)
redirect("/home")
@cherrypy.expose
def refreshShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# force the update from the DB
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
ui.notifications.error("Unable to refresh this show.",
ex(e))
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def updateShow(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# force the update
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, bool(force)) #@UndefinedVariable
except exceptions.CantUpdateException, e:
ui.notifications.error("Unable to update this show.",
ex(e))
# just give it some time
time.sleep(3)
redirect("/home/displayShow?show=" + str(showObj.tvdbid))
@cherrypy.expose
def subtitleShow(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# search and download subtitles
sickbeard.showQueueScheduler.action.downloadSubtitles(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def subtitleShowClean(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
        # clean the existing subtitles for this show
sickbeard.showQueueScheduler.action.cleanSubtitles(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def frenchSearch(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
        # kick off a search for french-language episodes of this show
sickbeard.showQueueScheduler.action.searchFrench(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def updateXBMC(self, showName=None):
if sickbeard.XBMC_UPDATE_ONLYFIRST:
# only send update to first host in the list -- workaround for xbmc sql backend users
host = sickbeard.XBMC_HOST.split(",")[0].strip()
else:
host = sickbeard.XBMC_HOST
if notifiers.xbmc_notifier.update_library(showName=showName):
ui.notifications.message("Library update command sent to XBMC host(s): " + host)
else:
ui.notifications.error("Unable to contact one or more XBMC host(s): " + host)
redirect('/home')
@cherrypy.expose
def updatePLEX(self):
if notifiers.plex_notifier.update_library():
ui.notifications.message("Library update command sent to Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST)
else:
ui.notifications.error("Unable to contact Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST)
redirect('/home')
@cherrypy.expose
def setStatus(self, show=None, eps=None, status=None, direct=False):
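        """
        Set the status of one or more episodes of a show. eps is a
        '|'-separated list of 'SxE' pairs, e.g. (illustrative) '1x1|1x2'.
        Setting episodes to WANTED also queues a backlog search for each
        affected season (or month for air-by-date shows).
        """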
if show == None or eps == None or status == None:
errMsg = "You must specify a show and at least one episode"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
if not statusStrings.has_key(int(status)):
errMsg = "Invalid status"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
errMsg = "Error", "Show not in show list"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
segment_list = []
if eps != None:
for curEp in eps.split('|'):
logger.log(u"Attempting to set status on episode "+curEp+" to "+status, logger.DEBUG)
epInfo = curEp.split('x')
                epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1]))
                # bail out before touching attributes on a missing episode
                if epObj == None:
                    return _genericMessage("Error", "Episode couldn't be retrieved")
                if int(status) == WANTED:
                    # figure out what segment the episode is in and remember it so we can backlog it
                    if epObj.show.air_by_date:
                        ep_segment = str(epObj.airdate)[:7]
                    else:
                        ep_segment = epObj.season
                    if ep_segment not in segment_list:
                        segment_list.append(ep_segment)
with epObj.lock:
# don't let them mess up UNAIRED episodes
if epObj.status == UNAIRED:
logger.log(u"Refusing to change status of "+curEp+" because it is UNAIRED", logger.ERROR)
continue
if int(status) in Quality.DOWNLOADED and epObj.status not in Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH + Quality.DOWNLOADED + [IGNORED] and not ek.ek(os.path.isfile, epObj.location):
logger.log(u"Refusing to change status of "+curEp+" to DOWNLOADED because it's not SNATCHED/DOWNLOADED", logger.ERROR)
continue
epObj.status = int(status)
epObj.saveToDB()
msg = "Backlog was automatically started for the following seasons of <b>"+showObj.name+"</b>:<br />"
for cur_segment in segment_list:
msg += "<li>Season "+str(cur_segment)+"</li>"
logger.log(u"Sending backlog for "+showObj.name+" season "+str(cur_segment)+" because some eps were set to wanted")
cur_backlog_queue_item = search_queue.BacklogQueueItem(showObj, cur_segment)
sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item) #@UndefinedVariable
msg += "</ul>"
if segment_list:
ui.notifications.message("Backlog started", msg)
if direct:
return json.dumps({'result': 'success'})
else:
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def setAudio(self, show=None, eps=None, audio_langs=None, direct=False):
if show == None or eps == None or audio_langs == None:
errMsg = "You must specify a show and at least one episode"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
try:
show_loc = showObj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
ep_obj_rename_list = []
for curEp in eps.split('|'):
logger.log(u"Attempting to set audio on episode "+curEp+" to "+audio_langs, logger.DEBUG)
epInfo = curEp.split('x')
epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1]))
epObj.audio_langs = str(audio_langs)
epObj.saveToDB()
if direct:
return json.dumps({'result': 'success'})
else:
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def testRename(self, show=None):
if show == None:
return _genericMessage("Error", "You must specify a show")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
try:
show_loc = showObj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
ep_obj_rename_list = []
ep_obj_list = showObj.getAllEpisodes(has_location=True)
for cur_ep_obj in ep_obj_list:
# Only want to rename if we have a location
if cur_ep_obj.location:
if cur_ep_obj.relatedEps:
# do we have one of multi-episodes in the rename list already
have_already = False
for cur_related_ep in cur_ep_obj.relatedEps + [cur_ep_obj]:
if cur_related_ep in ep_obj_rename_list:
have_already = True
break
if not have_already:
ep_obj_rename_list.append(cur_ep_obj)
else:
ep_obj_rename_list.append(cur_ep_obj)
if ep_obj_rename_list:
# present season DESC episode DESC on screen
ep_obj_rename_list.reverse()
t = PageTemplate(file="testRename.tmpl")
t.submenu = [{'title': 'Edit', 'path': 'home/editShow?show=%d' % showObj.tvdbid}]
t.ep_obj_list = ep_obj_rename_list
t.show = showObj
return _munge(t)
@cherrypy.expose
def doRename(self, show=None, eps=None):
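        """
        Rename the given '|'-separated list of 'SxE' episodes on disk, folding
        any episodes that share the same file into one rename so multi-episode
        files are only renamed once.
        """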
if show == None or eps == None:
errMsg = "You must specify a show and at least one episode"
return _genericMessage("Error", errMsg)
show_obj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if show_obj == None:
errMsg = "Error", "Show not in show list"
return _genericMessage("Error", errMsg)
try:
show_loc = show_obj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
myDB = db.DBConnection()
if eps == None:
redirect("/home/displayShow?show=" + show)
for curEp in eps.split('|'):
epInfo = curEp.split('x')
# this is probably the worst possible way to deal with double eps but I've kinda painted myself into a corner here with this stupid database
ep_result = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ? AND 5=5", [show, epInfo[0], epInfo[1]])
if not ep_result:
logger.log(u"Unable to find an episode for "+curEp+", skipping", logger.WARNING)
continue
related_eps_result = myDB.select("SELECT * FROM tv_episodes WHERE location = ? AND episode != ?", [ep_result[0]["location"], epInfo[1]])
root_ep_obj = show_obj.getEpisode(int(epInfo[0]), int(epInfo[1]))
for cur_related_ep in related_eps_result:
related_ep_obj = show_obj.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"]))
if related_ep_obj not in root_ep_obj.relatedEps:
root_ep_obj.relatedEps.append(related_ep_obj)
root_ep_obj.rename()
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def trunchistory(self, epid):
myDB = db.DBConnection()
nbep = myDB.select("Select count(*) from episode_links where episode_id=?",[epid])
myDB.action("DELETE from episode_links where episode_id=?",[epid])
messnum = str(nbep[0][0]) + ' history links deleted'
        ui.notifications.message('Episode History Truncated', messnum)
return json.dumps({'result': 'ok'})
@cherrypy.expose
def searchEpisode(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# make a queue item for it and put it on the queue
ep_queue_item = search_queue.ManualSearchQueueItem(ep_obj)
sickbeard.searchQueueScheduler.action.add_item(ep_queue_item) #@UndefinedVariable
# wait until the queue item tells us whether it worked or not
while ep_queue_item.success == None: #@UndefinedVariable
time.sleep(1)
# return the correct json value
if ep_queue_item.success:
return json.dumps({'result': statusStrings[ep_obj.status]})
return json.dumps({'result': 'failure'})
@cherrypy.expose
def searchEpisodeSubtitles(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# try do download subtitles for that episode
previous_subtitles = ep_obj.subtitles
try:
subtitles = ep_obj.downloadSubtitles()
if sickbeard.SUBTITLES_DIR:
for video in subtitles:
subs_new_path = ek.ek(os.path.join, os.path.dirname(video.path), sickbeard.SUBTITLES_DIR)
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
if sickbeard.SUBSNOLANG:
helpers.copyFile(new_file_path,new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path)
else:
if sickbeard.SUBTITLES_DIR_SUB:
for video in subtitles:
subs_new_path = os.path.join(os.path.dirname(video.path),"Subs")
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
if sickbeard.SUBSNOLANG:
helpers.copyFile(new_file_path,new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path)
else:
for video in subtitles:
for subtitle in subtitles.get(video):
if sickbeard.SUBSNOLANG:
helpers.copyFile(subtitle.path,subtitle.path[:-6]+"srt")
helpers.chmodAsParent(subtitle.path[:-6]+"srt")
helpers.chmodAsParent(subtitle.path)
except:
return json.dumps({'result': 'failure'})
# return the correct json value
if previous_subtitles != ep_obj.subtitles:
status = 'New subtitles downloaded: %s' % ' '.join(["<img src='"+sickbeard.WEB_ROOT+"/images/flags/"+subliminal.language.Language(x).alpha2+".png' alt='"+subliminal.language.Language(x).name+"'/>" for x in sorted(list(set(ep_obj.subtitles).difference(previous_subtitles)))])
else:
status = 'No subtitles downloaded'
ui.notifications.message('Subtitles Search', status)
return json.dumps({'result': status, 'subtitles': ','.join([x for x in ep_obj.subtitles])})
@cherrypy.expose
def mergeEpisodeSubtitles(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# try do merge subtitles for that episode
try:
ep_obj.mergeSubtitles()
except Exception as e:
return json.dumps({'result': 'failure', 'exception': str(e)})
# return the correct json value
        status = 'Subtitles merged successfully'
ui.notifications.message('Merge Subtitles', status)
return json.dumps({'result': 'ok'})
class UI:
@cherrypy.expose
def add_message(self):
ui.notifications.message('Test 1', 'This is test number 1')
ui.notifications.error('Test 2', 'This is test number 2')
return "ok"
@cherrypy.expose
def get_messages(self):
messages = {}
cur_notification_num = 1
for cur_notification in ui.notifications.get_notifications():
messages['notification-'+str(cur_notification_num)] = {'title': cur_notification.title,
'message': cur_notification.message,
'type': cur_notification.type}
cur_notification_num += 1
return json.dumps(messages)
class WebInterface:
@cherrypy.expose
def index(self):
redirect("/home")
@cherrypy.expose
def showPoster(self, show=None, which=None):
#Redirect initial poster/banner thumb to default images
if which[0:6] == 'poster':
default_image_name = 'poster.png'
else:
default_image_name = 'banner.png'
default_image_path = ek.ek(os.path.join, sickbeard.PROG_DIR, 'data', 'images', default_image_name)
if show is None:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
else:
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
cache_obj = image_cache.ImageCache()
        if which == 'poster':
            image_file_name = cache_obj.poster_path(showObj.tvdbid)
        elif which == 'poster_thumb':
            image_file_name = cache_obj.poster_thumb_path(showObj.tvdbid)
        elif which == 'banner':
            image_file_name = cache_obj.banner_path(showObj.tvdbid)
        elif which == 'banner_thumb':
            image_file_name = cache_obj.banner_thumb_path(showObj.tvdbid)
        else:
            # unknown image type requested, serve the default image instead of
            # hitting a NameError below
            return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
        if ek.ek(os.path.isfile, image_file_name):
            return cherrypy.lib.static.serve_file(image_file_name, content_type="image/jpeg")
        else:
            return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
@cherrypy.expose
def setHomeLayout(self, layout):
if layout not in ('poster', 'banner', 'simple'):
layout = 'poster'
sickbeard.HOME_LAYOUT = layout
redirect("/home")
@cherrypy.expose
def setHomeSearch(self, search):
if search not in ('True', 'False'):
search = 'False'
        sickbeard.TOGGLE_SEARCH = search
redirect("/home")
@cherrypy.expose
def toggleDisplayShowSpecials(self, show):
sickbeard.DISPLAY_SHOW_SPECIALS = not sickbeard.DISPLAY_SHOW_SPECIALS
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def setComingEpsLayout(self, layout):
if layout not in ('poster', 'banner', 'list'):
layout = 'banner'
sickbeard.COMING_EPS_LAYOUT = layout
redirect("/comingEpisodes")
@cherrypy.expose
def toggleComingEpsDisplayPaused(self):
sickbeard.COMING_EPS_DISPLAY_PAUSED = not sickbeard.COMING_EPS_DISPLAY_PAUSED
redirect("/comingEpisodes")
@cherrypy.expose
def setComingEpsSort(self, sort):
if sort not in ('date', 'network', 'show'):
sort = 'date'
sickbeard.COMING_EPS_SORT = sort
redirect("/comingEpisodes")
@cherrypy.expose
def comingEpisodes(self, layout="None"):
# get local timezone and load network timezones
sb_timezone = tz.tzlocal()
network_dict = network_timezones.load_network_dict()
myDB = db.DBConnection()
today1 = datetime.date.today()
today = today1.toordinal()
next_week1 = (datetime.date.today() + datetime.timedelta(days=7))
next_week = next_week1.toordinal()
recently = (datetime.date.today() - datetime.timedelta(days=sickbeard.COMING_EPS_MISSED_RANGE)).toordinal()
done_show_list = []
qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED]
sql_results1 = myDB.select("SELECT *, 0 as localtime, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate < ? AND tv_shows.tvdb_id = tv_episodes.showid AND tv_episodes.status NOT IN ("+','.join(['?']*len(qualList))+")", [today, next_week] + qualList)
for cur_result in sql_results1:
done_show_list.append(helpers.tryInt(cur_result["showid"]))
more_sql_results = myDB.select("SELECT *, tv_shows.status as show_status FROM tv_episodes outer_eps, tv_shows WHERE season != 0 AND showid NOT IN ("+','.join(['?']*len(done_show_list))+") AND tv_shows.tvdb_id = outer_eps.showid AND airdate IN (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? AND inner_eps.status NOT IN ("+','.join(['?']*len(Quality.DOWNLOADED+Quality.SNATCHED))+") ORDER BY inner_eps.airdate ASC LIMIT 1)", done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED)
sql_results1 += more_sql_results
more_sql_results = myDB.select("SELECT *, 0 as localtime, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.tvdb_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN ("+','.join(['?']*len(qualList))+")", [today, recently, WANTED] + qualList)
sql_results1 += more_sql_results
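        # sql_results1 now merges three result sets: episodes airing within the
        # next week, the next upcoming episode for each show not covered by the
        # first query, and recently aired episodes that are still WANTED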
# sort by localtime
sorts = {
'date': (lambda x, y: cmp(x["localtime"], y["localtime"])),
'show': (lambda a, b: cmp((a["show_name"], a["localtime"]), (b["show_name"], b["localtime"]))),
'network': (lambda a, b: cmp((a["network"], a["localtime"]), (b["network"], b["localtime"]))),
}
# make a dict out of the sql results
sql_results = [dict(row) for row in sql_results1]
# regex to parse time (12/24 hour format)
time_regex = re.compile(r"(\d{1,2}):(\d{2,2})( [PA]M)?\b", flags=re.IGNORECASE)
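        # e.g. "8:00 PM" -> ('8', '00', ' PM'), "20:00" -> ('20', '00', None)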
# add localtime to the dict
for index, item in enumerate(sql_results1):
mo = time_regex.search(item['airs'])
if mo != None and len(mo.groups()) >= 2:
try:
hr = helpers.tryInt(mo.group(1))
m = helpers.tryInt(mo.group(2))
ap = mo.group(3)
# convert am/pm to 24 hour clock
if ap != None:
if ap.lower() == u" pm" and hr != 12:
hr += 12
elif ap.lower() == u" am" and hr == 12:
hr -= 12
except:
hr = 0
m = 0
else:
hr = 0
m = 0
if hr < 0 or hr > 23 or m < 0 or m > 59:
hr = 0
m = 0
te = datetime.datetime.fromordinal(helpers.tryInt(item['airdate']))
foreign_timezone = network_timezones.get_network_timezone(item['network'], network_dict, sb_timezone)
foreign_naive = datetime.datetime(te.year, te.month, te.day, hr, m,tzinfo=foreign_timezone)
sql_results[index]['localtime'] = foreign_naive.astimezone(sb_timezone)
#Normalize/Format the Airing Time
try:
                locale.setlocale(locale.LC_TIME, 'en_US')
sql_results[index]['localtime_string'] = sql_results[index]['localtime'].strftime("%A %H:%M %p")
                locale.setlocale(locale.LC_ALL, '') #Resetting to default locale
except:
sql_results[index]['localtime_string'] = sql_results[index]['localtime'].strftime("%A %H:%M %p")
sql_results.sort(sorts[sickbeard.COMING_EPS_SORT])
t = PageTemplate(file="comingEpisodes.tmpl")
# paused_item = { 'title': '', 'path': 'toggleComingEpsDisplayPaused' }
# paused_item['title'] = 'Hide Paused' if sickbeard.COMING_EPS_DISPLAY_PAUSED else 'Show Paused'
paused_item = { 'title': 'View Paused:', 'path': {'': ''} }
paused_item['path'] = {'Hide': 'toggleComingEpsDisplayPaused'} if sickbeard.COMING_EPS_DISPLAY_PAUSED else {'Show': 'toggleComingEpsDisplayPaused'}
t.submenu = [
{ 'title': 'Sort by:', 'path': {'Date': 'setComingEpsSort/?sort=date',
'Show': 'setComingEpsSort/?sort=show',
'Network': 'setComingEpsSort/?sort=network',
}},
{ 'title': 'Layout:', 'path': {'Banner': 'setComingEpsLayout/?layout=banner',
'Poster': 'setComingEpsLayout/?layout=poster',
'List': 'setComingEpsLayout/?layout=list',
}},
paused_item,
]
t.next_week = datetime.datetime.combine(next_week1, datetime.time(tzinfo=sb_timezone))
t.today = datetime.datetime.now().replace(tzinfo=sb_timezone)
t.sql_results = sql_results
# Allow local overriding of layout parameter
if layout and layout in ('poster', 'banner', 'list'):
t.layout = layout
else:
t.layout = sickbeard.COMING_EPS_LAYOUT
return _munge(t)
# Raw iCalendar implementation by Pedro Jose Pereira Vieito (@pvieito).
#
    # iCalendar (iCal) - Standard RFC 5545 <http://tools.ietf.org/html/rfc5545>
# Works with iCloud, Google Calendar and Outlook.
@cherrypy.expose
def calendar(self):
""" Provides a subscribeable URL for iCal subscriptions
"""
logger.log(u"Receiving iCal request from %s" % cherrypy.request.remote.ip)
poster_url = cherrypy.url().replace('ical', '')
        time_re = re.compile('([0-9]{1,2})\:([0-9]{2})(\ |)(AM|am|PM|pm)')
# Create a iCal string
ical = 'BEGIN:VCALENDAR\n'
ical += 'VERSION:2.0\n'
ical += 'PRODID://Sick-Beard Upcoming Episodes//\n'
# Get shows info
myDB = db.DBConnection()
# Limit dates
past_date = (datetime.date.today() + datetime.timedelta(weeks=-52)).toordinal()
future_date = (datetime.date.today() + datetime.timedelta(weeks=52)).toordinal()
# Get all the shows that are not paused and are currently on air (from kjoconnor Fork)
calendar_shows = myDB.select("SELECT show_name, tvdb_id, network, airs, runtime FROM tv_shows WHERE status = 'Continuing' AND paused != '1'")
for show in calendar_shows:
# Get all episodes of this show airing between today and next month
episode_list = myDB.select("SELECT tvdbid, name, season, episode, description, airdate FROM tv_episodes WHERE airdate >= ? AND airdate < ? AND showid = ?", (past_date, future_date, int(show["tvdb_id"])))
for episode in episode_list:
# Get local timezone and load network timezones
local_zone = tz.tzlocal()
try:
network_zone = network_timezones.get_network_timezone(show['network'], network_timezones.load_network_dict(), local_zone)
except:
# Dummy network_zone for exceptions
network_zone = None
# Get the air date and time
air_date = datetime.datetime.fromordinal(int(episode['airdate']))
                air_time = time_re.search(show["airs"])
# Parse out the air time
try:
if (air_time.group(4).lower() == 'pm' and int(air_time.group(1)) == 12):
t = datetime.time(12, int(air_time.group(2)), 0, tzinfo=network_zone)
elif (air_time.group(4).lower() == 'pm'):
t = datetime.time((int(air_time.group(1)) + 12), int(air_time.group(2)), 0, tzinfo=network_zone)
elif (air_time.group(4).lower() == 'am' and int(air_time.group(1)) == 12):
t = datetime.time(0, int(air_time.group(2)), 0, tzinfo=network_zone)
else:
t = datetime.time(int(air_time.group(1)), int(air_time.group(2)), 0, tzinfo=network_zone)
except:
# Dummy time for exceptions
t = datetime.time(22, 0, 0, tzinfo=network_zone)
# Combine air time and air date into one datetime object
air_date_time = datetime.datetime.combine(air_date, t).astimezone(local_zone)
# Create event for episode
ical = ical + 'BEGIN:VEVENT\n'
ical = ical + 'DTSTART:' + str(air_date_time.date()).replace("-", "") + '\n'
ical = ical + 'SUMMARY:' + show['show_name'] + ': ' + episode['name'] + '\n'
ical = ical + 'UID:' + str(datetime.date.today().isoformat()) + '-' + str(random.randint(10000,99999)) + '@Sick-Beard\n'
if (episode['description'] != ''):
ical = ical + 'DESCRIPTION:' + show['airs'] + ' on ' + show['network'] + '\\n\\n' + episode['description'] + '\n'
else:
ical = ical + 'DESCRIPTION:' + show['airs'] + ' on ' + show['network'] + '\n'
ical = ical + 'LOCATION:' + 'Episode ' + str(episode['episode']) + ' - Season ' + str(episode['season']) + '\n'
ical = ical + 'END:VEVENT\n'
# Ending the iCal
ical += 'END:VCALENDAR\n'
return ical
manage = Manage()
history = History()
config = Config()
home = Home()
api = Api()
browser = browser.WebFileBrowser()
errorlogs = ErrorLogs()
ui = UI()
|
b1acknight/linux
|
refs/heads/master
|
Documentation/target/tcm_mod_builder.py
|
237
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
	if os.path.isdir(fabric_mod_dir_var):
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
	try:
		os.mkdir(fabric_mod_dir_var)
	except OSError:
		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
	p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
	p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
	p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
	p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_proto.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
if proto_ident == "FC":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
elif proto_ident == "SAS":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
elif proto_ident == "iSCSI":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .module = THIS_MODULE,\n"
buf += " .name = \"" + fabric_mod_name + "\",\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
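# Scan include/target/target_core_fabric.h for the function pointer members of
# struct target_core_fabric_ops; matching lines are collected into fabric_ops
# so tcm_mod_dump_fabric_ops() can emit stub implementations for each one.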
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	process_fo = 0
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
			process_fo = 1
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_common.h>\n"
buf += "#include <scsi/scsi_proto.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('aborted_task\)\(', fo):
buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
	tcm_dir = os.getcwd()
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
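# Example invocation, run from Documentation/target/ inside a kernel tree
# (module name and protocol here are only illustrative):
#   ./tcm_mod_builder.py -m tcm_nab5000 -p iSCSI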
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
|
mesosphere/dcos-template
|
refs/heads/master
|
dcos_template/mesos.py
|
1
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import requests
from . import log
# XXX - Make this configurable
TIMEOUT = 5
# {
# "services": [
# {
# "name": "derp",
# "tasks": [
# {
# "ip": "127.0.1.1",
# "port": 31261
# }
# ]
# }
# ]
# }
# XXX - Should cache this
@log.duration
def fetch():
try:
# XXX - Make the mesos source configurable.
return requests.get(
"http://leader.mesos:5050/master/state",
timeout=TIMEOUT).json()
except requests.exceptions.ConnectionError:
log.fatal("Could not get state.json")
def get_task_data(task):
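    # a trimmed sketch of the task dict shape this code expects
    # (values are hypothetical):
    #   {"statuses": [{"container_status": {"network_infos":
    #       [{"ip_addresses": [{"ip_address": "127.0.1.1"}]}]}}],
    #    "resources": {"ports": "[31261-31261]"}}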
return {
"ip": task.get("statuses", [])[-1].get("container_status").get(
"network_infos")[0].get("ip_addresses")[0].get("ip_address"),
"port": int(task.get("resources").get("ports").split("-")[0][1:])
}
def services():
    tasks = [x for y in
             [fwk.get("tasks") for fwk in fetch().get("frameworks")] for x in y]
    # itertools.groupby only groups consecutive items, so the tasks must be
    # sorted by the grouping key first or same-named tasks get split apart
    tasks.sort(key=lambda x: x.get("name", None))
    return [{
        "name": name,
        "tasks": map(get_task_data, data)
    } for name, data in itertools.groupby(tasks,
                                          key=lambda x: x.get("name", None))]
|
jkitchin/pycse
|
refs/heads/master
|
pycse/obipython.py
|
1
|
import IPython
from tabulate import tabulate
class OrgFormatter(IPython.core.formatters.BaseFormatter):
def __call__(self, obj):
try:
return tabulate(obj, headers='keys',
tablefmt='orgtbl', showindex='always')
        except Exception:
            return None
ip = IPython.get_ipython()
ip.display_formatter.formatters['text/org'] = OrgFormatter()
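# Rough usage sketch (assumes an IPython session where this module was run):
#   tabulate({'a': [1, 2], 'b': [3, 4]}, headers='keys', tablefmt='orgtbl',
#            showindex='always') renders an org-mode table; objects tabulate
#   cannot handle return None, so IPython falls back to other formatters.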
|
gmimano/commcaretest
|
refs/heads/master
|
corehq/apps/translations/urls.py
|
1
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('corehq.apps.translations.views',
url(r'^api/suggestions/$', 'get_translations'),
url(r'^api/set/$', 'set_translations', name="set_translation"),
url(r'^edit/$', 'edit'),
)
|
WarrenWeckesser/scipy
|
refs/heads/master
|
scipy/fftpack/pseudo_diffs.py
|
22
|
"""
Differential and pseudo-differential operators.
"""
# Created by Pearu Peterson, September 2002
__all__ = ['diff',
'tilbert','itilbert','hilbert','ihilbert',
'cs_diff','cc_diff','sc_diff','ss_diff',
'shift']
from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj
from . import convolve
from scipy.fft._pocketfft.helper import _datacopied
_cache = {}
def diff(x,order=1,period=None, _cache=_cache):
"""
Return kth derivative (or integral) of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j
y_0 = 0 if order is not 0.
Parameters
----------
x : array_like
Input array.
order : int, optional
The order of differentiation. Default order is 1. If order is
negative, then integration is carried out under the assumption
that ``x_0 == 0``.
period : float, optional
The assumed period of the sequence. Default is ``2*pi``.
Notes
-----
If ``sum(x, axis=0) = 0`` then ``diff(diff(x, k), -k) == x`` (within
numerical accuracy).
For odd order and even ``len(x)``, the Nyquist mode is taken zero.
"""
tmp = asarray(x)
if order == 0:
return tmp
if iscomplexobj(tmp):
return diff(tmp.real,order,period)+1j*diff(tmp.imag,order,period)
if period is not None:
c = 2*pi/period
else:
c = 1.0
n = len(x)
omega = _cache.get((n,order,c))
if omega is None:
if len(_cache) > 20:
while _cache:
_cache.popitem()
def kernel(k,order=order,c=c):
if k:
return pow(c*k,order)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=order,
zero_nyquist=1)
_cache[(n,order,c)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=order % 2,
overwrite_x=overwrite_x)
del _cache
_cache = {}
def tilbert(x, h, period=None, _cache=_cache):
"""
Return h-Tilbert transform of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j
y_0 = 0
Parameters
----------
x : array_like
The input array to transform.
h : float
Defines the parameter of the Tilbert transform.
period : float, optional
The assumed period of the sequence. Default period is ``2*pi``.
Returns
-------
tilbert : ndarray
The result of the transform.
Notes
-----
If ``sum(x, axis=0) == 0`` and ``n = len(x)`` is odd, then
``tilbert(itilbert(x)) == x``.
If ``2 * pi * h / period`` is approximately 10 or larger, then
numerically ``tilbert == hilbert``
(theoretically oo-Tilbert == Hilbert).
For even ``len(x)``, the Nyquist mode of ``x`` is taken zero.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return tilbert(tmp.real, h, period) + \
1j * tilbert(tmp.imag, h, period)
if period is not None:
h = h * 2 * pi / period
n = len(x)
omega = _cache.get((n, h))
if omega is None:
if len(_cache) > 20:
while _cache:
_cache.popitem()
def kernel(k, h=h):
if k:
return 1.0/tanh(h*k)
return 0
omega = convolve.init_convolution_kernel(n, kernel, d=1)
_cache[(n,h)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
_cache = {}
def itilbert(x,h,period=None, _cache=_cache):
"""
Return inverse h-Tilbert transform of a periodic sequence x.
If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j
y_0 = 0
For more details, see `tilbert`.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return itilbert(tmp.real,h,period) + \
1j*itilbert(tmp.imag,h,period)
if period is not None:
h = h*2*pi/period
n = len(x)
omega = _cache.get((n,h))
if omega is None:
if len(_cache) > 20:
while _cache:
_cache.popitem()
def kernel(k,h=h):
if k:
return -tanh(h*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,h)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
_cache = {}
def hilbert(x, _cache=_cache):
"""
Return Hilbert transform of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = sqrt(-1)*sign(j) * x_j
y_0 = 0
Parameters
----------
x : array_like
The input array, should be periodic.
_cache : dict, optional
Dictionary that contains the kernel used to do a convolution with.
Returns
-------
y : ndarray
The transformed input.
See Also
--------
scipy.signal.hilbert : Compute the analytic signal, using the Hilbert
transform.
Notes
-----
If ``sum(x, axis=0) == 0`` then ``hilbert(ihilbert(x)) == x``.
For even len(x), the Nyquist mode of x is taken zero.
The sign of the returned transform does not have a factor -1 that is more
often than not found in the definition of the Hilbert transform. Note also
that `scipy.signal.hilbert` does have an extra -1 factor compared to this
function.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return hilbert(tmp.real)+1j*hilbert(tmp.imag)
n = len(x)
omega = _cache.get(n)
if omega is None:
if len(_cache) > 20:
while _cache:
_cache.popitem()
def kernel(k):
if k > 0:
return 1.0
elif k < 0:
return -1.0
return 0.0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[n] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
def ihilbert(x):
"""
Return inverse Hilbert transform of a periodic sequence x.
If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = -sqrt(-1)*sign(j) * x_j
y_0 = 0
"""
return -hilbert(x)
_cache = {}
def cs_diff(x, a, b, period=None, _cache=_cache):
"""
Return (a,b)-cosh/sinh pseudo-derivative of a periodic sequence.
If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = -sqrt(-1)*cosh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j
y_0 = 0
Parameters
----------
x : array_like
The array to take the pseudo-derivative from.
a, b : float
Defines the parameters of the cosh/sinh pseudo-differential
operator.
period : float, optional
The period of the sequence. Default period is ``2*pi``.
Returns
-------
cs_diff : ndarray
Pseudo-derivative of periodic sequence `x`.
Notes
-----
For even len(`x`), the Nyquist mode of `x` is taken as zero.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return cs_diff(tmp.real,a,b,period) + \
1j*cs_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache) > 20:
while _cache:
_cache.popitem()
def kernel(k,a=a,b=b):
if k:
return -cosh(a*k)/sinh(b*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
_cache = {}
def sc_diff(x, a, b, period=None, _cache=_cache):
"""
Return (a,b)-sinh/cosh pseudo-derivative of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = sqrt(-1)*sinh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
y_0 = 0
Parameters
----------
x : array_like
Input array.
a,b : float
Defines the parameters of the sinh/cosh pseudo-differential
operator.
period : float, optional
The period of the sequence x. Default is 2*pi.
Notes
-----
``sc_diff(cs_diff(x,a,b),b,a) == x``
For even ``len(x)``, the Nyquist mode of x is taken as zero.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return sc_diff(tmp.real,a,b,period) + \
1j*sc_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache) > 20:
while _cache:
_cache.popitem()
def kernel(k,a=a,b=b):
if k:
return sinh(a*k)/cosh(b*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x)
del _cache
_cache = {}
def ss_diff(x, a, b, period=None, _cache=_cache):
"""
Return (a,b)-sinh/sinh pseudo-derivative of a periodic sequence x.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = sinh(j*a*2*pi/period)/sinh(j*b*2*pi/period) * x_j
y_0 = a/b * x_0
Parameters
----------
x : array_like
The array to take the pseudo-derivative from.
    a,b : float
Defines the parameters of the sinh/sinh pseudo-differential
operator.
period : float, optional
The period of the sequence x. Default is ``2*pi``.
Notes
-----
``ss_diff(ss_diff(x,a,b),b,a) == x``
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return ss_diff(tmp.real,a,b,period) + \
1j*ss_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache) > 20:
while _cache:
_cache.popitem()
def kernel(k,a=a,b=b):
if k:
return sinh(a*k)/sinh(b*k)
return float(a)/b
omega = convolve.init_convolution_kernel(n,kernel)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)
del _cache
_cache = {}
def cc_diff(x, a, b, period=None, _cache=_cache):
"""
Return (a,b)-cosh/cosh pseudo-derivative of a periodic sequence.
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = cosh(j*a*2*pi/period)/cosh(j*b*2*pi/period) * x_j
Parameters
----------
x : array_like
The array to take the pseudo-derivative from.
a,b : float
        Defines the parameters of the cosh/cosh pseudo-differential
        operator.
period : float, optional
The period of the sequence x. Default is ``2*pi``.
Returns
-------
cc_diff : ndarray
Pseudo-derivative of periodic sequence `x`.
Notes
-----
``cc_diff(cc_diff(x,a,b),b,a) == x``
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return cc_diff(tmp.real,a,b,period) + \
1j*cc_diff(tmp.imag,a,b,period)
if period is not None:
a = a*2*pi/period
b = b*2*pi/period
n = len(x)
omega = _cache.get((n,a,b))
if omega is None:
if len(_cache) > 20:
while _cache:
_cache.popitem()
def kernel(k,a=a,b=b):
return cosh(a*k)/cosh(b*k)
omega = convolve.init_convolution_kernel(n,kernel)
_cache[(n,a,b)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,overwrite_x=overwrite_x)
del _cache
_cache = {}
def shift(x, a, period=None, _cache=_cache):
"""
Shift periodic sequence x by a: y(u) = x(u+a).
If x_j and y_j are Fourier coefficients of periodic functions x
and y, respectively, then::
      y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_j
Parameters
----------
    x : array_like
        The array to shift.
    a : float
        The amount by which to shift the sequence.
period : float, optional
The period of the sequences x and y. Default period is ``2*pi``.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return shift(tmp.real,a,period)+1j*shift(tmp.imag,a,period)
if period is not None:
a = a*2*pi/period
n = len(x)
omega = _cache.get((n,a))
if omega is None:
if len(_cache) > 20:
while _cache:
_cache.popitem()
def kernel_real(k,a=a):
return cos(a*k)
def kernel_imag(k,a=a):
return sin(a*k)
omega_real = convolve.init_convolution_kernel(n,kernel_real,d=0,
zero_nyquist=0)
omega_imag = convolve.init_convolution_kernel(n,kernel_imag,d=1,
zero_nyquist=0)
_cache[(n,a)] = omega_real,omega_imag
else:
omega_real,omega_imag = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve_z(tmp,omega_real,omega_imag,
overwrite_x=overwrite_x)
del _cache
|
grap/OCB
|
refs/heads/7.0
|
addons/mrp/mrp.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, orm
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.tools import float_compare
from openerp.tools.translate import _
from openerp import netsvc
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.addons.product import _common
#----------------------------------------------------------
# Work Centers
#----------------------------------------------------------
# capacity_hour : capacity per hour. default: 1.0.
# Eg: If 5 concurrent operations at one time: capacity = 5 (because 5 employees)
# unit_per_cycle : how many units are produced for one cycle
class mrp_workcenter(osv.osv):
_name = 'mrp.workcenter'
_description = 'Work Center'
_inherits = {'resource.resource':"resource_id"}
_columns = {
'note': fields.text('Description', help="Description of the Work Center. Explain here what's a cycle according to this Work Center."),
'capacity_per_cycle': fields.float('Capacity per Cycle', help="Number of operations this Work Center can do in parallel. If this Work Center represents a team of 5 workers, the capacity per cycle is 5."),
'time_cycle': fields.float('Time for 1 cycle (hour)', help="Time in hours for doing one cycle."),
'time_start': fields.float('Time before prod.', help="Time in hours for the setup."),
'time_stop': fields.float('Time after prod.', help="Time in hours for the cleaning."),
'costs_hour': fields.float('Cost per hour', help="Specify Cost of Work Center per hour."),
'costs_hour_account_id': fields.many2one('account.analytic.account', 'Hour Account', domain=[('type','!=','view')],
help="Fill this only if you want automatic analytic accounting entries on production orders."),
'costs_cycle': fields.float('Cost per cycle', help="Specify Cost of Work Center per cycle."),
'costs_cycle_account_id': fields.many2one('account.analytic.account', 'Cycle Account', domain=[('type','!=','view')],
help="Fill this only if you want automatic analytic accounting entries on production orders."),
'costs_journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal'),
'costs_general_account_id': fields.many2one('account.account', 'General Account', domain=[('type','!=','view')]),
'resource_id': fields.many2one('resource.resource','Resource', ondelete='cascade', required=True),
'product_id': fields.many2one('product.product','Work Center Product', help="Fill this product to easily track your production costs in the analytic accounting."),
}
_defaults = {
'capacity_per_cycle': 1.0,
'resource_type': 'material',
}
def on_change_product_cost(self, cr, uid, ids, product_id, context=None):
value = {}
if product_id:
cost = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
value = {'costs_hour': cost.standard_price}
return {'value': value}
def _check_capacity_per_cycle(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
if obj.capacity_per_cycle <= 0.0:
return False
return True
_constraints = [
(_check_capacity_per_cycle, 'The capacity per cycle must be strictly positive.', ['capacity_per_cycle']),
]
mrp_workcenter()
class mrp_routing(osv.osv):
"""
For specifying the routings of Work Centers.
"""
_name = 'mrp.routing'
_description = 'Routing'
_columns = {
'name': fields.char('Name', size=64, required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the routing without removing it."),
'code': fields.char('Code', size=8),
'note': fields.text('Description'),
'workcenter_lines': fields.one2many('mrp.routing.workcenter', 'routing_id', 'Work Centers'),
'location_id': fields.many2one('stock.location', 'Production Location',
help="Keep empty if you produce at the location where the finished products are needed." \
"Set a location if you produce at a fixed location. This can be a partner location " \
"if you subcontract the manufacturing operations."
),
'company_id': fields.many2one('res.company', 'Company'),
}
_defaults = {
'active': lambda *a: 1,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.routing', context=context)
}
mrp_routing()
class mrp_routing_workcenter(osv.osv):
"""
Defines working cycles and hours of a Work Center using routings.
"""
_name = 'mrp.routing.workcenter'
_description = 'Work Center Usage'
_order = 'sequence'
_columns = {
'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
'name': fields.char('Name', size=64, required=True),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of routing Work Centers."),
'cycle_nbr': fields.float('Number of Cycles', required=True,
help="Number of iterations this work center has to do in the specified operation of the routing."),
'hour_nbr': fields.float('Number of Hours', required=True, help="Time in hours for this Work Center to achieve the operation of the specified routing."),
'routing_id': fields.many2one('mrp.routing', 'Parent Routing', select=True, ondelete='cascade',
help="Routing indicates all the Work Centers used, for how long and/or cycles." \
"If Routing is indicated then,the third tab of a production order (Work Centers) will be automatically pre-completed."),
'note': fields.text('Description'),
'company_id': fields.related('routing_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
_defaults = {
'cycle_nbr': lambda *a: 1.0,
'hour_nbr': lambda *a: 0.0,
}
mrp_routing_workcenter()
class mrp_bom(osv.osv):
"""
Defines bills of material for a product.
"""
_name = 'mrp.bom'
_description = 'Bill of Material'
_inherit = ['mail.thread']
def _child_compute(self, cr, uid, ids, name, arg, context=None):
""" Gets child bom.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param name: Name of the field
@param arg: User defined argument
@param context: A standard dictionary for contextual values
@return: Dictionary of values
"""
result = {}
if context is None:
context = {}
bom_obj = self.pool.get('mrp.bom')
bom_id = context and context.get('active_id', False) or False
cr.execute('select id from mrp_bom')
if all(bom_id != r[0] for r in cr.fetchall()):
ids.sort()
bom_id = ids[0]
bom_parent = bom_obj.browse(cr, uid, bom_id, context=context)
for bom in self.browse(cr, uid, ids, context=context):
if (bom_parent) or (bom.id == bom_id):
result[bom.id] = map(lambda x: x.id, bom.bom_lines)
else:
result[bom.id] = []
if bom.bom_lines:
continue
ok = ((name=='child_complete_ids') and (bom.product_id.supply_method=='produce'))
if (bom.type=='phantom' or ok):
sids = bom_obj.search(cr, uid, [('bom_id','=',False),('product_id','=',bom.product_id.id)])
if sids:
bom2 = bom_obj.browse(cr, uid, sids[0], context=context)
result[bom.id] += map(lambda x: x.id, bom2.bom_lines)
return result
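# Note on the fallback above: when a BoM has no lines of its own, phantom
# BoMs (and, for 'child_complete_ids', products supplied by manufacturing)
# pull in the lines of the product's own root BoM instead.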
def _compute_type(self, cr, uid, ids, field_name, arg, context=None):
""" Sets particular method for the selected bom type.
@param field_name: Name of the field
@param arg: User defined argument
@return: Dictionary of values
"""
res = dict.fromkeys(ids, False)
for line in self.browse(cr, uid, ids, context=context):
if line.type == 'phantom' and not line.bom_id:
res[line.id] = 'set'
continue
if line.bom_lines or line.type == 'phantom':
continue
if line.product_id.supply_method == 'produce':
if line.product_id.procure_method == 'make_to_stock':
res[line.id] = 'stock'
else:
res[line.id] = 'order'
return res
_columns = {
'name': fields.char('Name', size=64),
'code': fields.char('Reference', size=16),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the bills of material without removing it."),
'type': fields.selection([('normal','Normal BoM'),('phantom','Sets / Phantom')], 'BoM Type', required=True,
help= "If a by-product is used in several products, it can be useful to create its own BoM. "\
"Though if you don't want separated production orders for this by-product, select Set/Phantom as BoM type. "\
"If a Phantom BoM is used for a root product, it will be sold and shipped as a set of components, instead of being produced."),
'method': fields.function(_compute_type, string='Method', type='selection', selection=[('',''),('stock','On Stock'),('order','On Order'),('set','Set / Pack')]),
'date_start': fields.date('Valid From', help="Validity of this BoM or component. Keep empty if it's always valid."),
'date_stop': fields.date('Valid Until', help="Validity of this BoM or component. Keep empty if it's always valid."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of bills of material."),
'position': fields.char('Internal Reference', size=64, help="Reference to a position in an external plan."),
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_uos_qty': fields.float('Product UOS Qty'),
'product_uos': fields.many2one('product.uom', 'Product UOS', help="Product UOS (Unit of Sale) is the unit of measurement for the invoicing and promotion of stock."),
'product_qty': fields.float('Product Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure')),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, help="Unit of Measure is the unit of measurement used for inventory control"),
'product_rounding': fields.float('Product Rounding', help="Rounding applied on the product quantity."),
'product_efficiency': fields.float('Manufacturing Efficiency', required=True, help="A factor of 0.9 means a loss of 10% within the production process."),
'bom_lines': fields.one2many('mrp.bom', 'bom_id', 'BoM Lines'),
'bom_id': fields.many2one('mrp.bom', 'Parent BoM', ondelete='cascade', select=True),
'routing_id': fields.many2one('mrp.routing', 'Routing', help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
'property_ids': fields.many2many('mrp.property', 'mrp_bom_property_rel', 'bom_id','property_id', 'Properties'),
'child_complete_ids': fields.function(_child_compute, relation='mrp.bom', string="BoM Hierarchy", type='many2many'),
'company_id': fields.many2one('res.company','Company',required=True),
}
_defaults = {
'active': lambda *a: 1,
'product_efficiency': lambda *a: 1.0,
'product_qty': lambda *a: 1.0,
'product_rounding': lambda *a: 0.0,
'type': lambda *a: 'normal',
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.bom', context=c),
}
_order = "sequence"
_parent_name = "bom_id"
_sql_constraints = [
('bom_qty_zero', 'CHECK (product_qty>0)', 'All product quantities must be greater than 0.\n' \
'You should install the mrp_byproduct module if you want to manage extra products on BoMs !'),
]
def _check_recursion(self, cr, uid, ids, context=None):
level = 100
while len(ids):
cr.execute('select distinct bom_id from mrp_bom where id IN %s',(tuple(ids),))
ids = filter(None, map(lambda x:x[0], cr.fetchall()))
if not level:
return False
level -= 1
return True
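# A minimal sketch of the bounded parent-walk above, using hypothetical
# plain-dict data instead of the ORM and SQL:
#
#   def has_cycle(parent_of, ids, level=100):
#       while ids:
#           ids = [parent_of[i] for i in ids if parent_of.get(i)]
#           if not level:
#               return True      # walked too deep: assume a recursive BoM
#           level -= 1
#       return False
#
#   has_cycle({1: 2, 2: 1}, [1])   # -> True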
def _check_product(self, cr, uid, ids, context=None):
boms = self.browse(cr, uid, ids, context=context)
def check_bom(boms, all_prod):
res = True
for bom in boms:
if bom.product_id.id in all_prod:
return False
if bom.bom_lines:
res = res and check_bom([b for b in bom.bom_lines if b not in boms], all_prod + [bom.product_id.id])
return res
return check_bom(boms, [])
_constraints = [
(_check_recursion, 'Error! You cannot create recursive BoM.', ['parent_id']),
(_check_product, 'BoM line product should not be same as BoM product.', ['product_id']),
]
def onchange_product_id(self, cr, uid, ids, product_id, name, context=None):
""" Changes UoM and name if product_id changes.
@param name: Name of the field
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
return {'value': {'name': prod.name, 'product_uom': prod.uom_id.id}}
return {}
def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value':{}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
def _bom_find(self, cr, uid, product_id, product_uom, properties=None):
""" Finds BoM for particular product and product uom.
@param product_id: Selected product.
@param product_uom: Unit of measure of a product.
@param properties: List of related properties.
@return: False or BoM id.
"""
if properties is None:
properties = []
domain = [('product_id', '=', product_id), ('bom_id', '=', False),
'|', ('date_start', '=', False), ('date_start', '<=', time.strftime(DEFAULT_SERVER_DATE_FORMAT)),
'|', ('date_stop', '=', False), ('date_stop', '>=', time.strftime(DEFAULT_SERVER_DATE_FORMAT))]
ids = self.search(cr, uid, domain)
max_prop = 0
result = False
for bom in self.pool.get('mrp.bom').browse(cr, uid, ids):
prop = 0
for prop_id in bom.property_ids:
if prop_id.id in properties:
prop += 1
if (prop > max_prop) or ((max_prop == 0) and not result):
result = bom.id
max_prop = prop
return result
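# A minimal sketch of the "most matching properties wins" rule above,
# using hypothetical plain-Python data instead of the ORM:
#
#   def pick_bom(candidates, wanted):
#       best, best_score = False, 0
#       for bom_id, props in candidates:
#           score = len(set(props) & set(wanted))
#           if score > best_score or (best_score == 0 and not best):
#               best, best_score = bom_id, score
#       return best
#
#   pick_bom([(1, [10]), (2, [10, 11])], wanted=[10, 11])   # -> 2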
def _bom_explode(self, cr, uid, bom, factor, properties=None, addthis=False, level=0, routing_id=False, context=None):
""" Finds Products and Work Centers for related BoM for manufacturing order.
@param bom: BoM of particular product.
@param factor: Factor of product UoM.
@param properties: A List of properties Ids.
@param addthis: If BoM found then True else False.
@param level: Depth level for finding BoM lines; increments by 10 per recursion.
@return: result: List of dictionaries containing product details.
result2: List of dictionaries containing Work Center details.
"""
routing_obj = self.pool.get('mrp.routing')
factor = factor / (bom.product_efficiency or 1.0)
factor = _common.ceiling(factor, bom.product_rounding)
if factor < bom.product_rounding:
factor = bom.product_rounding
result = []
result2 = []
phantom = False
if bom.type == 'phantom' and not bom.bom_lines:
newbom = self._bom_find(cr, uid, bom.product_id.id, bom.product_uom.id, properties)
if newbom and newbom != bom.id:
res = self._bom_explode(cr, uid, self.browse(cr, uid, [newbom])[0], factor*bom.product_qty, properties, addthis=True, level=level+10, context=context)
result = result + res[0]
result2 = result2 + res[1]
phantom = True
else:
phantom = False
if not phantom:
if addthis and not bom.bom_lines:
result.append(
{
'name': bom.product_id.name,
'product_id': bom.product_id.id,
'product_qty': bom.product_qty * factor,
'product_uom': bom.product_uom.id,
'product_uos_qty': bom.product_uos and bom.product_uos_qty * factor or False,
'product_uos': bom.product_uos and bom.product_uos.id or False,
})
routing = (routing_id and routing_obj.browse(cr, uid, routing_id)) or bom.routing_id or False
if routing:
for wc_use in routing.workcenter_lines:
wc = wc_use.workcenter_id
d, m = divmod(factor, wc_use.workcenter_id.capacity_per_cycle)
mult = (d + (m and 1.0 or 0.0))
cycle = mult * wc_use.cycle_nbr
result2.append({
'name': tools.ustr(wc_use.name) + ' - ' + tools.ustr(bom.product_id.name),
'workcenter_id': wc.id,
'sequence': level+(wc_use.sequence or 0),
'cycle': cycle,
'hour': float(wc_use.hour_nbr*mult + ((wc.time_start or 0.0)+(wc.time_stop or 0.0)+cycle*(wc.time_cycle or 0.0)) * (wc.time_efficiency or 1.0)),
})
for bom2 in bom.bom_lines:
if (bom2.date_start and bom2.date_start > time.strftime(DEFAULT_SERVER_DATE_FORMAT)) or \
(bom2.date_stop and bom2.date_stop < time.strftime(DEFAULT_SERVER_DATE_FORMAT)):
continue
res = self._bom_explode(cr, uid, bom2, factor, properties, addthis=True, level=level+10, context=context)
result = result + res[0]
result2 = result2 + res[1]
# We merge the results for the same product. The reason is that action_produce does not
# support the case where the same product appears on multiple lines. To avoid major changes
# in a stable version, we do this simple hack at this point.
# Only for v7.0, do not use in v8.0
result_dict = {}
result_dup = False
for product_detail in result:
key = (
product_detail['name'],
product_detail['product_id'],
product_detail['product_uom'],
product_detail['product_uos_qty'],
product_detail['product_uos']
)
if key in result_dict:
result_dict[key] += product_detail['product_qty']
result_dup = True
else:
result_dict[key] = product_detail['product_qty']
if result_dup:
result = []
for key in result_dict.keys():
result.append(
{
'name': key[0],
'product_id': key[1],
'product_qty': result_dict[key],
'product_uom': key[2],
'product_uos_qty': key[3],
'product_uos': key[4],
})
return result, result2
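# Numeric illustration of the work-center arithmetic above: with a factor
# of 7 units and capacity_per_cycle = 3, divmod yields (2, 1), so a partial
# cycle counts as a full one:
#
#   d, m = divmod(7, 3)              # (2, 1)
#   mult = d + (1.0 if m else 0.0)   # 3.0 cycles of the work center
#   cycle = mult * cycle_nbr         # scaled by the routing line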
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
bom_data = self.read(cr, uid, id, [], context=context)
default.update(name=_("%s (copy)") % (bom_data['name']), bom_id=False)
return super(mrp_bom, self).copy_data(cr, uid, id, default, context=context)
def unlink(self, cr, uid, ids, context=None):
if self.pool['mrp.production'].search(cr, uid, [
('bom_id', 'in', ids), ('state', 'not in', ['done', 'cancel'])
], context=context):
raise osv.except_osv(_('Warning!'), _('You cannot delete a Bill of Material with running manufacturing orders.\nPlease close or cancel it first.'))
return super(mrp_bom, self).unlink(cr, uid, ids, context=context)
def rounding(f, r):
# TODO for trunk: log deprecation warning
# _logger.warning("Deprecated rounding method, please use tools.float_round to round floats.")
import math
if not r:
return f
return math.ceil(f / r) * r
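# Examples of the deprecated helper above: it rounds *up* to the nearest
# multiple of r, and returns f unchanged when r is falsy:
#
#   rounding(7.0, 3.0)   # -> 9.0
#   rounding(6.0, 3.0)   # -> 6.0
#   rounding(7.0, 0)     # -> 7.0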
class mrp_production(osv.osv):
"""
Production Orders / Manufacturing Orders
"""
_name = 'mrp.production'
_description = 'Manufacturing Order'
_date_name = 'date_planned'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _production_calc(self, cr, uid, ids, prop, unknow_none, context=None):
""" Calculates total hours and total no. of cycles for a production order.
@param prop: Name of field.
@param unknow_none:
@return: Dictionary of values.
"""
result = {}
for prod in self.browse(cr, uid, ids, context=context):
result[prod.id] = {
'hour_total': 0.0,
'cycle_total': 0.0,
}
for wc in prod.workcenter_lines:
result[prod.id]['hour_total'] += wc.hour
result[prod.id]['cycle_total'] += wc.cycle
return result
def _src_id_default(self, cr, uid, ids, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _dest_id_default(self, cr, uid, ids, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
_columns = {
'name': fields.char('Reference', size=64, required=True, readonly=True, states={'draft': [('readonly', False)]}),
'origin': fields.char('Source Document', size=64, readonly=True, states={'draft': [('readonly', False)]},
help="Reference of the document that generated this production order request."),
'priority': fields.selection([('0','Not urgent'),('1','Normal'),('2','Urgent'),('3','Very Urgent')], 'Priority',
select=True, readonly=True, states=dict.fromkeys(['draft', 'confirmed'], [('readonly', False)])),
'product_id': fields.many2one('product.product', 'Product', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, readonly=True, states={'draft':[('readonly',False)]}),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uos_qty': fields.float('Product UoS Quantity', readonly=True, states={'draft': [('readonly', False)]}),
'product_uos': fields.many2one('product.uom', 'Product UoS', readonly=True, states={'draft': [('readonly', False)]}),
'location_src_id': fields.many2one('stock.location', 'Raw Materials Location', required=True,
readonly=True, states={'draft':[('readonly',False)]},
help="Location where the system will look for components."),
'location_dest_id': fields.many2one('stock.location', 'Finished Products Location', required=True,
readonly=True, states={'draft':[('readonly',False)]},
help="Location where the system will stock the finished products."),
'date_planned': fields.datetime('Scheduled Date', required=True, select=1, readonly=True, states={'draft':[('readonly',False)]}),
'date_start': fields.datetime('Start Date', select=True, readonly=True),
'date_finished': fields.datetime('End Date', select=True, readonly=True),
'bom_id': fields.many2one('mrp.bom', 'Bill of Material', domain=[('bom_id','=',False)], readonly=True, states={'draft':[('readonly',False)]},
help="Bill of Materials allow you to define the list of required raw materials to make a finished product."),
'routing_id': fields.many2one('mrp.routing', string='Routing', on_delete='set null', readonly=True, states={'draft':[('readonly',False)]},
help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production plannification."),
'picking_id': fields.many2one('stock.picking', 'Picking List', readonly=True, ondelete="restrict",
help="This is the Internal Picking List that brings the finished product to the production plan"),
'move_prod_id': fields.many2one('stock.move', 'Product Move', readonly=True),
'move_lines': fields.many2many('stock.move', 'mrp_production_move_ids', 'production_id', 'move_id', 'Products to Consume',
domain=[('state','not in', ('done', 'cancel'))], readonly=True, states={'draft':[('readonly',False)]}),
'move_lines2': fields.many2many('stock.move', 'mrp_production_move_ids', 'production_id', 'move_id', 'Consumed Products',
domain=[('state','in', ('done', 'cancel'))], readonly=True, states={'draft':[('readonly',False)]}),
'move_created_ids': fields.one2many('stock.move', 'production_id', 'Products to Produce',
domain=[('state','not in', ('done', 'cancel'))], readonly=True, states={'draft':[('readonly',False)]}),
'move_created_ids2': fields.one2many('stock.move', 'production_id', 'Produced Products',
domain=[('state','in', ('done', 'cancel'))], readonly=True, states={'draft':[('readonly',False)]}),
'product_lines': fields.one2many('mrp.production.product.line', 'production_id', 'Scheduled goods',
readonly=True, states={'draft':[('readonly',False)]}),
'workcenter_lines': fields.one2many('mrp.production.workcenter.line', 'production_id', 'Work Centers Utilisation',
readonly=True, states={'draft':[('readonly',False)]}),
'state': fields.selection(
[('draft', 'New'), ('cancel', 'Cancelled'), ('picking_except', 'Picking Exception'), ('confirmed', 'Awaiting Raw Materials'),
('ready', 'Ready to Produce'), ('in_production', 'Production Started'), ('done', 'Done')],
string='Status', readonly=True,
track_visibility='onchange',
help="When the production order is created the status is set to 'Draft'.\n\
If the order is confirmed the status is set to 'Waiting Goods'.\n\
If any exceptions are there, the status is set to 'Picking Exception'.\n\
If the stock is available then the status is set to 'Ready to Produce'.\n\
When the production gets started then the status is set to 'In Production'.\n\
When the production is over, the status is set to 'Done'."),
'hour_total': fields.function(_production_calc, type='float', string='Total Hours', multi='workorder', store=True),
'cycle_total': fields.function(_production_calc, type='float', string='Total Cycles', multi='workorder', store=True),
'user_id':fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company','Company',required=True),
}
_defaults = {
'priority': lambda *a: '1',
'state': lambda *a: 'draft',
'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'product_qty': lambda *a: 1.0,
'user_id': lambda self, cr, uid, c: uid,
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'mrp.production') or '/',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.production', context=c),
'location_src_id': _src_id_default,
'location_dest_id': _dest_id_default
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
]
_order = 'priority desc, date_planned asc'
def _check_qty(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
if order.product_qty <= 0:
return False
return True
_constraints = [
(_check_qty, 'Order quantity cannot be negative or zero!', ['product_qty']),
]
def unlink(self, cr, uid, ids, context=None):
for production in self.browse(cr, uid, ids, context=context):
if production.state not in ('draft', 'cancel'):
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a manufacturing order in state \'%s\'.') % production.state)
return super(mrp_production, self).unlink(cr, uid, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default.update({
'name': self.pool.get('ir.sequence').get(cr, uid, 'mrp.production'),
'move_lines' : [],
'move_lines2' : [],
'move_created_ids' : [],
'move_created_ids2' : [],
'product_lines' : [],
'move_prod_id' : False,
'picking_id' : False
})
return super(mrp_production, self).copy(cr, uid, id, default, context)
def location_id_change(self, cr, uid, ids, src, dest, context=None):
""" Changes destination location if source location is changed.
@param src: Source location id.
@param dest: Destination location id.
@return: Dictionary of values.
"""
if dest:
return {}
if src:
return {'value': {'location_dest_id': src}}
return {}
def product_id_change(self, cr, uid, ids, product_id, context=None):
""" Finds UoM of changed product.
@param product_id: Id of changed product.
@return: Dictionary of values.
"""
if not product_id:
return {'value': {
'product_uom': False,
'bom_id': False,
'routing_id': False
}}
bom_obj = self.pool.get('mrp.bom')
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
bom_id = bom_obj._bom_find(cr, uid, product.id, product.uom_id and product.uom_id.id, [])
routing_id = False
if bom_id:
bom_point = bom_obj.browse(cr, uid, bom_id, context=context)
routing_id = bom_point.routing_id.id or False
product_uom_id = product.uom_id and product.uom_id.id or False
result = {
'product_uom': product_uom_id,
'bom_id': bom_id,
'routing_id': routing_id,
}
return {'value': result}
def bom_id_change(self, cr, uid, ids, bom_id, context=None):
""" Finds routing for changed BoM.
@param product: Id of product.
@return: Dictionary of values.
"""
if not bom_id:
return {'value': {
'routing_id': False
}}
bom_point = self.pool.get('mrp.bom').browse(cr, uid, bom_id, context=context)
routing_id = bom_point.routing_id.id or False
result = {
'routing_id': routing_id
}
return {'value': result}
def action_picking_except(self, cr, uid, ids):
""" Changes the state to Exception.
@return: True
"""
self.write(cr, uid, ids, {'state': 'picking_except'})
return True
def _prepare_lines(self, cr, uid, production, properties=None, context=None):
# search BoM structure and route
bom_obj = self.pool.get('mrp.bom')
uom_obj = self.pool.get('product.uom')
bom_point = production.bom_id
bom_id = production.bom_id.id
if not bom_point:
bom_id = bom_obj._bom_find(cr, uid, production.product_id.id, production.product_uom.id, properties)
if bom_id:
bom_point = bom_obj.browse(cr, uid, bom_id)
routing_id = bom_point.routing_id.id or False
self.write(cr, uid, [production.id], {'bom_id': bom_id, 'routing_id': routing_id})
if not bom_id:
raise osv.except_osv(_('Error!'), _("Cannot find a bill of material for this product."))
# get components and workcenter_lines from BoM structure
factor = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, bom_point.product_uom.id)
return bom_obj._bom_explode(cr, uid, bom_point, factor / bom_point.product_qty, properties, routing_id=production.routing_id.id, context=context)
def _action_compute_lines(self, cr, uid, ids, properties=None, context=None):
""" Compute product_lines and workcenter_lines from BoM structure
@return: product_lines
"""
if properties is None:
properties = []
results = []
prod_line_obj = self.pool.get('mrp.production.product.line')
workcenter_line_obj = self.pool.get('mrp.production.workcenter.line')
for production in self.browse(cr, uid, ids, context=context):
#unlink product_lines
prod_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.product_lines], context=context)
#unlink workcenter_lines
workcenter_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.workcenter_lines], context=context)
res = self._prepare_lines(cr, uid, production, properties=properties, context=context)
results = res[0] # product_lines
results2 = res[1] # workcenter_lines
# reset product_lines in production order
for line in results:
line['production_id'] = production.id
prod_line_obj.create(cr, uid, line)
#reset workcenter_lines in production order
for line in results2:
line['production_id'] = production.id
workcenter_line_obj.create(cr, uid, line)
return results
def action_compute(self, cr, uid, ids, properties=None, context=None):
""" Computes bills of material of a product.
@param properties: List containing dictionaries of properties.
@return: Number of computed product lines.
"""
return len(self._action_compute_lines(cr, uid, ids, properties=properties, context=context))
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the production order and related stock moves.
@return: True
"""
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
for production in self.browse(cr, uid, ids, context=context):
if production.state == 'confirmed' and production.picking_id.state not in ('draft', 'cancel'):
raise osv.except_osv(
_('Cannot cancel manufacturing order!'),
_('You must first cancel related internal picking attached to this manufacturing order.'))
if production.move_created_ids:
move_obj.action_cancel(cr, uid, [x.id for x in production.move_created_ids])
move_obj.action_cancel(cr, uid, [x.id for x in production.move_lines])
self.write(cr, uid, ids, {'state': 'cancel'})
return True
def action_ready(self, cr, uid, ids, context=None):
""" Changes the production state to Ready and location id of stock move.
@return: True
"""
move_obj = self.pool.get('stock.move')
self.write(cr, uid, ids, {'state': 'ready'})
for production in self.browse(cr, uid, ids, context=context):
if not production.move_created_ids:
produce_move_id = self._make_production_produce_line(cr, uid, production, context=context)
for scheduled in production.product_lines:
self._make_production_line_procurement(cr, uid, scheduled, False, context=context)
if production.move_prod_id and production.move_prod_id.location_id.id != production.location_dest_id.id:
move_obj.write(cr, uid, [production.move_prod_id.id],
{'location_id': production.location_dest_id.id})
return True
def action_production_end(self, cr, uid, ids, context=None):
""" Changes production state to Finish and writes finished date.
@return: True
"""
for production in self.browse(cr, uid, ids):
self._costs_generate(cr, uid, production)
write_res = self.write(cr, uid, ids, {'state': 'done', 'date_finished': time.strftime('%Y-%m-%d %H:%M:%S')})
return write_res
def test_production_done(self, cr, uid, ids):
""" Tests whether production is done or not.
@return: True or False
"""
res = True
for production in self.browse(cr, uid, ids):
if production.move_lines:
res = False
if production.move_created_ids:
res = False
return res
def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
""" Compute the factor to compute the qty of procucts to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but if the
module mrp_subproduct is installed, then we must use the move_id to identify the product to produce
and its quantity.
:param production_id: ID of the mrp.order
:param move_id: ID of the stock move that needs to be produced. Will be used in mrp_subproduct.
:return: The factor to apply to the quantity that we should produce for the given production order.
"""
return 1
def action_produce(self, cr, uid, production_id, production_qty, production_mode, context=None):
""" To produce final product based on production mode (consume/consume&produce).
If Production mode is consume, all stock move lines of raw materials will be done/consumed.
If Production mode is consume & produce, all stock move lines of raw materials will be done/consumed
and stock move lines of final product will be also done/produced.
@param production_id: the ID of mrp.production object
@param production_qty: specify qty to produce
@param production_mode: specify production mode (consume/consume&produce).
@return: True
"""
stock_mov_obj = self.pool.get('stock.move')
production = self.browse(cr, uid, production_id, context=context)
wf_service = netsvc.LocalService("workflow")
if not production.move_lines and production.state == 'ready':
# trigger workflow if there are no products to consume (e.g. services)
wf_service.trg_validate(uid, 'mrp.production', production_id, 'button_produce', cr)
produced_qty = 0
for produced_product in production.move_created_ids2:
if (produced_product.scrapped) or (produced_product.product_id.id != production.product_id.id):
continue
produced_qty += produced_product.product_qty
if production_mode in ['consume','consume_produce']:
consumed_data = {}
# Calculate already consumed qtys
for consumed in production.move_lines2:
if consumed.scrapped:
continue
if not consumed_data.get(consumed.product_id.id, False):
consumed_data[consumed.product_id.id] = 0
consumed_data[consumed.product_id.id] += consumed.product_qty
# Find product qty to be consumed and consume it
for scheduled in production.product_lines:
# total qty of consumed product we need after this consumption
total_consume = ((production_qty + produced_qty) * scheduled.product_qty / production.product_qty)
# qty available for consume and produce
qty_avail = scheduled.product_qty - consumed_data.get(scheduled.product_id.id, 0.0)
if float_compare(qty_avail, 0, precision_rounding=scheduled.product_id.uom_id.rounding) <= 0:
# there will be nothing to consume for this raw material
continue
raw_product = [move for move in production.move_lines if move.product_id.id==scheduled.product_id.id]
if raw_product:
# qtys we have to consume
qty = total_consume - consumed_data.get(scheduled.product_id.id, 0.0)
if float_compare(qty, qty_avail, precision_rounding=scheduled.product_id.uom_id.rounding) == 1:
# if qtys we have to consume is more than qtys available to consume
prod_name = scheduled.product_id.name_get()[0][1]
raise osv.except_osv(_('Warning!'), _('You are going to consume total %s quantities of "%s".\nBut you can only consume up to total %s quantities.') % (qty, prod_name, qty_avail))
if float_compare(qty, 0, precision_rounding=scheduled.product_id.uom_id.rounding) <= 0:
# we already have more qtys consumed than we need
continue
raw_product[0].action_consume(qty, raw_product[0].location_id.id, context=context)
production.refresh()
if production_mode == 'consume_produce':
# To produce remaining qty of final product
#vals = {'state':'confirmed'}
#final_product_todo = [x.id for x in production.move_created_ids]
#stock_mov_obj.write(cr, uid, final_product_todo, vals)
#stock_mov_obj.action_confirm(cr, uid, final_product_todo, context)
produced_products = {}
for produced_product in production.move_created_ids2:
if produced_product.scrapped:
continue
if not produced_products.get(produced_product.product_id.id, False):
produced_products[produced_product.product_id.id] = 0
produced_products[produced_product.product_id.id] += produced_product.product_qty
for produce_product in production.move_created_ids:
produced_qty = produced_products.get(produce_product.product_id.id, 0)
subproduct_factor = self._get_subproduct_factor(cr, uid, production.id, produce_product.id, context=context)
rest_qty = (subproduct_factor * production.product_qty) - produced_qty
if rest_qty < (subproduct_factor * production_qty):
prod_name = produce_product.product_id.name_get()[0][1]
raise osv.except_osv(_('Warning!'), _('You are going to produce total %s quantities of "%s".\nBut you can only produce up to total %s quantities.') % ((subproduct_factor * production_qty), prod_name, rest_qty))
if rest_qty > 0 :
stock_mov_obj.action_consume(cr, uid, [produce_product.id], (subproduct_factor * production_qty), context=context)
production.refresh()
for raw_product in production.move_lines2:
new_parent_ids = []
parent_move_ids = [x.id for x in raw_product.move_history_ids]
for final_product in production.move_created_ids2:
if final_product.id not in parent_move_ids:
new_parent_ids.append(final_product.id)
for new_parent_id in new_parent_ids:
stock_mov_obj.write(cr, uid, [raw_product.id], {'move_history_ids': [(4,new_parent_id)]})
wf_service.trg_validate(uid, 'mrp.production', production_id, 'button_produce_done', cr)
return True
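# Numeric walk-through of the consumption arithmetic above (hypothetical
# quantities): an order for 5 units whose BoM schedules 10 units of a raw
# material, with 1 unit already produced and 2 raw units already consumed,
# when producing 2 more units:
#
#   total_consume = (2 + 1) * 10.0 / 5.0    # 6.0 raw units needed in total
#   qty = total_consume - 2.0               # 4.0 raw units to consume now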
def _costs_generate(self, cr, uid, production):
""" Calculates total costs at the end of the production.
@param production: Id of production order.
@return: Calculated amount.
"""
amount = 0.0
analytic_line_obj = self.pool.get('account.analytic.line')
for wc_line in production.workcenter_lines:
wc = wc_line.workcenter_id
if wc.costs_journal_id and wc.costs_general_account_id:
# Cost per hour
value = wc_line.hour * wc.costs_hour
account = wc.costs_hour_account_id.id
if value and account:
amount += value
analytic_line_obj.create(cr, uid, {
'name': wc_line.name + ' (H)',
'amount': value,
'account_id': account,
'general_account_id': wc.costs_general_account_id.id,
'journal_id': wc.costs_journal_id.id,
'ref': wc.code,
'product_id': wc.product_id.id,
'unit_amount': wc_line.hour,
'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False
} )
# Cost per cycle
value = wc_line.cycle * wc.costs_cycle
account = wc.costs_cycle_account_id.id
if value and account:
amount += value
analytic_line_obj.create(cr, uid, {
'name': wc_line.name+' (C)',
'amount': value,
'account_id': account,
'general_account_id': wc.costs_general_account_id.id,
'journal_id': wc.costs_journal_id.id,
'ref': wc.code,
'product_id': wc.product_id.id,
'unit_amount': wc_line.cycle,
'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False
} )
return amount
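# Sketch of the cost roll-up above: each work-order line adds
# hour * costs_hour plus cycle * costs_cycle, provided the analytic journal
# and general account are set on the work center (hypothetical rates):
#
#   amount = 2.5 * 30.0 + 4 * 1.5    # 75.0 + 6.0 = 81.0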
def action_in_production(self, cr, uid, ids, context=None):
""" Changes state to In Production and writes starting date.
@return: True
"""
return self.write(cr, uid, ids, {'state': 'in_production', 'date_start': time.strftime('%Y-%m-%d %H:%M:%S')})
def test_if_product(self, cr, uid, ids):
"""
@return: True or False
"""
res = True
for production in self.browse(cr, uid, ids):
boms = self._prepare_lines(cr, uid, production)[0]
res = False
for bom in boms:
product = self.pool.get('product.product').browse(cr, uid, bom['product_id'])
if product.type in ('product', 'consu'):
res = True
return res
def _get_auto_picking(self, cr, uid, production):
return True
def _hook_create_post_procurement(self, cr, uid, production, procurement_id, context=None):
return True
def _make_production_line_procurement(self, cr, uid, production_line, shipment_move_id, context=None):
wf_service = netsvc.LocalService("workflow")
procurement_order = self.pool.get('procurement.order')
production = production_line.production_id
location_id = production.location_src_id.id
date_planned = production.date_planned
procurement_name = (production.origin or '').split(':')[0] + ':' + production.name
procurement_id = procurement_order.create(cr, uid, {
'name': procurement_name,
'origin': procurement_name,
'date_planned': date_planned,
'product_id': production_line.product_id.id,
'product_qty': production_line.product_qty,
'product_uom': production_line.product_uom.id,
'product_uos_qty': production_line.product_uos and production_line.product_qty or False,
'product_uos': production_line.product_uos and production_line.product_uos.id or False,
'location_id': location_id,
'procure_method': production_line.product_id.procure_method,
'move_id': shipment_move_id,
'company_id': production.company_id.id,
})
self._hook_create_post_procurement(cr, uid, production, procurement_id, context=context)
wf_service.trg_validate(uid, procurement_order._name, procurement_id, 'button_confirm', cr)
return procurement_id
def _make_production_internal_shipment_line(self, cr, uid, production_line, shipment_id, parent_move_id, destination_location_id=False, context=None):
stock_move = self.pool.get('stock.move')
production = production_line.production_id
date_planned = production.date_planned
# Internal shipment is created for Stockable and Consumable Products
if production_line.product_id.type not in ('product', 'consu'):
return False
source_location_id = production.location_src_id.id
if not destination_location_id:
destination_location_id = source_location_id
return stock_move.create(cr, uid, {
'name': production.name,
'picking_id': shipment_id,
'product_id': production_line.product_id.id,
'product_qty': production_line.product_qty,
'product_uom': production_line.product_uom.id,
'product_uos_qty': production_line.product_uos and production_line.product_uos_qty or False,
'product_uos': production_line.product_uos and production_line.product_uos.id or False,
'date': date_planned,
'move_dest_id': parent_move_id,
'location_id': source_location_id,
'location_dest_id': destination_location_id,
'state': 'waiting',
'company_id': production.company_id.id,
})
def _make_production_internal_shipment(self, cr, uid, production, context=None):
ir_sequence = self.pool.get('ir.sequence')
stock_picking = self.pool.get('stock.picking')
routing_loc = None
pick_type = 'internal'
partner_id = False
# Take routing address as a Shipment Address.
# If the routing location's usage is not internal, make an outgoing shipment; otherwise an internal shipment
if production.bom_id.routing_id and production.bom_id.routing_id.location_id:
routing_loc = production.bom_id.routing_id.location_id
if routing_loc.usage != 'internal':
pick_type = 'out'
partner_id = routing_loc.partner_id and routing_loc.partner_id.id or False
# Take the next sequence number for the shipment based on its type
if pick_type!='internal':
pick_name = ir_sequence.get(cr, uid, 'stock.picking.' + pick_type)
else:
pick_name = ir_sequence.get(cr, uid, 'stock.picking')
picking_id = stock_picking.create(cr, uid, {
'name': pick_name,
'origin': (production.origin or '').split(':')[0] + ':' + production.name,
'type': pick_type,
'move_type': 'one',
'state': 'auto',
'partner_id': partner_id,
'auto_picking': self._get_auto_picking(cr, uid, production),
'company_id': production.company_id.id,
})
production.write({'picking_id': picking_id}, context=context)
return picking_id
def _make_production_produce_line(self, cr, uid, production, context=None):
stock_move = self.pool.get('stock.move')
source_location_id = production.product_id.property_stock_production.id
destination_location_id = production.location_dest_id.id
data = {
'name': production.name,
'date': production.date_planned,
'product_id': production.product_id.id,
'product_qty': production.product_qty,
'product_uom': production.product_uom.id,
'product_uos_qty': production.product_uos and production.product_uos_qty or False,
'product_uos': production.product_uos and production.product_uos.id or False,
'location_id': source_location_id,
'location_dest_id': destination_location_id,
'move_dest_id': production.move_prod_id.id,
'state': 'waiting',
'company_id': production.company_id.id,
}
if production.move_prod_id:
production.move_prod_id.write({'location_id': destination_location_id})
move_id = stock_move.create(cr, uid, data, context=context)
production.write({'move_created_ids': [(6, 0, [move_id])]}, context=context)
return move_id
def _make_production_consume_line(self, cr, uid, production_line, parent_move_id, source_location_id=False, context=None):
stock_move = self.pool.get('stock.move')
production = production_line.production_id
# Internal shipment is created for Stockable and Consumable Products
if production_line.product_id.type not in ('product', 'consu'):
return False
destination_location_id = production.product_id.property_stock_production.id
if not source_location_id:
source_location_id = production.location_src_id.id
move_id = stock_move.create(cr, uid, {
'name': production.name,
'date': production.date_planned,
'product_id': production_line.product_id.id,
'product_qty': production_line.product_qty,
'product_uom': production_line.product_uom.id,
'product_uos_qty': production_line.product_uos and production_line.product_uos_qty or False,
'product_uos': production_line.product_uos and production_line.product_uos.id or False,
'location_id': source_location_id,
'location_dest_id': destination_location_id,
'move_dest_id': parent_move_id,
'state': 'waiting',
'company_id': production.company_id.id,
})
production.write({'move_lines': [(4, move_id)]}, context=context)
return move_id
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms production order.
@return: Newly generated Shipment Id.
"""
shipment_id = False
wf_service = netsvc.LocalService("workflow")
uncompute_ids = filter(lambda x:x, [not x.product_lines and x.id or False for x in self.browse(cr, uid, ids, context=context)])
self.action_compute(cr, uid, uncompute_ids, context=context)
for production in self.browse(cr, uid, ids, context=context):
shipment_id = self._make_production_internal_shipment(cr, uid, production, context=context)
produce_move_id = self._make_production_produce_line(cr, uid, production, context=context)
# Take routing location as a Source Location.
source_location_id = production.location_src_id.id
if production.routing_id and production.routing_id.location_id:
source_location_id = production.routing_id.location_id.id
for line in production.product_lines:
consume_move_id = self._make_production_consume_line(cr, uid, line, produce_move_id, source_location_id=source_location_id, context=context)
if shipment_id:
shipment_move_id = self._make_production_internal_shipment_line(cr, uid, line, shipment_id, consume_move_id,\
destination_location_id=source_location_id, context=context)
self._make_production_line_procurement(cr, uid, line, shipment_move_id, context=context)
if shipment_id:
wf_service.trg_validate(uid, 'stock.picking', shipment_id, 'button_confirm', cr)
production.write({'state':'confirmed'}, context=context)
return shipment_id
def force_production(self, cr, uid, ids, *args):
""" Assigns products.
@param *args: Arguments
@return: True
"""
pick_obj = self.pool.get('stock.picking')
pick_obj.force_assign(cr, uid, [prod.picking_id.id for prod in self.browse(cr, uid, ids)])
return True
class mrp_production_workcenter_line(osv.osv):
_name = 'mrp.production.workcenter.line'
_description = 'Work Order'
_order = 'sequence'
_inherit = ['mail.thread']
_columns = {
'name': fields.char('Work Order', size=64, required=True),
'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
'cycle': fields.float('Number of Cycles', digits=(16,2)),
'hour': fields.float('Number of Hours', digits=(16,2)),
'sequence': fields.integer('Sequence', required=True, help="Gives the sequence order when displaying a list of work orders."),
'production_id': fields.many2one('mrp.production', 'Manufacturing Order',
track_visibility='onchange', select=True, ondelete='cascade', required=True),
}
_defaults = {
'sequence': lambda *a: 1,
'hour': lambda *a: 0,
'cycle': lambda *a: 0,
}
class mrp_production_product_line(osv.osv):
_name = 'mrp.production.product.line'
_description = 'Production Scheduled Product'
_columns = {
'name': fields.char('Name', size=64, required=True),
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_uos_qty': fields.float('Product UOS Quantity'),
'product_uos': fields.many2one('product.uom', 'Product UOS'),
'production_id': fields.many2one('mrp.production', 'Production Order', select=True),
}
class product_product(osv.osv):
_inherit = "product.product"
_columns = {
'bom_ids': fields.one2many('mrp.bom', 'product_id', 'Bill of Materials'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
olivierfriard/BORIS
|
refs/heads/master
|
boris/version.py
|
1
|
"""
BORIS
Behavioral Observation Research Interactive Software
Copyright 2012-2021 Olivier Friard
This file is part of BORIS.
BORIS is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
any later version.
BORIS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
__version__ = "7.10.6"
__version_date__ = "2021-06-23"
|
counsyl/baya
|
refs/heads/master
|
baya/tests/submod2/urls.py
|
1
|
from django.conf.urls import url
from . import admin
app_name = 'submod2'
urlpatterns = [
url(r'^admin/', admin.site.urls, name='submod2_admin'),
]
|
derAndreas/pyGtk3Docs
|
refs/heads/develop
|
pyGtk3Docs/ast/Callback.py
|
1
|
from Base import Base
class Callback(Base):
""" Callbacks nodes, used for signals """
def parse_node(self):
""" Parse the node """
self.parse_attributes_from_map({
'name' : 'name',
'ctype' : (self.NS_C, 'type')
})
self.parse_parameters()
self.parse_returnvalue()
# propagate the Callback definition to the namespace
self._namespace.registerType(self, self.getName(), self.getCType())
|
nschloe/quadpy
|
refs/heads/main
|
src/quadpy/s2/_classical.py
|
1
|
from ._helpers import S2Scheme, register
def midpoint():
d = {"zero2": [[1]]}
return S2Scheme("Midpoint", d, 1)
register([midpoint])
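# A minimal standalone sketch of what this degree-1 scheme computes on a
# disk, assuming the single "zero2" point is the disk's center with weight 1
# (i.e. the integrand is sampled once and scaled by the disk area):
def _midpoint_disk_sketch(f, radius=1.0):
    import math
    # approximate integral of f over the disk: area * f(center)
    return math.pi * radius ** 2 * f(0.0, 0.0)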
|
PulsePod/old-www-do-not-use
|
refs/heads/master
|
lib/python2.7/site-packages/pip/backwardcompat/__init__.py
|
80
|
"""Stuff that differs in different Python versions"""
import os
import imp
import sys
import site
__all__ = ['WindowsError']
uses_pycache = hasattr(imp, 'cache_from_source')
class NeverUsedException(Exception):
"""this exception should never be raised"""
try:
WindowsError = WindowsError
except NameError:
WindowsError = NeverUsedException
try:
#new in Python 3.3
PermissionError = PermissionError
except NameError:
PermissionError = NeverUsedException
console_encoding = sys.__stdout__.encoding
if sys.version_info >= (3,):
from io import StringIO, BytesIO
from functools import reduce
from urllib.error import URLError, HTTPError
from queue import Queue, Empty
from urllib.request import url2pathname
from urllib.request import urlretrieve
from email import message as emailmessage
import urllib.parse as urllib
import urllib.request as urllib2
import configparser as ConfigParser
import xmlrpc.client as xmlrpclib
import urllib.parse as urlparse
import http.client as httplib
def cmp(a, b):
return (a > b) - (a < b)
def b(s):
return s.encode('utf-8')
def u(s):
return s.decode('utf-8')
def console_to_str(s):
try:
return s.decode(console_encoding)
except UnicodeDecodeError:
return s.decode('utf_8')
def fwrite(f, s):
f.buffer.write(b(s))
def get_http_message_param(http_message, param, default_value):
return http_message.get_param(param, default_value)
bytes = bytes
string_types = (str,)
raw_input = input
else:
from cStringIO import StringIO
from urllib2 import URLError, HTTPError
from Queue import Queue, Empty
from urllib import url2pathname, urlretrieve
from email import Message as emailmessage
import urllib
import urllib2
import urlparse
import ConfigParser
import xmlrpclib
import httplib
def b(s):
return s
def u(s):
return s
def console_to_str(s):
return s
def fwrite(f, s):
f.write(s)
def get_http_message_param(http_message, param, default_value):
result = http_message.getparam(param)
return result or default_value
bytes = str
string_types = (basestring,)
reduce = reduce
cmp = cmp
raw_input = raw_input
BytesIO = StringIO
from distutils.sysconfig import get_python_lib, get_python_version
#site.USER_SITE was created in py2.6
user_site = getattr(site, 'USER_SITE', None)
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
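# Usage note for the fallback above (mirrors itertools.product):
#
#   list(product('AB', 'xy'))
#   # -> [('A', 'x'), ('A', 'y'), ('B', 'x'), ('B', 'y')]
#   list(product(range(2), repeat=2))
#   # -> [(0, 0), (0, 1), (1, 0), (1, 1)]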
## only >=py32 has ssl.match_hostname and ssl.CertificateError
try:
from ssl import match_hostname, CertificateError
except ImportError:
from ssl_match_hostname import match_hostname, CertificateError
|
danirus/django-comments-xtd
|
refs/heads/master
|
django_comments_xtd/tests/test_serializers.py
|
1
|
from __future__ import unicode_literals
from datetime import datetime
import pytz
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from django_comments_xtd import django_comments
from django_comments_xtd.api.serializers import ReadCommentSerializer
from django_comments_xtd.models import XtdComment
from django_comments_xtd.signals import should_request_be_authorized
from django_comments_xtd.tests.models import (
Article, authorize_api_post_comment
)
from django_comments_xtd.tests.utils import post_comment
class UserModeratorTestCase(TestCase):
# Test ReadCommentSerializer.user_moderator field.
def setUp(self):
patcher = patch('django_comments_xtd.views.send_mail')
self.mock_mailer = patcher.start()
self.article = Article.objects.create(
title="October", slug="october", body="What I did on October...")
self.form = django_comments.get_form()(self.article)
def _create_user(self, can_moderate=False):
bob = User.objects.create_user("joe", "fulanito@detal.com", "pwd",
first_name="Joe", last_name="Bloggs")
if can_moderate:
ct = ContentType.objects.get(app_label="django_comments",
model="comment")
permission = Permission.objects.get(content_type=ct,
codename="can_moderate")
bob.user_permissions.add(permission)
return bob
def _send_auth_comment(self, user):
data = {"name": "", "email": "",
"followup": True, "reply_to": 0,
"comment": "Es war einmal eine kleine...",
"honeypot": ""}
data.update(self.form.initial)
response = post_comment(data, auth_user=user)
self.assertEqual(response.status_code, 201)
def test_user_moderator_is_False(self):
bob = self._create_user(can_moderate=False)
self._send_auth_comment(bob)
# Fetch the comment, serialize it and check user_moderator field.
xtdcomment = XtdComment.objects.get(pk=1)
context = {"request": {"user": bob}}
ser = ReadCommentSerializer(xtdcomment, context=context)
self.assertFalse(ser.data['user_moderator'])
def test_user_moderator_is_True(self):
bob = self._create_user(can_moderate=True)
self._send_auth_comment(bob)
# Fetch the comment, serialize it and check user_moderator field.
xtdcomment = XtdComment.objects.get(pk=1)
context = {"request": {"user": bob}}
ser = ReadCommentSerializer(xtdcomment, context=context)
self.assertTrue(ser.data['user_moderator'])
class PostCommentAsVisitorTestCase(TestCase):
# Test WriteCommentSerializer as a mere visitor. The function
# authorize_api_post_comment in `tests/models.py` is not listening for
# the signal `should_request_be_authorized` yet. Therefore, before
# connecting the signal with the function, the post comment must fail,
# indicating missing fields (timestamp, security_hash and honeypot).
def setUp(self):
patcher = patch('django_comments_xtd.views.send_mail')
self.mock_mailer = patcher.start()
self.article = Article.objects.create(
title="October", slug="october", body="What I did on October...")
self.form = django_comments.get_form()(self.article)
# Remove the following fields on purpose, as we don't know them and
# therefore we don't send them when using the web API (unless we are
# using the JavaScript plugin, but that is not the case being tested
# here).
for field_name in ['security_hash', 'timestamp']:
self.form.initial.pop(field_name)
def test_post_comment_as_visitor_before_connecting_signal(self):
data = {
"name": "Joe Bloggs", "email": "joe@bloggs.com", "followup": True,
"reply_to": 0, "comment": "This post comment request should fail"
}
data.update(self.form.initial)
response = post_comment(data)
self.assertEqual(response.status_code, 403)
self.assertEqual(
response.rendered_content,
b'{"detail":"You do not have permission to perform this action."}'
)
self.assertTrue(self.mock_mailer.call_count == 0)
def test_post_comment_as_visitor_after_connecting_signal(self):
should_request_be_authorized.connect(authorize_api_post_comment)
data = {
"name": "Joe Bloggs", "email": "joe@bloggs.com", "followup": True,
"reply_to": 0, "comment": "This post comment request should fail"
}
data.update(self.form.initial)
client = APIClient()
token = "Token 08d9fd42468aebbb8087b604b526ff0821ce4525";
client.credentials(HTTP_AUTHORIZATION=token)
self.assertTrue(self.mock_mailer.call_count == 0)
response = client.post(reverse('comments-xtd-api-create'), data)
self.assertEqual(response.status_code, 204) # Confirmation req sent.
self.assertTrue(self.mock_mailer.call_count == 1)
def test_post_comment_as_registered_user_after_connecting_signal(self):
bob = User.objects.create_user("joe", "fulanito@detal.com", "pwd",
first_name="Joe", last_name="Bloggs")
should_request_be_authorized.connect(authorize_api_post_comment)
data = {
"name": "Joe Bloggs", "email": "joe@bloggs.com", "followup": True,
"reply_to": 0, "comment": "This post comment request should fail"
}
data.update(self.form.initial)
client = APIClient()
client.login(username='joe', password='pwd')
self.assertTrue(self.mock_mailer.call_count == 0)
response = client.post(reverse('comments-xtd-api-create'), data)
self.assertEqual(response.status_code, 201) # Comment created.
self.assertTrue(self.mock_mailer.call_count == 0)
def get_fake_avatar(comment):
return f"/fake/avatar/{comment.user.username}"
funcpath = "django_comments_xtd.tests.test_serializers.get_fake_avatar"
class ReadCommentsGetUserAvatarTestCase(TestCase):
# Test ReadCommentSerializer method get_user_avatar.
# Change setting COMMENTS_XTD_API_GET_USER_AVATAR so that it uses a
# deterministic function: get_fake_avatar (here defined). Then send a
# couple of comments and verify that the function is called.
def setUp(self):
joe = User.objects.create_user("joe", "joe@example.com", "joepwd",
first_name="Joe", last_name="Bloggs")
alice = User.objects.create_user("alice", "alice@tal.com", "alicepwd",
first_name="Alice", last_name="Bloggs")
self.article = Article.objects.create(title="September",
slug="september",
body="During September...")
self.article_ct = ContentType.objects.get(app_label="tests",
model="article")
self.site = Site.objects.get(pk=1)
# Testing comment from Joe.
XtdComment.objects.create(content_type=self.article_ct,
object_pk=self.article.id,
content_object=self.article,
site=self.site,
comment="testing comment from Bob",
user=joe,
submit_date=datetime.now())
# Testing comment from Alice.
XtdComment.objects.create(content_type=self.article_ct,
object_pk=self.article.id,
content_object=self.article,
site=self.site,
comment="testing comment from Alice",
user=alice,
submit_date=datetime.now())
@patch.multiple('django_comments_xtd.conf.settings',
COMMENTS_XTD_API_GET_USER_AVATAR=funcpath)
def test_setting_COMMENTS_XTD_API_GET_USER_AVATAR_works(self):
qs = XtdComment.objects.all()
ser = ReadCommentSerializer(qs, context={"request": None}, many=True)
self.assertEqual(ser.data[0]['user_avatar'], '/fake/avatar/joe')
self.assertEqual(ser.data[1]['user_avatar'], '/fake/avatar/alice')
class RenderSubmitDateTestCase(TestCase):
def setUp(self):
self.article = Article.objects.create(title="October", slug="october",
body="What I did on October...")
def create_comment(self, submit_date_is_aware=True):
site = Site.objects.get(pk=1)
ctype = ContentType.objects.get(app_label="tests", model="article")
if submit_date_is_aware:
utc = pytz.timezone("UTC")
submit_date = datetime(2021, 1, 10, 10, 15, tzinfo=utc)
else:
submit_date = datetime(2021, 1, 10, 10, 15)
self.cm = XtdComment.objects.create(content_type=ctype,
object_pk=self.article.id,
content_object=self.article,
site=site, name="Joe Bloggs",
email="joe@bloggs.com",
comment="Just a comment",
submit_date=submit_date)
@patch.multiple('django.conf.settings', USE_TZ=False)
@patch.multiple('django_comments_xtd.conf.settings', USE_TZ=False)
def test_submit_date_when_use_tz_is_false(self):
self.create_comment(submit_date_is_aware=False)
qs = XtdComment.objects.all()
ser = ReadCommentSerializer(qs, context={"request": None}, many=True)
self.assertEqual(ser.data[0]['submit_date'],
'Jan. 10, 2021, 10:15 a.m.')
@patch.multiple('django.conf.settings', USE_TZ=True)
@patch.multiple('django_comments_xtd.conf.settings', USE_TZ=True)
def test_submit_date_when_use_tz_is_true(self):
self.create_comment(submit_date_is_aware=True)
qs = XtdComment.objects.all()
ser = ReadCommentSerializer(qs, context={"request": None}, many=True)
self.assertEqual(ser.data[0]['submit_date'],
'Jan. 10, 2021, 11:15 a.m.')
|
thaumos/ansible
|
refs/heads/devel
|
test/sanity/code-smell/empty-init.py
|
29
|
#!/usr/bin/env python
import os
import sys
def main():
skip = set([
'test/sanity/code-smell/%s' % os.path.basename(__file__),
# facts is grandfathered in but will break namespacing
# the only way to fix it is to deprecate and eventually remove it
# six and distro will break namespacing but because they are bundled we
# should not be overriding them
'lib/ansible/module_utils/facts/__init__.py',
'lib/ansible/module_utils/six/__init__.py',
'lib/ansible/module_utils/distro/__init__.py',
])
for path in sys.argv[1:] or sys.stdin.read().splitlines():
if path in skip:
continue
if os.path.getsize(path) > 0:
print('%s: empty __init__.py required' % path)
if __name__ == '__main__':
main()
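# Usage sketch (hypothetical path; the real file list comes from the sanity
# test runner via argv or stdin):
#   test/sanity/code-smell/empty-init.py lib/ansible/modules/cloud/__init__.py
# Any listed __init__.py with a non-zero size is reported as a violation.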
|
harshaneelhg/scikit-learn
|
refs/heads/master
|
sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py
|
221
|
"""
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check that we get the same results as the alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
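# i.e. alt_dev(y, pred) = mean(log(1 + exp(-2 * (2*y - 1) * pred))), the
# two-class deviance from ESLII written via logaddexp for numerical stability.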
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
# check the negative gradient against the alternative definition (from ESLII)
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = sw_init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
|
yaoandw/joke
|
refs/heads/master
|
Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/pylib/gyp/generator/dump_dependency_json.py
|
899
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
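# 'edges' now maps every reachable target to its direct dependencies, e.g.
# (hypothetical): {'a.gyp:a#target': ['b.gyp:b#target'], 'b.gyp:b#target': []}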
filename = 'dump.json'
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
|
TalShafir/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/aws_config_aggregator.py
|
31
|
#!/usr/bin/python
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aws_config_aggregator
short_description: Manage AWS Config aggregations across multiple accounts
description:
- Module manages AWS Config resources
version_added: "2.6"
requirements: [ 'botocore', 'boto3' ]
author:
- "Aaron Smith (@slapula)"
options:
name:
description:
- The name of the AWS Config resource.
required: true
state:
description:
- Whether the Config rule should be present or absent.
default: present
choices: ['present', 'absent']
account_sources:
description:
- Provides a list of source accounts and regions to be aggregated.
suboptions:
account_ids:
description:
- A list of 12-digit account IDs of accounts being aggregated.
aws_regions:
description:
- A list of source regions being aggregated.
all_aws_regions:
description:
- If true, aggregate existing AWS Config regions and future regions.
organization_source:
description:
- The region authorized to collect aggregated data.
suboptions:
role_arn:
description:
- ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account.
aws_regions:
description:
- The source regions being aggregated.
all_aws_regions:
description:
- If true, aggregate existing AWS Config regions and future regions.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = r'''
- name: Create cross-account aggregator
aws_config_aggregator:
name: test_config_rule
state: present
account_sources:
account_ids:
- 1234567890
- 0123456789
- 9012345678
all_aws_regions: yes
'''
RETURN = r'''#'''
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
def resource_exists(client, module, params):
try:
aggregator = client.describe_configuration_aggregators(
ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
)
return aggregator['ConfigurationAggregators'][0]
except is_boto3_error_code('NoSuchConfigurationAggregatorException'):
return
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e)
def create_resource(client, module, params, result):
try:
client.put_configuration_aggregator(
ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
AccountAggregationSources=params['AccountAggregationSources'],
OrganizationAggregationSource=params['OrganizationAggregationSource']
)
result['changed'] = True
result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
def update_resource(client, module, params, result):
current_params = client.describe_configuration_aggregators(
ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
)
current_aggregator = current_params['ConfigurationAggregators'][0]
del current_aggregator['ConfigurationAggregatorArn']
del current_aggregator['CreationTime']
del current_aggregator['LastUpdatedTime']
if params != current_aggregator:
try:
client.put_configuration_aggregator(
ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
AccountAggregationSources=params['AccountAggregationSources'],
OrganizationAggregationSource=params['OrganizationAggregationSource']
)
result['changed'] = True
result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
def delete_resource(client, module, params, result):
try:
client.delete_configuration_aggregator(
ConfigurationAggregatorName=params['ConfigurationAggregatorName']
)
result['changed'] = True
return result
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator")
def main():
module = AnsibleAWSModule(
argument_spec={
'name': dict(type='str', required=True),
'state': dict(type='str', choices=['present', 'absent'], default='present'),
'account_sources': dict(type='list', required=True),
'organization_source': dict(type='dict', required=True)
},
supports_check_mode=False,
)
result = {
'changed': False
}
name = module.params.get('name')
state = module.params.get('state')
params = {}
if name:
params['ConfigurationAggregatorName'] = name
if module.params.get('account_sources'):
params['AccountAggregationSources'] = []
for i in module.params.get('account_sources'):
tmp_dict = {}
if i.get('account_ids'):
tmp_dict['AccountIds'] = i.get('account_ids')
if i.get('aws_regions'):
tmp_dict['AwsRegions'] = i.get('aws_regions')
if i.get('all_aws_regions') is not None:
tmp_dict['AllAwsRegions'] = i.get('all_aws_regions')
params['AccountAggregationSources'].append(tmp_dict)
if module.params.get('organization_source'):
params['OrganizationAggregationSource'] = {}
if module.params.get('organization_source').get('role_arn'):
params['OrganizationAggregationSource'].update({
'RoleArn': module.params.get('organization_source').get('role_arn')
})
if module.params.get('organization_source').get('aws_regions'):
params['OrganizationAggregationSource'].update({
'AwsRegions': module.params.get('organization_source').get('aws_regions')
})
if module.params.get('organization_source').get('all_aws_regions') is not None:
params['OrganizationAggregationSource'].update({
'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions')
})
client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
resource_status = resource_exists(client, module, params)
if state == 'present':
if not resource_status:
create_resource(client, module, params, result)
else:
update_resource(client, module, params, result)
if state == 'absent':
if resource_status:
delete_resource(client, module, params, result)
module.exit_json(changed=result['changed'])
if __name__ == '__main__':
main()
|
devendermishrajio/nova_test_latest
|
refs/heads/master
|
nova/tests/unit/api/openstack/compute/test_v21_extensions.py
|
37
|
# Copyright 2013 IBM Corp.
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
import stevedore
import webob.exc
from nova.api import openstack
from nova.api.openstack import compute
from nova.api.openstack.compute import plugins
from nova.api.openstack import extensions
from nova import exception
from nova import test
CONF = cfg.CONF
class fake_bad_extension(object):
name = "fake_bad_extension"
alias = "fake-bad"
class fake_stevedore_enabled_extensions(object):
def __init__(self, namespace, check_func, invoke_on_load=False,
invoke_args=(), invoke_kwds=None):
self.extensions = []
def map(self, func, *args, **kwds):
pass
def __iter__(self):
return iter(self.extensions)
class fake_loaded_extension_info(object):
def __init__(self):
self.extensions = {}
def register_extension(self, ext):
self.extensions[ext] = ext
return True
def get_extensions(self):
return {'core1': None, 'core2': None, 'noncore1': None}
class ExtensionLoadingTestCase(test.NoDBTestCase):
def _set_v21_core(self, core_extensions):
openstack.API_V3_CORE_EXTENSIONS = core_extensions
def test_extensions_loaded(self):
app = compute.APIRouterV21()
self.assertIn('servers', app._loaded_extension_info.extensions)
def test_check_bad_extension(self):
extension_info = plugins.LoadedExtensionInfo()
self.assertFalse(extension_info._check_extension(fake_bad_extension))
def test_extensions_blacklist(self):
app = compute.APIRouterV21()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
CONF.set_override('extensions_blacklist', ['os-hosts'], 'osapi_v3')
app = compute.APIRouterV21()
self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
@mock.patch('nova.api.openstack.APIRouterV21._register_resources_list')
def test_extensions_inherit(self, mock_register):
app = compute.APIRouterV21()
self.assertIn('servers', app._loaded_extension_info.extensions)
self.assertIn('os-volumes', app._loaded_extension_info.extensions)
mock_register.assert_called_with(mock.ANY, mock.ANY)
ext_no_inherits = mock_register.call_args_list[0][0][0]
ext_has_inherits = mock_register.call_args_list[1][0][0]
# os-volumes inherits from servers
name_list = [ext.obj.alias for ext in ext_has_inherits]
self.assertIn('os-volumes', name_list)
name_list = [ext.obj.alias for ext in ext_no_inherits]
self.assertIn('servers', name_list)
def test_extensions_whitelist_accept(self):
# NOTE(maurosr): just to avoid to get an exception raised for not
# loading all core api.
v21_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
self.addCleanup(self._set_v21_core, v21_core)
app = compute.APIRouterV21()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
CONF.set_override('extensions_whitelist', ['servers', 'os-hosts'],
'osapi_v3')
app = compute.APIRouterV21()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
def test_extensions_whitelist_block(self):
# NOTE(maurosr): just to avoid to get an exception raised for not
# loading all core api.
v21_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
self.addCleanup(self._set_v21_core, v21_core)
app = compute.APIRouterV21()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
CONF.set_override('extensions_whitelist', ['servers'], 'osapi_v3')
app = compute.APIRouterV21()
self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
def test_blacklist_overrides_whitelist(self):
# NOTE(maurosr): just to avoid to get an exception raised for not
# loading all core api.
v21_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['servers'])
self.addCleanup(self._set_v21_core, v21_core)
app = compute.APIRouterV21()
self.assertIn('os-hosts', app._loaded_extension_info.extensions)
CONF.set_override('extensions_whitelist', ['servers', 'os-hosts'],
'osapi_v3')
CONF.set_override('extensions_blacklist', ['os-hosts'], 'osapi_v3')
app = compute.APIRouterV21()
self.assertNotIn('os-hosts', app._loaded_extension_info.extensions)
self.assertIn('servers', app._loaded_extension_info.extensions)
self.assertEqual(1, len(app._loaded_extension_info.extensions))
def test_get_missing_core_extensions(self):
v21_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2'])
self.addCleanup(self._set_v21_core, v21_core)
self.assertEqual(0, len(
compute.APIRouterV21.get_missing_core_extensions(
['core1', 'core2', 'noncore1'])))
missing_core = compute.APIRouterV21.get_missing_core_extensions(
['core1'])
self.assertEqual(1, len(missing_core))
self.assertIn('core2', missing_core)
missing_core = compute.APIRouterV21.get_missing_core_extensions([])
self.assertEqual(2, len(missing_core))
self.assertIn('core1', missing_core)
self.assertIn('core2', missing_core)
missing_core = compute.APIRouterV21.get_missing_core_extensions(
['noncore1'])
self.assertEqual(2, len(missing_core))
self.assertIn('core1', missing_core)
self.assertIn('core2', missing_core)
def test_core_extensions_present(self):
self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager',
fake_stevedore_enabled_extensions)
self.stubs.Set(plugins, 'LoadedExtensionInfo',
fake_loaded_extension_info)
v21_core = openstack.API_V3_CORE_EXTENSIONS
openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2'])
self.addCleanup(self._set_v21_core, v21_core)
# if no core API extensions are missing then an exception will
# not be raised when creating an instance of compute.APIRouterV21
compute.APIRouterV21()
def test_core_extensions_missing(self):
self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager',
fake_stevedore_enabled_extensions)
self.stubs.Set(plugins, 'LoadedExtensionInfo',
fake_loaded_extension_info)
self.assertRaises(exception.CoreAPIMissing, compute.APIRouterV21)
def test_extensions_expected_error(self):
@extensions.expected_errors(404)
def fake_func():
raise webob.exc.HTTPNotFound()
self.assertRaises(webob.exc.HTTPNotFound, fake_func)
def test_extensions_expected_error_from_list(self):
@extensions.expected_errors((404, 403))
def fake_func():
raise webob.exc.HTTPNotFound()
self.assertRaises(webob.exc.HTTPNotFound, fake_func)
def test_extensions_unexpected_error(self):
@extensions.expected_errors(404)
def fake_func():
raise webob.exc.HTTPConflict()
self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
def test_extensions_unexpected_error_from_list(self):
@extensions.expected_errors((404, 413))
def fake_func():
raise webob.exc.HTTPConflict()
self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
def test_extensions_unexpected_policy_not_authorized_error(self):
@extensions.expected_errors(404)
def fake_func():
raise exception.PolicyNotAuthorized(action="foo")
self.assertRaises(exception.PolicyNotAuthorized, fake_func)
|
spcui/autotest
|
refs/heads/master
|
client/shared/control_data.py
|
6
|
#
# Copyright 2008 Google Inc. Released under the GPL v2
import compiler, textwrap
REQUIRED_VARS = set(['author', 'doc', 'name', 'time', 'test_class',
'test_category', 'test_type'])
class ControlVariableException(Exception):
pass
class ControlData(object):
def __init__(self, vars, path, raise_warnings=False):
# Defaults
self.path = path
self.dependencies = set()
self.experimental = False
self.run_verify = True
self.sync_count = 1
self.test_parameters = set()
diff = REQUIRED_VARS - set(vars)
if len(diff) > 0:
warning = ("WARNING: Not all required control "
"variables were specified in %s. Please define "
"%s.") % (self.path, ', '.join(diff))
if raise_warnings:
raise ControlVariableException(warning)
print '\n'.join(textwrap.wrap(warning, 80))
for key, val in vars.iteritems():
try:
self.set_attr(key, val, raise_warnings)
except Exception, e:
if raise_warnings:
raise
print "WARNING: %s; skipping" % e
def set_attr(self, attr, val, raise_warnings=False):
attr = attr.lower()
try:
set_fn = getattr(self, 'set_%s' % attr)
set_fn(val)
except AttributeError:
# This must not be a variable we care about
pass
def _set_string(self, attr, val):
val = str(val)
setattr(self, attr, val)
def _set_option(self, attr, val, options):
val = str(val)
if val.lower() not in [x.lower() for x in options]:
raise ValueError("%s must be one of the following "
"options: %s" % (attr,
', '.join(options)))
setattr(self, attr, val)
def _set_bool(self, attr, val):
val = str(val).lower()
if val == "false":
val = False
elif val == "true":
val = True
else:
msg = "%s must be either true or false" % attr
raise ValueError(msg)
setattr(self, attr, val)
def _set_int(self, attr, val, min=None, max=None):
val = int(val)
if min is not None and min > val:
raise ValueError("%s is %d, which is below the "
"minimum of %d" % (attr, val, min))
if max is not None and max < val:
raise ValueError("%s is %d, which is above the "
"maximum of %d" % (attr, val, max))
setattr(self, attr, val)
def _set_set(self, attr, val):
val = str(val)
items = [x.strip() for x in val.split(',')]
setattr(self, attr, set(items))
def set_author(self, val):
self._set_string('author', val)
def set_dependencies(self, val):
self._set_set('dependencies', val)
def set_doc(self, val):
self._set_string('doc', val)
def set_experimental(self, val):
self._set_bool('experimental', val)
def set_name(self, val):
self._set_string('name', val)
def set_run_verify(self, val):
self._set_bool('run_verify', val)
def set_sync_count(self, val):
self._set_int('sync_count', val, min=1)
def set_time(self, val):
self._set_option('time', val, ['short', 'medium', 'long'])
def set_test_class(self, val):
self._set_string('test_class', val.lower())
def set_test_category(self, val):
self._set_string('test_category', val.lower())
def set_test_type(self, val):
self._set_option('test_type', val, ['client', 'server'])
def set_test_parameters(self, val):
self._set_set('test_parameters', val)
def _extract_const(n):
assert(n.__class__ == compiler.ast.Assign)
assert(n.expr.__class__ == compiler.ast.Const)
assert(n.expr.value.__class__ in (str, int, float, unicode))
assert(n.nodes.__class__ == list)
assert(len(n.nodes) == 1)
assert(n.nodes[0].__class__ == compiler.ast.AssName)
assert(n.nodes[0].flags.__class__ == str)
assert(n.nodes[0].name.__class__ == str)
key = n.nodes[0].name.lower()
val = str(n.expr.value).strip()
return (key, val)
def _extract_name(n):
assert(n.__class__ == compiler.ast.Assign)
assert(n.expr.__class__ == compiler.ast.Name)
assert(n.nodes.__class__ == list)
assert(len(n.nodes) == 1)
assert(n.nodes[0].__class__ == compiler.ast.AssName)
assert(n.nodes[0].flags.__class__ == str)
assert(n.nodes[0].name.__class__ == str)
assert(n.expr.name in ('False', 'True', 'None'))
key = n.nodes[0].name.lower()
val = str(n.expr.name)
return (key, val)
def parse_control(path, raise_warnings=False):
try:
mod = compiler.parseFile(path)
except SyntaxError, e:
raise ControlVariableException("Error parsing %s because %s" %
(path, e))
assert(mod.__class__ == compiler.ast.Module)
assert(mod.node.__class__ == compiler.ast.Stmt)
assert(mod.node.nodes.__class__ == list)
vars = {}
for n in mod.node.nodes:
for fn in (_extract_const, _extract_name):
try:
key, val = fn(n)
vars[key] = val
except AssertionError, e:
pass
return ControlData(vars, path, raise_warnings)
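# Example (hypothetical) control file accepted by parse_control(); only plain
# constant and True/False/None assignments survive the AST extraction above:
#   AUTHOR = "someone@example.com"
#   NAME = "sleeptest"
#   TIME = "short"
#   TEST_CLASS = "general"
#   TEST_CATEGORY = "functional"
#   TEST_TYPE = "client"
#   DOC = "Sleeps for a few seconds."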
|
alexsandrohaag/odoo-website-addons
|
refs/heads/master
|
website_sale_add_to_cart/__init__.py
|
1350
|
import controllers
|
ProyectoRATSoft/RATSoft
|
refs/heads/master
|
web/bower_components/bootstrap-datepicker/docs/conf.py
|
171
|
# -*- coding: utf-8 -*-
#
# bootstrap-datepicker documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 2 14:45:57 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = ''
# The full version, including alpha/beta/rc tags.
#release = ''
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
primary_domain = 'js'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bootstrap-datepicker'
copyright = u'2016, eternicode'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'javascript'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['_themes',]
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bootstrap-datepickerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bootstrap-datepicker.tex', u'bootstrap-datepicker Documentation',
u'eternicode', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bootstrap-datepicker', u'bootstrap-datepicker Documentation',
[u'eternicode'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bootstrap-datepicker', u'bootstrap-datepicker Documentation',
u'eternicode', 'bootstrap-datepicker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
nikolay-fedotov/tempest
|
refs/heads/master
|
tempest/api/compute/servers/test_multiple_create.py
|
6
|
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import test
class MultipleCreateTestJSON(base.BaseV2ComputeTest):
_name = 'multiple-create-test'
def _generate_name(self):
return data_utils.rand_name(self._name)
def _create_multiple_servers(self, name=None, wait_until=None, **kwargs):
"""
Create multiple servers and get the created servers into the servers
list so they are cleaned up after all the tests run.
"""
kwargs['name'] = kwargs.get('name', self._generate_name())
resp, body = self.create_test_server(**kwargs)
return resp, body
@test.attr(type='gate')
def test_multiple_create(self):
resp, body = self._create_multiple_servers(wait_until='ACTIVE',
min_count=1,
max_count=2)
# NOTE(maurosr): do status response check and also make sure that
# reservation_id is not in the response body when the request send
# contains return_reservation_id=False
self.assertEqual('202', resp['status'])
self.assertNotIn('reservation_id', body)
@test.attr(type='gate')
def test_multiple_create_with_reservation_return(self):
resp, body = self._create_multiple_servers(wait_until='ACTIVE',
min_count=1,
max_count=2,
return_reservation_id=True)
self.assertEqual(resp['status'], '202')
self.assertIn('reservation_id', body)
class MultipleCreateTestXML(MultipleCreateTestJSON):
_interface = 'xml'
|
mdaniel/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyDataclassInspection/comparisonInStdInheritance.py
|
14
|
from dataclasses import dataclass
@dataclass(order=True)
class A1:
x: int = 1
@dataclass(order=False)
class B1(A1):
y: str = "1"
print(A1() <error descr="'__lt__' not supported between instances of 'A1' and 'B1'"><</error> B1())
print(B1() < B1())
@dataclass(order=False)
class A2:
x: int = 1
@dataclass(order=True)
class B2(A2):
y: str = "1"
print(A2() <error descr="'__lt__' not supported between instances of 'A2' and 'B2'"><</error> B2())
print(B2() < B2())
@dataclass(order=False)
class A3:
x: int = 1
@dataclass(order=False)
class B3(A3):
y: str = "1"
print(A3() <error descr="'__lt__' not supported between instances of 'A3' and 'B3'"><</error> B3())
print(B3() <error descr="'__lt__' not supported between instances of 'B3'"><</error> B3())
@dataclass(order=True)
class A4:
x: int = 1
@dataclass(order=True)
class B4(A4):
y: str = "1"
print(A4() <error descr="'__lt__' not supported between instances of 'A4' and 'B4'"><</error> B4())
print(B4() < B4())
class A5:
x: int = 1
@dataclass(order=True)
class B5(A5):
y: str = "1"
print(A5() <error descr="'__lt__' not supported between instances of 'A5' and 'B5'"><</error> B5())
print(B5() < B5())
@dataclass(order=True)
class A6:
x: int = 1
class B6(A6):
y: str = "1"
print(A6() <error descr="'__lt__' not supported between instances of 'A6' and 'B6'"><</error> B6())
print(B6() < B6())
|
mrquim/mrquimrepo
|
refs/heads/master
|
repo/plugin.video.rebirth/resources/lib/modules/source_utils.py
|
5
|
# -*- coding: utf-8 -*-
################################################################################
# | #
# | ______________________________________________________________ #
# | :~8a.`~888a:::::::::::::::88......88:::::::::::::::;a8~".a88::| #
# | ::::~8a.`~888a::::::::::::88......88::::::::::::;a8~".a888~:::| #
# | :::::::~8a.`~888a:::::::::88......88:::::::::;a8~".a888~::::::| #
# | ::::::::::~8a.`~888a::::::88......88::::::;a8~".a888~:::::::::| #
# | :::::::::::::~8a.`~888a:::88......88:::;a8~".a888~::::::::::::| #
# | :::::::::::: :~8a.`~888a:88 .....88;a8~".a888~:::::::::::::::| #
# | :::::::::::::::::::~8a.`~888......88~".a888~::::::::::::::::::| #
# | 8888888888888888888888888888......8888888888888888888888888888| #
# | ..............................................................| #
# | ..............................................................| #
# | 8888888888888888888888888888......8888888888888888888888888888| #
# | ::::::::::::::::::a888~".a88......888a."~8;:::::::::::::::::::| #
# | :::::::::::::::a888~".a8~:88......88~888a."~8;::::::::::::::::| #
# | ::::::::::::a888~".a8~::::88......88:::~888a."~8;:::::::::::::| #
# | :::::::::a888~".a8~:::::::88......88::::::~888a."~8;::::::::::| #
# | ::::::a888~".a8~::::::::::88......88:::::::::~888a."~8;:::::::| #
# | :::a888~".a8~:::::::::::::88......88::::::::::::~888a."~8;::::| #
# | a888~".a8~::::::::::::::::88......88:::::::::::::::~888a."~8;:| #
# | #
# | Rebirth Addon #
# | Copyright (C) 2017 Cypher #
# | #
# | This program is free software: you can redistribute it and/or modify #
# | it under the terms of the GNU General Public License as published by #
# | the Free Software Foundation, either version 3 of the License, or #
# | (at your option) any later version. #
# | #
# | This program is distributed in the hope that it will be useful, #
# | but WITHOUT ANY WARRANTY; without even the implied warranty of #
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# | GNU General Public License for more details. #
# | #
################################################################################
import base64
import urlparse
import urllib
import hashlib
import re
from resources.lib.modules import client
from resources.lib.modules import trakt
from resources.lib.modules import pyaes
def is_anime(content, type, type_id):
try:
r = trakt.getGenre(content, type, type_id)
return 'anime' in r or 'animation' in r
except:
return False
def get_release_quality(release_name):
if release_name is None: return
try: release_name = release_name.encode('utf-8')
except: pass
try:
release_name = release_name.upper()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', release_name)
fmt = re.split('\.|\(|\)|\[|\]|\s|-', fmt)
fmt = [i.lower() for i in fmt]
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt or '.3D.' in release_name: info.append('3D')
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
return quality, info
except:
return 'SD', []
def label_to_quality(label):
try:
try: label = int(re.search('(\d+)', label).group(1))
except: label = 0
if label >= 2160:
return '4K'
elif label >= 1440:
return '1440p'
elif label >= 1080:
return '1080p'
elif 720 <= label < 1080:
return 'HD'
elif label < 720:
return 'SD'
except:
return 'SD'
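# Sketch of the mapping (hypothetical labels): label_to_quality('1080p') -> '1080p',
# label_to_quality('480') -> 'SD'; labels without any digits fall back to 'SD'.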
def strip_domain(url):
try:
if url.lower().startswith('http') or url.startswith('/'):
url = re.findall('(?://.+?|)(/.+)', url)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def is_host_valid(url, domains):
try:
host = __top_domain(url)
hosts = [domain.lower() for domain in domains if host and host in domain.lower()]
if hosts and '.' not in host:
host = hosts[0]
if hosts and any([h for h in ['google', 'picasa', 'blogspot'] if h in host]):
host = 'gvideo'
return any(hosts), host
except:
return False, ''
def __top_domain(url):
elements = urlparse.urlparse(url)
domain = elements.netloc or elements.path
domain = domain.split('@')[-1].split(':')[0]
regex = "(?:www\.)?([\w\-]*\.[\w\-]{2,3}(?:\.[\w\-]{2,3})?)$"
res = re.search(regex, domain)
if res: domain = res.group(1)
domain = domain.lower()
return domain
def aliases_to_array(aliases, filter=None):
try:
if not filter:
filter = []
if isinstance(filter, str):
filter = [filter]
return [x.get('title') for x in aliases if not filter or x.get('country') in filter]
except:
return []
def append_headers(headers):
return '|%s' % '&'.join(['%s=%s' % (key, urllib.quote_plus(headers[key])) for key in headers])
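# Example (hypothetical header): append_headers({'Referer': 'http://example.com/'})
# returns '|Referer=http%3A%2F%2Fexample.com%2F', the Kodi-style URL suffix.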
# if salt is provided, it should be string
# ciphertext is base64 and passphrase is string
def evp_decode(cipher_text, passphrase, salt=None):
cipher_text = base64.b64decode(cipher_text)
if not salt:
salt = cipher_text[8:16]
cipher_text = cipher_text[16:]
data = evpKDF(passphrase, salt)
decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(data['key'], data['iv']))
plain_text = decrypter.feed(cipher_text)
plain_text += decrypter.feed()
return plain_text
def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"):
target_key_size = key_size + iv_size
derived_bytes = ""
number_of_derived_words = 0
block = None
hasher = hashlib.new(hash_algorithm)
while number_of_derived_words < target_key_size:
if block is not None:
hasher.update(block)
hasher.update(passwd)
hasher.update(salt)
block = hasher.digest()
hasher = hashlib.new(hash_algorithm)
for _i in range(1, iterations):
hasher.update(block)
block = hasher.digest()
hasher = hashlib.new(hash_algorithm)
derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)]
number_of_derived_words += len(block) / 4
return {
"key": derived_bytes[0: key_size * 4],
"iv": derived_bytes[key_size * 4:]
}
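# Usage sketch (hypothetical values): when no salt is passed, evp_decode assumes
# OpenSSL's "Salted__" layout, i.e. an 8-byte magic followed by an 8-byte salt
# before the ciphertext:
#   plain_text = evp_decode(base64_cipher_text, "passphrase")
# The evpKDF defaults count key_size/iv_size in 32-bit words (CryptoJS style),
# yielding a 32-byte AES key and a 16-byte IV.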
|
ddy88958620/lib
|
refs/heads/master
|
Python/scrapy/reliefspot/__init__.py
|
2
|
ACCOUNT_NAME = 'reliefspot'
|
robhudson/zamboni
|
refs/heads/master
|
apps/stats/db.py
|
5
|
from django.db import models
import phpserialize as php
try:
import simplejson as json
except ImportError:
import json
class StatsDictField(models.TextField):
description = 'A dictionary of counts stored as serialized php.'
__metaclass__ = models.SubfieldBase
def db_type(self, connection):
return 'text'
def to_python(self, value):
# object case
if value is None:
return None
if isinstance(value, dict):
return value
# string case
if value and value[0] in '[{':
# JSON
try:
d = json.loads(value)
except ValueError:
d = None
else:
# phpserialize data
try:
if isinstance(value, unicode):
value = value.encode('utf8')
d = php.unserialize(value, decode_strings=True)
except ValueError:
d = None
if isinstance(d, dict):
return d
return None
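# e.g. to_python('{"a": 1}') returns {'a': 1} via JSON, while a legacy row such
# as to_python('a:1:{s:1:"a";i:1;}') is decoded through phpserialize.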
def get_db_prep_value(self, value, connection, prepared=False):
if value is None or value == '':
return value
try:
value = json.dumps(dict(value))
except TypeError:
value = None
return value
def value_to_string(self, obj):
return str(obj)
|
ProfessionalIT/professionalit-webiste
|
refs/heads/master
|
sdk/google_appengine/lib/django-1.2/django/db/backends/oracle/creation.py
|
43
|
import sys, time
from django.db.backends.creation import BaseDatabaseCreation
TEST_DATABASE_PREFIX = 'test_'
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed.
data_types = {
'AutoField': 'NUMBER(11)',
'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
}
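# For example, a CharField with max_length=100 interpolates to 'NVARCHAR2(100)',
# and the qn_column placeholders in the boolean checks are quoted column names.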
def __init__(self, connection):
self.remember = {}
super(DatabaseCreation, self).__init__(connection)
def _create_test_db(self, verbosity=1, autoclobber=False):
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': TEST_PASSWD,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
self.remember['user'] = self.connection.settings_dict['USER']
self.remember['passwd'] = self.connection.settings_dict['PASSWORD']
cursor = self.connection.cursor()
if self._test_database_create():
if verbosity >= 1:
print 'Creating test database...'
try:
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception, e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print "Destroying old test database..."
self._execute_test_db_destruction(cursor, parameters, verbosity)
if verbosity >= 1:
print "Creating test database..."
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception, e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
print "Creating test user..."
try:
self._create_test_user(cursor, parameters, verbosity)
except Exception, e:
sys.stderr.write("Got an error creating the test user: %s\n" % e)
if not autoclobber:
confirm = raw_input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print "Destroying old test user..."
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
print "Creating test user..."
self._create_test_user(cursor, parameters, verbosity)
except Exception, e:
sys.stderr.write("Got an error recreating the test user: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict["USER"] = TEST_USER
self.connection.settings_dict["PASSWORD"] = TEST_PASSWD
return self.connection.settings_dict['NAME']
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME'],
settings_dict['TEST_USER'],
)
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy the test database and test user created by _create_test_db,
restoring the connection's original credentials.
"""
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
self.connection.settings_dict["USER"] = self.remember['user']
self.connection.settings_dict["PASSWORD"] = self.remember['passwd']
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': TEST_PASSWD,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
cursor = self.connection.cursor()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
if self._test_user_create():
if verbosity >= 1:
print 'Destroying test user...'
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
print 'Destroying test database tables...'
self._execute_test_db_destruction(cursor, parameters, verbosity)
self.connection.close()
def _execute_test_db_creation(self, cursor, parameters, verbosity):
if verbosity >= 2:
print "_create_test_db(): dbname = %s" % parameters['dbname']
statements = [
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(tblspace)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
""",
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _create_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print "_create_test_user(): username = %s" % parameters['user']
statements = [
"""CREATE USER %(user)s
IDENTIFIED BY %(password)s
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
""",
"""GRANT CONNECT, RESOURCE TO %(user)s""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
if verbosity >= 2:
print "_execute_test_db_destruction(): dbname=%s" % parameters['dbname']
statements = [
'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print "_destroy_test_user(): user=%s" % parameters['user']
print "Be patient. This can take some time..."
statements = [
'DROP USER %(user)s CASCADE',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print stmt
try:
cursor.execute(stmt)
except Exception, err:
sys.stderr.write("Failed (%s)\n" % (err))
raise
def _test_database_name(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_NAME']:
name = self.connection.settings_dict['TEST_NAME']
except KeyError:
pass
return name
def _test_database_create(self):
return self.connection.settings_dict.get('TEST_CREATE', True)
def _test_user_create(self):
return self.connection.settings_dict.get('TEST_USER_CREATE', True)
def _test_database_user(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
try:
if self.connection.settings_dict['TEST_USER']:
name = self.connection.settings_dict['TEST_USER']
except KeyError:
pass
return name
def _test_database_passwd(self):
name = PASSWORD
try:
if self.connection.settings_dict['TEST_PASSWD']:
name = self.connection.settings_dict['TEST_PASSWD']
except KeyError:
pass
return name
def _test_database_tblspace(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_TBLSPACE']:
name = self.connection.settings_dict['TEST_TBLSPACE']
except KeyError:
pass
return name
def _test_database_tblspace_tmp(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
try:
if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
except KeyError:
pass
return name
|
hynnet/openwrt-mt7620
|
refs/heads/master
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/Cookie.py
|
65
|
#!/usr/bin/env python
#
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy..
>>> import Cookie
Most of the time you start by creating a cookie. Cookies come in
three flavors, each with slightly different encoding semantics, but
more on that later.
>>> C = Cookie.SimpleCookie()
>>> C = Cookie.SerialCookie()
>>> C = Cookie.SmartCookie()
[Note: Long-time users of Cookie.py will remember using
Cookie.Cookie() to create a Cookie object. Although deprecated, it
is still supported by the code. See the Backward Compatibility notes
for more information.]
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = Cookie.SmartCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = Cookie.SmartCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print C.output(header="Cookie:")
Cookie: rocky=road; Path=/cookie
>>> print C.output(attrs=[], header="Cookie:")
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = Cookie.SmartCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = Cookie.SmartCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print C
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = Cookie.SmartCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print C
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = Cookie.SmartCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
A Bit More Advanced
-------------------
As mentioned before, there are three different flavors of Cookie
objects, each with different encoding/decoding semantics. This
section briefly discusses the differences.
SimpleCookie
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = Cookie.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
SerialCookie
The SerialCookie expects that all values should be serialized using
cPickle (or pickle, if cPickle isn't available). As a result of
serializing, SerialCookie can save almost any Python object to a
value, and recover the exact same object when the cookie has been
returned. (SerialCookie can yield some strange-looking cookie
values, however.)
>>> C = Cookie.SerialCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'
Be warned, however, if SerialCookie cannot de-serialize a value (because
it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
SmartCookie
The SmartCookie combines aspects of each of the other two flavors.
When setting a value in a dictionary-fashion, the SmartCookie will
serialize (ala cPickle) the value *if and only if* it isn't a
Python string. String objects are *not* serialized. Similarly,
when the load() method parses out values, it attempts to de-serialize
the value. If that fails, it falls back to treating the value
as a string.
>>> C = Cookie.SmartCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
Backwards Compatibility
-----------------------
In order to keep compatibility with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie. In
fact, this simply returns a SmartCookie.
>>> C = Cookie.Cookie()
>>> print C.__class__.__name__
SmartCookie
Finis.
""" #"
# ^
# |----helps out font-lock
#
# Import our required modules
#
import string
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
import re, warnings
__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
"SmartCookie","Cookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a backslash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding backslash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
_idmap = ''.join(chr(x) for x in xrange(256))
def _quote(str, LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
#
# If the string does not need to be double-quoted,
# then just return the string. Otherwise, surround
# the string in doublequotes and precede quote (with a \)
# special characters.
#
if "" == translate(str, idmap, LegalChars):
return str
else:
return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"'
# end _quote
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
Omatch = _OctalPatt.search(str, i)
Qmatch = _QuotePatt.search(str, i)
if not Omatch and not Qmatch: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if Omatch: j = Omatch.start(0)
if Qmatch: k = Qmatch.start(0)
if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k+2
else: # OctalPatt matched
res.append(str[i:j])
res.append( chr( int(str[j+1:j+4], 8) ) )
i = j+4
return _nulljoin(res)
# end _unquote
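# Round-trip sketch (illustrative, not part of the original module): a comma
# is escaped as its three-digit octal sequence and _unquote reverses it:
#   _quote('a,b')          ->  '"a\\054b"'
#   _unquote('"a\\054b"')  ->  'a,b'
#   _quote('fig')          ->  'fig'    (every char legal, left unquoted)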
# The _getdate() routine is used to set the expiration time in
# the cookie's HTTP header. By default, _getdate() returns the
# current time in the appropriate "expires" format for a
# Set-Cookie header. The one optional argument is an offset from
# now, in seconds. For example, an offset of -3600 means "one hour ago".
# The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
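# Illustrative (format only; the actual value depends on the clock):
#   _getdate(3600)  ->  'Thu, 01-Jan-2015 13:00:00 GMT'
# i.e. an "expires" stamp one hour in the future.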
#
# A class to hold ONE key,value pair.
# In a cookie, each such pair may have several attributes.
# so this class is used to keep the attributes associated
# with the appropriate key,value pair.
# This class also includes a coded_value attribute, which
# is used to hold the network representation of the
# value. This is most useful when Python objects are
# pickled for network transit.
#
class Morsel(dict):
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = { "expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"httponly" : "httponly",
"version" : "Version",
}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for K in self._reserved:
dict.__setitem__(self, K, "")
# end __init__
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
# end __setitem__
def isReservedKey(self, K):
return K.lower() in self._reserved
# end isReservedKey
def set(self, key, val, coded_val,
LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if "" != translate(key, idmap, LegalChars):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
# end set
def output(self, attrs=None, header = "Set-Cookie:"):
return "%s %s" % ( header, self.OutputString(attrs) )
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value) )
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % ( self.OutputString(attrs).replace('"',r'\"'), )
# end js_output()
def OutputString(self, attrs=None):
# Build up our result
#
result = []
RA = result.append
# First, the key=value pair
RA("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = self.items()
items.sort()
for K,V in items:
if V == "": continue
if K not in attrs: continue
if K == "expires" and type(V) == type(1):
RA("%s=%s" % (self._reserved[K], _getdate(V)))
elif K == "max-age" and type(V) == type(1):
RA("%s=%d" % (self._reserved[K], V))
elif K == "secure":
RA(str(self._reserved[K]))
elif K == "httponly":
RA(str(self._reserved[K]))
else:
RA("%s=%s" % (self._reserved[K], V))
# Return the result
return _semispacejoin(result)
# end OutputString
# end Morsel class
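# Usage sketch (illustrative, not part of the original module): reserved
# attributes render through the _reserved mapping, e.g.
#   m = Morsel(); m.set('k', 'v', 'v'); m['max-age'] = 60
#   m.OutputString()  ->  'k=v; Max-Age=60'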
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(
r"(?x)" # This is a Verbose pattern
r"(?P<key>" # Start of group 'key'
""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy
r")" # End of group 'key'
r"\s*=\s*" # Equal Sign
r"(?P<val>" # Start of group 'val'
r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
r"|" # or
r"\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr
r"|" # or
""+ _LegalCharsPatt +"*" # Any word or empty string
r")" # End of group 'val'
r"\s*;?" # Probably ending in a semi-colon
)
# At long last, here is the cookie class.
# Using this class is almost just like using a dictionary.
# See this module's docstring for example usage.
#
class BaseCookie(dict):
# A container class for a set of Morsels
#
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
    # end value_decode
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
# end value_encode
def __init__(self, input=None):
if input: self.load(input)
# end __init__
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
# end __set
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
# end __setitem__
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.output(attrs, header) )
return sep.join(result)
# end output
__str__ = output
def __repr__(self):
L = []
items = self.items()
items.sort()
for K,V in items:
L.append( '%s=%s' % (K,repr(V.value) ) )
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(L))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.js_output(attrs) )
return _nulljoin(result)
# end js_output
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if type(rawdata) == type(""):
self.__ParseString(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for k, v in rawdata.items():
self[k] = v
return
# end load()
def __ParseString(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match: break # No more cookies
K,V = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if K[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[ K[1:] ] = V
elif K.lower() in Morsel._reserved:
if M:
M[ K ] = _unquote(V)
else:
rval, cval = self.value_decode(V)
self.__set(K, rval, cval)
M = self[K]
# end __ParseString
# end BaseCookie class
class SimpleCookie(BaseCookie):
"""SimpleCookie
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote( val ), val
def value_encode(self, val):
strval = str(val)
return strval, _quote( strval )
# end SimpleCookie
class SerialCookie(BaseCookie):
"""SerialCookie
SerialCookie supports arbitrary objects as cookie values. All
values are serialized (using cPickle) before being sent to the
client. All incoming values are assumed to be valid Pickle
representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
FORMAT, THEN AN EXCEPTION WILL BE RAISED.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("SerialCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
# This could raise an exception!
return loads( _unquote(val) ), val
def value_encode(self, val):
return val, _quote( dumps(val) )
# end SerialCookie
class SmartCookie(BaseCookie):
"""SmartCookie
SmartCookie supports arbitrary objects as cookie values. If the
object is a string, then it is quoted. If the object is not a
string, however, then SmartCookie will use cPickle to serialize
the object into a string representation.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("Cookie/SmartCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
strval = _unquote(val)
try:
return loads(strval), val
except:
return strval, val
def value_encode(self, val):
if type(val) == type(""):
return val, _quote(val)
else:
return val, _quote( dumps(val) )
# end SmartCookie
###########################################################
# Backwards Compatibility: Don't break any existing code!
# We provide Cookie() as an alias for SmartCookie()
Cookie = SmartCookie
#
###########################################################
def _test():
import doctest, Cookie
return doctest.testmod(Cookie)
if __name__ == "__main__":
_test()
#Local Variables:
#tab-width: 4
#end:
|
Mitchkoens/sympy
|
refs/heads/master
|
sympy/printing/codeprinter.py
|
38
|
from __future__ import print_function, division
from sympy.core import Add, Mul, Pow, S
from sympy.core.basic import Basic
from sympy.core.compatibility import default_sort_key, string_types
from sympy.core.function import Lambda
from sympy.core.mul import _keep_coeff
from sympy.core.relational import Relational
from sympy.core.symbol import Symbol
from sympy.printing.str import StrPrinter
from sympy.printing.precedence import precedence
from sympy.core.sympify import _sympify, sympify
class AssignmentError(Exception):
"""
Raised if an assignment variable for a loop is missing.
"""
pass
class Assignment(Relational):
"""
Represents variable assignment for code generation.
Parameters
----------
lhs : Expr
Sympy object representing the lhs of the expression. These should be
singular objects, such as one would use in writing code. Notable types
include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that
subclass these types are also supported.
rhs : Expr
Sympy object representing the rhs of the expression. This can be any
type, provided its shape corresponds to that of the lhs. For example,
a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as
the dimensions will not align.
Examples
========
>>> from sympy import symbols, MatrixSymbol, Matrix
>>> from sympy.printing.codeprinter import Assignment
>>> x, y, z = symbols('x, y, z')
>>> Assignment(x, y)
x := y
>>> Assignment(x, 0)
x := 0
>>> A = MatrixSymbol('A', 1, 3)
>>> mat = Matrix([x, y, z]).T
>>> Assignment(A, mat)
A := Matrix([[x, y, z]])
>>> Assignment(A[0, 1], x)
A[0, 1] := x
"""
rel_op = ':='
__slots__ = []
def __new__(cls, lhs, rhs=0, **assumptions):
from sympy.matrices.expressions.matexpr import (
MatrixElement, MatrixSymbol)
from sympy.tensor.indexed import Indexed
lhs = _sympify(lhs)
rhs = _sympify(rhs)
# Tuple of things that can be on the lhs of an assignment
assignable = (Symbol, MatrixSymbol, MatrixElement, Indexed)
if not isinstance(lhs, assignable):
raise TypeError("Cannot assign to lhs of type %s." % type(lhs))
# Indexed types implement shape, but don't define it until later. This
# causes issues in assignment validation. For now, matrices are defined
# as anything with a shape that is not an Indexed
lhs_is_mat = hasattr(lhs, 'shape') and not isinstance(lhs, Indexed)
rhs_is_mat = hasattr(rhs, 'shape') and not isinstance(rhs, Indexed)
# If lhs and rhs have same structure, then this assignment is ok
if lhs_is_mat:
if not rhs_is_mat:
raise ValueError("Cannot assign a scalar to a matrix.")
elif lhs.shape != rhs.shape:
raise ValueError("Dimensions of lhs and rhs don't align.")
elif rhs_is_mat and not lhs_is_mat:
raise ValueError("Cannot assign a matrix to a scalar.")
return Relational.__new__(cls, lhs, rhs, **assumptions)
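# Validation sketch (illustrative, not part of the original module): the shape
# checks above mean
#   Assignment(Symbol('x'), Matrix([[1, 2]]))              # ValueError: matrix -> scalar
#   Assignment(MatrixSymbol('A', 1, 2), Matrix([[1, 2]]))  # accepted, shapes align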
class CodePrinter(StrPrinter):
"""
The base class for code-printing subclasses.
"""
_operators = {
'and': '&&',
'or': '||',
'not': '!',
}
_default_settings = {'order': None,
'full_prec': 'auto',
'error_on_reserved': False,
'reserved_word_suffix': '_'}
def __init__(self, settings=None):
super(CodePrinter, self).__init__(settings=settings)
self.reserved_words = set()
def doprint(self, expr, assign_to=None):
"""
Print the expression as code.
Parameters
----------
expr : Expression
The expression to be printed.
assign_to : Symbol, MatrixSymbol, or string (optional)
If provided, the printed code will set the expression to a
variable with name ``assign_to``.
"""
from sympy.matrices.expressions.matexpr import MatrixSymbol
if isinstance(assign_to, string_types):
if expr.is_Matrix:
assign_to = MatrixSymbol(assign_to, *expr.shape)
else:
assign_to = Symbol(assign_to)
elif not isinstance(assign_to, (Basic, type(None))):
raise TypeError("{0} cannot assign to object of type {1}".format(
type(self).__name__, type(assign_to)))
if assign_to:
expr = Assignment(assign_to, expr)
else:
            # _sympify is not enough because it errors on iterables
expr = sympify(expr)
# keep a set of expressions that are not strictly translatable to Code
# and number constants that must be declared and initialized
self._not_supported = set()
self._number_symbols = set()
lines = self._print(expr).splitlines()
# format the output
if self._settings["human"]:
frontlines = []
if len(self._not_supported) > 0:
frontlines.append(self._get_comment(
"Not supported in {0}:".format(self.language)))
for expr in sorted(self._not_supported, key=str):
frontlines.append(self._get_comment(type(expr).__name__))
for name, value in sorted(self._number_symbols, key=str):
frontlines.append(self._declare_number_const(name, value))
lines = frontlines + lines
lines = self._format_code(lines)
result = "\n".join(lines)
else:
lines = self._format_code(lines)
result = (self._number_symbols, self._not_supported,
"\n".join(lines))
del self._not_supported
del self._number_symbols
return result
def _doprint_loops(self, expr, assign_to=None):
        # Here we print an expression that contains Indexed objects; they
        # correspond to arrays in the generated code. The low-level
        # implementation involves looping over array elements and possibly
        # storing results in temporary variables or accumulating them in the
        # assign_to object.
if self._settings.get('contract', True):
from sympy.tensor import get_contraction_structure
# Setup loops over non-dummy indices -- all terms need these
indices = self._get_expression_indices(expr, assign_to)
# Setup loops over dummy indices -- each term needs separate treatment
dummies = get_contraction_structure(expr)
else:
indices = []
dummies = {None: (expr,)}
openloop, closeloop = self._get_loop_opening_ending(indices)
# terms with no summations first
if None in dummies:
text = StrPrinter.doprint(self, Add(*dummies[None]))
else:
# If all terms have summations we must initialize array to Zero
text = StrPrinter.doprint(self, 0)
# skip redundant assignments (where lhs == rhs)
lhs_printed = self._print(assign_to)
lines = []
if text != lhs_printed:
lines.extend(openloop)
if assign_to is not None:
text = self._get_statement("%s = %s" % (lhs_printed, text))
lines.append(text)
lines.extend(closeloop)
# then terms with summations
for d in dummies:
if isinstance(d, tuple):
indices = self._sort_optimized(d, expr)
openloop_d, closeloop_d = self._get_loop_opening_ending(
indices)
for term in dummies[d]:
if term in dummies and not ([list(f.keys()) for f in dummies[term]]
== [[None] for f in dummies[term]]):
                        # If one factor in the term has its own internal
# contractions, those must be computed first.
# (temporary variables?)
raise NotImplementedError(
"FIXME: no support for contractions in factor yet")
else:
# We need the lhs expression as an accumulator for
# the loops, i.e
#
# for (int d=0; d < dim; d++){
# lhs[] = lhs[] + term[][d]
# } ^.................. the accumulator
#
# We check if the expression already contains the
# lhs, and raise an exception if it does, as that
# syntax is currently undefined. FIXME: What would be
# a good interpretation?
if assign_to is None:
raise AssignmentError(
"need assignment variable for loops")
if term.has(assign_to):
raise ValueError("FIXME: lhs present in rhs,\
this is undefined in CodePrinter")
lines.extend(openloop)
lines.extend(openloop_d)
text = "%s = %s" % (lhs_printed, StrPrinter.doprint(
self, assign_to + term))
lines.append(self._get_statement(text))
lines.extend(closeloop_d)
lines.extend(closeloop)
return "\n".join(lines)
def _get_expression_indices(self, expr, assign_to):
from sympy.tensor import get_indices
rinds, junk = get_indices(expr)
linds, junk = get_indices(assign_to)
# support broadcast of scalar
if linds and not rinds:
rinds = linds
if rinds != linds:
raise ValueError("lhs indices must match non-dummy"
" rhs indices in %s" % expr)
return self._sort_optimized(rinds, assign_to)
def _sort_optimized(self, indices, expr):
from sympy.tensor.indexed import Indexed
if not indices:
return []
        # Determine the optimized loop order by giving a score to each index;
        # the index with the highest score is put in the innermost loop.
score_table = {}
for i in indices:
score_table[i] = 0
arrays = expr.atoms(Indexed)
for arr in arrays:
for p, ind in enumerate(arr.indices):
try:
score_table[ind] += self._rate_index_position(p)
except KeyError:
pass
return sorted(indices, key=lambda x: score_table[x])
def _rate_index_position(self, p):
"""function to calculate score based on position among indices
This method is used to sort loops in an optimized order, see
CodePrinter._sort_optimized()
"""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _get_statement(self, codestring):
"""Formats a codestring with the proper line ending."""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _get_comment(self, text):
"""Formats a text string as a comment."""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _declare_number_const(self, name, value):
"""Declare a numeric constant at the top of a function"""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _format_code(self, lines):
"""Take in a list of lines of code, and format them accordingly.
This may include indenting, wrapping long lines, etc..."""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _get_loop_opening_ending(self, indices):
"""Returns a tuple (open_lines, close_lines) containing lists
of codelines"""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _print_Assignment(self, expr):
from sympy.functions.elementary.piecewise import Piecewise
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.tensor.indexed import IndexedBase
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
if isinstance(expr.rhs, Piecewise):
# Here we modify Piecewise so each expression is now
# an Assignment, and then continue on the print.
expressions = []
conditions = []
for (e, c) in rhs.args:
expressions.append(Assignment(lhs, e))
conditions.append(c)
temp = Piecewise(*zip(expressions, conditions))
return self._print(temp)
elif isinstance(lhs, MatrixSymbol):
# Here we form an Assignment for each element in the array,
# printing each one.
lines = []
for (i, j) in self._traverse_matrix_indices(lhs):
temp = Assignment(lhs[i, j], rhs[i, j])
code0 = self._print(temp)
lines.append(code0)
return "\n".join(lines)
elif self._settings["contract"] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_Symbol(self, expr):
name = super(CodePrinter, self)._print_Symbol(expr)
if name in self.reserved_words:
if self._settings['error_on_reserved']:
msg = ('This expression includes the symbol "{}" which is a '
'reserved keyword in this language.')
raise ValueError(msg.format(name))
return name + self._settings['reserved_word_suffix']
else:
return name
def _print_Function(self, expr):
if expr.func.__name__ in self.known_functions:
cond_func = self.known_functions[expr.func.__name__]
func = None
if isinstance(cond_func, str):
func = cond_func
else:
for cond, func in cond_func:
if cond(*expr.args):
break
if func is not None:
return "%s(%s)" % (func, self.stringify(expr.args, ", "))
elif hasattr(expr, '_imp_') and isinstance(expr._imp_, Lambda):
# inlined function
return self._print(expr._imp_(*expr.args))
else:
return self._print_not_supported(expr)
def _print_NumberSymbol(self, expr):
# A Number symbol that is not implemented here or with _printmethod
# is registered and evaluated
self._number_symbols.add((expr,
self._print(expr.evalf(self._settings["precision"]))))
return str(expr)
def _print_Dummy(self, expr):
# dummies must be printed as unique symbols
return "%s_%i" % (expr.name, expr.dummy_index) # Dummy
def _print_Catalan(self, expr):
return self._print_NumberSymbol(expr)
def _print_EulerGamma(self, expr):
return self._print_NumberSymbol(expr)
def _print_GoldenRatio(self, expr):
return self._print_NumberSymbol(expr)
def _print_Exp1(self, expr):
return self._print_NumberSymbol(expr)
def _print_Pi(self, expr):
return self._print_NumberSymbol(expr)
def _print_And(self, expr):
PREC = precedence(expr)
return (" %s " % self._operators['and']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Or(self, expr):
PREC = precedence(expr)
return (" %s " % self._operators['or']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Xor(self, expr):
if self._operators.get('xor') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (" %s " % self._operators['xor']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Equivalent(self, expr):
if self._operators.get('equivalent') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (" %s " % self._operators['equivalent']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Not(self, expr):
PREC = precedence(expr)
return self._operators['not'] + self.parenthesize(expr.args[0], PREC)
def _print_Mul(self, expr):
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
b.append(Pow(item.base, -item.exp))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
if len(b) == 0:
return sign + '*'.join(a_str)
elif len(b) == 1:
return sign + '*'.join(a_str) + "/" + b_str[0]
else:
return sign + '*'.join(a_str) + "/(%s)" % '*'.join(b_str)
def _print_not_supported(self, expr):
self._not_supported.add(expr)
return self.emptyPrinter(expr)
# The following can not be simply translated into C or Fortran
_print_Basic = _print_not_supported
_print_ComplexInfinity = _print_not_supported
_print_Derivative = _print_not_supported
_print_dict = _print_not_supported
_print_ExprCondPair = _print_not_supported
_print_GeometryEntity = _print_not_supported
_print_Infinity = _print_not_supported
_print_Integral = _print_not_supported
_print_Interval = _print_not_supported
_print_Limit = _print_not_supported
_print_list = _print_not_supported
_print_Matrix = _print_not_supported
_print_ImmutableMatrix = _print_not_supported
_print_MutableDenseMatrix = _print_not_supported
_print_MatrixBase = _print_not_supported
_print_DeferredVector = _print_not_supported
_print_NaN = _print_not_supported
_print_NegativeInfinity = _print_not_supported
_print_Normal = _print_not_supported
_print_Order = _print_not_supported
_print_PDF = _print_not_supported
_print_RootOf = _print_not_supported
_print_RootsOf = _print_not_supported
_print_RootSum = _print_not_supported
_print_Sample = _print_not_supported
_print_SparseMatrix = _print_not_supported
_print_tuple = _print_not_supported
_print_Uniform = _print_not_supported
_print_Unit = _print_not_supported
_print_Wild = _print_not_supported
_print_WildFunction = _print_not_supported
|
ierror/django-js-reverse
|
refs/heads/master
|
django_js_reverse/views.py
|
1
|
# -*- coding: utf-8 -*-
import json
from django import http
from django_js_reverse import core
try:
from django.urls import get_resolver
except ImportError:
from django.core.urlresolvers import get_resolver
def _urls_js(fn, type):
def view(request):
default_urlresolver = get_resolver(getattr(request, 'urlconf', None))
return http.HttpResponse(fn(default_urlresolver), content_type=type)
return view
def _generate_json(*args, **kwargs):
return json.dumps(core.generate_json(*args, **kwargs))
urls_js = _urls_js(core.generate_js, 'application/javascript')
urls_json = _urls_js(_generate_json, 'application/json')
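# Usage sketch (illustrative, not part of this module): expose a view from a
# urlconf, e.g.
#   from django.conf.urls import url
#   from django_js_reverse.views import urls_js
#   urlpatterns = [url(r'^jsreverse/$', urls_js, name='js_reverse')]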
|
andresmrm/brython-experiment
|
refs/heads/master
|
static/brython/Lib/_testcapi.py
|
742
|
CHAR_MAX = 127
CHAR_MIN = -128
DBL_MAX = 1.7976931348623157e+308
DBL_MIN = 2.2250738585072014e-308
FLT_MAX = 3.4028234663852886e+38
FLT_MIN = 1.1754943508222875e-38
INT_MAX = 2147483647
INT_MIN = -2147483648
LLONG_MAX = 9223372036854775807
LLONG_MIN = -9223372036854775808
LONG_MAX = 2147483647
LONG_MIN = -2147483648
PY_SSIZE_T_MAX = 2147483647
PY_SSIZE_T_MIN = -2147483648
SHRT_MAX = 32767
SHRT_MIN = -32768
SIZEOF_PYGC_HEAD = 16
UCHAR_MAX = 255
UINT_MAX = 4294967295
ULLONG_MAX = 18446744073709551615
ULONG_MAX = 4294967295
USHRT_MAX = 65535
__loader__ = "<_frozen_importlib.ExtensionFileLoader object at 0x00C98DD0>"
def _pending_threadfunc(*args,**kw):
pass
class _test_structmembersType(object):
pass
def _test_thread_state(*args,**kw):
pass
def argparsing(*args,**kw):
pass
def code_newempty(*args,**kw):
pass
def codec_incrementaldecoder(*args,**kw):
pass
def codec_incrementalencoder(*args,**kw):
pass
def crash_no_current_thread(*args,**kw):
pass
class error(Exception):
pass
def exception_print(*args,**kw):
pass
def getargs_B(*args,**kw):
pass
def getargs_H(*args,**kw):
pass
def getargs_I(*args,**kw):
pass
def getargs_K(*args,**kw):
pass
def getargs_L(*args,**kw):
pass
def getargs_Z(*args,**kw):
pass
def getargs_Z_hash(*args,**kw):
pass
def getargs_b(*args,**kw):
pass
def getargs_c(*args,**kw):
pass
def getargs_h(*args,**kw):
pass
def getargs_i(*args,**kw):
pass
def getargs_k(*args,**kw):
pass
def getargs_keyword_only(*args,**kw):
pass
def getargs_keywords(*args,**kw):
pass
def getargs_l(*args,**kw):
pass
def getargs_n(*args,**kw):
pass
def getargs_p(*args,**kw):
pass
def getargs_s(*args,**kw):
pass
def getargs_s_hash(*args,**kw):
pass
def getargs_s_star(*args,**kw):
pass
def getargs_tuple(*args,**kw):
pass
def getargs_u(*args,**kw):
pass
def getargs_u_hash(*args,**kw):
pass
def getargs_w_star(*args,**kw):
pass
def getargs_y(*args,**kw):
pass
def getargs_y_hash(*args,**kw):
pass
def getargs_y_star(*args,**kw):
pass
def getargs_z(*args,**kw):
pass
def getargs_z_hash(*args,**kw):
pass
def getargs_z_star(*args,**kw):
pass
class instancemethod(object):
pass
def make_exception_with_doc(*args,**kw):
pass
def make_memoryview_from_NULL_pointer(*args,**kw):
pass
def parse_tuple_and_keywords(*args,**kw):
pass
def pytime_object_to_time_t(*args,**kw):
pass
def pytime_object_to_timespec(*args,**kw):
pass
def pytime_object_to_timeval(*args,**kw):
pass
def raise_exception(*args,**kw):
pass
def raise_memoryerror(*args,**kw):
pass
def run_in_subinterp(*args,**kw):
pass
def set_exc_info(*args,**kw):
pass
def test_L_code(*args,**kw):
pass
def test_Z_code(*args,**kw):
pass
def test_capsule(*args,**kw):
pass
def test_config(*args,**kw):
pass
def test_datetime_capi(*args,**kw):
pass
def test_dict_iteration(*args,**kw):
pass
def test_empty_argparse(*args,**kw):
pass
def test_k_code(*args,**kw):
pass
def test_lazy_hash_inheritance(*args,**kw):
pass
def test_list_api(*args,**kw):
pass
def test_long_and_overflow(*args,**kw):
pass
def test_long_api(*args,**kw):
pass
def test_long_as_double(*args,**kw):
pass
def test_long_as_size_t(*args,**kw):
pass
def test_long_long_and_overflow(*args,**kw):
pass
def test_long_numbits(*args,**kw):
pass
def test_longlong_api(*args,**kw):
pass
def test_null_strings(*args,**kw):
pass
def test_s_code(*args,**kw):
pass
def test_string_from_format(*args,**kw):
pass
def test_string_to_double(*args,**kw):
pass
def test_u_code(*args,**kw):
pass
def test_unicode_compare_with_ascii(*args,**kw):
pass
def test_widechar(*args,**kw):
pass
def test_with_docstring(*args,**kw):
"""This is a pretty normal docstring."""
pass
def traceback_print(*args,**kw):
pass
def unicode_aswidechar(*args,**kw):
pass
def unicode_aswidecharstring(*args,**kw):
pass
def unicode_encodedecimal(*args,**kw):
pass
def unicode_transformdecimaltoascii(*args,**kw):
pass
|
sunil07t/e-mission-server
|
refs/heads/master
|
emission/core/wrapper/incident.py
|
3
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import emission.core.wrapper.wrapperbase as ecwb
class Incident(ecwb.WrapperBase):
props = {"loc": ecwb.WrapperBase.Access.RO, # geojson representation of the point
"ts": ecwb.WrapperBase.Access.RO, # timestamp representation of the point
"stress": ecwb.WrapperBase.Access.RO, # stress level (0 = no stress, 100 = max stress)
"local_dt": ecwb.WrapperBase.Access.RO, # searchable datetime in local time
"fmt_time": ecwb.WrapperBase.Access.RO # formatted time
}
enums = {}
geojson = ["loc"]
nullable = []
local_dates = ["local_dt"]
def _populateDependencies(self):
pass
|
zestrada/horizon-cs498cc
|
refs/heads/master
|
horizon/decorators.py
|
9
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
General-purpose decorators for use with Horizon.
"""
import functools
from django.utils.decorators import available_attrs
from django.utils.translation import ugettext as _
def _current_component(view_func, dashboard=None, panel=None):
""" Sets the currently-active dashboard and/or panel on the request. """
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if dashboard:
request.horizon['dashboard'] = dashboard
if panel:
request.horizon['panel'] = panel
return view_func(request, *args, **kwargs)
return dec
def require_auth(view_func):
""" Performs user authentication check.
    Similar to Django's `login_required` decorator, except that this raises a
    :exc:`~horizon.exceptions.NotAuthenticated` exception if the user is not
    signed in.
"""
from horizon.exceptions import NotAuthenticated
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if request.user.is_authenticated():
return view_func(request, *args, **kwargs)
raise NotAuthenticated(_("Please log in to continue."))
return dec
def require_perms(view_func, required):
""" Enforces permission-based access controls.
    :param list required: A list of permission names, all of which the request
    user must possess in order to access the decorated view.
Example usage::
from horizon.decorators import require_perms
@require_perms(['foo.admin', 'foo.member'])
def my_view(request):
...
Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the
requirements are not met.
"""
from horizon.exceptions import NotAuthorized
# We only need to check each permission once for a view, so we'll use a set
current_perms = getattr(view_func, '_required_perms', set([]))
view_func._required_perms = current_perms | set(required)
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if request.user.is_authenticated():
if request.user.has_perms(view_func._required_perms):
return view_func(request, *args, **kwargs)
raise NotAuthorized(_("You are not authorized to access %s")
% request.path)
# If we don't have any permissions, just return the original view.
if required:
return dec
else:
return view_func
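# Usage sketch (illustrative, not part of this module): given its
# (view_func, required) signature, require_perms can be applied directly, e.g.
#   protected = require_perms(my_view, ['foo.admin', 'foo.member'])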
|
PeteW/luigi
|
refs/heads/master
|
luigi/contrib/opener.py
|
3
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""OpenerTarget support, allows easier testing and configuration by abstracting
out the LocalTarget, S3Target, and MockTarget types.
Example:
.. code-block:: python
from luigi.contrib.opener import OpenerTarget
OpenerTarget('/local/path.txt')
OpenerTarget('s3://zefr/remote/path.txt')
"""
import json
from luigi.local_target import LocalTarget
from luigi.mock import MockTarget
from luigi.contrib.s3 import S3Target
from luigi.target import FileSystemException
from six.moves.urllib.parse import urlsplit, parse_qs
__all__ = ['OpenerError',
'NoOpenerError',
'InvalidQuery',
'OpenerRegistry',
'Opener',
'MockOpener',
'LocalOpener',
'S3Opener',
'opener',
'OpenerTarget']
class OpenerError(FileSystemException):
"""The base exception thrown by openers"""
pass
class NoOpenerError(OpenerError):
"""Thrown when there is no opener for the given protocol"""
pass
class InvalidQuery(OpenerError):
"""Thrown when an opener is passed unexpected arguments"""
pass
class OpenerRegistry(object):
def __init__(self, openers=None):
"""An opener registry that stores a number of opener objects used
to parse Target URIs
:param openers: A list of objects inherited from the Opener class.
:type openers: list
"""
if openers is None:
openers = []
self.registry = {}
self.openers = {}
self.default_opener = 'file'
for opener in openers:
self.add(opener)
def get_opener(self, name):
"""Retrieve an opener for the given protocol
:param name: name of the opener to open
:type name: string
:raises NoOpenerError: if no opener has been registered of that name
"""
if name not in self.registry:
raise NoOpenerError("No opener for %s" % name)
index = self.registry[name]
return self.openers[index]
def add(self, opener):
"""Adds an opener to the registry
:param opener: Opener object
:type opener: Opener inherited object
"""
index = len(self.openers)
self.openers[index] = opener
for name in opener.names:
self.registry[name] = index
def open(self, target_uri, **kwargs):
"""Open target uri.
:param target_uri: Uri to open
:type target_uri: string
:returns: Target object
"""
target = urlsplit(target_uri, scheme=self.default_opener)
opener = self.get_opener(target.scheme)
query = opener.conform_query(target.query)
target = opener.get_target(
target.scheme,
target.path,
target.fragment,
target.username,
target.password,
target.hostname,
target.port,
query,
**kwargs
)
target.opener_path = target_uri
return target
class Opener(object):
"""Base class for Opener objects.
"""
# Dictionary of expected kwargs and flag for json loading values (bool/int)
allowed_kwargs = {}
# Flag to filter out unexpected kwargs
filter_kwargs = True
@classmethod
def conform_query(cls, query):
"""Converts the query string from a target uri, uses
cls.allowed_kwargs, and cls.filter_kwargs to drive logic.
:param query: Unparsed query string
:type query: urllib.parse.unsplit(uri).query
:returns: Dictionary of parsed values, everything in cls.allowed_kwargs
with values set to True will be parsed as json strings.
"""
query = parse_qs(query, keep_blank_values=True)
# Remove any unexpected keywords from the query string.
if cls.filter_kwargs:
query = {x: y for x, y in query.items() if x in cls.allowed_kwargs}
for key, vals in query.items():
            # Multiple values of the same name could be passed; use the first.
            # Also, params without values will be treated as true values.
if cls.allowed_kwargs.get(key, False):
val = json.loads(vals[0] or 'true')
else:
val = vals[0] or 'true'
query[key] = val
return query
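    # Illustrative sketch (not part of the original class): with
    # allowed_kwargs = {'is_tmp': True, 'format': False} and
    # filter_kwargs = True, the query 'is_tmp=true&format=gzip&bogus=1'
    # conforms to {'is_tmp': True, 'format': 'gzip'} -- 'is_tmp' is
    # json-decoded, 'format' stays a string, and 'bogus' is dropped.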
@classmethod
def get_target(cls, scheme, path, fragment, username,
password, hostname, port, query, **kwargs):
"""Override this method to use values from the parsed uri to initialize
the expected target.
"""
raise NotImplementedError("get_target must be overridden")
class MockOpener(Opener):
"""Mock target opener, works like LocalTarget but files are all in
memory.
example:
* mock://foo/bar.txt
"""
names = ['mock']
allowed_kwargs = {
'is_tmp': True,
'mirror_on_stderr': True,
'format': False,
}
@classmethod
def get_target(cls, scheme, path, fragment, username,
password, hostname, port, query, **kwargs):
full_path = (hostname or '') + path
query.update(kwargs)
return MockTarget(full_path, **query)
class LocalOpener(Opener):
"""Local filesystem opener, works with any valid system path. This
is the default opener and will be used if you don't indicate which opener.
examples:
* file://relative/foo/bar/baz.txt (opens a relative file)
    * file:///home/user (opens a directory from an absolute path)
* foo/bar.baz (file:// is the default opener)
"""
names = ['file']
allowed_kwargs = {
'is_tmp': True,
'format': False,
}
@classmethod
def get_target(cls, scheme, path, fragment, username,
password, hostname, port, query, **kwargs):
full_path = (hostname or '') + path
query.update(kwargs)
return LocalTarget(full_path, **query)
class S3Opener(Opener):
"""Opens a target stored on Amazon S3 storage
examples:
* s3://bucket/foo/bar.txt
* s3://bucket/foo/bar.txt?aws_access_key_id=xxx&aws_secret_access_key=yyy
"""
names = ['s3', 's3n']
allowed_kwargs = {
'format': False,
'client': True,
}
filter_kwargs = False
@classmethod
def get_target(cls, scheme, path, fragment, username,
password, hostname, port, query, **kwargs):
query.update(kwargs)
return S3Target('{scheme}://{hostname}{path}'.format(
scheme=scheme, hostname=hostname, path=path), **query)
opener = OpenerRegistry([
MockOpener,
LocalOpener,
S3Opener,
])
OpenerTarget = opener.open
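# Illustrative (not part of the original module): query-string kwargs flow
# through conform_query, so
#   OpenerTarget('mock://foo/bar.txt?mirror_on_stderr')
# builds a MockTarget with mirror_on_stderr=True (blank values parse as true).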
|
raccoongang/edx-platform
|
refs/heads/ginkgo-rg
|
lms/djangoapps/gating/apps.py
|
62
|
"""
Django AppConfig module for the Gating app
"""
from django.apps import AppConfig
class GatingConfig(AppConfig):
"""
Django AppConfig class for the gating app
"""
name = 'gating'
def ready(self):
# Import signals to wire up the signal handlers contained within
from gating import signals # pylint: disable=unused-variable
|
colemanja91/PyEloqua-Examples
|
refs/heads/master
|
venv/lib/python3.4/site-packages/setuptools/command/upload.py
|
210
|
import getpass
from distutils.command import upload as orig
class upload(orig.upload):
"""
Override default upload behavior to obtain password
in a variety of different ways.
"""
def finalize_options(self):
orig.upload.finalize_options(self)
# Attempt to obtain password. Short circuit evaluation at the first
# sign of success.
self.password = (
self.password or
self._load_password_from_keyring() or
self._prompt_for_password()
)
def _load_password_from_keyring(self):
"""
Attempt to load password from keyring. Suppress Exceptions.
"""
try:
keyring = __import__('keyring')
return keyring.get_password(self.repository, self.username)
except Exception:
pass
def _prompt_for_password(self):
"""
Prompt for a password on the tty. Suppress Exceptions.
"""
try:
return getpass.getpass()
except (Exception, KeyboardInterrupt):
pass
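# Order-of-precedence sketch (illustrative, not part of this module): the
# short-circuit in finalize_options means, e.g.
#   'cli-pass' or keyring_value or prompt()  ->  'cli-pass'  (no lookups run)
#   None or 'from-keyring' or prompt()       ->  'from-keyring'  (no prompt)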
|
adarob/magenta
|
refs/heads/master
|
magenta/pipelines/dag_pipeline.py
|
2
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline that runs arbitrary pipelines composed in a graph.
Some terminology used in the code.
dag: Directed acyclic graph.
unit: A Pipeline which is run inside DAGPipeline.
connection: A key value pair in the DAG dictionary.
dependency: The right hand side (value in key value dictionary pair) of a DAG
connection. Can be a Pipeline, DagInput, PipelineKey, or dictionary mapping
names to one of those.
subordinate: Any DagInput, Pipeline, or PipelineKey object that appears in a
dependency.
shorthand: Convenience forms that can be put in the DAG which get converted to
  valid connections before parsing.
type signature: Something that can be returned from Pipeline's `output_type`
or `input_type`. A python class, or dictionary mapping names to classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from magenta.pipelines import pipeline
import six
class DagOutput(object):
"""Represents an output destination for a `DAGPipeline`.
Each `DagOutput(name)` instance given to DAGPipeline will
be a final output bucket with the same name. If writing
output buckets to disk, the names become dataset names.
The name can be omitted if connecting `DagOutput()` to a
dictionary mapping names to pipelines.
"""
def __init__(self, name=None):
"""Create an `DagOutput` with the given name.
Args:
name: If given, a string name which defines the name of this output.
If not given, the names in the dictionary this is connected to
will be used as output names.
"""
self.name = name
# `output_type` and `input_type` are set by DAGPipeline. Since `DagOutput`
# is not given its type, the type must be infered from what it is connected
# to in the DAG. Having `output_type` and `input_type` makes `DagOutput` act
# like a `Pipeline` in some cases.
self.output_type = None
self.input_type = None
def __eq__(self, other):
return isinstance(other, DagOutput) and other.name == self.name
def __hash__(self):
return hash(self.name)
def __repr__(self):
return 'DagOutput(%s)' % self.name
class DagInput(object):
"""Represents an input source for a `DAGPipeline`.
  Give a `DagInput` instance to `DAGPipeline` by connecting `Pipeline` objects
to it in the DAG.
When `DAGPipeline.transform` is called, the input object
  will be fed to any Pipeline instances connected to a
`DagInput` given in the DAG.
The type given to `DagInput` will be the `DAGPipeline`'s `input_type`.
"""
def __init__(self, type_):
"""Create an `DagInput` with the given type.
Args:
type_: The Python class which inputs to `DAGPipeline` should be
instances of. `DAGPipeline.input_type` will be this type.
"""
self.output_type = type_
def __eq__(self, other):
return isinstance(other, DagInput) and other.output_type == self.output_type
def __hash__(self):
return hash(self.output_type)
def __repr__(self):
return 'DagInput(%s)' % self.output_type
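# Illustrative DAG sketch (`quantizer` and `NoteSequence` are hypothetical,
# not defined in this module):
#   dag = {quantizer: DagInput(NoteSequence),
#          DagOutput('quantized'): quantizer}
# feeds every input NoteSequence into `quantizer` and writes its results to an
# output bucket named 'quantized'.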
def _all_are_type(elements, target_type):
"""Checks that all the given elements are the target type.
Args:
elements: A list of objects.
    target_type: The Python class which all elements need to be an instance of.
Returns:
True if every object in `elements` is an instance of `target_type`, and
False otherwise.
"""
return all(isinstance(elem, target_type) for elem in elements)
class InvalidDAGError(Exception):
"""Thrown when the DAG dictionary is not well formatted.
This can be because a `destination: dependency` pair is not in the form
`Pipeline: Pipeline` or `Pipeline: {'name_1': Pipeline, ...}` (Note that
both Pipeline and PipelineKey objects are allowed in the dependency). It is
also thrown when `DagInput` is given as a destination, or `DagOutput` is given
as a dependency.
"""
pass
class DuplicateNameError(Exception):
"""Thrown when two `Pipeline` instances in the DAG have the same name.
Pipeline names will be used as name spaces for the statistics they produce
and we don't want any conflicts.
"""
pass
class BadTopologyError(Exception):
"""Thrown when there is a directed cycle."""
pass
class NotConnectedError(Exception):
"""Thrown when the DAG is disconnected somewhere.
Either because a `Pipeline` used in a dependency has nothing feeding into it,
or because a `Pipeline` given as a destination does not feed anywhere.
"""
pass
class TypeMismatchError(Exception):
"""Thrown when type signatures in a connection don't match.
In the DAG's `destination: dependency` pairs, type signatures must match.
"""
pass
class BadInputOrOutputError(Exception):
"""Thrown when `DagInput` or `DagOutput` are not used in the graph correctly.
Specifically when there are no `DagInput` objects, more than one `DagInput`
with different types, or there is no `DagOutput` object.
"""
pass
class InvalidDictionaryOutputError(Exception):
"""Thrown when `DagOutput` and dictionaries are not used correctly.
Specifically when `DagOutput()` is used without a dictionary dependency, or
`DagOutput(name)` is used with a `name` and with a dictionary dependency.
"""
pass
class InvalidTransformOutputError(Exception):
"""Thrown when a Pipeline does not output types matching its `output_type`.
"""
pass
class DAGPipeline(pipeline.Pipeline):
"""A directed acyclic graph pipeline.
This Pipeline can be given an arbitrary graph composed of Pipeline instances
and will run all of those pipelines feeding outputs to inputs. See README.md
for details.
Use DAGPipeline to compose multiple smaller pipelines together.
"""
def __init__(self, dag, pipeline_name='DAGPipeline'):
"""Constructs a DAGPipeline.
A DAG (directed acyclic graph) is given which fully specifies what the
DAGPipeline runs.
Args:
dag: A dictionary mapping `Pipeline` or `DagOutput` instances to any of
`Pipeline`, `PipelineKey`, `DagInput`. `dag` defines a directed acyclic
graph.
pipeline_name: String name of this Pipeline object.
Raises:
InvalidDAGError: If each key value pair in the `dag` dictionary is
not of the form
(Pipeline or DagOutput): (Pipeline, PipelineKey, or DagInput).
TypeMismatchError: The type signature of each key and value in `dag`
must match, otherwise this will be thrown.
DuplicateNameError: If two `Pipeline` instances in `dag` have the
same string name.
BadInputOrOutputError: If there are no `DagOutput` instances in `dag`
or not exactly one `DagInput` plus type combination in `dag`.
InvalidDictionaryOutputError: If `DagOutput()` is not connected to a
dictionary, or `DagOutput(name)` is not connected to a Pipeline,
PipelineKey, or DagInput instance.
NotConnectedError: If a `Pipeline` used in a dependency has nothing
feeding into it, or a `Pipeline` used as a destination does not feed
anywhere.
BadTopologyError: If there is a directed cycle in `dag`.
Exception: Misc. exceptions.
"""
# Expand DAG shorthand.
self.dag = dict(self._expand_dag_shorthands(dag))
# Make sure DAG is valid.
# DagInput types match output types. Nothing depends on outputs.
# Things that require input get input. DAG is composed of correct types.
for unit, dependency in self.dag.items():
if not isinstance(unit, (pipeline.Pipeline, DagOutput)):
raise InvalidDAGError(
'Dependency {%s: %s} is invalid. Left hand side value %s must '
'either be a Pipeline or DagOutput object'
% (unit, dependency, unit))
if isinstance(dependency, dict):
if not all([isinstance(name, six.string_types) for name in dependency]):
raise InvalidDAGError(
'Dependency {%s: %s} is invalid. Right hand side keys %s must be '
'strings' % (unit, dependency, dependency.keys()))
values = dependency.values()
else:
values = [dependency]
for subordinate in values:
if not (isinstance(subordinate, (pipeline.Pipeline, DagInput)) or
(isinstance(subordinate, pipeline.PipelineKey) and
isinstance(subordinate.unit, pipeline.Pipeline))):
raise InvalidDAGError(
'Dependency {%s: %s} is invalid. Right hand side subordinate %s '
'must be either a Pipeline, PipelineKey, or DagInput object'
% (unit, dependency, subordinate))
# Check that all input types match output types.
if isinstance(unit, DagOutput):
# DagOutput objects don't know their types.
continue
if unit.input_type != self._get_type_signature_for_dependency(dependency):
raise TypeMismatchError(
'Invalid dependency {%s: %s}. Required `input_type` of left hand '
'side is %s. DagOutput type of right hand side is %s.'
% (unit, dependency, unit.input_type,
self._get_type_signature_for_dependency(dependency)))
# Make sure all Pipeline names are unique, so that Statistic objects don't
# clash.
sorted_unit_names = sorted(
[(unit, unit.name) for unit in self.dag],
key=lambda t: t[1])
for index, (unit, name) in enumerate(sorted_unit_names[:-1]):
if name == sorted_unit_names[index + 1][1]:
other_unit = sorted_unit_names[index + 1][0]
raise DuplicateNameError(
'Pipelines %s and %s both have name "%s". Each Pipeline must have '
'a unique name.' % (unit, other_unit, name))
# Find DagInput and DagOutput objects and make sure they are being used
# correctly.
self.outputs = [unit for unit in self.dag if isinstance(unit, DagOutput)]
self.output_names = dict((output.name, output) for output in self.outputs)
for output in self.outputs:
output.input_type = output.output_type = (
self._get_type_signature_for_dependency(self.dag[output]))
inputs = set()
for deps in self.dag.values():
units = self._get_units(deps)
for unit in units:
if isinstance(unit, DagInput):
inputs.add(unit)
if len(inputs) != 1:
if not inputs:
raise BadInputOrOutputError(
'No DagInput object found. DagInput is the start of the pipeline.')
else:
raise BadInputOrOutputError(
'Multiple DagInput objects found. Only one input is supported.')
if not self.outputs:
raise BadInputOrOutputError(
'No DagOutput objects found. DagOutput is the end of the pipeline.')
self.input = inputs.pop()
# Compute output_type for self and call super constructor.
output_signature = dict((output.name, output.output_type)
for output in self.outputs)
super(DAGPipeline, self).__init__(
input_type=self.input.output_type,
output_type=output_signature,
name=pipeline_name)
# Make sure all Pipeline objects have DAG vertices that feed into them,
# and feed their output into other DAG vertices.
all_subordinates = (
set(dep_unit for unit in self.dag
for dep_unit in self._get_units(self.dag[unit]))
.difference(set([self.input])))
all_destinations = set(self.dag.keys()).difference(set(self.outputs))
if all_subordinates != all_destinations:
units_with_no_input = all_subordinates.difference(all_destinations)
units_with_no_output = all_destinations.difference(all_subordinates)
if units_with_no_input:
raise NotConnectedError(
'%s is given as a dependency in the DAG but has nothing connected '
'to it. Nothing in the DAG feeds into it.'
% units_with_no_input.pop())
else:
raise NotConnectedError(
'%s is given as a destination in the DAG but does not output '
'anywhere. It is a deadend.' % units_with_no_output.pop())
# Construct topological ordering to determine the execution order of the
# pipelines.
# https://en.wikipedia.org/wiki/Topological_sorting#Kahn.27s_algorithm
# `graph` maps a pipeline to the pipelines it depends on. Each dict value
# is a list with the dependency pipelines in the 0th position, and a count
# of forward connections to the key pipeline (how many pipelines use this
# pipeline as a dependency).
graph = dict((unit, [self._get_units(self.dag[unit]), 0])
for unit in self.dag)
graph[self.input] = [[], 0]
for unit, (forward_connections, _) in graph.items():
for to_unit in forward_connections:
graph[to_unit][1] += 1
self.call_list = call_list = [] # Topologically sorted elements go here.
nodes = set(self.outputs)
while nodes:
n = nodes.pop()
call_list.append(n)
for m in graph[n][0]:
graph[m][1] -= 1
if graph[m][1] == 0:
nodes.add(m)
elif graph[m][1] < 0:
raise Exception(
'Congratulations, you found a bug! Please report this issue at '
'https://github.com/tensorflow/magenta/issues and copy/paste the '
'following: dag=%s, graph=%s, call_list=%s' % (self.dag, graph,
call_list))
# Check for cycles by checking if any edges remain.
for unit in graph:
if graph[unit][1] != 0:
raise BadTopologyError('Dependency loop found on %s' % unit)
# Note: this exception should never be raised. Disconnected graphs will be
# caught where NotConnectedError is raised. If this exception goes off
# there is likely a bug.
if set(call_list) != set(
list(all_subordinates) + self.outputs + [self.input]):
raise BadTopologyError('Not all pipelines feed into an output or '
'there is a dependency loop.')
call_list.reverse()
assert call_list[0] == self.input
def _expand_dag_shorthands(self, dag):
"""Expand DAG shorthand.
Currently the only shorthand is "direct connection".
A direct connection is a connection {a: b} where a.input_type is a dict,
b.output_type is a dict, and a.input_type == b.output_type. This is not
actually valid, but we can convert it to a valid connection.
{a: b} is expanded to
{a: {"name_1": b["name_1"], "name_2": b["name_2"], ...}}.
{DagOutput(): {"name_1": obj1, "name_2": obj2, ...}} is expanded to
{DagOutput("name_1"): obj1, DagOutput("name_2"): obj2, ...}.
Args:
dag: A dictionary encoding the DAG.
Yields:
Key, value pairs for a new dag dictionary.
Raises:
InvalidDictionaryOutputError: If `DagOutput` is not used correctly.
"""
for key, val in dag.items():
# Direct connection.
if (isinstance(key, pipeline.Pipeline) and
isinstance(val, pipeline.Pipeline) and
isinstance(key.input_type, dict) and
key.input_type == val.output_type):
yield key, dict((name, val[name]) for name in val.output_type)
elif key == DagOutput():
if (isinstance(val, pipeline.Pipeline) and
isinstance(val.output_type, dict)):
dependency = [(name, val[name]) for name in val.output_type]
elif isinstance(val, dict):
dependency = val.items()
else:
raise InvalidDictionaryOutputError(
'DagOutput() with no name can only be connected to a dictionary '
'or a Pipeline whose output_type is a dictionary. Found '
'DagOutput() connected to %s' % val)
for name, subordinate in dependency:
yield DagOutput(name), subordinate
elif isinstance(key, DagOutput):
if isinstance(val, dict):
raise InvalidDictionaryOutputError(
'DagOutput("%s") which has name "%s" can only be connected to a '
'single input, not dictionary %s. Use DagOutput() without name '
'instead.' % (key.name, key.name, val))
if (isinstance(val, pipeline.Pipeline) and
isinstance(val.output_type, dict)):
raise InvalidDictionaryOutputError(
'DagOutput("%s") which has name "%s" can only be connected to a '
'single input, not pipeline %s which has dictionary '
'output_type %s. Use DagOutput() without name instead.'
% (key.name, key.name, val, val.output_type))
yield key, val
else:
yield key, val
def _get_units(self, dependency):
"""Gets list of units from a dependency."""
dep_list = []
if isinstance(dependency, dict):
dep_list.extend(dependency.values())
else:
dep_list.append(dependency)
return [self._validate_subordinate(sub) for sub in dep_list]
def _validate_subordinate(self, subordinate):
"""Verifies that subordinate is DagInput, PipelineKey, or Pipeline."""
if isinstance(subordinate, pipeline.Pipeline):
return subordinate
if isinstance(subordinate, pipeline.PipelineKey):
if not isinstance(subordinate.unit, pipeline.Pipeline):
raise InvalidDAGError(
'PipelineKey object %s does not have a valid Pipeline'
% subordinate)
return subordinate.unit
if isinstance(subordinate, DagInput):
return subordinate
raise InvalidDAGError(
'Looking for Pipeline, PipelineKey, or DagInput object, but got %s'
% type(subordinate))
def _get_type_signature_for_dependency(self, dependency):
"""Gets the type signature of the dependency output."""
if isinstance(dependency,
(pipeline.Pipeline, pipeline.PipelineKey, DagInput)):
return dependency.output_type
return dict((name, sub_dep.output_type)
for name, sub_dep in dependency.items())
def transform(self, input_object):
"""Runs the DAG on the given input.
All pipelines in the DAG will run.
Args:
input_object: Any object. The required type depends on implementation.
Returns:
A dictionary mapping output names to lists of objects. The object types
depend on implementation. Each output name corresponds to an output
collection. See get_output_names method.
"""
def stats_accumulator(unit, unit_inputs, cumulative_stats):
for single_input in unit_inputs:
results_ = unit.transform(single_input)
stats = unit.get_stats()
cumulative_stats.extend(stats)
yield results_
stats = []
results = {self.input: [input_object]}
for unit in self.call_list[1:]:
# Compute transformation.
if isinstance(unit, DagOutput):
unit_outputs = self._get_outputs_as_signature(self.dag[unit], results)
else:
unit_inputs = self._get_inputs_for_unit(unit, results)
if not unit_inputs:
# If this unit has no inputs don't run it.
results[unit] = []
continue
unjoined_outputs = list(
stats_accumulator(unit, unit_inputs, stats))
unit_outputs = self._join_lists_or_dicts(unjoined_outputs, unit)
results[unit] = unit_outputs
self._set_stats(stats)
return dict((output.name, results[output]) for output in self.outputs)
def _get_outputs_as_signature(self, dependency, outputs):
"""Returns a list or dict which matches the type signature of dependency.
Args:
dependency: DagInput, PipelineKey, Pipeline instance, or dictionary
mapping names to those values.
outputs: A database of computed unit outputs. A dictionary mapping
Pipeline to list of objects.
Returns:
A list or dictionary of computed unit outputs which matches the type
signature of the given dependency.
"""
def _get_outputs_for_key(unit_or_key, outputs):
if isinstance(unit_or_key, pipeline.PipelineKey):
if not outputs[unit_or_key.unit]:
# If there are no outputs, just return nothing.
return outputs[unit_or_key.unit]
assert isinstance(outputs[unit_or_key.unit], dict)
return outputs[unit_or_key.unit][unit_or_key.key]
assert isinstance(unit_or_key, (pipeline.Pipeline, DagInput))
return outputs[unit_or_key]
if isinstance(dependency, dict):
return dict((name, _get_outputs_for_key(unit_or_key, outputs))
for name, unit_or_key in dependency.items())
return _get_outputs_for_key(dependency, outputs)
def _get_inputs_for_unit(self, unit, results,
list_operation=itertools.product):
"""Creates valid inputs for the given unit from the outputs in `results`.
Args:
unit: The `Pipeline` to create inputs for.
results: A database of computed unit outputs. A dictionary mapping
Pipeline to list of objects.
list_operation: A function that maps lists of inputs to a single list of
tuples, where each tuple is an input. This is used when `unit` takes
a dictionary as input. Each tuple is used as the values for a
dictionary input. This can be thought of as taking a sort of
transpose of a ragged 2D array.
The default is `itertools.product` which takes the cartesian product
of the input lists.
Returns:
If `unit` takes a single input, a list of objects.
If `unit` takes a dictionary input, a list of dictionaries each mapping
string name to object.
"""
previous_outputs = self._get_outputs_as_signature(self.dag[unit], results)
if isinstance(previous_outputs, dict):
names = list(previous_outputs.keys())
lists = [previous_outputs[name] for name in names]
stack = list_operation(*lists)
return [dict(zip(names, values)) for values in stack]
else:
return previous_outputs
def _join_lists_or_dicts(self, outputs, unit):
"""Joins many lists or dicts of outputs into a single list or dict.
This function also validates that the outputs are correct for the given
Pipeline.
    If `outputs` is a list of lists, the lists are concatenated and the type of
    each object must match `unit.output_type`.
    If `outputs` is a list of dicts (mapping string names to lists), each
    key has its lists concatenated across all the dicts. The keys and types
are validated against `unit.output_type`.
Args:
outputs: A list of lists, or list of dicts which map string names to
lists.
unit: A Pipeline which every output in `outputs` will be validated
against. `unit` must produce the outputs it says it will produce.
Returns:
If `outputs` is a list of lists, a single list of outputs.
If `outputs` is a list of dicts, a single dictionary mapping string names
to lists of outputs.
Raises:
InvalidTransformOutputError: If anything in `outputs` does not match
the type signature given by `unit.output_type`.
"""
if not outputs:
return []
if isinstance(unit.output_type, dict):
concated = dict((key, list()) for key in unit.output_type.keys())
for d in outputs:
if not isinstance(d, dict):
raise InvalidTransformOutputError(
'Expected dictionary output for %s with output type %s but '
'instead got type %s' % (unit, unit.output_type, type(d)))
if set(d.keys()) != set(unit.output_type.keys()):
raise InvalidTransformOutputError(
'Got dictionary output with incorrect keys for %s. Got %s. '
'Expected %s' % (unit, d.keys(), unit.output_type.keys()))
for k, val in d.items():
if not isinstance(val, list):
raise InvalidTransformOutputError(
'DagOutput from %s for key %s is not a list.' % (unit, k))
if not _all_are_type(val, unit.output_type[k]):
raise InvalidTransformOutputError(
'Some outputs from %s for key %s are not of expected type %s. '
'Got types %s' % (unit, k, unit.output_type[k],
[type(inst) for inst in val]))
concated[k] += val
else:
concated = []
for l in outputs:
if not isinstance(l, list):
raise InvalidTransformOutputError(
'Expected list output for %s with output type %s but instead got '
'type %s' % (unit, unit.output_type, type(l)))
if not _all_are_type(l, unit.output_type):
raise InvalidTransformOutputError(
'Some outputs from %s are not of expected type %s. Got types %s'
% (unit, unit.output_type, [type(inst) for inst in l]))
concated += l
return concated
|
garlandkr/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/urls.py
|
2
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
import urllib
HAS_URLLIB = True
except:
HAS_URLLIB = False
try:
import urllib2
HAS_URLLIB2 = True
except:
HAS_URLLIB2 = False
try:
import urlparse
HAS_URLPARSE = True
except:
HAS_URLPARSE = False
try:
import ssl
HAS_SSL=True
except:
HAS_SSL=False
import os
import sys
import tempfile
# NOTE: get_platform() and get_distribution() used below are provided by the
# module_utils/basic snippet that Ansible embeds alongside this one.
class RequestWithMethod(urllib2.Request):
'''
Workaround for using DELETE/PUT/etc with urllib2
Originally contained in library/net_infrastructure/dnsmadeeasy
'''
def __init__(self, url, method, data=None, headers={}):
self._method = method
urllib2.Request.__init__(self, url, data, headers)
def get_method(self):
if self._method:
return self._method
else:
return urllib2.Request.get_method(self)
class SSLValidationHandler(urllib2.BaseHandler):
'''
A custom handler class for SSL validation.
Based on:
http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
http://techknack.net/python-urllib2-handlers/
'''
def __init__(self, module, hostname, port):
self.module = module
self.hostname = hostname
self.port = port
def get_ca_certs(self):
# tries to find a valid CA cert in one of the
# standard locations for the current distribution
ca_certs = []
paths_checked = []
platform = get_platform()
distribution = get_distribution()
# build a list of paths to check for .crt/.pem files
# based on the platform type
paths_checked.append('/etc/ssl/certs')
if platform == 'Linux':
paths_checked.append('/etc/pki/ca-trust/extracted/pem')
paths_checked.append('/etc/pki/tls/certs')
paths_checked.append('/usr/share/ca-certificates/cacert.org')
elif platform == 'FreeBSD':
paths_checked.append('/usr/local/share/certs')
elif platform == 'OpenBSD':
paths_checked.append('/etc/ssl')
elif platform == 'NetBSD':
ca_certs.append('/etc/openssl/certs')
# fall back to a user-deployed cert in a standard
# location if the OS platform one is not available
paths_checked.append('/etc/ansible')
tmp_fd, tmp_path = tempfile.mkstemp()
# for all of the paths, find any .crt or .pem files
# and compile them into single temp file for use
# in the ssl check to speed up the test
for path in paths_checked:
if os.path.exists(path) and os.path.isdir(path):
dir_contents = os.listdir(path)
for f in dir_contents:
full_path = os.path.join(path, f)
if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt','.pem'):
try:
cert_file = open(full_path, 'r')
os.write(tmp_fd, cert_file.read())
cert_file.close()
except:
pass
return (tmp_path, paths_checked)
def http_request(self, req):
tmp_ca_cert_path, paths_checked = self.get_ca_certs()
try:
server_cert = ssl.get_server_certificate((self.hostname, self.port), ca_certs=tmp_ca_cert_path)
except ssl.SSLError:
# fail if we tried all of the certs but none worked
self.module.fail_json(msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \
'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \
'Paths checked for this platform: %s' % ", ".join(paths_checked))
try:
# cleanup the temp file created, don't worry
# if it fails for some reason
os.remove(tmp_ca_cert_path)
except:
pass
return req
https_request = http_request
def url_argument_spec():
'''
Creates an argument spec that can be used with any module
that will be requesting content via urllib/urllib2
'''
return dict(
url = dict(),
force = dict(default='no', aliases=['thirsty'], type='bool'),
http_agent = dict(default='ansible-httpget'),
use_proxy = dict(default='yes', type='bool'),
validate_certs = dict(default='yes', type='bool'),
)
def fetch_url(module, url, data=None, headers=None, method=None,
use_proxy=False, force=False, last_mod_time=None, timeout=10):
'''
Fetches a file from an HTTP/FTP server using urllib2
'''
if not HAS_URLLIB:
module.fail_json(msg='urllib is not installed')
if not HAS_URLLIB2:
module.fail_json(msg='urllib2 is not installed')
elif not HAS_URLPARSE:
module.fail_json(msg='urlparse is not installed')
r = None
handlers = []
info = dict(url=url)
# Get validate_certs from the module params
validate_certs = module.params.get('validate_certs', True)
parsed = urlparse.urlparse(url)
if parsed[0] == 'https':
if not HAS_SSL and validate_certs:
module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended')
elif validate_certs:
# do the cert validation
netloc = parsed[1]
if '@' in netloc:
netloc = netloc.split('@', 1)[1]
            if ':' in netloc:
                hostname, port = netloc.split(':', 1)
                # ssl.get_server_certificate() needs an integer port
                port = int(port)
            else:
                hostname = netloc
                port = 443
# create the SSL validation handler and
# add it to the list of handlers
ssl_handler = SSLValidationHandler(module, hostname, port)
handlers.append(ssl_handler)
if '@' in parsed[1]:
credentials, netloc = parsed[1].split('@', 1)
if ':' in credentials:
username, password = credentials.split(':', 1)
else:
username = credentials
password = ''
parsed = list(parsed)
parsed[1] = netloc
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
# this creates a password manager
passman.add_password(None, netloc, username, password)
        # because the realm argument is None, this username/password pair
        # will be used for any URL under `netloc`
authhandler = urllib2.HTTPBasicAuthHandler(passman)
# create the AuthHandler
handlers.append(authhandler)
#reconstruct url without credentials
url = urlparse.urlunparse(parsed)
if not use_proxy:
proxyhandler = urllib2.ProxyHandler({})
handlers.append(proxyhandler)
opener = urllib2.build_opener(*handlers)
urllib2.install_opener(opener)
if method:
if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT'):
module.fail_json(msg='invalid HTTP request method; %s' % method.upper())
request = RequestWithMethod(url, method.upper(), data)
else:
request = urllib2.Request(url, data)
# add the custom agent header, to help prevent issues
# with sites that block the default urllib agent string
request.add_header('User-agent', module.params.get('http_agent'))
# if we're ok with getting a 304, set the timestamp in the
# header, otherwise make sure we don't get a cached copy
if last_mod_time and not force:
tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
request.add_header('If-Modified-Since', tstamp)
else:
request.add_header('cache-control', 'no-cache')
# user defined headers now, which may override things we've set above
if headers:
if not isinstance(headers, dict):
            # fail_json expects the message via the `msg` keyword argument
            module.fail_json(msg="headers provided to fetch_url() must be a dict")
for header in headers:
request.add_header(header, headers[header])
try:
if sys.version_info < (2,6,0):
# urlopen in python prior to 2.6.0 did not
# have a timeout parameter
r = urllib2.urlopen(request, None)
else:
r = urllib2.urlopen(request, None, timeout)
info.update(r.info())
info['url'] = r.geturl() # The URL goes in too, because of redirects.
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200))
except urllib2.HTTPError, e:
info.update(dict(msg=str(e), status=e.code))
except urllib2.URLError, e:
code = int(getattr(e, 'code', -1))
info.update(dict(msg="Request failed: %s" % str(e), status=code))
return r, info
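# A minimal usage sketch (illustrative only, not part of the original file).
# A module would typically build its argument spec from url_argument_spec()
# and then call fetch_url; the URL below is a placeholder:
#
#   module = AnsibleModule(argument_spec=url_argument_spec())
#   response, info = fetch_url(module, 'https://example.com/data.json')
#   if info['status'] != 200:
#       module.fail_json(msg='Request failed: %s' % info['msg'])
#   body = response.read()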
|
mapnik/mapnik
|
refs/heads/master
|
utils/mapnik-render/build.py
|
8
|
import os
import glob
from copy import copy
Import ('env')
program_env = env.Clone()
source = Split(
"""
mapnik-render.cpp
"""
)
program_env['CXXFLAGS'] = copy(env['LIBMAPNIK_CXXFLAGS'])
program_env.Append(CPPDEFINES = env['LIBMAPNIK_DEFINES'])
if env['HAS_CAIRO']:
program_env.PrependUnique(CPPPATH=env['CAIRO_CPPPATHS'])
program_env.Append(CPPDEFINES = '-DHAVE_CAIRO')
boost_program_options = 'boost_program_options%s' % env['BOOST_APPEND']
libraries = [env['MAPNIK_NAME'],boost_program_options]
libraries.extend(copy(env['LIBMAPNIK_LIBS']))
if env['RUNTIME_LINK'] == 'static' and env['PLATFORM'] == 'Linux':
libraries.append('dl')
mapnik_render = program_env.Program('mapnik-render', source, LIBS=libraries)
Depends(mapnik_render, env.subst('../../src/%s' % env['MAPNIK_LIB_NAME']))
if 'uninstall' not in COMMAND_LINE_TARGETS:
env.Install(os.path.join(env['INSTALL_PREFIX'],'bin'), mapnik_render)
env.Alias('install', os.path.join(env['INSTALL_PREFIX'],'bin'))
env['create_uninstall_target'](env, os.path.join(env['INSTALL_PREFIX'],'bin','mapnik-render'))
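# Usage note (illustrative, not part of the original file): this script is
# consumed by Mapnik's top-level SConstruct via SConscript; running
# `scons install` from the source root should build mapnik-render and
# install it under $INSTALL_PREFIX/bin.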
|
pybrain/pybrain
|
refs/heads/master
|
pybrain/datasets/supervised.py
|
21
|
from __future__ import print_function
__author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
from numpy import random
from random import sample
from scipy import isscalar
from pybrain.datasets.dataset import DataSet
from pybrain.utilities import fListToString
class SupervisedDataSet(DataSet):
"""SupervisedDataSets have two fields, one for input and one for the target.
"""
def __init__(self, inp, target):
"""Initialize an empty supervised dataset.
Pass `inp` and `target` to specify the dimensions of the input and
target vectors."""
DataSet.__init__(self)
if isscalar(inp):
# add input and target fields and link them
self.addField('input', inp)
self.addField('target', target)
else:
self.setField('input', inp)
self.setField('target', target)
self.linkFields(['input', 'target'])
# reset the index marker
self.index = 0
# the input and target dimensions
self.indim = self.getDimension('input')
self.outdim = self.getDimension('target')
def __reduce__(self):
_, _, state, _, _ = super(SupervisedDataSet, self).__reduce__()
creator = self.__class__
args = self.indim, self.outdim
return creator, args, state, iter([]), iter({})
def addSample(self, inp, target):
"""Add a new sample consisting of `input` and `target`."""
self.appendLinked(inp, target)
def getSample(self, index=None):
"""Return a sample at `index` or the current sample."""
return self.getLinked(index)
def setField(self, label, arr, **kwargs):
"""Set the given array `arr` as the new array of the field specfied by
`label`."""
DataSet.setField(self, label, arr, **kwargs)
# refresh dimensions, in case any of these fields were modified
if label == 'input':
self.indim = self.getDimension('input')
elif label == 'target':
self.outdim = self.getDimension('target')
def _provideSequences(self):
"""Return an iterator over sequence lists, although the dataset contains
only single samples."""
return iter([[x] for x in iter(self)])
def evaluateMSE(self, f, **args):
"""Evaluate the predictions of a function on the dataset and return the
Mean Squared Error, incorporating importance."""
ponderation = 0.
totalError = 0
for seq in self._provideSequences():
e, p = self._evaluateSequence(f, seq, **args)
totalError += e
ponderation += p
assert ponderation > 0
return totalError/ponderation
def _evaluateSequence(self, f, seq, verbose = False):
"""Return the ponderated MSE over one sequence."""
totalError = 0.
ponderation = 0.
for input, target in seq:
res = f(input)
e = 0.5 * sum((target-res).flatten()**2)
totalError += e
ponderation += len(target)
if verbose:
print(( 'out: ', fListToString( list( res ) )))
print(( 'correct:', fListToString( target )))
print(( 'error: % .8f' % e))
return totalError, ponderation
def evaluateModuleMSE(self, module, averageOver = 1, **args):
"""Evaluate the predictions of a module on a dataset and return the MSE
(potentially average over a number of epochs)."""
res = 0.
for dummy in range(averageOver):
module.reset()
res += self.evaluateMSE(module.activate, **args)
return res/averageOver
def splitWithProportion(self, proportion = 0.5):
"""Produce two new datasets, the first one containing the fraction given
by `proportion` of the samples."""
        indices = random.permutation(len(self))
        separator = int(len(self) * proportion)
        leftIndices = indices[:separator]
        rightIndices = indices[separator:]
        leftDs = SupervisedDataSet(inp=self['input'][leftIndices].copy(),
                                   target=self['target'][leftIndices].copy())
        rightDs = SupervisedDataSet(inp=self['input'][rightIndices].copy(),
                                    target=self['target'][rightIndices].copy())
return leftDs, rightDs
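# A minimal usage sketch (illustrative, not part of the original module):
# a 2-input/1-output dataset for XOR, split 75/25 into train and test sets.
#
#   ds = SupervisedDataSet(2, 1)
#   for inp, target in [((0, 0), (0,)), ((0, 1), (1,)),
#                       ((1, 0), (1,)), ((1, 1), (0,))]:
#       ds.addSample(inp, target)
#   train, test = ds.splitWithProportion(0.75)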
|
Ingesup-Lab-OS/OS-Lend-Frontend
|
refs/heads/master
|
lend_frontend/lend_frontend/utils/keystone_helper.py
|
1
|
from keystoneclient.v2_0 import client
class KeystoneHelper:
def __init__(self, **kwargs):
self.client = client.Client(**kwargs)
self.endpoint = self.client.service_catalog.url_for(
            service_type='orchestration',
            endpoint_type='publicURL'
)
self.token = self.client.auth_token
def get_client(self):
return self.client
def get_endpoint(self):
return self.endpoint
def get_token(self):
return self.token
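# A minimal usage sketch (illustrative; all credential values below are
# placeholders):
#
#   helper = KeystoneHelper(username='admin', password='secret',
#                           tenant_name='demo',
#                           auth_url='http://keystone:5000/v2.0')
#   heat_endpoint = helper.get_endpoint()
#   token = helper.get_token()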
|
guorendong/iridium-browser-ubuntu
|
refs/heads/ubuntu/precise
|
net/tools/testserver/testserver_base.py
|
51
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import errno
import json
import optparse
import os
import re
import socket
import SocketServer
import struct
import sys
import warnings
import tlslite.errors
# Ignore deprecation warnings, they make our output more cluttered.
warnings.filterwarnings("ignore", category=DeprecationWarning)
if sys.platform == 'win32':
import msvcrt
# Using debug() seems to cause hangs on XP: see http://crbug.com/64515.
debug_output = sys.stderr
def debug(string):
debug_output.write(string + "\n")
debug_output.flush()
class Error(Exception):
"""Error class for this module."""
class OptionError(Error):
"""Error for bad command line options."""
class FileMultiplexer(object):
  def __init__(self, fd1, fd2):
    self.__fd1 = fd1
    self.__fd2 = fd2
  def __del__(self):
    if self.__fd1 != sys.stdout and self.__fd1 != sys.stderr:
      self.__fd1.close()
    if self.__fd2 != sys.stdout and self.__fd2 != sys.stderr:
      self.__fd2.close()
  def write(self, text):
    self.__fd1.write(text)
    self.__fd2.write(text)
  def flush(self):
    self.__fd1.flush()
    self.__fd2.flush()
class ClientRestrictingServerMixIn:
"""Implements verify_request to limit connections to our configured IP
address."""
def verify_request(self, _request, client_address):
return client_address[0] == self.server_address[0]
class BrokenPipeHandlerMixIn:
"""Allows the server to deal with "broken pipe" errors (which happen if the
browser quits with outstanding requests, like for the favicon). This mix-in
requires the class to derive from SocketServer.BaseServer and not override its
handle_error() method. """
def handle_error(self, request, client_address):
value = sys.exc_info()[1]
if isinstance(value, tlslite.errors.TLSClosedConnectionError):
print "testserver.py: Closed connection"
return
if isinstance(value, socket.error):
err = value.args[0]
if sys.platform in ('win32', 'cygwin'):
# "An established connection was aborted by the software in your host."
pipe_err = 10053
else:
pipe_err = errno.EPIPE
if err == pipe_err:
print "testserver.py: Broken pipe"
return
if err == errno.ECONNRESET:
print "testserver.py: Connection reset by peer"
return
SocketServer.BaseServer.handle_error(self, request, client_address)
class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
"""This is a specialization of BaseHTTPServer to allow it
to be exited cleanly (by setting its "stop" member to True)."""
def serve_forever(self):
self.stop = False
self.nonce_time = None
while not self.stop:
self.handle_request()
self.socket.close()
def MultiplexerHack(std_fd, log_fd):
"""Creates a FileMultiplexer that will write to both specified files.
When running on Windows XP bots, stdout and stderr will be invalid file
handles, so log_fd will be returned directly. (This does not occur if you
run the test suite directly from a console, but only if the output of the
test executable is redirected.)
"""
if std_fd.fileno() <= 0:
return log_fd
return FileMultiplexer(std_fd, log_fd)
class BasePageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, request, client_address, socket_server,
connect_handlers, get_handlers, head_handlers, post_handlers,
put_handlers):
self._connect_handlers = connect_handlers
self._get_handlers = get_handlers
self._head_handlers = head_handlers
self._post_handlers = post_handlers
self._put_handlers = put_handlers
BaseHTTPServer.BaseHTTPRequestHandler.__init__(
self, request, client_address, socket_server)
def log_request(self, *args, **kwargs):
# Disable request logging to declutter test log output.
pass
def _ShouldHandleRequest(self, handler_name):
"""Determines if the path can be handled by the handler.
We consider a handler valid if the path begins with the
handler name. It can optionally be followed by "?*", "/*".
"""
    pattern = re.compile(r'%s($|\?|/).*' % handler_name)
return pattern.match(self.path)
def do_CONNECT(self):
for handler in self._connect_handlers:
if handler():
return
def do_GET(self):
for handler in self._get_handlers:
if handler():
return
def do_HEAD(self):
for handler in self._head_handlers:
if handler():
return
def do_POST(self):
for handler in self._post_handlers:
if handler():
return
def do_PUT(self):
for handler in self._put_handlers:
if handler():
return
class TestServerRunner(object):
"""Runs a test server and communicates with the controlling C++ test code.
Subclasses should override the create_server method to create their server
object, and the add_options method to add their own options.
"""
def __init__(self):
self.option_parser = optparse.OptionParser()
self.add_options()
def main(self):
self.options, self.args = self.option_parser.parse_args()
logfile = open(self.options.log_file, 'w')
# http://crbug.com/248796 : Error logs streamed to normal sys.stderr will be
# written to HTTP response payload when remote test server is used.
# For this reason, some tests like ResourceFetcherTests.ResourceFetcher404
# were failing on Android because remote test server is being used there.
# To fix them, we need to use sys.stdout as sys.stderr if remote test server
# is used.
if self.options.on_remote_server:
sys.stderr = sys.stdout
sys.stderr = MultiplexerHack(sys.stderr, logfile)
if self.options.log_to_console:
sys.stdout = MultiplexerHack(sys.stdout, logfile)
else:
sys.stdout = logfile
server_data = {
'host': self.options.host,
}
self.server = self.create_server(server_data)
self._notify_startup_complete(server_data)
self.run_server()
def create_server(self, server_data):
"""Creates a server object and returns it.
Must populate server_data['port'], and can set additional server_data
elements if desired."""
raise NotImplementedError()
def run_server(self):
try:
self.server.serve_forever()
except KeyboardInterrupt:
print 'shutting down server'
self.server.stop = True
def add_options(self):
self.option_parser.add_option('--startup-pipe', type='int',
dest='startup_pipe',
help='File handle of pipe to parent process')
self.option_parser.add_option('--log-to-console', action='store_const',
const=True, default=False,
dest='log_to_console',
help='Enables or disables sys.stdout logging '
'to the console.')
self.option_parser.add_option('--log-file', default='testserver.log',
dest='log_file',
help='The name of the server log file.')
self.option_parser.add_option('--port', default=0, type='int',
help='Port used by the server. If '
'unspecified, the server will listen on an '
'ephemeral port.')
self.option_parser.add_option('--host', default='127.0.0.1',
dest='host',
help='Hostname or IP upon which the server '
'will listen. Client connections will also '
'only be allowed from this address.')
self.option_parser.add_option('--data-dir', dest='data_dir',
help='Directory from which to read the '
'files.')
self.option_parser.add_option('--on-remote-server', action='store_const',
const=True, default=False,
dest='on_remote_server',
help='Whether remote server is being used or '
'not.')
def _notify_startup_complete(self, server_data):
# Notify the parent that we've started. (BaseServer subclasses
# bind their sockets on construction.)
if self.options.startup_pipe is not None:
server_data_json = json.dumps(server_data)
server_data_len = len(server_data_json)
print 'sending server_data: %s (%d bytes)' % (
server_data_json, server_data_len)
if sys.platform == 'win32':
fd = msvcrt.open_osfhandle(self.options.startup_pipe, 0)
else:
fd = self.options.startup_pipe
startup_pipe = os.fdopen(fd, "w")
# First write the data length as an unsigned 4-byte value. This
# is _not_ using network byte ordering since the other end of the
# pipe is on the same machine.
startup_pipe.write(struct.pack('=L', server_data_len))
startup_pipe.write(server_data_json)
startup_pipe.close()
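# A minimal subclassing sketch (illustrative, not part of the original file),
# following the contract described in TestServerRunner's docstring. The
# runner below is hypothetical and reuses the stock request handler:
#
#   class EchoServerRunner(TestServerRunner):
#     def create_server(self, server_data):
#       server = StoppableHTTPServer(
#           (self.options.host, self.options.port),
#           BaseHTTPServer.BaseHTTPRequestHandler)
#       server_data['port'] = server.server_port
#       return server
#
#   if __name__ == '__main__':
#     EchoServerRunner().main()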
|
extremewaysback/django
|
refs/heads/master
|
tests/sessions_tests/custom_db_backend.py
|
159
|
"""
This custom Session model adds an extra column to store an account ID. In
real-world applications, it gives you the option of querying the database for
all active sessions for a particular account.
"""
from django.contrib.sessions.backends.db import SessionStore as DBStore
from django.contrib.sessions.base_session import AbstractBaseSession
from django.db import models
class CustomSession(AbstractBaseSession):
"""
A session model with a column for an account ID.
"""
account_id = models.IntegerField(null=True, db_index=True)
class Meta:
app_label = 'sessions'
@classmethod
def get_session_store_class(cls):
return SessionStore
class SessionStore(DBStore):
"""
A database session store, that handles updating the account ID column
inside the custom session model.
"""
@classmethod
def get_model_class(cls):
return CustomSession
def create_model_instance(self, data):
obj = super(SessionStore, self).create_model_instance(data)
try:
account_id = int(data.get('_auth_user_id'))
except (ValueError, TypeError):
account_id = None
obj.account_id = account_id
return obj
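# A minimal query sketch (illustrative, not part of the original file): the
# extra column makes it possible to find all unexpired sessions for one
# account, as the module docstring describes.
#
#   from django.utils import timezone
#   CustomSession.objects.filter(account_id=42,
#                                expire_date__gt=timezone.now())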
|
rynomster/django
|
refs/heads/master
|
django/contrib/gis/geos/libgeos.py
|
79
|
"""
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr() and GEOM_PTR.
"""
import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, POINTER, Structure, c_char_p
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject
logger = logging.getLogger('django.contrib.gis')
def load_geos():
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError,
ImportError, ImproperlyConfigured):
lib_path = None
# Setting the appropriate names for the GEOS-C library.
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT libraries
lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
# *NIX libraries
lib_names = ['geos_c', 'GEOS']
else:
raise ImportError('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
# No GEOS library could be found.
if lib_path is None:
raise ImportError(
'Could not find the GEOS library (tried "%s"). '
'Try setting GEOS_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names)
)
# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
# http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
_lgeos = CDLL(lib_path)
# Here we set up the prototypes for the initGEOS_r and finishGEOS_r
# routines. These functions aren't actually called until they are
# attached to a GEOS context handle -- this actually occurs in
# geos/prototypes/threadsafe.py.
_lgeos.initGEOS_r.restype = CONTEXT_PTR
_lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
return _lgeos
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
warn_msg = fmt % lst
except TypeError:
warn_msg = fmt
logger.warning('GEOS_NOTICE: %s\n', warn_msg)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
err_msg = fmt % lst
except TypeError:
err_msg = fmt
logger.error('GEOS_ERROR: %s\n', err_msg)
error_h = ERRORFUNC(error_h)
# #### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
pass
class GEOSPrepGeom_t(Structure):
pass
class GEOSCoordSeq_t(Structure):
pass
class GEOSContextHandle_t(Structure):
pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
"Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
GeomArr = GEOM_PTR * n
return GeomArr()
lgeos = SimpleLazyObject(load_geos)
class GEOSFuncFactory(object):
"""
Lazy loading of GEOS functions.
"""
argtypes = None
restype = None
errcheck = None
def __init__(self, func_name, *args, **kwargs):
self.func_name = func_name
self.restype = kwargs.pop('restype', self.restype)
self.errcheck = kwargs.pop('errcheck', self.errcheck)
self.argtypes = kwargs.pop('argtypes', self.argtypes)
self.args = args
self.kwargs = kwargs
self.func = None
def __call__(self, *args, **kwargs):
if self.func is None:
self.func = self.get_func(*self.args, **self.kwargs)
return self.func(*args, **kwargs)
def get_func(self, *args, **kwargs):
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
func = GEOSFunc(self.func_name)
func.argtypes = self.argtypes or []
func.restype = self.restype
if self.errcheck:
func.errcheck = self.errcheck
return func
# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms.
geos_version = GEOSFuncFactory('GEOSversion', restype=c_char_p)
# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
version_regex = re.compile(
r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)
def geos_version_info():
"""
Returns a dictionary containing the various version metadata parsed from
the GEOS version string, including the version number, whether the version
is a release candidate (and what number release candidate), and the C API
version.
"""
ver = geos_version().decode()
m = version_regex.match(ver)
if not m:
raise GEOSException('Could not parse version info string "%s"' % ver)
return {key: m.group(key) for key in (
'version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor')}
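# A sample result (illustrative, not part of the original file): for a GEOS
# version string such as '3.4.2-CAPI-1.8.2 r3921', geos_version_info()
# would return
#   {'version': '3.4.2', 'major': '3', 'minor': '4', 'subminor': '2',
#    'release_candidate': None, 'capi_version': '1.8.2'}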
|
dvliman/jaikuengine
|
refs/heads/master
|
.google_appengine/google/appengine/_internal/django/core/management/commands/sql.py
|
23
|
from optparse import make_option
from google.appengine._internal.django.core.management.base import AppCommand
from google.appengine._internal.django.core.management.sql import sql_create
from google.appengine._internal.django.db import connections, DEFAULT_DB_ALIAS
class Command(AppCommand):
help = "Prints the CREATE TABLE SQL statements for the given app name(s)."
option_list = AppCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to print the '
'SQL for. Defaults to the "default" database.'),
)
output_transaction = True
def handle_app(self, app, **options):
return u'\n'.join(sql_create(app, self.style, connections[options.get('database', DEFAULT_DB_ALIAS)])).encode('utf-8')
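# Usage note (illustrative, not part of the original file): this management
# command is invoked as, e.g.,
#   python manage.py sql myapp --database=default
# and prints the CREATE TABLE statements for `myapp` without executing them.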
|
jlnaudin/x-drone
|
refs/heads/master
|
MissionPlanner-master/Lib/smtpd.py
|
76
|
#! /usr/bin/env python
"""An RFC 2821 smtp proxy.
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
Options:
--nosetuid
-n
This program generally tries to setuid `nobody', unless this flag is
set. The setuid call will fail if this program is not run as root (in
which case, use this flag).
--version
-V
Print the version number and exit.
--class classname
-c classname
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
default.
--debug
-d
Turn on debugging prints.
--help
-h
Print this message and exit.
Version: %(__version__)s
If localhost is not given then `localhost' is used, and if localport is not
given then 8025 is used. If remotehost is not given then `localhost' is used,
and if remoteport is not given, then 25 is used.
"""
# Overview:
#
# This file implements the minimal SMTP protocol as defined in RFC 821. It
# has a hierarchy of classes which implement the backend functionality for the
# smtpd. A number of classes are provided:
#
# SMTPServer - the base class for the backend. Raises NotImplementedError
# if you try to use it.
#
# DebuggingServer - simply prints each message it receives on stdout.
#
# PureProxy - Proxies all messages to a real smtpd which does final
# delivery. One known problem with this class is that it doesn't handle
# SMTP errors from the backend server at all. This should be fixed
# (contributions are welcome!).
#
# MailmanProxy - An experimental hack to work with GNU Mailman
# <www.list.org>. Using this server as your real incoming smtpd, your
# mailhost will automatically recognize and accept mail destined to Mailman
# lists when those lists are created. Every message not destined for a list
# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
# are not handled correctly yet.
#
# Please note that this script requires Python 2.0
#
# Author: Barry Warsaw <barry@python.org>
#
# TODO:
#
# - support mailbox delivery
# - alias files
# - ESMTP
# - handle error codes from the backend smtpd
import sys
import os
import errno
import getopt
import time
import socket
import asyncore
import asynchat
__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]
program = sys.argv[0]
__version__ = 'Python SMTP proxy version 0.2'
class Devnull:
def write(self, msg): pass
def flush(self): pass
DEBUGSTREAM = Devnull()
NEWLINE = '\n'
EMPTYSTRING = ''
COMMASPACE = ', '
def usage(code, msg=''):
print >> sys.stderr, __doc__ % globals()
if msg:
print >> sys.stderr, msg
sys.exit(code)
class SMTPChannel(asynchat.async_chat):
COMMAND = 0
DATA = 1
def __init__(self, server, conn, addr):
asynchat.async_chat.__init__(self, conn)
self.__server = server
self.__conn = conn
self.__addr = addr
self.__line = []
self.__state = self.COMMAND
self.__greeting = 0
self.__mailfrom = None
self.__rcpttos = []
self.__data = ''
self.__fqdn = socket.getfqdn()
try:
self.__peer = conn.getpeername()
except socket.error, err:
# a race condition may occur if the other end is closing
# before we can get the peername
self.close()
if err[0] != errno.ENOTCONN:
raise
return
print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
self.push('220 %s %s' % (self.__fqdn, __version__))
self.set_terminator('\r\n')
# Overrides base class for convenience
def push(self, msg):
asynchat.async_chat.push(self, msg + '\r\n')
# Implementation of base class abstract method
def collect_incoming_data(self, data):
self.__line.append(data)
# Implementation of base class abstract method
def found_terminator(self):
line = EMPTYSTRING.join(self.__line)
print >> DEBUGSTREAM, 'Data:', repr(line)
self.__line = []
if self.__state == self.COMMAND:
if not line:
self.push('500 Error: bad syntax')
return
method = None
i = line.find(' ')
if i < 0:
command = line.upper()
arg = None
else:
command = line[:i].upper()
arg = line[i+1:].strip()
method = getattr(self, 'smtp_' + command, None)
if not method:
self.push('502 Error: command "%s" not implemented' % command)
return
method(arg)
return
else:
if self.__state != self.DATA:
self.push('451 Internal confusion')
return
# Remove extraneous carriage returns and de-transparency according
# to RFC 821, Section 4.5.2.
data = []
for text in line.split('\r\n'):
if text and text[0] == '.':
data.append(text[1:])
else:
data.append(text)
self.__data = NEWLINE.join(data)
status = self.__server.process_message(self.__peer,
self.__mailfrom,
self.__rcpttos,
self.__data)
self.__rcpttos = []
self.__mailfrom = None
self.__state = self.COMMAND
self.set_terminator('\r\n')
if not status:
self.push('250 Ok')
else:
self.push(status)
# SMTP and ESMTP commands
def smtp_HELO(self, arg):
if not arg:
self.push('501 Syntax: HELO hostname')
return
if self.__greeting:
self.push('503 Duplicate HELO/EHLO')
else:
self.__greeting = arg
self.push('250 %s' % self.__fqdn)
def smtp_NOOP(self, arg):
if arg:
self.push('501 Syntax: NOOP')
else:
self.push('250 Ok')
def smtp_QUIT(self, arg):
        # arg is ignored
self.push('221 Bye')
self.close_when_done()
    # Helper factored out of smtp_MAIL and smtp_RCPT
def __getaddr(self, keyword, arg):
address = None
keylen = len(keyword)
if arg[:keylen].upper() == keyword:
address = arg[keylen:].strip()
if not address:
pass
elif address[0] == '<' and address[-1] == '>' and address != '<>':
# Addresses can be in the form <person@dom.com> but watch out
# for null address, e.g. <>
address = address[1:-1]
return address
def smtp_MAIL(self, arg):
print >> DEBUGSTREAM, '===> MAIL', arg
address = self.__getaddr('FROM:', arg) if arg else None
if not address:
self.push('501 Syntax: MAIL FROM:<address>')
return
if self.__mailfrom:
self.push('503 Error: nested MAIL command')
return
self.__mailfrom = address
print >> DEBUGSTREAM, 'sender:', self.__mailfrom
self.push('250 Ok')
def smtp_RCPT(self, arg):
print >> DEBUGSTREAM, '===> RCPT', arg
if not self.__mailfrom:
self.push('503 Error: need MAIL command')
return
address = self.__getaddr('TO:', arg) if arg else None
if not address:
self.push('501 Syntax: RCPT TO: <address>')
return
self.__rcpttos.append(address)
print >> DEBUGSTREAM, 'recips:', self.__rcpttos
self.push('250 Ok')
def smtp_RSET(self, arg):
if arg:
self.push('501 Syntax: RSET')
return
# Resets the sender, recipients, and data, but not the greeting
self.__mailfrom = None
self.__rcpttos = []
self.__data = ''
self.__state = self.COMMAND
self.push('250 Ok')
def smtp_DATA(self, arg):
if not self.__rcpttos:
self.push('503 Error: need RCPT command')
return
if arg:
self.push('501 Syntax: DATA')
return
self.__state = self.DATA
self.set_terminator('\r\n.\r\n')
self.push('354 End data with <CR><LF>.<CR><LF>')
class SMTPServer(asyncore.dispatcher):
def __init__(self, localaddr, remoteaddr):
self._localaddr = localaddr
self._remoteaddr = remoteaddr
asyncore.dispatcher.__init__(self)
try:
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
# try to re-use a server port if possible
self.set_reuse_addr()
self.bind(localaddr)
self.listen(5)
except:
# cleanup asyncore.socket_map before raising
self.close()
raise
else:
print >> DEBUGSTREAM, \
'%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
self.__class__.__name__, time.ctime(time.time()),
localaddr, remoteaddr)
def handle_accept(self):
pair = self.accept()
if pair is not None:
conn, addr = pair
print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
channel = SMTPChannel(self, conn, addr)
# API for "doing something useful with the message"
def process_message(self, peer, mailfrom, rcpttos, data):
"""Override this abstract method to handle messages from the client.
peer is a tuple containing (ipaddr, port) of the client that made the
socket connection to our smtp port.
mailfrom is the raw address the client claims the message is coming
from.
rcpttos is a list of raw addresses the client wishes to deliver the
message to.
data is a string containing the entire full text of the message,
headers (if supplied) and all. It has been `de-transparencied'
according to RFC 821, Section 4.5.2. In other words, a line
containing a `.' followed by other text has had the leading dot
removed.
This function should return None, for a normal `250 Ok' response;
otherwise it returns the desired response string in RFC 821 format.
"""
raise NotImplementedError
class DebuggingServer(SMTPServer):
# Do something with the gathered message
def process_message(self, peer, mailfrom, rcpttos, data):
inheaders = 1
lines = data.split('\n')
print '---------- MESSAGE FOLLOWS ----------'
for line in lines:
# headers first
if inheaders and not line:
print 'X-Peer:', peer[0]
inheaders = 0
print line
print '------------ END MESSAGE ------------'
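# A minimal usage sketch (illustrative, not part of the original module):
# run a DebuggingServer on localhost:8025 and print every message it
# receives to stdout (the remote address is unused by this class):
#
#   server = DebuggingServer(('localhost', 8025), None)
#   asyncore.loop()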
class PureProxy(SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
lines = data.split('\n')
# Look for the last header
i = 0
for line in lines:
if not line:
break
i += 1
lines.insert(i, 'X-Peer: %s' % peer[0])
data = NEWLINE.join(lines)
refused = self._deliver(mailfrom, rcpttos, data)
# TBD: what to do with refused addresses?
print >> DEBUGSTREAM, 'we got some refusals:', refused
def _deliver(self, mailfrom, rcpttos, data):
import smtplib
refused = {}
try:
s = smtplib.SMTP()
s.connect(self._remoteaddr[0], self._remoteaddr[1])
try:
refused = s.sendmail(mailfrom, rcpttos, data)
finally:
s.quit()
except smtplib.SMTPRecipientsRefused, e:
print >> DEBUGSTREAM, 'got SMTPRecipientsRefused'
refused = e.recipients
except (socket.error, smtplib.SMTPException), e:
print >> DEBUGSTREAM, 'got', e.__class__
# All recipients were refused. If the exception had an associated
            # error code, use it. Otherwise, fake it with a non-triggering
# exception code.
errcode = getattr(e, 'smtp_code', -1)
errmsg = getattr(e, 'smtp_error', 'ignore')
for r in rcpttos:
refused[r] = (errcode, errmsg)
return refused
class MailmanProxy(PureProxy):
def process_message(self, peer, mailfrom, rcpttos, data):
from cStringIO import StringIO
from Mailman import Utils
from Mailman import Message
from Mailman import MailList
# If the message is to a Mailman mailing list, then we'll invoke the
# Mailman script directly, without going through the real smtpd.
# Otherwise we'll forward it to the local proxy for disposition.
listnames = []
for rcpt in rcpttos:
local = rcpt.lower().split('@')[0]
# We allow the following variations on the theme
# listname
# listname-admin
# listname-owner
# listname-request
# listname-join
# listname-leave
parts = local.split('-')
if len(parts) > 2:
continue
listname = parts[0]
if len(parts) == 2:
command = parts[1]
else:
command = ''
if not Utils.list_exists(listname) or command not in (
'', 'admin', 'owner', 'request', 'join', 'leave'):
continue
listnames.append((rcpt, listname, command))
# Remove all list recipients from rcpttos and forward what we're not
# going to take care of ourselves. Linear removal should be fine
# since we don't expect a large number of recipients.
for rcpt, listname, command in listnames:
rcpttos.remove(rcpt)
        # If there are any non-list destined recipients left,
print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
if rcpttos:
refused = self._deliver(mailfrom, rcpttos, data)
# TBD: what to do with refused addresses?
print >> DEBUGSTREAM, 'we got refusals:', refused
# Now deliver directly to the list commands
mlists = {}
s = StringIO(data)
msg = Message.Message(s)
# These headers are required for the proper execution of Mailman. All
# MTAs in existence seem to add these if the original message doesn't
# have them.
if not msg.getheader('from'):
msg['From'] = mailfrom
if not msg.getheader('date'):
msg['Date'] = time.ctime(time.time())
for rcpt, listname, command in listnames:
print >> DEBUGSTREAM, 'sending message to', rcpt
mlist = mlists.get(listname)
if not mlist:
mlist = MailList.MailList(listname, lock=0)
mlists[listname] = mlist
# dispatch on the type of command
if command == '':
# post
msg.Enqueue(mlist, tolist=1)
elif command == 'admin':
msg.Enqueue(mlist, toadmin=1)
elif command == 'owner':
msg.Enqueue(mlist, toowner=1)
elif command == 'request':
msg.Enqueue(mlist, torequest=1)
elif command in ('join', 'leave'):
# TBD: this is a hack!
if command == 'join':
msg['Subject'] = 'subscribe'
else:
msg['Subject'] = 'unsubscribe'
msg.Enqueue(mlist, torequest=1)
class Options:
setuid = 1
classname = 'PureProxy'
def parseargs():
global DEBUGSTREAM
try:
opts, args = getopt.getopt(
sys.argv[1:], 'nVhc:d',
['class=', 'nosetuid', 'version', 'help', 'debug'])
except getopt.error, e:
usage(1, e)
options = Options()
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-V', '--version'):
print >> sys.stderr, __version__
sys.exit(0)
elif opt in ('-n', '--nosetuid'):
options.setuid = 0
elif opt in ('-c', '--class'):
options.classname = arg
elif opt in ('-d', '--debug'):
DEBUGSTREAM = sys.stderr
# parse the rest of the arguments
if len(args) < 1:
localspec = 'localhost:8025'
remotespec = 'localhost:25'
elif len(args) < 2:
localspec = args[0]
remotespec = 'localhost:25'
elif len(args) < 3:
localspec = args[0]
remotespec = args[1]
else:
usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
# split into host/port pairs
i = localspec.find(':')
if i < 0:
usage(1, 'Bad local spec: %s' % localspec)
options.localhost = localspec[:i]
try:
options.localport = int(localspec[i+1:])
except ValueError:
usage(1, 'Bad local port: %s' % localspec)
i = remotespec.find(':')
if i < 0:
usage(1, 'Bad remote spec: %s' % remotespec)
options.remotehost = remotespec[:i]
try:
options.remoteport = int(remotespec[i+1:])
except ValueError:
usage(1, 'Bad remote port: %s' % remotespec)
return options
if __name__ == '__main__':
options = parseargs()
# Become nobody
if options.setuid:
try:
import pwd
except ImportError:
print >> sys.stderr, \
'Cannot import module "pwd"; try running with -n option.'
sys.exit(1)
nobody = pwd.getpwnam('nobody')[2]
try:
os.setuid(nobody)
except OSError, e:
if e.errno != errno.EPERM: raise
print >> sys.stderr, \
'Cannot setuid "nobody"; try running with -n option.'
sys.exit(1)
classname = options.classname
if "." in classname:
lastdot = classname.rfind(".")
mod = __import__(classname[:lastdot], globals(), locals(), [""])
classname = classname[lastdot+1:]
else:
import __main__ as mod
class_ = getattr(mod, classname)
proxy = class_((options.localhost, options.localport),
(options.remotehost, options.remoteport))
try:
asyncore.loop()
except KeyboardInterrupt:
pass
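# A minimal usage sketch (editor's addition, not part of the original
# module): run a DebuggingServer that prints every received message to
# stdout. The addresses are illustrative.
#
#   server = DebuggingServer(('localhost', 8025), None)
#   try:
#       asyncore.loop()
#   except KeyboardInterrupt:
#       pass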
|
montoyjh/pymatgen
|
refs/heads/master
|
pymatgen/io/lammps/utils.py
|
3
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines utility classes and functions.
"""
import os
import tempfile
from io import open
from subprocess import Popen, PIPE
import numpy as np
try:
import pybel as pb
except ImportError:
pb = None
from pymatgen import Molecule
from pymatgen.core.operations import SymmOp
from pymatgen.util.coord import get_angle
from pymatgen.io.babel import BabelMolAdaptor
from monty.os.path import which
from monty.tempfile import ScratchDir
__author__ = 'Kiran Mathew, Brandon Wood, Michael Humbert'
__email__ = 'kmathew@lbl.gov'
class Polymer:
"""
    Generate a polymer chain via random walk. At each position there are
    a total of 5 possible moves (excluding the reverse of the previous
    direction).
"""
def __init__(self, start_monomer, s_head, s_tail,
monomer, head, tail,
end_monomer, e_head, e_tail,
n_units, link_distance=1.0, linear_chain=False):
"""
Args:
start_monomer (Molecule): Starting molecule
s_head (int): starting atom index of the start_monomer molecule
s_tail (int): tail atom index of the start_monomer
monomer (Molecule): The monomer
head (int): index of the atom in the monomer that forms the head
tail (int): tail atom index. monomers will be connected from
tail to head
end_monomer (Molecule): Terminal molecule
e_head (int): starting atom index of the end_monomer molecule
e_tail (int): tail atom index of the end_monomer
n_units (int): number of monomer units excluding the start and
terminal molecules
link_distance (float): distance between consecutive monomers
linear_chain (bool): linear or random walk polymer chain
"""
self.start = s_head
self.end = s_tail
self.monomer = monomer
self.n_units = n_units
self.link_distance = link_distance
self.linear_chain = linear_chain
        # translate each monomer so that its head atom sits at the origin
        # (note: each molecule is indexed with its own coordinates here; the
        # original indexed `monomer` for all three, which breaks when the
        # start/end monomers differ from the repeat unit)
        start_monomer.translate_sites(range(len(start_monomer)),
                                      - start_monomer.cart_coords[s_head])
        monomer.translate_sites(range(len(monomer)),
                                - monomer.cart_coords[head])
        end_monomer.translate_sites(range(len(end_monomer)),
                                    - end_monomer.cart_coords[e_head])
self.mon_vector = monomer.cart_coords[tail] - monomer.cart_coords[head]
self.moves = {1: [1, 0, 0],
2: [0, 1, 0],
3: [0, 0, 1],
4: [-1, 0, 0],
5: [0, -1, 0],
6: [0, 0, -1]}
self.prev_move = 1
# places the start monomer at the beginning of the chain
self.molecule = start_monomer.copy()
self.length = 1
# create the chain
self._create(self.monomer, self.mon_vector)
# terminate the chain with the end_monomer
self.n_units += 1
end_mon_vector = end_monomer.cart_coords[e_tail] - \
end_monomer.cart_coords[e_head]
self._create(end_monomer, end_mon_vector)
self.molecule = Molecule.from_sites(self.molecule.sites)
def _create(self, monomer, mon_vector):
"""
create the polymer from the monomer
Args:
monomer (Molecule)
mon_vector (numpy.array): molecule vector that starts from the
start atom index to the end atom index
"""
while self.length != (self.n_units-1):
if self.linear_chain:
move_direction = np.array(mon_vector) / np.linalg.norm(mon_vector)
else:
move_direction = self._next_move_direction()
self._add_monomer(monomer.copy(), mon_vector, move_direction)
def _next_move_direction(self):
"""
pick a move at random from the list of moves
"""
nmoves = len(self.moves)
move = np.random.randint(1, nmoves+1)
        # reject the exact reverse of the previous move; with the 1-based
        # move keys above, the reverse of move m is ((m + 2) % nmoves) + 1
        while self.prev_move == ((move + 2) % nmoves) + 1:
move = np.random.randint(1, nmoves+1)
self.prev_move = move
return np.array(self.moves[move])
def _align_monomer(self, monomer, mon_vector, move_direction):
"""
rotate the monomer so that it is aligned along the move direction
Args:
monomer (Molecule)
mon_vector (numpy.array): molecule vector that starts from the
start atom index to the end atom index
move_direction (numpy.array): the direction of the polymer chain
extension
"""
axis = np.cross(mon_vector, move_direction)
origin = monomer[self.start].coords
angle = get_angle(mon_vector, move_direction)
op = SymmOp.from_origin_axis_angle(origin, axis, angle)
monomer.apply_operation(op)
def _add_monomer(self, monomer, mon_vector, move_direction):
"""
extend the polymer molecule by adding a monomer along mon_vector direction
Args:
monomer (Molecule): monomer molecule
mon_vector (numpy.array): monomer vector that points from head to tail.
move_direction (numpy.array): direction along which the monomer
will be positioned
"""
translate_by = self.molecule.cart_coords[self.end] + \
self.link_distance * move_direction
monomer.translate_sites(range(len(monomer)), translate_by)
if not self.linear_chain:
self._align_monomer(monomer, mon_vector, move_direction)
# add monomer if there are no crossings
does_cross = False
for i, site in enumerate(monomer):
try:
self.molecule.append(site.specie, site.coords,
properties=site.properties)
            except Exception:
does_cross = True
polymer_length = len(self.molecule)
self.molecule.remove_sites(
range(polymer_length - i, polymer_length))
break
if not does_cross:
self.length += 1
self.end += len(self.monomer)
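# A minimal usage sketch (editor's addition, not part of the original
# module): grow a short linear chain from a fictitious two-atom repeat
# unit. All molecules and atom indices here are illustrative.
#
#   c2 = Molecule(["C", "C"], [[0.0, 0.0, 0.0], [1.5, 0.0, 0.0]])
#   chain = Polymer(c2.copy(), 0, 1,   # start monomer, head, tail
#                   c2.copy(), 0, 1,   # repeat unit, head, tail
#                   c2.copy(), 0, 1,   # end monomer, head, tail
#                   n_units=5, link_distance=1.5, linear_chain=True)
#   print(len(chain.molecule))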
class PackmolRunner:
"""
Wrapper for the Packmol software that can be used to pack various types of
    molecules into a single unit.
"""
def __init__(self, mols, param_list, input_file="pack.inp",
tolerance=2.0, filetype="xyz",
control_params={"maxit": 20, "nloop": 600},
auto_box=True, output_file="packed.xyz",
bin="packmol"):
"""
Args:
mols:
list of Molecules to pack
input_file:
name of the packmol input file
tolerance:
min distance between the atoms
filetype:
input/output structure file type
control_params:
packmol control parameters dictionary. Basically
all parameters other than structure/atoms
param_list:
list of parameters containing dicts for each molecule
auto_box:
put the molecule assembly in a box
output_file:
output file name. The extension will be adjusted
according to the filetype
"""
self.packmol_bin = bin.split()
if not which(self.packmol_bin[-1]):
raise RuntimeError(
"PackmolRunner requires the executable 'packmol' to be in "
"the path. Please download packmol from "
"https://github.com/leandromartinez98/packmol "
"and follow the instructions in the README to compile. "
"Don't forget to add the packmol binary to your path")
self.mols = mols
self.param_list = param_list
self.input_file = input_file
self.boxit = auto_box
self.control_params = control_params
if not self.control_params.get("tolerance"):
self.control_params["tolerance"] = tolerance
if not self.control_params.get("filetype"):
self.control_params["filetype"] = filetype
if not self.control_params.get("output"):
self.control_params["output"] = "{}.{}".format(
output_file.split(".")[0], self.control_params["filetype"])
if self.boxit:
self._set_box()
def _format_param_val(self, param_val):
"""
Internal method to format values in the packmol parameter dictionaries
Args:
param_val:
Some object to turn into String
Returns:
string representation of the object
"""
if isinstance(param_val, list):
return ' '.join(str(x) for x in param_val)
else:
return str(param_val)
def _set_box(self):
"""
Set the box size for the molecular assembly
"""
net_volume = 0.0
for idx, mol in enumerate(self.mols):
length = max([np.max(mol.cart_coords[:, i])-np.min(mol.cart_coords[:, i])
for i in range(3)]) + 2.0
net_volume += (length**3.0) * float(self.param_list[idx]['number'])
length = net_volume**(1.0/3.0)
for idx, mol in enumerate(self.mols):
self.param_list[idx]['inside box'] = '0.0 0.0 0.0 {} {} {}'.format(
length, length, length)
def _write_input(self, input_dir="."):
"""
Write the packmol input file to the input directory.
Args:
input_dir (string): path to the input directory
"""
with open(os.path.join(input_dir, self.input_file), 'wt', encoding="utf-8") as inp:
for k, v in self.control_params.items():
inp.write('{} {}\n'.format(k, self._format_param_val(v)))
# write the structures of the constituent molecules to file and set
# the molecule id and the corresponding filename in the packmol
# input file.
for idx, mol in enumerate(self.mols):
filename = os.path.join(
input_dir, '{}.{}'.format(
idx, self.control_params["filetype"])).encode("ascii")
# pdb
if self.control_params["filetype"] == "pdb":
self.write_pdb(mol, filename, num=idx+1)
# all other filetypes
else:
a = BabelMolAdaptor(mol)
pm = pb.Molecule(a.openbabel_mol)
pm.write(self.control_params["filetype"], filename=filename,
overwrite=True)
inp.write("\n")
inp.write(
"structure {}.{}\n".format(
os.path.join(input_dir, str(idx)),
self.control_params["filetype"]))
for k, v in self.param_list[idx].items():
inp.write(' {} {}\n'.format(k, self._format_param_val(v)))
inp.write('end structure\n')
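    # For reference (editor's addition): with the defaults above and one
    # molecule in self.mols, the generated input file looks roughly like
    #
    #   maxit 20
    #   nloop 600
    #   tolerance 2.0
    #   filetype xyz
    #   output packed.xyz
    #
    #   structure ./0.xyz
    #    number 1
    #   end structure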
def run(self, copy_to_current_on_exit=False, site_property=None):
"""
Write the input file to the scratch directory, run packmol and return
the packed molecule.
Args:
copy_to_current_on_exit (bool): Whether or not to copy the packmol
input/output files from the scratch directory to the current
directory.
site_property (str): if set then the specified site property
                for the final packed molecule will be restored.
Returns:
Molecule object
"""
scratch = tempfile.gettempdir()
with ScratchDir(scratch, copy_to_current_on_exit=copy_to_current_on_exit) as scratch_dir:
self._write_input(input_dir=scratch_dir)
packmol_input = open(os.path.join(scratch_dir, self.input_file), 'r')
p = Popen(self.packmol_bin, stdin=packmol_input, stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate()
output_file = os.path.join(scratch_dir, self.control_params["output"])
if os.path.isfile(output_file):
packed_mol = BabelMolAdaptor.from_file(output_file,
self.control_params["filetype"])
packed_mol = packed_mol.pymatgen_mol
print("packed molecule written to {}".format(
self.control_params["output"]))
if site_property:
packed_mol = self.restore_site_properties(site_property=site_property, filename=output_file)
return packed_mol
else:
print("Packmol execution failed")
print(stdout, stderr)
return None
def write_pdb(self, mol, filename, name=None, num=None):
"""
dump the molecule into pdb file with custom residue name and number.
"""
# ugly hack to get around the openbabel issues with inconsistent
# residue labelling.
scratch = tempfile.gettempdir()
with ScratchDir(scratch, copy_to_current_on_exit=False) as _:
mol.to(fmt="pdb", filename="tmp.pdb")
bma = BabelMolAdaptor.from_file("tmp.pdb", "pdb")
num = num or 1
name = name or "ml{}".format(num)
# bma = BabelMolAdaptor(mol)
pbm = pb.Molecule(bma._obmol)
for i, x in enumerate(pbm.residues):
x.OBResidue.SetName(name)
x.OBResidue.SetNum(num)
pbm.write(format="pdb", filename=filename, overwrite=True)
def _set_residue_map(self):
"""
map each residue to the corresponding molecule.
"""
self.map_residue_to_mol = {}
lookup = {}
for idx, mol in enumerate(self.mols):
            if mol.formula not in lookup:
mol.translate_sites(indices=range(len(mol)),
vector=-mol.center_of_mass)
lookup[mol.formula] = mol.copy()
self.map_residue_to_mol["ml{}".format(idx + 1)] = lookup[mol.formula]
def convert_obatoms_to_molecule(self, atoms, residue_name=None, site_property="ff_map"):
"""
        Convert a list of OpenBabel atoms to a Molecule.
Args:
atoms ([OBAtom]): list of OBAtom objects
            residue_name (str): the key in self.map_residue_to_mol. Used to
restore the site properties in the final packed molecule.
site_property (str): the site property to be restored.
Returns:
Molecule object
"""
        restore_site_props = residue_name is not None
if restore_site_props and not hasattr(self, "map_residue_to_mol"):
self._set_residue_map()
coords = []
zs = []
for atm in atoms:
coords.append(list(atm.coords))
zs.append(atm.atomicnum)
mol = Molecule(zs, coords)
if restore_site_props:
props = []
ref = self.map_residue_to_mol[residue_name].copy()
# sanity check
assert len(mol) == len(ref)
assert ref.formula == mol.formula
# the packed molecules have the atoms in the same order..sigh!
for i, site in enumerate(mol):
assert site.specie.symbol == ref[i].specie.symbol
props.append(getattr(ref[i], site_property))
mol.add_site_property(site_property, props)
return mol
def restore_site_properties(self, site_property="ff_map", filename=None):
"""
Restore the site properties for the final packed molecule.
Args:
site_property (str):
filename (str): path to the final packed molecule.
Returns:
Molecule
"""
# only for pdb
        if self.control_params["filetype"] != "pdb":
            raise ValueError("site properties can only be restored for the "
                             "pdb filetype")
filename = filename or self.control_params["output"]
bma = BabelMolAdaptor.from_file(filename, "pdb")
pbm = pb.Molecule(bma._obmol)
assert len(pbm.residues) == sum([x["number"]
for x in self.param_list])
packed_mol = self.convert_obatoms_to_molecule(
pbm.residues[0].atoms, residue_name=pbm.residues[0].name,
site_property=site_property)
for resid in pbm.residues[1:]:
mol = self.convert_obatoms_to_molecule(
resid.atoms, residue_name=resid.name,
site_property=site_property)
for site in mol:
packed_mol.append(site.species, site.coords,
properties=site.properties)
return packed_mol
class LammpsRunner:
def __init__(self, input_filename="lammps.in", bin="lammps"):
"""
LAMMPS wrapper
Args:
input_filename (string): input file name
bin (string): command to run, excluding the input file name
"""
self.lammps_bin = bin.split()
if not which(self.lammps_bin[-1]):
raise RuntimeError(
"LammpsRunner requires the executable {} to be in the path. "
"Please download and install LAMMPS from " \
"http://lammps.sandia.gov. "
"Don't forget to add the binary to your path".format(self.lammps_bin[-1]))
self.input_filename = input_filename
def run(self):
"""
Write the input/data files and run LAMMPS.
"""
lammps_cmd = self.lammps_bin + ['-in', self.input_filename]
print("Running: {}".format(" ".join(lammps_cmd)))
p = Popen(lammps_cmd, stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate()
return stdout, stderr
if __name__ == '__main__':
ethanol_coords = [[0.00720, -0.56870, 0.00000],
[-1.28540, 0.24990, 0.00000],
[1.13040, 0.31470, 0.00000],
[0.03920, -1.19720, 0.89000],
[0.03920, -1.19720, -0.89000],
[-1.31750, 0.87840, 0.89000],
[-1.31750, 0.87840, -0.89000],
[-2.14220, -0.42390, -0.00000],
[1.98570, -0.13650, -0.00000]]
ethanol = Molecule(["C", "C", "O", "H", "H", "H", "H", "H", "H"],
ethanol_coords)
water_coords = [[9.626, 6.787, 12.673],
[9.626, 8.420, 12.673],
[10.203, 7.604, 12.673]]
water = Molecule(["H", "H", "O"], water_coords)
pmr = PackmolRunner([ethanol, water],
[{"number": 1, "fixed": [0, 0, 0, 0, 0, 0],
"centerofmass": ""},
{"number": 15, "inside sphere": [0, 0, 0, 5]}],
input_file="packmol_input.inp", tolerance=2.0,
filetype="xyz",
control_params={"nloop": 1000},
auto_box=False, output_file="cocktail.xyz")
s = pmr.run()
|
BIRDY-obspm/DOCKing_System
|
refs/heads/master
|
Module/Simulation/Trajectory/TrajectorySolver/extended_classes.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ==============================================================
# ===========================LICENSE============================
# ==============================================================
# This file is part of DOCKS.
#
# DOCKS is free software: you can redistribute it and/or modify
# it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE
# as published by the Free Software Foundation, either version
# 3 of the License, or any later version.
#
# DOCKS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU LESSER GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the
# GNU LESSER GENERAL PUBLIC LICENSE along with DOCKS.
# If not, see <http://www.gnu.org/licenses/lgpl.txt>.
#
import PyKEP as pk
import numpy as np
class BasePlanet(pk.planet.jpl_lp):
def __init__(self, *args, **kwargs):
super(BasePlanet, self).__init__(*args, **kwargs)
self.semi_major_axis, self.eccentricity, self.inclination, self.longitude_asc_node, self.arg_periapsis, _ = self.osculating_elements(pk.epoch(0))
self.soi = self.semi_major_axis*(self.mu_self/self.mu_central_body)**(2./5)
def get_relative_position(self, date, position):
pla_pos, _ = np.array(self.eph(date))
return (position - pla_pos)
def get_acceleration(self, date, position):
rel_position = self.get_relative_position(date, position)
acc = -self.mu_self*rel_position/(np.linalg.norm(rel_position)**3)
return acc
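# A minimal usage sketch (editor's addition): query the extended planet
# class. Assumes PyKEP's jpl_lp ephemerides accept the name 'earth'; the
# probe position passed to get_acceleration is illustrative.
#
#   earth = BasePlanet('earth')
#   print(earth.soi)  # sphere-of-influence radius, in metres
#   print(earth.get_acceleration(pk.epoch(0), np.array([1.5e11, 0.0, 0.0])))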
|
SummerLW/Perf-Insight-Report
|
refs/heads/test
|
third_party/gsutil/third_party/boto/boto/cloudfront/distribution.py
|
92
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import base64
import time
from boto.compat import six, json
from boto.cloudfront.identity import OriginAccessIdentity
from boto.cloudfront.object import Object, StreamingObject
from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
from boto.cloudfront.logging import LoggingInfo
from boto.cloudfront.origin import S3Origin, CustomOrigin
from boto.s3.acl import ACL
class DistributionConfig(object):
def __init__(self, connection=None, origin=None, enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, default_root_object=None,
logging=None):
"""
:param origin: Origin information to associate with the
distribution. If your distribution will use
an Amazon S3 origin, then this should be an
S3Origin object. If your distribution will use
a custom origin (non Amazon S3), then this
should be a CustomOrigin object.
:type origin: :class:`boto.cloudfront.origin.S3Origin` or
:class:`boto.cloudfront.origin.CustomOrigin`
:param enabled: Whether the distribution is enabled to accept
end user requests for content.
:type enabled: bool
:param caller_reference: A unique number that ensures the
request can't be replayed. If no
caller_reference is provided, boto
will generate a type 4 UUID for use
as the caller reference.
        :type caller_reference: str
:param cnames: A CNAME alias you want to associate with this
distribution. You can have up to 10 CNAME aliases
per distribution.
        :type cnames: list of str
:param comment: Any comments you want to include about the
distribution.
:type comment: str
:param trusted_signers: Specifies any AWS accounts you want to
permit to create signed URLs for private
content. If you want the distribution to
use signed URLs, this should contain a
TrustedSigners object; if you want the
distribution to use basic URLs, leave
this None.
        :type trusted_signers: :class:`boto.cloudfront.signers.TrustedSigners`
:param default_root_object: Designates a default root object.
Only include a DefaultRootObject value
if you are going to assign a default
root object for the distribution.
        :type default_root_object: str
:param logging: Controls whether access logs are written for the
distribution. If you want to turn on access logs,
this should contain a LoggingInfo object; otherwise
it should contain None.
        :type logging: :class:`boto.cloudfront.logging.LoggingInfo`
"""
self.connection = connection
self.origin = origin
self.enabled = enabled
if caller_reference:
self.caller_reference = caller_reference
else:
self.caller_reference = str(uuid.uuid4())
self.cnames = []
if cnames:
self.cnames = cnames
self.comment = comment
self.trusted_signers = trusted_signers
self.logging = logging
self.default_root_object = default_root_object
def __repr__(self):
return "DistributionConfig:%s" % self.origin
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
if self.origin:
s += self.origin.to_xml()
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
for cname in self.cnames:
s += ' <CNAME>%s</CNAME>\n' % cname
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += ' <Enabled>'
if self.enabled:
s += 'true'
else:
s += 'false'
s += '</Enabled>\n'
if self.trusted_signers:
s += '<TrustedSigners>\n'
for signer in self.trusted_signers:
if signer == 'Self':
s += ' <Self></Self>\n'
else:
s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
s += '</TrustedSigners>\n'
if self.logging:
s += '<Logging>\n'
s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
s += '</Logging>\n'
if self.default_root_object:
dro = self.default_root_object
s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro
s += '</DistributionConfig>\n'
return s
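    # Sample output (editor's addition; values illustrative, origin and
    # optional elements omitted): for a config with one CNAME, to_xml()
    # produces roughly
    #
    #   <?xml version="1.0" encoding="UTF-8"?>
    #   <DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">
    #    <CallerReference>my-ref</CallerReference>
    #    <CNAME>cdn.example.com</CNAME>
    #    <Enabled>true</Enabled>
    #   </DistributionConfig>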
def startElement(self, name, attrs, connection):
if name == 'TrustedSigners':
self.trusted_signers = TrustedSigners()
return self.trusted_signers
elif name == 'Logging':
self.logging = LoggingInfo()
return self.logging
elif name == 'S3Origin':
self.origin = S3Origin()
return self.origin
elif name == 'CustomOrigin':
self.origin = CustomOrigin()
return self.origin
else:
return None
def endElement(self, name, value, connection):
if name == 'CNAME':
self.cnames.append(value)
elif name == 'Comment':
self.comment = value
elif name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'CallerReference':
self.caller_reference = value
elif name == 'DefaultRootObject':
self.default_root_object = value
else:
setattr(self, name, value)
class StreamingDistributionConfig(DistributionConfig):
def __init__(self, connection=None, origin='', enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, logging=None):
super(StreamingDistributionConfig, self).__init__(connection=connection,
origin=origin, enabled=enabled,
caller_reference=caller_reference,
cnames=cnames, comment=comment,
trusted_signers=trusted_signers,
logging=logging)
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
if self.origin:
s += self.origin.to_xml()
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
for cname in self.cnames:
s += ' <CNAME>%s</CNAME>\n' % cname
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += ' <Enabled>'
if self.enabled:
s += 'true'
else:
s += 'false'
s += '</Enabled>\n'
if self.trusted_signers:
s += '<TrustedSigners>\n'
for signer in self.trusted_signers:
if signer == 'Self':
s += ' <Self/>\n'
else:
s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
s += '</TrustedSigners>\n'
if self.logging:
s += '<Logging>\n'
s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
s += '</Logging>\n'
s += '</StreamingDistributionConfig>\n'
return s
class DistributionSummary(object):
def __init__(self, connection=None, domain_name='', id='',
last_modified_time=None, status='', origin=None,
cname='', comment='', enabled=False):
self.connection = connection
self.domain_name = domain_name
self.id = id
self.last_modified_time = last_modified_time
self.status = status
self.origin = origin
self.enabled = enabled
self.cnames = []
if cname:
self.cnames.append(cname)
self.comment = comment
self.trusted_signers = None
self.etag = None
self.streaming = False
def __repr__(self):
return "DistributionSummary:%s" % self.domain_name
def startElement(self, name, attrs, connection):
if name == 'TrustedSigners':
self.trusted_signers = TrustedSigners()
return self.trusted_signers
elif name == 'S3Origin':
self.origin = S3Origin()
return self.origin
elif name == 'CustomOrigin':
self.origin = CustomOrigin()
return self.origin
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'Status':
self.status = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'DomainName':
self.domain_name = value
elif name == 'Origin':
self.origin = value
elif name == 'CNAME':
self.cnames.append(value)
elif name == 'Comment':
self.comment = value
elif name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'StreamingDistributionSummary':
self.streaming = True
else:
setattr(self, name, value)
def get_distribution(self):
return self.connection.get_distribution_info(self.id)
class StreamingDistributionSummary(DistributionSummary):
def get_distribution(self):
return self.connection.get_streaming_distribution_info(self.id)
class Distribution(object):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
self.connection = connection
self.config = config
self.domain_name = domain_name
self.id = id
self.last_modified_time = last_modified_time
self.status = status
self.in_progress_invalidation_batches = 0
self.active_signers = None
self.etag = None
self._bucket = None
self._object_class = Object
def __repr__(self):
return "Distribution:%s" % self.domain_name
def startElement(self, name, attrs, connection):
if name == 'DistributionConfig':
self.config = DistributionConfig()
return self.config
elif name == 'ActiveTrustedSigners':
self.active_signers = ActiveTrustedSigners()
return self.active_signers
else:
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'Status':
self.status = value
elif name == 'InProgressInvalidationBatches':
self.in_progress_invalidation_batches = int(value)
elif name == 'DomainName':
self.domain_name = value
else:
setattr(self, name, value)
def update(self, enabled=None, cnames=None, comment=None):
"""
Update the configuration of the Distribution. The only values
of the DistributionConfig that can be directly updated are:
* CNAMES
* Comment
* Whether the Distribution is enabled or not
Any changes to the ``trusted_signers`` or ``origin`` properties of
this distribution's current config object will also be included in
the update. Therefore, to set the origin access identity for this
distribution, set ``Distribution.config.origin.origin_access_identity``
before calling this update method.
:type enabled: bool
:param enabled: Whether the Distribution is active or not.
:type cnames: list of str
        :param cnames: The DNS CNAMEs associated with this
Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
"""
new_config = DistributionConfig(self.connection, self.config.origin,
self.config.enabled, self.config.caller_reference,
self.config.cnames, self.config.comment,
self.config.trusted_signers,
self.config.default_root_object)
if enabled is not None:
new_config.enabled = enabled
if cnames is not None:
new_config.cnames = cnames
if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)
self.config = new_config
self._object_class = Object
def enable(self):
"""
Activate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=True)
def disable(self):
"""
Deactivate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=False)
def delete(self):
"""
Delete this CloudFront Distribution. The content
associated with the Distribution is not deleted from
the underlying Origin bucket in S3.
"""
self.connection.delete_distribution(self.id, self.etag)
def _get_bucket(self):
if isinstance(self.config.origin, S3Origin):
if not self._bucket:
bucket_dns_name = self.config.origin.dns_name
bucket_name = bucket_dns_name.replace('.s3.amazonaws.com', '')
from boto.s3.connection import S3Connection
s3 = S3Connection(self.connection.aws_access_key_id,
self.connection.aws_secret_access_key,
proxy=self.connection.proxy,
proxy_port=self.connection.proxy_port,
proxy_user=self.connection.proxy_user,
proxy_pass=self.connection.proxy_pass)
self._bucket = s3.get_bucket(bucket_name)
self._bucket.distribution = self
self._bucket.set_key_class(self._object_class)
return self._bucket
else:
raise NotImplementedError('Unable to get_objects on CustomOrigin')
def get_objects(self):
"""
Return a list of all content objects in this distribution.
:rtype: list of :class:`boto.cloudfront.object.Object`
:return: The content objects
"""
bucket = self._get_bucket()
objs = []
for key in bucket:
objs.append(key)
return objs
def set_permissions(self, object, replace=False):
"""
Sets the S3 ACL grants for the given object to the appropriate
value based on the type of Distribution. If the Distribution
is serving private content the ACL will be set to include the
Origin Access Identity associated with the Distribution. If
the Distribution is serving public content the content will
be set up with "public-read".
:type object: :class:`boto.cloudfront.object.Object`
        :param object: The Object whose ACL is being set
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
if isinstance(self.config.origin, S3Origin):
if self.config.origin.origin_access_identity:
id = self.config.origin.origin_access_identity.split('/')[-1]
oai = self.connection.get_origin_access_identity_info(id)
policy = object.get_acl()
if replace:
policy.acl = ACL()
policy.acl.add_user_grant('READ', oai.s3_user_id)
object.set_acl(policy)
else:
object.set_canned_acl('public-read')
def set_permissions_all(self, replace=False):
"""
Sets the S3 ACL grants for all objects in the Distribution
to the appropriate value based on the type of Distribution.
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
bucket = self._get_bucket()
for key in bucket:
self.set_permissions(key, replace)
def add_object(self, name, content, headers=None, replace=True):
"""
Adds a new content object to the Distribution. The content
for the object will be copied to a new Key in the S3 Bucket
and the permissions will be set appropriately for the type
of Distribution.
:type name: str or unicode
:param name: The name or key of the new object.
:type content: file-like object
:param content: A file-like object that contains the content
for the new object.
:type headers: dict
:param headers: A dictionary containing additional headers
you would like associated with the new
object in S3.
:rtype: :class:`boto.cloudfront.object.Object`
:return: The newly created object.
"""
if self.config.origin.origin_access_identity:
policy = 'private'
else:
policy = 'public-read'
bucket = self._get_bucket()
object = bucket.new_key(name)
object.set_contents_from_file(content, headers=headers, policy=policy)
if self.config.origin.origin_access_identity:
self.set_permissions(object, replace)
return object
def create_signed_url(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates a signed CloudFront URL that is only valid within the specified
parameters.
:type url: str
:param url: The URL of the protected object.
:type keypair_id: str
:param keypair_id: The keypair ID of the Amazon KeyPair used to sign
                           the URL. This ID MUST correspond to the private key
specified with private_key_file or private_key_string.
:type expire_time: int
:param expire_time: The expiry time of the URL. If provided, the URL
will expire after the time has passed. If not provided the URL will
never expire. Format is a unix epoch.
Use time.time() + duration_in_sec.
:type valid_after_time: int
:param valid_after_time: If provided, the URL will not be valid until
after valid_after_time. Format is a unix epoch.
Use time.time() + secs_until_valid.
:type ip_address: str
:param ip_address: If provided, only allows access from the specified
IP address. Use '192.168.0.10' for a single IP or
use '192.168.0.0/24' CIDR notation for a subnet.
:type policy_url: str
:param policy_url: If provided, allows the signature to contain
wildcard globs in the URL. For example, you could
provide: 'http://example.com/media/\*' and the policy
and signature would allow access to all contents of
the media subdirectory. If not specified, only
allow access to the exact url provided in 'url'.
:type private_key_file: str or file object.
:param private_key_file: If provided, contains the filename of the
private key file used for signing or an open
file object containing the private key
contents. Only one of private_key_file or
private_key_string can be provided.
:type private_key_string: str
:param private_key_string: If provided, contains the private key string
used for signing. Only one of private_key_file or
private_key_string can be provided.
:rtype: str
:return: The signed URL.
"""
# Get the required parameters
params = self._create_signing_params(
url=url, keypair_id=keypair_id, expire_time=expire_time,
valid_after_time=valid_after_time, ip_address=ip_address,
policy_url=policy_url, private_key_file=private_key_file,
private_key_string=private_key_string)
#combine these into a full url
if "?" in url:
sep = "&"
else:
sep = "?"
signed_url_params = []
for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]:
if key in params:
param = "%s=%s" % (key, params[key])
signed_url_params.append(param)
signed_url = url + sep + "&".join(signed_url_params)
return signed_url
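    # Example call (editor's addition; all values are illustrative):
    #
    #   signed = dist.create_signed_url(
    #       "http://d12345example.cloudfront.net/image.jpg",
    #       keypair_id="PK12345EXAMPLE",
    #       expire_time=int(time.time()) + 3600,
    #       private_key_file="/path/to/pk-PK12345EXAMPLE.pem")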
def _create_signing_params(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates the required URL parameters for a signed URL.
"""
params = {}
# Check if we can use a canned policy
if expire_time and not valid_after_time and not ip_address and not policy_url:
# we manually construct this policy string to ensure formatting
# matches signature
policy = self._canned_policy(url, expire_time)
params["Expires"] = str(expire_time)
else:
# If no policy_url is specified, default to the full url.
if policy_url is None:
policy_url = url
# Can't use canned policy
policy = self._custom_policy(policy_url, expires=expire_time,
valid_after=valid_after_time,
ip_address=ip_address)
encoded_policy = self._url_base64_encode(policy)
params["Policy"] = encoded_policy
#sign the policy
signature = self._sign_string(policy, private_key_file, private_key_string)
#now base64 encode the signature (URL safe as well)
encoded_signature = self._url_base64_encode(signature)
params["Signature"] = encoded_signature
params["Key-Pair-Id"] = keypair_id
return params
@staticmethod
def _canned_policy(resource, expires):
"""
Creates a canned policy string.
"""
policy = ('{"Statement":[{"Resource":"%(resource)s",'
'"Condition":{"DateLessThan":{"AWS:EpochTime":'
'%(expires)s}}}]}' % locals())
return policy
@staticmethod
def _custom_policy(resource, expires=None, valid_after=None, ip_address=None):
"""
Creates a custom policy string based on the supplied parameters.
"""
condition = {}
# SEE: http://docs.amazonwebservices.com/AmazonCloudFront/latest/DeveloperGuide/RestrictingAccessPrivateContent.html#CustomPolicy
# The 'DateLessThan' property is required.
if not expires:
# Defaults to ONE day
expires = int(time.time()) + 86400
condition["DateLessThan"] = {"AWS:EpochTime": expires}
if valid_after:
condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after}
if ip_address:
if '/' not in ip_address:
ip_address += "/32"
condition["IpAddress"] = {"AWS:SourceIp": ip_address}
policy = {"Statement": [{
"Resource": resource,
"Condition": condition}]}
return json.dumps(policy, separators=(",", ":"))
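    # For an expiring, IP-restricted resource (editor's addition), the
    # policy serialized above looks like this (wrapped here for
    # readability; json.dumps emits it on one line):
    #
    #   {"Statement":[{"Resource":"http://example.com/media/*",
    #     "Condition":{"DateLessThan":{"AWS:EpochTime":1357034400},
    #       "IpAddress":{"AWS:SourceIp":"192.168.0.10/32"}}}]}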
@staticmethod
def _sign_string(message, private_key_file=None, private_key_string=None):
"""
Signs a string for use with Amazon CloudFront.
Requires the rsa library be installed.
"""
try:
import rsa
except ImportError:
raise NotImplementedError("Boto depends on the python rsa "
"library to generate signed URLs for "
"CloudFront")
# Make sure only one of private_key_file and private_key_string is set
if private_key_file and private_key_string:
raise ValueError("Only specify the private_key_file or the private_key_string not both")
if not private_key_file and not private_key_string:
raise ValueError("You must specify one of private_key_file or private_key_string")
# If private_key_file is a file name, open it and read it
if private_key_string is None:
if isinstance(private_key_file, six.string_types):
with open(private_key_file, 'r') as file_handle:
private_key_string = file_handle.read()
# Otherwise, treat it like a file
else:
private_key_string = private_key_file.read()
# Sign it!
private_key = rsa.PrivateKey.load_pkcs1(private_key_string)
signature = rsa.sign(str(message), private_key, 'SHA-1')
return signature
@staticmethod
def _url_base64_encode(msg):
"""
Base64 encodes a string using the URL-safe characters specified by
Amazon.
"""
msg_base64 = base64.b64encode(msg)
msg_base64 = msg_base64.replace('+', '-')
msg_base64 = msg_base64.replace('=', '_')
msg_base64 = msg_base64.replace('/', '~')
return msg_base64
class StreamingDistribution(Distribution):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
super(StreamingDistribution, self).__init__(connection, config,
domain_name, id, last_modified_time, status)
self._object_class = StreamingObject
def startElement(self, name, attrs, connection):
if name == 'StreamingDistributionConfig':
self.config = StreamingDistributionConfig()
return self.config
else:
return super(StreamingDistribution, self).startElement(name, attrs,
connection)
def update(self, enabled=None, cnames=None, comment=None):
"""
Update the configuration of the StreamingDistribution. The only values
of the StreamingDistributionConfig that can be directly updated are:
* CNAMES
* Comment
* Whether the Distribution is enabled or not
Any changes to the ``trusted_signers`` or ``origin`` properties of
this distribution's current config object will also be included in
the update. Therefore, to set the origin access identity for this
distribution, set
``StreamingDistribution.config.origin.origin_access_identity``
before calling this update method.
:type enabled: bool
:param enabled: Whether the StreamingDistribution is active or not.
:type cnames: list of str
        :param cnames: The DNS CNAMEs associated with this
Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
"""
new_config = StreamingDistributionConfig(self.connection,
self.config.origin,
self.config.enabled,
self.config.caller_reference,
self.config.cnames,
self.config.comment,
self.config.trusted_signers)
if enabled is not None:
new_config.enabled = enabled
if cnames is not None:
new_config.cnames = cnames
if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_streaming_distribution_config(self.id,
self.etag,
new_config)
self.config = new_config
self._object_class = StreamingObject
def delete(self):
self.connection.delete_streaming_distribution(self.id, self.etag)
|
cchurch/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/rundeck_project.py
|
44
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to manage rundeck projects
# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
# Sponsored by Infopro Digital. http://www.infopro-digital.com/
# Sponsored by E.T.A.I. http://www.etai.fr/
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rundeck_project
short_description: Manage Rundeck projects.
description:
- Create and remove Rundeck projects through HTTP API.
version_added: "2.4"
author: "Loic Blot (@nerzhul)"
options:
state:
description:
- Create or remove Rundeck project.
choices: ['present', 'absent']
default: 'present'
name:
description:
- Sets the project name.
required: True
url:
description:
- Sets the rundeck instance URL.
required: True
api_version:
description:
- Sets the API version used by module.
- API version must be at least 14.
default: 14
token:
description:
- Sets the token to authenticate against Rundeck API.
required: True
'''
EXAMPLES = '''
- name: Create a rundeck project
rundeck_project:
name: "Project_01"
api_version: 18
url: "https://rundeck.example.org"
token: "mytoken"
state: present
- name: Remove a rundeck project
rundeck_project:
name: "Project_02"
url: "https://rundeck.example.org"
token: "mytoken"
state: absent
'''
RETURN = '''
rundeck_response:
description: Rundeck response when a failure occurs
returned: failed
type: str
before:
description: dictionary containing project information before modification
returned: success
type: dict
after:
description: dictionary containing project information after modification
returned: success
type: dict
'''
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
import json
class RundeckProjectManager(object):
def __init__(self, module):
self.module = module
def handle_http_code_if_needed(self, infos):
if infos["status"] == 403:
self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
"permissions.", rundeck_response=infos["body"])
elif infos["status"] >= 500:
self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
def request_rundeck_api(self, query, data=None, method="GET"):
resp, info = fetch_url(self.module,
"%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
data=json.dumps(data),
method=method,
headers={
"Content-Type": "application/json",
"Accept": "application/json",
"X-Rundeck-Auth-Token": self.module.params["token"]
})
self.handle_http_code_if_needed(info)
if resp is not None:
resp = resp.read()
if resp != "":
try:
json_resp = json.loads(resp)
return json_resp, info
except ValueError as e:
self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
"Object was: %s" % (to_native(e), resp))
return resp, info
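    # The composed endpoint (editor's addition) follows the pattern
    # <url>/api/<api_version>/<query>, e.g. with the defaults and the
    # example project name from EXAMPLES:
    #
    #   https://rundeck.example.org/api/14/project/Project_01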
def get_project_facts(self):
resp, info = self.request_rundeck_api("project/%s" % self.module.params["name"])
return resp
def create_or_update_project(self):
facts = self.get_project_facts()
if facts is None:
# If in check mode don't create project, simulate a fake project creation
if self.module.check_mode:
self.module.exit_json(changed=True, before={}, after={"name": self.module.params["name"]})
resp, info = self.request_rundeck_api("projects", method="POST", data={
"name": self.module.params["name"],
"config": {}
})
if info["status"] == 201:
self.module.exit_json(changed=True, before={}, after=self.get_project_facts())
else:
self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
before={}, after=self.get_project_facts())
else:
self.module.exit_json(changed=False, before=facts, after=facts)
def remove_project(self):
facts = self.get_project_facts()
if facts is None:
self.module.exit_json(changed=False, before={}, after={})
else:
# If not in check mode, remove the project
if not self.module.check_mode:
self.request_rundeck_api("project/%s" % self.module.params["name"], method="DELETE")
self.module.exit_json(changed=True, before=facts, after={})
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', choices=['present', 'absent'], default='present'),
name=dict(required=True, type='str'),
url=dict(required=True, type='str'),
api_version=dict(type='int', default=14),
token=dict(required=True, type='str', no_log=True),
),
supports_check_mode=True
)
if module.params["api_version"] < 14:
module.fail_json(msg="API version should be at least 14")
rundeck = RundeckProjectManager(module)
if module.params['state'] == 'present':
rundeck.create_or_update_project()
elif module.params['state'] == 'absent':
rundeck.remove_project()
if __name__ == '__main__':
main()
|
vinced/namecoin
|
refs/heads/master
|
client/DNS/Base.py
|
35
|
"""
$Id: Base.py,v 1.12.2.10 2008/08/01 03:58:03 customdesigned Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License.
Base functionality. Request and Response classes, that sort of thing.
"""
import socket, string, types, time, select
import Type,Class,Opcode
import asyncore
#
# This random generator is used for transaction ids and port selection. This
# is important to prevent spurious results from lost packets, and malicious
# cache poisoning. This doesn't matter if you are behind a caching nameserver
# or your app is a primary DNS server only. To install your own generator,
# replace DNS.Base.random. SystemRandom uses /dev/urandom or similar source.
#
try:
from random import SystemRandom
random = SystemRandom()
except:
import random
class DNSError(Exception): pass
# Lib uses DNSError, so import after defining.
import Lib
defaults= { 'protocol':'udp', 'port':53, 'opcode':Opcode.QUERY,
'qtype':Type.A, 'rd':1, 'timing':1, 'timeout': 30 }
defaults['server']=[]
def ParseResolvConf(resolv_path="/etc/resolv.conf"):
"parses the /etc/resolv.conf file and sets defaults for name servers"
global defaults
lines=open(resolv_path).readlines()
for line in lines:
line = string.strip(line)
if not line or line[0]==';' or line[0]=='#':
continue
fields=string.split(line)
if len(fields) < 2:
continue
if fields[0]=='domain' and len(fields) > 1:
defaults['domain']=fields[1]
if fields[0]=='search':
pass
if fields[0]=='options':
pass
if fields[0]=='sortlist':
pass
if fields[0]=='nameserver':
defaults['server'].append(fields[1])
def DiscoverNameServers():
import sys
if sys.platform in ('win32', 'nt'):
import win32dns
defaults['server']=win32dns.RegistryResolve()
else:
return ParseResolvConf()
class DnsRequest:
""" high level Request object """
def __init__(self,*name,**args):
self.donefunc=None
self.async=None
self.defaults = {}
self.argparse(name,args)
self.defaults = self.args
self.tid = 0
def argparse(self,name,args):
if not name and self.defaults.has_key('name'):
args['name'] = self.defaults['name']
if type(name) is types.StringType:
args['name']=name
else:
if len(name) == 1:
if name[0]:
args['name']=name[0]
for i in defaults.keys():
if not args.has_key(i):
if self.defaults.has_key(i):
args[i]=self.defaults[i]
else:
args[i]=defaults[i]
if type(args['server']) == types.StringType:
args['server'] = [args['server']]
self.args=args
def socketInit(self,a,b):
self.s = socket.socket(a,b)
def processUDPReply(self):
if self.timeout > 0:
r,w,e = select.select([self.s],[],[],self.timeout)
if not len(r):
raise DNSError, 'Timeout'
(self.reply, self.from_address) = self.s.recvfrom(65535)
self.time_finish=time.time()
self.args['server']=self.ns
return self.processReply()
def processTCPReply(self):
if self.timeout > 0:
r,w,e = select.select([self.s],[],[],self.timeout)
if not len(r):
raise DNSError, 'Timeout'
#f = self.s.makefile('r')
#header = f.read(2)
header = self.s.recv(2)
if len(header) < 2:
raise DNSError,'EOF'
count = Lib.unpack16bit(header)
#self.reply = f.read(count)
self.reply = self.s.recv(count)
if len(self.reply) != count:
# FIXME: Since we are non-blocking, it could just be a large reply
# that we need to loop and wait for.
raise DNSError,'incomplete reply'
self.time_finish=time.time()
self.args['server']=self.ns
return self.processReply()
def processReply(self):
self.args['elapsed']=(self.time_finish-self.time_start)*1000
u = Lib.Munpacker(self.reply)
r=Lib.DnsResult(u,self.args)
r.args=self.args
#self.args=None # mark this DnsRequest object as used.
return r
#### TODO TODO TODO ####
# if protocol == 'tcp' and qtype == Type.AXFR:
# while 1:
# header = f.read(2)
# if len(header) < 2:
# print '========== EOF =========='
# break
# count = Lib.unpack16bit(header)
# if not count:
# print '========== ZERO COUNT =========='
# break
# print '========== NEXT =========='
# reply = f.read(count)
# if len(reply) != count:
# print '*** Incomplete reply ***'
# break
# u = Lib.Munpacker(reply)
# Lib.dumpM(u)
def getSource(self):
"Pick random source port to avoid DNS cache poisoning attack."
while True:
try:
source_port = random.randint(1024,65535)
self.s.bind(('', source_port))
break
except socket.error, msg:
# Error 98, 'Address already in use'
if msg[0] != 98: raise
def conn(self):
self.getSource()
self.s.connect((self.ns,self.port))
def req(self,*name,**args):
" needs a refactoring "
self.argparse(name,args)
#if not self.args:
# raise DNSError,'reinitialize request before reuse'
protocol = self.args['protocol']
self.port = self.args['port']
self.tid = random.randint(0,65535)
self.timeout = self.args['timeout'];
opcode = self.args['opcode']
rd = self.args['rd']
server=self.args['server']
if type(self.args['qtype']) == types.StringType:
try:
qtype = getattr(Type, string.upper(self.args['qtype']))
except AttributeError:
raise DNSError,'unknown query type'
else:
qtype=self.args['qtype']
if not self.args.has_key('name'):
print self.args
raise DNSError,'nothing to lookup'
qname = self.args['name']
if qtype == Type.AXFR:
print 'Query type AXFR, protocol forced to TCP'
protocol = 'tcp'
#print 'QTYPE %d(%s)' % (qtype, Type.typestr(qtype))
m = Lib.Mpacker()
# jesus. keywords and default args would be good. TODO.
m.addHeader(self.tid,
0, opcode, 0, 0, rd, 0, 0, 0,
1, 0, 0, 0)
m.addQuestion(qname, qtype, Class.IN)
self.request = m.getbuf()
try:
if protocol == 'udp':
self.sendUDPRequest(server)
else:
self.sendTCPRequest(server)
except socket.error, reason:
raise DNSError, reason
if self.async:
return None
else:
if not self.response:
raise DNSError,'no working nameservers found'
return self.response
def sendUDPRequest(self, server):
"refactor me"
self.response=None
for self.ns in server:
#print "trying udp",self.ns
try:
if self.ns.count(':'):
if hasattr(socket,'has_ipv6') and socket.has_ipv6:
self.socketInit(socket.AF_INET6, socket.SOCK_DGRAM)
else: continue
else:
self.socketInit(socket.AF_INET, socket.SOCK_DGRAM)
try:
# TODO. Handle timeouts &c correctly (RFC)
self.time_start=time.time()
self.conn()
if not self.async:
self.s.send(self.request)
r=self.processUDPReply()
# Since we bind to the source port and connect to the
# destination port, we don't need to check that here,
# but do make sure it's actually a DNS request that the
# packet is in reply to.
while r.header['id'] != self.tid \
or self.from_address[1] != self.port:
r=self.processUDPReply()
self.response = r
# FIXME: check waiting async queries
finally:
if not self.async:
self.s.close()
except socket.error:
continue
break
def sendTCPRequest(self, server):
" do the work of sending a TCP request "
self.response=None
for self.ns in server:
#print "trying tcp",self.ns
try:
if self.ns.count(':'):
if hasattr(socket,'has_ipv6') and socket.has_ipv6:
self.socketInit(socket.AF_INET6, socket.SOCK_STREAM)
else: continue
else:
self.socketInit(socket.AF_INET, socket.SOCK_STREAM)
try:
# TODO. Handle timeouts &c correctly (RFC)
self.time_start=time.time()
self.conn()
buf = Lib.pack16bit(len(self.request))+self.request
# Keep server from making sendall hang
self.s.setblocking(1)
# FIXME: throws WOULDBLOCK if request too large to fit in
# system buffer
self.s.sendall(buf)
#self.s.shutdown(socket.SHUT_WR)
r=self.processTCPReply()
if r.header['id'] == self.tid:
self.response = r
break
finally:
self.s.close()
except socket.error:
continue
#class DnsAsyncRequest(DnsRequest):
class DnsAsyncRequest(DnsRequest,asyncore.dispatcher_with_send):
" an asynchronous request object. out of date, probably broken "
def __init__(self,*name,**args):
DnsRequest.__init__(self, *name, **args)
# XXX todo
if args.has_key('done') and args['done']:
self.donefunc=args['done']
else:
self.donefunc=self.showResult
#self.realinit(name,args) # XXX todo
self.async=1
def conn(self):
self.getSource()
self.connect((self.ns,self.port))
self.time_start=time.time()
        if self.args.has_key('start') and self.args['start']:
            asyncore.dispatcher.go(self)  # NB: stdlib asyncore has no go(); dead code
def socketInit(self,a,b):
self.create_socket(a,b)
asyncore.dispatcher.__init__(self)
self.s=self
def handle_read(self):
if self.args['protocol'] == 'udp':
self.response=self.processUDPReply()
if self.donefunc:
                self.donefunc(self)
def handle_connect(self):
self.send(self.request)
def handle_write(self):
pass
def showResult(self,*s):
self.response.show()
#
# $Log: Base.py,v $
# Revision 1.12.2.10 2008/08/01 03:58:03 customdesigned
# Don't try to close socket when never opened.
#
# Revision 1.12.2.9 2008/08/01 03:48:31 customdesigned
# Fix more breakage from port randomization patch. Support Ipv6 queries.
#
# Revision 1.12.2.8 2008/07/31 18:22:59 customdesigned
# Wait until tcp response at least starts coming in.
#
# Revision 1.12.2.7 2008/07/28 01:27:00 customdesigned
# Check configured port.
#
# Revision 1.12.2.6 2008/07/28 00:17:10 customdesigned
# Randomize source ports.
#
# Revision 1.12.2.5 2008/07/24 20:10:55 customdesigned
# Randomize tid in requests, and check in response.
#
# Revision 1.12.2.4 2007/05/22 20:28:31 customdesigned
# Missing import Lib
#
# Revision 1.12.2.3 2007/05/22 20:25:52 customdesigned
# Use socket.inetntoa,inetaton.
#
# Revision 1.12.2.2 2007/05/22 20:21:46 customdesigned
# Trap socket error
#
# Revision 1.12.2.1 2007/05/22 20:19:35 customdesigned
# Skip bogus but non-empty lines in resolv.conf
#
# Revision 1.12 2002/04/23 06:04:27 anthonybaxter
# attempt to refactor the DNSRequest.req method a little. after doing a bit
# of this, I've decided to bite the bullet and just rewrite the puppy. will
# be checkin in some design notes, then unit tests and then writing the sod.
#
# Revision 1.11 2002/03/19 13:05:02 anthonybaxter
# converted to class based exceptions (there goes the python1.4 compatibility :)
#
# removed a quite gross use of 'eval()'.
#
# Revision 1.10 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.9 2002/03/19 12:26:13 anthonybaxter
# death to leading tabs.
#
# Revision 1.8 2002/03/19 10:30:33 anthonybaxter
# first round of major bits and pieces. The major stuff here (summarised
# from my local, off-net CVS server :/ this will cause some oddities with
# the
#
# tests/testPackers.py:
# a large slab of unit tests for the packer and unpacker code in DNS.Lib
#
# DNS/Lib.py:
# placeholder for addSRV.
# added 'klass' to addA, make it the same as the other A* records.
# made addTXT check for being passed a string, turn it into a length 1 list.
# explicitly check for adding a string of length > 255 (prohibited).
# a bunch of cleanups from a first pass with pychecker
# new code for pack/unpack. the bitwise stuff uses struct, for a smallish
# (disappointly small, actually) improvement, while addr2bin is much
# much faster now.
#
# DNS/Base.py:
# added DiscoverNameServers. This automatically does the right thing
# on unix/ win32. No idea how MacOS handles this. *sigh*
# Incompatible change: Don't use ParseResolvConf on non-unix, use this
# function, instead!
# a bunch of cleanups from a first pass with pychecker
#
# Revision 1.5 2001/08/09 09:22:28 anthonybaxter
# added what I hope is win32 resolver lookup support. I'll need to try
# and figure out how to get the CVS checkout onto my windows machine to
# make sure it works (wow, doing something other than games on the
# windows machine :)
#
# Code from Wolfgang.Strobl@gmd.de
# win32dns.py from
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66260
#
# Really, ParseResolvConf() should be renamed "FindNameServers" or
# some such.
#
# Revision 1.4 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.3 2001/07/19 07:20:12 anthony
# Handle blank resolv.conf lines.
# Patch from Bastian Kleineidam
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
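#
# A minimal usage sketch (illustrative addition, not part of the original
# file). DiscoverNameServers and DnsRequest are defined in this module; the
# query name and type are arbitrary demo values, and show() is the same
# DnsResult printer used by showResult above.
if __name__ == '__main__':
    DiscoverNameServers()  # populate the default nameserver list
    res = DnsRequest(name='example.com', qtype='A').req()
    res.show()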
|
ran5515/DeepDecision
|
refs/heads/master
|
tensorflow/contrib/slim/python/slim/nets/overfeat.py
|
164
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Usage:
with slim.arg_scope(overfeat.overfeat_arg_scope()):
outputs, end_points = overfeat.overfeat(inputs)
@@overfeat
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def overfeat_arg_scope(weight_decay=0.0005):
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
activation_fn=nn_ops.relu,
weights_regularizer=regularizers.l2_regularizer(weight_decay),
biases_initializer=init_ops.zeros_initializer()):
with arg_scope([layers.conv2d], padding='SAME'):
with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def overfeat(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='overfeat'):
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 231x231. To use in fully
convolutional mode, set spatial_squeeze to false.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
spatial_squeeze: whether or not the spatial dimensions of the outputs
  should be squeezed. Useful to remove unnecessary dimensions for
  classification.
scope: Optional scope for the variables.
Returns:
the last op containing the log predictions and end_points dict.
"""
with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d
with arg_scope(
[layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
outputs_collections=end_points_collection):
net = layers.conv2d(
inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
net = layers.conv2d(net, 512, [3, 3], scope='conv3')
net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
net = layers.conv2d(net, 1024, [3, 3], scope='conv5')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
with arg_scope(
[layers.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=init_ops.constant_initializer(0.1)):
# Use conv2d instead of fully_connected layers.
net = layers.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout6')
net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout7')
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=init_ops.zeros_initializer(),
scope='fc8')
# Convert end_points_collection into a end_point dict.
end_points = utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
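# A minimal usage sketch (illustrative addition, not part of the original
# file): builds the graph in classification mode. The tensorflow import and
# the random 231x231 demo batch are assumptions, not part of the API.
if __name__ == '__main__':
  import tensorflow as tf
  demo_inputs = tf.random_normal([1, 231, 231, 3])
  with arg_scope(overfeat_arg_scope()):
    demo_logits, _ = overfeat(demo_inputs, num_classes=1000)
  print(demo_logits.get_shape())  # expected: (1, 1000)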
|
ThreatCentral/blackberries
|
refs/heads/master
|
src/ThreatCentral/transforms/IncidentToThreatCentral.py
|
1
|
#!/usr/bin/env python
# (c) Copyright [2016] Hewlett Packard Enterprise Development LP Licensed under
# the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable
# law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from canari.maltego.utils import debug
from canari.framework import configure
from canari.maltego.entities import Phrase
from canari.maltego.message import Label, UIMessage
from common.entities import Case, Incident, Indicator, Actor, CoursesOfAction
from common.client import search_incident, get_incident, encode_to_utf8, lower, ThreatCentralError
__author__ = 'Bart Otten'
__copyright__ = '(c) Copyright [2016] Hewlett Packard Enterprise Development LP'
__credits__ = []
__license__ = 'Apache 2.0'
__version__ = '1'
__maintainer__ = 'Bart Otten'
__email__ = 'tc-support@hpe.com'
__status__ = 'Development'
__all__ = [
'dotransform'
]
@configure(
label='Search Incident in Threat Central',
description='Searches Incident in Threat Central',
uuids=['threatcentral.v2.IncidentToThreatCentral'],
inputs=[('Threat Central', Incident)],
debug=False,
remote=False
)
def dotransform(request, response, config):
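    # Flow: when the incoming entity carries a ThreatCentral.resourceId
    # field, fetch that incident directly; the KeyError fallback below runs
    # a free-text search on the entity value instead and emits one Incident
    # entity per hit.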
try:
incident = get_incident(request.fields['ThreatCentral.resourceId'])
except ThreatCentralError as err:
response += UIMessage(err.value, type='PartialError')
return response
except KeyError:
try:
incidents = search_incident(request.value)
except ThreatCentralError as err:
response += UIMessage(err.value, type='PartialError')
return response
else:
if incidents:
try:
for incident in incidents:
if incident.get('tcScore'):
weight = int(incident.get('tcScore'))
else:
weight = 1
incident = incident.get('resource')
if incident:
e = Incident(encode_to_utf8(incident.get('title')), weight=weight)
e.title = encode_to_utf8(incident.get('title'))
e.resourceId = incident.get('resourceId')
e.reportedOn = incident.get('reportedOn')
e += Label('Reported On', incident.get('reportedOn'))
if len(incident.get('incidentCategory', list())) != 0:
e += Label('Incident Category', '<br/>'.join([encode_to_utf8(_.get('displayName'))
for _ in incident.get('incidentCategory',
list())]))
if len(incident.get('affectedAsset', list())) != 0:
e += Label('Affected Asset', '<br/>'.join([encode_to_utf8(_.get('displayName'))
for _ in incident.get('affectedAsset', list())]))
if len(incident.get('incidentEffect', list())) != 0:
e += Label('Incident Effect', '<br/>'.join([encode_to_utf8(_.get('displayName'))
for _ in incident.get('incidentEffect', list())]))
if len(incident.get('discoveryMethod', list())) != 0:
e += Label('Discovery Method', '<br/>'.join([encode_to_utf8(_.get('displayName'))
for _ in incident.get('discoveryMethod', list())]))
if incident.get('description'):
e += Label('Description', '<br/>'.join(encode_to_utf8(incident.get('description')
).split('\n')))
response += e
except AttributeError as err:
response += UIMessage('Error: {}'.format(err), type='PartialError')
except ThreatCentralError as err:
response += UIMessage(err.value, type='PartialError')
except TypeError:
return response
else:
if incident:
try:
if incident.get('tcScore'):
weight = int(incident.get('tcScore'))
else:
weight = 1
# Update Indicator entity
e = Incident(request.value, weight=weight)
e.title = encode_to_utf8(incident.get('title'))
e.resourceId = incident.get('resourceId')
e.reportedOn = incident.get('reportedOn')
e += Label('Reported On', incident.get('reportedOn'))
if len(incident.get('incidentCategory', list())) != 0:
e += Label('Incident Category', '<br/>'.join([encode_to_utf8(_.get('displayName'))
for _ in incident.get('incidentCategory', list())]))
if len(incident.get('affectedAsset', list())) != 0:
e += Label('Affected Asset', '<br/>'.join([encode_to_utf8(_.get('displayName'))
for _ in incident.get('affectedAsset', list())]))
if len(incident.get('incidentEffect', list())) != 0:
e += Label('Incident Effect', '<br/>'.join([encode_to_utf8(_.get('displayName'))
for _ in incident.get('incidentEffect', list())]))
if len(incident.get('discoveryMethod', list())) != 0:
e += Label('Discovery Method', '<br/>'.join([encode_to_utf8(_.get('displayName'))
for _ in incident.get('discoveryMethod', list())]))
if incident.get('description'):
e += Label('Description', '<br/>'.join(encode_to_utf8(incident.get('description')
).split('\n')))
response += e
except AttributeError as err:
response += UIMessage('Error: {}'.format(err), type='PartialError')
except ThreatCentralError as err:
response += UIMessage(err.value, type='PartialError')
except TypeError:
return response
return response
|
tpodowd/boto
|
refs/heads/master
|
boto/ec2/instancestatus.py
|
181
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Details(dict):
"""
A dict object that contains name/value pairs which provide
more detailed information about the status of the system
or the instance.
"""
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'name':
self._name = value
elif name == 'status':
self[self._name] = value
else:
setattr(self, name, value)
class Event(object):
"""
A status event for an instance.
:ivar code: A string indicating the event type.
:ivar description: A string describing the reason for the event.
:ivar not_before: A datestring describing the earliest time for
the event.
:ivar not_after: A datestring describing the latest time for
the event.
"""
def __init__(self, code=None, description=None,
not_before=None, not_after=None):
self.code = code
self.description = description
self.not_before = not_before
self.not_after = not_after
def __repr__(self):
return 'Event:%s' % self.code
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'code':
self.code = value
elif name == 'description':
self.description = value
elif name == 'notBefore':
self.not_before = value
elif name == 'notAfter':
self.not_after = value
else:
setattr(self, name, value)
class Status(object):
"""
A generic Status object used for system status and instance status.
:ivar status: A string indicating overall status.
:ivar details: A dict containing name-value pairs which provide
more details about the current status.
"""
def __init__(self, status=None, details=None):
self.status = status
if not details:
details = Details()
self.details = details
def __repr__(self):
return 'Status:%s' % self.status
def startElement(self, name, attrs, connection):
if name == 'details':
return self.details
return None
def endElement(self, name, value, connection):
if name == 'status':
self.status = value
else:
setattr(self, name, value)
class EventSet(list):
def startElement(self, name, attrs, connection):
if name == 'item':
event = Event()
self.append(event)
return event
else:
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class InstanceStatus(object):
"""
Represents an EC2 Instance status as reported by
DescribeInstanceStatus request.
:ivar id: The instance identifier.
:ivar zone: The availability zone of the instance.
:ivar events: A list of events relevant to the instance.
:ivar state_code: An integer representing the current state
of the instance.
:ivar state_name: A string describing the current state
of the instance.
:ivar system_status: A Status object that reports impaired
functionality that stems from issues related to the systems
that support an instance, such as hardware failures
and network connectivity problems.
:ivar instance_status: A Status object that reports impaired
functionality that arises from problems internal to the instance.
"""
def __init__(self, id=None, zone=None, events=None,
state_code=None, state_name=None):
self.id = id
self.zone = zone
self.events = events
self.state_code = state_code
self.state_name = state_name
self.system_status = Status()
self.instance_status = Status()
def __repr__(self):
return 'InstanceStatus:%s' % self.id
def startElement(self, name, attrs, connection):
if name == 'eventsSet':
self.events = EventSet()
return self.events
elif name == 'systemStatus':
return self.system_status
elif name == 'instanceStatus':
return self.instance_status
else:
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.id = value
elif name == 'availabilityZone':
self.zone = value
elif name == 'code':
self.state_code = int(value)
elif name == 'name':
self.state_name = value
else:
setattr(self, name, value)
class InstanceStatusSet(list):
"""
A list object that contains the results of a call to
DescribeInstanceStatus request. Each element of the
list will be an InstanceStatus object.
:ivar next_token: If the response was truncated by
the EC2 service, the next_token attribute of the
object will contain the string that needs to be
passed in to the next request to retrieve the next
set of results.
"""
def __init__(self, connection=None):
list.__init__(self)
self.connection = connection
self.next_token = None
def startElement(self, name, attrs, connection):
if name == 'item':
status = InstanceStatus()
self.append(status)
return status
else:
return None
def endElement(self, name, value, connection):
if name == 'nextToken':
self.next_token = value
setattr(self, name, value)
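# A minimal usage sketch (illustrative addition, not part of the original
# file): the region name and ambient AWS credentials are assumptions;
# get_all_instance_status is the EC2Connection call whose response these
# classes parse.
if __name__ == '__main__':
    import boto.ec2
    conn = boto.ec2.connect_to_region('us-east-1')
    for status in conn.get_all_instance_status():
        print status.id, status.state_name, status.system_status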
|
Accelerite/cinder
|
refs/heads/master
|
cinder/volume/drivers/huawei/__init__.py
|
3
|
# Copyright (c) 2013 - 2014 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Provide a unified driver class for users.
The product type and the protocol should be specified in config file before.
"""
from oslo_config import cfg
import six
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume.drivers.huawei import huawei_18000
from cinder.volume.drivers.huawei import huawei_dorado
from cinder.volume.drivers.huawei import huawei_t
from cinder.volume.drivers.huawei import huawei_utils
LOG = logging.getLogger(__name__)
huawei_opt = [
cfg.StrOpt('cinder_huawei_conf_file',
default='/etc/cinder/cinder_huawei_conf.xml',
help='The configuration file for the Cinder Huawei '
'driver')]
CONF = cfg.CONF
CONF.register_opts(huawei_opt)
MAPPING = {'HVS': '18000'}
class HuaweiVolumeDriver(object):
"""Define an unified driver for Huawei drivers."""
def __init__(self, *args, **kwargs):
super(HuaweiVolumeDriver, self).__init__()
self._product = {'T': huawei_t, 'Dorado': huawei_dorado,
'18000': huawei_18000, 'HVS': huawei_18000}
self._protocol = {'iSCSI': 'ISCSIDriver', 'FC': 'FCDriver'}
self.driver = self._instantiate_driver(*args, **kwargs)
def _instantiate_driver(self, *args, **kwargs):
"""Instantiate the specified driver."""
self.configuration = kwargs.get('configuration', None)
if not self.configuration:
msg = (_('_instantiate_driver: configuration not found.'))
raise exception.InvalidInput(reason=msg)
self.configuration.append_config_values(huawei_opt)
conf_file = self.configuration.cinder_huawei_conf_file
(product, protocol) = self._get_conf_info(conf_file)
        LOG.info(_LI(
            '_instantiate_driver: Loading %(protocol)s driver for '
            'Huawei OceanStor %(product)s series storage arrays.'),
            {'protocol': protocol,
             'product': product})
        # Map HVS to 18000.
if product in MAPPING:
LOG.warn(_LW("Product name %s is deprecated, update your "
"configuration to the new product name."), product)
product = MAPPING[product]
driver_module = self._product[product]
driver_class = 'Huawei' + product + self._protocol[protocol]
driver_class = getattr(driver_module, driver_class)
return driver_class(*args, **kwargs)
def _get_conf_info(self, filename):
"""Get product type and connection protocol from config file."""
root = huawei_utils.parse_xml_file(filename)
product = root.findtext('Storage/Product').strip()
protocol = root.findtext('Storage/Protocol').strip()
if (product in self._product.keys() and
protocol in self._protocol.keys()):
return (product, protocol)
else:
msg = (_('"Product" or "Protocol" is illegal. "Product" should '
'be set to either T, Dorado or 18000. "Protocol" should '
'be set to either iSCSI or FC. Product: %(product)s '
'Protocol: %(protocol)s')
% {'product': six.text_type(product),
'protocol': six.text_type(protocol)})
raise exception.InvalidInput(reason=msg)
def __setattr__(self, name, value):
"""Set the attribute."""
if getattr(self, 'driver', None):
self.driver.__setattr__(name, value)
return
object.__setattr__(self, name, value)
def __getattr__(self, name):
""""Get the attribute."""
drver = object.__getattribute__(self, 'driver')
return getattr(drver, name)
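# A minimal cinder_huawei_conf.xml sketch (illustrative; the two element
# paths are the ones read by _get_conf_info above, while the root tag name
# and the chosen values are assumptions):
#
#   <?xml version='1.0' encoding='UTF-8'?>
#   <config>
#       <Storage>
#           <Product>18000</Product>
#           <Protocol>iSCSI</Protocol>
#       </Storage>
#   </config>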
|
virajprabhu/oppia
|
refs/heads/develop
|
core/domain/user_services_test.py
|
29
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Stephanie Federwisch'
from core.domain import user_services
import test_utils
import utils
class UserServicesUnitTests(test_utils.GenericTestBase):
"""Test the user services methods."""
def test_set_and_get_username(self):
user_id = 'someUser'
username = 'username'
with self.assertRaisesRegexp(Exception, 'User not found.'):
user_services.set_username(user_id, username)
user_services._create_user(user_id, 'email@email.com')
user_services.set_username(user_id, username)
self.assertEquals(username, user_services.get_username(user_id))
def test_get_username_for_nonexistent_user(self):
with self.assertRaisesRegexp(Exception, 'User not found.'):
user_services.get_username('fakeUser')
def test_get_username_none(self):
user_services._create_user('fakeUser', 'email@email.com')
self.assertEquals(None, user_services.get_username('fakeUser'))
def test_is_username_taken_false(self):
self.assertFalse(user_services.is_username_taken('fakeUsername'))
def test_is_username_taken_true(self):
user_id = 'someUser'
username = 'newUsername'
user_services._create_user(user_id, 'email@email.com')
user_services.set_username(user_id, username)
self.assertTrue(user_services.is_username_taken(username))
def test_is_username_taken_different_case(self):
user_id = 'someUser'
username = 'camelCase'
user_services._create_user(user_id, 'email@email.com')
user_services.set_username(user_id, username)
self.assertTrue(user_services.is_username_taken('CaMeLcAsE'))
def test_set_invalid_usernames(self):
user_id = 'someUser'
user_services._create_user(user_id, 'email@email.com')
bad_usernames = [
' bob ', '@', '', 'a' * 100, 'ADMIN', 'admin', 'AdMiN2020']
for username in bad_usernames:
with self.assertRaises(utils.ValidationError):
user_services.set_username(user_id, username)
def test_invalid_emails(self):
bad_email_addresses = ['@', '@@', 'abc', '', None, ['a', '@', 'b.com']]
for email in bad_email_addresses:
with self.assertRaises(utils.ValidationError):
user_services.get_or_create_user('user_id', email)
def test_email_truncation(self):
email_addresses = [
('a@b.c', '..@b.c'),
('ab@c.d', 'a..@c.d'),
('abc@def.gh', 'a..@def.gh'),
('abcd@efg.h', 'a..@efg.h'),
('abcdefgh@efg.h', 'abcde..@efg.h'),
]
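        # The expected values illustrate the masking rule: the tail of the
        # local part is hidden behind '..' while the domain half is kept
        # intact.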
for ind, (actual_email, expected_email) in enumerate(email_addresses):
user_settings = user_services.get_or_create_user(
str(ind), actual_email)
self.assertEqual(user_settings.truncated_email, expected_email)
def test_get_email_from_username(self):
user_id = 'someUser'
username = 'username'
user_email = 'email@email.com'
user_services._create_user(user_id, user_email)
user_services.set_username(user_id, username)
self.assertEquals(user_services.get_username(user_id), username)
# Handle usernames that exist.
self.assertEquals(
user_services.get_email_from_username(username), user_email)
# Handle usernames in the same equivalence class correctly.
self.assertEquals(
user_services.get_email_from_username('USERNAME'), user_email)
# Return None for usernames which don't exist.
self.assertIsNone(
user_services.get_email_from_username('fakeUsername'))
def test_get_user_id_from_username(self):
user_id = 'someUser'
username = 'username'
user_email = 'email@email.com'
user_services._create_user(user_id, user_email)
user_services.set_username(user_id, username)
self.assertEquals(user_services.get_username(user_id), username)
# Handle usernames that exist.
self.assertEquals(
user_services.get_user_id_from_username(username), user_id)
# Handle usernames in the same equivalence class correctly.
self.assertEquals(
user_services.get_user_id_from_username('USERNAME'), user_id)
# Return None for usernames which don't exist.
self.assertIsNone(
user_services.get_user_id_from_username('fakeUsername'))
|