| repo_name (string, 5-100) | ref (string, 12-67) | path (string, 4-244) | copies (string, 1-8) | content (string, 0-1.05M, nullable) |
|---|---|---|---|---|
| cdDiaCo/myGarage | refs/heads/master | myGarageClient/views.py | 1 |
from django.shortcuts import render
from myGarageClient.forms import CarForm, UserForm
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.conf import settings
import requests
# get the user's ip address
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
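# Illustrative usage sketch only (not part of the original view code); it uses
# Django's test RequestFactory to show both lookup paths of get_client_ip().
def _example_get_client_ip():
    from django.test import RequestFactory
    factory = RequestFactory()
    # Behind a proxy: X-Forwarded-For holds "client, proxy1, ..."; the first
    # entry is treated as the originating client address.
    proxied = factory.get('/', HTTP_X_FORWARDED_FOR='203.0.113.7, 198.51.100.2')
    assert get_client_ip(proxied) == '203.0.113.7'
    # Direct connection: fall back to REMOTE_ADDR set by the WSGI server.
    direct = factory.get('/')
    assert get_client_ip(direct) == direct.META['REMOTE_ADDR']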
def first_page(request):
if request.user.is_authenticated():
return HttpResponseRedirect("/garage/")
context = RequestContext(request)
registered = False
carErrorList = {'manufacturer_name': "", 'model_name': ""}
userErrorList = {'username': "", 'password1': "", 'password2': ""}
recaptcha_validation_error = ""
recaptcha_validated = False
tempFields = {}
if request.method == 'POST':
if 'manufacturer_name' in request.POST:
#do register
# start recaptcha processing
response = {}
data = request.POST
captcha_rs = data.get('g-recaptcha-response')
url = "https://www.google.com/recaptcha/api/siteverify"
params = {
'secret': settings.RECAPTCHA_SECRET_KEY,
'response': captcha_rs,
'remoteip': get_client_ip(request)
}
verify_rs = requests.get(url, params=params, verify=True)
verify_rs = verify_rs.json()
response["status"] = verify_rs.get("success", False)
response['message'] = verify_rs.get('error-codes', None) or "Unspecified error."
print('status: ' + str(response["status"]))
print('message: ' + str(response["message"]))
if not response["status"]:
recaptcha_validation_error = "reCAPTCHA validation failed. Please try again."
else:
recaptcha_validated = True
# end recaptcha processing
user_form = UserForm(data=request.POST)
#profile_form = UserProfileForm(data=request.POST)
car_form = CarForm(data=request.POST)
tempFields['username'] = request.POST.get("username", "")
tempFields['password1'] = request.POST.get("password1", "")
tempFields['password2'] = request.POST.get("password2", "")
tempFields['carMake'] = request.POST.get("manufacturer_name", "")
tempFields['carModel'] = request.POST.get("model_name", "")
if user_form.is_valid() and car_form.is_valid() and recaptcha_validated:
user = user_form.save()
car = car_form.save(commit=False)
car.user = user
car.save()
#profile = profile_form.save(commit=False)
#profile.user = user
#profile.save()
registered = True
else:
if recaptcha_validated:
recaptcha_validation_error = "Don't forget to check the reCAPTCHA again."
print(user_form.errors, car_form.errors)
for key in car_form.getKeys():
if key in car_form.errors:
for error in car_form.errors[key]:
carErrorList[key] += error+" "
for key in user_form.getRegisterFormKeys():
if key in user_form.errors:
for error in user_form.errors[key]:
userErrorList[key] += error+" "
else:
# do login
username = request.POST['loginUsername']
password = request.POST['loginPassword']
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return HttpResponseRedirect("/garage/")
else:
return HttpResponse("Your account is disabled.")
else:
print("Invalid login details: {0}, {1}".format(username, password))
return HttpResponse("Invalid login details supplied.")
else:
user_form = UserForm()
car_form = CarForm()
#profile_form = UserProfileForm()
return render_to_response(
'index.html',
{'user_form': user_form, 'car_form': car_form,
'registered': registered, 'carErrorList': carErrorList,
'userErrorList': userErrorList, 'recaptchaValidationError': recaptcha_validation_error,
'tempFields': tempFields,}, context)
@login_required
def garageView(request, offset=False):
return render(request, 'garage.html', {'userID': request.user.id})
def getCars(request, offset=False):
if request.is_ajax():
message = "this is ajax"
else:
message = "this is not ajax"
response = {}
response['firstCar'] = {'carID': '1', 'registration_number': '1233', 'manufacturer_name': 'Dacia', 'model_name': 'Logan'}
response['secCar'] = {'carID': '2', 'registration_number': '444', 'manufacturer_name': 'Dacia', 'model_name': 'Duster'}
return JsonResponse(response)
# Use the login_required() decorator to ensure only those logged in can access the view.
@login_required
def user_logout(request):
# Since we know the user is logged in, we can now just log them out.
logout(request)
# Take the user back to the homepage.
return HttpResponseRedirect('/')
| javier-ruiz-b/docker-rasppi-images | refs/heads/master | raspberry-google-home/env/lib/python3.7/site-packages/setuptools/_distutils/util.py | 6 |
"""distutils.util
Miscellaneous utility functions -- anything that doesn't fit into
one of the other *util.py modules.
"""
import os
import re
import importlib.util
import string
import sys
from distutils.errors import DistutilsPlatformError
from distutils.dep_util import newer
from distutils.spawn import spawn
from distutils import log
from distutils.errors import DistutilsByteCompileError
from .py35compat import _optim_args_from_interpreter_flags
def get_host_platform():
"""Return a string that identifies the current platform. This is used mainly to
distinguish platform-specific build directories and platform-specific built
distributions. Typically includes the OS name and version and the
architecture (as supplied by 'os.uname()'), although the exact information
included depends on the OS; e.g. on Linux, the kernel version isn't
particularly important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc))
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
if 'amd64' in sys.version.lower():
return 'win-amd64'
if '(arm)' in sys.version.lower():
return 'win-arm32'
if '(arm64)' in sys.version.lower():
return 'win-arm64'
return sys.platform
# Set for cross builds explicitly
if "_PYTHON_HOST_PLATFORM" in os.environ:
return os.environ["_PYTHON_HOST_PLATFORM"]
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
(osname, host, release, version, machine) = os.uname()
# Convert the OS name to lowercase, remove '/' characters, and translate
# spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# We can't use "platform.architecture()[0]" because of a
# bootstrap problem. We use a dict to get an error
# if something suspicious happens.
bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
machine += ".%s" % bitness[sys.maxsize]
# fall through to standard osname-release-machine representation
elif osname[:3] == "aix":
from .py38compat import aix_platform
return aix_platform(osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile (r'[\d.]+', re.ASCII)
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
import _osx_support, distutils.sysconfig
osname, release, machine = _osx_support.get_platform_osx(
distutils.sysconfig.get_config_vars(),
osname, release, machine)
return "%s-%s-%s" % (osname, release, machine)
def get_platform():
if os.name == 'nt':
TARGET_TO_PLAT = {
'x86' : 'win32',
'x64' : 'win-amd64',
'arm' : 'win-arm32',
}
return TARGET_TO_PLAT.get(os.environ.get('VSCMD_ARG_TGT_ARCH')) or get_host_platform()
else:
return get_host_platform()
def convert_path (pathname):
"""Return 'pathname' as a name that will work on the native filesystem,
i.e. split it on '/' and put it back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while '.' in paths:
paths.remove('.')
if not paths:
return os.curdir
return os.path.join(*paths)
# convert_path ()
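# Illustrative usage sketch (not part of the original module): setup scripts
# always spell paths Unix-style, and convert_path() rewrites them for the host.
def _example_convert_path():
    # 'pkg/data/file.txt' stays as-is on POSIX and becomes
    # 'pkg\\data\\file.txt' on Windows.
    assert convert_path('pkg/data/file.txt') == os.path.join('pkg', 'data', 'file.txt')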
def change_root (new_root, pathname):
"""Return 'pathname' with 'new_root' prepended. If 'pathname' is
relative, this is equivalent to "os.path.join(new_root,pathname)".
Otherwise, it requires making 'pathname' relative and then joining the
two, which is tricky on DOS/Windows and Mac OS.
"""
if os.name == 'posix':
if not os.path.isabs(pathname):
return os.path.join(new_root, pathname)
else:
return os.path.join(new_root, pathname[1:])
elif os.name == 'nt':
(drive, path) = os.path.splitdrive(pathname)
if path[0] == '\\':
path = path[1:]
return os.path.join(new_root, path)
else:
raise DistutilsPlatformError("nothing known about platform '%s'" % os.name)
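# Illustrative usage sketch (not part of the original module): prepend a
# staging root (as "setup.py install --root" does) to install targets.
def _example_change_root():
    if os.name == 'posix':
        # absolute paths lose their leading '/' before being joined
        assert change_root('/tmp/stage', '/usr/lib/python') == '/tmp/stage/usr/lib/python'
        # relative paths are simply joined onto the new root
        assert change_root('/tmp/stage', 'usr/lib') == '/tmp/stage/usr/lib'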
_environ_checked = 0
def check_environ ():
"""Ensure that 'os.environ' has all the environment variables we
guarantee that users can use in config files, command-line options,
etc. Currently this includes:
HOME - user's home directory (Unix only)
PLAT - description of the current platform, including hardware
and OS (see 'get_platform()')
"""
global _environ_checked
if _environ_checked:
return
if os.name == 'posix' and 'HOME' not in os.environ:
try:
import pwd
os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
except (ImportError, KeyError):
# bpo-10496: if the current user identifier doesn't exist in the
# password database, do nothing
pass
if 'PLAT' not in os.environ:
os.environ['PLAT'] = get_platform()
_environ_checked = 1
def subst_vars (s, local_vars):
"""Perform shell/Perl-style variable substitution on 'string'. Every
occurrence of '$' followed by a name is considered a variable, and
variable is substituted by the value found in the 'local_vars'
dictionary, or in 'os.environ' if it's not in 'local_vars'.
'os.environ' is first checked/augmented to guarantee that it contains
certain values: see 'check_environ()'. Raise ValueError for any
variables not found in either 'local_vars' or 'os.environ'.
"""
check_environ()
def _subst (match, local_vars=local_vars):
var_name = match.group(1)
if var_name in local_vars:
return str(local_vars[var_name])
else:
return os.environ[var_name]
try:
return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
except KeyError as var:
raise ValueError("invalid variable '$%s'" % var)
# subst_vars ()
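# Illustrative usage sketch (not part of the original module): '$name'
# references resolve from 'local_vars' first, then 'os.environ'; unknown
# names raise ValueError.
def _example_subst_vars():
    local_vars = {'prefix': '/opt/app', 'version': '1.0'}
    assert subst_vars('$prefix/lib-$version', local_vars) == '/opt/app/lib-1.0'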
def grok_environment_error (exc, prefix="error: "):
# Function kept for backward compatibility.
# Used to try clever things with EnvironmentErrors,
# but nowadays str(exception) produces good messages.
return prefix + str(exc)
# Needed by 'split_quoted()'
_wordchars_re = _squote_re = _dquote_re = None
def _init_regex():
global _wordchars_re, _squote_re, _dquote_re
_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
def split_quoted (s):
"""Split a string up according to Unix shell-like rules for quotes and
backslashes. In short: words are delimited by spaces, as long as those
spaces are not escaped by a backslash, or inside a quoted string.
Single and double quotes are equivalent, and the quote characters can
be backslash-escaped. The backslash is stripped from any two-character
escape sequence, leaving only the escaped character. The quote
characters are stripped from any quoted string. Returns a list of
words.
"""
# This is a nice algorithm for splitting up a single string, since it
# doesn't require character-by-character examination. It was a little
# bit of a brain-bender to get it working right, though...
if _wordchars_re is None: _init_regex()
s = s.strip()
words = []
pos = 0
while s:
m = _wordchars_re.match(s, pos)
end = m.end()
if end == len(s):
words.append(s[:end])
break
if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
words.append(s[:end]) # we definitely have a word delimiter
s = s[end:].lstrip()
pos = 0
elif s[end] == '\\': # preserve whatever is being escaped;
# will become part of the current word
s = s[:end] + s[end+1:]
pos = end+1
else:
if s[end] == "'": # slurp singly-quoted string
m = _squote_re.match(s, end)
elif s[end] == '"': # slurp doubly-quoted string
m = _dquote_re.match(s, end)
else:
raise RuntimeError("this can't happen (bad char '%c')" % s[end])
if m is None:
raise ValueError("bad string (mismatched %s quotes?)" % s[end])
(beg, end) = m.span()
s = s[:beg] + s[beg+1:end-1] + s[end:]
pos = m.end() - 2
if pos >= len(s):
words.append(s)
break
return words
# split_quoted ()
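# Illustrative usage sketch (not part of the original module): whitespace
# splits words unless it is escaped or quoted; the quotes themselves are
# stripped from the result.
def _example_split_quoted():
    assert split_quoted('gcc -DNAME="My App" -O2') == ['gcc', '-DNAME=My App', '-O2']
    assert split_quoted("one 'two three' four") == ['one', 'two three', 'four']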
def execute (func, args, msg=None, verbose=0, dry_run=0):
"""Perform some action that affects the outside world (eg. by
writing to the filesystem). Such actions are special because they
are disabled by the 'dry_run' flag. This method takes care of all
that bureaucracy for you; all you have to do is supply the
function to call and an argument tuple for it (to embody the
"external action" being performed), and an optional message to
print.
"""
if msg is None:
msg = "%s%r" % (func.__name__, args)
if msg[-2:] == ',)': # correct for singleton tuple
msg = msg[0:-2] + ')'
log.info(msg)
if not dry_run:
func(*args)
def strtobool (val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError("invalid truth value %r" % (val,))
def byte_compile (py_files,
optimize=0, force=0,
prefix=None, base_dir=None,
verbose=1, dry_run=0,
direct=None):
"""Byte-compile a collection of Python source files to .pyc
files in a __pycache__ subdirectory. 'py_files' is a list
of files to compile; any files that don't end in ".py" are silently
skipped. 'optimize' must be one of the following:
0 - don't optimize
1 - normal optimization (like "python -O")
2 - extra optimization (like "python -OO")
If 'force' is true, all files are recompiled regardless of
timestamps.
The source filename encoded in each bytecode file defaults to the
filenames listed in 'py_files'; you can modify these with 'prefix' and
'base_dir'. 'prefix' is a string that will be stripped off of each
source filename, and 'base_dir' is a directory name that will be
prepended (after 'prefix' is stripped). You can supply either or both
(or neither) of 'prefix' and 'base_dir', as you wish.
If 'dry_run' is true, doesn't actually do anything that would
affect the filesystem.
Byte-compilation is either done directly in this interpreter process
with the standard py_compile module, or indirectly by writing a
temporary script and executing it. Normally, you should let
'byte_compile()' figure out whether to use direct compilation or not (see
the source for details). The 'direct' flag is used by the script
generated in indirect mode; unless you know what you're doing, leave
it set to None.
"""
# Late import to fix a bootstrap issue: _posixsubprocess is built by
# setup.py, but setup.py uses distutils.
import subprocess
# nothing is done if sys.dont_write_bytecode is True
if sys.dont_write_bytecode:
raise DistutilsByteCompileError('byte-compiling is disabled.')
# First, if the caller didn't force us into direct or indirect mode,
# figure out which mode we should be in. We take a conservative
# approach: choose direct mode *only* if the current interpreter is
# in debug mode and optimize is 0. If we're not in debug mode (-O
# or -OO), we don't know which level of optimization this
# interpreter is running with, so we can't do direct
# byte-compilation and be certain that it's the right thing. Thus,
# always compile indirectly if the current interpreter is in either
# optimize mode, or if either optimization level was requested by
# the caller.
if direct is None:
direct = (__debug__ and optimize == 0)
# "Indirect" byte-compilation: write a temporary script and then
# run it with the appropriate flags.
if not direct:
try:
from tempfile import mkstemp
(script_fd, script_name) = mkstemp(".py")
except ImportError:
from tempfile import mktemp
(script_fd, script_name) = None, mktemp(".py")
log.info("writing byte-compilation script '%s'", script_name)
if not dry_run:
if script_fd is not None:
script = os.fdopen(script_fd, "w")
else:
script = open(script_name, "w")
with script:
script.write("""\
from distutils.util import byte_compile
files = [
""")
# XXX would be nice to write absolute filenames, just for
# safety's sake (script should be more robust in the face of
# chdir'ing before running it). But this requires abspath'ing
# 'prefix' as well, and that breaks the hack in build_lib's
# 'byte_compile()' method that carefully tacks on a trailing
# slash (os.sep really) to make sure the prefix here is "just
# right". This whole prefix business is rather delicate -- the
# problem is that it's really a directory, but I'm treating it
# as a dumb string, so trailing slashes and so forth matter.
#py_files = map(os.path.abspath, py_files)
#if prefix:
# prefix = os.path.abspath(prefix)
script.write(",\n".join(map(repr, py_files)) + "]\n")
script.write("""
byte_compile(files, optimize=%r, force=%r,
prefix=%r, base_dir=%r,
verbose=%r, dry_run=0,
direct=1)
""" % (optimize, force, prefix, base_dir, verbose))
cmd = [sys.executable]
cmd.extend(_optim_args_from_interpreter_flags())
cmd.append(script_name)
spawn(cmd, dry_run=dry_run)
execute(os.remove, (script_name,), "removing %s" % script_name,
dry_run=dry_run)
# "Direct" byte-compilation: use the py_compile module to compile
# right here, right now. Note that the script generated in indirect
# mode simply calls 'byte_compile()' in direct mode, a weird sort of
# cross-process recursion. Hey, it works!
else:
from py_compile import compile
for file in py_files:
if file[-3:] != ".py":
# This lets us be lazy and not filter filenames in
# the "install_lib" command.
continue
# Terminology from the py_compile module:
# cfile - byte-compiled file
# dfile - purported source filename (same as 'file' by default)
if optimize >= 0:
opt = '' if optimize == 0 else optimize
cfile = importlib.util.cache_from_source(
file, optimization=opt)
else:
cfile = importlib.util.cache_from_source(file)
dfile = file
if prefix:
if file[:len(prefix)] != prefix:
raise ValueError("invalid prefix: filename %r doesn't start with %r"
% (file, prefix))
dfile = dfile[len(prefix):]
if base_dir:
dfile = os.path.join(base_dir, dfile)
cfile_base = os.path.basename(cfile)
if direct:
if force or newer(file, cfile):
log.info("byte-compiling %s to %s", file, cfile_base)
if not dry_run:
compile(file, cfile, dfile)
else:
log.debug("skipping byte-compilation of %s to %s",
file, cfile_base)
# byte_compile ()
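# Illustrative usage sketch (not part of the original module; the file and
# directory names are assumed): compile sources under a build root while
# rewriting the source path embedded in the generated .pyc files.
def _example_byte_compile():
    byte_compile(['build/lib/pkg/mod.py'],
                 optimize=0,
                 prefix='build/lib/',  # stripped from the embedded filename
                 base_dir='/usr/lib/python3/site-packages',  # prepended instead
                 verbose=1, dry_run=1)  # dry_run: only log what would happen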
def rfc822_escape (header):
"""Return a version of the string escaped for inclusion in an
RFC-822 header, by ensuring there are 8 spaces after each newline.
"""
lines = header.split('\n')
sep = '\n' + 8 * ' '
return sep.join(lines)
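# Illustrative usage sketch (not part of the original module): continuation
# lines of a multi-line header value are indented by eight spaces so the
# header stays parseable.
def _example_rfc822_escape():
    assert rfc822_escape('line one\nline two') == 'line one\n        line two'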
# 2to3 support
def run_2to3(files, fixer_names=None, options=None, explicit=None):
"""Invoke 2to3 on a list of Python files.
The files should all come from the build area, as the
modification is done in-place. To reduce the build time,
only files modified since the last invocation of this
function should be passed in the files argument."""
if not files:
return
# Make this class local, to delay import of 2to3
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
class DistutilsRefactoringTool(RefactoringTool):
def log_error(self, msg, *args, **kw):
log.error(msg, *args)
def log_message(self, msg, *args):
log.info(msg, *args)
def log_debug(self, msg, *args):
log.debug(msg, *args)
if fixer_names is None:
fixer_names = get_fixers_from_package('lib2to3.fixes')
r = DistutilsRefactoringTool(fixer_names, options=options)
r.refactor(files, write=True)
def copydir_run_2to3(src, dest, template=None, fixer_names=None,
options=None, explicit=None):
"""Recursively copy a directory, only copying new and changed files,
running run_2to3 over all newly copied Python modules afterward.
If you give a template string, it's parsed like a MANIFEST.in.
"""
from distutils.dir_util import mkpath
from distutils.file_util import copy_file
from distutils.filelist import FileList
filelist = FileList()
curdir = os.getcwd()
os.chdir(src)
try:
filelist.findall()
finally:
os.chdir(curdir)
filelist.files[:] = filelist.allfiles
if template:
for line in template.splitlines():
line = line.strip()
if not line: continue
filelist.process_template_line(line)
copied = []
for filename in filelist.files:
outname = os.path.join(dest, filename)
mkpath(os.path.dirname(outname))
res = copy_file(os.path.join(src, filename), outname, update=1)
if res[1]: copied.append(outname)
run_2to3([fn for fn in copied if fn.lower().endswith('.py')],
fixer_names=fixer_names, options=options, explicit=explicit)
return copied
class Mixin2to3:
'''Mixin class for commands that run 2to3.
To configure 2to3, setup scripts may either change
the class variables, or inherit from individual commands
to override how 2to3 is invoked.'''
# provide list of fixers to run;
# defaults to all from lib2to3.fixers
fixer_names = None
# options dictionary
options = None
# list of fixers to invoke even though they are marked as explicit
explicit = None
def run_2to3(self, files):
return run_2to3(files, self.fixer_names, self.options, self.explicit)
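# Illustrative sketch only (not part of the original module): a command in
# the style of the old build_py_2to3, converting the sources that build_py
# copies into the build tree.
def _example_build_py_2to3():
    from distutils.command.build_py import build_py
    class ExampleBuildPy2to3(build_py, Mixin2to3):
        def run(self):
            self.updated_files = []
            build_py.run(self)
            # convert only the files actually (re)copied by this run
            self.run_2to3(self.updated_files)
        def build_module(self, module, module_file, package):
            res = build_py.build_module(self, module, module_file, package)
            if res[1]:  # copy_file() returns (filename, copied_flag)
                self.updated_files.append(res[0])
            return res
    return ExampleBuildPy2to3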
| weidnem/IntroPython2016 | refs/heads/master | students/enrique_silva/session03/rot13.py | 3 |
import string
alphabet = string.ascii_lowercase
rot_13_text = "Zntargvp sebz bhgfvqr arne pbeare"
# rotate each lowercase letter 13 places through the alphabet;
# leave every other character (spaces, capitals) unchanged
decoded = "".join(
    alphabet[(alphabet.index(letter) + 13) % 26] if letter in alphabet else letter
    for letter in rot_13_text)
print(decoded)
| ltilve/ChromiumGStreamerBackend | refs/heads/master | third_party/closure_linter/closure_linter/full_test.py | 84 |
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Full regression-type (Medium) tests for gjslint.
Tests every error that can be thrown by gjslint. Based heavily on
devtools/javascript/gpylint/full_test.py
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import error_check
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = True
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
flags.FLAGS.jslint_error = error_check.Rule.ALL
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
# TODO(user): Figure out how to list the directory.
_TEST_FILES = [
'all_js_wrapped.js',
'blank_lines.js',
'ends_with_block.js',
'empty_file.js',
'externs.js',
'externs_jsdoc.js',
'goog_scope.js',
'html_parse_error.html',
'indentation.js',
'interface.js',
'jsdoc.js',
'limited_doc_checks.js',
'minimal.js',
'other.js',
'provide_blank.js',
'provide_extra.js',
'provide_missing.js',
'require_all_caps.js',
'require_blank.js',
'require_extra.js',
'require_function.js',
'require_function_missing.js',
'require_function_through_both.js',
'require_function_through_namespace.js',
'require_interface.js',
'require_interface_base.js',
'require_lower_case.js',
'require_missing.js',
'require_numeric.js',
'require_provide_blank.js',
'require_provide_missing.js',
'require_provide_ok.js',
'semicolon_missing.js',
'simple.html',
'spaces.js',
'tokenizer.js',
'unparseable.js',
'unused_local_variables.js',
'unused_private_members.js',
'utf8.html',
]
class GJsLintTestSuite(unittest.TestSuite):
"""Test suite to run a GJsLintTest for each of several files.
If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
testdata to test. Otherwise, _TEST_FILES is used.
"""
def __init__(self, tests=()):
unittest.TestSuite.__init__(self, tests)
argv = sys.argv and sys.argv[1:] or []
if argv:
test_files = argv
else:
test_files = _TEST_FILES
for test_file in test_files:
resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
self.addTest(
filetestcase.AnnotatedFileTestCase(
resource_path,
runner.Run,
errors.ByName))
if __name__ == '__main__':
# Don't let main parse args; it happens in the TestSuite.
googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
| Flowerfan524/TriClustering | refs/heads/master | reid/loss/tri_clu_loss.py | 1 |
from __future__ import absolute_import
import torch
from torch import nn
from torch.autograd import Variable
import numpy as np
class TripletClusteringLoss(nn.Module):
def __init__(self, clusters, margin=0,):
super(TripletClusteringLoss, self).__init__()
assert isinstance(clusters, torch.autograd.Variable)
self.clusters = clusters
self.margin = margin
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
self.num_classes = clusters.size(0)
self.num_features = clusters.size(1)
self.dist = torch.pow(self.clusters, 2).sum(dim=1, keepdim=True)
def forward(self, inputs, targets):
assert self.num_features == inputs.size(1)
n = inputs.size(0)
dist = self.dist.expand(self.num_classes, n)
dist += torch.pow(inputs, 2).sum(dim=1).t()
dist.addmm_(1, -2, self.clusters, inputs.t())
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
dist = dist.t()
# For each anchor, find the hardest positive and negative
mask = torch.zeros(n,self.num_classes,out=torch.ByteTensor())
target_ids = targets.data.numpy().astype(int)
mask[np.arange(n),target_ids] = 1
dist_ap = dist[mask == 1]
dist_an = dist[mask == 0].view(n, -1).min(dim=1)[0]  # keep the values; min(dim=...) also returns indices
# Compute ranking hinge loss
y = dist_an.data.new()
y.resize_as_(dist_an.data)
y.fill_(1)
y = Variable(y)
loss = self.ranking_loss(dist_an, dist_ap, y)
prec = (dist_an.data > dist_ap.data).sum() * 1. / y.size(0)
return loss, prec
def update_clusters(self,clusters):
self.clusters = clusters
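# Illustrative usage sketch (not part of the original module; shapes are
# assumed and the Variable-based API matches the pre-0.4 PyTorch style used
# above): 10 cluster centres of 128-d features scored against a batch of 4.
def _example_triplet_clustering_loss():
    clusters = Variable(torch.randn(10, 128))
    criterion = TripletClusteringLoss(clusters, margin=0.5)
    inputs = Variable(torch.randn(4, 128))               # embeddings
    targets = Variable(torch.LongTensor([0, 3, 3, 7]))   # their cluster ids
    loss, prec = criterion(inputs, targets)              # hinge loss, precision
    return loss, prec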
| vikasraunak/mptcp-1 | refs/heads/development | src/network/bindings/callbacks_list.py | 331 |
callback_classes = [
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| resb53/witcher | refs/heads/master | maze/maze-output.py | 1 |
#!/usr/local/bin/python3
import json
import sys
#prepare data structure
maze = { "n" : {
"aa":"rb", "ab":"", "ac":"", "ad":"", "ae":"st", "af":"", "ag":"", "ah":"", "ai":"", "aj":"st", "ak":"", "al":"", "am":"lb", "an":"rb", "ao":"", "ap":"", "aq":"st", "ar":"", "as":"", "at":"lb", "au":"rb", "av":"lb", "aw":"rb", "ax":"st", "ay":"lb", "az":"rb", "a1":"", "a2":"", "a3":"st", "a4":"lb",
"ba":"s", "bb":"rb", "bc":"", "bd":"", "be":"", "bf":"rb", "bg":"", "bh":"", "bi":"lb", "bj":"s", "bk":"rb", "bl":"", "bm":"lt", "bn":"rt", "bo":"", "bp":"", "bq":"s", "br":"", "bs":"", "bt":"", "bu":"s", "bv":"", "bw":"", "bx":"s", "by":"", "bz":"s", "b1":"rb", "b2":"", "b3":"s", "b4":"s",
"ca":"s", "cb":"s", "cc":"rb", "cd":"", "ce":"st", "cf":"", "cg":"", "ch":"", "ci":"s", "cj":"s", "ck":"s", "cl":"de", "cm":"s", "cn":"s", "co":"rb", "cp":"", "cq":"s", "cr":"rb", "cs":"st", "ct":"lb", "cu":"s", "cv":"rb", "cw":"", "cx":"x", "cy":"", "cz":"", "c1":"s", "c2":"", "c3":"", "c4":"s",
"da":"s", "db":"s", "dc":"", "dd":"", "de":"s", "df":"rb", "dg":"", "dh":"", "di":"", "dj":"s", "dk":"s", "dl":"", "dm":"", "dn":"s", "do":"rt", "dp":"", "dq":"", "dr":"s", "ds":"s", "dt":"s", "du":"s", "dv":"", "dw":"", "dx":"s", "dy":"or", "dz":"rb", "d1":"", "d2":"st", "d3":"lb", "d4":"s",
"ea":"s", "eb":"", "ec":"", "ed":"", "ee":"lt", "ef":"", "eg":"", "eh":"", "ei":"", "ej":"", "ek":"rt", "el":"", "em":"", "en":"x", "eo":"", "ep":"", "eq":"", "er":"", "es":"s", "et":"", "eu":"s", "ev":"rb", "ew":"", "ex":"", "ey":"", "ez":"", "e1":"", "e2":"s", "e3":"", "e4":"",
"fa":"rt", "fb":"", "fc":"st", "fd":"", "fe":"", "ff":"lb", "fg":"rb", "fh":"", "fi":"st", "fj":"lb", "fk":"s", "fl":"rb", "fm":"lb", "fn":"s", "fo":"rb", "fp":"lb", "fq":"rb", "fr":"", "fs":"", "ft":"", "fu":"lt", "fv":"", "fw":"", "fx":"lb", "fy":"rb", "fz":"", "f1":"", "f2":"x", "f3":"", "f4":"lb",
"ga":"s", "gb":"de", "gc":"s", "gd":"rb", "ge":"lb", "gf":"s", "gg":"s", "gh":"de", "gi":"s", "gj":"s", "gk":"s", "gl":"s", "gm":"s", "gn":"s", "go":"s", "gp":"s", "gq":"s", "gr":"rb", "gs":"", "gt":"lb", "gu":"", "gv":"", "gw":"", "gx":"", "gy":"", "gz":"", "g1":"", "g2":"s", "g3":"de", "g4":"s",
"ha":"s", "hb":"s", "hc":"s", "hd":"", "he":"s", "hf":"s", "hg":"", "hh":"", "hi":"s", "hj":"s", "hk":"s", "hl":"s", "hm":"s", "hn":"s", "ho":"s", "hp":"rt", "hq":"lt", "hr":"s", "hs":"de", "ht":"s", "hu":"rb", "hv":"", "hw":"", "hx":"lb", "hy":"rb", "hz":"", "h1":"lb", "h2":"s", "h3":"s", "h4":"s",
"ia":"s", "ib":"", "ic":"", "id":"", "ie":"", "if":"rt", "ig":"", "ih":"", "ii":"", "ij":"s", "ik":"s", "il":"", "im":"s", "in":"", "io":"", "ip":"s", "iq":"s", "ir":"", "is":"", "it":"s", "iu":"s", "iv":"rb", "iw":"", "ix":"s", "iy":"", "iz":"", "i1":"s", "i2":"s", "i3":"", "i4":"",
"ja":"", "jb":"", "jc":"", "jd":"", "je":"", "jf":"", "jg":"", "jh":"", "ji":"", "jj":"s", "jk":"", "jl":"", "jm":"", "jn":"", "jo":"st", "jp":"", "jq":"", "jr":"", "js":"", "jt":"", "ju":"s", "jv":"", "jw":"", "jx":"", "jy":"", "jz":"", "j1":"", "j2":"", "j3":"", "j4":"",
"ka":"rb", "kb":"lb", "kc":"rb", "kd":"", "ke":"", "kf":"", "kg":"rb", "kh":"", "ki":"", "kj":"lt", "kk":"rb", "kl":"st", "km":"", "kn":"lb", "ko":"s", "kp":"", "kq":"", "kr":"st", "ks":"", "kt":"lb", "ku":"s", "kv":"", "kw":"", "kx":"lb", "ky":"", "kz":"lb", "k1":"rb", "k2":"", "k3":"", "k4":"lb",
"la":"", "lb":"s", "lc":"", "ld":"", "le":"st", "lf":"st", "lg":"", "lh":"", "li":"", "lj":"s", "lk":"", "ll":"s", "lm":"", "ln":"", "lo":"rt", "lp":"", "lq":"", "lr":"", "ls":"de", "lt":"s", "lu":"rt", "lv":"", "lw":"lb", "lx":"", "ly":"", "lz":"lt", "l1":"s", "l2":"rb", "l3":"lb", "l4":"s",
"ma":"", "mb":"x", "mc":"", "md":"lb", "me":"s", "mf":"s", "mg":"rb", "mh":"", "mi":"", "mj":"lt", "mk":"rb", "ml":"", "mm":"", "mn":"", "mo":"lt", "mp":"rb", "mq":"st", "mr":"lb", "ms":"s", "mt":"s", "mu":"s", "mv":"rb", "mw":"", "mx":"st", "my":"lb", "mz":"s", "m1":"s", "m2":"", "m3":"s", "m4":"s",
"na":"rb", "nb":"lt", "nc":"de", "nd":"s", "ne":"s", "nf":"", "ng":"", "nh":"st", "ni":"lb", "nj":"s", "nk":"s", "nl":"rb", "nm":"", "nn":"", "no":"s", "np":"s", "nq":"s", "nr":"s", "ns":"", "nt":"lt", "nu":"s", "nv":"s", "nw":"", "nx":"", "ny":"s", "nz":"", "n1":"rt", "n2":"", "n3":"", "n4":"s",
"oa":"", "ob":"s", "oc":"", "od":"", "oe":"s", "of":"rb", "og":"lb", "oh":"s", "oi":"s", "oj":"s", "ok":"s", "ol":"", "om":"st", "on":"", "oo":"lt", "op":"s", "oq":"s", "or":"s", "os":"de", "ot":"s", "ou":"", "ov":"", "ow":"", "ox":"", "oy":"x", "oz":"", "o1":"", "o2":"rb", "o3":"", "o4":"",
"pa":"rb", "pb":"", "pc":"", "pd":"", "pe":"lt", "pf":"s", "pg":"s", "ph":"", "pi":"s", "pj":"s", "pk":"", "pl":"", "pm":"s", "pn":"rb", "po":"", "pp":"s", "pq":"s", "pr":"s", "ps":"s", "pt":"s", "pu":"rb", "pv":"", "pw":"", "px":"lb", "py":"s", "pz":"rb", "p1":"", "p2":"", "p3":"", "p4":"lb",
"qa":"s", "qb":"de", "qc":"rb", "qd":"", "qe":"", "qf":"s", "qg":"", "qh":"", "qi":"", "qj":"s", "qk":"", "ql":"", "qm":"lt", "qn":"", "qo":"", "qp":"", "qq":"s", "qr":"", "qs":"", "qt":"s", "qu":"s", "qv":"rb", "qw":"lb", "qx":"s", "qy":"s", "qz":"s", "q1":"rb", "q2":"", "q3":"lb", "q4":"s",
"ra":"s", "rb":"s", "rc":"", "rd":"", "re":"", "rf":"", "rg":"", "rh":"", "ri":"lb", "rj":"rt", "rk":"", "rl":"", "rm":"", "rn":"st", "ro":"", "rp":"", "rq":"", "rr":"st", "rs":"", "rt":"", "ru":"s", "rv":"s", "rw":"", "rx":"s", "ry":"s", "rz":"s", "r1":"s", "r2":"de", "r3":"", "r4":"s",
"sa":"s", "sb":"s", "sc":"", "sd":"", "se":"st", "sf":"lb", "sg":"rb", "sh":"", "si":"", "sj":"s", "sk":"rb", "sl":"", "sm":"", "sn":"s", "so":"", "sp":"", "sq":"lb", "sr":"s", "ss":"", "st":"lb", "su":"s", "sv":"", "sw":"", "sx":"", "sy":"s", "sz":"s", "s1":"", "s2":"", "s3":"", "s4":"",
"ta":"", "tb":"", "tc":"", "td":"", "te":"", "tf":"s", "tg":"", "th":"", "ti":"", "tj":"", "tk":"", "tl":"", "tm":"", "tn":"", "to":"", "tp":"", "tq":"", "tr":"", "ts":"", "tt":"", "tu":"", "tv":"", "tw":"", "tx":"", "ty":"", "tz":"", "t1":"", "t2":"", "t3":"", "t4":"lb",
"ua":"rb", "ub":"lb", "uc":"rb", "ud":"", "ue":"", "uf":"", "ug":"", "uh":"", "ui":"", "uj":"lb", "uk":"rb", "ul":"", "um":"", "un":"", "uo":"", "up":"", "uq":"", "ur":"", "us":"", "ut":"lb", "uu":"rb", "uv":"", "uw":"st", "ux":"", "uy":"", "uz":"", "u1":"lb", "u2":"rb", "u3":"", "u4":"s",
"va":"s", "vb":"s", "vc":"s", "vd":"rb", "ve":"", "vf":"st", "vg":"st", "vh":"", "vi":"st", "vj":"lt", "vk":"s", "vl":"rb", "vm":"", "vn":"st", "vo":"", "vp":"st", "vq":"", "vr":"", "vs":"lb", "vt":"s", "vu":"s", "vv":"de", "vw":"s", "vx":"rb", "vy":"", "vz":"lb", "v1":"", "v2":"", "v3":"rb", "v4":"lt",
"wa":"s", "wb":"s", "wc":"s", "wd":"", "we":"", "wf":"s", "wg":"s", "wh":"de", "wi":"s", "wj":"", "wk":"s", "wl":"", "wm":"", "wn":"s", "wo":"rb", "wp":"lt", "wq":"rb", "wr":"", "ws":"", "wt":"s", "wu":"", "wv":"", "ww":"s", "wx":"s", "wy":"de", "wz":"s", "w1":"", "w2":"", "w3":"lt", "w4":"s",
"xa":"", "xb":"", "xc":"x", "xd":"", "xe":"", "xf":"lt", "xg":"rt", "xh":"lt", "xi":"rt", "xj":"lb", "xk":"rt", "xl":"", "xm":"", "xn":"lt", "xo":"", "xp":"s", "xq":"", "xr":"", "xs":"lb", "xt":"s", "xu":"rb", "xv":"", "xw":"", "xx":"s", "xy":"", "xz":"", "x1":"", "x2":"lb", "x3":"s", "x4":"s",
"ya":"rb", "yb":"lb", "yc":"s", "yd":"rb", "ye":"lb", "yf":"s", "yg":"s", "yh":"s", "yi":"s", "yj":"s", "yk":"s", "yl":"rb", "ym":"", "yn":"", "yo":"", "yp":"", "yq":"", "yr":"lb", "ys":"", "yt":"lt", "yu":"", "yv":"", "yw":"lb", "yx":"rt", "yy":"", "yz":"st", "y1":"lb", "y2":"s", "y3":"", "y4":"lt",
"za":"s", "zb":"s", "zc":"s", "zd":"s", "ze":"s", "zf":"s", "zg":"s", "zh":"s", "zi":"", "zj":"s", "zk":"s", "zl":"", "zm":"", "zn":"", "zo":"", "zp":"", "zq":"", "zr":"", "zs":"lb", "zt":"s", "zu":"rb", "zv":"", "zw":"", "zx":"s", "zy":"", "zz":"", "z1":"s", "z2":"", "z3":"lb", "z4":"s",
"1a":"s", "1b":"", "1c":"s", "1d":"", "1e":"s", "1f":"rt", "1g":"", "1h":"s", "1i":"rb", "1j":"lt", "1k":"rt", "1l":"", "1m":"", "1n":"", "1o":"st", "1p":"", "1q":"", "1r":"lb", "1s":"s", "1t":"s", "1u":"s", "1v":"rb", "1w":"", "1x":"x", "1y":"", "1z":"", "11":"x", "12":"lb", "13":"s", "14":"s",
"2a":"rt", "2b":"", "2c":"", "2d":"", "2e":"x", "2f":"", "2g":"", "2h":"", "2i":"s", "2j":"s", "2k":"s", "2l":"rb", "2m":"", "2n":"lb", "2o":"s", "2p":"rb", "2q":"", "2r":"s", "2s":"s", "2t":"s", "2u":"s", "2v":"s", "2w":"de", "2x":"s", "2y":"rb", "2z":"", "21":"lt", "22":"s", "23":"", "24":"",
"3a":"s", "3b":"", "3c":"", "3d":"lb", "3e":"s", "3f":"rb", "3g":"", "3h":"", "3i":"", "3j":"", "3k":"s", "3l":"s", "3m":"", "3n":"", "3o":"s", "3p":"", "3q":"", "3r":"", "3s":"s", "3t":"s", "3u":"", "3v":"", "3w":"", "3x":"s", "3y":"", "3z":"", "31":"s", "32":"", "33":"", "34":"lb",
"4a":"", "4b":"", "4c":"", "4d":"", "4e":"", "4f":"", "4g":"", "4h":"", "4i":"", "4j":"", "4k":"", "4l":"", "4m":"", "4n":"", "4o":"", "4p":"", "4q":"", "4r":"", "4s":"", "4t":"", "4u":"", "4v":"", "4w":"", "4x":"", "4y":"", "4z":"", "41":"", "42":"", "43":"", "44":""
},
"e" : {
'aa':'', 'ab':'s', 'ac':'s', 'ad':'s', 'ae':'rt', 'af':'s', 'ag':'s', 'ah':'s', 'ai':'s', 'aj':'rt', 'ak':'s', 'al':'s', 'am':'rb', 'an':'', 'ao':'s', 'ap':'s', 'aq':'rt', 'ar':'s', 'as':'s', 'at':'rb', 'au':'', 'av':'rb', 'aw':'', 'ax':'rt', 'ay':'rb', 'az':'', 'a1':'lt', 'a2':'s', 'a3':'rt', 'a4':'rb',
'ba':'', 'bb':'', 'bc':'s', 'bd':'s', 'be':'lb', 'bf':'', 'bg':'s', 'bh':'s', 'bi':'rb', 'bj':'', 'bk':'', 'bl':'s', 'bm':'st', 'bn':'', 'bo':'s', 'bp':'de', 'bq':'', 'br':'', 'bs':'s', 'bt':'lb', 'bu':'', 'bv':'', 'bw':'lb', 'bx':'', 'by':'', 'bz':'', 'b1':'', 'b2':'de', 'b3':'', 'b4':'',
'ca':'', 'cb':'', 'cc':'', 'cd':'s', 'ce':'rt', 'cf':'lt', 'cg':'s', 'ch':'de', 'ci':'', 'cj':'', 'ck':'', 'cl':'', 'cm':'', 'cn':'', 'co':'', 'cp':'de', 'cq':'', 'cr':'', 'cs':'rt', 'ct':'rb', 'cu':'', 'cv':'', 'cw':'s', 'cx':'x', 'cy':'s', 'cz':'lb', 'c1':'', 'c2':'', 'c3':'lb', 'c4':'',
'da':'', 'db':'', 'dc':'', 'dd':'op', 'de':'', 'df':'', 'dg':'s', 'dh':'s', 'di':'lb', 'dj':'', 'dk':'', 'dl':'', 'dm':'lb', 'dn':'', 'do':'', 'dp':'s', 'dq':'lb', 'dr':'', 'ds':'', 'dt':'', 'du':'', 'dv':'', 'dw':'de', 'dx':'', 'dy':'', 'dz':'', 'd1':'lt', 'd2':'rt', 'd3':'rb', 'd4':'',
'ea':'', 'eb':'', 'ec':'s', 'ed':'s', 'ee':'st', 'ef':'', 'eg':'s', 'eh':'s', 'ei':'s', 'ej':'lb', 'ek':'', 'el':'s', 'em':'s', 'en':'x', 'eo':'lt', 'ep':'s', 'eq':'s', 'er':'lb', 'es':'', 'et':'', 'eu':'', 'ev':'', 'ew':'s', 'ex':'lt', 'ey':'lb', 'ez':'', 'e1':'de', 'e2':'', 'e3':'', 'e4':'lb',
'fa':'', 'fb':'s', 'fc':'rt', 'fd':'s', 'fe':'lt', 'ff':'rb', 'fg':'', 'fh':'s', 'fi':'rt', 'fj':'rb', 'fk':'', 'fl':'', 'fm':'rb', 'fn':'', 'fo':'', 'fp':'rb', 'fq':'', 'fr':'s', 'fs':'lt', 'ft':'s', 'fu':'st', 'fv':'', 'fw':'s', 'fx':'rb', 'fy':'', 'fz':'s', 'f1':'s', 'f2':'x', 'f3':'s', 'f4':'rb',
'ga':'', 'gb':'', 'gc':'', 'gd':'', 'ge':'rb', 'gf':'', 'gg':'', 'gh':'', 'gi':'', 'gj':'', 'gk':'', 'gl':'', 'gm':'', 'gn':'', 'go':'', 'gp':'', 'gq':'', 'gr':'', 'gs':'s', 'gt':'rb', 'gu':'', 'gv':'s', 'gw':'s', 'gx':'lb', 'gy':'', 'gz':'s', 'g1':'de', 'g2':'', 'g3':'', 'g4':'',
'ha':'', 'hb':'', 'hc':'', 'hd':'', 'he':'', 'hf':'', 'hg':'', 'hh':'lb', 'hi':'', 'hj':'', 'hk':'', 'hl':'', 'hm':'', 'hn':'', 'ho':'', 'hp':'', 'hq':'st', 'hr':'', 'hs':'', 'ht':'', 'hu':'', 'hv':'s', 'hw':'s', 'hx':'rb', 'hy':'', 'hz':'s', 'h1':'rb', 'h2':'', 'h3':'', 'h4':'',
'ia':'', 'ib':'', 'ic':'lt', 'id':'s', 'ie':'lb', 'if':'', 'ig':'s', 'ih':'s', 'ii':'lb', 'ij':'', 'ik':'', 'il':'', 'im':'', 'in':'', 'io':'lb', 'ip':'', 'iq':'', 'ir':'', 'is':'lb', 'it':'', 'iu':'', 'iv':'', 'iw':'de', 'ix':'', 'iy':'', 'iz':'de', 'i1':'', 'i2':'', 'i3':'', 'i4':'lb',
'ja':'', 'jb':'s', 'jc':'s', 'jd':'s', 'je':'s', 'jf':'lt', 'jg':'s', 'jh':'s', 'ji':'de', 'jj':'', 'jk':'', 'jl':'s', 'jm':'lt', 'jn':'s', 'jo':'rt', 'jp':'lb', 'jq':'', 'jr':'s', 'js':'s', 'jt':'lb', 'ju':'', 'jv':'', 'jw':'s', 'jx':'lt', 'jy':'s', 'jz':'s', 'j1':'lt', 'j2':'lt', 'j3':'s', 'j4':'de',
'ka':'', 'kb':'rb', 'kc':'', 'kd':'s', 'ke':'s', 'kf':'de', 'kg':'', 'kh':'s', 'ki':'s', 'kj':'st', 'kk':'', 'kl':'rt', 'km':'s', 'kn':'rb', 'ko':'', 'kp':'', 'kq':'s', 'kr':'rt', 'ks':'s', 'kt':'rb', 'ku':'', 'kv':'', 'kw':'s', 'kx':'rb', 'ky':'', 'kz':'rb', 'k1':'', 'k2':'s', 'k3':'s', 'k4':'rb', 'la':'',
'lb':'', 'lc':'', 'ld':'s', 'le':'rt', 'lf':'rt', 'lg':'lt', 'lh':'s', 'li':'de', 'lj':'', 'lk':'', 'll':'', 'lm':'', 'ln':'lb', 'lo':'', 'lp':'s', 'lq':'s', 'lr':'lb', 'ls':'', 'lt':'', 'lu':'', 'lv':'s', 'lw':'rb', 'lx':'', 'ly':'s', 'lz':'st', 'l1':'', 'l2':'', 'l3':'rb', 'l4':'',
'ma':'', 'mb':'x', 'mc':'s', 'md':'rb', 'me':'', 'mf':'', 'mg':'', 'mh':'s', 'mi':'s', 'mj':'st', 'mk':'', 'ml':'lt', 'mm':'s', 'mn':'s', 'mo':'st', 'mp':'', 'mq':'rt', 'mr':'rb', 'ms':'', 'mt':'', 'mu':'', 'mv':'', 'mw':'lt', 'mx':'rt', 'my':'rb', 'mz':'', 'm1':'', 'm2':'', 'm3':'', 'm4':'',
'na':'', 'nb':'st', 'nc':'', 'nd':'', 'ne':'', 'nf':'', 'ng':'', 'nh':'rt', 'ni':'rb', 'nj':'', 'nk':'', 'nl':'', 'nm':'s', 'nn':'oc', 'no':'', 'np':'', 'nq':'', 'nr':'', 'ns':'', 'nt':'st', 'nu':'', 'nv':'', 'nw':'', 'nx':'lb', 'ny':'', 'nz':'', 'n1':'', 'n2':'s', 'n3':'lb', 'n4':'',
'oa':'', 'ob':'', 'oc':'', 'od':'lb', 'oe':'', 'of':'', 'og':'rb', 'oh':'', 'oi':'', 'oj':'', 'ok':'', 'ol':'', 'om':'rt', 'on':'s', 'oo':'st', 'op':'', 'oq':'', 'or':'', 'os':'', 'ot':'', 'ou':'', 'ov':'', 'ow':'s', 'ox':'s', 'oy':'x', 'oz':'s', 'o1':'lb', 'o2':'', 'o3':'s', 'o4':'lb',
'pa':'', 'pb':'lt', 'pc':'s', 'pd':'s', 'pe':'st', 'pf':'', 'pg':'', 'ph':'', 'pi':'', 'pj':'', 'pk':'', 'pl':'de', 'pm':'', 'pn':'', 'po':'lb', 'pp':'', 'pq':'', 'pr':'', 'ps':'', 'pt':'', 'pu':'', 'pv':'s', 'pw':'s', 'px':'rb', 'py':'', 'pz':'', 'p1':'s', 'p2':'lt', 'p3':'s', 'p4':'rb',
'qa':'', 'qb':'', 'qc':'', 'qd':'de', 'qe':'', 'qf':'', 'qg':'', 'qh':'s', 'qi':'lb', 'qj':'', 'qk':'', 'ql':'s', 'qm':'st', 'qn':'', 'qo':'s', 'qp':'lb', 'qq':'', 'qr':'', 'qs':'lb', 'qt':'', 'qu':'', 'qv':'', 'qw':'rb', 'qx':'', 'qy':'', 'qz':'', 'q1':'', 'q2':'s', 'q3':'rb', 'q4':'',
'ra':'', 'rb':'', 'rc':'', 'rd':'s', 're':'s', 'rf':'lt', 'rg':'s', 'rh':'s', 'ri':'rb', 'rj':'', 'rk':'s', 'rl':'s', 'rm':'lt', 'rn':'rt', 'ro':'s', 'rp':'s', 'rq':'lt', 'rr':'rt', 'rs':'s', 'rt':'lb', 'ru':'', 'rv':'', 'rw':'', 'rx':'', 'ry':'', 'rz':'', 'r1':'', 'r2':'', 'r3':'', 'r4':'',
'sa':'', 'sb':'', 'sc':'', 'sd':'s', 'se':'rt', 'sf':'rb', 'sg':'', 'sh':'s', 'si':'lb', 'sj':'', 'sk':'', 'sl':'s', 'sm':'de', 'sn':'', 'so':'', 'sp':'s', 'sq':'rb', 'sr':'', 'ss':'', 'st':'rb', 'su':'', 'sv':'', 'sw':'s', 'sx':'lb', 'sy':'', 'sz':'', 's1':'', 's2':'lt', 's3':'s', 's4':'lb',
'ta':'', 'tb':'lt', 'tc':'s', 'td':'s', 'te':'lb', 'tf':'', 'tg':'', 'th':'s', 'ti':'s', 'tj':'lb', 'tk':'', 'tl':'s', 'tm':'s', 'tn':'lt', 'to':'s', 'tp':'s', 'tq':'lb', 'tr':'', 'ts':'s', 'tt':'lb', 'tu':'', 'tv':'s', 'tw':'s', 'tx':'s', 'ty':'lb', 'tz':'', 't1':'s', 't2':'s', 't3':'s', 't4':'rb',
'ua':'', 'ub':'rb', 'uc':'', 'ud':'s', 'ue':'s', 'uf':'lt', 'ug':'s', 'uh':'s', 'ui':'s', 'uj':'rb', 'uk':'', 'ul':'s', 'um':'s', 'un':'s', 'uo':'s', 'up':'s', 'uq':'s', 'ur':'s', 'us':'s', 'ut':'rb', 'uu':'', 'uv':'s', 'uw':'rt', 'ux':'s', 'uy':'s', 'uz':'s', 'u1':'rb', 'u2':'', 'u3':'de', 'u4':'',
'va':'', 'vb':'', 'vc':'', 'vd':'', 've':'s', 'vf':'rt', 'vg':'rt', 'vh':'s', 'vi':'rt', 'vj':'st', 'vk':'', 'vl':'', 'vm':'s', 'vn':'rt', 'vo':'s', 'vp':'rt', 'vq':'s', 'vr':'s', 'vs':'rb', 'vt':'', 'vu':'', 'vv':'', 'vw':'', 'vx':'', 'vy':'s', 'vz':'rb', 'v1':'', 'v2':'lb', 'v3':'', 'v4':'st',
'wa':'', 'wb':'', 'wc':'', 'wd':'', 'we':'de', 'wf':'', 'wg':'', 'wh':'', 'wi':'', 'wj':'', 'wk':'', 'wl':'', 'wm':'de', 'wn':'', 'wo':'', 'wp':'st', 'wq':'', 'wr':'s', 'ws':'lb', 'wt':'', 'wu':'', 'wv':'lb', 'ww':'', 'wx':'', 'wy':'', 'wz':'', 'w1':'', 'w2':'s', 'w3':'st', 'w4':'',
'xa':'', 'xb':'s', 'xc':'x', 'xd':'s', 'xe':'s', 'xf':'st', 'xg':'', 'xh':'st', 'xi':'', 'xj':'rb', 'xk':'', 'xl':'s', 'xm':'s', 'xn':'st', 'xo':'', 'xp':'', 'xq':'', 'xr':'s', 'xs':'rb', 'xt':'', 'xu':'', 'xv':'s', 'xw':'lb', 'xx':'', 'xy':'', 'xz':'lt', 'x1':'s', 'x2':'rb', 'x3':'', 'x4':'',
'ya':'', 'yb':'rb', 'yc':'', 'yd':'', 'ye':'rb', 'yf':'', 'yg':'', 'yh':'', 'yi':'', 'yj':'', 'yk':'', 'yl':'', 'ym':'de', 'yn':'', 'yo':'s', 'yp':'lt', 'yq':'s', 'yr':'rb', 'ys':'', 'yt':'st', 'yu':'', 'yv':'s', 'yw':'rb', 'yx':'', 'yy':'s', 'yz':'rt', 'y1':'rb', 'y2':'', 'y3':'', 'y4':'st',
'za':'', 'zb':'', 'zc':'', 'zd':'', 'ze':'', 'zf':'', 'zg':'', 'zh':'', 'zi':'', 'zj':'', 'zk':'', 'zl':'', 'zm':'s', 'zn':'s', 'zo':'s', 'zp':'s', 'zq':'s', 'zr':'lt', 'zs':'rb', 'zt':'', 'zu':'', 'zv':'s', 'zw':'lb', 'zx':'', 'zy':'', 'zz':'lb', 'z1':'', 'z2':'', 'z3':'rb', 'z4':'',
'1a':'', '1b':'', '1c':'', '1d':'', '1e':'', '1f':'', '1g':'lb', '1h':'', '1i':'', '1j':'st', '1k':'', '1l':'s', '1m':'s', '1n':'s', '1o':'rt', '1p':'s', '1q':'s', '1r':'rb', '1s':'', '1t':'', '1u':'', '1v':'', '1w':'s', '1x':'x', '1y':'s', '1z':'s', '11':'x', '12':'rb', '13':'', '14':'',
'2a':'', '2b':'s', '2c':'lt', '2d':'s', '2e':'x', '2f':'lb', '2g':'', '2h':'lb', '2i':'', '2j':'', '2k':'', '2l':'', '2m':'s', '2n':'rb', '2o':'', '2p':'', '2q':'de', '2r':'', '2s':'', '2t':'', '2u':'', '2v':'', '2w':'', '2x':'', '2y':'', '2z':'s', '21':'st', '22':'', '23':'', '24':'lb',
'3a':'', '3b':'', '3c':'s', '3d':'rb', '3e':'', '3f':'', '3g':'s', '3h':'s', '3i':'lb', '3j':'', '3k':'', '3l':'', '3m':'', '3n':'lb', '3o':'', '3p':'', '3q':'s', '3r':'lb', '3s':'', '3t':'', '3u':'', '3v':'lt', '3w':'lb', '3x':'', '3y':'', '3z':'de', '31':'', '32':'', '33':'s', '34':'rb',
'41':'lt', '42':'s', '43':'de', '44':'', '4a':'', '4b':'s', '4c':'s', '4d':'lb', '4e':'', '4f':'', '4g':'s', '4h':'s', '4i':'s', '4j':'de', '4k':'', '4l':'', '4m':'s', '4n':'s', '4o':'lt', '4p':'s', '4q':'s', '4r':'s', '4s':'lb', '4t':'', '4u':'s', '4v':'s', '4w':'s', '4x':'lt', '4y':'s', '4z':'s',
},
"s" : {
'aa':'', 'ab':'', 'ac':'', 'ad':'', 'ae':'', 'af':'', 'ag':'', 'ah':'', 'ai':'', 'aj':'', 'ak':'', 'al':'', 'am':'', 'an':'', 'ao':'', 'ap':'', 'aq':'', 'ar':'', 'as':'', 'at':'', 'au':'', 'av':'', 'aw':'', 'ax':'', 'ay':'', 'az':'', 'a1':'st', 'a2':'', 'a3':'', 'a4':'',
'ba':'s', 'bb':'', 'bc':'', 'bd':'', 'be':'rb', 'bf':'', 'bg':'', 'bh':'', 'bi':'', 'bj':'s', 'bk':'', 'bl':'', 'bm':'rt', 'bn':'lt', 'bo':'', 'bp':'', 'bq':'s', 'br':'', 'bs':'', 'bt':'rb', 'bu':'s', 'bv':'lb', 'bw':'rb', 'bx':'s', 'by':'de', 'bz':'s', 'b1':'', 'b2':'', 'b3':'s', 'b4':'s',
'ca':'s', 'cb':'s', 'cc':'', 'cd':'', 'ce':'', 'cf':'st', 'cg':'', 'ch':'', 'ci':'s', 'cj':'s', 'ck':'s', 'cl':'', 'cm':'s', 'cn':'s', 'co':'', 'cp':'', 'cq':'s', 'cr':'', 'cs':'', 'ct':'', 'cu':'s', 'cv':'', 'cw':'', 'cx':'x', 'cy':'', 'cz':'rb', 'c1':'s', 'c2':'', 'c3':'rb', 'c4':'s',
'da':'s', 'db':'s', 'dc':'lb', 'dd':'', 'de':'s', 'df':'', 'dg':'', 'dh':'', 'di':'rb', 'dj':'s', 'dk':'s', 'dl':'lb', 'dm':'rb', 'dn':'s', 'do':'lt', 'dp':'', 'dq':'rb', 'dr':'s', 'ds':'s', 'dt':'s', 'du':'s', 'dv':'lb', 'dw':'', 'dx':'s', 'dy':'', 'dz':'', 'd1':'st', 'd2':'', 'd3':'', 'd4':'s',
'ea':'s', 'eb':'lb', 'ec':'', 'ed':'', 'ee':'rt', 'ef':'lb', 'eg':'', 'eh':'', 'ei':'', 'ej':'rb', 'ek':'lt', 'el':'', 'em':'', 'en':'x', 'eo':'st', 'ep':'', 'eq':'', 'er':'rb', 'es':'s', 'et':'de', 'eu':'s', 'ev':'', 'ew':'', 'ex':'st', 'ey':'rb', 'ez':'lb', 'e1':'', 'e2':'s', 'e3':'lb', 'e4':'rb',
'fa':'lt', 'fb':'', 'fc':'', 'fd':'', 'fe':'st', 'ff':'', 'fg':'', 'fh':'', 'fi':'', 'fj':'', 'fk':'s', 'fl':'', 'fm':'', 'fn':'s', 'fo':'', 'fp':'', 'fq':'', 'fr':'', 'fs':'st', 'ft':'', 'fu':'rt', 'fv':'lb', 'fw':'', 'fx':'', 'fy':'', 'fz':'', 'f1':'', 'f2':'x', 'f3':'', 'f4':'',
'ga':'s', 'gb':'', 'gc':'s', 'gd':'', 'ge':'', 'gf':'s', 'gg':'s', 'gh':'', 'gi':'s', 'gj':'s', 'gk':'s', 'gl':'s', 'gm':'s', 'gn':'s', 'go':'s', 'gp':'s', 'gq':'s', 'gr':'', 'gs':'', 'gt':'', 'gu':'lb', 'gv':'', 'gw':'', 'gx':'rb', 'gy':'lb', 'gz':'', 'g1':'', 'g2':'s', 'g3':'', 'g4':'s',
'ha':'s', 'hb':'s', 'hc':'s', 'hd':'de', 'he':'s', 'hf':'s', 'hg':'lb', 'hh':'rb', 'hi':'s', 'hj':'s', 'hk':'s', 'hl':'s', 'hm':'s', 'hn':'s', 'ho':'s', 'hp':'lt', 'hq':'rt', 'hr':'s', 'hs':'', 'ht':'s', 'hu':'', 'hv':'', 'hw':'', 'hx':'', 'hy':'', 'hz':'', 'h1':'', 'h2':'s', 'h3':'s', 'h4':'s',
'ia':'s', 'ib':'lb', 'ic':'st', 'id':'', 'ie':'rb', 'if':'lt', 'ig':'', 'ih':'', 'ii':'rb', 'ij':'s', 'ik':'s', 'il':'de', 'im':'s', 'in':'lb', 'io':'rb', 'ip':'s', 'iq':'s', 'ir':'lb', 'is':'rb', 'it':'s', 'iu':'s', 'iv':'', 'iw':'', 'ix':'s', 'iy':'lb', 'iz':'', 'i1':'s', 'i2':'s', 'i3':'lb', 'i4':'rb',
'ja':'lb', 'jb':'', 'jc':'', 'jd':'', 'je':'', 'jf':'st', 'jg':'', 'jh':'', 'ji':'', 'jj':'s', 'jk':'lb', 'jl':'', 'jm':'st', 'jn':'', 'jo':'', 'jp':'rb', 'jq':'lb', 'jr':'', 'js':'', 'jt':'rb', 'ju':'s', 'jv':'lb', 'jw':'', 'jx':'st', 'jy':'', 'jz':'', 'j1':'st', 'j2':'st', 'j3':'', 'j4':'',
'ka':'', 'kb':'', 'kc':'', 'kd':'', 'ke':'', 'kf':'', 'kg':'', 'kh':'', 'ki':'', 'kj':'rt', 'kk':'', 'kl':'', 'km':'', 'kn':'', 'ko':'s', 'kp':'', 'kq':'', 'kr':'', 'ks':'', 'kt':'', 'ku':'s', 'kv':'', 'kw':'', 'kx':'', 'ky':'', 'kz':'', 'k1':'', 'k2':'', 'k3':'', 'k4':'',
'la':'de', 'lb':'s', 'lc':'lb', 'ld':'', 'le':'', 'lf':'', 'lg':'st', 'lh':'', 'li':'', 'lj':'s', 'lk':'de', 'll':'s', 'lm':'', 'ln':'rb', 'lo':'lt', 'lp':'', 'lq':'', 'lr':'rb', 'ls':'', 'lt':'s', 'lu':'lt', 'lv':'', 'lw':'', 'lx':'lb', 'ly':'', 'lz':'rt', 'l1':'s', 'l2':'', 'l3':'', 'l4':'s',
'ma':'', 'mb':'x', 'mc':'', 'md':'', 'me':'s', 'mf':'s', 'mg':'', 'mh':'', 'mi':'', 'mj':'rt', 'mk':'', 'ml':'st', 'mm':'', 'mn':'', 'mo':'rt', 'mp':'', 'mq':'', 'mr':'', 'ms':'s', 'mt':'s', 'mu':'s', 'mv':'', 'mw':'st', 'mx':'', 'my':'', 'mz':'s', 'm1':'s', 'm2':'de', 'm3':'s', 'm4':'s',
'na':'', 'nb':'rt', 'nc':'', 'nd':'s', 'ne':'s', 'nf':'de', 'ng':'lb', 'nh':'', 'ni':'', 'nj':'s', 'nk':'s', 'nl':'', 'nm':'', 'nn':'', 'no':'s', 'np':'s', 'nq':'s', 'nr':'s', 'ns':'lb', 'nt':'rt', 'nu':'s', 'nv':'s', 'nw':'', 'nx':'rb', 'ny':'s', 'nz':'de', 'n1':'lt', 'n2':'', 'n3':'rb', 'n4':'s',
'oa':'de', 'ob':'s', 'oc':'lb', 'od':'rb', 'oe':'s', 'of':'', 'og':'', 'oh':'s', 'oi':'s', 'oj':'s', 'ok':'s', 'ol':'lb', 'om':'', 'on':'', 'oo':'rt', 'op':'s', 'oq':'s', 'or':'s', 'os':'', 'ot':'s', 'ou':'de', 'ov':'lb', 'ow':'', 'ox':'', 'oy':'x', 'oz':'', 'o1':'rb', 'o2':'', 'o3':'', 'o4':'rb',
'pa':'', 'pb':'st', 'pc':'', 'pd':'', 'pe':'rt', 'pf':'s', 'pg':'s', 'ph':'de', 'pi':'s', 'pj':'s', 'pk':'lb', 'pl':'', 'pm':'s', 'pn':'', 'po':'rb', 'pp':'s', 'pq':'s', 'pr':'s', 'ps':'s', 'pt':'s', 'pu':'', 'pv':'', 'pw':'', 'px':'', 'py':'s', 'pz':'', 'p1':'', 'p2':'st', 'p3':'', 'p4':'',
'qa':'s', 'qb':'', 'qc':'', 'qd':'', 'qe':'de', 'qf':'s', 'qg':'lb', 'qh':'', 'qi':'rb', 'qj':'s', 'qk':'', 'ql':'', 'qm':'rt', 'qn':'lb', 'qo':'', 'qp':'rb', 'qq':'s', 'qr':'lb', 'qs':'rb', 'qt':'s', 'qu':'s', 'qv':'', 'qw':'', 'qx':'s', 'qy':'s', 'qz':'s', 'q1':'', 'q2':'', 'q3':'', 'q4':'s',
'ra':'s', 'rb':'s', 'rc':'lb', 'rd':'', 're':'', 'rf':'st', 'rg':'', 'rh':'', 'ri':'', 'rj':'lt', 'rk':'', 'rl':'', 'rm':'st', 'rn':'', 'ro':'', 'rp':'', 'rq':'st', 'rr':'', 'rs':'', 'rt':'rb', 'ru':'s', 'rv':'s', 'rw':'de', 'rx':'s', 'ry':'s', 'rz':'s', 'r1':'s', 'r2':'', 'r3':'de', 'r4':'s',
'sa':'s', 'sb':'s', 'sc':'', 'sd':'', 'se':'', 'sf':'', 'sg':'', 'sh':'', 'si':'rb', 'sj':'s', 'sk':'', 'sl':'', 'sm':'', 'sn':'s', 'so':'', 'sp':'', 'sq':'', 'sr':'s', 'ss':'', 'st':'', 'su':'s', 'sv':'lb', 'sw':'', 'sx':'rb', 'sy':'s', 'sz':'s', 's1':'lb', 's2':'st', 's3':'', 's4':'rb',
'ta':'lb', 'tb':'st', 'tc':'', 'td':'', 'te':'rb', 'tf':'s', 'tg':'lb', 'th':'', 'ti':'', 'tj':'rb', 'tk':'lb', 'tl':'', 'tm':'', 'tn':'st', 'to':'', 'tp':'', 'tq':'rb', 'tr':'lb', 'ts':'', 'tt':'rb', 'tu':'lb', 'tv':'', 'tw':'', 'tx':'', 'ty':'rb', 'tz':'lb', 't1':'', 't2':'', 't3':'', 't4':'',
'ua':'', 'ub':'', 'uc':'', 'ud':'', 'ue':'', 'uf':'st', 'ug':'', 'uh':'', 'ui':'', 'uj':'', 'uk':'', 'ul':'', 'um':'', 'un':'', 'uo':'', 'up':'', 'uq':'', 'ur':'', 'us':'', 'ut':'', 'uu':'', 'uv':'', 'uw':'', 'ux':'', 'uy':'', 'uz':'', 'u1':'', 'u2':'', 'u3':'', 'u4':'s',
'va':'s', 'vb':'s', 'vc':'s', 'vd':'', 've':'', 'vf':'', 'vg':'', 'vh':'', 'vi':'', 'vj':'rt', 'vk':'s', 'vl':'', 'vm':'', 'vn':'', 'vo':'', 'vp':'', 'vq':'', 'vr':'', 'vs':'', 'vt':'s', 'vu':'s', 'vv':'', 'vw':'s', 'vx':'', 'vy':'', 'vz':'', 'v1':'lb', 'v2':'rb', 'v3':'', 'v4':'rt',
'wa':'s', 'wb':'s', 'wc':'s', 'wd':'lb', 'we':'', 'wf':'s', 'wg':'s', 'wh':'', 'wi':'s', 'wj':'de', 'wk':'s', 'wl':'lb', 'wm':'', 'wn':'s', 'wo':'', 'wp':'rt', 'wq':'', 'wr':'', 'ws':'rb', 'wt':'s', 'wu':'lb', 'wv':'rb', 'ww':'s', 'wx':'s', 'wy':'', 'wz':'s', 'w1':'', 'w2':'', 'w3':'rt', 'w4':'s',
'xa':'lb', 'xb':'', 'xc':'x', 'xd':'', 'xe':'', 'xf':'rt', 'xg':'lt', 'xh':'rt', 'xi':'lt', 'xj':'', 'xk':'lt', 'xl':'', 'xm':'', 'xn':'rt', 'xo':'de', 'xp':'s', 'xq':'lb', 'xr':'', 'xs':'', 'xt':'s', 'xu':'', 'xv':'', 'xw':'rb', 'xx':'s', 'xy':'lb', 'xz':'st', 'x1':'', 'x2':'', 'x3':'s', 'x4':'s',
'ya':'', 'yb':'', 'yc':'s', 'yd':'', 'ye':'', 'yf':'s', 'yg':'s', 'yh':'s', 'yi':'s', 'yj':'s', 'yk':'s', 'yl':'', 'ym':'', 'yn':'lb', 'yo':'', 'yp':'st', 'yq':'', 'yr':'', 'ys':'lb', 'yt':'rt', 'yu':'lb', 'yv':'', 'yw':'', 'yx':'lt', 'yy':'', 'yz':'', 'y1':'', 'y2':'s', 'y3':'lb', 'y4':'rt',
'za':'s', 'zb':'s', 'zc':'s', 'zd':'s', 'ze':'s', 'zf':'s', 'zg':'s', 'zh':'s', 'zi':'de', 'zj':'s', 'zk':'s', 'zl':'lb', 'zm':'', 'zn':'', 'zo':'', 'zp':'', 'zq':'', 'zr':'st', 'zs':'', 'zt':'s', 'zu':'', 'zv':'', 'zw':'rb', 'zx':'s', 'zy':'', 'zz':'rb', 'z1':'s', 'z2':'lb', 'z3':'', 'z4':'s',
'1a':'s', '1b':'de', '1c':'s', '1d':'de', '1e':'s', '1f':'lt', '1g':'rb', '1h':'s', '1i':'', '1j':'rt', '1k':'lt', '1l':'', '1m':'', '1n':'', '1o':'', '1p':'', '1q':'', '1r':'', '1s':'s', '1t':'s', '1u':'s', '1v':'', '1w':'', '1x':'x', '1y':'', '1z':'', '11':'x', '12':'', '13':'s', '14':'s',
'2a':'lt', '2b':'', '2c':'st', '2d':'', '2e':'x', '2f':'rb', '2g':'', '2h':'rb', '2i':'s', '2j':'s', '2k':'s', '2l':'', '2m':'', '2n':'', '2o':'s', '2p':'', '2q':'', '2r':'s', '2s':'s', '2t':'s', '2u':'s', '2v':'s', '2w':'', '2x':'s', '2y':'', '2z':'', '21':'rt', '22':'s', '23':'lb', '24':'rb',
'3a':'s', '3b':'', '3c':'', '3d':'', '3e':'s', '3f':'', '3g':'', '3h':'', '3i':'rb', '3j':'de', '3k':'s', '3l':'s', '3m':'', '3n':'rb', '3o':'s', '3p':'lb', '3q':'', '3r':'rb', '3s':'s', '3t':'s', '3u':'lb', '3v':'st', '3w':'rb', '3x':'s', '3y':'lb', '3z':'', '31':'s', '32':'lb', '33':'', '34':'',
'4a':'lb', '4b':'', '4c':'', '4d':'rb', '4e':'de', '4f':'lb', '4g':'', '4h':'', '4i':'', '4j':'', '4k':'de', '4l':'lb', '4m':'', '4n':'', '4o':'st', '4p':'', '4q':'', '4r':'', '4s':'rb', '4t':'lb', '4u':'', '4v':'', '4w':'', '4x':'st', '4y':'', '4z':'', '41':'st', '42':'', '43':'', '44':'de',
},
"w" : {
'aa':'lb', 'ab':'s', 'ac':'s', 'ad':'s', 'ae':'lt', 'af':'s', 'ag':'s', 'ah':'s', 'ai':'s', 'aj':'lt', 'ak':'s', 'al':'s', 'am':'', 'an':'lb', 'ao':'s', 'ap':'s', 'aq':'lt', 'ar':'s', 'as':'s', 'at':'', 'au':'lb', 'av':'', 'aw':'lb', 'ax':'lt', 'ay':'', 'az':'lb', 'a1':'rt', 'a2':'s', 'a3':'lt', 'a4':'',
'ba':'', 'bb':'lb', 'bc':'s', 'bd':'s', 'be':'', 'bf':'lb', 'bg':'s', 'bh':'s', 'bi':'', 'bj':'', 'bk':'lb', 'bl':'s', 'bm':'', 'bn':'st', 'bo':'s', 'bp':'', 'bq':'', 'br':'de', 'bs':'s', 'bt':'', 'bu':'', 'bv':'rb', 'bw':'', 'bx':'', 'by':'', 'bz':'', 'b1':'lb', 'b2':'', 'b3':'', 'b4':'',
'ca':'', 'cb':'', 'cc':'lb', 'cd':'s', 'ce':'lt', 'cf':'rt', 'cg':'s', 'ch':'', 'ci':'', 'cj':'', 'ck':'', 'cl':'', 'cm':'', 'cn':'', 'co':'lb', 'cp':'', 'cq':'', 'cr':'lb', 'cs':'lt', 'ct':'', 'cu':'', 'cv':'lb', 'cw':'s', 'cx':'x', 'cy':'s', 'cz':'', 'c1':'', 'c2':'de', 'c3':'', 'c4':'',
'da':'', 'db':'', 'dc':'rb', 'dd':'', 'de':'', 'df':'lb', 'dg':'s', 'dh':'s', 'di':'', 'dj':'', 'dk':'', 'dl':'rb', 'dm':'', 'dn':'', 'do':'st', 'dp':'s', 'dq':'', 'dr':'', 'ds':'', 'dt':'', 'du':'', 'dv':'rb', 'dw':'', 'dx':'', 'dy':'', 'dz':'lb', 'd1':'rt', 'd2':'lt', 'd3':'', 'd4':'',
'ea':'', 'eb':'rb', 'ec':'s', 'ed':'s', 'ee':'', 'ef':'rb', 'eg':'s', 'eh':'s', 'ei':'s', 'ej':'', 'ek':'st', 'el':'s', 'em':'s', 'en':'x', 'eo':'rt', 'ep':'s', 'eq':'s', 'er':'', 'es':'', 'et':'', 'eu':'', 'ev':'lb', 'ew':'s', 'ex':'rt', 'ey':'', 'ez':'rb', 'e1':'', 'e2':'', 'e3':'rb', 'e4':'',
'fa':'st', 'fb':'s', 'fc':'lt', 'fd':'s', 'fe':'rt', 'ff':'', 'fg':'lb', 'fh':'s', 'fi':'lt', 'fj':'', 'fk':'', 'fl':'lb', 'fm':'', 'fn':'', 'fo':'lb', 'fp':'', 'fq':'lb', 'fr':'s', 'fs':'rt', 'ft':'s', 'fu':'', 'fv':'rb', 'fw':'s', 'fx':'', 'fy':'lb', 'fz':'s', 'f1':'s', 'f2':'x', 'f3':'s', 'f4':'',
'ga':'', 'gb':'', 'gc':'', 'gd':'lb', 'ge':'', 'gf':'', 'gg':'', 'gh':'', 'gi':'', 'gj':'', 'gk':'', 'gl':'', 'gm':'', 'gn':'', 'go':'', 'gp':'', 'gq':'', 'gr':'lb', 'gs':'s', 'gt':'', 'gu':'rb', 'gv':'s', 'gw':'s', 'gx':'', 'gy':'rb', 'gz':'s', 'g1':'', 'g2':'', 'g3':'', 'g4':'',
'ha':'', 'hb':'', 'hc':'', 'hd':'', 'he':'', 'hf':'', 'hg':'rb', 'hh':'', 'hi':'', 'hj':'', 'hk':'', 'hl':'', 'hm':'', 'hn':'', 'ho':'', 'hp':'st', 'hq':'', 'hr':'', 'hs':'', 'ht':'', 'hu':'lb', 'hv':'s', 'hw':'s', 'hx':'', 'hy':'lb', 'hz':'s', 'h1':'', 'h2':'', 'h3':'', 'h4':'',
'ia':'', 'ib':'rb', 'ic':'rt', 'id':'s', 'ie':'', 'if':'st', 'ig':'s', 'ih':'s', 'ii':'', 'ij':'', 'ik':'', 'il':'', 'im':'', 'in':'rb', 'io':'', 'ip':'', 'iq':'', 'ir':'rb', 'is':'', 'it':'', 'iu':'', 'iv':'lb', 'iw':'', 'ix':'', 'iy':'rb', 'iz':'', 'i1':'', 'i2':'', 'i3':'rb', 'i4':'',
'ja':'rb', 'jb':'s', 'jc':'s', 'jd':'s', 'je':'s', 'jf':'rt', 'jg':'s', 'jh':'s', 'ji':'', 'jj':'', 'jk':'rb', 'jl':'s', 'jm':'rt', 'jn':'s', 'jo':'lt', 'jp':'', 'jq':'rb', 'jr':'s', 'js':'s', 'jt':'', 'ju':'', 'jv':'rb', 'jw':'s', 'jx':'rt', 'jy':'s', 'jz':'s', 'j1':'rt', 'j2':'rt', 'j3':'s', 'j4':'',
'ka':'lb', 'kb':'', 'kc':'lb', 'kd':'s', 'ke':'s', 'kf':'', 'kg':'lb', 'kh':'s', 'ki':'s', 'kj':'', 'kk':'lb', 'kl':'lt', 'km':'s', 'kn':'', 'ko':'', 'kp':'de', 'kq':'s', 'kr':'lt', 'ks':'s', 'kt':'', 'ku':'', 'kv':'de', 'kw':'s', 'kx':'', 'ky':'de', 'kz':'', 'k1':'lb', 'k2':'s', 'k3':'s', 'k4':'',
'la':'', 'lb':'', 'lc':'rb', 'ld':'s', 'le':'lt', 'lf':'lt', 'lg':'rt', 'lh':'s', 'li':'', 'lj':'', 'lk':'', 'll':'', 'lm':'de', 'ln':'', 'lo':'st', 'lp':'s', 'lq':'s', 'lr':'', 'ls':'', 'lt':'', 'lu':'st', 'lv':'s', 'lw':'', 'lx':'rb', 'ly':'s', 'lz':'', 'l1':'', 'l2':'lb', 'l3':'', 'l4':'',
'ma':'de', 'mb':'x', 'mc':'s', 'md':'', 'me':'', 'mf':'', 'mg':'lb', 'mh':'s', 'mi':'s', 'mj':'', 'mk':'lb', 'ml':'rt', 'mm':'s', 'mn':'s', 'mo':'', 'mp':'lb', 'mq':'lt', 'mr':'', 'ms':'', 'mt':'', 'mu':'', 'mv':'lb', 'mw':'rt', 'mx':'lt', 'my':'', 'mz':'', 'm1':'', 'm2':'', 'm3':'', 'm4':'',
'na':'lb', 'nb':'', 'nc':'', 'nd':'', 'ne':'', 'nf':'', 'ng':'rb', 'nh':'lt', 'ni':'', 'nj':'', 'nk':'', 'nl':'lb', 'nm':'s', 'nn':'', 'no':'', 'np':'', 'nq':'', 'nr':'', 'ns':'rb', 'nt':'', 'nu':'', 'nv':'', 'nw':'de', 'nx':'', 'ny':'', 'nz':'', 'n1':'st', 'n2':'s', 'n3':'', 'n4':'',
'oa':'', 'ob':'', 'oc':'rb', 'od':'', 'oe':'', 'of':'lb', 'og':'', 'oh':'', 'oi':'', 'oj':'', 'ok':'', 'ol':'rb', 'om':'lt', 'on':'s', 'oo':'', 'op':'', 'oq':'', 'or':'', 'os':'', 'ot':'', 'ou':'', 'ov':'rb', 'ow':'s', 'ox':'s', 'oy':'x', 'oz':'s', 'o1':'', 'o2':'lb', 'o3':'s', 'o4':'',
'pa':'lb', 'pb':'rt', 'pc':'s', 'pd':'s', 'pe':'', 'pf':'', 'pg':'', 'ph':'', 'pi':'', 'pj':'', 'pk':'rb', 'pl':'', 'pm':'', 'pn':'lb', 'po':'', 'pp':'', 'pq':'', 'pr':'', 'ps':'', 'pt':'', 'pu':'lb', 'pv':'s', 'pw':'s', 'px':'', 'py':'', 'pz':'lb', 'p1':'s', 'p2':'rt', 'p3':'s', 'p4':'',
'qa':'', 'qb':'', 'qc':'lb', 'qd':'', 'qe':'', 'qf':'', 'qg':'rb', 'qh':'s', 'qi':'', 'qj':'', 'qk':'de', 'ql':'s', 'qm':'', 'qn':'rb', 'qo':'s', 'qp':'', 'qq':'', 'qr':'rb', 'qs':'', 'qt':'', 'qu':'', 'qv':'lb', 'qw':'', 'qx':'', 'qy':'', 'qz':'', 'q1':'lb', 'q2':'s', 'q3':'', 'q4':'',
'ra':'', 'rb':'', 'rc':'rb', 'rd':'s', 're':'s', 'rf':'rt', 'rg':'s', 'rh':'s', 'ri':'', 'rj':'st', 'rk':'s', 'rl':'s', 'rm':'rt', 'rn':'lt', 'ro':'s', 'rp':'s', 'rq':'rt', 'rr':'lt', 'rs':'s', 'rt':'', 'ru':'', 'rv':'', 'rw':'', 'rx':'', 'ry':'', 'rz':'', 'r1':'', 'r2':'', 'r3':'', 'r4':'',
'sa':'', 'sb':'', 'sc':'de', 'sd':'s', 'se':'lt', 'sf':'', 'sg':'lb', 'sh':'s', 'si':'', 'sj':'', 'sk':'lb', 'sl':'s', 'sm':'', 'sn':'', 'so':'de', 'sp':'s', 'sq':'', 'sr':'', 'ss':'de', 'st':'', 'su':'', 'sv':'rb', 'sw':'s', 'sx':'', 'sy':'', 'sz':'', 's1':'rb', 's2':'rt', 's3':'s', 's4':'',
'ta':'rb', 'tb':'rt', 'tc':'s', 'td':'s', 'te':'', 'tf':'', 'tg':'rb', 'th':'s', 'ti':'s', 'tj':'', 'tk':'rb', 'tl':'s', 'tm':'s', 'tn':'rt', 'to':'s', 'tp':'s', 'tq':'', 'tr':'rb', 'ts':'s', 'tt':'', 'tu':'rb', 'tv':'s', 'tw':'s', 'tx':'s', 'ty':'', 'tz':'rb', 't1':'s', 't2':'s', 't3':'s', 't4':'',
'ua':'lb', 'ub':'', 'uc':'lb', 'ud':'s', 'ue':'s', 'uf':'rt', 'ug':'s', 'uh':'s', 'ui':'s', 'uj':'', 'uk':'lb', 'ul':'s', 'um':'s', 'un':'s', 'uo':'s', 'up':'s', 'uq':'s', 'ur':'s', 'us':'s', 'ut':'', 'uu':'lb', 'uv':'s', 'uw':'lt', 'ux':'s', 'uy':'s', 'uz':'s', 'u1':'', 'u2':'lb', 'u3':'', 'u4':'',
'va':'', 'vb':'', 'vc':'', 'vd':'lb', 've':'s', 'vf':'lt', 'vg':'lt', 'vh':'s', 'vi':'lt', 'vj':'', 'vk':'', 'vl':'lb', 'vm':'s', 'vn':'lt', 'vo':'s', 'vp':'lt', 'vq':'s', 'vr':'s', 'vs':'', 'vt':'', 'vu':'', 'vv':'', 'vw':'', 'vx':'lb', 'vy':'s', 'vz':'', 'v1':'rb', 'v2':'', 'v3':'lb', 'v4':'',
'wa':'', 'wb':'', 'wc':'', 'wd':'rb', 'we':'', 'wf':'', 'wg':'', 'wh':'', 'wi':'', 'wj':'', 'wk':'', 'wl':'rb', 'wm':'', 'wn':'', 'wo':'lb', 'wp':'', 'wq':'lb', 'wr':'s', 'ws':'', 'wt':'', 'wu':'rb', 'wv':'', 'ww':'', 'wx':'', 'wy':'', 'wz':'', 'w1':'de', 'w2':'s', 'w3':'', 'w4':'',
'xa':'rb', 'xb':'s', 'xc':'x', 'xd':'s', 'xe':'s', 'xf':'', 'xg':'st', 'xh':'', 'xi':'st', 'xj':'', 'xk':'st', 'xl':'s', 'xm':'s', 'xn':'', 'xo':'', 'xp':'', 'xq':'rb', 'xr':'s', 'xs':'', 'xt':'', 'xu':'lb', 'xv':'s', 'xw':'', 'xx':'', 'xy':'rb', 'xz':'rt', 'x1':'s', 'x2':'', 'x3':'', 'x4':'',
'ya':'lb', 'yb':'', 'yc':'', 'yd':'lb', 'ye':'', 'yf':'', 'yg':'', 'yh':'', 'yi':'', 'yj':'', 'yk':'', 'yl':'lb', 'ym':'', 'yn':'rb', 'yo':'s', 'yp':'rt', 'yq':'s', 'yr':'', 'ys':'rb', 'yt':'', 'yu':'rb', 'yv':'s', 'yw':'', 'yx':'st', 'yy':'s', 'yz':'lt', 'y1':'', 'y2':'', 'y3':'rb', 'y4':'',
'za':'', 'zb':'', 'zc':'', 'zd':'', 'ze':'', 'zf':'', 'zg':'', 'zh':'', 'zi':'', 'zj':'', 'zk':'', 'zl':'rb', 'zm':'s', 'zn':'s', 'zo':'s', 'zp':'s', 'zq':'s', 'zr':'rt', 'zs':'', 'zt':'', 'zu':'lb', 'zv':'s', 'zw':'', 'zx':'', 'zy':'de', 'zz':'', 'z1':'', 'z2':'rb', 'z3':'', 'z4':'',
'1a':'', '1b':'', '1c':'', '1d':'', '1e':'', '1f':'st', '1g':'', '1h':'', '1i':'lb', '1j':'', '1k':'st', '1l':'s', '1m':'s', '1n':'s', '1o':'lt', '1p':'s', '1q':'s', '1r':'', '1s':'', '1t':'', '1u':'', '1v':'lb', '1w':'s', '1x':'x', '1y':'s', '1z':'s', '11':'x', '12':'', '13':'', '14':'',
'2a':'st', '2b':'s', '2c':'rt', '2d':'s', '2e':'x', '2f':'', '2g':'de', '2h':'', '2i':'', '2j':'', '2k':'', '2l':'lb', '2m':'s', '2n':'', '2o':'', '2p':'lb', '2q':'', '2r':'', '2s':'', '2t':'', '2u':'', '2v':'', '2w':'', '2x':'', '2y':'lb', '2z':'s', '21':'', '22':'', '23':'rb', '24':'',
'3a':'', '3b':'de', '3c':'s', '3d':'', '3e':'', '3f':'lb', '3g':'s', '3h':'s', '3i':'', '3j':'', '3k':'', '3l':'', '3m':'de', '3n':'', '3o':'', '3p':'rb', '3q':'s', '3r':'', '3s':'', '3t':'', '3u':'rb', '3v':'rt', '3w':'', '3x':'', '3y':'rb', '3z':'', '31':'', '32':'rb', '33':'s', '34':'',
'4a':'rb', '4b':'s', '4c':'s', '4d':'', '4e':'', '4f':'rb', '4g':'s', '4h':'s', '4i':'s', '4j':'', '4k':'', '4l':'rb', '4m':'s', '4n':'s', '4o':'rt', '4p':'s', '4q':'s', '4r':'s', '4s':'', '4t':'rb', '4u':'s', '4v':'s', '4w':'s', '4x':'rt', '4y':'s', '4z':'s', '41':'rt', '42':'s', '43':'', '44':'',
}
};
def main():
print(json.dumps(maze, separators=(',',':')))
if __name__ == "__main__":
main()
|
nbari/zunzuncito
|
refs/heads/master
|
my_api/default/v0/zun_thread/zun_thread.py
|
1
|
"""
thread resource
"""
from zunzuncito import tools
class APIResource(object):
def __init__(self):
self.headers = {'Content-Type': 'text/html; charset=UTF-8'}
def dispatch(self, request, response):
request.log.info(tools.log_json({
'API': request.version,
'URI': request.URI,
'rid': request.request_id,
'in': 'dispatch',
'thread': request.environ.get('thread', '-'),
'thread_env': request.environ.get('thread', '-')
}))
response.headers.update(self.headers)
return tools.log_json(request.environ)
|
Sylrob434/CouchPotatoServer
|
refs/heads/develop
|
couchpotato/core/media/_base/search/main.py
|
80
|
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import mergeDicts, getImdb
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
log = CPLog(__name__)
class Search(Plugin):
def __init__(self):
addApiView('search', self.search, docs = {
'desc': 'Search the info in providers for a movie',
'params': {
'q': {'desc': 'The (partial) movie name you want to search for'},
'type': {'desc': 'Search for a specific media type. Leave empty to search all.'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'movies': array,
'show': array,
etc
}"""}
})
addEvent('app.load', self.addSingleSearches)
def search(self, q = '', types = None, **kwargs):
# Make sure types is the correct instance
if isinstance(types, (str, unicode)):
types = [types]
elif isinstance(types, (list, tuple, set)):
types = list(types)
imdb_identifier = getImdb(q)
if not types:
if imdb_identifier:
result = fireEvent('movie.info', identifier = imdb_identifier, merge = True)
result = {result['type']: [result]}
else:
result = fireEvent('info.search', q = q, merge = True)
else:
result = {}
for media_type in types:
if imdb_identifier:
result[media_type] = fireEvent('%s.info' % media_type, identifier = imdb_identifier)
else:
result[media_type] = fireEvent('%s.search' % media_type, q = q)
return mergeDicts({
'success': True,
}, result)
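# Editor's note (illustrative only, not part of CouchPotato): through the 'search' API view
# registered in __init__, a call such as search(q = 'matrix') returns roughly
#   {'success': True, '<media type>': [<info results>], ...}
# i.e. one result list per detected or requested media type, as sketched in the 'return'
# example documented above.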
def createSingleSearch(self, media_type):
def singleSearch(q, **kwargs):
return self.search(q, type = media_type, **kwargs)
return singleSearch
def addSingleSearches(self):
for media_type in fireEvent('media.types', merge = True):
addApiView('%s.search' % media_type, self.createSingleSearch(media_type))
|
hyiltiz/youtube-dl
|
refs/heads/master
|
docs/conf.py
|
137
|
# -*- coding: utf-8 -*-
#
# youtube-dl documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 14 21:05:43 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Allows importing youtube_dl
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'youtube-dl'
copyright = u'2014, Ricardo Garcia Gonzalez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from youtube_dl.version import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'youtube-dldoc'
|
miketamis/CouchPotatoServer
|
refs/heads/master
|
libs/requests/packages/urllib3/fields.py
|
1007
|
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
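# Editor's illustration (not part of urllib3): the guess is driven by the standard
# mimetypes table, with the given default as a fallback, e.g.:
#   >>> guess_content_type('photo.png')
#   'image/png'
#   >>> guess_content_type('notes.unknown-ext')
#   'application/octet-stream'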
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
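# Editor's illustration (not part of urllib3): plain ASCII values are emitted as a quoted
# name="value" pair, while non-ASCII values fall back to the RFC 2231 form, roughly:
#   >>> format_header_param('filename', 'report.csv')
#   'filename="report.csv"'
#   >>> format_header_param('filename', u'r\xe9sum\xe9.txt')   # non-ASCII value
#   "filename*=utf-8''r%C3%A9sum%C3%A9.txt"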
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
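# Editor's illustration (not part of urllib3): a (filename, data) tuple becomes a file-style
# field whose Content-Type is guessed from the filename, e.g. (assumed usage):
#   field = RequestField.from_tuples('upload', ('report.csv', 'a,b\n1,2'))
#   field.headers['Content-Type']        -> 'text/csv'
#   field.headers['Content-Disposition'] -> 'form-data; name="upload"; filename="report.csv"'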
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method sets the "Content-Disposition", "Content-Type" and
"Content-Location" headers on the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
|
embeddedarm/android_external_chromium_org
|
refs/heads/imx_KK4.4.3_2.0.0-ga
|
tools/json_schema_compiler/PRESUBMIT.py
|
127
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting tools/json_schema_compiler/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
WHITELIST = [ r'.+_test.py$' ]
def CheckChangeOnUpload(input_api, output_api):
return input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, '.', whitelist=WHITELIST)
def CheckChangeOnCommit(input_api, output_api):
return input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, '.', whitelist=WHITELIST)
|
pyropeter/archweb
|
refs/heads/master
|
main/migrations/0028_auto__add_field_repo_bugs_project__add_field_repo_svn_root.py
|
5
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Repo.bugs_project'
db.add_column('repos', 'bugs_project', self.gf('django.db.models.fields.SmallIntegerField')(default=1), keep_default=False)
# Adding field 'Repo.svn_root'
db.add_column('repos', 'svn_root', self.gf('django.db.models.fields.CharField')(default='packages', max_length=64), keep_default=False)
def backwards(self, orm):
# Deleting field 'Repo.bugs_project'
db.delete_column('repos', 'bugs_project')
# Deleting field 'Repo.svn_root'
db.delete_column('repos', 'svn_root')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.arch': {
'Meta': {'ordering': "['name']", 'object_name': 'Arch', 'db_table': "'arches'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'main.donor': {
'Meta': {'ordering': "['name']", 'object_name': 'Donor', 'db_table': "'donors'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'main.mirror': {
'Meta': {'ordering': "('country', 'name')", 'object_name': 'Mirror'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'admin_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isos': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rsync_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'rsync_user': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'tier': ('django.db.models.fields.SmallIntegerField', [], {'default': '2'}),
'upstream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Mirror']", 'null': 'True'})
},
'main.mirrorprotocol': {
'Meta': {'object_name': 'MirrorProtocol'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'protocol': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'})
},
'main.mirrorrsync': {
'Meta': {'object_name': 'MirrorRsync'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'mirror': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rsync_ips'", 'to': "orm['main.Mirror']"})
},
'main.mirrorurl': {
'Meta': {'object_name': 'MirrorUrl'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mirror': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'urls'", 'to': "orm['main.Mirror']"}),
'protocol': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'urls'", 'to': "orm['main.MirrorProtocol']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'main.news': {
'Meta': {'ordering': "['-postdate', '-id']", 'object_name': 'News', 'db_table': "'news'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'news_author'", 'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postdate': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'main.package': {
'Meta': {'ordering': "('pkgname',)", 'object_name': 'Package', 'db_table': "'packages'"},
'arch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['main.Arch']"}),
'build_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'compressed_size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'files_last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'flag_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'installed_size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'packager': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'packager_str': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkgbase': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkgdesc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'pkgname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkgrel': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkgver': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['main.Repo']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
'main.packagedepend': {
'Meta': {'object_name': 'PackageDepend', 'db_table': "'package_depends'"},
'depname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'depvcmp': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
},
'main.packagefile': {
'Meta': {'object_name': 'PackageFile', 'db_table': "'package_files'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
},
'main.repo': {
'Meta': {'ordering': "['name']", 'object_name': 'Repo', 'db_table': "'repos'"},
'bugs_project': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'svn_root': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'testing': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'main.signoff': {
'Meta': {'object_name': 'Signoff'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'packager': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"}),
'pkgrel': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkgver': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'main.todolist': {
'Meta': {'object_name': 'Todolist', 'db_table': "'todolists'"},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'date_added': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'main.todolistpkg': {
'Meta': {'unique_together': "(('list', 'pkg'),)", 'object_name': 'TodolistPkg', 'db_table': "'todolist_pkgs'"},
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Todolist']"}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
},
'main.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'user_profiles'"},
'alias': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'allowed_repos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Repo']", 'symmetrical': 'False', 'blank': 'True'}),
'favorite_distros': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interests': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'notify': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'other_contact': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.FileField', [], {'default': "'devs/silhouette.png'", 'max_length': '100'}),
'public_email': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'roles': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userprofile_user'", 'unique': 'True', 'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'yob': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['main']
|
P1R/sl4a2mongo
|
refs/heads/master
|
ReadSensors.py
|
1
|
import subprocess
from ast import literal_eval as leval
def gps():
cmd = ['termux-location']
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
text = p.stdout.read().decode("UTF-8")
#retcode = p.wait()
return leval(f'{{"gps": {text} }}') #, retcode
#text, ret = gps()
print(gps())
#print(ret)
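# Editor's note (illustrative, not part of the original script): termux-location prints a JSON
# object, so gps() returns that object wrapped under the "gps" key, e.g. (field names assumed):
#   {'gps': {'latitude': 19.43, 'longitude': -99.13, 'accuracy': 12.0, 'provider': 'gps'}}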
|
MER-GROUP/intellij-community
|
refs/heads/master
|
python/testData/codeInsight/smartEnter/while_after.py
|
83
|
while a:
<caret>
|
elba7r/builder
|
refs/heads/master
|
frappe/hooks.py
|
1
|
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "frappe"
app_title = "Frappe Framework"
app_publisher = "Frappe Technologies"
app_description = "Full stack web framework with Python, Javascript, MariaDB, Redis, Node"
app_icon = "octicon octicon-circuit-board"
app_color = "orange"
source_link = "https://github.com/frappe/frappe"
app_license = "MIT"
app_email = "info@frappe.io"
before_install = "frappe.utils.install.before_install"
after_install = "frappe.utils.install.after_install"
page_js = {
"setup-wizard": "public/js/frappe/setup_wizard.js"
}
# website
app_include_js = [
"assets/js/libs.min.js",
"assets/js/desk.min.js",
"assets/js/editor.min.js",
"assets/js/list.min.js",
"assets/js/form.min.js",
"assets/js/report.min.js",
"assets/js/d3.min.js",
"assets/frappe/js/frappe/toolbar.js"
]
app_include_css = [
"assets/css/desk.min.css",
"assets/css/list.min.css",
"assets/css/form.min.css",
"assets/css/report.min.css",
"assets/css/module.min.css"
]
web_include_js = [
"website_script.js"
]
bootstrap = "assets/frappe/css/bootstrap.css"
web_include_css = [
"assets/css/frappe-web.css"
]
website_route_rules = [
{"from_route": "/blog", "to_route": "Blog Post"},
{"from_route": "/blog/<category>", "to_route": "Blog Post"},
{"from_route": "/kb/<category>", "to_route": "Help Article"}
]
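# Editor's note (illustrative only, not shipped with frappe): additional routes follow the same
# from_route/to_route shape, e.g. a hypothetical
# website_route_rules += [{"from_route": "/faq/<category>", "to_route": "Help Article"}]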
write_file_keys = ["file_url", "file_name"]
notification_config = "frappe.core.notifications.get_notification_config"
before_tests = "frappe.utils.install.before_tests"
website_generators = ["Web Page", "Blog Post", "Blog Category", "Web Form",
"Help Article"]
email_append_to = ["Event", "ToDo", "Communication"]
calendars = ["Event"]
# login
on_session_creation = [
"frappe.core.doctype.communication.feed.login_feed",
"frappe.core.doctype.user.user.notify_admin_access_to_system_manager",
"frappe.limits.check_if_expired",
"frappe.utils.scheduler.reset_enabled_scheduler_events",
]
# permissions
permission_query_conditions = {
"Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
"ToDo": "frappe.desk.doctype.todo.todo.get_permission_query_conditions",
"User": "frappe.core.doctype.user.user.get_permission_query_conditions",
"Note": "frappe.desk.doctype.note.note.get_permission_query_conditions",
}
has_permission = {
"Event": "frappe.desk.doctype.event.event.has_permission",
"ToDo": "frappe.desk.doctype.todo.todo.has_permission",
"User": "frappe.core.doctype.user.user.has_permission",
"Note": "frappe.desk.doctype.note.note.has_permission",
"Communication": "frappe.core.doctype.communication.communication.has_permission"
}
standard_queries = {
"User": "frappe.core.doctype.user.user.user_query"
}
doc_events = {
"*": {
"on_update": [
"frappe.desk.notifications.clear_doctype_notifications",
"frappe.core.doctype.communication.feed.update_feed"
],
"after_rename": "frappe.desk.notifications.clear_doctype_notifications",
"on_cancel": [
"frappe.desk.notifications.clear_doctype_notifications",
],
"on_trash": "frappe.desk.notifications.clear_doctype_notifications"
},
"Email Group Member": {
"validate": "frappe.email.doctype.email_group.email_group.restrict_email_group"
},
}
scheduler_events = {
"all": [
"frappe.email.queue.flush",
"frappe.email.doctype.email_account.email_account.pull",
"frappe.email.doctype.email_account.email_account.notify_unreplied",
"frappe.oauth.delete_oauth2_data"
],
"hourly": [
"frappe.model.utils.link_count.update_link_count",
'frappe.model.utils.list_settings.sync_list_settings',
"frappe.utils.error.collect_error_snapshots"
],
"daily": [
"frappe.email.queue.clear_outbox",
"frappe.desk.notifications.clear_notifications",
"frappe.core.doctype.error_log.error_log.set_old_logs_as_seen",
"frappe.desk.doctype.event.event.send_event_digest",
"frappe.sessions.clear_expired_sessions",
"frappe.email.doctype.email_alert.email_alert.trigger_daily_alerts",
"frappe.async.remove_old_task_logs",
"frappe.utils.scheduler.disable_scheduler_on_expiry",
"frappe.utils.scheduler.restrict_scheduler_events_if_dormant",
"frappe.limits.update_space_usage",
"frappe.email.doctype.auto_email_report.auto_email_report.send_daily",
"frappe.desk.page.backups.backups.delete_downloadable_backups"
],
"monthly": [
"frappe.email.doctype.auto_email_report.auto_email_report.send_monthly"
]
}
get_translated_dict = {
("doctype", "System Settings"): "frappe.geo.country_info.get_translated_dict",
("page", "setup-wizard"): "frappe.geo.country_info.get_translated_dict"
}
sounds = [
{"name": "email", "src": "/assets/frappe/sounds/email.mp3", "volume": 0.1},
{"name": "submit", "src": "/assets/frappe/sounds/submit.mp3", "volume": 0.1},
{"name": "cancel", "src": "/assets/frappe/sounds/cancel.mp3", "volume": 0.1},
{"name": "delete", "src": "/assets/frappe/sounds/delete.mp3", "volume": 0.05},
{"name": "click", "src": "/assets/frappe/sounds/click.mp3", "volume": 0.05},
{"name": "error", "src": "/assets/frappe/sounds/error.mp3", "volume": 0.1},
# {"name": "alert", "src": "/assets/frappe/sounds/alert.mp3"},
# {"name": "chime", "src": "/assets/frappe/sounds/chime.mp3"},
]
bot_parsers = [
'frappe.utils.bot.ShowNotificationBot',
'frappe.utils.bot.GetOpenListBot',
'frappe.utils.bot.ListBot',
'frappe.utils.bot.FindBot',
'frappe.utils.bot.CountBot'
]
setup_wizard_exception = "frappe.desk.page.setup_wizard.setup_wizard.email_setup_wizard_exception"
before_write_file = "frappe.limits.validate_space_limit"
integration_services = ["PayPal", "Razorpay", "Dropbox", "LDAP"]
|
ptemplier/ansible
|
refs/heads/devel
|
lib/ansible/modules/monitoring/monit.py
|
9
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Darryl Stoflet <stoflet@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: monit
short_description: Manage the state of a program monitored via Monit
description:
- Manage the state of a program monitored via I(Monit)
version_added: "1.2"
options:
name:
description:
- The name of the I(monit) program/process to manage
required: true
default: null
state:
description:
- The state of service
required: true
default: null
choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
timeout:
description:
- If there are pending actions for the service monitored by monit, then Ansible will check
for up to this many seconds to verify the requested action has been performed.
Ansible will sleep for five seconds between each check.
required: false
default: 300
version_added: "2.1"
requirements: [ ]
author: "Darryl Stoflet (@dstoflet)"
'''
EXAMPLES = '''
# Manage the state of program "httpd" to be in "started" state.
- monit:
name: httpd
state: started
'''
import time
import re
from ansible.module_utils.basic import AnsibleModule
def main():
arg_spec = dict(
name=dict(required=True),
timeout=dict(default=300, type='int'),
state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params['name']
state = module.params['state']
timeout = module.params['timeout']
MONIT = module.get_bin_path('monit', True)
def monit_version():
rc, out, err = module.run_command('%s -V' % MONIT, check_rc=True)
version_line = out.split('\n')[0]
version = re.search("[0-9]+\.[0-9]+", version_line).group().split('.')
# Use only major and minor; even if there are more components, these should be enough
return int(version[0]), int(version[1])
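# Editor's illustration (not part of the module): 'monit -V' starts with a line such as
# "This is Monit version 5.20.0" (assumed), which the regex above reduces to (5, 20).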
def is_version_higher_than_18():
return MONIT_MAJOR_VERSION > 3 or MONIT_MAJOR_VERSION == 3 and MONIT_MINOR_VERSION > 18
def parse(parts):
if is_version_higher_than_18():
return parse_current(parts)
else:
return parse_older_versions(parts)
def parse_older_versions(parts):
if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
return ' '.join(parts[2:]).lower()
else:
return ''
def parse_current(parts):
if len(parts) > 2 and parts[2].lower() == 'process' and parts[0] == name:
return ''.join(parts[1]).lower()
else:
return ''
def status():
"""Return the status of the process in monit, or the empty string if not present."""
rc, out, err = module.run_command('%s %s' % (MONIT, SUMMARY_COMMAND), check_rc=True)
for line in out.split('\n'):
# Sample output lines:
# Process 'name' Running
# Process 'name' Running - restart pending
parts = parse(line.split())
if parts != '':
return parts
else:
return ''
def run_command(command):
"""Runs a monit command, and returns the new status."""
module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
return status()
def wait_for_monit_to_stop_pending():
"""Fails this run if there is no status or it's pending/initializing for timeout"""
timeout_time = time.time() + timeout
sleep_time = 5
running_status = status()
while running_status == '' or 'pending' in running_status or 'initializing' in running_status:
if time.time() >= timeout_time:
module.fail_json(
msg='waited too long for "pending", or "initializing" status to go away ({0})'.format(
running_status
),
state=state
)
time.sleep(sleep_time)
running_status = status()
MONIT_MAJOR_VERSION, MONIT_MINOR_VERSION = monit_version()
SUMMARY_COMMAND = ('summary', 'summary -B')[is_version_higher_than_18()]
if state == 'reloaded':
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command('%s reload' % MONIT)
if rc != 0:
module.fail_json(msg='monit reload failed', stdout=out, stderr=err)
wait_for_monit_to_stop_pending()
module.exit_json(changed=True, name=name, state=state)
present = status() != ''
if not present and not state == 'present':
module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state)
if state == 'present':
if not present:
if module.check_mode:
module.exit_json(changed=True)
status = run_command('reload')
if status == '':
wait_for_monit_to_stop_pending()
module.exit_json(changed=True, name=name, state=state)
module.exit_json(changed=False, name=name, state=state)
wait_for_monit_to_stop_pending()
running = 'running' in status()
if running and state in ['started', 'monitored']:
module.exit_json(changed=False, name=name, state=state)
if running and state == 'stopped':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('stop')
if status in ['not monitored'] or 'stop pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not stopped' % name, status=status)
if running and state == 'unmonitored':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('unmonitor')
if status in ['not monitored'] or 'unmonitor pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not unmonitored' % name, status=status)
elif state == 'restarted':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('restart')
if status in ['initializing', 'running'] or 'restart pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not restarted' % name, status=status)
elif not running and state == 'started':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('start')
if status in ['initializing', 'running'] or 'start pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not started' % name, status=status)
elif not running and state == 'monitored':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('monitor')
if status not in ['not monitored']:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not monitored' % name, status=status)
module.exit_json(changed=False, name=name, state=state)
if __name__ == '__main__':
main()
|
ahuang11/ahh
|
refs/heads/master
|
examples/old_examples/basic_example.py
|
1
|
from ahh import vis
x = [1, 2, 3, 4]
y = [5, 6, 7, 8]
vis.plot_line(x, y)
|
jhunufernandes/ArduWatchRaspSerial
|
refs/heads/master
|
virtualenv/lib/python3.4/site-packages/pip/_vendor/requests/auth.py
|
149
|
# -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import threading
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
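# Editor's illustration (not part of requests): the value is the base64 of "user:pass", e.g.
#   >>> _basic_auth_str('user', 'pass')
#   'Basic dXNlcjpwYXNz'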
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
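# Editor's note (illustrative usage, not part of this file): both classes plug into requests
# through the 'auth' argument, e.g.
#   requests.get('https://example.org/protected', auth=HTTPBasicAuth('user', 'pass'))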
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
# Keep state in per-thread local storage
self._thread_local = threading.local()
def init_per_thread_state(self):
# Ensure state is initialized just once per-thread
if not hasattr(self._thread_local, 'init'):
self._thread_local.init = True
self._thread_local.last_nonce = ''
self._thread_local.nonce_count = 0
self._thread_local.chal = {}
self._thread_local.pos = None
self._thread_local.num_401_calls = None
def build_digest_header(self, method, url):
realm = self._thread_local.chal['realm']
nonce = self._thread_local.chal['nonce']
qop = self._thread_local.chal.get('qop')
algorithm = self._thread_local.chal.get('algorithm')
opaque = self._thread_local.chal.get('opaque')
hash_utf8 = None
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self._thread_local.last_nonce:
self._thread_local.nonce_count += 1
else:
self._thread_local.nonce_count = 1
ncvalue = '%08x' % self._thread_local.nonce_count
s = str(self._thread_local.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if not qop:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (
nonce, ncvalue, cnonce, 'auth', HA2
)
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self._thread_local.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self._thread_local.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self._thread_local.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self._thread_local.num_401_calls = 1
return r
def __call__(self, r):
# Initialize per-thread state, if needed
self.init_per_thread_state()
# If we have a saved nonce, skip the 401
if self._thread_local.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self._thread_local.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self._thread_local.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
self._thread_local.num_401_calls = 1
return r
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
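# Editor's note (illustrative usage, not part of this file): digest auth is used the same way,
# e.g. requests.get('https://example.org/digest', auth=HTTPDigestAuth('user', 'pass')); the
# response hooks registered in __call__ then answer the server's 401 challenge transparently.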
|
theunissenlab/python-neo
|
refs/heads/master
|
neo/test/iotest/test_brainwaredamio.py
|
6
|
# -*- coding: utf-8 -*-
"""
Tests of neo.io.brainwaredamio
"""
# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function
import os.path
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
import numpy as np
import quantities as pq
from neo.core import (AnalogSignal, Block,
ChannelIndex, Segment)
from neo.io import BrainwareDamIO
from neo.test.iotest.common_io_test import BaseTestIO
from neo.test.tools import (assert_same_sub_schema,
assert_neo_object_is_compliant)
from neo.test.iotest.tools import create_generic_reader
PY_VER = sys.version_info[0]
def proc_dam(filename):
'''Load a dam file that has already been processed by the official matlab
file converter. That matlab data is saved to an m-file, which is then
converted to a numpy '.npz' file. This numpy file is the file actually
loaded. This function converts it to a neo block and returns the block.
This block can be compared to the block produced by BrainwareDamIO to
make sure BrainwareDamIO is working properly
block = proc_dam(filename)
filename: The file name of the numpy file to load. It should end with
'*_dam_py?.npz'. This will be converted to a neo 'file_origin' property
with the value '*.dam', so the filename to compare should fit that pattern.
'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
for the python 3 version of the numpy file.
example: filename = 'file1_dam_py2.npz'
dam file name = 'file1.dam'
'''
with np.load(filename) as damobj:
damfile = damobj.items()[0][1].flatten()
filename = os.path.basename(filename[:-12]+'.dam')
signals = [res.flatten() for res in damfile['signal']]
stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
timestamps = [res[0, 0] for res in damfile['timestamp']]
block = Block(file_origin=filename)
chx = ChannelIndex(file_origin=filename,
index=np.array([0]),
channel_ids=np.array([1]),
channel_names=np.array(['Chan1'], dtype='S'))
block.channel_indexes.append(chx)
params = [res['params'][0, 0].flatten() for res in damfile['stim']]
values = [res['values'][0, 0].flatten() for res in damfile['stim']]
params = [[res1[0] for res1 in res] for res in params]
values = [[res1 for res1 in res] for res in values]
stims = [dict(zip(param, value)) for param, value in zip(params, values)]
fulldam = zip(stimIndexes, timestamps, signals, stims)
for stimIndex, timestamp, signal, stim in fulldam:
sig = AnalogSignal(signal=signal*pq.mV,
t_start=timestamp*pq.d,
file_origin=filename,
sampling_period=1.*pq.s)
segment = Segment(file_origin=filename,
index=stimIndex,
**stim)
segment.analogsignals = [sig]
block.segments.append(segment)
block.create_many_to_one_relationship()
return block
class BrainwareDamIOTestCase(BaseTestIO, unittest.TestCase):
'''
Unit test testcase for neo.io.BrainwareDamIO
'''
ioclass = BrainwareDamIO
read_and_write_is_bijective = False
# These are the files it tries to read and test for compliance
files_to_test = ['block_300ms_4rep_1clust_part_ch1.dam',
'interleaved_500ms_5rep_ch2.dam',
'long_170s_1rep_1clust_ch2.dam',
'multi_500ms_mulitrep_ch1.dam',
'random_500ms_12rep_noclust_part_ch2.dam',
'sequence_500ms_5rep_ch2.dam']
# these are reference files to compare to
files_to_compare = ['block_300ms_4rep_1clust_part_ch1',
'interleaved_500ms_5rep_ch2',
'',
'multi_500ms_mulitrep_ch1',
'random_500ms_12rep_noclust_part_ch2',
'sequence_500ms_5rep_ch2']
# add the appropriate suffix depending on the python version
for i, fname in enumerate(files_to_compare):
if fname:
files_to_compare[i] += '_dam_py%s.npz' % PY_VER
# Will fetch from g-node if they don't already exist locally
# How does it know to do this before any of the other tests?
files_to_download = files_to_test + files_to_compare
def test_reading_same(self):
for ioobj, path in self.iter_io_objects(return_path=True):
obj_reader_base = create_generic_reader(ioobj, target=False)
obj_reader_single = create_generic_reader(ioobj)
obj_base = obj_reader_base()
obj_single = obj_reader_single()
try:
assert_same_sub_schema(obj_base, obj_single)
except BaseException as exc:
exc.args += ('from ' + os.path.basename(path),)
raise
def test_against_reference(self):
for filename, refname in zip(self.files_to_test,
self.files_to_compare):
if not refname:
continue
obj = self.read_file(filename=filename)
refobj = proc_dam(self.get_filename_path(refname))
try:
assert_neo_object_is_compliant(obj)
assert_neo_object_is_compliant(refobj)
assert_same_sub_schema(obj, refobj)
except BaseException as exc:
exc.args += ('from ' + filename,)
raise
if __name__ == '__main__':
unittest.main()
|
yephper/django
|
refs/heads/master
|
tests/delete_regress/tests.py
|
1
|
from __future__ import unicode_literals
import datetime
from django.db import connection, models, transaction
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import (
Award, AwardNote, Book, Child, Eaten, Email, File, Food, FooFile,
FooFileProxy, FooImage, FooPhoto, House, Image, Item, Location, Login,
OrderedPerson, OrgUnit, Person, Photo, PlayedWith, PlayedWithNote, Policy,
Researcher, Toy, Version,
)
# Can't run this test under SQLite, because you can't
# get two connections to an in-memory database.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
class DeleteLockingTest(TransactionTestCase):
available_apps = ['delete_regress']
def setUp(self):
# Create a second connection to the default database
self.conn2 = connection.copy()
self.conn2.set_autocommit(False)
def tearDown(self):
# Close down the second connection.
self.conn2.rollback()
self.conn2.close()
def test_concurrent_delete(self):
"""Concurrent deletes don't collide and lock the database (#9479)."""
with transaction.atomic():
Book.objects.create(id=1, pagecount=100)
Book.objects.create(id=2, pagecount=200)
Book.objects.create(id=3, pagecount=300)
with transaction.atomic():
# Start a transaction on the main connection.
self.assertEqual(3, Book.objects.count())
# Delete something using another database connection.
with self.conn2.cursor() as cursor2:
cursor2.execute("DELETE from delete_regress_book WHERE id = 1")
self.conn2.commit()
# In the same transaction on the main connection, perform a
# queryset delete that covers the object deleted with the other
# connection. This causes an infinite loop under MySQL InnoDB
# unless we keep track of already deleted objects.
Book.objects.filter(pagecount__lt=250).delete()
self.assertEqual(1, Book.objects.count())
class DeleteCascadeTests(TestCase):
def test_generic_relation_cascade(self):
"""
Django cascades deletes through generic-related objects to their
reverse relations.
"""
person = Person.objects.create(name='Nelson Mandela')
award = Award.objects.create(name='Nobel', content_object=person)
AwardNote.objects.create(note='a peace prize',
award=award)
self.assertEqual(AwardNote.objects.count(), 1)
person.delete()
self.assertEqual(Award.objects.count(), 0)
# first two asserts are just sanity checks, this is the kicker:
self.assertEqual(AwardNote.objects.count(), 0)
def test_fk_to_m2m_through(self):
"""
If an M2M relationship has an explicitly-specified through model, and
some other model has an FK to that through model, deletion is cascaded
from one of the participants in the M2M, to the through model, to its
related model.
"""
juan = Child.objects.create(name='Juan')
paints = Toy.objects.create(name='Paints')
played = PlayedWith.objects.create(child=juan, toy=paints,
date=datetime.date.today())
PlayedWithNote.objects.create(played=played,
note='the next Jackson Pollock')
self.assertEqual(PlayedWithNote.objects.count(), 1)
paints.delete()
self.assertEqual(PlayedWith.objects.count(), 0)
# first two asserts just sanity checks, this is the kicker:
self.assertEqual(PlayedWithNote.objects.count(), 0)
def test_15776(self):
policy = Policy.objects.create(pk=1, policy_number="1234")
version = Version.objects.create(policy=policy)
location = Location.objects.create(version=version)
Item.objects.create(version=version, location=location)
policy.delete()
class DeleteCascadeTransactionTests(TransactionTestCase):
available_apps = ['delete_regress']
def test_inheritance(self):
"""
Auto-created many-to-many through tables referencing a parent model are
correctly found by the delete cascade when a child of that parent is
deleted.
Refs #14896.
"""
r = Researcher.objects.create()
email = Email.objects.create(
label="office-email", email_address="carl@science.edu"
)
r.contacts.add(email)
email.delete()
def test_to_field(self):
"""
Cascade deletion works with ForeignKey.to_field set to non-PK.
"""
apple = Food.objects.create(name="apple")
Eaten.objects.create(food=apple, meal="lunch")
apple.delete()
self.assertFalse(Food.objects.exists())
self.assertFalse(Eaten.objects.exists())
class LargeDeleteTests(TestCase):
def test_large_deletes(self):
"Regression for #13309 -- if the number of objects > chunk size, deletion still occurs"
for x in range(300):
Book.objects.create(pagecount=x + 100)
# attach a signal to make sure we will not fast-delete
def noop(*args, **kwargs):
pass
models.signals.post_delete.connect(noop, sender=Book)
Book.objects.all().delete()
models.signals.post_delete.disconnect(noop, sender=Book)
self.assertEqual(Book.objects.count(), 0)
class ProxyDeleteTest(TestCase):
"""
Tests on_delete behavior for proxy models.
See #16128.
"""
def create_image(self):
"""Return an Image referenced by both a FooImage and a FooFile."""
# Create an Image
test_image = Image()
test_image.save()
foo_image = FooImage(my_image=test_image)
foo_image.save()
# Get the Image instance as a File
test_file = File.objects.get(pk=test_image.pk)
foo_file = FooFile(my_file=test_file)
foo_file.save()
return test_image
def test_delete_proxy(self):
"""
Deleting the *proxy* instance bubbles through to its non-proxy and
*all* referring objects are deleted.
"""
self.create_image()
Image.objects.all().delete()
# An Image deletion == File deletion
self.assertEqual(len(Image.objects.all()), 0)
self.assertEqual(len(File.objects.all()), 0)
# The Image deletion cascaded and *all* references to it are deleted.
self.assertEqual(len(FooImage.objects.all()), 0)
self.assertEqual(len(FooFile.objects.all()), 0)
def test_delete_proxy_of_proxy(self):
"""
Deleting a proxy-of-proxy instance should bubble through to its proxy
and non-proxy parents, deleting *all* referring objects.
"""
test_image = self.create_image()
# Get the Image as a Photo
test_photo = Photo.objects.get(pk=test_image.pk)
foo_photo = FooPhoto(my_photo=test_photo)
foo_photo.save()
Photo.objects.all().delete()
# A Photo deletion == Image deletion == File deletion
self.assertEqual(len(Photo.objects.all()), 0)
self.assertEqual(len(Image.objects.all()), 0)
self.assertEqual(len(File.objects.all()), 0)
# The Photo deletion should have cascaded and deleted *all*
# references to it.
self.assertEqual(len(FooPhoto.objects.all()), 0)
self.assertEqual(len(FooFile.objects.all()), 0)
self.assertEqual(len(FooImage.objects.all()), 0)
def test_delete_concrete_parent(self):
"""
Deleting an instance of a concrete model should also delete objects
referencing its proxy subclass.
"""
self.create_image()
File.objects.all().delete()
# A File deletion == Image deletion
self.assertEqual(len(File.objects.all()), 0)
self.assertEqual(len(Image.objects.all()), 0)
# The File deletion should have cascaded and deleted *all* references
# to it.
self.assertEqual(len(FooFile.objects.all()), 0)
self.assertEqual(len(FooImage.objects.all()), 0)
def test_delete_proxy_pair(self):
"""
If a pair of proxy models are linked by an FK from one concrete parent
to the other, deleting one proxy model cascade-deletes the other, and
the deletion happens in the right order (not triggering an
IntegrityError on databases unable to defer integrity checks).
Refs #17918.
"""
# Create an Image (proxy of File) and FooFileProxy (proxy of FooFile,
# which has an FK to File)
image = Image.objects.create()
as_file = File.objects.get(pk=image.pk)
FooFileProxy.objects.create(my_file=as_file)
Image.objects.all().delete()
self.assertEqual(len(FooFileProxy.objects.all()), 0)
def test_19187_values(self):
with self.assertRaises(TypeError):
Image.objects.values().delete()
with self.assertRaises(TypeError):
Image.objects.values_list().delete()
class Ticket19102Tests(TestCase):
"""
Test different queries which alter the SELECT clause of the query. We
also must be using a subquery for the deletion (that is, the original
query has a join in it). The deletion should be done as "fast-path"
deletion (that is, just one query for the .delete() call).
Note that .values() is not tested here on purpose. .values().delete()
doesn't work for non fast-path deletes at all.
"""
def setUp(self):
self.o1 = OrgUnit.objects.create(name='o1')
self.o2 = OrgUnit.objects.create(name='o2')
self.l1 = Login.objects.create(description='l1', orgunit=self.o1)
self.l2 = Login.objects.create(description='l2', orgunit=self.o2)
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_annotate(self):
with self.assertNumQueries(1):
Login.objects.order_by('description').filter(
orgunit__name__isnull=False
).annotate(
n=models.Count('description')
).filter(
n=1, pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_extra(self):
with self.assertNumQueries(1):
Login.objects.order_by('description').filter(
orgunit__name__isnull=False
).extra(
select={'extraf': '1'}
).filter(
pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
@skipUnlessDBFeature('can_distinct_on_fields')
def test_ticket_19102_distinct_on(self):
# Both Login objs should have same description so that only the one
# having smaller PK will be deleted.
Login.objects.update(description='description')
with self.assertNumQueries(1):
Login.objects.distinct('description').order_by('pk').filter(
orgunit__name__isnull=False
).delete()
        # Assume that l1, which was created first, has the smaller PK.
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_select_related(self):
with self.assertNumQueries(1):
Login.objects.filter(
pk=self.l1.pk
).filter(
orgunit__name__isnull=False
).order_by(
'description'
).select_related('orgunit').delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_defer(self):
with self.assertNumQueries(1):
Login.objects.filter(
pk=self.l1.pk
).filter(
orgunit__name__isnull=False
).order_by(
'description'
).only('id').delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
class OrderedDeleteTests(TestCase):
def test_meta_ordered_delete(self):
# When a subquery is performed by deletion code, the subquery must be
        # cleared of all ordering. There was a bug that caused _meta ordering
# to be used. Refs #19720.
h = House.objects.create(address='Foo')
OrderedPerson.objects.create(name='Jack', lives_in=h)
OrderedPerson.objects.create(name='Bob', lives_in=h)
OrderedPerson.objects.filter(lives_in__address='Foo').delete()
self.assertEqual(OrderedPerson.objects.count(), 0)
|
minixalpha/Online-Judge
|
refs/heads/master
|
LeetCode/Python/linked_list_cycle_ii.py
|
2
|
#!/usr/bin/env python
#coding: utf-8
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param head, a ListNode
# @return a list node
def detectCycle(self, head):
visited = set([])
h = head
while h:
if h in visited:
return h
else:
visited.add(h)
h = h.next
return None
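# A constant-space alternative (added for illustration, not part of the
# original solution): a sketch of Floyd's cycle detection, assuming the same
# ListNode definition above. When the fast and slow pointers meet inside the
# cycle, restarting one pointer from the head and advancing both one step at
# a time makes them meet again at the cycle entry node.
class SolutionFloyd:
    def detectCycle(self, head):
        slow = fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if slow is fast:
                # Pointers met inside the cycle; locate the entry node.
                ptr = head
                while ptr is not slow:
                    ptr = ptr.next
                    slow = slow.next
                return ptr
        return None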
|
dancingdan/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/ops/bijectors/square.py
|
35
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Square bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Square",
]
class Square(bijector.Bijector):
"""Compute `g(X) = X^2`; X is a positive real number.
g is a bijection between the non-negative real numbers (R_+) and the
non-negative real numbers.
#### Examples
```python
bijector.Square().forward(x=[[1., 0], [2, 1]])
# Result: [[1., 0], [4, 1]], i.e., x^2
bijector.Square().inverse(y=[[1., 4], [9, 1]])
# Result: [[1., 2], [3, 1]], i.e., sqrt(y).
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self, validate_args=False, name="square"):
"""Instantiates the `Square` bijector.
Args:
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._name = name
super(Square, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
def _forward(self, x):
x = self._maybe_assert_valid(x)
return math_ops.square(x)
def _inverse(self, y):
y = self._maybe_assert_valid(y)
return math_ops.sqrt(y)
def _forward_log_det_jacobian(self, x):
x = self._maybe_assert_valid(x)
return np.log(2.) + math_ops.log(x)
def _maybe_assert_valid(self, t):
if not self.validate_args:
return t
is_valid = check_ops.assert_non_negative(
t, message="All elements must be non-negative.")
return control_flow_ops.with_dependencies([is_valid], t)
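# Illustrative note (added, not part of the original file): for y = x**2 with
# x >= 0, the forward log-determinant of the Jacobian is
#   log|dy/dx| = log(2*x) = log(2) + log(x),
# which is what `_forward_log_det_jacobian` above computes. A minimal usage
# sketch, assuming a TF 1.x session or eager execution to evaluate tensors:
#   sq = Square()
#   sq.forward(3.)                                  # -> 9.
#   sq.inverse(9.)                                  # -> 3.
#   sq.forward_log_det_jacobian(3., event_ndims=0)  # -> log(6.)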
|
ReneHerthel/RIOT
|
refs/heads/master
|
dist/tools/headerguards/headerguards.py
|
39
|
#!/usr/bin/env python3
import os
import sys
import difflib
from io import BytesIO, TextIOWrapper
_in = "/-."
_out = "___"
transtab = str.maketrans(_in, _out)
def path_to_guardname(filepath):
res = filepath.upper().translate(transtab)
if res.startswith("_"):
res = "PRIV" + res
return res
def get_guard_name(filepath):
parts = filepath.split(os.sep)
start = 0
found = False
for i, part in enumerate(parts):
if part == "include":
found = True
start = i + 1
break
if not found:
start = len(parts) - 1
return path_to_guardname(os.path.join(*parts[start:]))
def fix_headerguard(filename):
supposed = get_guard_name(filename)
with open(filename, "r", encoding='utf-8', errors='ignore') as f:
inlines = f.readlines()
tmp = TextIOWrapper(BytesIO(), encoding="utf-8", errors="ignore")
tmp.seek(0)
guard_found = 0
guard_name = ""
ifstack = 0
for line in inlines:
if guard_found == 0:
if line.startswith("#ifndef"):
guard_found += 1
guard_name = line[8:].rstrip()
line = "#ifndef %s\n" % (supposed)
elif guard_found == 1:
if line.startswith("#define") and line[8:].rstrip() == guard_name:
line = "#define %s\n" % (supposed)
guard_found += 1
else:
break
elif guard_found == 2:
if line.startswith("#if"):
ifstack += 1
elif line.startswith("#endif"):
if ifstack > 0:
ifstack -= 1
else:
guard_found += 1
line = "#endif /* %s */\n" % supposed
tmp.write(line)
tmp.seek(0)
if guard_found == 3:
for line in difflib.unified_diff(inlines, tmp.readlines(),
"%s" % filename, "%s" % filename):
sys.stdout.write(line)
else:
print("%s: no / broken header guard" % filename, file=sys.stderr)
return False
if __name__ == "__main__":
error = False
for filename in sys.argv[1:]:
if fix_headerguard(filename) is False:
error = True
if error:
sys.exit(1)
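# Usage sketch (added): the guard name is derived from the path below the
# first "include" directory, e.g. for a hypothetical header path
#   get_guard_name("sys/include/net/gnrc.h")  ->  "NET_GNRC_H"
# Running the script prints a unified diff of the proposed guard fixes and
# exits non-zero if any file has a missing or broken guard:
#   python3 dist/tools/headerguards/headerguards.py sys/include/net/gnrc.h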
|
k-nut/osm-checker-ulm
|
refs/heads/master
|
routes.py
|
1
|
#! /usr/bin/env python2.7
# -*- coding: utf-8 -*-
import requests
from flask import redirect, url_for, \
render_template, jsonify, request, \
send_from_directory
import datetime
import logging
import sys
from math import log10
import codecs
from models import DB_Stop, DB_Train, VBB_Stop, Bvg_line, app, db
from helpers import print_success, print_failure
logging.basicConfig(filename="rechecks.log",
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
level=logging.INFO)
if "--verbose" in sys.argv:
console = logging.StreamHandler(sys.stdout)
logging.getLogger('').addHandler(console)
@app.route("/")
def main():
''' The main page '''
return pagination(1)
@app.route("/city/<city>/page/<number>")
def pagination(number, city="Ulm"):
""" Render only 100 stops for better overview"""
number = int(number)
start = (number-1)*50
stop = number * 50
q = DB_Stop.query\
.filter(DB_Stop.landkreis == city)\
.order_by("last_run desc").all()
Stops = q[start:stop]
all_stops = len(q)
matches = len([stop for stop in q if stop.matches > 0])
landkreise = list(set([stop.landkreis for stop in
DB_Stop.query.all()]))
landkreise.sort()
for stop in Stops:
stop.turbo_url = VBB_Stop(stop.to_vbb_syntax()).turbo_url
return render_template("index.html",
city=city,
stops=Stops,
pages=all_stops,
this_page=number,
matches_count=matches,
landkreise=landkreise
)
@app.route("/search/<query>")
def search(query):
""" Return a list with all the stops that match the query"""
Stops = DB_Stop.query.filter(DB_Stop.name.like("%" + query + "%")).all()
for stop in Stops:
stop.turbo_url = VBB_Stop(stop.to_vbb_syntax()).turbo_url
return render_template("index.html", stops=Stops, pages=False)
@app.route("/stops/<show_only>/<north>/<east>/<south>/<west>")
def stops_in_bounding_box(show_only, north, east, south, west):
''' Only show stops within a given bounding box. Allow filtering by
matches/nomatches'''
if show_only == "problemsonly":
result = DB_Stop.query.filter(
DB_Stop.lat.between(float(south), float(north)),
DB_Stop.lon.between(float(west), float(east)),
DB_Stop.matches == 0
).all()
elif show_only == "matchesonly":
result = DB_Stop.query.filter(
DB_Stop.lat.between(float(south), float(north)),
DB_Stop.lon.between(float(west), float(east)),
DB_Stop.matches > 0
).all()
else:
result = DB_Stop.query.filter(
DB_Stop.lat.between(float(south), float(north)),
DB_Stop.lon.between(float(west), float(east)),
).all()
landkreise = list(set([stop.landkreis for stop in
DB_Stop.query.all()]))
landkreise.sort()
return render_template("index.html",
stops=result,
landkreise=landkreise
)
@app.route("/api/jsonstops/<show_only>/<north>/<east>/<south>/<west>")
def json_stops(show_only, north, east, south, west):
''' Only show stops within a given bounding box. Allow filtering by
matches/nomatches'''
if show_only == "problemsonly":
result = DB_Stop.query.filter(
DB_Stop.lat.between(float(south), float(north)),
DB_Stop.lon.between(float(west), float(east)),
DB_Stop.matches == 0
).all()
elif show_only == "matchesonly":
result = DB_Stop.query.filter(
DB_Stop.lat.between(float(south), float(north)),
DB_Stop.lon.between(float(west), float(east)),
DB_Stop.matches > 0
).all()
else:
result = DB_Stop.query.filter(
DB_Stop.lat.between(float(south), float(north)),
DB_Stop.lon.between(float(west), float(east)),
).all()
all_stops = []
for stop in result:
all_stops.append(stop.to_dict())
if len(all_stops) > 100:
return jsonify(stops="Too many")
return jsonify(stops=all_stops)
@app.route("/recheck/<id>")
def recheck(id, from_cm_line=False):
''' Rerun the checks for a stop and update the db'''
stop = DB_Stop.query.filter_by(id=id).first()
old_matches = stop.matches
stop.matches = VBB_Stop(stop.to_vbb_syntax(), stop.exception).is_in_osm()
stop.last_run = datetime.datetime.now().replace(microsecond=0)
db.session.commit()
if not from_cm_line:
logging.info("[recheck] Name: %-5s; Old: %-3i; New: %-3i; IP: %-3s" % (stop.name,
old_matches,
stop.matches,
request.remote_addr))
return redirect(url_for("pagination", number=1, city=stop.landkreis))
else:
if stop.matches > 0:
print_success("%s now matches %i stops" % (stop.name,
stop.matches))
else:
print_failure("%s does not match any stops..." % (stop.name))
return stop.matches
@app.route("/map_of_the_bad")
def map_of_the_bad():
''' Return a map with all the stops that aren't in OSM '''
return render_template("map.html")
@app.route("/api/stops")
def api_stops():
if request.args.get("matchesOnly"):
Stops = DB_Stop.query.filter(DB_Stop.matches >= 1).all()
elif request.args.get("noMatchesOnly"):
Stops = DB_Stop.query.filter(DB_Stop.matches < 1).all()
else:
Stops = DB_Stop.query.all()
all_stops = []
for stop in Stops:
all_stops.append(stop.to_dict())
return jsonify(stops=all_stops)
@app.route('/exceptions', methods=["get", "post"])
def match_exceptions():
if request.method == "POST":
if request.form["id"] and request.form["string"]:
stop = DB_Stop.query.filter_by(id=int(request.form["id"])).first()
stop.exception = request.form["string"]
db.session.commit()
return redirect(url_for("match_exceptions"))
all_stops = DB_Stop.query.all()
exceptions = [Stop for Stop in all_stops if Stop.exception]
return render_template("exceptions.html", all_stops=all_stops, exceptions=exceptions)
@app.route('/robots.txt')
def serve_static():
return send_from_directory(app.static_folder, request.path[1:])
def get_trains():
''' The initial query to set up the train db '''
url = "http://datenfragen.de/openvbb/GTFS_VBB_Okt2012/routes.txt"
req = requests.get(url)
text = req.text.split("\n")
for line in text:
Train = Bvg_line(line)
if Train.operator in ["BVG", "DB"]:
feedback = Train.is_in_osm()
if feedback:
print_success(feedback)
else:
print_failure(Train.line_number + " is not in OSM")
db_rep = DB_Train()
db.session.add(db_rep)
db.session.commit()
def recheck_batch(Stops):
ids = [Stop.id for Stop in Stops]
total = 0
number_of_stops = len(Stops)
# get the number of digits we want to show
digits = int(log10(number_of_stops)) + 1
counter = 0
for Stop in Stops:
counter += 1
print "%*i/%*i " % (digits, counter, digits, number_of_stops),
out = recheck(Stop.id, from_cm_line=True)
if out > 0:
total += 1
print_success("Insgesamt %i neue Treffer" % total)
def recheck_all_missings_stops():
Stops = DB_Stop.query.filter(DB_Stop.matches < 1).all()
recheck_batch(Stops)
def recheck_by_name(name):
Stops = DB_Stop.query.filter(DB_Stop.name.like("%" + name + "%"),
DB_Stop.matches < 1).all()
recheck_batch(Stops)
def recheck_all():
Stops = DB_Stop.query.all()
recheck_batch(Stops)
def get_stops():
''' The initial query to set up the stop db '''
all_stops = DB_Stop.query.all()
all_ids = [stop.id for stop in all_stops]
url = "http://www.swu.de/fileadmin/gtfs/stops.txt"
req = requests.get(url)
req.encoding = "utf-8"
text = req.text.split("\n")[1:]
    for line in text: # start in line 35 to exclude stops in Poland
if len(line) > 1:
Stop = VBB_Stop(line)
if Stop.stop_id not in all_ids and int(Stop.ismainstation) != 0:
feedback = Stop.is_in_osm()
if feedback > 0:
print_success(Stop.name + ": " + str(feedback))
else:
print_failure(Stop.name + ": 0")
new_stop = DB_Stop(
name=Stop.name,
lat=Stop.lat,
lon=Stop.lon,
matches=feedback,
vbb_id=Stop.stop_id
)
db.session.add(new_stop)
db.session.commit()
if __name__ == "__main__":
app.debug = True
app.run()
|
trankmichael/numpy
|
refs/heads/master
|
numpy/core/tests/test_scalarinherit.py
|
50
|
# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
import numpy as np
from numpy.testing import TestCase, run_module_suite
class A(object):
pass
class B(A, np.float64):
pass
class C(B):
pass
class D(C, B):
pass
class B0(np.float64, A):
pass
class C0(B0):
pass
class TestInherit(TestCase):
def test_init(self):
x = B(1.0)
assert str(x) == '1.0'
y = C(2.0)
assert str(y) == '2.0'
z = D(3.0)
assert str(z) == '3.0'
def test_init2(self):
x = B0(1.0)
assert str(x) == '1.0'
y = C0(2.0)
assert str(y) == '2.0'
if __name__ == "__main__":
run_module_suite()
|
xqt2010a/Python_Study
|
refs/heads/master
|
python/10_uart/01_uart.py
|
1
|
#pip install pyserial
import serial
import struct
import binascii
import numpy as np
from time import sleep
from threading import Thread
from matplotlib import pyplot as plt
from matplotlib import animation
global ax1
global ax2
global ax3
global ax4
global ax5
global ax6
global f
global Name_Str
global finish_data
REPORT_DATA_LEN = 66
DIR_FILE = './'
Right_Data = []
Left_Data = []
R_xs = []
R_v_cur = []
R_err = []
R_err1 = []
R_err2 = []
R_count = []
L_xs = []
L_v_cur = []
L_err = []
L_err1 = []
L_err2 = []
L_count = []
def bcc_off(serial):
global f
serial.write(bytes.fromhex('A3 3A 00 01 01 00'))
while True:
flag = 0
while serial.inWaiting()>0:
data = serial.readline()
print(data,len(data))
if data[:6] == b'\xA3\x3A\x00\x01\x00\x01':
print("bcc off")
flag = 1
break
if flag == 1:
break
def recv(serial):
global f
while True:
data = serial.readline()
if data != b'':
print("rx: ",data,file = f)
sleep(0.01)
return data
def Clear_Buf():
    # Reset all sample buffers before a new command run. Every list must be
    # declared global here; otherwise the assignments below would only bind
    # function-local names and the module-level buffers would never be cleared.
    global R_xs, R_v_cur, R_err, R_err1, R_err2, R_count
    global L_xs, L_v_cur, L_err, L_err1, L_err2, L_count
    R_xs = []
    R_v_cur = []
    R_err = []
    R_err1 = []
    R_err2 = []
    R_count = []
    L_xs = []
    L_v_cur = []
    L_err = []
    L_err1 = []
    L_err2 = []
    L_count = []
def Send_CMD():
global f
global Name_Str
while True:
tx_header = "A33A"
tx_buf = tx_header
        indata = input("\r\nw [v] [s]: linear velocity, distance\r\nd [w] [deg]: angular velocity, angle\r\nq [v] [w]: linear velocity, angular velocity\r\ninput cmd:")
cmd_datas = indata.split(" ")
cmd_i = 0
flag = 0
Name_Str = indata
for cmd_data in cmd_datas:
print(cmd_data)
if cmd_i == 0:
                if cmd_data == 'q': # linear velocity, angular velocity
tx_buf += 'A0'
tx_buf += '08'
                elif cmd_data == 'w': # linear velocity, distance
tx_buf += "A1"
tx_buf += "08"
                elif cmd_data == 'd': # angular velocity, degrees
tx_buf += 'A2'
tx_buf += '08'
elif cmd_i == 1:
                    bytes_hex1 = struct.pack('>l',int(cmd_data)) # big-endian
str_data1 = str(binascii.b2a_hex(bytes_hex1))[2:-1]
tx_buf += str_data1
elif cmd_i == 2:
bytes_hex2 = struct.pack('>l',int(cmd_data))
str_data2 = str(binascii.b2a_hex(bytes_hex2))[2:-1]
tx_buf += str_data2
flag = 1
cmd_i += 1
if flag == 1:
f = open(DIR_FILE+Name_Str+'.txt','w')
print(tx_buf,file = f)
tx_buf_b = bytes().fromhex(tx_buf)
serial.write(tx_buf_b)
Clear_Buf()
def UART_Rx_Decode(data):
global f
odd_data = ''
decode_datas = data.split('eeeeeeee')
for decode_data in decode_datas:
#print('x:%d ',len(decode_data),decode_data)
if len(decode_data) == REPORT_DATA_LEN:
if decode_data[:2] == "01": #Right_Data
#print('R:',decode_data)
Right_Data.append(decode_data)
elif decode_data[:2] == "02": #Left_Data
Left_Data.append(decode_data)
else:
print("error:",decode_data,file = f)
else:
if decode_data[:2] == "01":
odd_data = decode_data
elif decode_data[:2] == "02":
odd_data = decode_data
else:
print("rx: ",decode_data,file = f)
return odd_data
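# Illustrative note (added): each complete report frame is REPORT_DATA_LEN
# (66) hex characters: a 2-character channel id ("01" = right wheel,
# "02" = left wheel) followed by 64 hex characters (32 bytes) that
# Draw_Plot() below unpacks as eight little-endian 32-bit integers with
# struct.unpack('<llllllll', ...): num, v_dst, v_cur, err, err1, err2, inc, count.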
def UART_Handle():
global finish_data
has_data = 0
count = 0
last_data = ''
while True:
data = serial.readline()
sleep(0.1)
if data != b'':
print("...")
temp = str(binascii.b2a_hex(data))[2:-1] #str
last_data = UART_Rx_Decode(last_data+temp)
has_data = 1
count = 0
#finish_data = 0
#print(temp)
#print("receive: ",temp)
#serial.write(data)
else:
if 1==has_data:
count = count+1
if count > 9:
finish_data = 1
has_data = 0
print("xx")
def Draw_Init():
line1.set_data([],[])
line2.set_data([],[])
return line1,line2,
def Draw_Plot():
global ax1
global ax2
global ax3
global ax4
global ax5
global ax6
global f
global R_xs
global R_v_cur
global finish_data
Err_Count = []
if finish_data == 1:
r_len = len(Right_Data)
l_len = len(Left_Data)
if r_len >= l_len:
min_len = l_len
else:
min_len = r_len
print('len:',r_len,l_len,min_len,file = f)
for i in range(r_len):
#print(Right_Data)
r_y_str = (Right_Data[i])[2:]
r_y_hex = bytes.fromhex(r_y_str)
r_num,r_v_dst,r_v_cur,r_err,r_err1,r_err2,r_inc,r_count = struct.unpack('<llllllll',bytes(r_y_hex))
r_inc = r_inc/30000
print("r:%5d %8d %8d %8d %8d %8d %8d %8d"%(r_num,r_v_dst,r_v_cur,r_err,r_err1,r_err2,r_inc,r_count),file = f)
if r_num != 0:
R_xs.append(r_num)
R_v_cur.append(r_v_cur)
R_err.append(r_err)
R_err1.append(r_err1)
R_err2.append(r_err2)
R_count.append(r_count)
for i in range(l_len):
l_y_str = (Left_Data[i])[2:]
l_y_hex = bytes.fromhex(l_y_str)
l_num,l_v_dst,l_v_cur,l_err,l_err1,l_err2,l_inc,l_count = struct.unpack('<llllllll',bytes(l_y_hex))
l_inc = l_inc/30000
print('l:%5d %8d %8d %8d %8d %8d %8d %8d'%(l_num,l_v_dst,l_v_cur,l_err,l_err1,l_err2,l_inc,l_count),file = f)
if l_num != 0:
L_xs.append(l_num)
L_v_cur.append(l_v_cur)
L_err.append(l_err)
L_err1.append(l_err1)
L_err2.append(l_err2)
L_count.append(l_count)
min_len = min_len-5
for i in range(min_len):
print(i,R_count[i], L_count[i],(R_count[i]-L_count[i]),file = f)
Err_Count.append(R_count[i]-L_count[i])
ax1.plot(R_xs,R_v_cur,'b-')
ax3.plot(R_xs,R_err,'r-',label='err')
ax3.plot(R_xs,R_err1,'g-',label='err1')
ax3.plot(R_xs,R_err2,'b-',label='err2')
ax5.plot(R_xs,R_count,'r*',label='r_count')
ax2.plot(L_xs,L_v_cur,'b-')
ax4.plot(L_xs,L_err,'r-',label='err')
ax4.plot(L_xs,L_err1,'g-',label='err1')
ax4.plot(L_xs,L_err2,'b-',label='err2')
ax5.plot(L_xs,L_count,'g*',label='l_count')
ax6.plot(range(min_len),Err_Count,'g.',label='err')
f.close()
plt.savefig(DIR_FILE+Name_Str+'.png',dpi=100)
plt.show()
finish_data = 0
print("show")
def DRAW_Handle():
global ax1
global ax2
global ax3
global ax4
global ax5
global ax6
fig = plt.figure()
fig.set_size_inches(18,10,forward=True)
ax1 = fig.add_subplot(3,2,1)
ax2 = fig.add_subplot(3,2,2)
ax3 = fig.add_subplot(3,2,3)
ax4 = fig.add_subplot(3,2,4)
ax5 = fig.add_subplot(3,2,5)
ax6 = fig.add_subplot(3,2,6)
ax1.set_title('Right wheel')
ax2.set_title('Left wheel')
ax3.set_title('Right error')
ax4.set_title('Left error')
ax5.set_title('Count')
ax6.set_title('Count error')
    ax1.grid(True) # show grid
ax2.grid(True)
ax3.grid(True)
ax4.grid(True)
ax5.grid(True)
ax6.grid(True)
while True:
Draw_Plot()
sleep(0.1)
if __name__ == '__main__':
global finish_data
global Name_Str
serial = serial.Serial('COM5', 115200, timeout=0.5)
if serial.isOpen():
print("success")
else:
print("failed")
bcc_off(serial)
finish_data = 0
t1 = Thread(target=Send_CMD,args=())
t2 = Thread(target=UART_Handle,args=())
t3 = Thread(target=DRAW_Handle,args=())
t1.start()
t2.start()
t3.start()
|
wolendranh/movie_radio
|
refs/heads/master
|
radio/tests/services/test_date.py
|
1
|
import datetime
from unittest import mock, TestCase
from radio.services.date import (
get_day_time,
DAY,
EVENING,
MORNING
)
class DateServiceTestCase(TestCase):
def test_evening(self):
with mock.patch('radio.services.date.datetime') as mock_date:
mock_date.now.return_value = datetime.datetime(2016, 6, 30, 22)
result = get_day_time()
print(result)
assert result == EVENING
def test_morning(self):
with mock.patch('radio.services.date.datetime') as mock_date:
mock_date.now.return_value = datetime.datetime(2016, 6, 30, 9)
result = get_day_time()
assert result == MORNING
def test_day(self):
with mock.patch('radio.services.date.datetime') as mock_date:
mock_date.now.return_value = datetime.datetime(2016, 6, 30, 14)
result = get_day_time()
assert result == DAY
def test_day_time_in_middle(self):
with mock.patch('radio.services.date.datetime') as mock_date:
mock_date.now.return_value = datetime.datetime(2016, 6, 30, 16)
result = get_day_time()
assert result == DAY
def test_evening_time_midnight(self):
with mock.patch('radio.services.date.datetime') as mock_date:
mock_date.now.return_value = datetime.datetime(2016, 6, 30, 0)
result = get_day_time()
assert result == EVENING
def test_evening_time_night(self):
with mock.patch('radio.services.date.datetime') as mock_date:
mock_date.now.return_value = datetime.datetime(2016, 6, 30, 3)
result = get_day_time()
assert result == EVENING
|
kifcaliph/odoo
|
refs/heads/8.0
|
addons/document/wizard/__init__.py
|
444
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import document_configuration
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
trondhindenes/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/google/gcp_dns_resource_record_set.py
|
8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_dns_resource_record_set
description:
- A single DNS record that exists on a domain name (i.e. in a managed zone).
- This record defines the information about the domain and where the domain / subdomains
direct to.
- The record will include the domain/subdomain name, a type (i.e. A, AAAA, CAA, MX,
CNAME, NS, etc) .
short_description: Creates a GCP ResourceRecordSet
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices: ['present', 'absent']
default: 'present'
name:
description:
- For example, U(www.example.com.)
required: true
type:
description:
- One of valid DNS resource types.
required: true
choices: ['A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR', 'NS', 'PTR', 'SOA', 'SPF', 'SRV', 'TXT']
ttl:
description:
- Number of seconds that this ResourceRecordSet can be cached by resolvers.
required: false
target:
description:
- As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) .
required: false
managed_zone:
description:
- Identifies the managed zone addressed by this request.
- Can be the managed zone name or id.
- 'This field represents a link to a ManagedZone resource in GCP. It can be specified
in two ways. You can add `register: name-of-resource` to a gcp_dns_managed_zone
task and then set this managed_zone field to "{{ name-of-resource }}" Alternatively,
you can set this managed_zone to a dictionary with the name key where the value
is the name of your ManagedZone.'
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a managed zone
gcp_dns_managed_zone:
name: "managedzone-rrs"
dns_name: testzone-4.com.
description: test zone
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: managed_zone
- name: create a resource record set
gcp_dns_resource_record_set:
name: www.testzone-4.com.
managed_zone: "{{ managed_zone }}"
type: A
ttl: 600
target:
- 10.1.2.3
- 40.5.6.7
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
name:
description:
- For example, U(www.example.com.)
returned: success
type: str
type:
description:
- One of valid DNS resource types.
returned: success
type: str
ttl:
description:
- Number of seconds that this ResourceRecordSet can be cached by resolvers.
returned: success
type: int
target:
description:
- As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) .
returned: success
type: list
managed_zone:
description:
- Identifies the managed zone addressed by this request.
- Can be the managed zone name or id.
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import copy
import datetime
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
type=dict(required=True, type='str', choices=['A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR', 'NS', 'PTR', 'SOA', 'SPF', 'SRV', 'TXT']),
ttl=dict(type='int'),
target=dict(type='list', elements='str'),
managed_zone=dict(required=True, type='dict')
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite']
state = module.params['state']
kind = 'dns#resourceRecordSet'
fetch = fetch_wrapped_resource(module, 'dns#resourceRecordSet',
'dns#resourceRecordSetsListResponse',
'rrsets')
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind, fetch)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind, fetch)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
change = create_change(None, updated_record(module), module)
change_id = int(change['id'])
if change['status'] == 'pending':
wait_for_change_to_complete(change_id, module)
return fetch_wrapped_resource(module, 'dns#resourceRecordSet',
'dns#resourceRecordSetsListResponse',
'rrsets')
def update(module, link, kind, fetch):
change = create_change(fetch, updated_record(module), module)
change_id = int(change['id'])
if change['status'] == 'pending':
wait_for_change_to_complete(change_id, module)
return fetch_wrapped_resource(module, 'dns#resourceRecordSet',
'dns#resourceRecordSetsListResponse',
'rrsets')
def delete(module, link, kind, fetch):
change = create_change(fetch, None, module)
change_id = int(change['id'])
if change['status'] == 'pending':
wait_for_change_to_complete(change_id, module)
return fetch_wrapped_resource(module, 'dns#resourceRecordSet',
'dns#resourceRecordSetsListResponse',
'rrsets')
def resource_to_request(module):
request = {
u'kind': 'dns#resourceRecordSet',
u'name': module.params.get('name'),
u'type': module.params.get('type'),
u'ttl': module.params.get('ttl'),
u'rrdatas': module.params.get('target')
}
return_vals = {}
for k, v in request.items():
if v:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'dns')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def fetch_wrapped_resource(module, kind, wrap_kind, wrap_path):
result = fetch_resource(module, self_link(module), wrap_kind)
if result is None or wrap_path not in result:
return None
result = unwrap_resource(result[wrap_path], module)
if result is None:
return None
if result['kind'] != kind:
module.fail_json(msg="Incorrect result: {kind}".format(**result))
return result
def self_link(module):
res = {
'project': module.params['project'],
'managed_zone': replace_resource_dict(module.params['managed_zone'], 'name'),
'name': module.params['name'],
'type': module.params['type']
}
return "https://www.googleapis.com/dns/v1/projects/{project}/managedZones/{managed_zone}/rrsets?name={name}&type={type}".format(**res)
def collection(module):
res = {
'project': module.params['project'],
'managed_zone': replace_resource_dict(module.params['managed_zone'], 'name')
}
return "https://www.googleapis.com/dns/v1/projects/{project}/managedZones/{managed_zone}/changes".format(**res)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'name': response.get(u'name'),
u'type': response.get(u'type'),
u'ttl': response.get(u'ttl'),
u'rrdatas': response.get(u'target')
}
def updated_record(module):
return {
'kind': 'dns#resourceRecordSet',
'name': module.params['name'],
'type': module.params['type'],
'ttl': module.params['ttl'] if module.params['ttl'] else 900,
'rrdatas': module.params['target']
}
def unwrap_resource(result, module):
if not result:
return None
return result[0]
class SOAForwardable(object):
def __init__(self, params, module):
self.params = params
self.module = module
def fail_json(self, *args, **kwargs):
self.module.fail_json(*args, **kwargs)
def raise_for_status(self, *args, **kwargs):
self.module.raise_for_status(*args, **kwargs)
def prefetch_soa_resource(module):
name = module.params['name'].split('.')[1:]
resource = SOAForwardable({
'type': 'SOA',
'managed_zone': module.params['managed_zone'],
'name': '.'.join(name),
'project': module.params['project'],
'scopes': module.params['scopes'],
'service_account_file': module.params['service_account_file'],
'auth_kind': module.params['auth_kind'],
'service_account_email': module.params['service_account_email']
}, module)
result = fetch_wrapped_resource(resource, 'dns#resourceRecordSet',
'dns#resourceRecordSetsListResponse',
'rrsets')
if not result:
raise ValueError("Google DNS Managed Zone %s not found" % module.params['managed_zone']['name'])
return result
def create_change(original, updated, module):
auth = GcpSession(module, 'dns')
return return_if_change_object(module,
auth.post(collection(module),
resource_to_change_request(
original, updated, module)
))
# Fetch current SOA. We need the last SOA so we can increment its serial
def update_soa(module):
original_soa = prefetch_soa_resource(module)
# Create a clone of the SOA record so we can update it
updated_soa = copy.deepcopy(original_soa)
soa_parts = updated_soa['rrdatas'][0].split(' ')
soa_parts[2] = str(int(soa_parts[2]) + 1)
updated_soa['rrdatas'][0] = ' '.join(soa_parts)
return [original_soa, updated_soa]
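# Illustrative note (added): an SOA rrdata string has the form
#   "ns1.example.com. admin.example.com. 5 21600 3600 259200 300"
# where the third field (index 2) is the zone serial. update_soa() above
# therefore turns a serial of 5 into 6; the change request then adds the
# updated SOA and deletes the original one alongside the record being changed.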
def resource_to_change_request(original_record, updated_record, module):
original_soa, updated_soa = update_soa(module)
result = new_change_request()
add_additions(result, updated_soa, updated_record)
add_deletions(result, original_soa, original_record)
return result
def add_additions(result, updated_soa, updated_record):
if updated_soa:
result['additions'].append(updated_soa)
if updated_record:
result['additions'].append(updated_record)
def add_deletions(result, original_soa, original_record):
if original_soa:
result['deletions'].append(original_soa)
if original_record:
result['deletions'].append(original_record)
# TODO(nelsonjr): Merge and delete this code once async operation
# declared in api.yaml
def wait_for_change_to_complete(change_id, module):
status = 'pending'
while status == 'pending':
status = get_change_status(change_id, module)
if status != 'done':
time.sleep(0.5)
def get_change_status(change_id, module):
auth = GcpSession(module, 'dns')
link = collection(module) + "/%s" % change_id
return return_if_change_object(module, auth.get(link))['status']
def new_change_request():
return {
'kind': 'dns#change',
'additions': [],
'deletions': [],
'start_time': datetime.datetime.now().isoformat()
}
def return_if_change_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
if response.status_code == 204:
return None
try:
response.raise_for_status()
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if result['kind'] != 'dns#change':
module.fail_json(msg="Invalid result: %s" % result['kind'])
return result
if __name__ == '__main__':
main()
|
marchaos/plugin.image.flickr
|
refs/heads/master
|
flickrapi/__init__.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''A FlickrAPI interface.
The main functionality can be found in the `flickrapi.FlickrAPI`
class.
See `the FlickrAPI homepage`_ for more info.
.. _`the FlickrAPI homepage`: http://stuvel.eu/projects/flickrapi
'''
__version__ = '1.4.2'
__all__ = ('FlickrAPI', 'IllegalArgumentException', 'FlickrError',
'CancelUpload', 'XMLNode', 'set_log_level', '__version__')
__author__ = u'Sybren St\u00fcvel'.encode('utf-8')
# Copyright (c) 2007 by the respective coders, see
# http://www.stuvel.eu/projects/flickrapi
#
# This code is subject to the Python licence, as can be read on
# http://www.python.org/download/releases/2.5.2/license/
#
# For those without an internet connection, here is a summary. When this
# summary clashes with the Python licence, the latter will be applied.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import urllib
import urllib2
import os.path
import logging
import copy
import webbrowser
# Smartly import hashlib and fall back on md5
try: from hashlib import md5
except ImportError: from md5 import md5
from flickrapi.tokencache import TokenCache, SimpleTokenCache, \
LockingTokenCache
from flickrapi.xmlnode import XMLNode
from flickrapi.multipart import Part, Multipart, FilePart
from flickrapi.exceptions import *
from flickrapi.cache import SimpleCache
from flickrapi import reportinghttp
logging.basicConfig()
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
def make_utf8(dictionary):
'''Encodes all Unicode strings in the dictionary to UTF-8. Converts
all other objects to regular strings.
Returns a copy of the dictionary, doesn't touch the original.
'''
result = {}
for (key, value) in dictionary.iteritems():
if isinstance(value, unicode):
value = value.encode('utf-8')
else:
value = str(value)
result[key] = value
return result
def debug(method):
'''Method decorator for debugging method calls.
Using this automatically sets the log level to DEBUG.
'''
LOG.setLevel(logging.DEBUG)
def debugged(*args, **kwargs):
LOG.debug("Call: %s(%s, %s)" % (method.__name__, args,
kwargs))
result = method(*args, **kwargs)
LOG.debug("\tResult: %s" % result)
return result
return debugged
# REST parsers, {format: parser_method, ...}. Fill by using the
# @rest_parser(format) function decorator
rest_parsers = {}
def rest_parser(format):
'''Method decorator, use this to mark a function as the parser for
REST as returned by Flickr.
'''
def decorate_parser(method):
rest_parsers[format] = method
return method
return decorate_parser
def require_format(required_format):
'''Method decorator, raises a ValueError when the decorated method
is called if the default format is not set to ``required_format``.
'''
def decorator(method):
def decorated(self, *args, **kwargs):
# If everything is okay, call the method
if self.default_format == required_format:
return method(self, *args, **kwargs)
# Otherwise raise an exception
msg = 'Function %s requires that you use ' \
'ElementTree ("etree") as the communication format, ' \
'while the current format is set to "%s".'
raise ValueError(msg % (method.func_name, self.default_format))
return decorated
return decorator
class FlickrAPI(object):
"""Encapsulates Flickr functionality.
Example usage::
flickr = flickrapi.FlickrAPI(api_key)
photos = flickr.photos_search(user_id='73509078@N00', per_page='10')
sets = flickr.photosets_getList(user_id='73509078@N00')
"""
flickr_host = "api.flickr.com"
flickr_rest_form = "/services/rest/"
flickr_auth_form = "/services/auth/"
flickr_upload_form = "/services/upload/"
flickr_replace_form = "/services/replace/"
def __init__(self, api_key, secret=None, username=None,
token=None, format='etree', store_token=True,
cache=False):
"""Construct a new FlickrAPI instance for a given API key
and secret.
api_key
The API key as obtained from Flickr.
secret
The secret belonging to the API key.
username
Used to identify the appropriate authentication token for a
certain user.
token
If you already have an authentication token, you can give
it here. It won't be stored on disk by the FlickrAPI instance.
format
The response format. Use either "xmlnode" or "etree" to get a parsed
response, or use any response format supported by Flickr to get an
unparsed response from method calls. It's also possible to pass the
``format`` parameter on individual calls.
store_token
Disables the on-disk token cache if set to False (default is True).
Use this to ensure that tokens aren't read nor written to disk, for
example in web applications that store tokens in cookies.
cache
Enables in-memory caching of FlickrAPI calls - set to ``True`` to
use. If you don't want to use the default settings, you can
instantiate a cache yourself too:
>>> f = FlickrAPI(api_key='123')
>>> f.cache = SimpleCache(timeout=5, max_entries=100)
"""
self.api_key = api_key
self.secret = secret
self.default_format = format
self.__handler_cache = {}
if token:
# Use a memory-only token cache
self.token_cache = SimpleTokenCache()
self.token_cache.token = token
elif not store_token:
# Use an empty memory-only token cache
self.token_cache = SimpleTokenCache()
else:
# Use a real token cache
self.token_cache = TokenCache(api_key, username)
if cache:
self.cache = SimpleCache()
else:
self.cache = None
def __repr__(self):
'''Returns a string representation of this object.'''
return '[FlickrAPI for key "%s"]' % self.api_key
__str__ = __repr__
def trait_names(self):
'''Returns a list of method names as supported by the Flickr
API. Used for tab completion in IPython.
'''
try:
rsp = self.reflection_getMethods(format='etree')
except FlickrError:
return None
def tr(name):
'''Translates Flickr names to something that can be called
here.
>>> tr(u'flickr.photos.getInfo')
u'photos_getInfo'
'''
return name[7:].replace('.', '_')
return [tr(m.text) for m in rsp.getiterator('method')]
@rest_parser('xmlnode')
def parse_xmlnode(self, rest_xml):
'''Parses a REST XML response from Flickr into an XMLNode object.'''
rsp = XMLNode.parse(rest_xml, store_xml=True)
if rsp['stat'] == 'ok':
return rsp
err = rsp.err[0]
raise FlickrError(u'Error: %(code)s: %(msg)s' % err)
@rest_parser('etree')
def parse_etree(self, rest_xml):
'''Parses a REST XML response from Flickr into an ElementTree object.'''
try:
import xml.etree.ElementTree as ElementTree
except ImportError:
# For Python 2.4 compatibility:
try:
import elementtree.ElementTree as ElementTree
except ImportError:
raise ImportError("You need to install "
"ElementTree for using the etree format")
rsp = ElementTree.fromstring(rest_xml)
if rsp.attrib['stat'] == 'ok':
return rsp
err = rsp.find('err')
raise FlickrError(u'Error: %(code)s: %(msg)s' % err.attrib)
def sign(self, dictionary):
"""Calculate the flickr signature for a set of params.
data
a hash of all the params and values to be hashed, e.g.
``{"api_key":"AAAA", "auth_token":"TTTT", "key":
u"value".encode('utf-8')}``
"""
data = [self.secret]
for key in sorted(dictionary.keys()):
data.append(key)
datum = dictionary[key]
if isinstance(datum, unicode):
raise IllegalArgumentException("No Unicode allowed, "
"argument %s (%r) should have been UTF-8 by now"
% (key, datum))
data.append(datum)
md5_hash = md5(''.join(data))
return md5_hash.hexdigest()
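    # Illustrative note (added): for secret 'abc' and parameters
    # {'api_key': 'AAAA', 'method': 'flickr.test.echo'} the string that gets
    # hashed is 'abcapi_keyAAAAmethodflickr.test.echo' (the secret followed by
    # each key and its value in sorted key order), and the signature is the
    # MD5 hex digest of that string.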
def encode_and_sign(self, dictionary):
'''URL encodes the data in the dictionary, and signs it using the
given secret, if a secret was given.
'''
dictionary = make_utf8(dictionary)
if self.secret:
dictionary['api_sig'] = self.sign(dictionary)
return urllib.urlencode(dictionary)
def __getattr__(self, attrib):
"""Handle all the regular Flickr API calls.
Example::
flickr.auth_getFrob(api_key="AAAAAA")
etree = flickr.photos_getInfo(photo_id='1234')
etree = flickr.photos_getInfo(photo_id='1234', format='etree')
xmlnode = flickr.photos_getInfo(photo_id='1234', format='xmlnode')
json = flickr.photos_getInfo(photo_id='1234', format='json')
"""
# Refuse to act as a proxy for unimplemented special methods
if attrib.startswith('_'):
raise AttributeError("No such attribute '%s'" % attrib)
# Construct the method name and see if it's cached
method = "flickr." + attrib.replace("_", ".")
if method in self.__handler_cache:
return self.__handler_cache[method]
def handler(**args):
'''Dynamically created handler for a Flickr API call'''
if self.token_cache.token and not self.secret:
raise ValueError("Auth tokens cannot be used without "
"API secret")
# Set some defaults
defaults = {'method': method,
'auth_token': self.token_cache.token,
'api_key': self.api_key,
'format': self.default_format}
args = self.__supply_defaults(args, defaults)
return self.__wrap_in_parser(self.__flickr_call,
parse_format=args['format'], **args)
handler.method = method
self.__handler_cache[method] = handler
return handler
def __supply_defaults(self, args, defaults):
'''Returns a new dictionary containing ``args``, augmented with defaults
from ``defaults``.
Defaults can be overridden, or completely removed by setting the
appropriate value in ``args`` to ``None``.
>>> f = FlickrAPI('123')
>>> f._FlickrAPI__supply_defaults(
... {'foo': 'bar', 'baz': None, 'token': None},
... {'baz': 'foobar', 'room': 'door'})
{'foo': 'bar', 'room': 'door'}
'''
result = args.copy()
for key, default_value in defaults.iteritems():
# Set the default if the parameter wasn't passed
if key not in args:
result[key] = default_value
for key, value in result.copy().iteritems():
# You are able to remove a default by assigning None, and we can't
# pass None to Flickr anyway.
if result[key] is None:
del result[key]
return result
def __flickr_call(self, **kwargs):
'''Performs a Flickr API call with the given arguments. The method name
itself should be passed as the 'method' parameter.
Returns the unparsed data from Flickr::
data = self.__flickr_call(method='flickr.photos.getInfo',
photo_id='123', format='rest')
'''
LOG.debug("Calling %s" % kwargs)
post_data = self.encode_and_sign(kwargs)
# Return value from cache if available
if self.cache and self.cache.get(post_data):
return self.cache.get(post_data)
url = "https://" + self.flickr_host + self.flickr_rest_form
flicksocket = urllib2.urlopen(url, post_data)
reply = flicksocket.read()
flicksocket.close()
# Store in cache, if we have one
if self.cache is not None:
self.cache.set(post_data, reply)
return reply
def __wrap_in_parser(self, wrapped_method, parse_format, *args, **kwargs):
'''Wraps a method call in a parser.
The parser will be looked up by the ``parse_format`` specifier. If there
is a parser and ``kwargs['format']`` is set, it's set to ``rest``, and
the response of the method is parsed before it's returned.
'''
# Find the parser, and set the format to rest if we're supposed to
# parse it.
if parse_format in rest_parsers and 'format' in kwargs:
kwargs['format'] = 'rest'
LOG.debug('Wrapping call %s(self, %s, %s)' % (wrapped_method, args,
kwargs))
data = wrapped_method(*args, **kwargs)
# Just return if we have no parser
if parse_format not in rest_parsers:
return data
# Return the parsed data
parser = rest_parsers[parse_format]
return parser(self, data)
def auth_url(self, perms, frob):
"""Return the authorization URL to get a token.
This is the URL the app will launch a browser toward if it
needs a new token.
perms
"read", "write", or "delete"
frob
picked up from an earlier call to FlickrAPI.auth_getFrob()
"""
encoded = self.encode_and_sign({
"api_key": self.api_key,
"frob": frob,
"perms": perms})
return "https://%s%s?%s" % (self.flickr_host, \
self.flickr_auth_form, encoded)
def web_login_url(self, perms):
'''Returns the web login URL to forward web users to.
perms
"read", "write", or "delete"
'''
encoded = self.encode_and_sign({
"api_key": self.api_key,
"perms": perms})
return "https://%s%s?%s" % (self.flickr_host, \
self.flickr_auth_form, encoded)
def __extract_upload_response_format(self, kwargs):
'''Returns the response format given in kwargs['format'], or
the default format if there is no such key.
If kwargs contains 'format', it is removed from kwargs.
If the format isn't compatible with Flickr's upload response
type, a FlickrError exception is raised.
'''
# Figure out the response format
format = kwargs.get('format', self.default_format)
if format not in rest_parsers and format != 'rest':
raise FlickrError('Format %s not supported for uploading '
'photos' % format)
# The format shouldn't be used in the request to Flickr.
if 'format' in kwargs:
del kwargs['format']
return format
def upload(self, filename, callback=None, **kwargs):
"""Upload a file to flickr.
Be extra careful you spell the parameters correctly, or you will
get a rather cryptic "Invalid Signature" error on the upload!
Supported parameters:
filename
name of a file to upload
callback
method that gets progress reports
title
title of the photo
description
description a.k.a. caption of the photo
tags
space-delimited list of tags, ``'''tag1 tag2 "long
tag"'''``
is_public
"1" or "0" for a public resp. private photo
is_friend
"1" or "0" whether friends can see the photo while it's
marked as private
is_family
"1" or "0" whether family can see the photo while it's
marked as private
content_type
Set to "1" for Photo, "2" for Screenshot, or "3" for Other.
hidden
Set to "1" to keep the photo in global search results, "2"
to hide from public searches.
format
The response format. You can only choose between the
parsed responses or 'rest' for plain REST.
The callback method should take two parameters:
``def callback(progress, done)``
Progress is a number between 0 and 100, and done is a boolean
that's true only when the upload is done.
"""
return self.__upload_to_form(self.flickr_upload_form,
filename, callback, **kwargs)
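# Illustrative sketch, not part of the original module: a minimal progress
# callback with the ``(progress, done)`` signature documented above. The
# filename and title in the usage note are hypothetical placeholders.
def _example_upload_callback(progress, done):
    '''Print upload progress; ``progress`` is 0-100 and ``done`` is a boolean.'''
    if done:
        print "Upload finished"
    else:
        print "Upload progress: %d%%" % progress
# Hypothetical usage, assuming an authenticated FlickrAPI instance ``flickr``:
#     flickr.upload(filename='photo.jpg', title='Example', is_public='0',
#                   callback=_example_upload_callback)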
def replace(self, filename, photo_id, callback=None, **kwargs):
"""Replace an existing photo.
Supported parameters:
filename
name of a file to upload
photo_id
the ID of the photo to replace
callback
method that gets progress reports
format
The response format. You can only choose between the
parsed responses or 'rest' for plain REST. Defaults to the
format passed to the constructor.
The callback parameter has the same semantics as described in the
``upload`` function.
"""
if not photo_id:
raise IllegalArgumentException("photo_id must be specified")
kwargs['photo_id'] = photo_id
return self.__upload_to_form(self.flickr_replace_form,
filename, callback, **kwargs)
def __upload_to_form(self, form_url, filename, callback, **kwargs):
'''Uploads a photo - can be used to either upload a new photo
or replace an existing one.
form_url must be either ``FlickrAPI.flickr_replace_form`` or
``FlickrAPI.flickr_upload_form``.
'''
if not filename:
raise IllegalArgumentException("filename must be specified")
if not self.token_cache.token:
raise IllegalArgumentException("Authentication is required")
# Figure out the response format
format = self.__extract_upload_response_format(kwargs)
# Update the arguments with the ones the user won't have to supply
arguments = {'auth_token': self.token_cache.token,
'api_key': self.api_key}
arguments.update(kwargs)
# Convert to UTF-8 if an argument is a Unicode string
kwargs = make_utf8(arguments)
if self.secret:
kwargs["api_sig"] = self.sign(kwargs)
url = "https://%s%s" % (self.flickr_host, form_url)
# construct POST data
body = Multipart()
for arg, value in kwargs.iteritems():
part = Part({'name': arg}, value)
body.attach(part)
filepart = FilePart({'name': 'photo'}, filename, 'image/jpeg')
body.attach(filepart)
return self.__wrap_in_parser(self.__send_multipart, format,
url, body, callback)
def __send_multipart(self, url, body, progress_callback=None):
'''Sends a Multipart object to a URL.
Returns the resulting unparsed XML from Flickr.
'''
LOG.debug("Uploading to %s" % url)
request = urllib2.Request(url)
request.add_data(str(body))
(header, value) = body.header()
request.add_header(header, value)
if not progress_callback:
# Just use urllib2 if there is no progress callback
# function
response = urllib2.urlopen(request)
return response.read()
def __upload_callback(percentage, done, seen_header=[False]):
'''Filters out the progress report on the HTTP header'''
# Call the user's progress callback when we've filtered
# out the HTTP header
if seen_header[0]:
return progress_callback(percentage, done)
# Remember the first time we hit 'done'.
if done:
seen_header[0] = True
response = reportinghttp.urlopen(request, __upload_callback)
return response.read()
def validate_frob(self, frob, perms):
'''Lets the user validate the frob by launching a browser to
the Flickr website.
'''
auth_url = self.auth_url(perms, frob)
try:
browser = webbrowser.get()
except webbrowser.Error:
if 'BROWSER' not in os.environ:
raise
browser = webbrowser.GenericBrowser(os.environ['BROWSER'])
browser.open(auth_url, True, True)
def get_token_part_one(self, perms="read", auth_callback=None):
"""Get a token either from the cache, or make a new one from
the frob.
This first attempts to find a token in the user's token cache
on disk. If that token is present and valid, it is returned by
the method.
If that fails (or if the token is no longer valid based on
flickr.auth.checkToken) a new frob is acquired. If an auth_callback
method has been specified it will be called. Otherwise the frob is
validated by having the user log into flickr (with a browser).
To get a proper token, follow these steps:
- Store the result value of this method call
- Give the user a way to signal the program that he/she
has authorized it, for example show a button that can be
pressed.
- Wait for the user to signal the program that the
authorization was performed, but only if there was no
cached token.
- Call flickrapi.get_token_part_two(...) and pass it the
result value you stored.
The newly minted token is then cached locally for the next
run.
perms
"read", "write", or "delete"
auth_callback
method to be called if authorization is needed. When not
passed, ``self.validate_frob(...)`` is called. You can
call this method yourself from the callback method too.
If authorization should be blocked, pass
``auth_callback=False``.
The auth_callback method should take ``(frob, perms)`` as
parameters.
An example::
(token, frob) = flickr.get_token_part_one(perms='write')
if not token: raw_input("Press ENTER after you authorized this program")
flickr.get_token_part_two((token, frob))
Also take a look at ``authenticate_console(perms)``.
"""
# Check our auth_callback parameter for correctness before we
# do anything
authenticate = self.validate_frob
if auth_callback is not None:
if hasattr(auth_callback, '__call__'):
# use the provided callback function
authenticate = auth_callback
elif auth_callback is False:
authenticate = None
else:
# Any non-callable non-False value is invalid
raise ValueError('Invalid value for auth_callback: %s'
% auth_callback)
# see if we have a saved token
token = self.token_cache.token
frob = None
# see if it's valid
if token:
LOG.debug("Trying cached token '%s'" % token)
try:
rsp = self.auth_checkToken(auth_token=token, format='xmlnode')
# see if we have enough permissions
tokenPerms = rsp.auth[0].perms[0].text
if tokenPerms == "read" and perms != "read": token = None
elif tokenPerms == "write" and perms == "delete": token = None
except FlickrError:
LOG.debug("Cached token invalid")
self.token_cache.forget()
token = None
# get a new token if we need one
if not token:
# If we can't authenticate, it's all over.
if not authenticate:
raise FlickrError('Authentication required but '
'blocked using auth_callback=False')
# get the frob
LOG.debug("Getting frob for new token")
rsp = self.auth_getFrob(auth_token=None, format='xmlnode')
frob = rsp.frob[0].text
authenticate(frob, perms)
return (token, frob)
def get_token_part_two(self, (token, frob)):
"""Part two of getting a token, see ``get_token_part_one(...)`` for details."""
# If a valid token was obtained in the past, we're done
if token:
LOG.debug("get_token_part_two: no need, token already there")
self.token_cache.token = token
return token
LOG.debug("get_token_part_two: getting a new token for frob '%s'" % frob)
return self.get_token(frob)
def get_token(self, frob):
'''Gets the token given a certain frob. Used by ``get_token_part_two`` and
by the web authentication method.
'''
# get a token
rsp = self.auth_getToken(frob=frob, auth_token=None, format='xmlnode')
token = rsp.auth[0].token[0].text
LOG.debug("get_token: new token '%s'" % token)
# store the auth info for next time
self.token_cache.token = token
return token
def authenticate_console(self, perms='read', auth_callback=None):
'''Performs the authentication, assuming a console program.
Gets the token, if needed starts the browser and waits for the user to
press ENTER before continuing.
See ``get_token_part_one(...)`` for an explanation of the
parameters.
'''
(token, frob) = self.get_token_part_one(perms, auth_callback)
if not token: raw_input("Press ENTER after you authorized this program")
self.get_token_part_two((token, frob))
@require_format('etree')
def __data_walker(self, method, **params):
'''Calls 'method' with page=0, page=1 etc. until the total
number of pages has been visited. Yields the photos
returned.
Assumes that ``method(page=n, **params).findall('*/photo')``
results in a list of photos, and that the toplevel element of
the result contains a 'pages' attribute with the total number
of pages.
'''
page = 1
total = 1 # We don't know that yet, update when needed
while page <= total:
# Fetch a single page of photos
LOG.debug('Calling %s(page=%i of %i, %s)' %
(method.func_name, page, total, params))
rsp = method(page=page, **params)
photoset = rsp.getchildren()[0]
total = int(photoset.get('pages'))
photos = rsp.findall('*/photo')
# Yield each photo
for photo in photos:
yield photo
# Ready to get the next page
page += 1
@require_format('etree')
def walk_set(self, photoset_id, per_page=50, **kwargs):
'''walk_set(self, photoset_id, per_page=50, ...) -> \
generator, yields each photo in a single set.
:Parameters:
photoset_id
the photoset ID
per_page
the number of photos that are fetched in one call to
Flickr.
Other arguments can be passed, as documented in the
flickr.photosets.getPhotos_ API call in the Flickr API
documentation, except for ``page`` because all pages will be
returned eventually.
.. _flickr.photosets.getPhotos:
http://www.flickr.com/services/api/flickr.photosets.getPhotos.html
Uses the ElementTree format, incompatible with other formats.
'''
return self.__data_walker(self.photosets_getPhotos,
photoset_id=photoset_id, per_page=per_page, **kwargs)
@require_format('etree')
def walk(self, per_page=50, **kwargs):
'''walk(self, user_id=..., tags=..., ...) -> generator, \
yields each photo in a search query result
Accepts the same parameters as flickr.photos.search_ API call,
except for ``page`` because all pages will be returned
eventually.
.. _flickr.photos.search:
http://www.flickr.com/services/api/flickr.photos.search.html
Also see `walk_set`.
'''
return self.__data_walker(self.photos_search,
per_page=per_page, **kwargs)
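# Illustrative sketch, not part of the original module: iterating photos with
# the generator-based walkers above. Assumes an authenticated FlickrAPI
# instance using the 'etree' format; the user id and tags are placeholders.
def _example_walk(flickr):
    '''Print the id and title of every photo matching a hypothetical search.'''
    for photo in flickr.walk(user_id='12345678@N00', tags='sunset', per_page=100):
        # each ``photo`` is an ElementTree element, so attributes come from .get()
        print "%s %s" % (photo.get('id'), photo.get('title'))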
def set_log_level(level):
'''Sets the log level of the logger used by the FlickrAPI module.
>>> import flickrapi
>>> import logging
>>> flickrapi.set_log_level(logging.INFO)
'''
import flickrapi.tokencache
LOG.setLevel(level)
flickrapi.tokencache.LOG.setLevel(level)
if __name__ == "__main__":
print "Running doctests"
import doctest
doctest.testmod()
print "Tests OK"
|
bufferapp/buffer-django-nonrel
|
refs/heads/master
|
django/contrib/localflavor/au/au_states.py
|
544
|
"""
An alphabetical list of states for use as `choices` in a formfield.
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
STATE_CHOICES = (
('ACT', 'Australian Capital Territory'),
('NSW', 'New South Wales'),
('NT', 'Northern Territory'),
('QLD', 'Queensland'),
('SA', 'South Australia'),
('TAS', 'Tasmania'),
('VIC', 'Victoria'),
('WA', 'Western Australia'),
)
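# Illustrative sketch, not part of this module: using STATE_CHOICES as the
# ``choices`` argument of a form field, as the docstring above describes.
# The import is kept local so the module itself stays import-light.
def _example_state_field():
    '''Return a ChoiceField limited to the Australian states listed above.'''
    from django import forms
    return forms.ChoiceField(choices=STATE_CHOICES)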
|
markap/TravelMap
|
refs/heads/master
|
config/production.py
|
16
|
config = {
# environment this app is running on: localhost, testing, production
'environment': "production",
# ----> ADD MORE CONFIGURATION OPTIONS HERE <----
}
|
SciGaP/DEPRECATED-Cipres-Airavata-POC
|
refs/heads/master
|
saminda/cipres-airavata/sdk/scripts/remote_resource/triton/new_delete.py
|
4
|
#!/usr/bin/env python
# import test_lib as lib
import lib
import sys
import os
import getopt
def main(argv=None):
"""
Usage is:
delete.py -j jobid [-u url] -d workingdir
"""
if argv is None:
argv=sys.argv
jobid = url = None
options, remainder = getopt.getopt(argv[1:], "-j:-u:-d:")
for opt, arg in options:
if opt in ("-j"):
jobid = int(arg)
elif opt in ("-u"):
url = arg
elif opt in ("-d"):
workingdir = arg
try:
if not jobid:
raise SystemError("Internal error, delete.py invoked without jobid.")
lib.deleteJob(jobid, workingdir)
except SystemError, theException:
print >> sys.stderr, "Caught exception:", theException
return 1
if __name__ == "__main__":
sys.exit(main())
|
apagac/cfme_tests
|
refs/heads/master
|
sprout/appliances/migrations/0035_auto_20160922_1635.py
|
2
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-22 16:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('appliances', '0034_provider_allow_renaming'),
]
operations = [
migrations.AddField(
model_name='appliancepool',
name='is_container',
field=models.BooleanField(
default=False, help_text=b'Whether the pool uses containers.'),
),
migrations.AddField(
model_name='group',
name='templates_url',
field=models.TextField(
blank=True, help_text=b'Location of templates. Currently used for containers.',
null=True),
),
migrations.AddField(
model_name='provider',
name='container_base_template',
field=models.CharField(
blank=True, help_text=b'Base template for containerized ManageIQ deployment.',
max_length=64, null=True),
),
migrations.AddField(
model_name='template',
name='container',
field=models.CharField(
blank=True,
help_text=(
b'Whether the appliance is located in a container in the VM. '
b'This then specifies the container name.'),
max_length=32,
null=True),
),
]
|
darktears/chromium-crosswalk
|
refs/heads/master
|
tools/telemetry/third_party/websocket-client/websocket.py
|
67
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import socket
try:
import ssl
from ssl import SSLError
HAVE_SSL = True
except ImportError:
# dummy SSLError class for environments without ssl support.
class SSLError(Exception):
pass
HAVE_SSL = False
from urlparse import urlparse
import os
import array
import struct
import uuid
import hashlib
import base64
import threading
import time
import logging
import traceback
import sys
"""
websocket python client.
=========================
This version supports only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for the protocol.
"""
# websocket supported version.
VERSION = 13
# closing frame status codes.
STATUS_NORMAL = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA_TYPE = 1003
STATUS_STATUS_NOT_AVAILABLE = 1005
STATUS_ABNORMAL_CLOSED = 1006
STATUS_INVALID_PAYLOAD = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_INVALID_EXTENSION = 1010
STATUS_UNEXPECTED_CONDITION = 1011
STATUS_TLS_HANDSHAKE_ERROR = 1015
logger = logging.getLogger()
class WebSocketException(Exception):
"""
websocket exception class.
"""
pass
class WebSocketConnectionClosedException(WebSocketException):
"""
If the remote host closed the connection or some network error happened,
this exception will be raised.
"""
pass
class WebSocketTimeoutException(WebSocketException):
"""
WebSocketTimeoutException will be raised on a socket timeout during read/write operations.
"""
pass
default_timeout = None
traceEnabled = False
def enableTrace(tracable):
"""
Turn on/off traceability.
tracable: boolean value. If set to True, traceability is enabled.
"""
global traceEnabled
traceEnabled = tracable
if tracable:
if not logger.handlers:
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
def setdefaulttimeout(timeout):
"""
Set the global timeout setting to connect.
timeout: default socket timeout time, in seconds.
"""
global default_timeout
default_timeout = timeout
def getdefaulttimeout():
"""
Return the global connect timeout setting, in seconds.
"""
return default_timeout
def _parse_url(url):
"""
Parse a url and return a tuple of
(hostname, port, resource path, and a flag indicating secure mode).
url: url string.
"""
if ":" not in url:
raise ValueError("url is invalid")
scheme, url = url.split(":", 1)
parsed = urlparse(url, scheme="http")
if parsed.hostname:
hostname = parsed.hostname
else:
raise ValueError("hostname is invalid")
port = 0
if parsed.port:
port = parsed.port
is_secure = False
if scheme == "ws":
if not port:
port = 80
elif scheme == "wss":
is_secure = True
if not port:
port = 443
else:
raise ValueError("scheme %s is invalid" % scheme)
if parsed.path:
resource = parsed.path
else:
resource = "/"
if parsed.query:
resource += "?" + parsed.query
return (hostname, port, resource, is_secure)
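# Illustrative sketch, not part of the original module: the tuples _parse_url
# is expected to return for two typical websocket urls (hypothetical hosts).
def _example_parse_url():
    '''Check _parse_url on a plain and a secure websocket url.'''
    assert _parse_url("ws://example.com/chat") == ("example.com", 80, "/chat", False)
    assert _parse_url("wss://example.com:8443/chat?room=1") == (
        "example.com", 8443, "/chat?room=1", True)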
def create_connection(url, timeout=None, **options):
"""
connect to url and return websocket object.
Connect to url and return the WebSocket object.
Passing optional timeout parameter will set the timeout on the socket.
If no timeout is supplied, the global default timeout setting returned by getdefaulttimeout() is used.
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> conn = create_connection("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is an integer.
If you set None for this value, it means "use the default_timeout value".
options: the only currently supported option is "header".
If you set "header" as a list value, the custom HTTP headers are added.
"""
sockopt = options.get("sockopt", [])
sslopt = options.get("sslopt", {})
websock = WebSocket(sockopt=sockopt, sslopt=sslopt)
websock.settimeout(timeout if timeout is not None else default_timeout)
websock.connect(url, **options)
return websock
_MAX_INTEGER = (1 << 32) -1
_AVAILABLE_KEY_CHARS = range(0x21, 0x2f + 1) + range(0x3a, 0x7e + 1)
_MAX_CHAR_BYTE = (1<<8) -1
# ref. Websocket gets an update, and it breaks stuff.
# http://axod.blogspot.com/2010/06/websocket-gets-update-and-it-breaks.html
def _create_sec_websocket_key():
uid = uuid.uuid4()
return base64.encodestring(uid.bytes).strip()
_HEADERS_TO_CHECK = {
"upgrade": "websocket",
"connection": "upgrade",
}
class ABNF(object):
"""
ABNF frame class.
see http://tools.ietf.org/html/rfc5234
and http://tools.ietf.org/html/rfc6455#section-5.2
"""
# operation code values.
OPCODE_CONT = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
# available operation code value tuple
OPCODES = (OPCODE_CONT, OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE,
OPCODE_PING, OPCODE_PONG)
# opcode human readable string
OPCODE_MAP = {
OPCODE_CONT: "cont",
OPCODE_TEXT: "text",
OPCODE_BINARY: "binary",
OPCODE_CLOSE: "close",
OPCODE_PING: "ping",
OPCODE_PONG: "pong"
}
# data length threshold.
LENGTH_7 = 0x7d
LENGTH_16 = 1 << 16
LENGTH_63 = 1 << 63
def __init__(self, fin=0, rsv1=0, rsv2=0, rsv3=0,
opcode=OPCODE_TEXT, mask=1, data=""):
"""
Constructor for ABNF.
please check RFC for arguments.
"""
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.mask = mask
self.data = data
self.get_mask_key = os.urandom
def __str__(self):
return "fin=" + str(self.fin) \
+ " opcode=" + str(self.opcode) \
+ " data=" + str(self.data)
@staticmethod
def create_frame(data, opcode):
"""
create frame to send text, binary and other data.
data: data to send. This is a string value (byte array).
If opcode is OPCODE_TEXT and this value is unicode,
the data value is converted into a utf-8 encoded byte string automatically.
opcode: operation code. please see OPCODE_XXX.
"""
if opcode == ABNF.OPCODE_TEXT and isinstance(data, unicode):
data = data.encode("utf-8")
# mask must be set when sending data from the client
return ABNF(1, 0, 0, 0, opcode, 1, data)
def format(self):
"""
format this object to string(byte array) to send data to server.
"""
if any(x not in (0, 1) for x in [self.fin, self.rsv1, self.rsv2, self.rsv3]):
raise ValueError("not 0 or 1")
if self.opcode not in ABNF.OPCODES:
raise ValueError("Invalid OPCODE")
length = len(self.data)
if length >= ABNF.LENGTH_63:
raise ValueError("data is too long")
frame_header = chr(self.fin << 7
| self.rsv1 << 6 | self.rsv2 << 5 | self.rsv3 << 4
| self.opcode)
if length < ABNF.LENGTH_7:
frame_header += chr(self.mask << 7 | length)
elif length < ABNF.LENGTH_16:
frame_header += chr(self.mask << 7 | 0x7e)
frame_header += struct.pack("!H", length)
else:
frame_header += chr(self.mask << 7 | 0x7f)
frame_header += struct.pack("!Q", length)
if not self.mask:
return frame_header + self.data
else:
mask_key = self.get_mask_key(4)
return frame_header + self._get_masked(mask_key)
def _get_masked(self, mask_key):
s = ABNF.mask(mask_key, self.data)
return mask_key + "".join(s)
@staticmethod
def mask(mask_key, data):
"""
mask or unmask data. Just do xor for each byte
mask_key: 4 byte string(byte).
data: data to mask/unmask.
"""
_m = array.array("B", mask_key)
_d = array.array("B", data)
for i in xrange(len(_d)):
_d[i] ^= _m[i % 4]
return _d.tostring()
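# Illustrative sketch, not part of the original module: masking is a plain
# per-byte xor, so applying the same 4-byte key twice restores the data.
def _example_mask_roundtrip():
    '''Show that ABNF.mask is its own inverse for a fixed key.'''
    key = "abcd"  # any 4-byte string can serve as a mask key
    masked = ABNF.mask(key, "Hello, WebSocket")
    assert ABNF.mask(key, masked) == "Hello, WebSocket"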
class WebSocket(object):
"""
Low level WebSocket interface.
This class is based on
The WebSocket protocol draft-hixie-thewebsocketprotocol-76
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
We can connect to the websocket server and send/receive data.
The following example is an echo client.
>>> import websocket
>>> ws = websocket.WebSocket()
>>> ws.connect("ws://echo.websocket.org")
>>> ws.send("Hello, Server")
>>> ws.recv()
'Hello, Server'
>>> ws.close()
get_mask_key: a callable to produce new mask keys, see the set_mask_key
function's docstring for more details
sockopt: values for socket.setsockopt.
sockopt must be a tuple and each element is an argument of sock.setsockopt.
sslopt: dict object for ssl socket option.
"""
def __init__(self, get_mask_key=None, sockopt=None, sslopt=None):
"""
Initialize WebSocket object.
"""
if sockopt is None:
sockopt = []
if sslopt is None:
sslopt = {}
self.connected = False
self.sock = socket.socket()
for opts in sockopt:
self.sock.setsockopt(*opts)
self.sslopt = sslopt
self.get_mask_key = get_mask_key
# Buffers over the packets from the layer beneath until the desired
# number of bytes is received.
self._recv_buffer = []
# These buffer over the build-up of a single frame.
self._frame_header = None
self._frame_length = None
self._frame_mask = None
self._cont_data = None
def fileno(self):
return self.sock.fileno()
def set_mask_key(self, func):
"""
Set the function used to create the mask key. You can customize the mask key generator.
Mainly, this is for testing purposes (see the illustrative sketch after this class).
func: callable object. The function must take 1 integer argument,
which is the length of the mask key.
It must return a string (byte array) of that length.
"""
self.get_mask_key = func
def gettimeout(self):
"""
Get the websocket timeout, in seconds.
"""
return self.sock.gettimeout()
def settimeout(self, timeout):
"""
Set the timeout to the websocket.
timeout: timeout time, in seconds.
"""
self.sock.settimeout(timeout)
timeout = property(gettimeout, settimeout)
def connect(self, url, **options):
"""
Connect to url. url is a websocket url, e.g. ws://host:port/resource
You can customize using 'options'.
If you set a "header" list object, you can set your own custom headers.
>>> ws = WebSocket()
>>> ws.connect("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is an integer.
If you set None for this value,
it means "use the default_timeout value".
options: the only currently supported option is "header".
If you set "header" as a list value,
the custom HTTP headers are added.
"""
hostname, port, resource, is_secure = _parse_url(url)
# TODO: we need to support proxy
self.sock.connect((hostname, port))
if is_secure:
if HAVE_SSL:
if self.sslopt is None:
sslopt = {}
else:
sslopt = self.sslopt
self.sock = ssl.wrap_socket(self.sock, **sslopt)
else:
raise WebSocketException("SSL not available.")
self._handshake(hostname, port, resource, **options)
def _handshake(self, host, port, resource, **options):
sock = self.sock
headers = []
headers.append("GET %s HTTP/1.1" % resource)
headers.append("Upgrade: websocket")
headers.append("Connection: Upgrade")
if port == 80:
hostport = host
else:
hostport = "%s:%d" % (host, port)
headers.append("Host: %s" % hostport)
if "origin" in options:
headers.append("Origin: %s" % options["origin"])
else:
headers.append("Origin: http://%s" % hostport)
key = _create_sec_websocket_key()
headers.append("Sec-WebSocket-Key: %s" % key)
headers.append("Sec-WebSocket-Version: %s" % VERSION)
if "header" in options:
headers.extend(options["header"])
headers.append("")
headers.append("")
header_str = "\r\n".join(headers)
self._send(header_str)
if traceEnabled:
logger.debug("--- request header ---")
logger.debug(header_str)
logger.debug("-----------------------")
status, resp_headers = self._read_headers()
if status != 101:
self.close()
raise WebSocketException("Handshake Status %d" % status)
success = self._validate_header(resp_headers, key)
if not success:
self.close()
raise WebSocketException("Invalid WebSocket Header")
self.connected = True
def _validate_header(self, headers, key):
for k, v in _HEADERS_TO_CHECK.iteritems():
r = headers.get(k, None)
if not r:
return False
r = r.lower()
if v != r:
return False
result = headers.get("sec-websocket-accept", None)
if not result:
return False
result = result.lower()
value = key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
hashed = base64.encodestring(hashlib.sha1(value).digest()).strip().lower()
return hashed == result
def _read_headers(self):
status = None
headers = {}
if traceEnabled:
logger.debug("--- response header ---")
while True:
line = self._recv_line()
if line == "\r\n":
break
line = line.strip()
if traceEnabled:
logger.debug(line)
if not status:
status_info = line.split(" ", 2)
status = int(status_info[1])
else:
kv = line.split(":", 1)
if len(kv) == 2:
key, value = kv
headers[key.lower()] = value.strip().lower()
else:
raise WebSocketException("Invalid header")
if traceEnabled:
logger.debug("-----------------------")
return status, headers
def send(self, payload, opcode=ABNF.OPCODE_TEXT):
"""
Send the data as string.
payload: Payload must be utf-8 string or unicode,
if the opcode is OPCODE_TEXT.
Otherwise, it must be string(byte array)
opcode: operation code to send. Please see OPCODE_XXX.
"""
frame = ABNF.create_frame(payload, opcode)
if self.get_mask_key:
frame.get_mask_key = self.get_mask_key
data = frame.format()
length = len(data)
if traceEnabled:
logger.debug("send: " + repr(data))
while data:
l = self._send(data)
data = data[l:]
return length
def send_binary(self, payload):
return self.send(payload, ABNF.OPCODE_BINARY)
def ping(self, payload=""):
"""
send ping data.
payload: data payload to send server.
"""
self.send(payload, ABNF.OPCODE_PING)
def pong(self, payload):
"""
send pong data.
payload: data payload to send server.
"""
self.send(payload, ABNF.OPCODE_PONG)
def recv(self):
"""
Receive string data(byte array) from the server.
return value: string(byte array) value.
"""
opcode, data = self.recv_data()
return data
def recv_data(self):
"""
Receive data with operation code.
return value: tuple of operation code and string(byte array) value.
"""
while True:
frame = self.recv_frame()
if not frame:
# handle error:
# 'NoneType' object has no attribute 'opcode'
raise WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT):
if frame.opcode == ABNF.OPCODE_CONT and not self._cont_data:
raise WebSocketException("Illegal frame")
if self._cont_data:
self._cont_data[1] += frame.data
else:
self._cont_data = [frame.opcode, frame.data]
if frame.fin:
data = self._cont_data
self._cont_data = None
return data
elif frame.opcode == ABNF.OPCODE_CLOSE:
self.send_close()
return (frame.opcode, None)
elif frame.opcode == ABNF.OPCODE_PING:
self.pong(frame.data)
def recv_frame(self):
"""
Receive data as a frame from the server.
return value: ABNF frame object.
"""
# Header
if self._frame_header is None:
self._frame_header = self._recv_strict(2)
b1 = ord(self._frame_header[0])
fin = b1 >> 7 & 1
rsv1 = b1 >> 6 & 1
rsv2 = b1 >> 5 & 1
rsv3 = b1 >> 4 & 1
opcode = b1 & 0xf
b2 = ord(self._frame_header[1])
has_mask = b2 >> 7 & 1
# Frame length
if self._frame_length is None:
length_bits = b2 & 0x7f
if length_bits == 0x7e:
length_data = self._recv_strict(2)
self._frame_length = struct.unpack("!H", length_data)[0]
elif length_bits == 0x7f:
length_data = self._recv_strict(8)
self._frame_length = struct.unpack("!Q", length_data)[0]
else:
self._frame_length = length_bits
# Mask
if self._frame_mask is None:
self._frame_mask = self._recv_strict(4) if has_mask else ""
# Payload
payload = self._recv_strict(self._frame_length)
if has_mask:
payload = ABNF.mask(self._frame_mask, payload)
# Reset for next frame
self._frame_header = None
self._frame_length = None
self._frame_mask = None
return ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload)
def send_close(self, status=STATUS_NORMAL, reason=""):
"""
send close data to the server.
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string.
"""
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
def close(self, status=STATUS_NORMAL, reason=""):
"""
Close Websocket object
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string.
"""
if self.connected:
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
try:
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
timeout = self.sock.gettimeout()
self.sock.settimeout(3)
try:
frame = self.recv_frame()
if logger.isEnabledFor(logging.ERROR):
recv_status = struct.unpack("!H", frame.data)[0]
if recv_status != STATUS_NORMAL:
logger.error("close status: " + repr(recv_status))
except:
pass
self.sock.settimeout(timeout)
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self._closeInternal()
def _closeInternal(self):
self.connected = False
self.sock.close()
def _send(self, data):
try:
return self.sock.send(data)
except socket.timeout as e:
raise WebSocketTimeoutException(e.message)
except Exception as e:
if "timed out" in e.message:
raise WebSocketTimeoutException(e.message)
else:
raise e
def _recv(self, bufsize):
try:
bytes = self.sock.recv(bufsize)
except socket.timeout as e:
raise WebSocketTimeoutException(e.message)
except SSLError as e:
if e.message == "The read operation timed out":
raise WebSocketTimeoutException(e.message)
else:
raise
if not bytes:
raise WebSocketConnectionClosedException()
return bytes
def _recv_strict(self, bufsize):
shortage = bufsize - sum(len(x) for x in self._recv_buffer)
while shortage > 0:
bytes = self._recv(shortage)
self._recv_buffer.append(bytes)
shortage -= len(bytes)
unified = "".join(self._recv_buffer)
if shortage == 0:
self._recv_buffer = []
return unified
else:
self._recv_buffer = [unified[bufsize:]]
return unified[:bufsize]
def _recv_line(self):
line = []
while True:
c = self._recv(1)
line.append(c)
if c == "\n":
break
return "".join(line)
class WebSocketApp(object):
"""
A higher-level API is provided.
The interface is similar to the JavaScript WebSocket object.
"""
def __init__(self, url, header=[],
on_open=None, on_message=None, on_error=None,
on_close=None, keep_running=True, get_mask_key=None):
"""
url: websocket url.
header: custom header for websocket handshake.
on_open: callable object which is called at opening websocket.
this function has one argument. The argument is this class object.
on_message: callable object which is called when data is received.
on_message has 2 arguments.
The 1st argument is this class object.
The 2nd argument is a utf-8 string which we get from the server.
on_error: callable object which is called when we get an error.
on_error has 2 arguments.
The 1st argument is this class object.
The 2nd argument is the exception object.
on_close: callable object which is called when the connection is closed.
this function has one argument. The argument is this class object.
keep_running: a boolean flag indicating whether the app's main loop should
keep running, defaults to True
get_mask_key: a callable to produce new mask keys, see the WebSocket.set_mask_key's
docstring for more information
"""
self.url = url
self.header = header
self.on_open = on_open
self.on_message = on_message
self.on_error = on_error
self.on_close = on_close
self.keep_running = keep_running
self.get_mask_key = get_mask_key
self.sock = None
def send(self, data, opcode=ABNF.OPCODE_TEXT):
"""
send message.
data: message to send. If you set opcode to OPCODE_TEXT, data must be utf-8 string or unicode.
opcode: operation code of data. default is OPCODE_TEXT.
"""
if self.sock.send(data, opcode) == 0:
raise WebSocketConnectionClosedException()
def close(self):
"""
close websocket connection.
"""
self.keep_running = False
self.sock.close()
def _send_ping(self, interval):
while True:
for i in range(interval):
time.sleep(1)
if not self.keep_running:
return
self.sock.ping()
def run_forever(self, sockopt=None, sslopt=None, ping_interval=0):
"""
Run the event loop for the WebSocket framework.
This loop is an infinite loop and stays alive while the websocket is available.
sockopt: values for socket.setsockopt.
sockopt must be a tuple and each element is an argument of sock.setsockopt.
sslopt: ssl socket optional dict.
ping_interval: automatically send a "ping" command every specified period, in seconds.
If set to 0, pings are not sent automatically.
"""
if sockopt is None:
sockopt = []
if sslopt is None:
sslopt = {}
if self.sock:
raise WebSocketException("socket is already opened")
thread = None
try:
self.sock = WebSocket(self.get_mask_key, sockopt=sockopt, sslopt=sslopt)
self.sock.settimeout(default_timeout)
self.sock.connect(self.url, header=self.header)
self._callback(self.on_open)
if ping_interval:
thread = threading.Thread(target=self._send_ping, args=(ping_interval,))
thread.setDaemon(True)
thread.start()
while self.keep_running:
data = self.sock.recv()
if data is None:
break
self._callback(self.on_message, data)
except Exception, e:
self._callback(self.on_error, e)
finally:
if thread:
self.keep_running = False
self.sock.close()
self._callback(self.on_close)
self.sock = None
def _callback(self, callback, *args):
if callback:
try:
callback(self, *args)
except Exception, e:
logger.error(e)
if logger.isEnabledFor(logging.DEBUG):
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
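# Illustrative sketch, not part of the original module: wiring the callback
# interface described in WebSocketApp.__init__. The echo url is the same
# public echo server used in the __main__ block below.
def _example_run_app():
    '''Run a WebSocketApp that greets the server and prints every reply.'''
    def on_open(app):
        app.send("Hello from WebSocketApp")
    def on_message(app, message):
        print("received: %s" % message)
    app = WebSocketApp("ws://echo.websocket.org/",
                       on_open=on_open, on_message=on_message)
    app.run_forever(ping_interval=30)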
if __name__ == "__main__":
enableTrace(True)
ws = create_connection("ws://echo.websocket.org/")
print("Sending 'Hello, World'...")
ws.send("Hello, World")
print("Sent")
print("Receiving...")
result = ws.recv()
print("Received '%s'" % result)
ws.close()
|
NIASC/VirusMeta
|
refs/heads/master
|
blast_module/alignment_search_result.py
|
1
|
#
# Created by davbzh on 2013-08-14.
#
from Bio import SeqIO
from Bio.Blast import NCBIXML
from collections import defaultdict
import numpy as np
import csv
import os
Part = 3
class BlastParser(object):
'''An iterator blast parser that yields the blast results in a multiblast file'''
def __init__(self, fhand):
'The init requires a file to be parsed'
fhand.seek(0, 0)
self._blast_file = fhand
blast_version = self._get_blast_version()
self._blast_file.seek(0, 0)
self._blast_parse = None
if fhand.read(1) == '<':
fhand.seek(0)
self._blast_parse = NCBIXML.parse(fhand)
def __iter__(self):
'Part of the iterator protocol'
return self
def _create_result_structure(self, record):
'Given a BioPython blast result it returns our result structure'
definition = record.query
name = record.query
if len(definition.split(' ', 1)) > 0:
definition = definition
else:
definition = None
if definition is None:
definition = "<unknown description>"
#length of query sequence
query_length = record.query_letters
queryID = name
#now we go for the hits (matches)
matches = []
for alignment in record.alignments:
#the subject sequence
#TODO: use also alignment.accession, first you should determine if hit has accession
if str(alignment.hit_id).split("|")[1] == 'BL_ORD_ID':
if str(alignment.hit_def).split("|")[0] == "gi":
s_name = str(alignment.hit_def).split("|")[1] #TODO: be careful this might change output
else:
s_name = str(alignment.hit_def).split("|")[0]
else:
s_name = str(alignment.hit_id).split("|")[1]
if len(alignment.hit_def.split(' ')) > 1:
definition = alignment.hit_def
else:
definition = None
if definition is None:
definition = "<unknown description>"
subjectID = s_name
subjectDef = definition.replace("@","").replace("#","") #"@" character is used later for splitting the lines and it should not exist inside definitions
#"#" character creates problem for R to read
subject_length = alignment.length
#the hsps (match parts)
match_parts = []
for hsp in alignment.hsps:
expect = hsp.expect
hsp_score = hsp.score
subject_start = hsp.sbjct_start
subject_end = hsp.sbjct_end
query_start = hsp.query_start
query_end = hsp.query_end
hsp_length = hsp.align_length
#We have to check the subject strand
if subject_start < subject_end:
subject_strand = 1
else:
subject_strand = -1
subject_start, subject_end = (subject_end,
subject_start)
#Also the query strand
if query_start < query_end:
query_strand = 1
else:
query_strand = -1
query_start, query_end = query_end, query_start
try:
similarity = hsp.positives*100.0/float(hsp_length)
except TypeError:
similarity = None
try:
identity = hsp.identities*100.0/float(hsp_length)
except TypeError:
identity = None
#Coverage is expressed as percent of the length of the smaller sequence covered in the pairwise alignment
Coverage = None
try:
if subject_length > query_length or subject_length == query_length:
Coverage = float(100*(((query_end-query_start)+1)))/float(query_length)
elif subject_length < query_length:
Coverage = float(100*(((subject_end-subject_start)+1)))/float(subject_length)
except TypeError:
Coverage = None
########################################################
#This algorithm is usable only for HPV related contigs.
#It checks whether an assembled contig is chimeric or not
#and assumes that similarity to the subject sequences
#should be evenly distributed
blast_match_chimera = None
try:
identity_list = []
match_array = hsp.match
match_array = np.array(match_array, dtype='c') == '|'
match_array = match_array.astype(np.uint8)
Parts = len(match_array) / Part
for i in xrange(0, len(match_array), Parts):
if len(match_array[i:i+Parts]) >= float(Parts)/float(3):
identity_list.append(float(100.0 * sum(match_array[i:i+Parts])) / float(len(match_array[i:i+Parts])))
if max(identity_list) >= 90 and min(identity_list) < 90:
if (max(identity_list) - min(identity_list)) >= 5:
blast_match_chimera = "Yes"
else:
blast_match_chimera = "No"
except TypeError:
blast_match_chimera = None
########################################################
match_parts.append({
'subject_start' : subject_start,
'subject_end' : subject_end,
'subject_strand' : subject_strand,
'query_start' : query_start,
'query_end' : query_end,
'query_strand' : query_strand,
'hsp_length' : hsp_length,
'subject_length' : subject_length,
'scores' : {'similarity' : similarity,
'expect' : expect,
'hsp_score' : hsp_score,
'identity' : identity,
'Coverage' : Coverage,
'blast_match_chimera': blast_match_chimera}
})
matches.append({
#'subject' : subject,
'subject' : subjectID,
'subjectDef' : subjectDef,
'match_parts' : match_parts})
result = {#'query' : query,
'query' : queryID,
'query_length' : query_length,
'matches' : matches}
return result
def _get_blast_version(self):
'It gets the blast version'
for line in self._blast_file.read().split('\n'):
line = line.strip()
if line.startswith('<BlastOutput_version>'):
return line.split('>')[1].split('<')[0]
def next(self):
'It returns the next blast result'
if self._blast_parse is None:
raise StopIteration
else:
record = self._blast_parse.next()
#now we have to change this biopython blast_result in our
#structure
our_result = self._create_result_structure(record)
return our_result
def sortdefdictlist(defdictlist):
for keys in defdictlist:
defdictlist[keys].sort()
def query_subject_hits (subject_hits, query_length):
sortdefdictlist(subject_hits)
global_matches = []
for hit_id in subject_hits:
match_start, match_end = None, None
match_subject_start, match_subject_end = None, None
gap_start,gap_end = None,None
gaps = []
total_lengh_of_gaps = 0
for positions in subject_hits[hit_id]:
query_start = positions[0]
query_end = positions[1]
if match_start is None or query_start < match_start:
match_start = query_start
if match_end and query_start - match_end >= 1:
gap_start = match_end
gap_end = query_start
gaps.append((gap_start,gap_end))
total_lengh_of_gaps += (gap_end-gap_start)
#print "query_start:",query_start, "query_end:",query_end, "match_start:",match_start,"match_end:", match_end, "gap:",(query_start - match_end)
if match_end is None or query_end > match_end:
match_end = query_end
try:
global_coverage = float(100*(((match_end-match_start)-total_lengh_of_gaps)))/float(query_length)
except TypeError:
global_coverage = None
global_matches.append({hit_id:
{'match_start' : match_start,
'match_end' : match_end,
'gaps' : gaps,
'total_lengh_of_gaps' : total_lengh_of_gaps,
'global_coverage' : global_coverage}
})
return global_matches
def global_chimera (global_identity_list):
global_chimera_merged = []
for hit_id in global_identity_list:
blast_global_chimera = None # TODO why it is necessary to put this here??
if max(global_identity_list[hit_id]) >= 90 and min(global_identity_list[hit_id]) < 90:
if (max(global_identity_list[hit_id]) - min(global_identity_list[hit_id])) >= 5:
blast_global_chimera = "Yes"
else:
blast_global_chimera = "No"
global_chimera_merged.append( { hit_id:
{'blast_global_chimera': blast_global_chimera}
})
return global_chimera_merged
def global_inverted (global_orientation_list):
global_inverted_merged = []
for hit_id in global_orientation_list:
if max(global_orientation_list[hit_id]) == 1 and min(global_orientation_list[hit_id]) == -1:
blast_global_orientation = "Inverted"
elif max(global_orientation_list[hit_id]) == 1 and min(global_orientation_list[hit_id]) == 1:
blast_global_orientation = "Plus"
elif max(global_orientation_list[hit_id]) == -1 and min(global_orientation_list[hit_id]) == -1:
blast_global_orientation = "Minus"
global_inverted_merged.append({hit_id:
{'blast_global_orientation' : blast_global_orientation}
})
return global_inverted_merged
class GlobalBlast(BlastParser):
def __init__(self, fhand):
parser = BlastParser(fhand)
if isinstance (parser, BlastParser):
self.parser = parser
else:
raise TypeError
self.query_global_hits = defaultdict(list)
def mimic_blast_global_allignment (self):
for blast in self.parser:
subject_hits = defaultdict(list)
global_identity_list = defaultdict(list)
global_orientation_list = defaultdict(list)
query_global_mimcry = defaultdict(list)
for hits in blast['matches']:
for match_parts in hits['match_parts']:
subject_hits[hits['subject']].append((match_parts['query_start'], match_parts['query_end']))
#subject_hits[hits['subject']].append((match_parts['subject_start'], match_parts['subject_end']))
global_identity_list[hits['subject']].append(match_parts['scores']['identity'])
global_orientation_list[hits['subject']].append(match_parts['subject_strand'])
for hit_dict1 in query_subject_hits (subject_hits,blast['query_length']):
query_global_mimcry[hit_dict1.keys()[0]].append(hit_dict1.values()[0])
for hit_dict2 in global_chimera (global_identity_list):
query_global_mimcry[hit_dict2.keys()[0]].append(hit_dict2.values()[0])
for hit_dict3 in global_inverted (global_orientation_list):
query_global_mimcry[hit_dict3.keys()[0]].append(hit_dict3.values()[0])
self.query_global_hits[blast['query']].append(query_global_mimcry)
return self.query_global_hits
def parse_blastx(blast_out, gi_out, result):
'''
parses blastx xml output.
'''
blast_out = open (blast_out)
gi_out = open(gi_out,'w')
result = csv.writer(open(result, 'wb'),delimiter='@')
result.writerow(("Queryid", "gi", "Strain", "identity", "alignment.length", "positives", "frame", "q.start", "q.end", "s.start", "s.end", "e.value", "bitscore","Length","Coverage"))
gi_hits = []
blast_records = NCBIXML.parse(blast_out)
for record in blast_records:
for align in record.alignments :
for hsp in align.hsps :
Recname = record.query.split()[0]
mystart = hsp.query_start
myend = hsp.query_end
if str(align.hit_id).split("|")[1] == 'BL_ORD_ID':
GI = str(align.hit_def).split("|")[1]
Strain = str(align.hit_def).split("|")[(len(str(align.hit_def).split("|"))-1)]
else:
GI = str(align.hit_id).split("|")[1]
Strain = str(align.hit_def)
percentage_identity = float(100.0 * hsp.identities) / float(hsp.align_length)
Coverage = float(100*(((myend-mystart)+1)))/float(record.query_letters)
if GI not in gi_hits:
gi_hits.append(GI)
result.writerow([Recname,GI,Strain,percentage_identity,hsp.align_length, hsp.positives,str(hsp.frame).replace(" ",""), mystart, myend, hsp.sbjct_start, hsp.sbjct_end, hsp.expect, hsp.score, record.query_letters, Coverage])
blast_out.close()
for gi_id in gi_hits:
gi_out.write("%s\n" % gi_id)
gi_out.close()
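# Illustrative sketch, not part of the original module: driving the two entry
# points above. The file names are hypothetical placeholders.
def _example_usage():
    '''Parse a blastx XML report and collect per-query global statistics.'''
    # write the hit GIs and an '@'-delimited summary from a blastx XML report
    parse_blastx('blastx_report.xml', 'hit_gis.txt', 'blastx_summary.csv')
    # iterate query-level global matches from the same kind of report
    with open('blastx_report.xml') as fhand:
        global_blast = GlobalBlast(fhand)
        for query, hits in global_blast.mimic_blast_global_allignment().iteritems():
            print "%s %s" % (query, hits)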
|
atlassian/boto
|
refs/heads/develop
|
boto/route53/connection.py
|
103
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
# www.bluepines.org
# Copyright (c) 2012 42 Lines Inc., Jim Browne
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.route53 import exception
import random
import uuid
import xml.sax
import boto
from boto.connection import AWSAuthConnection
from boto import handler
import boto.jsonresponse
from boto.route53.record import ResourceRecordSets
from boto.route53.zone import Zone
from boto.compat import six, urllib
HZXML = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHostedZoneRequest xmlns="%(xmlns)s">
<Name>%(name)s</Name>
<CallerReference>%(caller_ref)s</CallerReference>
<HostedZoneConfig>
<Comment>%(comment)s</Comment>
</HostedZoneConfig>
</CreateHostedZoneRequest>"""
HZPXML = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHostedZoneRequest xmlns="%(xmlns)s">
<Name>%(name)s</Name>
<VPC>
<VPCId>%(vpc_id)s</VPCId>
<VPCRegion>%(vpc_region)s</VPCRegion>
</VPC>
<CallerReference>%(caller_ref)s</CallerReference>
<HostedZoneConfig>
<Comment>%(comment)s</Comment>
</HostedZoneConfig>
</CreateHostedZoneRequest>"""
# boto.set_stream_logger('dns')
class Route53Connection(AWSAuthConnection):
DefaultHost = 'route53.amazonaws.com'
"""The default Route53 API endpoint to connect to."""
Version = '2013-04-01'
"""Route53 API version."""
XMLNameSpace = 'https://route53.amazonaws.com/doc/2013-04-01/'
"""XML schema for this Route53 API version."""
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
port=None, proxy=None, proxy_port=None,
host=DefaultHost, debug=0, security_token=None,
validate_certs=True, https_connection_factory=None,
profile_name=None):
super(Route53Connection, self).__init__(
host,
aws_access_key_id, aws_secret_access_key,
True, port, proxy, proxy_port, debug=debug,
security_token=security_token,
validate_certs=validate_certs,
https_connection_factory=https_connection_factory,
profile_name=profile_name)
def _required_auth_capability(self):
return ['route53']
def make_request(self, action, path, headers=None, data='', params=None):
if params:
pairs = []
for key, val in six.iteritems(params):
if val is None:
continue
pairs.append(key + '=' + urllib.parse.quote(str(val)))
path += '?' + '&'.join(pairs)
return super(Route53Connection, self).make_request(
action, path, headers, data,
retry_handler=self._retry_handler)
# Hosted Zones
def get_all_hosted_zones(self, start_marker=None, zone_list=None):
"""
Returns a Python data structure with information about all
Hosted Zones defined for the AWS account.
:param int start_marker: start marker to pass when fetching additional
results after a truncated list
:param list zone_list: a HostedZones list to prepend to results
"""
params = {}
if start_marker:
params = {'marker': start_marker}
response = self.make_request('GET', '/%s/hostedzone' % self.Version,
params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='HostedZones',
item_marker=('HostedZone',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
if zone_list:
e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
while 'NextMarker' in e['ListHostedZonesResponse']:
next_marker = e['ListHostedZonesResponse']['NextMarker']
zone_list = e['ListHostedZonesResponse']['HostedZones']
e = self.get_all_hosted_zones(next_marker, zone_list)
return e
def get_hosted_zone(self, hosted_zone_id):
"""
Get detailed information about a particular Hosted Zone.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
"""
uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='NameServers',
item_marker=('NameServer',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def get_hosted_zone_by_name(self, hosted_zone_name):
"""
Get detailed information about a particular Hosted Zone.
:type hosted_zone_name: str
:param hosted_zone_name: The fully qualified domain name for the Hosted
Zone
"""
if hosted_zone_name[-1] != '.':
hosted_zone_name += '.'
all_hosted_zones = self.get_all_hosted_zones()
for zone in all_hosted_zones['ListHostedZonesResponse']['HostedZones']:
# check that they gave us the FQDN for their zone
if zone['Name'] == hosted_zone_name:
return self.get_hosted_zone(zone['Id'].split('/')[-1])
def create_hosted_zone(self, domain_name, caller_ref=None, comment='',
private_zone=False, vpc_id=None, vpc_region=None):
"""
Create a new Hosted Zone. Returns a Python data structure with
information about the newly created Hosted Zone.
:type domain_name: str
:param domain_name: The name of the domain. This should be a
fully-specified domain, and should end with a final period
as the last label indication. If you omit the final period,
Amazon Route 53 assumes the domain is relative to the root.
This is the name you have registered with your DNS registrar.
It is also the name you will delegate from your registrar to
the Amazon Route 53 delegation servers returned in
response to this request.
:type caller_ref: str
:param caller_ref: A unique string that identifies the request
and that allows failed CreateHostedZone requests to be retried
without the risk of executing the operation twice. If you don't
provide a value for this, boto will generate a Type 4 UUID and
use that.
:type comment: str
:param comment: Any comments you want to include about the hosted
zone.
:type private_zone: bool
:param private_zone: Set True if creating a private hosted zone.
:type vpc_id: str
:param vpc_id: When creating a private hosted zone, the VPC Id to
associate to is required.
:type vpc_region: str
:param vpc_region: When creating a private hosted zone, the region of
the associated VPC is required.
"""
if caller_ref is None:
caller_ref = str(uuid.uuid4())
if private_zone:
params = {'name': domain_name,
'caller_ref': caller_ref,
'comment': comment,
'vpc_id': vpc_id,
'vpc_region': vpc_region,
'xmlns': self.XMLNameSpace}
xml_body = HZPXML % params
else:
params = {'name': domain_name,
'caller_ref': caller_ref,
'comment': comment,
'xmlns': self.XMLNameSpace}
xml_body = HZXML % params
uri = '/%s/hostedzone' % self.Version
response = self.make_request('POST', uri,
{'Content-Type': 'text/xml'}, xml_body)
body = response.read()
boto.log.debug(body)
if response.status == 201:
e = boto.jsonresponse.Element(list_marker='NameServers',
item_marker=('NameServer',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
else:
raise exception.DNSServerError(response.status,
response.reason,
body)
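# Illustrative sketch, not part of the original module: creating a public
# hosted zone through this connection class. The domain name is a placeholder
# and boto.connect_route53() is assumed as the usual convenience constructor.
def _example_create_zone():
    '''Create a public hosted zone and return the parsed response.'''
    import boto
    conn = boto.connect_route53()
    return conn.create_hosted_zone('example.com.', comment='created from a sketch')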
def delete_hosted_zone(self, hosted_zone_id):
"""
Delete the hosted zone specified by the given id.
:type hosted_zone_id: str
:param hosted_zone_id: The hosted zone's id
"""
uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
response = self.make_request('DELETE', uri)
body = response.read()
boto.log.debug(body)
if response.status not in (200, 204):
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
# Health checks
POSTHCXMLBody = """<CreateHealthCheckRequest xmlns="%(xmlns)s">
<CallerReference>%(caller_ref)s</CallerReference>
%(health_check)s
</CreateHealthCheckRequest>"""
def create_health_check(self, health_check, caller_ref=None):
"""
Create a new Health Check
:type health_check: HealthCheck
:param health_check: HealthCheck object
:type caller_ref: str
:param caller_ref: A unique string that identifies the request
and that allows failed CreateHealthCheckRequest requests to be retried
without the risk of executing the operation twice. If you don't
provide a value for this, boto will generate a Type 4 UUID and
use that.
"""
if caller_ref is None:
caller_ref = str(uuid.uuid4())
uri = '/%s/healthcheck' % self.Version
params = {'xmlns': self.XMLNameSpace,
'caller_ref': caller_ref,
'health_check': health_check.to_xml()
}
xml_body = self.POSTHCXMLBody % params
response = self.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
body = response.read()
boto.log.debug(body)
if response.status == 201:
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
else:
raise exception.DNSServerError(response.status, response.reason, body)
def get_list_health_checks(self, maxitems=None, marker=None):
"""
Return a list of health checks
:type maxitems: int
:param maxitems: Maximum number of items to return
:type marker: str
:param marker: marker to get next set of items to list
"""
params = {}
if maxitems is not None:
params['maxitems'] = maxitems
if marker is not None:
params['marker'] = marker
uri = '/%s/healthcheck' % (self.Version, )
response = self.make_request('GET', uri, params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='HealthChecks',
item_marker=('HealthCheck',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def get_checker_ip_ranges(self):
"""
Return a list of Route53 healthcheck IP ranges
"""
uri = '/%s/checkeripranges' % self.Version
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='CheckerIpRanges', item_marker=('member',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def delete_health_check(self, health_check_id):
"""
Delete a health check
:type health_check_id: str
:param health_check_id: ID of the health check to delete
"""
uri = '/%s/healthcheck/%s' % (self.Version, health_check_id)
response = self.make_request('DELETE', uri)
body = response.read()
boto.log.debug(body)
if response.status not in (200, 204):
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
# Resource Record Sets
def get_all_rrsets(self, hosted_zone_id, type=None,
name=None, identifier=None, maxitems=None):
"""
Retrieve the Resource Record Sets defined for this Hosted Zone.
Returns the raw XML data returned by the Route53 call.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
:type type: str
:param type: The type of resource record set to begin the record
listing from. Valid choices are:
* A
* AAAA
* CNAME
* MX
* NS
* PTR
* SOA
* SPF
* SRV
* TXT
Valid values for weighted resource record sets:
* A
* AAAA
* CNAME
* TXT
Valid values for Zone Apex Aliases:
* A
* AAAA
:type name: str
:param name: The first name in the lexicographic ordering of domain
names to be retrieved
:type identifier: str
:param identifier: In a hosted zone that includes weighted resource
record sets (multiple resource record sets with the same DNS
name and type that are differentiated only by SetIdentifier),
if results were truncated for a given DNS name and type,
the value of SetIdentifier for the next resource record
set that has the current DNS name and type
:type maxitems: int
:param maxitems: The maximum number of records
"""
params = {'type': type, 'name': name,
'identifier': identifier, 'maxitems': maxitems}
uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
response = self.make_request('GET', uri, params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
rs = ResourceRecordSets(connection=self, hosted_zone_id=hosted_zone_id)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
def change_rrsets(self, hosted_zone_id, xml_body):
"""
Create or change the authoritative DNS information for this
Hosted Zone.
Returns a Python data structure with information about the set of
changes, including the Change ID.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
:type xml_body: str
:param xml_body: The list of changes to be made, defined in the
XML schema defined by the Route53 service.
"""
uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
response = self.make_request('POST', uri,
{'Content-Type': 'text/xml'},
xml_body)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def get_change(self, change_id):
"""
Get information about a proposed set of changes, as submitted
by the change_rrsets method.
Returns a Python data structure with status information about the
changes.
:type change_id: str
:param change_id: The unique identifier for the set of changes.
This ID is returned in the response to the change_rrsets method.
"""
uri = '/%s/change/%s' % (self.Version, change_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def create_zone(self, name, private_zone=False,
vpc_id=None, vpc_region=None):
"""
Create a new Hosted Zone. Returns a Zone object for the newly
created Hosted Zone.
:type name: str
:param name: The name of the domain. This should be a
fully-specified domain, and should end with a final period
as the last label indication. If you omit the final period,
Amazon Route 53 assumes the domain is relative to the root.
This is the name you have registered with your DNS registrar.
It is also the name you will delegate from your registrar to
the Amazon Route 53 delegation servers returned in
response to this request.
:type private_zone: bool
:param private_zone: Set True if creating a private hosted zone.
:type vpc_id: str
:param vpc_id: When creating a private hosted zone, the VPC Id to
associate to is required.
:type vpc_region: str
        :param vpc_region: When creating a private hosted zone, the region of
            the associated VPC is required.
"""
zone = self.create_hosted_zone(name, private_zone=private_zone,
vpc_id=vpc_id, vpc_region=vpc_region)
return Zone(self, zone['CreateHostedZoneResponse']['HostedZone'])
def get_zone(self, name):
"""
Returns a Zone object for the specified Hosted Zone.
:param name: The name of the domain. This should be a
fully-specified domain, and should end with a final period
as the last label indication.
"""
name = self._make_qualified(name)
for zone in self.get_zones():
if name == zone.name:
return zone
def get_zones(self):
"""
Returns a list of Zone objects, one for each of the Hosted
Zones defined for the AWS account.
:rtype: list
:returns: A list of Zone objects.
"""
zones = self.get_all_hosted_zones()
return [Zone(self, zone) for zone in
zones['ListHostedZonesResponse']['HostedZones']]
def _make_qualified(self, value):
"""
Ensure passed domain names end in a period (.) character.
This will usually make a domain fully qualified.
"""
if type(value) in [list, tuple, set]:
new_list = []
for record in value:
if record and not record[-1] == '.':
new_list.append("%s." % record)
else:
new_list.append(record)
return new_list
else:
value = value.strip()
if value and not value[-1] == '.':
value = "%s." % value
return value
def _retry_handler(self, response, i, next_sleep):
status = None
boto.log.debug("Saw HTTP status: %s" % response.status)
if response.status == 400:
code = response.getheader('Code')
if code:
# This is a case where we need to ignore a 400 error, as
# Route53 returns this. See
# http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html
if 'PriorRequestNotComplete' in code:
error = 'PriorRequestNotComplete'
elif 'Throttling' in code:
error = 'Throttling'
else:
return status
msg = "%s, retry attempt %s" % (
error,
i
)
next_sleep = min(random.random() * (2 ** i),
boto.config.get('Boto', 'max_retry_delay', 60))
i += 1
status = (msg, i, next_sleep)
return status
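# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of this module): it
# assumes boto's connect_route53() helper, valid AWS credentials, and that
# Zone objects expose an `id` attribute.
if __name__ == '__main__':
    import boto
    conn = boto.connect_route53()
    # Create a hosted zone and list the record sets Route53 seeds it with.
    zone = conn.create_zone('example.com.')
    for rrset in conn.get_all_rrsets(zone.id):
        print(rrset)
    # Health checks follow the same request/parse pattern as the zone calls.
    print(conn.get_list_health_checks())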
|
alinbalutoiu/tempest
|
refs/heads/master
|
tempest/services/volume/json/availability_zone_client.py
|
8
|
# Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.common import service_client
class BaseVolumeAvailabilityZoneClient(service_client.ServiceClient):
def list_availability_zones(self):
resp, body = self.get('os-availability-zone')
body = json.loads(body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body['availabilityZoneInfo'])
class VolumeAvailabilityZoneClient(BaseVolumeAvailabilityZoneClient):
"""
Volume V1 availability zone client.
"""
|
decvalts/cartopy
|
refs/heads/master
|
lib/cartopy/tests/test_vector_transform.py
|
4
|
# (C) British Crown Copyright 2013 - 2017, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import cartopy.vector_transform as vec_trans
import cartopy.crs as ccrs
def _sample_plate_carree_coordinates():
x = np.array([-10, 0, 10, -9, 0, 9])
y = np.array([10, 10, 10, 5, 5, 5])
return x, y
def _sample_plate_carree_scalar_field():
return np.array([2, 4, 2, 1.2, 3, 1.2])
def _sample_plate_carree_vector_field():
u = np.array([2, 4, 2, 1.2, 3, 1.2])
v = np.array([5.5, 4, 5.5, 1.2, .3, 1.2])
return u, v
class Test_interpolate_to_grid(object):
@classmethod
def setup_class(cls):
cls.x, cls.y = _sample_plate_carree_coordinates()
cls.s = _sample_plate_carree_scalar_field()
def test_data_extent(self):
# Interpolation to a grid with extents of the input data.
expected_x_grid = np.array([[-10., -5., 0., 5., 10.],
[-10., -5., 0., 5., 10.],
[-10., -5., 0., 5., 10.]])
expected_y_grid = np.array([[5., 5., 5., 5., 5.],
[7.5, 7.5, 7.5, 7.5, 7.5],
[10., 10., 10., 10., 10]])
expected_s_grid = np.array([[np.nan, 2., 3., 2., np.nan],
[np.nan, 2.5, 3.5, 2.5, np.nan],
[2., 3., 4., 3., 2.]])
x_grid, y_grid, s_grid = vec_trans._interpolate_to_grid(
5, 3, self.x, self.y, self.s)
assert_array_equal(x_grid, expected_x_grid)
assert_array_equal(y_grid, expected_y_grid)
assert_array_almost_equal(s_grid, expected_s_grid)
def test_explicit_extent(self):
# Interpolation to a grid with explicit extents.
expected_x_grid = np.array([[-5., 0., 5., 10.],
[-5., 0., 5., 10.]])
expected_y_grid = np.array([[7.5, 7.5, 7.5, 7.5],
[10., 10., 10., 10]])
expected_s_grid = np.array([[2.5, 3.5, 2.5, np.nan],
[3., 4., 3., 2.]])
extent = (-5, 10, 7.5, 10)
x_grid, y_grid, s_grid = vec_trans._interpolate_to_grid(
4, 2, self.x, self.y, self.s, target_extent=extent)
assert_array_equal(x_grid, expected_x_grid)
assert_array_equal(y_grid, expected_y_grid)
assert_array_almost_equal(s_grid, expected_s_grid)
def test_multiple_fields(self):
# Interpolation of multiple fields in one go.
expected_x_grid = np.array([[-10., -5., 0., 5., 10.],
[-10., -5., 0., 5., 10.],
[-10., -5., 0., 5., 10.]])
expected_y_grid = np.array([[5., 5., 5., 5., 5.],
[7.5, 7.5, 7.5, 7.5, 7.5],
[10., 10., 10., 10., 10]])
expected_s_grid = np.array([[np.nan, 2., 3., 2., np.nan],
[np.nan, 2.5, 3.5, 2.5, np.nan],
[2., 3., 4., 3., 2.]])
x_grid, y_grid, s_grid1, s_grid2, s_grid3 = \
vec_trans._interpolate_to_grid(5, 3, self.x, self.y,
self.s, self.s, self.s)
assert_array_equal(x_grid, expected_x_grid)
assert_array_equal(y_grid, expected_y_grid)
assert_array_almost_equal(s_grid1, expected_s_grid)
assert_array_almost_equal(s_grid2, expected_s_grid)
assert_array_almost_equal(s_grid3, expected_s_grid)
class Test_vector_scalar_to_grid(object):
@classmethod
def setup_class(cls):
cls.x, cls.y = _sample_plate_carree_coordinates()
cls.u, cls.v = _sample_plate_carree_vector_field()
cls.s = _sample_plate_carree_scalar_field()
def test_no_transform(self):
# Transform and regrid vector (with no projection transform).
expected_x_grid = np.array([[-10., -5., 0., 5., 10.],
[-10., -5., 0., 5., 10.],
[-10., -5., 0., 5., 10.]])
expected_y_grid = np.array([[5., 5., 5., 5., 5.],
[7.5, 7.5, 7.5, 7.5, 7.5],
[10., 10., 10., 10., 10]])
expected_u_grid = np.array([[np.nan, 2., 3., 2., np.nan],
[np.nan, 2.5, 3.5, 2.5, np.nan],
[2., 3., 4., 3., 2.]])
expected_v_grid = np.array([[np.nan, .8, .3, .8, np.nan],
[np.nan, 2.675, 2.15, 2.675, np.nan],
[5.5, 4.75, 4., 4.75, 5.5]])
src_crs = target_crs = ccrs.PlateCarree()
x_grid, y_grid, u_grid, v_grid = vec_trans.vector_scalar_to_grid(
src_crs, target_crs, (5, 3), self.x, self.y, self.u, self.v)
assert_array_equal(x_grid, expected_x_grid)
assert_array_equal(y_grid, expected_y_grid)
assert_array_almost_equal(u_grid, expected_u_grid)
assert_array_almost_equal(v_grid, expected_v_grid)
def test_with_transform(self):
# Transform and regrid vector.
target_crs = ccrs.PlateCarree()
src_crs = ccrs.NorthPolarStereo()
input_coords = [src_crs.transform_point(xp, yp, target_crs)
for xp, yp in zip(self.x, self.y)]
x_nps = np.array([ic[0] for ic in input_coords])
y_nps = np.array([ic[1] for ic in input_coords])
u_nps, v_nps = src_crs.transform_vectors(target_crs, self.x, self.y,
self.u, self.v)
expected_x_grid = np.array([[-10., -5., 0., 5., 10.],
[-10., -5., 0., 5., 10.],
[-10., -5., 0., 5., 10.]])
expected_y_grid = np.array([[5., 5., 5., 5., 5.],
[7.5, 7.5, 7.5, 7.5, 7.5],
[10., 10., 10., 10., 10]])
expected_u_grid = np.array([[np.nan, 2., 3., 2., np.nan],
[np.nan, 2.5, 3.5, 2.5, np.nan],
[2., 3., 4., 3., 2.]])
expected_v_grid = np.array([[np.nan, .8, .3, .8, np.nan],
[np.nan, 2.675, 2.15, 2.675, np.nan],
[5.5, 4.75, 4., 4.75, 5.5]])
x_grid, y_grid, u_grid, v_grid = vec_trans.vector_scalar_to_grid(
src_crs, target_crs, (5, 3), x_nps, y_nps, u_nps, v_nps)
assert_array_almost_equal(x_grid, expected_x_grid)
assert_array_almost_equal(y_grid, expected_y_grid)
# Vector transforms are somewhat approximate, so we are more lenient
# with the returned values since we have transformed twice.
assert_array_almost_equal(u_grid, expected_u_grid, decimal=4)
assert_array_almost_equal(v_grid, expected_v_grid, decimal=4)
def test_with_scalar_field(self):
# Transform and regrid vector (with no projection transform) with an
# additional scalar field.
expected_x_grid = np.array([[-10., -5., 0., 5., 10.],
[-10., -5., 0., 5., 10.],
[-10., -5., 0., 5., 10.]])
expected_y_grid = np.array([[5., 5., 5., 5., 5.],
[7.5, 7.5, 7.5, 7.5, 7.5],
[10., 10., 10., 10., 10]])
expected_u_grid = np.array([[np.nan, 2., 3., 2., np.nan],
[np.nan, 2.5, 3.5, 2.5, np.nan],
[2., 3., 4., 3., 2.]])
expected_v_grid = np.array([[np.nan, .8, .3, .8, np.nan],
[np.nan, 2.675, 2.15, 2.675, np.nan],
[5.5, 4.75, 4., 4.75, 5.5]])
expected_s_grid = np.array([[np.nan, 2., 3., 2., np.nan],
[np.nan, 2.5, 3.5, 2.5, np.nan],
[2., 3., 4., 3., 2.]])
src_crs = target_crs = ccrs.PlateCarree()
x_grid, y_grid, u_grid, v_grid, s_grid = \
vec_trans.vector_scalar_to_grid(src_crs, target_crs, (5, 3),
self.x, self.y,
self.u, self.v, self.s)
assert_array_equal(x_grid, expected_x_grid)
assert_array_equal(y_grid, expected_y_grid)
assert_array_almost_equal(u_grid, expected_u_grid)
assert_array_almost_equal(v_grid, expected_v_grid)
assert_array_almost_equal(s_grid, expected_s_grid)
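# Usage sketch (not exercised by the tests above): regrid the sample vector
# field and plot it with matplotlib's quiver. matplotlib is an assumed extra
# dependency and is not otherwise imported by this test module.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    x, y = _sample_plate_carree_coordinates()
    u, v = _sample_plate_carree_vector_field()
    crs = ccrs.PlateCarree()
    # Same call signature the Test_vector_scalar_to_grid cases use above.
    x_grid, y_grid, u_grid, v_grid = vec_trans.vector_scalar_to_grid(
        crs, crs, (5, 3), x, y, u, v)
    plt.quiver(x_grid, y_grid, u_grid, v_grid)
    plt.show()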
|
sdimoudi/bart
|
refs/heads/master
|
tests/pyBART_run_test.py
|
4
|
import sys
import traceback
import os
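# Runs the file given as the first argument one line at a time through exec();
# the imports above (sys, traceback, os) are presumably kept so that the
# executed lines can use them. On any failure, print the traceback and the
# offending line, then exit non-zero.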
line = None
try:
    with open(sys.argv[1], 'r') as fd:
        for line in fd.readlines():
            exec(line)
except:
    exc_info = sys.exc_info()
    traceback.print_exception(*exc_info)
    print('Exception occurred while executing line: ', line)
    sys.exit(1)
|
kalpana-org/kalpana-logger
|
refs/heads/master
|
showstats.py
|
1
|
#!/usr/bin/env python3
from datetime import datetime
from operator import itemgetter
import os
import os.path
from libsyntyche.common import read_json, read_file, local_path, write_file
def formatted_date(datestring):
return datetime.strptime(datestring, '%Y%m%d').strftime('%d %b %Y')
def format_data(data):
files = ['{}: {}'.format(name, wc)
for name, wc in sorted(data.items(), key=itemgetter(1), reverse=True)
if name != 'total']
return '<br>'.join(files)
def generate_stats_file(silent):
config = read_json(os.path.expanduser('~/.config/kalpana/kalpana-logger.conf'))
logpath = os.path.expanduser(os.path.join(config['rootdir'], config['logdir']))
logfiles = [os.path.join(logpath,x) for x in os.listdir(logpath)
if x not in ('index.json', 'stats.html')]
days = {}
for log in logfiles:
lines = read_file(log).splitlines()
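        # Log format inferred from the parsing below: the first line holds an
        # integer word-count offset, and every remaining line starts with an
        # ISO date ('YYYY-MM-DD ...') followed by ';<wordcount>'.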
offset = int(lines.pop(0))
pairs = [(x[:10].replace('-',''), int(x.split(';')[1])) for x in lines]
for date, wordcount in pairs:
name = os.path.splitext(os.path.basename(log))[0]
if date not in days:
days[date] = {'total': 0, name: 0}
elif name not in days[date]:
days[date][name] = 0
days[date]['total'] += wordcount - offset
days[date][name] += wordcount - offset
offset = wordcount
html = read_file(local_path('statstemplate.html'))
entry = '<div class="date">{date} - {words}</div><div class="files">{files}</div>'
out = [entry.format(date=formatted_date(day), words=data['total'], files=format_data(data))
for day, data in sorted(days.items(), key=itemgetter(0), reverse=True)]
write_file(os.path.join(logpath, 'stats.html'), html.format('\n'.join(out)))
# print(*days.items(), sep='\n')
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--silent', action='store_true',
help="Don't open a browser.")
args = parser.parse_args()
generate_stats_file(args.silent)
if __name__ == '__main__':
main()
|
lupyuen/RaspberryPiImage
|
refs/heads/master
|
home/pi/GrovePi/Software/Python/others/temboo/Library/Google/Directions/__init__.py
|
5
|
from temboo.Library.Google.Directions.GetBicyclingDirections import GetBicyclingDirections, GetBicyclingDirectionsInputSet, GetBicyclingDirectionsResultSet, GetBicyclingDirectionsChoreographyExecution
from temboo.Library.Google.Directions.GetDrivingDirections import GetDrivingDirections, GetDrivingDirectionsInputSet, GetDrivingDirectionsResultSet, GetDrivingDirectionsChoreographyExecution
from temboo.Library.Google.Directions.GetWalkingDirections import GetWalkingDirections, GetWalkingDirectionsInputSet, GetWalkingDirectionsResultSet, GetWalkingDirectionsChoreographyExecution
|
neiudemo1/django
|
refs/heads/master
|
tests/annotations/tests.py
|
194
|
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models import (
F, BooleanField, CharField, Count, DateTimeField, ExpressionWrapper, Func,
IntegerField, Sum, Value,
)
from django.db.models.functions import Lower
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import (
Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket,
)
def cxOracle_py3_bug(func):
"""
There's a bug in Django/cx_Oracle with respect to string handling under
Python 3 (essentially, they treat Python 3 strings as Python 2 strings
rather than unicode). This makes some tests here fail under Python 3, so
we mark them as expected failures until someone fixes them in #23843.
"""
from unittest import expectedFailure
from django.db import connection
return expectedFailure(func) if connection.vendor == 'oracle' and six.PY3 else func
class NonAggregateAnnotationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_basic_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()))
for book in books:
self.assertEqual(book.is_book, 1)
def test_basic_f_annotation(self):
books = Book.objects.annotate(another_rating=F('rating'))
for book in books:
self.assertEqual(book.another_rating, book.rating)
def test_joined_annotation(self):
books = Book.objects.select_related('publisher').annotate(
num_awards=F('publisher__num_awards'))
for book in books:
self.assertEqual(book.num_awards, book.publisher.num_awards)
def test_mixed_type_annotation_date_interval(self):
active = datetime.datetime(2015, 3, 20, 14, 0, 0)
duration = datetime.timedelta(hours=1)
expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration
Ticket.objects.create(active_at=active, duration=duration)
t = Ticket.objects.annotate(
expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField())
).first()
self.assertEqual(t.expires, expires)
def test_mixed_type_annotation_numbers(self):
test = self.b1
b = Book.objects.annotate(
combined=ExpressionWrapper(F('pages') + F('rating'), output_field=IntegerField())
).get(isbn=test.isbn)
combined = int(test.pages + test.rating)
self.assertEqual(b.combined, combined)
def test_annotate_with_aggregation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()),
rating_count=Count('rating'))
for book in books:
self.assertEqual(book.is_book, 1)
self.assertEqual(book.rating_count, 1)
def test_aggregate_over_annotation(self):
agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age'))
other_agg = Author.objects.aggregate(age_sum=Sum('age'))
self.assertEqual(agg['otherage_sum'], other_agg['age_sum'])
@skipUnlessDBFeature('can_distinct_on_fields')
def test_distinct_on_with_annotation(self):
store = Store.objects.create(
name='test store',
original_opening=datetime.datetime.now(),
friday_night_closing=datetime.time(21, 00, 00),
)
names = [
'Theodore Roosevelt',
'Eleanor Roosevelt',
'Franklin Roosevelt',
'Ned Stark',
'Catelyn Stark',
]
for name in names:
Employee.objects.create(
store=store,
first_name=name.split()[0],
last_name=name.split()[1],
age=30, salary=2000,
)
people = Employee.objects.annotate(
name_lower=Lower('last_name'),
).distinct('name_lower')
self.assertEqual(set(p.last_name for p in people), {'Stark', 'Roosevelt'})
self.assertEqual(len(people), 2)
people2 = Employee.objects.annotate(
test_alias=F('store__name'),
).distinct('test_alias')
self.assertEqual(len(people2), 1)
def test_filter_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField())
).filter(is_book=1)
for book in books:
self.assertEqual(book.is_book, 1)
def test_filter_annotation_with_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=3.5)
for book in books:
self.assertEqual(book.other_rating, 3.5)
def test_filter_annotation_with_double_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=F('rating'))
for book in books:
self.assertEqual(book.other_rating, book.rating)
def test_filter_agg_with_double_f(self):
books = Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('sum_rating'))
for book in books:
self.assertEqual(book.sum_rating, book.rating)
def test_filter_wrong_annotation(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword .*"):
list(Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('nope')))
def test_combined_annotation_commutative(self):
book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
def test_update_with_annotation(self):
book_preupdate = Book.objects.get(pk=self.b2.pk)
Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating'))
book_postupdate = Book.objects.get(pk=self.b2.pk)
self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)
def test_annotation_with_m2m(self):
books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age')
self.assertEqual(books[0].author_age, 34)
self.assertEqual(books[1].author_age, 35)
def test_annotation_reverse_m2m(self):
books = Book.objects.annotate(
store_name=F('store__name')).filter(
name='Practical Django Projects').order_by(
'store_name')
self.assertQuerysetEqual(
books, [
'Amazon.com',
'Books.com',
'Mamma and Pappa\'s Books'
],
lambda b: b.store_name
)
def test_values_annotation(self):
"""
Annotations can reference fields in a values clause,
and contribute to an existing values clause.
"""
# annotate references a field in values()
qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1)
book = qs.get(pk=self.b1.pk)
self.assertEqual(book['rating'] - 1, book['other_rating'])
# filter refs the annotated value
book = qs.get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
# can annotate an existing values with a new field
book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
self.assertEqual(book['other_isbn'], '155860191')
def test_defer_annotation(self):
"""
Deferred attributes can be referenced by an annotation,
but they are not themselves deferred, and cannot be deferred.
"""
qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1)
with self.assertNumQueries(2):
book = qs.get(other_rating=4)
self.assertEqual(book.rating, 5)
self.assertEqual(book.other_rating, 4)
with six.assertRaisesRegex(self, FieldDoesNotExist, "\w has no field named u?'other_rating'"):
book = qs.defer('other_rating').get(other_rating=4)
def test_mti_annotations(self):
"""
Fields on an inherited model can be referenced by an
annotated field.
"""
d = DepartmentStore.objects.create(
name='Angus & Robinson',
original_opening=datetime.date(2014, 3, 8),
friday_night_closing=datetime.time(21, 00, 00),
chain='Westfield'
)
books = Book.objects.filter(rating__gt=4)
for b in books:
d.books.add(b)
qs = DepartmentStore.objects.annotate(
other_name=F('name'),
other_chain=F('chain'),
is_open=Value(True, BooleanField()),
book_isbn=F('books__isbn')
).order_by('book_isbn').filter(chain='Westfield')
self.assertQuerysetEqual(
qs, [
('Angus & Robinson', 'Westfield', True, '155860191'),
('Angus & Robinson', 'Westfield', True, '159059725')
],
lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn)
)
def test_null_annotation(self):
"""
Test that annotating None onto a model round-trips
"""
book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first()
self.assertIsNone(book.no_value)
def test_order_by_annotation(self):
authors = Author.objects.annotate(other_age=F('age')).order_by('other_age')
self.assertQuerysetEqual(
authors, [
25, 29, 29, 34, 35, 37, 45, 46, 57,
],
lambda a: a.other_age
)
def test_order_by_aggregate(self):
authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age')
self.assertQuerysetEqual(
authors, [
(25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2),
],
lambda a: (a['age'], a['age_count'])
)
def test_annotate_exists(self):
authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1)
self.assertFalse(authors.exists())
def test_column_field_ordering(self):
"""
Test that columns are aligned in the correct order for
resolve_columns. This test will fail on mysql if column
ordering is out. Column fields should be aligned as:
1. extra_select
2. model_fields
3. annotation_fields
4. model_related_fields
"""
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
self.assertQuerysetEqual(
qs.order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
def test_column_field_ordering_with_deferred(self):
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
# and we respect deferred columns!
self.assertQuerysetEqual(
qs.defer('age').order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
@cxOracle_py3_bug
def test_custom_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).order_by('name')
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'),
('Django Software Foundation', 'No Tag'),
('Google', 'Do No Evil'),
('Yahoo', 'Internet Company')
],
lambda c: (c.name, c.tagline)
)
@cxOracle_py3_bug
def test_custom_functions_can_ref_other_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
class Lower(Func):
function = 'LOWER'
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).annotate(
tagline_lower=Lower(F('tagline'), output_field=CharField())
).order_by('name')
# LOWER function supported by:
# oracle, postgres, mysql, sqlite, sqlserver
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'.lower()),
('Django Software Foundation', 'No Tag'.lower()),
('Google', 'Do No Evil'.lower()),
('Yahoo', 'Internet Company'.lower())
],
lambda c: (c.name, c.tagline_lower)
)
|
rtfd/sphinx-autoapi
|
refs/heads/master
|
autoapi/mappers/python/__init__.py
|
1
|
from .mapper import PythonSphinxMapper
from .objects import (
PythonClass,
PythonFunction,
PythonModule,
PythonMethod,
PythonPackage,
PythonAttribute,
PythonData,
PythonException,
)
|
kaber/netlink-mmap
|
refs/heads/master
|
Documentation/target/tcm_mod_builder.py
|
4981
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
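# The helpers below generate the per-fabric skeleton files:
#  - tcm_mod_build_{FC,SAS,iSCSI}_include() write <module>_base.h
#  - tcm_mod_build_configfs() writes the configfs glue in <module>_configfs.c
#  - tcm_mod_scan_fabric_ops() collects the function pointers declared in
#    include/target/target_core_fabric.h
#  - tcm_mod_dump_fabric_ops() emits matching stubs in <module>_fabric.c/.h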
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
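# Illustrative sketch (added; "tcm_nab5000" is a hypothetical module name, not
# taken from this excerpt): for that name the Makefile written above would
# contain
#
#     tcm_nab5000-objs := tcm_nab5000_fabric.o \
#                         tcm_nab5000_configfs.o
#     obj-$(CONFIG_TCM_NAB5000) += tcm_nab5000.o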
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
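# Illustrative sketch (added; same hypothetical "tcm_nab5000" name as above):
# the Kconfig entry written by this helper would read
#
#     config TCM_NAB5000
#         tristate "TCM_NAB5000 fabric module"
#         depends on TARGET_CORE && CONFIGFS_FS
#         default n
#         ---help---
#         Say Y here to enable the TCM_NAB5000 fabric module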
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
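# Illustrative usage (added note; the script's own filename is assumed here,
# since it is not shown in this excerpt):
#
#     python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# --protoident accepts only "FC", "SAS" or "iSCSI"; any other value makes
# main() print an error and call sys.exit(1).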
|
jacknjzhou/neutron
|
refs/heads/master
|
neutron/tests/fullstack/resources/process.py
|
4
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from distutils import spawn
import os
import fixtures
from neutronclient.common import exceptions as nc_exc
from neutronclient.v2_0 import client
from oslo_log import log as logging
from neutron.agent.linux import async_process
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
from neutron.tests import base
from neutron.tests.common import net_helpers
LOG = logging.getLogger(__name__)
# This should correspond to the directory from which infra retrieves log files
DEFAULT_LOG_DIR = '/tmp/fullstack-logs/'
class ProcessFixture(fixtures.Fixture):
def __init__(self, test_name, process_name, exec_name, config_filenames):
super(ProcessFixture, self).__init__()
self.test_name = test_name
self.process_name = process_name
self.exec_name = exec_name
self.config_filenames = config_filenames
self.process = None
def _setUp(self):
self.start()
self.addCleanup(self.stop)
def start(self):
log_dir = os.path.join(DEFAULT_LOG_DIR, self.test_name)
common_utils.ensure_dir(log_dir)
timestamp = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S-%f")
log_file = "%s--%s.log" % (self.process_name, timestamp)
cmd = [spawn.find_executable(self.exec_name),
'--log-dir', log_dir,
'--log-file', log_file]
for filename in self.config_filenames:
cmd += ['--config-file', filename]
self.process = async_process.AsyncProcess(cmd)
self.process.start(block=True)
def stop(self):
self.process.stop(block=True)
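# Illustrative sketch (added, not part of the original module): fixtures in
# this file start their process through useFixture(), in the same way
# NeutronServerFixture below does, e.g.
#
#     self.useFixture(ProcessFixture(
#         test_name=self.test_name,
#         process_name='some-agent',            # hypothetical process name
#         exec_name='some-agent',               # hypothetical executable
#         config_filenames=['/etc/neutron/agent.ini']))
#
# start() runs on setup and stop() is registered as the cleanup.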
class RabbitmqEnvironmentFixture(fixtures.Fixture):
def _setUp(self):
self.user = base.get_rand_name(prefix='user')
self.password = base.get_rand_name(prefix='pass')
self.vhost = base.get_rand_name(prefix='vhost')
self._execute('add_user', self.user, self.password)
self.addCleanup(self._execute, 'delete_user', self.user)
self._execute('add_vhost', self.vhost)
self.addCleanup(self._execute, 'delete_vhost', self.vhost)
self._execute('set_permissions', '-p', self.vhost, self.user,
'.*', '.*', '.*')
def _execute(self, *args):
cmd = ['rabbitmqctl']
cmd.extend(args)
utils.execute(cmd, run_as_root=True)
class NeutronServerFixture(fixtures.Fixture):
NEUTRON_SERVER = "neutron-server"
def __init__(self, test_name, neutron_cfg_fixture, plugin_cfg_fixture):
self.test_name = test_name
self.neutron_cfg_fixture = neutron_cfg_fixture
self.plugin_cfg_fixture = plugin_cfg_fixture
def _setUp(self):
config_filenames = [self.neutron_cfg_fixture.filename,
self.plugin_cfg_fixture.filename]
self.process_fixture = self.useFixture(ProcessFixture(
test_name=self.test_name,
process_name=self.NEUTRON_SERVER,
exec_name=self.NEUTRON_SERVER,
config_filenames=config_filenames))
utils.wait_until_true(self.server_is_live)
def server_is_live(self):
try:
self.client.list_networks()
return True
except nc_exc.NeutronClientException:
return False
@property
def client(self):
url = ("http://127.0.0.1:%s" %
self.neutron_cfg_fixture.config.DEFAULT.bind_port)
return client.Client(auth_strategy="noauth", endpoint_url=url)
class OVSAgentFixture(fixtures.Fixture):
NEUTRON_OVS_AGENT = "neutron-openvswitch-agent"
def __init__(self, test_name, neutron_cfg_fixture, agent_cfg_fixture):
self.test_name = test_name
self.neutron_cfg_fixture = neutron_cfg_fixture
self.neutron_config = self.neutron_cfg_fixture.config
self.agent_cfg_fixture = agent_cfg_fixture
self.agent_config = agent_cfg_fixture.config
def _setUp(self):
self.br_int = self.useFixture(
net_helpers.OVSBridgeFixture(
self.agent_cfg_fixture.get_br_int_name())).bridge
config_filenames = [self.neutron_cfg_fixture.filename,
self.agent_cfg_fixture.filename]
self.process_fixture = self.useFixture(ProcessFixture(
test_name=self.test_name,
process_name=self.NEUTRON_OVS_AGENT,
exec_name=self.NEUTRON_OVS_AGENT,
config_filenames=config_filenames))
class L3AgentFixture(fixtures.Fixture):
NEUTRON_L3_AGENT = "neutron-l3-agent"
def __init__(self, test_name, neutron_cfg_fixture, l3_agent_cfg_fixture):
super(L3AgentFixture, self).__init__()
self.test_name = test_name
self.neutron_cfg_fixture = neutron_cfg_fixture
self.l3_agent_cfg_fixture = l3_agent_cfg_fixture
def _setUp(self):
self.plugin_config = self.l3_agent_cfg_fixture.config
config_filenames = [self.neutron_cfg_fixture.filename,
self.l3_agent_cfg_fixture.filename]
self.process_fixture = self.useFixture(ProcessFixture(
test_name=self.test_name,
process_name=self.NEUTRON_L3_AGENT,
exec_name=spawn.find_executable(
'l3_agent.py',
path=os.path.join(base.ROOTDIR, 'common', 'agents')),
config_filenames=config_filenames))
def get_namespace_suffix(self):
return self.plugin_config.DEFAULT.test_namespace_suffix
|
tedder/ansible
|
refs/heads/devel
|
test/units/modules/net_tools/nios/test_nios_mx_record.py
|
68
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.modules.net_tools.nios import nios_mx_record
from ansible.module_utils.net_tools.nios import api
from units.compat.mock import patch, MagicMock, Mock
from .test_nios_module import TestNiosModule, load_fixture
class TestNiosMXRecordModule(TestNiosModule):
module = nios_mx_record
def setUp(self):
super(TestNiosMXRecordModule, self).setUp()
self.module = MagicMock(name='ansible.modules.net_tools.nios.nios_mx_record.WapiModule')
self.module.check_mode = False
self.module.params = {'provider': None}
self.mock_wapi = patch('ansible.modules.net_tools.nios.nios_mx_record.WapiModule')
self.exec_command = self.mock_wapi.start()
self.mock_wapi_run = patch('ansible.modules.net_tools.nios.nios_mx_record.WapiModule.run')
self.mock_wapi_run.start()
self.load_config = self.mock_wapi_run.start()
def tearDown(self):
super(TestNiosMXRecordModule, self).tearDown()
self.mock_wapi.stop()
self.mock_wapi_run.stop()
def _get_wapi(self, test_object):
wapi = api.WapiModule(self.module)
wapi.get_object = Mock(name='get_object', return_value=test_object)
wapi.create_object = Mock(name='create_object')
wapi.update_object = Mock(name='update_object')
wapi.delete_object = Mock(name='delete_object')
return wapi
def load_fixtures(self, commands=None):
self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
self.load_config.return_value = dict(diff=None, session='session')
def test_nios_mx_record_create(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible.com',
'mx': 'mailhost.ansible.com', 'preference': 0, 'comment': None, 'extattrs': None}
test_object = None
test_spec = {
"name": {"ib_req": True},
"mx": {"ib_req": True},
"preference": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
print("WAPI: ", wapi)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
'mx': 'mailhost.ansible.com', 'preference': 0})
def test_nios_mx_record_update_comment(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible.com', 'mx': 'mailhost.ansible.com',
'preference': 0, 'comment': 'updated comment', 'extattrs': None}
test_object = [
{
"comment": "test comment",
"_ref": "mxrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
"name": "ansible.com",
"mx": "mailhost.ansible.com",
"preference": 0,
"extattrs": {}
}
]
test_spec = {
"name": {"ib_req": True},
"mx": {"ib_req": True},
"preference": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
def test_nios_mx_record_remove(self):
self.module.params = {'provider': None, 'state': 'absent', 'name': 'ansible.com', 'mx': 'mailhost.ansible.com',
'preference': 0, 'comment': None, 'extattrs': None}
ref = "mxrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"
test_object = [{
"comment": "test comment",
"_ref": ref,
"name": "ansible.com",
"mx": "mailhost.ansible.com",
"extattrs": {'Site': {'value': 'test'}}
}]
test_spec = {
"name": {"ib_req": True},
"mx": {"ib_req": True},
"preference": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.delete_object.assert_called_once_with(ref)
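# Illustrative note (added): these tests drive api.WapiModule with the real
# WAPI transport patched out, so no live NIOS endpoint is required.  A typical
# invocation (assumed, not part of this file) would be
#
#     pytest test/units/modules/net_tools/nios/test_nios_mx_record.py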
|
lnielsen/invenio
|
refs/heads/pu
|
invenio/modules/formatter/models.py
|
1
|
# -*- coding: utf-8 -*-
#
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
invenio.modules.formatter.models
-----------------------------------
Database access related functions for Formatter engine and
administration pages.
"""
from invenio.ext.sqlalchemy import db
from invenio.modules.records.models import Record as Bibrec
class Format(db.Model):
"""Represents a Format record."""
__tablename__ = 'format'
id = db.Column(
db.MediumInteger(9, unsigned=True),
primary_key=True,
autoincrement=True)
name = db.Column(db.String(255), nullable=False)
code = db.Column(db.String(20), nullable=False, unique=True)
description = db.Column(db.String(255), server_default='')
content_type = db.Column(db.String(255), server_default='')
visibility = db.Column(
db.TinyInteger(4),
nullable=False,
server_default='1')
last_updated = db.Column(db.DateTime, nullable=True)
@classmethod
def get_export_formats(cls):
return cls.query.filter(db.and_(
Format.content_type != 'text/html',
Format.visibility == 1)
).order_by(Format.name).all()
def set_name(self, name, lang="generic", type='ln'):
"""
        Set the name of an output format.
        If 'type' is different from 'ln' or 'sn', do nothing.
        If 'name' exceeds 256 chars, it is truncated to its first 256 chars.
        The localized names of output formats are stored in the formatname table.
        :param type: either 'ln' (for long name) or 'sn' (for short name)
:param lang: the language in which the name is given
:param name: the name to give to the output format
"""
if len(name) > 256:
name = name[:256]
if type.lower() != "sn" and type.lower() != "ln":
return
if lang == "generic" and type.lower() == "ln":
self.name = name
else:
# Save inside formatname table for name variations
fname = db.session.query(Formatname)\
.get((self.id, lang, type.lower()))
if not fname:
fname = db.session.merge(Formatname())
fname.id_format = self.id
fname.ln = lang
fname.type = type.lower()
fname.value = name
db.session.save(fname)
db.session.add(self)
db.session.commit()
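# Illustrative usage (added sketch; the format instance and names below are
# hypothetical):
#
#     fmt = Format.query.filter_by(code='hb').first()
#     fmt.set_name('HTML brief')                        # generic long name
#     fmt.set_name('HTML bref', lang='fr', type='ln')   # per-language variant
#
# Generic long names are stored on Format.name; every other (lang, type)
# combination goes into the formatname table keyed by (id_format, ln, type).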
class Formatname(db.Model):
"""Represents a Formatname record."""
__tablename__ = 'formatname'
id_format = db.Column(
db.MediumInteger(9, unsigned=True),
db.ForeignKey(Format.id),
primary_key=True)
ln = db.Column(
db.Char(5),
primary_key=True,
server_default='')
type = db.Column(
db.Char(3),
primary_key=True,
server_default='sn')
value = db.Column(db.String(255), nullable=False)
format = db.relationship(Format, backref='names')
#TODO add association proxy with key (id_format, ln, type)
class Bibfmt(db.Model):
"""Represents a Bibfmt record."""
__tablename__ = 'bibfmt'
id_bibrec = db.Column(
db.MediumInteger(8, unsigned=True),
db.ForeignKey(Bibrec.id),
nullable=False,
server_default='0',
primary_key=True,
autoincrement=False)
format = db.Column(
db.String(10),
nullable=False,
server_default='',
primary_key=True,
index=True)
kind = db.Column(
db.String(10),
nullable=False,
server_default='',
index=True
)
last_updated = db.Column(
db.DateTime,
nullable=False,
server_default='1900-01-01 00:00:00',
index=True)
value = db.Column(db.iLargeBinary)
needs_2nd_pass = db.Column(db.TinyInteger(1), server_default='0')
bibrec = db.relationship(Bibrec, backref='bibfmt')
__all__ = [
'Format',
'Formatname',
'Bibfmt',
]
|
cpcloud/ibis
|
refs/heads/master
|
ibis/client.py
|
1
|
import abc
import ibis.common.exceptions as com
import ibis.expr.operations as ops
import ibis.expr.schema as sch
import ibis.expr.types as ir
import ibis.sql.compiler as comp
import ibis.util as util
from ibis.config import options
class Client:
pass
class Query:
"""Abstraction for DML query execution to enable queries, progress,
cancellation and more (for backends supporting such functionality).
"""
def __init__(self, client, sql, **kwargs):
self.client = client
dml = getattr(sql, 'dml', sql)
self.expr = getattr(
dml, 'parent_expr', getattr(dml, 'table_set', None)
)
if not isinstance(sql, str):
self.compiled_sql = sql.compile()
else:
self.compiled_sql = sql
self.result_wrapper = getattr(dml, 'result_handler', None)
self.extra_options = kwargs
def execute(self):
# synchronous by default
with self.client._execute(self.compiled_sql, results=True) as cur:
result = self._fetch(cur)
return self._wrap_result(result)
def _wrap_result(self, result):
if self.result_wrapper is not None:
result = self.result_wrapper(result)
return result
def _fetch(self, cursor):
raise NotImplementedError
def schema(self):
if isinstance(self.expr, (ir.TableExpr, ir.ExprList, sch.HasSchema)):
return self.expr.schema()
elif isinstance(self.expr, ir.ValueExpr):
return sch.schema([(self.expr.get_name(), self.expr.type())])
else:
raise ValueError(
'Expression with type {} does not have a '
'schema'.format(type(self.expr))
)
class SQLClient(Client, metaclass=abc.ABCMeta):
dialect = comp.Dialect
query_class = Query
table_class = ops.DatabaseTable
table_expr_class = ir.TableExpr
def table(self, name, database=None):
"""
Create a table expression that references a particular table in the
database
Parameters
----------
name : string
database : string, optional
Returns
-------
table : TableExpr
"""
qualified_name = self._fully_qualified_name(name, database)
schema = self._get_table_schema(qualified_name)
node = self.table_class(qualified_name, schema, self)
return self.table_expr_class(node)
@property
def current_database(self):
return self.con.database
def database(self, name=None):
"""
Create a Database object for a given database name that can be used for
exploring and manipulating the objects (tables, functions, views, etc.)
        inside it.
Parameters
----------
name : string
Name of database
Returns
-------
database : Database
"""
# TODO: validate existence of database
if name is None:
name = self.current_database
return self.database_class(name, self)
def _fully_qualified_name(self, name, database):
# XXX
return name
def _execute(self, query, results=False):
cur = self.con.execute(query)
if results:
return cur
else:
cur.release()
def sql(self, query):
"""
Convert a SQL query to an Ibis table expression
Parameters
        ----------
        query : string
          SQL query to convert into a table expression
Returns
-------
table : TableExpr
"""
# Get the schema by adding a LIMIT 0 on to the end of the query. If
# there is already a limit in the query, we find and remove it
limited_query = 'SELECT * FROM ({}) t0 LIMIT 0'.format(query)
schema = self._get_schema_using_query(limited_query)
return ops.SQLQueryResult(query, schema, self).to_expr()
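# Illustrative usage (added sketch; the client, table and column names are
# hypothetical):
#
#     expr = client.sql('SELECT key, count(*) AS n FROM events GROUP BY key')
#
# The LIMIT 0 wrapper above lets the backend report the result schema without
# materialising any rows; the returned object is an ordinary TableExpr.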
def raw_sql(self, query, results=False):
"""
Execute a given query string. Could have unexpected results if the
query modifies the behavior of the session in a way unknown to Ibis; be
careful.
Parameters
----------
query : string
DML or DDL statement
results : boolean, default False
          Pass True if the query has a result set
Returns
-------
cur : ImpalaCursor if results=True, None otherwise
You must call cur.release() after you are finished using the cursor.
"""
return self._execute(query, results=results)
def execute(self, expr, params=None, limit='default', **kwargs):
"""
Compile and execute Ibis expression using this backend client
interface, returning results in-memory in the appropriate object type
Parameters
----------
expr : Expr
limit : int, default None
          For expressions yielding result sets; retrieve at most this number of
values/rows. Overrides any limit already set on the expression.
params : not yet implemented
Returns
-------
output : input type dependent
Table expressions: pandas.DataFrame
Array expressions: pandas.Series
Scalar expressions: Python scalar value
"""
query_ast = self._build_ast_ensure_limit(expr, limit, params=params)
result = self._execute_query(query_ast, **kwargs)
return result
def _execute_query(self, dml, **kwargs):
query = self.query_class(self, dml, **kwargs)
return query.execute()
def compile(self, expr, params=None, limit=None):
"""
Translate expression to one or more queries according to backend target
Returns
-------
output : single query or list of queries
"""
query_ast = self._build_ast_ensure_limit(expr, limit, params=params)
return query_ast.compile()
def _build_ast_ensure_limit(self, expr, limit, params=None):
context = self.dialect.make_context(params=params)
query_ast = self._build_ast(expr, context)
# note: limit can still be None at this point, if the global
# default_limit is None
for query in reversed(query_ast.queries):
if (
isinstance(query, comp.Select)
and not isinstance(expr, ir.ScalarExpr)
and query.table_set is not None
):
if query.limit is None:
if limit == 'default':
query_limit = options.sql.default_limit
else:
query_limit = limit
if query_limit:
query.limit = {'n': query_limit, 'offset': 0}
elif limit is not None and limit != 'default':
query.limit = {'n': limit, 'offset': query.limit['offset']}
return query_ast
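# Illustrative note (added): the loop above only adjusts top-level SELECTs
# that scan a table and are not scalar-valued.  With limit='default' the
# configured options.sql.default_limit is applied when the query has no limit
# of its own; limit=None leaves the query unbounded; an explicit integer
# overrides any limit already present on the outermost query.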
def explain(self, expr, params=None):
"""
Query for and return the query plan associated with the indicated
expression or SQL query.
Returns
-------
plan : string
"""
if isinstance(expr, ir.Expr):
context = self.dialect.make_context(params=params)
query_ast = self._build_ast(expr, context)
if len(query_ast.queries) > 1:
raise Exception('Multi-query expression')
query = query_ast.queries[0].compile()
else:
query = expr
statement = 'EXPLAIN {0}'.format(query)
with self._execute(statement, results=True) as cur:
result = self._get_list(cur)
return 'Query:\n{0}\n\n{1}'.format(
util.indent(query, 2), '\n'.join(result)
)
def _build_ast(self, expr, context):
# Implement in clients
raise NotImplementedError(type(self).__name__)
class QueryPipeline:
"""
Execute a series of queries, and capture any result sets generated
Note: No query pipelines have yet been implemented
"""
pass
def validate_backends(backends):
if not backends:
default = options.default_backend
if default is None:
raise com.IbisError(
'Expression depends on no backends, and found no default'
)
return [default]
if len(backends) > 1:
raise ValueError('Multiple backends found')
return backends
def execute(expr, limit='default', params=None, **kwargs):
backend, = validate_backends(list(find_backends(expr)))
return backend.execute(expr, limit=limit, params=params, **kwargs)
def compile(expr, limit=None, params=None, **kwargs):
backend, = validate_backends(list(find_backends(expr)))
return backend.compile(expr, limit=limit, params=params, **kwargs)
def find_backends(expr):
seen_backends = set()
stack = [expr.op()]
seen = set()
while stack:
node = stack.pop()
if node not in seen:
seen.add(node)
for arg in node.flat_args():
if isinstance(arg, Client):
if arg not in seen_backends:
yield arg
seen_backends.add(arg)
elif isinstance(arg, ir.Expr):
stack.append(arg.op())
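# Illustrative note (added): find_backends() walks the expression graph and
# yields each distinct Client it encounters, so for a single-backend
# expression the pattern used by execute()/compile() above reduces to
#
#     backend, = validate_backends(list(find_backends(expr)))
#
# which raises IbisError when no backend (and no default) is found, and
# ValueError when the expression mixes more than one backend.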
class Database:
def __init__(self, name, client):
self.name = name
self.client = client
def __repr__(self):
return '{}({!r})'.format(type(self).__name__, self.name)
def __dir__(self):
attrs = dir(type(self))
unqualified_tables = [self._unqualify(x) for x in self.tables]
return sorted(frozenset(attrs + unqualified_tables))
def __contains__(self, key):
return key in self.tables
@property
def tables(self):
return self.list_tables()
def __getitem__(self, key):
return self.table(key)
def __getattr__(self, key):
return self.table(key)
def _qualify(self, value):
return value
def _unqualify(self, value):
return value
def drop(self, force=False):
"""
Drop the database
Parameters
----------
        force : boolean, default False
          Drop any objects if they exist, and do not fail if the database does
          not exist
"""
self.client.drop_database(self.name, force=force)
def namespace(self, ns):
"""
Creates a derived Database instance for collections of objects having a
common prefix. For example, for tables fooa, foob, and fooc, creating
the "foo" namespace would enable you to reference those objects as a,
b, and c, respectively.
Returns
-------
ns : DatabaseNamespace
"""
return DatabaseNamespace(self, ns)
def table(self, name):
"""
Return a table expression referencing a table in this database
Returns
-------
table : TableExpr
"""
qualified_name = self._qualify(name)
return self.client.table(qualified_name, self.name)
def list_tables(self, like=None):
return self.client.list_tables(
like=self._qualify_like(like), database=self.name
)
def _qualify_like(self, like):
return like
class DatabaseNamespace(Database):
def __init__(self, parent, namespace):
self.parent = parent
self.namespace = namespace
def __repr__(self):
return "{}(database={!r}, namespace={!r})".format(
type(self).__name__, self.name, self.namespace
)
@property
def client(self):
return self.parent.client
@property
def name(self):
return self.parent.name
def _qualify(self, value):
return self.namespace + value
def _unqualify(self, value):
return value.replace(self.namespace, '', 1)
def _qualify_like(self, like):
if like:
return self.namespace + like
else:
return '{0}*'.format(self.namespace)
class DatabaseEntity:
pass
class View(DatabaseEntity):
def drop(self):
pass
|
cosmiclattes/TPBviz
|
refs/heads/master
|
torrent/lib/python2.7/site-packages/south/migration/migrators.py
|
21
|
from __future__ import print_function
from copy import copy, deepcopy
import datetime
import inspect
import sys
import traceback
from django.core.management import call_command
from django.core.management.commands import loaddata
from django.db import models
from django import VERSION as DJANGO_VERSION
import south.db
from south import exceptions
from south.db import DEFAULT_DB_ALIAS
from south.models import MigrationHistory
from south.signals import ran_migration
from south.utils.py3 import StringIO
class Migrator(object):
def __init__(self, verbosity=0, interactive=False):
self.verbosity = int(verbosity)
self.interactive = bool(interactive)
@staticmethod
def title(target):
raise NotImplementedError()
def print_title(self, target):
if self.verbosity:
print(self.title(target))
@staticmethod
def status(target):
raise NotImplementedError()
def print_status(self, migration):
status = self.status(migration)
if self.verbosity and status:
print(status)
@staticmethod
def orm(migration):
raise NotImplementedError()
def backwards(self, migration):
return self._wrap_direction(migration.backwards(), migration.prev_orm())
def direction(self, migration):
raise NotImplementedError()
@staticmethod
def _wrap_direction(direction, orm):
args = inspect.getargspec(direction)
if len(args[0]) == 1:
# Old migration, no ORM should be passed in
return direction
return (lambda: direction(orm))
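# Illustrative note (added sketch): _wrap_direction() exists because very old
# South migrations declared forwards/backwards without an ORM argument:
#
#     def forwards(self):            # legacy signature, returned unchanged
#         ...
#
#     def forwards(self, orm):       # current signature, wrapped so the
#         ...                        # frozen ORM is supplied automatically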
@staticmethod
def record(migration, database):
raise NotImplementedError()
def run_migration_error(self, migration, extra_info=''):
return (
' ! Error found during real run of migration! Aborting.\n'
'\n'
' ! Since you have a database that does not support running\n'
' ! schema-altering statements in transactions, we have had \n'
' ! to leave it in an interim state between migrations.\n'
'%s\n'
' ! The South developers regret this has happened, and would\n'
' ! like to gently persuade you to consider a slightly\n'
' ! easier-to-deal-with DBMS (one that supports DDL transactions)\n'
' ! NOTE: The error which caused the migration to fail is further up.'
) % extra_info
def run_migration(self, migration, database):
migration_function = self.direction(migration)
south.db.db.start_transaction()
try:
migration_function()
south.db.db.execute_deferred_sql()
if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator):
# record us as having done this in the same transaction,
# since we're not in a dry run
self.record(migration, database)
except:
south.db.db.rollback_transaction()
if not south.db.db.has_ddl_transactions:
print(self.run_migration_error(migration))
print("Error in migration: %s" % migration)
raise
else:
try:
south.db.db.commit_transaction()
except:
print("Error during commit in migration: %s" % migration)
raise
def run(self, migration, database):
# Get the correct ORM.
south.db.db.current_orm = self.orm(migration)
# If we're not already in a dry run, and the database doesn't support
# running DDL inside a transaction, *cough*MySQL*cough* then do a dry
# run first.
if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator):
if not south.db.db.has_ddl_transactions:
dry_run = DryRunMigrator(migrator=self, ignore_fail=False)
dry_run.run_migration(migration, database)
return self.run_migration(migration, database)
def send_ran_migration(self, migration, database):
ran_migration.send(None,
app=migration.app_label(),
migration=migration,
method=self.__class__.__name__.lower(),
verbosity=self.verbosity,
interactive=self.interactive,
db=database)
def migrate(self, migration, database):
"""
Runs the specified migration forwards/backwards, in order.
"""
app = migration.migrations._migrations
migration_name = migration.name()
self.print_status(migration)
result = self.run(migration, database)
self.send_ran_migration(migration, database)
return result
def migrate_many(self, target, migrations, database):
raise NotImplementedError()
class MigratorWrapper(object):
def __init__(self, migrator, *args, **kwargs):
self._migrator = copy(migrator)
attributes = dict([(k, getattr(self, k))
for k in self.__class__.__dict__
if not k.startswith('__')])
self._migrator.__dict__.update(attributes)
self._migrator.__dict__['_wrapper'] = self
def __getattr__(self, name):
return getattr(self._migrator, name)
class DryRunMigrator(MigratorWrapper):
def __init__(self, ignore_fail=True, *args, **kwargs):
super(DryRunMigrator, self).__init__(*args, **kwargs)
self._ignore_fail = ignore_fail
def _run_migration(self, migration):
if migration.no_dry_run():
if self.verbosity:
print(" - Migration '%s' is marked for no-dry-run." % migration)
return
south.db.db.dry_run = True
# preserve the constraint cache as it can be mutated by the dry run
constraint_cache = deepcopy(south.db.db._constraint_cache)
if self._ignore_fail:
south.db.db.debug, old_debug = False, south.db.db.debug
pending_creates = south.db.db.get_pending_creates()
south.db.db.start_transaction()
migration_function = self.direction(migration)
try:
try:
migration_function()
south.db.db.execute_deferred_sql()
except:
raise exceptions.FailedDryRun(migration, sys.exc_info())
finally:
south.db.db.rollback_transactions_dry_run()
if self._ignore_fail:
south.db.db.debug = old_debug
south.db.db.clear_run_data(pending_creates)
south.db.db.dry_run = False
# restore the preserved constraint cache from before dry run was
# executed
south.db.db._constraint_cache = constraint_cache
def run_migration(self, migration, database):
try:
self._run_migration(migration)
except exceptions.FailedDryRun:
if self._ignore_fail:
return False
raise
def send_ran_migration(self, *args, **kwargs):
pass
class FakeMigrator(MigratorWrapper):
def run(self, migration, database):
# Don't actually run, just record as if ran
self.record(migration, database)
if self.verbosity:
print(' (faked)')
def send_ran_migration(self, *args, **kwargs):
pass
class LoadInitialDataMigrator(MigratorWrapper):
def load_initial_data(self, target, db='default'):
if target is None or target != target.migrations[-1]:
return
# Load initial data, if we ended up at target
if self.verbosity:
print(" - Loading initial data for %s." % target.app_label())
if DJANGO_VERSION < (1, 6):
self.pre_1_6(target, db)
else:
self.post_1_6(target, db)
def pre_1_6(self, target, db):
# Override Django's get_apps call temporarily to only load from the
# current app
old_get_apps = models.get_apps
new_get_apps = lambda: [models.get_app(target.app_label())]
models.get_apps = new_get_apps
loaddata.get_apps = new_get_apps
try:
call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=db)
finally:
models.get_apps = old_get_apps
loaddata.get_apps = old_get_apps
def post_1_6(self, target, db):
import django.db.models.loading
## build a new 'AppCache' object with just the app we care about.
old_cache = django.db.models.loading.cache
new_cache = django.db.models.loading.AppCache()
new_cache.get_apps = lambda: [new_cache.get_app(target.app_label())]
## monkeypatch
django.db.models.loading.cache = new_cache
try:
call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=db)
finally:
## unmonkeypatch
django.db.models.loading.cache = old_cache
def migrate_many(self, target, migrations, database):
migrator = self._migrator
result = migrator.__class__.migrate_many(migrator, target, migrations, database)
if result:
self.load_initial_data(target, db=database)
return True
class Forwards(Migrator):
"""
Runs the specified migration forwards, in order.
"""
torun = 'forwards'
@staticmethod
def title(target):
if target is not None:
return " - Migrating forwards to %s." % target.name()
else:
assert False, "You cannot migrate forwards to zero."
@staticmethod
def status(migration):
return ' > %s' % migration
@staticmethod
def orm(migration):
return migration.orm()
def forwards(self, migration):
return self._wrap_direction(migration.forwards(), migration.orm())
direction = forwards
@staticmethod
def record(migration, database):
# Record us as having done this
record = MigrationHistory.for_migration(migration, database)
try:
from django.utils.timezone import now
record.applied = now()
except ImportError:
record.applied = datetime.datetime.utcnow()
if database != DEFAULT_DB_ALIAS:
record.save(using=database)
else:
# Django 1.1 and below always go down this branch.
record.save()
def format_backwards(self, migration):
if migration.no_dry_run():
return " (migration cannot be dry-run; cannot discover commands)"
old_debug, old_dry_run = south.db.db.debug, south.db.db.dry_run
south.db.db.debug = south.db.db.dry_run = True
stdout = sys.stdout
sys.stdout = StringIO()
try:
try:
self.backwards(migration)()
return sys.stdout.getvalue()
except:
raise
finally:
south.db.db.debug, south.db.db.dry_run = old_debug, old_dry_run
sys.stdout = stdout
def run_migration_error(self, migration, extra_info=''):
extra_info = ('\n'
'! You *might* be able to recover with:'
'%s'
'%s' %
(self.format_backwards(migration), extra_info))
return super(Forwards, self).run_migration_error(migration, extra_info)
def migrate_many(self, target, migrations, database):
try:
for migration in migrations:
result = self.migrate(migration, database)
if result is False: # The migrations errored, but nicely.
return False
finally:
# Call any pending post_syncdb signals
south.db.db.send_pending_create_signals(verbosity=self.verbosity,
interactive=self.interactive)
return True
class Backwards(Migrator):
"""
Runs the specified migration backwards, in order.
"""
torun = 'backwards'
@staticmethod
def title(target):
if target is None:
return " - Migrating backwards to zero state."
else:
return " - Migrating backwards to just after %s." % target.name()
@staticmethod
def status(migration):
return ' < %s' % migration
@staticmethod
def orm(migration):
return migration.prev_orm()
direction = Migrator.backwards
@staticmethod
def record(migration, database):
# Record us as having not done this
record = MigrationHistory.for_migration(migration, database)
if record.id is not None:
if database != DEFAULT_DB_ALIAS:
record.delete(using=database)
else:
# Django 1.1 always goes down here
record.delete()
def migrate_many(self, target, migrations, database):
for migration in migrations:
self.migrate(migration, database)
return True
|
pacificIT/linux-2.6.36
|
refs/heads/lichee-dev
|
tools/perf/scripts/python/sched-migration.py
|
185
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf trace event handlers have been generated by perf trace -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
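# Illustrative note (added): find_time_slice() is a binary search over the
# ordered, non-overlapping slices in self.data.  It repeatedly halves the
# [start, end) window around ts and returns the index of a slice whose
# start <= ts <= end, or -1 when the timestamp falls outside every slice.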
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
cisco-openstack/neutron
|
refs/heads/staging/libertyplus
|
neutron/extensions/l3.py
|
25
|
# Copyright 2012 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as nexception
from neutron.plugins.common import constants
# L3 Exceptions
class RouterNotFound(nexception.NotFound):
message = _("Router %(router_id)s could not be found")
class RouterInUse(nexception.InUse):
message = _("Router %(router_id)s %(reason)s")
def __init__(self, **kwargs):
if 'reason' not in kwargs:
kwargs['reason'] = "still has ports"
super(RouterInUse, self).__init__(**kwargs)
class RouterInterfaceNotFound(nexception.NotFound):
message = _("Router %(router_id)s does not have "
"an interface with id %(port_id)s")
class RouterInterfaceNotFoundForSubnet(nexception.NotFound):
message = _("Router %(router_id)s has no interface "
"on subnet %(subnet_id)s")
class RouterInterfaceInUseByFloatingIP(nexception.InUse):
message = _("Router interface for subnet %(subnet_id)s on router "
"%(router_id)s cannot be deleted, as it is required "
"by one or more floating IPs.")
class FloatingIPNotFound(nexception.NotFound):
message = _("Floating IP %(floatingip_id)s could not be found")
class ExternalGatewayForFloatingIPNotFound(nexception.NotFound):
message = _("External network %(external_network_id)s is not reachable "
"from subnet %(subnet_id)s. Therefore, cannot associate "
"Port %(port_id)s with a Floating IP.")
class FloatingIPPortAlreadyAssociated(nexception.InUse):
message = _("Cannot associate floating IP %(floating_ip_address)s "
"(%(fip_id)s) with port %(port_id)s "
"using fixed IP %(fixed_ip)s, as that fixed IP already "
"has a floating IP on external network %(net_id)s.")
class RouterExternalGatewayInUseByFloatingIp(nexception.InUse):
message = _("Gateway cannot be updated for router %(router_id)s, since a "
"gateway to external network %(net_id)s is required by one or "
"more floating IPs.")
ROUTERS = 'routers'
EXTERNAL_GW_INFO = 'external_gateway_info'
RESOURCE_ATTRIBUTE_MAP = {
ROUTERS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': attr.NAME_MAX_LEN},
'is_visible': True, 'default': ''},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
'is_visible': True},
EXTERNAL_GW_INFO: {'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'enforce_policy': True,
'validate': {
'type:dict_or_nodata': {
'network_id': {'type:uuid': None,
'required': True},
'external_fixed_ips': {
'convert_list_to':
attr.convert_kvp_list_to_dict,
'type:fixed_ips': None,
'default': None,
'required': False,
}
}
}}
},
'floatingips': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'floating_ip_address': {'allow_post': True, 'allow_put': False,
'validate': {'type:ip_address_or_none': None},
'is_visible': True, 'default': None,
'enforce_policy': True},
'subnet_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': False, # Use False for input only attr
'default': None},
'floating_network_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'router_id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'port_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None,
'required_by_policy': True},
'fixed_ip_address': {'allow_post': True, 'allow_put': True,
'validate': {'type:ip_address_or_none': None},
'is_visible': True, 'default': None},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
},
}
l3_quota_opts = [
cfg.IntOpt('quota_router',
default=10,
help=_('Number of routers allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_floatingip',
default=50,
help=_('Number of floating IPs allowed per tenant. '
'A negative value means unlimited.')),
]
cfg.CONF.register_opts(l3_quota_opts, 'QUOTAS')
class L3(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Neutron L3 Router"
@classmethod
def get_alias(cls):
return "router"
@classmethod
def get_description(cls):
return ("Router abstraction for basic L3 forwarding"
" between L2 Neutron networks and access to external"
" networks via a NAT gateway.")
@classmethod
def get_updated(cls):
return "2012-07-20T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
plural_mappings['external_fixed_ips'] = 'external_fixed_ip'
attr.PLURALS.update(plural_mappings)
action_map = {'router': {'add_router_interface': 'PUT',
'remove_router_interface': 'PUT'}}
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.L3_ROUTER_NAT,
action_map=action_map,
register_quota=True)
def update_attributes_map(self, attributes):
super(L3, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
class RouterPluginBase(object):
@abc.abstractmethod
def create_router(self, context, router):
pass
@abc.abstractmethod
def update_router(self, context, id, router):
pass
@abc.abstractmethod
def get_router(self, context, id, fields=None):
pass
@abc.abstractmethod
def delete_router(self, context, id):
pass
@abc.abstractmethod
def get_routers(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
pass
@abc.abstractmethod
def add_router_interface(self, context, router_id, interface_info):
pass
@abc.abstractmethod
def remove_router_interface(self, context, router_id, interface_info):
pass
@abc.abstractmethod
def create_floatingip(self, context, floatingip):
pass
@abc.abstractmethod
def update_floatingip(self, context, id, floatingip):
pass
@abc.abstractmethod
def get_floatingip(self, context, id, fields=None):
pass
@abc.abstractmethod
def delete_floatingip(self, context, id):
pass
@abc.abstractmethod
def get_floatingips(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
pass
def get_routers_count(self, context, filters=None):
raise NotImplementedError()
def get_floatingips_count(self, context, filters=None):
raise NotImplementedError()
|
Glasgow2015/team-10
|
refs/heads/master
|
env/lib/python2.7/site-packages/cms/test_utils/project/placeholderapp/cms_app.py
|
55
|
from cms.apphook_pool import apphook_pool
from cms.app_base import CMSApp
from django.utils.translation import ugettext_lazy as _
class Example1App(CMSApp):
name = _("Example1 App")
urls = ["cms.test_utils.project.placeholderapp.urls"]
apphook_pool.register(Example1App)
class MultilingualExample1App(CMSApp):
name = _("MultilingualExample1 App")
urls = ["cms.test_utils.project.placeholderapp.urls_multi"]
apphook_pool.register(MultilingualExample1App)
|
akosyakov/intellij-community
|
refs/heads/master
|
python/lib/Lib/encodings/mac_roman.py
|
593
|
""" Python Character Mapping Codec mac_roman generated from 'MAPPINGS/VENDORS/APPLE/ROMAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-roman',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufb01' # 0xDE -> LATIN SMALL LIGATURE FI
u'\ufb02' # 0xDF -> LATIN SMALL LIGATURE FL
u'\u2021' # 0xE0 -> DOUBLE DAGGER
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
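# Usage sketch (illustrative, not part of the generated codec): with the
# tables above, codecs.charmap_decode('\x8a', 'strict', decoding_table)
# returns (u'\xe4', 1), i.e. Mac Roman byte 0x8A maps to LATIN SMALL LETTER
# A WITH DIAERESIS, and codecs.charmap_encode(u'\xe4', 'strict',
# encoding_table) maps it back to ('\x8a', 1).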
|
BrooksbridgeCapitalLLP/returnsseries
|
refs/heads/master
|
returnsseries/plot.py
|
2
|
"""Plotting funcs for ReturnsSeries class"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import returnsseries.utils as ru
import returnsseries.displayfunctions as rd
def plot_perf(returns_list, log2, shade_dates=None, shade_color='lightblue',
yticks_round=1, legend_loc='lower right',
summary_funcs=rd.summaries['ts'], **kwargs):
"""Plot a list of ReturnsSeries with relevant summary stats
Parameters
----------
returns_list: list
list of ReturnsSeries
log2: bool
Passed to ReturnsSeries.account_curve. If False, the result will be a
price index of compounded returns. If True, the result will be the
base-2 logarithm (numpy.log2) of the price index, and the y-axis
label and tick labels will be reset to reflect the log scale
shade_dates: list, optional, default None
List of 2-tuples; each tuple contains the start date and end date
of a time period to shade with color
shade_color: str, optional, default 'lightblue'
String specifying the color to use for shade_dates. Accepts any
valid matplotlib color name/string
yticks_round: int, optional, default 1
Number of decimals the y-axis tick labels should be rounded to
legend_loc: str, optional, default 'lower right'
Specifies where to place the legend; accepts any string that is
valid for the pyplot.legend loc argument
summary_funcs: list, optional,
default returnsseries.displayfunctions.summaries['ts']
list of functions passed into ReturnsSeries.summary
kwargs: keywords
Any keyword arguments to pass to matplotlib.pyplot.plot
Returns
-------
None"""
for rtns in returns_list:
rtns.plot_line(log2, shade_dates, **kwargs)
if log2:
yticks_log2(yticks_round)
summary_df = pd.concat([rtns.summary(summary_funcs) \
for rtns in returns_list], axis=1)
text_topleft(summary_df)
if legend_loc is not None:
plt.legend(loc=legend_loc)
return None
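# Usage sketch (hypothetical names; assumes `spx` and `ndx` are ReturnsSeries
# instances from this package):
#   plot_perf([spx, ndx], log2=True,
#             shade_dates=[('2008-09-01', '2009-06-30')])
# draws both account curves on the active figure, shades the given window,
# relabels the y-axis for the log2 scale and writes the summary table in the
# top-left corner.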
def correl_calc(returns_list, base_series):
"""Calculate correlation between all series in returns_list and base_series
Parameters
----------
returns_list: list
list of pandas.Series
base_series: int
Specifies which entry in returns_list to calculate the
correlation with. Must satisfy 0 <= base_series < len(returns_list)
Returns
-------
pandas.Series
index entries are the pandas.Series.name of the entries in returns_list,
values are the correlations between each series and the series at
returns_list[base_series]
"""
correlations = pd.concat(returns_list, axis=1).corr()
correlations = correlations.iloc[:,base_series]
correlations = correlations.round(2)
name = correlations.index[base_series]
correlations.name = "Correlation with {}".format(name)
return correlations
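# Minimal sketch of correl_calc on plain pandas data (illustrative values,
# not from this package):
#   a = pd.Series([0.01, -0.02, 0.03, 0.01], name='a')
#   b = pd.Series([0.02, -0.01, 0.02, 0.00], name='b')
#   correl_calc([a, b], base_series=0)
# returns a Series named "Correlation with a", indexed by ['a', 'b'], holding
# each series' correlation with 'a' rounded to 2 decimals.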
def shade_dates(shade_dates, srs, color):
"""Color in area below srs between index values in shade_dates
Note
----
Operates on active plotting figure.
Parameters
----------
shade_dates: list
list of tuples, each with 2 entries that define the start and end
of a subperiod within srs.index to be shaded
srs: pandas.Series
values define the y-values to color beneath
color: str
Name of the color to use, can be any valid matplotlib color name
Returns
-------
None"""
maxs = ru.within_dates(srs, shade_dates, np.nan)
mins = maxs.copy()
ylim_min = min(plt.ylim())
mins[ np.invert(mins.isnull()) ] = ylim_min
plt.fill_between(mins.index, mins.values, maxs.values, color=color)
return None
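# Illustrative call (hypothetical series): shade_dates([('2008-01-01',
# '2008-12-31')], price_srs, 'lightblue') fills from the bottom of the
# current y-axis up to price_srs between those two index values.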
def yticks_log2(round_=1):
"""Relabel y-axis for log2 plot
Note
----
Operates on active plotting figure.
Parameters
----------
round_: int, optional, default 1
Number of digits to round y-axis tick labels to, passed to numpy.round
Returns
-------
None"""
y_tick_locs, y_tick_labels = plt.yticks()
new_labels = np.round(pow(2, y_tick_locs), round_)
plt.yticks(y_tick_locs, new_labels)
plt.ylabel('Logarithmic Return Scale')
return None
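# Worked example: if the current y-tick locations are [0, 1, 2, 3],
# yticks_log2(1) relabels them as [1.0, 2.0, 4.0, 8.0] while leaving the
# tick positions unchanged.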
def text_topleft(str_):
"""Write a text in the top-left corner of active plotting figure
Parameters
----------
str_: str
Text string to write
Returns
-------
None"""
xlims = plt.xlim()
xdiff = max(xlims) - min(xlims)
text_x = min(xlims) + xdiff * .01
text_y = max(plt.ylim()) * .99
plt.text(text_x, text_y, str_, horizontalalignment='left',
verticalalignment='top', family='monospace')
return None
|
MachineLearningControl/OpenMLC-Python
|
refs/heads/master
|
MLC/arduino/connection/__init__.py
|
1
|
from base import BaseConnection
from serialconnection import SerialConnection
from mockconnection import MockConnection
__all__ = ["BaseConnection", "SerialConnection", "MockConnection" ]
|
jobsafran/mediadrop
|
refs/heads/master
|
batch-scripts/find_todos.py
|
11
|
#!/usr/bin/env python
keywords = [
'TODO',
'TOFIX',
'FIXME',
'HACK',
'XXX',
'WARN',
]
import os
grep_cmd = """grep -ERn "%s" """ % ("|".join(keywords))
files_and_dirs = [
'batch-scripts',
'deployment-scripts',
'mediadrop',
'plugins',
'setup*',
]
exclude_files_and_dirs = [
'batch-scripts/find_todos.py',
'mediadrop/public/scripts/third-party/',
'mediadrop/lib/xhtml/htmlsanitizer.py',
'mediadrop/public/scripts/mcore-compiled.js',
]
IN, MULT = 1, 2
# File extensions for files that share comment styles.
c_like_files = ['c', 'h', 'java', 'cpp']
html_files = ['xml', 'html', 'xhtml', 'htm']
js_files = ['js']
css_files = ['css']
python_files = ['py']
sql_files = ['sql']
ini_files = ['ini', 'ini_tmpl']
# multiline comment beginning/ending strings
# mapped to the filetypes associated with them.
multiline = {
('<!--!', '-->'): html_files + python_files,
('"""', '"""'): python_files,
('/*', '*/'): c_like_files + js_files + css_files + html_files,
}
# inline comment beginning strings
# mapped to the filetypes associated with them.
inline = {
'#': python_files + ini_files,
'//': c_like_files + js_files + html_files,
'--': sql_files,
}
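# For example, a ".py" file is matched by the ('"""', '"""') multiline pair
# and the '#' inline marker, while a ".js" file is matched by ('/*', '*/')
# and '//'.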
def get_beginning(lines, line_no, filename):
# Find the beginning of the enclosing comment block, for the
# comment on the given line
line_offset = line_no
while line_offset >= 0:
line = lines[line_offset]
for begin, end in multiline:
if not any(map(filename.endswith, multiline[(begin, end)])):
continue
char_offset = line.find(begin)
if char_offset >= 0:
return begin, end, line_offset, char_offset, MULT
for begin in inline:
if not any(map(filename.endswith, inline[begin])):
continue
char_offset = line.find(begin)
if char_offset >= 0:
return begin, None, line_offset, char_offset, IN
line_offset -= 1
return None, None, None, None, None
def get_ending(lines, begin, end, begin_line, begin_char, type):
# Find the ending of the enclosing comment block, given a
# description of the beginning of the block
end_line = begin_line
end_char = 0
if type == MULT:
while (end_line < len(lines)):
start = 0
if end_line == begin_line:
start = begin_char + len(begin)
end_char = lines[end_line].find(end, start)
if end_char >= 0:
break
end_line += 1
end_line += 1
elif type == IN:
while (end_line < len(lines)):
start = 0
if end_line == begin_line:
start = lines[end_line].index(begin)
if not lines[end_line][start:].strip().startswith(begin):
break
end_line += 1
return end_line, end_char
def get_lines(lines, line_no, filename):
# FIRST, GET THE ENTIRE CONTAINING COMMENT BLOCK
begin, end, begin_line, begin_char, type = get_beginning(lines, line_no, filename)
if (begin,end) == (None, None):
return None # false alarm, this isn't a comment at all!
end_line, end_char = get_ending(lines, begin, end, begin_line, begin_char, type)
lines = map(lambda line: line.strip(), lines[begin_line:end_line])
# "lines" NOW HOLDS EVERY LINE IN THE CONTAINING COMMENT BLOCK
# NOW, FIND ONLY THE LINES IN THE SECTION WE CARE ABOUT
offset = line_no - begin_line
lines = lines[offset:]
size = 1
while size < len(lines):
line = lines[size].strip().lstrip(begin)
if line == "":
break
size += 1
return lines[:size]
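# Illustrative behaviour (hypothetical input): for a Python file containing
#   # TODO: handle the error case
#   # and log it properly
#   x = 1
# a grep hit on the TODO line makes get_lines return the two comment lines;
# the enclosing block ends before "x = 1" and accumulation also stops at an
# empty comment line.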
# Keep track of how many of each keyword we see
counts = { }
for k in keywords:
counts[k] = 0
# Populate a dict of filename -> [lines of interest]
matched_files = {}
for x in files_and_dirs:
cmd = grep_cmd + x
result = os.popen(cmd)
for line in result.readlines():
if line.startswith('Binary file'):
# ignore binary files
continue
if any(map(line.startswith, exclude_files_and_dirs)):
# don't include the specifically excluded dirs
continue
file, line_no, rest = line.split(":", 2)
for k in counts:
# keep track of how many of each keyword we see
if k in rest:
counts[k] += 1
# Add this entry to the dict.
if file not in matched_files:
matched_files[file] = []
matched_files[file].append(int(line_no))
# Iterate over each filename, printing the found
# todo blocks.
for x in sorted(matched_files.keys()):
line_nos = matched_files[x]
f = open(x)
lines = f.readlines()
f.close()
output = ["\nFILE: %s\n-----" % x]
for i, num in enumerate(line_nos):
curr_line = line_nos[i]-1
next_line = None
if (i+1) < len(line_nos):
next_line = line_nos[i+1]-1
todo_lines = get_lines(lines, curr_line, x)
if not todo_lines:
continue
if next_line is not None:
# ensure that the current 'todo' item doesn't
# overlap with the next 'todo' item.
max_length = next_line - curr_line
todo_lines = todo_lines[:max_length]
output.append("line: %d\n%s\n" % (num, "\n".join(todo_lines)))
if len(output) > 1:
for chunk in output:
print chunk
# Print our counts
for k in counts:
print k, counts[k]
|
syndbg/ubuntu-make
|
refs/heads/master
|
tests/data/duplicatedframeworks/samecategory.py
|
13
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Framework with another category module without any framework"""
import umake.frameworks
class ACategory(umake.frameworks.BaseCategory):
def __init__(self):
super().__init__(name="Category A", description="Other category A description")
class FrameworkC(umake.frameworks.BaseFramework):
def __init__(self, category):
super().__init__(name="Framework C", description="Description for framework C",
category=category)
def setup(self, install_path=None, auto_accept_license=False):
super().setup()
def remove(self):
super().remove()
class FrameworkD(umake.frameworks.BaseFramework):
def __init__(self, category):
super().__init__(name="Framework D", description="Description for framework D",
category=category)
def setup(self, install_path=None, auto_accept_license=False):
super().setup()
def remove(self):
super().remove()
|
alxgu/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/ovs/openvswitch_bridge.py
|
75
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2013, David Stygstra <david.stygstra@gmail.com>
# Portions copyright @ 2015 VMware, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: openvswitch_bridge
version_added: 1.4
author: "David Stygstra (@stygstra)"
short_description: Manage Open vSwitch bridges
requirements: [ ovs-vsctl ]
description:
- Manage Open vSwitch bridges
options:
bridge:
required: true
description:
- Name of bridge or fake bridge to manage
parent:
version_added: "2.3"
description:
- Bridge parent of the fake bridge to manage
vlan:
version_added: "2.3"
description:
- The VLAN id of the fake bridge to manage (must be between 0 and
4095). This parameter is required if the I(parent) parameter is set.
state:
default: "present"
choices: [ present, absent ]
description:
- Whether the bridge should exist
timeout:
default: 5
description:
- How long to wait for ovs-vswitchd to respond
external_ids:
version_added: 2.0
description:
- A dictionary of external-ids. Omitting this parameter is a No-op.
To clear all external-ids pass an empty value.
fail_mode:
version_added: 2.0
choices : [secure, standalone]
description:
- Set bridge fail-mode. The default value (None) is a No-op.
set:
version_added: 2.3
description:
- Run the set command after bridge configuration. This parameter is
non-idempotent; the play will always return I(changed) state if it
is present
'''
EXAMPLES = '''
# Create a bridge named br-int
- openvswitch_bridge:
bridge: br-int
state: present
# Create a fake bridge named br-int within br-parent on the VLAN 405
- openvswitch_bridge:
bridge: br-int
parent: br-parent
vlan: 405
state: present
# Create an integration bridge
- openvswitch_bridge:
bridge: br-int
state: present
fail_mode: secure
args:
external_ids:
bridge-id: br-int
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
def _fail_mode_to_str(text):
if not text:
return None
else:
return text.strip()
def _external_ids_to_dict(text):
if not text:
return None
else:
d = {}
for l in text.splitlines():
if l:
k, v = l.split('=')
d[k] = v
return d
def map_obj_to_commands(want, have, module):
commands = list()
if module.params['state'] == 'absent':
if have:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s del-br"
" %(bridge)s")
command = templatized_command % module.params
commands.append(command)
else:
if have:
if want['fail_mode'] != have['fail_mode']:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" set-fail-mode %(bridge)s"
" %(fail_mode)s")
command = templatized_command % module.params
commands.append(command)
if want['external_ids'] != have['external_ids']:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" br-set-external-id %(bridge)s")
command = templatized_command % module.params
if want['external_ids']:
for k, v in iteritems(want['external_ids']):
if (k not in have['external_ids']
or want['external_ids'][k] != have['external_ids'][k]):
command += " " + k + " " + v
commands.append(command)
else:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s add-br"
" %(bridge)s")
command = templatized_command % module.params
if want['parent']:
templatized_command = "%(parent)s %(vlan)s"
command += " " + templatized_command % module.params
if want['set']:
templatized_command = " -- set %(set)s"
command += templatized_command % module.params
commands.append(command)
if want['fail_mode']:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" set-fail-mode %(bridge)s"
" %(fail_mode)s")
command = templatized_command % module.params
commands.append(command)
if want['external_ids']:
for k, v in iteritems(want['external_ids']):
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" br-set-external-id %(bridge)s")
command = templatized_command % module.params
command += " " + k + " " + v
commands.append(command)
return commands
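# Illustrative output (hypothetical parameters): with state=present,
# timeout=5, bridge=br-int and no existing bridge, this returns roughly
#   ["/usr/bin/ovs-vsctl -t 5 add-br br-int"]
# (the binary path comes from get_bin_path), plus set-fail-mode and
# br-set-external-id commands when fail_mode or external_ids are requested.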
def map_config_to_obj(module):
templatized_command = "%(ovs-vsctl)s -t %(timeout)s list-br"
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
if rc != 0:
module.fail_json(msg=err)
obj = {}
if module.params['bridge'] in out.splitlines():
obj['bridge'] = module.params['bridge']
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s br-to-parent"
" %(bridge)s")
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
obj['parent'] = out.strip()
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s br-to-vlan"
" %(bridge)s")
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
obj['vlan'] = out.strip()
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s get-fail-mode"
" %(bridge)s")
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
obj['fail_mode'] = _fail_mode_to_str(out)
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s br-get-external-id"
" %(bridge)s")
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
obj['external_ids'] = _external_ids_to_dict(out)
return obj
def map_params_to_obj(module):
obj = {
'bridge': module.params['bridge'],
'parent': module.params['parent'],
'vlan': module.params['vlan'],
'fail_mode': module.params['fail_mode'],
'external_ids': module.params['external_ids'],
'set': module.params['set']
}
return obj
def main():
""" Entry point. """
argument_spec = {
'bridge': {'required': True},
'parent': {'default': None},
'vlan': {'default': None, 'type': 'int'},
'state': {'default': 'present', 'choices': ['present', 'absent']},
'timeout': {'default': 5, 'type': 'int'},
'external_ids': {'default': None, 'type': 'dict'},
'fail_mode': {'default': None},
'set': {'required': False, 'default': None}
}
required_if = [('parent', not None, ('vlan',))]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
result = {'changed': False}
# We add ovs-vsctl to module_params to later build up templatized commands
module.params["ovs-vsctl"] = module.get_bin_path("ovs-vsctl", True)
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
for c in commands:
module.run_command(c, check_rc=True)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
RedHatInsights/insights-core
|
refs/heads/master
|
insights/parsers/tests/test_podman_inspect.py
|
1
|
import pytest
import doctest
from insights.parsers import podman_inspect, SkipException
from insights.tests import context_wrap
PODMAN_CONTAINER_INSPECT = """
[
{
"ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda",
"Created": "2019-08-21T10:38:34.753548542Z",
"Path": "dumb-init",
"Args": [
"--single-child",
"--",
"kolla_start"
],
"State": {
"OciVersion": "1.0.1-dev",
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 6606,
"ExitCode": 0,
"Error": "",
"StartedAt": "2019-09-06T19:16:08.066138727Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "a6b8f27df9feb9d820527d413f24ec9b1fcfb12049dd91af5fc188636bebe504",
"ImageName": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1",
"Rootfs": "",
"ResolvConfPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/resolv.conf",
"HostnamePath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hostname",
"HostsPath": "/var/run/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata/hosts",
"StaticDir": "/var/lib/containers/storage/overlay-containers/66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda/userdata",
"LogPath": "/var/log/containers/stdouts/gnocchi_metricd.log",
"Name": "gnocchi_metricd",
"RestartCount": 0,
"Driver": "overlay",
"MountLabel": "system_u:object_r:container_file_t:s0:c514,c813",
"ProcessLabel": "system_u:system_r:container_t:s0:c514,c813",
"AppArmorProfile": "",
"EffectiveCaps": null,
"BoundingCaps": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"ExecIDs": [],
"GraphDriver": {
"Name": "overlay",
"Data": {
"LowerDir": "/var/lib/containers/storage/overlay/8cf325ff3583d7b6e10f170f85605e14797460f4ee0fa8d4eef2176c27627a26/diff:/var/lib/containers/storage/overlay/3fcda38b3b3199f8d0a1aa61a20d9561ba4ca4805e09ae18d6b50f2854cd5091/diff:/var/lib/containers/storage/overlay/1344d596d37069ebcdd447b67559396b6d046f2f98a63093f229621c544da013/diff:/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff",
"MergedDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/merged",
"UpperDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/diff",
"WorkDir": "/var/lib/containers/storage/overlay/87b994364ae69db1d3d8ff1e19a5882f230514bce4a3362ee25bfe618f9fa5ee/work"
}
},
"Mounts": [
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/etc/pki/ca-trust/source/anchors",
"type": "bind",
"source": "/etc/pki/ca-trust/source/anchors",
"options": [
"ro",
"rbind",
"rprivate"
]
},
{
"destination": "/var/lib/kolla/config_files/src-ceph",
"type": "bind",
"source": "/etc/ceph",
"options": [
"ro",
"rbind",
"rprivate"
]
}
],
"Dependencies": [],
"NetworkSettings": {
"Bridge": "",
"SandboxID": "",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": [],
"SandboxKey": "",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": ""
},
"ExitCommand": [
"/usr/bin/podman",
"--root",
"/var/lib/containers/storage",
"--runroot",
"/var/run/containers/storage",
"--log-level",
"error",
"--cgroup-manager",
"systemd",
"--tmpdir",
"/var/run/libpod",
"--storage-driver",
"overlay",
"container",
"cleanup",
"66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda"
],
"Namespace": "",
"IsInfra": false,
"HostConfig": {
"ContainerIDFile": "",
"LogConfig": null,
"NetworkMode": "host",
"PortBindings": null,
"AutoRemove": false,
"CapAdd": [],
"CapDrop": [],
"DNS": [],
"DNSOptions": [],
"DNSSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "",
"Cgroup": "host",
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": false,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": [],
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 65536000,
"Runtime": "runc",
"ConsoleSize": null,
"CpuShares": null,
"Memory": 0,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": null,
"BlkioWeightDevice": null,
"BlkioDeviceReadBps": null,
"BlkioDeviceWriteBps": null,
"BlkioDeviceReadIOps": null,
"BlkioDeviceWriteIOps": null,
"CpuPeriod": null,
"CpuQuota": null,
"CpuRealtimePeriod": null,
"CpuRealtimeRuntime": null,
"CpuSetCpus": "",
"CpuSetMems": "",
"Devices": null,
"DiskQuota": 0,
"KernelMemory": null,
"MemoryReservation": null,
"MemorySwap": null,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": [],
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"Tmpfs": []
},
"Config": {
"Hostname": "controller-0",
"Domainname": "",
"User": {
"uid": 0,
"gid": 0
},
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm",
"HOSTNAME=controller-0",
"container=oci",
"KOLLA_CONFIG_STRATEGY=COPY_ALWAYS",
"TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f",
"KOLLA_INSTALL_TYPE=binary",
"KOLLA_INSTALL_METATYPE=rhos",
"KOLLA_DISTRO_PYTHON_VERSION=3.6",
"KOLLA_BASE_DISTRO=rhel",
"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ "
],
"Cmd": [
"dumb-init",
"--single-child",
"--",
"kolla_start"
],
"Image": "192.168.24.1:8787/rhosp15/openstack-gnocchi-metricd:20190819.1",
"Volumes": null,
"WorkingDir": "/",
"Entrypoint": "dumb-init --single-child --",
"Labels": {
"architecture": "x86_64",
"authoritative-source-url": "registry.access.redhat.com",
"batch": "20190819.1",
"build-date": "2019-08-19T20:42:03.096048",
"com.redhat.build-host": "cpt-1004.osbs.prod.upshift.rdu2.redhat.com",
"com.redhat.component": "openstack-gnocchi-metricd-container",
"com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements",
"config_id": "tripleo_step5",
"container_name": "gnocchi_metricd",
"description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd",
"distribution-scope": "public",
"io.k8s.description": "Red Hat OpenStack Platform 15.0 gnocchi-metricd",
"io.k8s.display-name": "Red Hat OpenStack Platform 15.0 gnocchi-metricd",
"io.openshift.expose-services": "",
"io.openshift.tags": "rhosp osp openstack osp-15.0",
"maintainer": "Red Hat, Inc.",
"managed_by": "paunch",
"name": "rhosp15/openstack-gnocchi-metricd",
"release": "58",
"summary": "Red Hat OpenStack Platform 15.0 gnocchi-metricd",
"url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-gnocchi-metricd/images/15.0-58",
"vcs-ref": "18e6ecd9e04f6590526657b85423347b7543391a",
"vcs-type": "git",
"vendor": "Red Hat, Inc.",
"version": "15.0"
},
"Annotations": {
"io.kubernetes.cri-o.ContainerType": "sandbox",
"io.kubernetes.cri-o.TTY": "false"
},
"StopSignal": 15
}
}
]
""".splitlines()
PODMAN_IMAGE_INSPECT = """
[
{
"Id": "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca",
"Digest": "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df",
"RepoTags": [
"192.168.24.1:8787/rhosp15/openstack-rabbitmq:20190819.1",
"192.168.24.1:8787/rhosp15/openstack-rabbitmq:pcmklatest"
],
"RepoDigests": [
"192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df",
"192.168.24.1:8787/rhosp15/openstack-rabbitmq@sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df"
],
"Parent": "",
"Comment": "",
"Created": "2019-08-19T19:39:31.939714Z",
"Config": {
"User": "rabbitmq",
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"container=oci",
"KOLLA_BASE_DISTRO=rhel",
"KOLLA_INSTALL_TYPE=binary",
"KOLLA_INSTALL_METATYPE=rhos",
"KOLLA_DISTRO_PYTHON_VERSION=3.6",
"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ "
],
"Entrypoint": [
"dumb-init",
"--single-child",
"--"
],
"Cmd": [
"kolla_start"
],
"Labels": {
"architecture": "x86_64",
"authoritative-source-url": "registry.access.redhat.com",
"batch": "20190819.1",
"build-date": "2019-08-19T19:38:18.798307",
"com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com",
"com.redhat.component": "openstack-rabbitmq-container",
"com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements",
"description": "Red Hat OpenStack Platform 15.0 rabbitmq",
"distribution-scope": "public",
"io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq",
"io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq",
"io.openshift.expose-services": "",
"io.openshift.tags": "rhosp osp openstack osp-15.0",
"maintainer": "Red Hat, Inc.",
"name": "rhosp15/openstack-rabbitmq",
"release": "64",
"summary": "Red Hat OpenStack Platform 15.0 rabbitmq",
"url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64",
"vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6",
"vcs-type": "git",
"vendor": "Red Hat, Inc.",
"version": "15.0"
},
"StopSignal": "SIGTERM"
},
"Version": "1.13.1",
"Author": "",
"Architecture": "amd64",
"Os": "linux",
"Size": 542316943,
"VirtualSize": 542316943,
"GraphDriver": {
"Name": "overlay",
"Data": {
"LowerDir": "/var/lib/containers/storage/overlay/4faa8d5827f59db011a639ea73621234812291ff51d875f58e1e4197c6239429/diff:/var/lib/containers/storage/overlay/671441d9601355a777b2ce9afceef5a7d0d4890d11ef4f5744e534394ef7c447/diff:/var/lib/containers/storage/overlay/c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3/diff",
"MergedDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/merged",
"UpperDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/diff",
"WorkDir": "/var/lib/containers/storage/overlay/b25ec647038ff1d35285422ddbedb5b5c7d36d64e67fcbbd2a2f205dc2aa1eb5/work"
}
},
"RootFS": {
"Type": "layers",
"Layers": [
"sha256:c7269138daa6b23e16efdcbf7ee323ba42fba93eb2406192ac22631bdd0cb4e3",
"sha256:786011f2f6269cc2512d58fd7d6c8feac1330754b12b4ffacfcaa8bd685ed898",
"sha256:d74075ef7bbc7f840dec3cafc7bf8f82e900ee2f8b4a4d328448965bd8e398ce",
"sha256:272314807b476c2c183edd6427bd450cea885976446afcdbd6b52ad47943a60f"
]
},
"Labels": {
"architecture": "x86_64",
"authoritative-source-url": "registry.access.redhat.com",
"batch": "20190819.1",
"build-date": "2019-08-19T19:38:18.798307",
"com.redhat.build-host": "cpt-1003.osbs.prod.upshift.rdu2.redhat.com",
"com.redhat.component": "openstack-rabbitmq-container",
"com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements",
"description": "Red Hat OpenStack Platform 15.0 rabbitmq",
"distribution-scope": "public",
"io.k8s.description": "Red Hat OpenStack Platform 15.0 rabbitmq",
"io.k8s.display-name": "Red Hat OpenStack Platform 15.0 rabbitmq",
"io.openshift.expose-services": "",
"io.openshift.tags": "rhosp osp openstack osp-15.0",
"maintainer": "Red Hat, Inc.",
"name": "rhosp15/openstack-rabbitmq",
"release": "64",
"summary": "Red Hat OpenStack Platform 15.0 rabbitmq",
"url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp15/openstack-rabbitmq/images/15.0-64",
"vcs-ref": "292efe508dcdf588e92503273c5abcc89af574d6",
"vcs-type": "git",
"vendor": "Red Hat, Inc.",
"version": "15.0"
},
"Annotations": {},
"ManifestType": "application/vnd.docker.distribution.manifest.v2+json",
"User": "rabbitmq",
"History": [
{
"created": "2019-07-15T05:10:57.589513378Z",
"comment": "Imported from -"
},
{
"created": "2019-07-15T05:11:04.220661Z"
},
{
"created": "2019-08-19T19:24:22.99993Z"
},
{
"created": "2019-08-19T19:39:31.939714Z"
}
]
}
]
""".splitlines()
PODMAN_CONTAINER_INSPECT_TRUNCATED = """
[
{
"ID": "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda",
"Created": "2019-08-21T10:38:34.753548542Z",
"Path": "dumb-init",
"""
def test_podman_object_container_inspect():
result = podman_inspect.PodmanInspect(context_wrap(PODMAN_CONTAINER_INSPECT))
assert result.get('ID') == "66db151828e9beede0cdd9c17fc9bd5ebb5d125dd036f7230bc6b6433e5c0dda"
assert result.get('NetworkSettings').get('HairpinMode') is False
assert result.get('Config').get('Env') == [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm",
"HOSTNAME=controller-0",
"container=oci",
"KOLLA_CONFIG_STRATEGY=COPY_ALWAYS",
"TRIPLEO_CONFIG_HASH=3faca5d7029273bb994631cb4a075e0f",
"KOLLA_INSTALL_TYPE=binary",
"KOLLA_INSTALL_METATYPE=rhos",
"KOLLA_DISTRO_PYTHON_VERSION=3.6",
"KOLLA_BASE_DISTRO=rhel",
"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ "
]
assert result.get('GraphDriver').get('Name') == 'overlay'
def test_podman_object_image_inspect():
result = podman_inspect.PodmanInspect(context_wrap(PODMAN_IMAGE_INSPECT))
assert result.get('Id') == "013125b8a088f45be8f85f88b5504f05c02463b10a6eea2b66809a262bb911ca"
assert result.get('Size') == 542316943
assert result.get('Digest') == "sha256:f9662cdd45e3db182372a4fa6bfff10e1c601cc785bac09ccae3b18f0bc429df"
def test_podman_container_inspect_truncated_input():
with pytest.raises(SkipException):
podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT_TRUNCATED))
def test_doc_test():
dic = podman_inspect.PodmanInspectContainer(context_wrap(PODMAN_CONTAINER_INSPECT))
dii = podman_inspect.PodmanInspectImage(context_wrap(PODMAN_IMAGE_INSPECT))
env = {
'container': dic,
'image': dii,
}
failed, total = doctest.testmod(podman_inspect, globs=env)
assert failed == 0
|
mapr/hue
|
refs/heads/hue-3.9.0-mapr
|
desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf3/table.py
|
56
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import TABLENS
from .element import Element
# Autogenerated
def Body(**args):
return Element(qname = (TABLENS,'body'), **args)
def CalculationSettings(**args):
return Element(qname = (TABLENS,'calculation-settings'), **args)
def CellAddress(**args):
return Element(qname = (TABLENS,'cell-address'), **args)
def CellContentChange(**args):
return Element(qname = (TABLENS,'cell-content-change'), **args)
def CellContentDeletion(**args):
return Element(qname = (TABLENS,'cell-content-deletion'), **args)
def CellRangeSource(**args):
return Element(qname = (TABLENS,'cell-range-source'), **args)
def ChangeDeletion(**args):
return Element(qname = (TABLENS,'change-deletion'), **args)
def ChangeTrackTableCell(**args):
return Element(qname = (TABLENS,'change-track-table-cell'), **args)
def Consolidation(**args):
return Element(qname = (TABLENS,'consolidation'), **args)
def ContentValidation(**args):
return Element(qname = (TABLENS,'content-validation'), **args)
def ContentValidations(**args):
return Element(qname = (TABLENS,'content-validations'), **args)
def CoveredTableCell(**args):
return Element(qname = (TABLENS,'covered-table-cell'), **args)
def CutOffs(**args):
return Element(qname = (TABLENS,'cut-offs'), **args)
def DataPilotDisplayInfo(**args):
return Element(qname = (TABLENS,'data-pilot-display-info'), **args)
def DataPilotField(**args):
return Element(qname = (TABLENS,'data-pilot-field'), **args)
def DataPilotFieldReference(**args):
return Element(qname = (TABLENS,'data-pilot-field-reference'), **args)
def DataPilotGroup(**args):
return Element(qname = (TABLENS,'data-pilot-group'), **args)
def DataPilotGroupMember(**args):
return Element(qname = (TABLENS,'data-pilot-group-member'), **args)
def DataPilotGroups(**args):
return Element(qname = (TABLENS,'data-pilot-groups'), **args)
def DataPilotLayoutInfo(**args):
return Element(qname = (TABLENS,'data-pilot-layout-info'), **args)
def DataPilotLevel(**args):
return Element(qname = (TABLENS,'data-pilot-level'), **args)
def DataPilotMember(**args):
return Element(qname = (TABLENS,'data-pilot-member'), **args)
def DataPilotMembers(**args):
return Element(qname = (TABLENS,'data-pilot-members'), **args)
def DataPilotSortInfo(**args):
return Element(qname = (TABLENS,'data-pilot-sort-info'), **args)
def DataPilotSubtotal(**args):
return Element(qname = (TABLENS,'data-pilot-subtotal'), **args)
def DataPilotSubtotals(**args):
return Element(qname = (TABLENS,'data-pilot-subtotals'), **args)
def DataPilotTable(**args):
return Element(qname = (TABLENS,'data-pilot-table'), **args)
def DataPilotTables(**args):
return Element(qname = (TABLENS,'data-pilot-tables'), **args)
def DatabaseRange(**args):
return Element(qname = (TABLENS,'database-range'), **args)
def DatabaseRanges(**args):
return Element(qname = (TABLENS,'database-ranges'), **args)
def DatabaseSourceQuery(**args):
return Element(qname = (TABLENS,'database-source-query'), **args)
def DatabaseSourceSql(**args):
return Element(qname = (TABLENS,'database-source-sql'), **args)
def DatabaseSourceTable(**args):
return Element(qname = (TABLENS,'database-source-table'), **args)
def DdeLink(**args):
return Element(qname = (TABLENS,'dde-link'), **args)
def DdeLinks(**args):
return Element(qname = (TABLENS,'dde-links'), **args)
def Deletion(**args):
return Element(qname = (TABLENS,'deletion'), **args)
def Deletions(**args):
return Element(qname = (TABLENS,'deletions'), **args)
def Dependencies(**args):
return Element(qname = (TABLENS,'dependencies'), **args)
def Dependency(**args):
return Element(qname = (TABLENS,'dependency'), **args)
def Detective(**args):
return Element(qname = (TABLENS,'detective'), **args)
def ErrorMacro(**args):
return Element(qname = (TABLENS,'error-macro'), **args)
def ErrorMessage(**args):
return Element(qname = (TABLENS,'error-message'), **args)
def EvenColumns(**args):
return Element(qname = (TABLENS,'even-columns'), **args)
def EvenRows(**args):
return Element(qname = (TABLENS,'even-rows'), **args)
def Filter(**args):
return Element(qname = (TABLENS,'filter'), **args)
def FilterAnd(**args):
return Element(qname = (TABLENS,'filter-and'), **args)
def FilterCondition(**args):
return Element(qname = (TABLENS,'filter-condition'), **args)
def FilterOr(**args):
return Element(qname = (TABLENS,'filter-or'), **args)
def FirstColumn(**args):
return Element(qname = (TABLENS,'first-column'), **args)
def FirstRow(**args):
return Element(qname = (TABLENS,'first-row'), **args)
def HelpMessage(**args):
return Element(qname = (TABLENS,'help-message'), **args)
def HighlightedRange(**args):
return Element(qname = (TABLENS,'highlighted-range'), **args)
def Insertion(**args):
return Element(qname = (TABLENS,'insertion'), **args)
def InsertionCutOff(**args):
return Element(qname = (TABLENS,'insertion-cut-off'), **args)
def Iteration(**args):
return Element(qname = (TABLENS,'iteration'), **args)
def LabelRange(**args):
return Element(qname = (TABLENS,'label-range'), **args)
def LabelRanges(**args):
return Element(qname = (TABLENS,'label-ranges'), **args)
def LastColumn(**args):
return Element(qname = (TABLENS,'last-column'), **args)
def LastRow(**args):
return Element(qname = (TABLENS,'last-row'), **args)
def Movement(**args):
return Element(qname = (TABLENS,'movement'), **args)
def MovementCutOff(**args):
return Element(qname = (TABLENS,'movement-cut-off'), **args)
def NamedExpression(**args):
return Element(qname = (TABLENS,'named-expression'), **args)
def NamedExpressions(**args):
return Element(qname = (TABLENS,'named-expressions'), **args)
def NamedRange(**args):
return Element(qname = (TABLENS,'named-range'), **args)
def NullDate(**args):
return Element(qname = (TABLENS,'null-date'), **args)
def OddColumns(**args):
return Element(qname = (TABLENS,'odd-columns'), **args)
def OddRows(**args):
return Element(qname = (TABLENS,'odd-rows'), **args)
def Operation(**args):
return Element(qname = (TABLENS,'operation'), **args)
def Previous(**args):
return Element(qname = (TABLENS,'previous'), **args)
def Scenario(**args):
return Element(qname = (TABLENS,'scenario'), **args)
def Shapes(**args):
return Element(qname = (TABLENS,'shapes'), **args)
def Sort(**args):
return Element(qname = (TABLENS,'sort'), **args)
def SortBy(**args):
return Element(qname = (TABLENS,'sort-by'), **args)
def SortGroups(**args):
return Element(qname = (TABLENS,'sort-groups'), **args)
def SourceCellRange(**args):
return Element(qname = (TABLENS,'source-cell-range'), **args)
def SourceRangeAddress(**args):
return Element(qname = (TABLENS,'source-range-address'), **args)
def SourceService(**args):
return Element(qname = (TABLENS,'source-service'), **args)
def SubtotalField(**args):
return Element(qname = (TABLENS,'subtotal-field'), **args)
def SubtotalRule(**args):
return Element(qname = (TABLENS,'subtotal-rule'), **args)
def SubtotalRules(**args):
return Element(qname = (TABLENS,'subtotal-rules'), **args)
def Table(**args):
return Element(qname = (TABLENS,'table'), **args)
def TableCell(**args):
return Element(qname = (TABLENS,'table-cell'), **args)
def TableColumn(**args):
return Element(qname = (TABLENS,'table-column'), **args)
def TableColumnGroup(**args):
return Element(qname = (TABLENS,'table-column-group'), **args)
def TableColumns(**args):
return Element(qname = (TABLENS,'table-columns'), **args)
def TableHeaderColumns(**args):
return Element(qname = (TABLENS,'table-header-columns'), **args)
def TableHeaderRows(**args):
return Element(qname = (TABLENS,'table-header-rows'), **args)
def TableRow(**args):
return Element(qname = (TABLENS,'table-row'), **args)
def TableRowGroup(**args):
return Element(qname = (TABLENS,'table-row-group'), **args)
def TableRows(**args):
return Element(qname = (TABLENS,'table-rows'), **args)
def TableSource(**args):
return Element(qname = (TABLENS,'table-source'), **args)
def TableTemplate(**args):
return Element(qname = (TABLENS,'table-template'), **args)
def TargetRangeAddress(**args):
return Element(qname = (TABLENS,'target-range-address'), **args)
def TrackedChanges(**args):
return Element(qname = (TABLENS,'tracked-changes'), **args)
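# Illustrative usage sketch (not part of the generated module), assuming the
# usual odfpy Element API: each factory above just builds an Element in the
# table namespace, so a spreadsheet fragment can be assembled like this:
#
#   table = Table(name="Sheet1")
#   row = TableRow()
#   row.addElement(TableCell(valuetype="string"))
#   table.addElement(row)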
|
aalopes/codeSnippets
|
refs/heads/master
|
tcp_ip/python/server/server.py
|
1
|
#!/usr/bin/python
""" TCP/IP server
Alexandre Lopes
15.12.2015
"""
import select
import socket
import time
import signal
import sys
import locale
from difflib import Differ
# functions -------------------------------------------------------------------
def sigintHandler(signal, frame):
"""
    SIGINT handler for when the user presses Ctrl+C, so that the server can be
    shut down gracefully.
    We do not disconnect the client, since that would require knowing whether
    a client is connected, which seems unnecessary for our purposes.
"""
print ('\nCtrl+C pressed! Shutting down server.')
sys.exit(0)
# declarations ----------------------------------------------------------------
HOST = '' # Symbolic name meaning all available interfaces
PORT = 59263 # Arbitrary non-privileged port
TIMEOUT = 60 # Timeout (s)
# main ------------------------------------------------------------------------
print ('Server started ...')
# register interrupt handler
signal.signal(signal.SIGINT, sigintHandler)
# create socket and bind to the right port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
# listen to connections
s.listen(1)
# server loop
quitServer = 0
while quitServer != 1:
# accept incoming connection
# as it stands if another connection is attempted, it will replace
# the previous one. Therefore use only 1 client at a time!
conn, addr = s.accept()
print ('Client on ', addr, ' connected.')
# loop to read data from our socket
while 1:
        # receive incoming data only if there is data to be received!
        # This way we can time out if the client hangs for some reason,
        # avoiding getting stuck at conn.recv, and move on to accept a new connection.
ready = select.select([conn], [], [], TIMEOUT)
if ready[0]:
data = conn.recv(1024)
if not data:
print ('Client closed socket!')
break
else:
# data parsing ---------
                # remove trailing null bytes (C strings are null-terminated);
                # '\0' and '\x00' are the same byte, so one rstrip suffices
                data = data.rstrip('\x00')
# perform execute
if data == 'quit':
conn.close()
print ('Client disconnected.')
elif data == 'exit':
print("Closing server!")
quitServer = 1
break
elif data == 'echo':
print (data)
# echo
conn.sendall('echo')
else:
print (data)
else:
print ('Client timed out!')
break
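# Minimal client sketch (illustrative, not part of this file), assuming the
# server above is running locally; it sends the 'echo' and 'exit' commands that
# the command parser in the server loop understands:
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 59263))
#   c.sendall('echo')        # server prints the command and echoes it back
#   print(c.recv(1024))
#   c.sendall('exit')        # tells the server loop to shut down
#   c.close()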
|
raccoongang/edx-platform
|
refs/heads/ginkgo-rg
|
lms/djangoapps/django_comment_client/tests/mock_cs_server/test_mock_cs_server.py
|
10
|
import json
import threading
import unittest
import urllib2
from nose.plugins.skip import SkipTest
from django_comment_client.tests.mock_cs_server.mock_cs_server import MockCommentServiceServer
class MockCommentServiceServerTest(unittest.TestCase):
'''
    Tests for the mock Comment Service server, which listens on a local
    port and responds with pre-defined JSON messages.
'''
def setUp(self):
super(MockCommentServiceServerTest, self).setUp()
# This is a test of the test setup,
# so it does not need to run as part of the unit test suite
# You can re-enable it by commenting out the line below
raise SkipTest
# Create the server
server_port = 4567
self.server_url = 'http://127.0.0.1:%d' % server_port
# Start up the server and tell it that by default it should
# return this as its json response
self.expected_response = {'username': 'user100', 'external_id': '4'}
self.server = MockCommentServiceServer(port_num=server_port,
response=self.expected_response)
self.addCleanup(self.server.shutdown)
# Start the server in a separate daemon thread
server_thread = threading.Thread(target=self.server.serve_forever)
server_thread.daemon = True
server_thread.start()
def test_new_user_request(self):
"""
Test the mock comment service using an example
of how you would create a new user
"""
# Send a request
values = {'username': u'user100',
'external_id': '4', 'email': u'user100@edx.org'}
data = json.dumps(values)
headers = {'Content-Type': 'application/json', 'Content-Length': len(data), 'X-Edx-Api-Key': 'TEST_API_KEY'}
req = urllib2.Request(self.server_url + '/api/v1/users/4', data, headers)
# Send the request to the mock cs server
response = urllib2.urlopen(req)
# Receive the reply from the mock cs server
response_dict = json.loads(response.read())
# You should have received the response specified in the setup above
self.assertEqual(response_dict, self.expected_response)
|
h0nIg/ansible-modules-extras
|
refs/heads/devel
|
packaging/language/pear.py
|
157
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Afterburn <http://github.com/afterburn>
# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
# (c) 2015, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pear
short_description: Manage pear/pecl packages
description:
- Manage PHP packages with the pear package manager.
version_added: 2.0
author:
- "'jonathan.lestrelin' <jonathan.lestrelin@gmail.com>"
options:
name:
description:
- Name of the package to install, upgrade, or remove.
required: true
state:
description:
- Desired state of the package.
required: false
default: "present"
choices: ["present", "absent", "latest"]
'''
EXAMPLES = '''
# Install pear package
- pear: name=Net_URL2 state=present
# Install pecl package
- pear: name=pecl/json_post state=present
# Upgrade package
- pear: name=Net_URL2 state=latest
# Remove packages
- pear: name=Net_URL2,pecl/json_post state=absent
'''
import os
def get_local_version(pear_output):
"""Take pear remoteinfo output and get the installed version"""
lines = pear_output.split('\n')
for line in lines:
if 'Installed ' in line:
installed = line.rsplit(None, 1)[-1].strip()
if installed == '-': continue
return installed
return None
def get_repository_version(pear_output):
"""Take pear remote-info output and get the latest version"""
lines = pear_output.split('\n')
for line in lines:
if 'Latest ' in line:
return line.rsplit(None, 1)[-1].strip()
return None
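# Illustrative example (not from the original module): both helpers scan the
# multi-line output of `pear remote-info` and take the last whitespace-separated
# token from the matching line, e.g.
#
#   sample = "Package Net_URL2\nInstalled 2.0.0\nLatest    2.2.1\n"
#   get_local_version(sample)        # -> '2.0.0'
#   get_repository_version(sample)   # -> '2.2.1'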
def query_package(module, name, state="present"):
"""Query the package status in both the local system and the repository.
Returns a boolean to indicate if the package is installed,
and a second boolean to indicate if the package is up-to-date."""
if state == "present":
lcmd = "pear info %s" % (name)
lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
if lrc != 0:
# package is not installed locally
return False, False
rcmd = "pear remote-info %s" % (name)
rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
# get the version installed locally (if any)
lversion = get_local_version(rstdout)
# get the version in the repository
rversion = get_repository_version(rstdout)
if rrc == 0:
# Return True to indicate that the package is installed locally,
# and the result of the version number comparison
# to determine if the package is up-to-date.
return True, (lversion == rversion)
return False, False
def remove_packages(module, packages):
remove_c = 0
    # Using a for loop so that, in case of an error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
installed, updated = query_package(module, package)
if not installed:
continue
cmd = "pear uninstall %s" % (package)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, state, packages):
install_c = 0
for i, package in enumerate(packages):
# if the package is installed and state == present
# or state == latest and is up-to-date then skip
installed, updated = query_package(module, package)
if installed and (state == 'present' or (state == 'latest' and updated)):
continue
if state == 'present':
command = 'install'
if state == 'latest':
command = 'upgrade'
cmd = "pear %s %s" % (command, package)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to install %s" % (package))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
module.exit_json(changed=False, msg="package(s) already installed")
def check_packages(module, packages, state):
would_be_changed = []
for package in packages:
installed, updated = query_package(module, package)
if ((state in ["present", "latest"] and not installed) or
(state == "absent" and installed) or
(state == "latest" and not updated)):
would_be_changed.append(package)
if would_be_changed:
if state == "absent":
state = "removed"
module.exit_json(changed=True, msg="%s package(s) would be %s" % (
len(would_be_changed), state))
else:
        module.exit_json(changed=False, msg="package(s) already %s" % state)
def exe_exists(program):
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
return True
return False
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(aliases=['pkg']),
state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed'])),
required_one_of = [['name']],
supports_check_mode = True)
if not exe_exists("pear"):
module.fail_json(msg="cannot find pear executable in PATH")
p = module.params
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
elif p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p['name']:
pkgs = p['name'].split(',')
pkg_files = []
for i, pkg in enumerate(pkgs):
pkg_files.append(None)
if module.check_mode:
check_packages(module, pkgs, p['state'])
if p['state'] in ['present', 'latest']:
install_packages(module, p['state'], pkgs)
elif p['state'] == 'absent':
remove_packages(module, pkgs)
# import module snippets
from ansible.module_utils.basic import *
main()
|
ralphbean/ansible
|
refs/heads/devel
|
v2/ansible/plugins/lookup/indexed_items.py
|
127
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, variables, **kwargs):
if not isinstance(terms, list):
raise AnsibleError("with_indexed_items expects a list")
items = self._flatten(terms)
return zip(range(len(items)), items)
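        # Illustrative result (not part of the plugin): for
        # `with_indexed_items: [['a', 'b'], ['c']]` the terms flatten to
        # ['a', 'b', 'c'] and run() returns [(0, 'a'), (1, 'b'), (2, 'c')],
        # exposed in the loop as item.0 (the index) and item.1 (the value).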
|
mcgachey/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/management/commands/delete_course.py
|
42
|
"""
Command for deleting courses
Arguments:
arg1 (str): Course key of the course to delete
arg2 (str): 'commit'
Returns:
none
"""
from django.core.management.base import BaseCommand, CommandError
from .prompt import query_yes_no
from contentstore.utils import delete_course_and_groups
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
class Command(BaseCommand):
"""
Delete a MongoDB backed course
"""
help = '''Delete a MongoDB backed course'''
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("Arguments missing: 'org/number/run commit'")
if len(args) == 1:
if args[0] == 'commit':
raise CommandError("Delete_course requires a course_key <org/number/run> argument.")
else:
raise CommandError("Delete_course requires a commit argument at the end")
elif len(args) == 2:
try:
course_key = CourseKey.from_string(args[0])
except InvalidKeyError:
try:
course_key = SlashSeparatedCourseKey.from_deprecated_string(args[0])
except InvalidKeyError:
raise CommandError("Invalid course_key: '%s'. Proper syntax: 'org/number/run commit' " % args[0])
if args[1] != 'commit':
raise CommandError("Delete_course requires a commit argument at the end")
elif len(args) > 2:
raise CommandError("Too many arguments! Expected <course_key> <commit>")
if not modulestore().get_course(course_key):
raise CommandError("Course with '%s' key not found." % args[0])
print 'Actually going to delete the %s course from DB....' % args[0]
if query_yes_no("Deleting course {0}. Confirm?".format(course_key), default="no"):
if query_yes_no("Are you sure. This action cannot be undone!", default="no"):
delete_course_and_groups(course_key, ModuleStoreEnum.UserID.mgmt_command)
print "Deleted course {}".format(course_key)
|
LiveZenLK/CeygateERP
|
refs/heads/master
|
addons/product_expiry/product_expiry.py
|
17
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
import openerp
from openerp import api, models
from openerp.osv import fields, osv
class stock_production_lot(osv.osv):
_inherit = 'stock.production.lot'
def _get_date(dtype):
"""Return a function to compute the limit date for this type"""
def calc_date(self, cr, uid, context=None):
"""Compute the limit date for a given date"""
if context is None:
context = {}
if not context.get('product_id', False):
date = False
else:
product = openerp.registry(cr.dbname)['product.product'].browse(
cr, uid, context['product_id'])
duration = getattr(product, dtype)
# set date to False when no expiry time specified on the product
date = duration and (datetime.datetime.today()
+ datetime.timedelta(days=duration))
return date and date.strftime('%Y-%m-%d %H:%M:%S') or False
return calc_date
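    # Illustrative behaviour (not in the original source): if the product in the
    # context has life_time = 30, _get_date('life_time') yields a default of
    # today + 30 days formatted as '%Y-%m-%d %H:%M:%S'; with no duration set (or
    # no product in the context) the default is False.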
_columns = {
'life_date': fields.datetime('End of Life Date',
help='This is the date on which the goods with this Serial Number may become dangerous and must not be consumed.'),
'use_date': fields.datetime('Best before Date',
help='This is the date on which the goods with this Serial Number start deteriorating, without being dangerous yet.'),
'removal_date': fields.datetime('Removal Date',
help='This is the date on which the goods with this Serial Number should be removed from the stock.'),
'alert_date': fields.datetime('Alert Date',
help="This is the date on which an alert should be notified about the goods with this Serial Number."),
}
# Assign dates according to products data
def create(self, cr, uid, vals, context=None):
newid = super(stock_production_lot, self).create(cr, uid, vals, context=context)
obj = self.browse(cr, uid, newid, context=context)
towrite = []
for f in ('life_date', 'use_date', 'removal_date', 'alert_date'):
if not getattr(obj, f):
towrite.append(f)
context = dict(context or {})
context['product_id'] = obj.product_id.id
self.write(cr, uid, [obj.id], self.default_get(cr, uid, towrite, context=context))
return newid
_defaults = {
'life_date': _get_date('life_time'),
'use_date': _get_date('use_time'),
'removal_date': _get_date('removal_time'),
'alert_date': _get_date('alert_time'),
}
# Onchange added in new api to avoid having to change views
class StockProductionLot(models.Model):
_inherit = 'stock.production.lot'
@api.onchange('product_id')
def _onchange_product(self):
defaults = self.with_context(
product_id=self.product_id.id).default_get(
['life_date', 'use_date', 'removal_date', 'alert_date'])
for field, value in defaults.items():
setattr(self, field, value)
class stock_quant(osv.osv):
_inherit = 'stock.quant'
def _get_quants(self, cr, uid, ids, context=None):
return self.pool.get('stock.quant').search(cr, uid, [('lot_id', 'in', ids)], context=context)
_columns = {
'removal_date': fields.related('lot_id', 'removal_date', type='datetime', string='Removal Date',
store={
'stock.quant': (lambda self, cr, uid, ids, ctx: ids, ['lot_id'], 20),
'stock.production.lot': (_get_quants, ['removal_date'], 20),
}),
}
def apply_removal_strategy(self, cr, uid, qty, move, ops=False, domain=None, removal_strategy='fifo', context=None):
if removal_strategy == 'fefo':
order = 'removal_date, in_date, id'
return self._quants_get_order(cr, uid, qty, move, ops=ops, domain=domain, orderby=order, context=context)
return super(stock_quant, self).apply_removal_strategy(cr, uid, qty, move, ops=ops, domain=domain,
removal_strategy=removal_strategy, context=context)
class product_product(osv.osv):
_inherit = 'product.template'
_columns = {
'life_time': fields.integer('Product Life Time',
            help='When a new Serial Number is issued, this is the number of days before the goods may become dangerous and must not be consumed.'),
'use_time': fields.integer('Product Use Time',
            help='When a new Serial Number is issued, this is the number of days before the goods start deteriorating, without being dangerous yet.'),
'removal_time': fields.integer('Product Removal Time',
            help='When a new Serial Number is issued, this is the number of days before the goods should be removed from the stock.'),
'alert_time': fields.integer('Product Alert Time',
            help='When a new Serial Number is issued, this is the number of days before an alert should be notified.'),
}
|
crcresearch/osf.io
|
refs/heads/develop
|
api/base/middleware.py
|
11
|
import gc
import StringIO
import cProfile
import pstats
import threading
from django.conf import settings
from raven.contrib.django.raven_compat.models import sentry_exception_handler
import corsheaders.middleware
from framework.postcommit_tasks.handlers import (
postcommit_after_request,
postcommit_before_request
)
from framework.celery_tasks.handlers import (
celery_before_request,
celery_after_request,
celery_teardown_request
)
from .api_globals import api_globals
from api.base import settings as api_settings
class CeleryTaskMiddleware(object):
"""Celery Task middleware."""
def process_request(self, request):
celery_before_request()
def process_exception(self, request, exception):
"""If an exception occurs, clear the celery task queue so process_response has nothing."""
sentry_exception_handler(request=request)
celery_teardown_request(error=True)
return None
def process_response(self, request, response):
"""Clear the celery task queue if the response status code is 400 or above"""
celery_after_request(response, base_status_code_error=400)
celery_teardown_request()
return response
class DjangoGlobalMiddleware(object):
"""
Store request object on a thread-local variable for use in database caching mechanism.
"""
def process_request(self, request):
api_globals.request = request
def process_exception(self, request, exception):
sentry_exception_handler(request=request)
api_globals.request = None
return None
def process_response(self, request, response):
api_globals.request = None
if api_settings.DEBUG and len(gc.get_referents(request)) > 2:
raise Exception('You wrote a memory leak. Stop it')
return response
class CorsMiddleware(corsheaders.middleware.CorsMiddleware):
"""
Augment CORS origin white list with the Institution model's domains.
"""
_context = threading.local()
def origin_not_found_in_white_lists(self, origin, url):
settings.CORS_ORIGIN_WHITELIST += api_settings.ORIGINS_WHITELIST
# Check if origin is in the dynamic custom domain whitelist
not_found = super(CorsMiddleware, self).origin_not_found_in_white_lists(origin, url)
        # Check if this is a cross-origin request that uses the Authorization header
if not_found:
if not self._context.request.COOKIES:
if self._context.request.META.get('HTTP_AUTHORIZATION'):
return
elif (
self._context.request.method == 'OPTIONS' and
'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in self._context.request.META and
'authorization' in map(
lambda h: h.strip(),
self._context.request.META.get('HTTP_ACCESS_CONTROL_REQUEST_HEADERS', '').split(',')
)
):
return None
return not_found
def process_response(self, request, response):
self._context.request = request
try:
return super(CorsMiddleware, self).process_response(request, response)
finally:
self._context.request = None
class PostcommitTaskMiddleware(object):
"""
Handle postcommit tasks for django.
"""
def process_request(self, request):
postcommit_before_request()
def process_response(self, request, response):
postcommit_after_request(response=response, base_status_error_code=400)
return response
# Adapted from http://www.djangosnippets.org/snippets/186/
# Original author: udfalkso
# Modified by: Shwagroo Team and Gun.io
# Modified by: COS
class ProfileMiddleware(object):
"""
Displays hotshot profiling for any view.
http://yoursite.com/yourview/?prof
Add the "prof" key to query string by appending ?prof (or &prof=)
and you'll see the profiling results in your browser.
    It is only available in Django's debug mode (or to superusers otherwise),
    and you really shouldn't add this middleware to any production configuration.
"""
def process_request(self, request):
if (settings.DEBUG or request.user.is_superuser) and 'prof' in request.GET:
self.prof = cProfile.Profile()
def process_view(self, request, callback, callback_args, callback_kwargs):
if (settings.DEBUG or request.user.is_superuser) and 'prof' in request.GET:
self.prof.enable()
def process_response(self, request, response):
if (settings.DEBUG or request.user.is_superuser) and 'prof' in request.GET:
self.prof.disable()
s = StringIO.StringIO()
ps = pstats.Stats(self.prof, stream=s).sort_stats('cumtime')
ps.print_stats()
response.content = s.getvalue()
return response
|
cloudstax/firecamp
|
refs/heads/master
|
vendor/lambda-python-requests/chardet/mbcharsetprober.py
|
289
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
class MultiByteCharSetProber(CharSetProber):
"""
MultiByteCharSetProber
"""
def __init__(self, lang_filter=None):
super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
self.distribution_analyzer = None
self.coding_sm = None
self._last_char = [0, 0]
def reset(self):
super(MultiByteCharSetProber, self).reset()
if self.coding_sm:
self.coding_sm.reset()
if self.distribution_analyzer:
self.distribution_analyzer.reset()
self._last_char = [0, 0]
@property
def charset_name(self):
raise NotImplementedError
@property
def language(self):
raise NotImplementedError
def feed(self, byte_str):
for i in range(len(byte_str)):
coding_state = self.coding_sm.next_state(byte_str[i])
if coding_state == MachineState.ERROR:
self.logger.debug('%s %s prober hit error at byte %s',
self.charset_name, self.language, i)
self._state = ProbingState.NOT_ME
break
elif coding_state == MachineState.ITS_ME:
self._state = ProbingState.FOUND_IT
break
elif coding_state == MachineState.START:
char_len = self.coding_sm.get_current_charlen()
if i == 0:
self._last_char[1] = byte_str[0]
self.distribution_analyzer.feed(self._last_char, char_len)
else:
self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
char_len)
self._last_char[0] = byte_str[-1]
if self.state == ProbingState.DETECTING:
if (self.distribution_analyzer.got_enough_data() and
(self.get_confidence() > self.SHORTCUT_THRESHOLD)):
self._state = ProbingState.FOUND_IT
return self.state
def get_confidence(self):
return self.distribution_analyzer.get_confidence()
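# Sketch of the intended subclass wiring (an assumption based on how this base
# class is commonly used in chardet, not part of this file): a concrete prober
# supplies the two collaborators that feed() relies on, e.g.
#
#   class SomeMultiByteProber(MultiByteCharSetProber):
#       def __init__(self):
#           super(SomeMultiByteProber, self).__init__()
#           self.coding_sm = CodingStateMachine(SOME_SM_MODEL)       # hypothetical state-machine model
#           self.distribution_analyzer = SomeDistributionAnalysis()  # hypothetical analyzer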
|
guewen/odoo
|
refs/heads/master
|
addons/payment_paypal/controllers/__init__.py
|
4497
|
# -*- coding: utf-8 -*-
import main
|
torwag/micropython
|
refs/heads/master
|
tests/bench/func_args-2-pos_default_2_of_3.py
|
102
|
import bench
def func(a, b=1, c=2):
pass
def test(num):
for i in iter(range(num)):
func(i)
bench.run(test)
|
mzdaniel/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/twisted/conch/scripts/conch.py
|
17
|
# -*- test-case-name: twisted.conch.test.test_conch -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# $Id: conch.py,v 1.65 2004/03/11 00:29:14 z3p Exp $
#""" Implementation module for the `conch` command.
#"""
from twisted.conch.client import connect, default, options
from twisted.conch.error import ConchError
from twisted.conch.ssh import connection, common
from twisted.conch.ssh import session, forwarding, channel
from twisted.internet import reactor, stdio, task
from twisted.python import log, usage
import os, sys, getpass, struct, tty, fcntl, signal
class ClientOptions(options.ConchOptions):
synopsis = """Usage: conch [options] host [command]
"""
longdesc = ("conch is a SSHv2 client that allows logging into a remote "
"machine and executing commands.")
optParameters = [['escape', 'e', '~'],
['localforward', 'L', None, 'listen-port:host:port Forward local port to remote address'],
['remoteforward', 'R', None, 'listen-port:host:port Forward remote port to local address'],
]
optFlags = [['null', 'n', 'Redirect input from /dev/null.'],
['fork', 'f', 'Fork to background after authentication.'],
['tty', 't', 'Tty; allocate a tty even if command is given.'],
['notty', 'T', 'Do not allocate a tty.'],
['noshell', 'N', 'Do not execute a shell or command.'],
['subsystem', 's', 'Invoke command (mandatory) as SSH2 subsystem.'],
]
#zsh_altArgDescr = {"foo":"use this description for foo instead"}
#zsh_multiUse = ["foo", "bar"]
#zsh_mutuallyExclusive = [("foo", "bar"), ("bar", "baz")]
#zsh_actions = {"foo":'_files -g "*.foo"', "bar":"(one two three)"}
zsh_actionDescr = {"localforward":"listen-port:host:port",
"remoteforward":"listen-port:host:port"}
zsh_extras = ["*:command: "]
localForwards = []
remoteForwards = []
def opt_escape(self, esc):
"Set escape character; ``none'' = disable"
if esc == 'none':
self['escape'] = None
elif esc[0] == '^' and len(esc) == 2:
self['escape'] = chr(ord(esc[1])-64)
elif len(esc) == 1:
self['escape'] = esc
else:
sys.exit("Bad escape character '%s'." % esc)
def opt_localforward(self, f):
"Forward local port to remote address (lport:host:port)"
localPort, remoteHost, remotePort = f.split(':') # doesn't do v6 yet
localPort = int(localPort)
remotePort = int(remotePort)
self.localForwards.append((localPort, (remoteHost, remotePort)))
def opt_remoteforward(self, f):
"""Forward remote port to local address (rport:host:port)"""
remotePort, connHost, connPort = f.split(':') # doesn't do v6 yet
remotePort = int(remotePort)
connPort = int(connPort)
self.remoteForwards.append((remotePort, (connHost, connPort)))
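    # Illustrative forwarding specs (not in the original file), matching the
    # listen-port:host:port format parsed above:
    #   -L 8080:intranet.example.com:80   tunnels local port 8080 to the remote address
    #   -R 2222:127.0.0.1:22              tunnels remote port 2222 back to the local host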
def parseArgs(self, host, *command):
self['host'] = host
self['command'] = ' '.join(command)
# Rest of code in "run"
options = None
conn = None
exitStatus = 0
old = None
_inRawMode = 0
_savedRawMode = None
def run():
global options, old
args = sys.argv[1:]
if '-l' in args: # cvs is an idiot
i = args.index('-l')
args = args[i:i+2]+args
del args[i+2:i+4]
for arg in args[:]:
try:
i = args.index(arg)
if arg[:2] == '-o' and args[i+1][0]!='-':
args[i:i+2] = [] # suck on it scp
except ValueError:
pass
options = ClientOptions()
try:
options.parseOptions(args)
except usage.UsageError, u:
print 'ERROR: %s' % u
options.opt_help()
sys.exit(1)
if options['log']:
if options['logfile']:
if options['logfile'] == '-':
f = sys.stdout
else:
f = file(options['logfile'], 'a+')
else:
f = sys.stderr
realout = sys.stdout
log.startLogging(f)
sys.stdout = realout
else:
log.discardLogs()
doConnect()
fd = sys.stdin.fileno()
try:
old = tty.tcgetattr(fd)
except:
old = None
try:
oldUSR1 = signal.signal(signal.SIGUSR1, lambda *a: reactor.callLater(0, reConnect))
except:
oldUSR1 = None
try:
reactor.run()
finally:
if old:
tty.tcsetattr(fd, tty.TCSANOW, old)
if oldUSR1:
signal.signal(signal.SIGUSR1, oldUSR1)
if (options['command'] and options['tty']) or not options['notty']:
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
if sys.stdout.isatty() and not options['command']:
print 'Connection to %s closed.' % options['host']
sys.exit(exitStatus)
def handleError():
from twisted.python import failure
global exitStatus
exitStatus = 2
reactor.callLater(0.01, _stopReactor)
log.err(failure.Failure())
raise
def _stopReactor():
try:
reactor.stop()
except: pass
def doConnect():
# log.deferr = handleError # HACK
if '@' in options['host']:
options['user'], options['host'] = options['host'].split('@',1)
if not options.identitys:
options.identitys = ['~/.ssh/id_rsa', '~/.ssh/id_dsa']
host = options['host']
if not options['user']:
options['user'] = getpass.getuser()
if not options['port']:
options['port'] = 22
else:
options['port'] = int(options['port'])
host = options['host']
port = options['port']
vhk = default.verifyHostKey
uao = default.SSHUserAuthClient(options['user'], options, SSHConnection())
connect.connect(host, port, options, vhk, uao).addErrback(_ebExit)
def _ebExit(f):
global exitStatus
if hasattr(f.value, 'value'):
s = f.value.value
else:
s = str(f)
exitStatus = "conch: exiting with error %s" % f
reactor.callLater(0.1, _stopReactor)
def onConnect():
# if keyAgent and options['agent']:
# cc = protocol.ClientCreator(reactor, SSHAgentForwardingLocal, conn)
# cc.connectUNIX(os.environ['SSH_AUTH_SOCK'])
if hasattr(conn.transport, 'sendIgnore'):
_KeepAlive(conn)
if options.localForwards:
for localPort, hostport in options.localForwards:
s = reactor.listenTCP(localPort,
forwarding.SSHListenForwardingFactory(conn,
hostport,
SSHListenClientForwardingChannel))
conn.localForwards.append(s)
if options.remoteForwards:
for remotePort, hostport in options.remoteForwards:
log.msg('asking for remote forwarding for %s:%s' %
(remotePort, hostport))
conn.requestRemoteForwarding(remotePort, hostport)
reactor.addSystemEventTrigger('before', 'shutdown', beforeShutdown)
if not options['noshell'] or options['agent']:
conn.openChannel(SSHSession())
if options['fork']:
if os.fork():
os._exit(0)
os.setsid()
for i in range(3):
try:
os.close(i)
except OSError, e:
import errno
if e.errno != errno.EBADF:
raise
def reConnect():
beforeShutdown()
conn.transport.transport.loseConnection()
def beforeShutdown():
remoteForwards = options.remoteForwards
for remotePort, hostport in remoteForwards:
log.msg('cancelling %s:%s' % (remotePort, hostport))
conn.cancelRemoteForwarding(remotePort)
def stopConnection():
if not options['reconnect']:
reactor.callLater(0.1, _stopReactor)
class _KeepAlive:
def __init__(self, conn):
self.conn = conn
self.globalTimeout = None
self.lc = task.LoopingCall(self.sendGlobal)
self.lc.start(300)
def sendGlobal(self):
d = self.conn.sendGlobalRequest("conch-keep-alive@twistedmatrix.com",
"", wantReply = 1)
d.addBoth(self._cbGlobal)
self.globalTimeout = reactor.callLater(30, self._ebGlobal)
def _cbGlobal(self, res):
if self.globalTimeout:
self.globalTimeout.cancel()
self.globalTimeout = None
def _ebGlobal(self):
if self.globalTimeout:
self.globalTimeout = None
self.conn.transport.loseConnection()
class SSHConnection(connection.SSHConnection):
def serviceStarted(self):
global conn
conn = self
self.localForwards = []
self.remoteForwards = {}
if not isinstance(self, connection.SSHConnection):
# make these fall through
del self.__class__.requestRemoteForwarding
del self.__class__.cancelRemoteForwarding
onConnect()
def serviceStopped(self):
lf = self.localForwards
self.localForwards = []
for s in lf:
s.loseConnection()
stopConnection()
def requestRemoteForwarding(self, remotePort, hostport):
data = forwarding.packGlobal_tcpip_forward(('0.0.0.0', remotePort))
d = self.sendGlobalRequest('tcpip-forward', data,
wantReply=1)
log.msg('requesting remote forwarding %s:%s' %(remotePort, hostport))
d.addCallback(self._cbRemoteForwarding, remotePort, hostport)
d.addErrback(self._ebRemoteForwarding, remotePort, hostport)
def _cbRemoteForwarding(self, result, remotePort, hostport):
log.msg('accepted remote forwarding %s:%s' % (remotePort, hostport))
self.remoteForwards[remotePort] = hostport
log.msg(repr(self.remoteForwards))
def _ebRemoteForwarding(self, f, remotePort, hostport):
log.msg('remote forwarding %s:%s failed' % (remotePort, hostport))
log.msg(f)
def cancelRemoteForwarding(self, remotePort):
data = forwarding.packGlobal_tcpip_forward(('0.0.0.0', remotePort))
self.sendGlobalRequest('cancel-tcpip-forward', data)
log.msg('cancelling remote forwarding %s' % remotePort)
try:
del self.remoteForwards[remotePort]
except:
pass
log.msg(repr(self.remoteForwards))
def channel_forwarded_tcpip(self, windowSize, maxPacket, data):
log.msg('%s %s' % ('FTCP', repr(data)))
remoteHP, origHP = forwarding.unpackOpen_forwarded_tcpip(data)
log.msg(self.remoteForwards)
log.msg(remoteHP)
if self.remoteForwards.has_key(remoteHP[1]):
connectHP = self.remoteForwards[remoteHP[1]]
log.msg('connect forwarding %s' % (connectHP,))
return SSHConnectForwardingChannel(connectHP,
remoteWindow = windowSize,
remoteMaxPacket = maxPacket,
conn = self)
else:
raise ConchError(connection.OPEN_CONNECT_FAILED, "don't know about that port")
# def channel_auth_agent_openssh_com(self, windowSize, maxPacket, data):
# if options['agent'] and keyAgent:
# return agent.SSHAgentForwardingChannel(remoteWindow = windowSize,
# remoteMaxPacket = maxPacket,
# conn = self)
# else:
# return connection.OPEN_CONNECT_FAILED, "don't have an agent"
def channelClosed(self, channel):
log.msg('connection closing %s' % channel)
log.msg(self.channels)
if len(self.channels) == 1: # just us left
log.msg('stopping connection')
stopConnection()
else:
# because of the unix thing
self.__class__.__bases__[0].channelClosed(self, channel)
class SSHSession(channel.SSHChannel):
name = 'session'
def channelOpen(self, foo):
log.msg('session %s open' % self.id)
if options['agent']:
d = self.conn.sendRequest(self, 'auth-agent-req@openssh.com', '', wantReply=1)
d.addBoth(lambda x:log.msg(x))
if options['noshell']: return
if (options['command'] and options['tty']) or not options['notty']:
_enterRawMode()
c = session.SSHSessionClient()
if options['escape'] and not options['notty']:
self.escapeMode = 1
c.dataReceived = self.handleInput
else:
c.dataReceived = self.write
c.connectionLost = lambda x=None,s=self:s.sendEOF()
self.stdio = stdio.StandardIO(c)
fd = 0
if options['subsystem']:
self.conn.sendRequest(self, 'subsystem', \
common.NS(options['command']))
elif options['command']:
if options['tty']:
term = os.environ['TERM']
winsz = fcntl.ioctl(fd, tty.TIOCGWINSZ, '12345678')
winSize = struct.unpack('4H', winsz)
ptyReqData = session.packRequest_pty_req(term, winSize, '')
self.conn.sendRequest(self, 'pty-req', ptyReqData)
signal.signal(signal.SIGWINCH, self._windowResized)
self.conn.sendRequest(self, 'exec', \
common.NS(options['command']))
else:
if not options['notty']:
term = os.environ['TERM']
winsz = fcntl.ioctl(fd, tty.TIOCGWINSZ, '12345678')
winSize = struct.unpack('4H', winsz)
ptyReqData = session.packRequest_pty_req(term, winSize, '')
self.conn.sendRequest(self, 'pty-req', ptyReqData)
signal.signal(signal.SIGWINCH, self._windowResized)
self.conn.sendRequest(self, 'shell', '')
#if hasattr(conn.transport, 'transport'):
# conn.transport.transport.setTcpNoDelay(1)
def handleInput(self, char):
#log.msg('handling %s' % repr(char))
if char in ('\n', '\r'):
self.escapeMode = 1
self.write(char)
elif self.escapeMode == 1 and char == options['escape']:
self.escapeMode = 2
elif self.escapeMode == 2:
self.escapeMode = 1 # so we can chain escapes together
if char == '.': # disconnect
log.msg('disconnecting from escape')
stopConnection()
return
elif char == '\x1a': # ^Z, suspend
def _():
_leaveRawMode()
sys.stdout.flush()
sys.stdin.flush()
os.kill(os.getpid(), signal.SIGTSTP)
_enterRawMode()
reactor.callLater(0, _)
return
elif char == 'R': # rekey connection
log.msg('rekeying connection')
self.conn.transport.sendKexInit()
return
elif char == '#': # display connections
self.stdio.write('\r\nThe following connections are open:\r\n')
channels = self.conn.channels.keys()
channels.sort()
for channelId in channels:
self.stdio.write(' #%i %s\r\n' % (channelId, str(self.conn.channels[channelId])))
return
self.write('~' + char)
else:
self.escapeMode = 0
self.write(char)
def dataReceived(self, data):
self.stdio.write(data)
def extReceived(self, t, data):
if t==connection.EXTENDED_DATA_STDERR:
log.msg('got %s stderr data' % len(data))
sys.stderr.write(data)
def eofReceived(self):
log.msg('got eof')
self.stdio.loseWriteConnection()
def closeReceived(self):
log.msg('remote side closed %s' % self)
self.conn.sendClose(self)
def closed(self):
global old
log.msg('closed %s' % self)
log.msg(repr(self.conn.channels))
def request_exit_status(self, data):
global exitStatus
exitStatus = int(struct.unpack('>L', data)[0])
log.msg('exit status: %s' % exitStatus)
def sendEOF(self):
self.conn.sendEOF(self)
def stopWriting(self):
self.stdio.pauseProducing()
def startWriting(self):
self.stdio.resumeProducing()
def _windowResized(self, *args):
winsz = fcntl.ioctl(0, tty.TIOCGWINSZ, '12345678')
winSize = struct.unpack('4H', winsz)
newSize = winSize[1], winSize[0], winSize[2], winSize[3]
self.conn.sendRequest(self, 'window-change', struct.pack('!4L', *newSize))
class SSHListenClientForwardingChannel(forwarding.SSHListenClientForwardingChannel): pass
class SSHConnectForwardingChannel(forwarding.SSHConnectForwardingChannel): pass
def _leaveRawMode():
global _inRawMode
if not _inRawMode:
return
fd = sys.stdin.fileno()
tty.tcsetattr(fd, tty.TCSANOW, _savedMode)
_inRawMode = 0
def _enterRawMode():
global _inRawMode, _savedMode
if _inRawMode:
return
fd = sys.stdin.fileno()
try:
old = tty.tcgetattr(fd)
new = old[:]
except:
log.msg('not a typewriter!')
else:
        # iflag
new[0] = new[0] | tty.IGNPAR
new[0] = new[0] & ~(tty.ISTRIP | tty.INLCR | tty.IGNCR | tty.ICRNL |
tty.IXON | tty.IXANY | tty.IXOFF)
if hasattr(tty, 'IUCLC'):
new[0] = new[0] & ~tty.IUCLC
# lflag
        new[3] = new[3] & ~(tty.ISIG | tty.ICANON | tty.ECHO |
tty.ECHOE | tty.ECHOK | tty.ECHONL)
if hasattr(tty, 'IEXTEN'):
new[3] = new[3] & ~tty.IEXTEN
#oflag
new[1] = new[1] & ~tty.OPOST
new[6][tty.VMIN] = 1
new[6][tty.VTIME] = 0
_savedMode = old
tty.tcsetattr(fd, tty.TCSANOW, new)
#tty.setraw(fd)
_inRawMode = 1
if __name__ == '__main__':
run()
|
kubernetes-client/python
|
refs/heads/master
|
kubernetes/client/models/v1_object_meta.py
|
1
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ObjectMeta(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'annotations': 'dict(str, str)',
'cluster_name': 'str',
'creation_timestamp': 'datetime',
'deletion_grace_period_seconds': 'int',
'deletion_timestamp': 'datetime',
'finalizers': 'list[str]',
'generate_name': 'str',
'generation': 'int',
'labels': 'dict(str, str)',
'managed_fields': 'list[V1ManagedFieldsEntry]',
'name': 'str',
'namespace': 'str',
'owner_references': 'list[V1OwnerReference]',
'resource_version': 'str',
'self_link': 'str',
'uid': 'str'
}
attribute_map = {
'annotations': 'annotations',
'cluster_name': 'clusterName',
'creation_timestamp': 'creationTimestamp',
'deletion_grace_period_seconds': 'deletionGracePeriodSeconds',
'deletion_timestamp': 'deletionTimestamp',
'finalizers': 'finalizers',
'generate_name': 'generateName',
'generation': 'generation',
'labels': 'labels',
'managed_fields': 'managedFields',
'name': 'name',
'namespace': 'namespace',
'owner_references': 'ownerReferences',
'resource_version': 'resourceVersion',
'self_link': 'selfLink',
'uid': 'uid'
}
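    # Illustrative note (not part of the generated file): openapi_types records the
    # python type of each attribute, while attribute_map maps the snake_case names
    # to the camelCase JSON keys used by the API, so a
    # V1ObjectMeta(name="demo", namespace="default", labels={"app": "demo"})
    # serialises its `resource_version` attribute under the key 'resourceVersion'.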
def __init__(self, annotations=None, cluster_name=None, creation_timestamp=None, deletion_grace_period_seconds=None, deletion_timestamp=None, finalizers=None, generate_name=None, generation=None, labels=None, managed_fields=None, name=None, namespace=None, owner_references=None, resource_version=None, self_link=None, uid=None, local_vars_configuration=None): # noqa: E501
"""V1ObjectMeta - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._annotations = None
self._cluster_name = None
self._creation_timestamp = None
self._deletion_grace_period_seconds = None
self._deletion_timestamp = None
self._finalizers = None
self._generate_name = None
self._generation = None
self._labels = None
self._managed_fields = None
self._name = None
self._namespace = None
self._owner_references = None
self._resource_version = None
self._self_link = None
self._uid = None
self.discriminator = None
if annotations is not None:
self.annotations = annotations
if cluster_name is not None:
self.cluster_name = cluster_name
if creation_timestamp is not None:
self.creation_timestamp = creation_timestamp
if deletion_grace_period_seconds is not None:
self.deletion_grace_period_seconds = deletion_grace_period_seconds
if deletion_timestamp is not None:
self.deletion_timestamp = deletion_timestamp
if finalizers is not None:
self.finalizers = finalizers
if generate_name is not None:
self.generate_name = generate_name
if generation is not None:
self.generation = generation
if labels is not None:
self.labels = labels
if managed_fields is not None:
self.managed_fields = managed_fields
if name is not None:
self.name = name
if namespace is not None:
self.namespace = namespace
if owner_references is not None:
self.owner_references = owner_references
if resource_version is not None:
self.resource_version = resource_version
if self_link is not None:
self.self_link = self_link
if uid is not None:
self.uid = uid
@property
def annotations(self):
"""Gets the annotations of this V1ObjectMeta. # noqa: E501
Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations # noqa: E501
:return: The annotations of this V1ObjectMeta. # noqa: E501
:rtype: dict(str, str)
"""
return self._annotations
@annotations.setter
def annotations(self, annotations):
"""Sets the annotations of this V1ObjectMeta.
Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations # noqa: E501
:param annotations: The annotations of this V1ObjectMeta. # noqa: E501
:type: dict(str, str)
"""
self._annotations = annotations
@property
def cluster_name(self):
"""Gets the cluster_name of this V1ObjectMeta. # noqa: E501
The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. # noqa: E501
:return: The cluster_name of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._cluster_name
@cluster_name.setter
def cluster_name(self, cluster_name):
"""Sets the cluster_name of this V1ObjectMeta.
The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. # noqa: E501
:param cluster_name: The cluster_name of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._cluster_name = cluster_name
@property
def creation_timestamp(self):
"""Gets the creation_timestamp of this V1ObjectMeta. # noqa: E501
CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata # noqa: E501
:return: The creation_timestamp of this V1ObjectMeta. # noqa: E501
:rtype: datetime
"""
return self._creation_timestamp
@creation_timestamp.setter
def creation_timestamp(self, creation_timestamp):
"""Sets the creation_timestamp of this V1ObjectMeta.
CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata # noqa: E501
:param creation_timestamp: The creation_timestamp of this V1ObjectMeta. # noqa: E501
:type: datetime
"""
self._creation_timestamp = creation_timestamp
@property
def deletion_grace_period_seconds(self):
"""Gets the deletion_grace_period_seconds of this V1ObjectMeta. # noqa: E501
Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. # noqa: E501
:return: The deletion_grace_period_seconds of this V1ObjectMeta. # noqa: E501
:rtype: int
"""
return self._deletion_grace_period_seconds
@deletion_grace_period_seconds.setter
def deletion_grace_period_seconds(self, deletion_grace_period_seconds):
"""Sets the deletion_grace_period_seconds of this V1ObjectMeta.
Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. # noqa: E501
:param deletion_grace_period_seconds: The deletion_grace_period_seconds of this V1ObjectMeta. # noqa: E501
:type: int
"""
self._deletion_grace_period_seconds = deletion_grace_period_seconds
@property
def deletion_timestamp(self):
"""Gets the deletion_timestamp of this V1ObjectMeta. # noqa: E501
DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata # noqa: E501
:return: The deletion_timestamp of this V1ObjectMeta. # noqa: E501
:rtype: datetime
"""
return self._deletion_timestamp
@deletion_timestamp.setter
def deletion_timestamp(self, deletion_timestamp):
"""Sets the deletion_timestamp of this V1ObjectMeta.
DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested. Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata # noqa: E501
:param deletion_timestamp: The deletion_timestamp of this V1ObjectMeta. # noqa: E501
:type: datetime
"""
self._deletion_timestamp = deletion_timestamp
@property
def finalizers(self):
"""Gets the finalizers of this V1ObjectMeta. # noqa: E501
Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list. # noqa: E501
:return: The finalizers of this V1ObjectMeta. # noqa: E501
:rtype: list[str]
"""
return self._finalizers
@finalizers.setter
def finalizers(self, finalizers):
"""Sets the finalizers of this V1ObjectMeta.
Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list. # noqa: E501
:param finalizers: The finalizers of this V1ObjectMeta. # noqa: E501
:type: list[str]
"""
self._finalizers = finalizers
@property
def generate_name(self):
"""Gets the generate_name of this V1ObjectMeta. # noqa: E501
GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency # noqa: E501
:return: The generate_name of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._generate_name
@generate_name.setter
def generate_name(self, generate_name):
"""Sets the generate_name of this V1ObjectMeta.
GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency # noqa: E501
:param generate_name: The generate_name of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._generate_name = generate_name
@property
def generation(self):
"""Gets the generation of this V1ObjectMeta. # noqa: E501
A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. # noqa: E501
:return: The generation of this V1ObjectMeta. # noqa: E501
:rtype: int
"""
return self._generation
@generation.setter
def generation(self, generation):
"""Sets the generation of this V1ObjectMeta.
A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. # noqa: E501
:param generation: The generation of this V1ObjectMeta. # noqa: E501
:type: int
"""
self._generation = generation
@property
def labels(self):
"""Gets the labels of this V1ObjectMeta. # noqa: E501
Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels # noqa: E501
:return: The labels of this V1ObjectMeta. # noqa: E501
:rtype: dict(str, str)
"""
return self._labels
@labels.setter
def labels(self, labels):
"""Sets the labels of this V1ObjectMeta.
Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels # noqa: E501
:param labels: The labels of this V1ObjectMeta. # noqa: E501
:type: dict(str, str)
"""
self._labels = labels
@property
def managed_fields(self):
"""Gets the managed_fields of this V1ObjectMeta. # noqa: E501
ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object. # noqa: E501
:return: The managed_fields of this V1ObjectMeta. # noqa: E501
:rtype: list[V1ManagedFieldsEntry]
"""
return self._managed_fields
@managed_fields.setter
def managed_fields(self, managed_fields):
"""Sets the managed_fields of this V1ObjectMeta.
ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object. # noqa: E501
:param managed_fields: The managed_fields of this V1ObjectMeta. # noqa: E501
:type: list[V1ManagedFieldsEntry]
"""
self._managed_fields = managed_fields
@property
def name(self):
"""Gets the name of this V1ObjectMeta. # noqa: E501
Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names # noqa: E501
:return: The name of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1ObjectMeta.
Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names # noqa: E501
:param name: The name of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1ObjectMeta. # noqa: E501
Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces # noqa: E501
:return: The namespace of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1ObjectMeta.
Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces # noqa: E501
:param namespace: The namespace of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._namespace = namespace
@property
def owner_references(self):
"""Gets the owner_references of this V1ObjectMeta. # noqa: E501
List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. # noqa: E501
:return: The owner_references of this V1ObjectMeta. # noqa: E501
:rtype: list[V1OwnerReference]
"""
return self._owner_references
@owner_references.setter
def owner_references(self, owner_references):
"""Sets the owner_references of this V1ObjectMeta.
List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. # noqa: E501
:param owner_references: The owner_references of this V1ObjectMeta. # noqa: E501
:type: list[V1OwnerReference]
"""
self._owner_references = owner_references
@property
def resource_version(self):
"""Gets the resource_version of this V1ObjectMeta. # noqa: E501
An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and pass them unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency # noqa: E501
:return: The resource_version of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._resource_version
@resource_version.setter
def resource_version(self, resource_version):
"""Sets the resource_version of this V1ObjectMeta.
An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and pass them unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency # noqa: E501
:param resource_version: The resource_version of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._resource_version = resource_version
@property
def self_link(self):
"""Gets the self_link of this V1ObjectMeta. # noqa: E501
SelfLink is a URL representing this object. Populated by the system. Read-only. DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release. # noqa: E501
:return: The self_link of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._self_link
@self_link.setter
def self_link(self, self_link):
"""Sets the self_link of this V1ObjectMeta.
SelfLink is a URL representing this object. Populated by the system. Read-only. DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release. # noqa: E501
:param self_link: The self_link of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._self_link = self_link
@property
def uid(self):
"""Gets the uid of this V1ObjectMeta. # noqa: E501
UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids # noqa: E501
:return: The uid of this V1ObjectMeta. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this V1ObjectMeta.
UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids # noqa: E501
:param uid: The uid of this V1ObjectMeta. # noqa: E501
:type: str
"""
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ObjectMeta):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ObjectMeta):
return True
return self.to_dict() != other.to_dict()
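# --- Illustrative usage (a minimal sketch, not part of the generated client) ---
# Assumes the generated constructor accepts keyword arguments that mirror the
# properties defined above; all values below are placeholders.
if __name__ == "__main__":
    meta = V1ObjectMeta(
        name="demo-pod",
        namespace="default",
        labels={"app": "demo"},
        finalizers=["example.com/cleanup"],
    )
    # to_dict() walks openapi_types and serializes the populated fields
    print(meta.to_dict())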
|
JerryXia/fastgoagent
|
refs/heads/master
|
goagent/server/uploader/appcfg.py
|
1
|
#!/usr/bin/env python
# coding:utf-8
__version__ = '1.2'
__author__ = "phus.lu@gmail.com"
import sys
import os
sys.dont_write_bytecode = True
sys.path += ['.', __file__, '../local']
import re
import collections
import getpass
import logging
import socket
import urllib2
import fancy_urllib
import random
import threading
import thread
import Queue
import time
import select
_realgetpass = getpass.getpass
def getpass_getpass(prompt='Password:', stream=None):
try:
import msvcrt
password = ''
sys.stdout.write(prompt)
while 1:
ch = msvcrt.getch()
if ch == '\b':
if password:
password = password[:-1]
sys.stdout.write('\b \b')
else:
continue
elif ch == '\r':
sys.stdout.write(os.linesep)
return password
else:
password += ch
sys.stdout.write('*')
except Exception, e:
return _realgetpass(prompt, stream)
getpass.getpass = getpass_getpass
def create_connection((host, port), timeout=None, address=None):
for i in xrange(8):
if '.google' in host or '.appspot.com' in host:
iplist = sum((socket.gethostbyname_ex(x)[-1] for x in ('www.google.com', 'mail.google.com')), [])
else:
iplist = socket.gethostbyname_ex(host)[-1]
logging.info('create_connection try connect iplist=%s, port=%d', iplist, port)
socks = []
for ip in iplist:
sock = socket.socket(socket.AF_INET if ':' not in ip else socket.AF_INET6)
sock.setblocking(0)
err = sock.connect_ex((ip, port))
socks.append(sock)
# wait up to 5 seconds for any of the pending sockets to become writable (i.e. connected)
(_, outs, _) = select.select([], socks, [], 5)
if outs:
sock = outs[0]
sock.setblocking(1)
socks.remove(sock)
any(s.close() for s in socks)
return sock
else:
raise socket.error('timed out', 'could not connect to %r' % host)
fancy_urllib._create_connection = create_connection
fancy_urllib.FancyHTTPSHandler = urllib2.HTTPSHandler
socket.create_connection = create_connection
def upload(dirname, appid):
assert isinstance(dirname, basestring) and isinstance(appid, basestring)
filename = os.path.join(dirname, 'app.yaml')
assert os.path.isfile(filename), u'%s not exists!' % filename
with open(filename, 'rb') as fp:
yaml = fp.read()
yaml = re.sub(r'application:\s*\S+', 'application: ' + appid, yaml)
with open(filename, 'wb') as fp:
fp.write(yaml)
if sys.modules.has_key('google'):
del sys.modules['google']
from google.appengine.tools import appengine_rpc
from google.appengine.tools import appcfg
appengine_rpc.HttpRpcServer.DEFAULT_COOKIE_FILE_PATH = './.appcfg_cookies'
appcfg.main(['appcfg', 'rollback', dirname])
appcfg.main(['appcfg', 'update', dirname])
def main():
appids = raw_input('APPID:')
if not re.match(r'[0-9a-zA-Z\-|]+', appids):
print('appid has a wrong format, please log in to http://appengine.google.com to view the correct appid!')
sys.exit(-1)
if any(x in appids.lower() for x in ('ios', 'android')):
print('appid cannot contain ios/android')
sys.exit(-1)
for appid in appids.split('|'):
upload(os.environ.get('uploaddir', 'gae').strip(), appid)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
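# Illustrative usage (a sketch based on the code above; directory names and appids are placeholders):
#
#   $ export uploaddir=gae        # optional; the script defaults to 'gae'
#   $ python appcfg.py
#   APPID:myapp1|myapp2
#
# Each appid is substituted into <uploaddir>/app.yaml and then deployed with the
# bundled google.appengine.tools.appcfg (a rollback followed by an update).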
|
Salat-Cx65/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/test/process_linger.py
|
140
|
"""Write to a file descriptor and then close it, waiting a few seconds before
quitting. This serves to make sure SIGCHLD is actually being noticed.
"""
import os, sys, time
print "here is some text"
time.sleep(1)
print "goodbye"
os.close(1)
os.close(2)
time.sleep(2)
sys.exit(0)
|
alkyl1978/gnuradio
|
refs/heads/master
|
gr-fec/python/fec/extended_decoder.py
|
47
|
#!/usr/bin/env python
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blocks
import fec_swig as fec
from bitflip import *
import sys
if sys.modules.has_key("gnuradio.digital"):
digital = sys.modules["gnuradio.digital"]
else:
from gnuradio import digital
from threaded_decoder import threaded_decoder
from capillary_threaded_decoder import capillary_threaded_decoder
class extended_decoder(gr.hier_block2):
#solution to log_(1-2*t)(1-2*.0335) = 1/taps where t is thresh (syndrome density)
#for i in numpy.arange(.1, .499, .01):
#print str(log((1-(2 * .0335)), (1-(2 * i)))) + ':' + str(i);
garbletable = {
0.310786835319:0.1,
0.279118162802:0.11,
0.252699589071:0.12,
0.230318516016:0.13,
0.211108735347:0.14,
0.194434959095:0.15,
0.179820650401:0.16,
0.166901324951:0.17,
0.15539341766:0.18,
0.145072979886:0.19,
0.135760766313:0.2,
0.127311581396:0.21,
0.119606529806:0.22,
0.112547286766:0.23,
0.106051798775:0.24,
0.10005101381:0.25,
0.0944863633098:0.26,
0.0893078003966:0.27,
0.084472254501:0.28,
0.0799424008658:0.29,
0.0756856701944:0.3,
0.0716734425668:0.31,
0.0678803831565:0.32,
0.0642838867856:0.33,
0.0608636049994:0.34,
0.0576010337489:0.35,
0.0544791422522:0.36,
0.0514820241933:0.37,
0.0485945507251:0.38,
0.0458019998183:0.39,
0.0430896262596:0.4,
0.0404421166935:0.41,
0.0378428350972:0.42,
0.0352726843274:0.43,
0.0327082350617:0.44,
0.0301183562535:0.45,
0.0274574540266:0.46,
0.0246498236897:0.47,
0.0215448131298:0.48,
0.0177274208353:0.49,
}
def __init__(self, decoder_obj_list, threading, ann=None, puncpat='11',
integration_period=10000, flush=None, rotator=None):
gr.hier_block2.__init__(self, "extended_decoder",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_char))
self.blocks=[]
self.ann=ann
self.puncpat=puncpat
self.flush=flush
if(type(decoder_obj_list) == list):
if(type(decoder_obj_list[0]) == list):
gr.log.info("fec.extended_decoder: Parallelism must be 1.")
raise AttributeError
else:
# If it has parallelism of 0, force it into a list of 1
decoder_obj_list = [decoder_obj_list,]
message_collector_connected=False
##anything going through the annihilator needs to be shifted to uchar vals
if fec.get_decoder_input_conversion(decoder_obj_list[0]) == "uchar" or \
fec.get_decoder_input_conversion(decoder_obj_list[0]) == "packed_bits":
self.blocks.append(blocks.multiply_const_ff(48.0))
if fec.get_shift(decoder_obj_list[0]) != 0.0:
self.blocks.append(blocks.add_const_ff(fec.get_shift(decoder_obj_list[0])))
elif fec.get_decoder_input_conversion(decoder_obj_list[0]) == "packed_bits":
self.blocks.append(blocks.add_const_ff(128.0))
if fec.get_decoder_input_conversion(decoder_obj_list[0]) == "uchar" or \
fec.get_decoder_input_conversion(decoder_obj_list[0]) == "packed_bits":
self.blocks.append(blocks.float_to_uchar());
const_index = 0; #index that corresponds to mod order for specinvert purposes
if not self.flush:
flush = 10000;
else:
flush = self.flush;
if self.ann: #ann and puncpat are strings of 0s and 1s
cat = fec.ULLVector();
for i in fec.read_big_bitlist(ann):
cat.append(i);
synd_garble = .49
idx_list = self.garbletable.keys()
idx_list.sort()
for i in idx_list:
if 1.0/self.ann.count('1') >= i:
synd_garble = self.garbletable[i]
print 'using syndrome garble threshold ' + str(synd_garble) + ' for conv_bit_corr_bb'
print 'ceiling: .0335 data garble rate'
self.blocks.append(fec.conv_bit_corr_bb(cat, len(puncpat) - puncpat.count('0'),
len(ann), integration_period, flush, synd_garble))
if self.puncpat != '11':
self.blocks.append(fec.depuncture_bb(len(puncpat), read_bitlist(puncpat), 0))
if fec.get_decoder_input_conversion(decoder_obj_list[0]) == "packed_bits":
self.blocks.append(blocks.uchar_to_float())
self.blocks.append(blocks.add_const_ff(-128.0))
self.blocks.append(digital.binary_slicer_fb())
self.blocks.append(blocks.unpacked_to_packed_bb(1,0))
if(len(decoder_obj_list) > 1):
if(fec.get_history(decoder_obj_list[0]) != 0):
gr.log.info("fec.extended_decoder: Cannot use multi-threaded parallelism on a decoder with history.")
raise AttributeError
if threading == 'capillary':
self.blocks.append(capillary_threaded_decoder(decoder_obj_list,
fec.get_decoder_input_item_size(decoder_obj_list[0]),
fec.get_decoder_output_item_size(decoder_obj_list[0])))
elif threading == 'ordinary':
self.blocks.append(threaded_decoder(decoder_obj_list,
fec.get_decoder_input_item_size(decoder_obj_list[0]),
fec.get_decoder_output_item_size(decoder_obj_list[0])))
else:
self.blocks.append(fec.decoder(decoder_obj_list[0],
fec.get_decoder_input_item_size(decoder_obj_list[0]),
fec.get_decoder_output_item_size(decoder_obj_list[0])))
if fec.get_decoder_output_conversion(decoder_obj_list[0]) == "unpack":
self.blocks.append(blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST));
self.connect((self, 0), (self.blocks[0], 0));
self.connect((self.blocks[-1], 0), (self, 0));
for i in range(len(self.blocks) - 1):
self.connect((self.blocks[i], 0), (self.blocks[i+1], 0));
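# --- Illustrative check of the garbletable derivation (a sketch, not part of GNU Radio) ---
# Each key in the garbletable above is log base (1 - 2*i) of (1 - 2*0.0335), i.e. the
# syndrome-density threshold mapping described in the class comment; the values printed
# here should match the corresponding dictionary keys.
if __name__ == '__main__':
    from math import log
    for i in (0.1, 0.25, 0.49):
        key = log(1 - 2 * 0.0335) / log(1 - 2 * i)
        print 'thresh %.2f -> key %.12f' % (i, key)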
|
fengbaicanhe/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/starImportUsage/after/src/a.py
|
45382
| |
npiganeau/odoo
|
refs/heads/master
|
addons/account_test/__init__.py
|
441
|
import account_test
import report
|
ahamilton55/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/f5/bigip_ssl_certificate.py
|
30
|
#!/usr/bin/python
#
# (c) 2016, Kevin Coming (@waffie1)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: bigip_ssl_certificate
short_description: Import/Delete certificates from BIG-IP
description:
- This module will import/delete SSL certificates on BIG-IP LTM.
Certificates can be imported from certificate and key files on the local
disk, in PEM format.
version_added: 2.2
options:
cert_content:
description:
- When used instead of 'cert_src', sets the contents of a certificate directly
to the specified value. This is used with lookup plugins or for anything
with formatting or templating. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
key_content:
description:
- When used instead of 'key_src', sets the contents of a certificate key
directly to the specified value. This is used with lookup plugins or for
anything with formatting or templating. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
state:
description:
- Certificate and key state. This determines if the provided certificate
and key is to be made C(present) on the device or C(absent).
required: true
default: present
choices:
- present
- absent
partition:
description:
- BIG-IP partition to use when adding/deleting certificate.
required: false
default: Common
name:
description:
- SSL Certificate Name. This is the cert/key pair name used
when importing a certificate/key into the F5. It also
determines the filenames of the objects on the LTM
(:Partition:name.cer_11111_1 and :Partition_name.key_11111_1).
required: true
cert_src:
description:
- This is the local filename of the certificate. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
key_src:
description:
- This is the local filename of the private key. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
passphrase:
description:
- Passphrase on certificate private key
required: false
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires the netaddr Python package on the host.
- If you use this module, you will not be able to remove the certificates
and keys that it manages via the web UI. You can only remove them via
tmsh or these modules.
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 1.5.0
- BigIP >= v12
author:
- Kevin Coming (@waffie1)
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Import PEM Certificate from local disk
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
cert_src: "/path/to/cert.crt"
key_src: "/path/to/key.key"
delegate_to: localhost
- name: Use a file lookup to import PEM Certificate
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
cert_content: "{{ lookup('file', '/path/to/cert.crt') }}"
key_content: "{{ lookup('file', '/path/to/key.key') }}"
delegate_to: localhost
- name: "Delete Certificate"
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
delegate_to: localhost
'''
RETURN = '''
cert_name:
description: >
The name of the SSL certificate. The C(cert_name) and
C(key_name) will be equal to each other.
returned: created, changed or deleted
type: string
sample: "cert1"
key_name:
description: >
The name of the SSL certificate key. The C(key_name) and
C(cert_name) will be equal to each other.
returned: created, changed or deleted
type: string
sample: "key1"
partition:
description: Partition in which the cert/key was created
returned: created, changed or deleted
type: string
sample: "Common"
key_checksum:
description: SHA1 checksum of the key that was provided
returned: created or changed
type: string
sample: "cf23df2207d99a74fbe169e3eba035e633b65d94"
cert_checksum:
description: SHA1 checksum of the cert that was provided
returned: created or changed
type: string
sample: "f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0"
'''
try:
from f5.bigip.contexts import TransactionContextManager
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
import hashlib
import os
import StringIO
class BigIpSslCertificate(object):
def __init__(self, *args, **kwargs):
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
required_args = ['key_content', 'key_src', 'cert_content', 'cert_src']
ksource = kwargs['key_src']
if ksource:
with open(ksource) as f:
kwargs['key_content'] = f.read()
csource = kwargs['cert_src']
if csource:
with open(csource) as f:
kwargs['cert_content'] = f.read()
if kwargs['state'] == 'present':
if not any(kwargs[k] is not None for k in required_args):
raise F5ModuleError(
"Either 'key_content', 'key_src', 'cert_content' or "
"'cert_src' must be provided"
)
# This is the remote BIG-IP path from where it will look for certs
# to install.
self.dlpath = '/var/config/rest/downloads'
# The params that change in the module
self.cparams = dict()
# Stores the params that are sent to the module
self.params = kwargs
self.api = ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def exists(self):
cert = self.cert_exists()
key = self.key_exists()
if cert and key:
return True
else:
return False
def get_hash(self, content):
k = hashlib.sha1()
s = StringIO.StringIO(content)
while True:
data = s.read(1024)
if not data:
break
k.update(data)
return k.hexdigest()
def present(self):
current = self.read()
changed = False
do_key = False
do_cert = False
chash = None
khash = None
check_mode = self.params['check_mode']
name = self.params['name']
partition = self.params['partition']
cert_content = self.params['cert_content']
key_content = self.params['key_content']
passphrase = self.params['passphrase']
# Technically you don't need to provide us with anything in the form
# of content for your cert, but that's kind of illogical, so we just
# return saying you didn't "do" anything if you left the cert and keys
# empty.
if not cert_content and not key_content:
return False
if key_content is not None:
if 'key_checksum' in current:
khash = self.get_hash(key_content)
if khash not in current['key_checksum']:
do_key = "update"
else:
do_key = "create"
if cert_content is not None:
if 'cert_checksum' in current:
chash = self.get_hash(cert_content)
if chash not in current['cert_checksum']:
do_cert = "update"
else:
do_cert = "create"
if do_cert or do_key:
changed = True
params = dict()
params['cert_name'] = name
params['key_name'] = name
params['partition'] = partition
if khash:
params['key_checksum'] = khash
if chash:
params['cert_checksum'] = chash
self.cparams = params
if check_mode:
return changed
if not do_cert and not do_key:
return False
tx = self.api.tm.transactions.transaction
with TransactionContextManager(tx) as api:
if do_cert:
# Upload the content of a certificate as a StringIO object
cstring = StringIO.StringIO(cert_content)
filename = "%s.crt" % (name)
filepath = os.path.join(self.dlpath, filename)
api.shared.file_transfer.uploads.upload_stringio(
cstring,
filename
)
if do_cert == "update":
# Install the certificate
params = {
'name': name,
'partition': partition
}
cert = api.tm.sys.file.ssl_certs.ssl_cert.load(**params)
# This works because, while the source path is the same,
# calling update causes the file to be re-read
cert.update()
changed = True
elif do_cert == "create":
# Install the certificate
params = {
'sourcePath': "file://" + filepath,
'name': name,
'partition': partition
}
api.tm.sys.file.ssl_certs.ssl_cert.create(**params)
changed = True
if do_key:
# Upload the content of a certificate key as a StringIO object
kstring = StringIO.StringIO(key_content)
filename = "%s.key" % (name)
filepath = os.path.join(self.dlpath, filename)
api.shared.file_transfer.uploads.upload_stringio(
kstring,
filename
)
if do_key == "update":
# Install the key
params = {
'name': name,
'partition': partition
}
key = api.tm.sys.file.ssl_keys.ssl_key.load(**params)
params = dict()
if passphrase:
params['passphrase'] = passphrase
else:
params['passphrase'] = None
key.update(**params)
changed = True
elif do_key == "create":
# Install the key
params = {
'sourcePath': "file://" + filepath,
'name': name,
'partition': partition
}
if passphrase:
params['passphrase'] = self.params['passphrase']
else:
params['passphrase'] = None
api.tm.sys.file.ssl_keys.ssl_key.create(**params)
changed = True
return changed
def key_exists(self):
return self.api.tm.sys.file.ssl_keys.ssl_key.exists(
name=self.params['name'],
partition=self.params['partition']
)
def cert_exists(self):
return self.api.tm.sys.file.ssl_certs.ssl_cert.exists(
name=self.params['name'],
partition=self.params['partition']
)
def read(self):
p = dict()
name = self.params['name']
partition = self.params['partition']
if self.key_exists():
key = self.api.tm.sys.file.ssl_keys.ssl_key.load(
name=name,
partition=partition
)
if hasattr(key, 'checksum'):
p['key_checksum'] = str(key.checksum)
if self.cert_exists():
cert = self.api.tm.sys.file.ssl_certs.ssl_cert.load(
name=name,
partition=partition
)
if hasattr(cert, 'checksum'):
p['cert_checksum'] = str(cert.checksum)
p['name'] = name
return p
def flush(self):
result = dict()
state = self.params['state']
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
result.update(**self.cparams)
result.update(dict(changed=changed))
return result
def absent(self):
changed = False
if self.exists():
changed = self.delete()
return changed
def delete(self):
changed = False
name = self.params['name']
partition = self.params['partition']
check_mode = self.params['check_mode']
delete_cert = self.cert_exists()
delete_key = self.key_exists()
if not delete_cert and not delete_key:
return changed
if check_mode:
params = dict()
params['cert_name'] = name
params['key_name'] = name
params['partition'] = partition
self.cparams = params
return True
tx = self.api.tm.transactions.transaction
with TransactionContextManager(tx) as api:
if delete_cert:
# Delete the certificate
c = api.tm.sys.file.ssl_certs.ssl_cert.load(
name=self.params['name'],
partition=self.params['partition']
)
c.delete()
changed = True
if delete_key:
# Delete the certificate key
k = self.api.tm.sys.file.ssl_keys.ssl_key.load(
name=self.params['name'],
partition=self.params['partition']
)
k.delete()
changed = True
return changed
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
name=dict(type='str', required=True),
cert_content=dict(type='str', default=None),
cert_src=dict(type='path', default=None),
key_content=dict(type='str', default=None),
key_src=dict(type='path', default=None),
passphrase=dict(type='str', default=None, no_log=True)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['key_content', 'key_src'],
['cert_content', 'cert_src']
]
)
try:
obj = BigIpSslCertificate(check_mode=module.check_mode,
**module.params)
result = obj.flush()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
|
evamwangi/bc-7-Todo_List
|
refs/heads/master
|
venv/Lib/encodings/koi8_r.py
|
593
|
""" Python Character Mapping Codec koi8_r generated from 'MAPPINGS/VENDORS/MISC/KOI8-R.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='koi8-r',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
u'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u2580' # 0x8B -> UPPER HALF BLOCK
u'\u2584' # 0x8C -> LOWER HALF BLOCK
u'\u2588' # 0x8D -> FULL BLOCK
u'\u258c' # 0x8E -> LEFT HALF BLOCK
u'\u2590' # 0x8F -> RIGHT HALF BLOCK
u'\u2591' # 0x90 -> LIGHT SHADE
u'\u2592' # 0x91 -> MEDIUM SHADE
u'\u2593' # 0x92 -> DARK SHADE
u'\u2320' # 0x93 -> TOP HALF INTEGRAL
u'\u25a0' # 0x94 -> BLACK SQUARE
u'\u2219' # 0x95 -> BULLET OPERATOR
u'\u221a' # 0x96 -> SQUARE ROOT
u'\u2248' # 0x97 -> ALMOST EQUAL TO
u'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
u'\xa0' # 0x9A -> NO-BREAK SPACE
u'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
u'\xb0' # 0x9C -> DEGREE SIGN
u'\xb2' # 0x9D -> SUPERSCRIPT TWO
u'\xb7' # 0x9E -> MIDDLE DOT
u'\xf7' # 0x9F -> DIVISION SIGN
u'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
u'\u2553' # 0xA4 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2555' # 0xA6 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2556' # 0xA7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u255c' # 0xAD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
u'\u2562' # 0xB4 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2564' # 0xB6 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u256b' # 0xBD -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa9' # 0xBF -> COPYRIGHT SIGN
u'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
u'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
u'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
u'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
u'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
u'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
u'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
u'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
u'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
u'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
u'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
u'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
u'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
u'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
u'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
u'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
u'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
u'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
u'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
u'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
u'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
u'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
u'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
u'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
u'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
u'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
u'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
u'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
u'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
u'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
u'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
u'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
u'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
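# --- Illustrative round-trip (a minimal sketch, not part of the generated codec) ---
# Exercises the decoding/encoding tables defined above directly; the sample string
# is a placeholder chosen from characters present in KOI8-R.
if __name__ == '__main__':
    sample = u'\u041f\u0440\u0438\u0432\u0435\u0442'  # "Privet" in Cyrillic
    encoded = codecs.charmap_encode(sample, 'strict', encoding_table)[0]
    decoded = codecs.charmap_decode(encoded, 'strict', decoding_table)[0]
    assert decoded == sample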
|
puneetugru/Experiment
|
refs/heads/master
|
drf/api/migrations/0005_auto_20170912_1126.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-09-12 11:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0004_auto_20170912_1120'),
]
operations = [
migrations.AlterField(
model_name='school',
name='name',
field=models.TextField(),
),
]
|
sestrella/ansible
|
refs/heads/devel
|
contrib/inventory/cloudstack.py
|
13
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Ansible CloudStack external inventory script.
=============================================
Generates Ansible inventory from CloudStack. Configuration is read from
'cloudstack.ini'. If you need to pass the project, write a simple wrapper
script, e.g. project_cloudstack.sh:
#!/bin/bash
cloudstack.py --project <your_project> $@
When run against a specific host, this script returns the following attributes
based on the data obtained from CloudStack API:
"web01": {
"cpu_number": 2,
"nic": [
{
"ip": "10.102.76.98",
"mac": "02:00:50:99:00:01",
"type": "Isolated",
"netmask": "255.255.255.0",
"gateway": "10.102.76.1"
},
{
"ip": "10.102.138.63",
"mac": "06:b7:5a:00:14:84",
"type": "Shared",
"netmask": "255.255.255.0",
"gateway": "10.102.138.1"
}
],
"default_ip": "10.102.76.98",
"zone": "ZUERICH",
"created": "2014-07-02T07:53:50+0200",
"hypervisor": "VMware",
"memory": 2048,
"state": "Running",
"tags": [],
"cpu_speed": 1800,
"affinity_group": [],
"service_offering": "Small",
"cpu_used": "62%"
}
usage: cloudstack.py [--list] [--host HOST] [--project PROJECT] [--domain DOMAIN]
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
import argparse
import json
try:
from cs import CloudStack, CloudStackException, read_config
except ImportError:
print("Error: CloudStack library must be installed: pip install cs.",
file=sys.stderr)
sys.exit(1)
class CloudStackInventory(object):
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
parser.add_argument('--tag', help="Filter machines by a tag. Should be in the form key=value.")
parser.add_argument('--project')
parser.add_argument('--domain')
options = parser.parse_args()
try:
self.cs = CloudStack(**read_config())
except CloudStackException:
print("Error: Could not connect to CloudStack API", file=sys.stderr)
domain_id = None
if options.domain:
domain_id = self.get_domain_id(options.domain)
project_id = None
if options.project:
project_id = self.get_project_id(options.project, domain_id)
if options.host:
data = self.get_host(options.host, project_id, domain_id)
print(json.dumps(data, indent=2))
elif options.list:
tags = dict()
if options.tag:
tags['tags[0].key'], tags['tags[0].value'] = options.tag.split('=')
data = self.get_list(project_id, domain_id, **tags)
print(json.dumps(data, indent=2))
else:
print("usage: --list [--tag <tag>] | --host <hostname> [--project <project>] [--domain <domain_path>]",
file=sys.stderr)
sys.exit(1)
def get_domain_id(self, domain):
domains = self.cs.listDomains(listall=True)
if domains:
for d in domains['domain']:
if d['path'].lower() == domain.lower():
return d['id']
print("Error: Domain %s not found." % domain, file=sys.stderr)
sys.exit(1)
def get_project_id(self, project, domain_id=None):
projects = self.cs.listProjects(domainid=domain_id)
if projects:
for p in projects['project']:
if p['name'] == project or p['id'] == project:
return p['id']
print("Error: Project %s not found." % project, file=sys.stderr)
sys.exit(1)
def get_host(self, name, project_id=None, domain_id=None, **kwargs):
hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id, fetch_list=True, **kwargs)
data = {}
if not hosts:
return data
for host in hosts:
host_name = host['displayname']
if name == host_name:
data['zone'] = host['zonename']
if 'group' in host:
data['group'] = host['group']
data['state'] = host['state']
data['service_offering'] = host['serviceofferingname']
data['affinity_group'] = host['affinitygroup']
data['security_group'] = host['securitygroup']
data['cpu_number'] = host['cpunumber']
if 'cpu_speed' in host:
data['cpu_speed'] = host['cpuspeed']
if 'cpuused' in host:
data['cpu_used'] = host['cpuused']
data['memory'] = host['memory']
data['tags'] = host['tags']
if 'hypervisor' in host:
data['hypervisor'] = host['hypervisor']
data['created'] = host['created']
data['nic'] = []
for nic in host['nic']:
nicdata = {
'ip': nic['ipaddress'],
'mac': nic['macaddress'],
'netmask': nic['netmask'],
'gateway': nic['gateway'],
'type': nic['type'],
}
if 'ip6address' in nic:
nicdata['ip6'] = nic['ip6address']
if 'gateway' in nic:
nicdata['gateway'] = nic['gateway']
if 'netmask' in nic:
nicdata['netmask'] = nic['netmask']
data['nic'].append(nicdata)
if nic['isdefault']:
data['default_ip'] = nic['ipaddress']
if 'ip6address' in nic:
data['default_ip6'] = nic['ip6address']
break
return data
def get_list(self, project_id=None, domain_id=None, **kwargs):
data = {
'all': {
'hosts': [],
},
'_meta': {
'hostvars': {},
},
}
groups = self.cs.listInstanceGroups(projectid=project_id, domainid=domain_id)
if groups:
for group in groups['instancegroup']:
group_name = group['name']
if group_name and group_name not in data:
data[group_name] = {
'hosts': []
}
hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id, fetch_list=True, **kwargs)
if not hosts:
return data
for host in hosts:
host_name = host['displayname']
data['all']['hosts'].append(host_name)
data['_meta']['hostvars'][host_name] = {}
# Make a group per zone
data['_meta']['hostvars'][host_name]['zone'] = host['zonename']
group_name = host['zonename']
if group_name not in data:
data[group_name] = {
'hosts': []
}
data[group_name]['hosts'].append(host_name)
if 'group' in host:
data['_meta']['hostvars'][host_name]['group'] = host['group']
data['_meta']['hostvars'][host_name]['state'] = host['state']
data['_meta']['hostvars'][host_name]['service_offering'] = host['serviceofferingname']
data['_meta']['hostvars'][host_name]['affinity_group'] = host['affinitygroup']
data['_meta']['hostvars'][host_name]['security_group'] = host['securitygroup']
data['_meta']['hostvars'][host_name]['cpu_number'] = host['cpunumber']
if 'cpuspeed' in host:
data['_meta']['hostvars'][host_name]['cpu_speed'] = host['cpuspeed']
if 'cpuused' in host:
data['_meta']['hostvars'][host_name]['cpu_used'] = host['cpuused']
data['_meta']['hostvars'][host_name]['created'] = host['created']
data['_meta']['hostvars'][host_name]['memory'] = host['memory']
data['_meta']['hostvars'][host_name]['tags'] = host['tags']
if 'hypervisor' in host:
data['_meta']['hostvars'][host_name]['hypervisor'] = host['hypervisor']
data['_meta']['hostvars'][host_name]['created'] = host['created']
data['_meta']['hostvars'][host_name]['nic'] = []
for nic in host['nic']:
nicdata = {
'ip': nic['ipaddress'],
'mac': nic['macaddress'],
'netmask': nic['netmask'],
'gateway': nic['gateway'],
'type': nic['type'],
}
if 'ip6address' in nic:
nicdata['ip6'] = nic['ip6address']
if 'gateway' in nic:
nicdata['gateway'] = nic['gateway']
if 'netmask' in nic:
nicdata['netmask'] = nic['netmask']
data['_meta']['hostvars'][host_name]['nic'].append(nicdata)
if nic['isdefault']:
data['_meta']['hostvars'][host_name]['default_ip'] = nic['ipaddress']
if 'ip6address' in nic:
data['_meta']['hostvars'][host_name]['default_ip6'] = nic['ip6address']
group_name = ''
if 'group' in host:
group_name = host['group']
if group_name and group_name in data:
data[group_name]['hosts'].append(host_name)
return data
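# For illustration only: a sketch of the inventory structure that get_list()
# builds, assuming a hypothetical zone "zone-1" and a single VM named "web-01".
#
#   {
#       "all": {"hosts": ["web-01"]},
#       "zone-1": {"hosts": ["web-01"]},
#       "_meta": {"hostvars": {"web-01": {"zone": "zone-1", "state": "Running", ...}}}
#   }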
if __name__ == '__main__':
CloudStackInventory()
|
sumeetsk/NEXT-1
|
refs/heads/master
|
apps/PoolBasedBinaryClassification/dashboard/Dashboard.py
|
1
|
import json
import numpy
import numpy.random
from datetime import datetime
from datetime import timedelta
import next.utils as utils
from next.apps.AppDashboard import AppDashboard
# import next.database_client.DatabaseAPIHTTP as db
# import next.logging_client.LoggerHTTP as ell
class MyAppDashboard(AppDashboard):
def __init__(self,db,ell):
AppDashboard.__init__(self, db, ell)
def test_error_multiline_plot(self,app, butler):
"""
        Description: Returns a multiline plot with a one-to-one mapping from lines to
        algorithms; each line shows the error on the validation set versus the number of reported answers
Expected input:
None
Expected output (in dict):
(dict) MPLD3 plot dictionary
"""
args = butler.experiment.get(key='args')
alg_list = args['alg_list']
test_alg_label = alg_list[0]['test_alg_label']
test_queries, didSucceed, message = butler.db.get_docs_with_filter(app.app_id+':queries',{'exp_uid':app.exp_uid, 'alg_label':test_alg_label})
test_S = [(query['target_index'], query['target_label'])
for query in test_queries
if 'target_index' in query.keys()]
targets = butler.targets.get_targetset(app.exp_uid)
targets = sorted(targets,key=lambda x: x['target_id'])
target_features = []
for target_index in range(len(targets)):
target_vec = targets[target_index]['meta']['features']
target_vec.append(1.)
target_features.append(target_vec)
x_min = numpy.float('inf')
x_max = -numpy.float('inf')
y_min = numpy.float('inf')
y_max = -numpy.float('inf')
list_of_alg_dicts = []
for algorithm in alg_list:
alg_label = algorithm['alg_label']
list_of_log_dict,didSucceed,message = self.ell.get_logs_with_filter(app.app_id+':ALG-EVALUATION',{'exp_uid':app.exp_uid, 'alg_label':alg_label})
list_of_log_dict = sorted(list_of_log_dict, key=lambda item: utils.str2datetime(item['timestamp']) )
x = []
y = []
for item in list_of_log_dict:
num_reported_answers = item['num_reported_answers']
weights = item['weights']
err = 0.
for q in test_S:
estimated_label = numpy.sign(numpy.dot( numpy.array(target_features[q[0]]), numpy.array(weights) ))
err += estimated_label*q[1]<0. #do the labels agree or not
m = float(len(test_S))
err = err/m
x.append(num_reported_answers)
y.append(err)
            sort_idx = numpy.argsort(x)
            x = [x[i] for i in sort_idx]
            y = [y[i] for i in sort_idx]
alg_dict = {}
alg_dict['legend_label'] = alg_label
alg_dict['x'] = x
alg_dict['y'] = y
try:
x_min = min(x_min,min(x))
x_max = max(x_max,max(x))
y_min = min(y_min,min(y))
y_max = max(y_max,max(y))
except:
pass
list_of_alg_dicts.append(alg_dict)
import matplotlib.pyplot as plt
import mpld3
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#EEEEEE'))
for alg_dict in list_of_alg_dicts:
ax.plot(alg_dict['x'],alg_dict['y'],label=alg_dict['legend_label'])
ax.set_xlabel('Number of answered queries')
ax.set_ylabel('Error on hold-out set')
ax.set_xlim([x_min,x_max])
ax.set_ylim([y_min,y_max])
ax.grid(color='white', linestyle='solid')
ax.set_title('Test Error', size=14)
legend = ax.legend(loc=2,ncol=3,mode="expand")
for label in legend.get_texts():
label.set_fontsize('small')
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict
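# The snippet below is a self-contained sketch (not part of the dashboard API)
# of the hold-out error computation used above: a linear rule sign(<w, x>) is
# compared against +/-1 labels and the fraction of disagreements is returned.
# The feature vectors and labels are made-up values for illustration only.
def _holdout_error_sketch(weights, target_features, test_S):
    import numpy
    err = 0.
    for target_index, target_label in test_S:
        estimated_label = numpy.sign(numpy.dot(numpy.array(target_features[target_index]),
                                               numpy.array(weights)))
        err += estimated_label * target_label < 0.  # count label disagreements
    return err / float(len(test_S))

if __name__ == '__main__':
    example_features = [[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]]  # last entry is the appended bias term
    example_test_S = [(0, 1), (1, -1)]
    print(_holdout_error_sketch([2.0, -1.0, 0.0], example_features, example_test_S))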
|
kowall116/roadTrip
|
refs/heads/master
|
node_modules/karma/node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py
|
1788
|
#!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
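# Worked example (for illustration): for U+1F4A9,
#   highSurrogate = (0x1F4A9 - 0x10000) // 0x400 + 0xD800 = 0xD83D
#   lowSurrogate  = (0x1F4A9 - 0x10000) %  0x400 + 0xDC00 = 0xDCA9
# so unisymbol(0x1F4A9) returns the surrogate pair u'\uD83D\uDCA9'.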
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
|
mycodeday/crm-platform
|
refs/heads/master
|
bus/__openerp__.py
|
299
|
{
'name' : 'IM Bus',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'Hidden',
'complexity': 'easy',
'description': "Instant Messaging Bus allow you to send messages to users, in live.",
'depends': ['base', 'web'],
'data': [
'views/bus.xml',
'security/ir.model.access.csv',
],
'installable': True,
'auto_install': True,
}
|
Lyhan/GCWiiManager
|
refs/heads/master
|
GWcli.py
|
1
|
# Library for CLI functions
import os
import sys
# Validate user input [y/n]
def validateYN(message):
a=''
while True:
a = input(message + "[y/n]: ").lower()
if a == "y" or a == "yes":
return 1
elif a == "n" or a == "no":
return 0
elif a == "e" or a == "exit":
sys.exit(0)
else:
print("""Not a valid option, type "e" or "exit" to quit.""")
# Get game path from user input
def getGamesPath():
path=''
while not os.path.exists(path):
path = input("Enter games location: ")
if not os.path.exists(path):
print("ERROR: {} not found.".format(path))
return path
# Get destination folder
def getDestPath():
while True:
path = input("Enter destination folder: ")
if os.path.exists(path):
return path
else:
a = validateYN("The destination folder does not exist. Would you like to create it?")
if a:
os.mkdir(path)
return path
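# Example interactive flow (illustration only):
#   source = getGamesPath()       # re-prompts until an existing path is entered
#   destination = getDestPath()   # offers to create the folder if it is missing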
|
thonkify/thonkify
|
refs/heads/master
|
src/lib/pycountry/tests/test_general.py
|
1
|
import gettext
import re
import pycountry
import pycountry.db
import pytest
@pytest.fixture(autouse=True, scope='session')
def logging():
import logging
logging.basicConfig(level=logging.DEBUG)
def test_country_list():
assert len(pycountry.countries) == 249
assert isinstance(list(pycountry.countries)[0], pycountry.db.Data)
def test_germany_has_all_attributes():
germany = pycountry.countries.get(alpha_2='DE')
assert germany.alpha_2 == u'DE'
assert germany.alpha_3 == u'DEU'
assert germany.numeric == u'276'
assert germany.name == u'Germany'
assert germany.official_name == u'Federal Republic of Germany'
def test_subdivisions_directly_accessible():
assert len(pycountry.subdivisions) == 4847
assert isinstance(list(pycountry.subdivisions)[0], pycountry.db.Data)
de_st = pycountry.subdivisions.get(code='DE-ST')
assert de_st.code == u'DE-ST'
assert de_st.name == u'Sachsen-Anhalt'
assert de_st.type == u'State'
assert de_st.parent is None
assert de_st.parent_code is None
assert de_st.country is pycountry.countries.get(alpha_2='DE')
def test_subdivisions_have_subdivision_as_parent():
al_br = pycountry.subdivisions.get(code='AL-BU')
assert al_br.code == u'AL-BU'
assert al_br.name == u'Bulqiz\xeb'
assert al_br.type == u'District'
assert al_br.parent_code == u'AL-09'
assert al_br.parent is pycountry.subdivisions.get(code='AL-09')
assert al_br.parent.name == u'Dib\xebr'
def test_query_subdivisions_of_country():
assert len(pycountry.subdivisions.get(country_code='DE')) == 16
assert len(pycountry.subdivisions.get(country_code='US')) == 57
def test_scripts():
assert len(pycountry.scripts) == 182
assert isinstance(list(pycountry.scripts)[0], pycountry.db.Data)
latin = pycountry.scripts.get(name='Latin')
assert latin.alpha_4 == u'Latn'
assert latin.name == u'Latin'
assert latin.numeric == u'215'
def test_currencies():
assert len(pycountry.currencies) == 170
assert isinstance(list(pycountry.currencies)[0], pycountry.db.Data)
argentine_peso = pycountry.currencies.get(alpha_3='ARS')
assert argentine_peso.alpha_3 == u'ARS'
assert argentine_peso.name == u'Argentine Peso'
assert argentine_peso.numeric == u'032'
def test_languages():
assert len(pycountry.languages) == 7847
assert isinstance(list(pycountry.languages)[0], pycountry.db.Data)
aragonese = pycountry.languages.get(alpha_2='an')
assert aragonese.alpha_2 == u'an'
assert aragonese.alpha_3 == u'arg'
assert aragonese.name == u'Aragonese'
bengali = pycountry.languages.get(alpha_2='bn')
assert bengali.name == u'Bengali'
assert bengali.common_name == u'Bangla'
def test_locales():
german = gettext.translation(
'iso3166', pycountry.LOCALES_DIR, languages=['de'])
german.install()
assert __builtins__['_']('Germany') == 'Deutschland'
def test_removed_countries():
ussr = pycountry.historic_countries.get(alpha_3='SUN')
assert isinstance(ussr, pycountry.db.Data)
assert ussr.alpha_4 == u'SUHH'
assert ussr.alpha_3 == u'SUN'
assert ussr.name == u'USSR, Union of Soviet Socialist Republics'
assert ussr.withdrawal_date == u'1992-08-30'
def test_repr():
assert re.match("Country\\(alpha_2=u?'DE', "
"alpha_3=u?'DEU', "
"name=u?'Germany', "
"numeric=u?'276', "
"official_name=u?'Federal Republic of Germany'\\)",
repr(pycountry.countries.get(alpha_2='DE')))
def test_dir():
germany = pycountry.countries.get(alpha_2='DE')
for n in 'alpha_2', 'alpha_3', 'name', 'numeric', 'official_name':
assert n in dir(germany)
def test_get():
c = pycountry.countries
with pytest.raises(TypeError):
c.get(alpha_2='DE', alpha_3='DEU')
assert c.get(alpha_2='DE') == c.get(alpha_3='DEU')
def test_lookup():
c = pycountry.countries
g = c.get(alpha_2='DE')
assert g == c.lookup('de')
assert g == c.lookup('DEU')
assert g == c.lookup('276')
assert g == c.lookup('germany')
assert g == c.lookup('Federal Republic of Germany')
# try a generated field
bqaq = pycountry.historic_countries.get(alpha_4='BQAQ')
assert bqaq == pycountry.historic_countries.lookup('atb')
german = pycountry.languages.get(alpha_2='de')
assert german == pycountry.languages.lookup('De')
euro = pycountry.currencies.get(alpha_3='EUR')
assert euro == pycountry.currencies.lookup('euro')
latin = pycountry.scripts.get(name='Latin')
assert latin == pycountry.scripts.lookup('latn')
al_bu = pycountry.subdivisions.get(code='AL-BU')
assert al_bu == pycountry.subdivisions.lookup('al-bu')
with pytest.raises(LookupError):
pycountry.countries.lookup('bogus country')
with pytest.raises(LookupError):
pycountry.countries.lookup(12345)
|
rjschof/gem5
|
refs/heads/master
|
src/cpu/minor/MinorCPU.py
|
12
|
# Copyright (c) 2012-2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Nathan Binkert
# Andrew Bardsley
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
from BaseCPU import BaseCPU
from DummyChecker import DummyChecker
from BranchPredictor import *
from TimingExpr import TimingExpr
from FuncUnit import OpClass
class MinorOpClass(SimObject):
"""Boxing of OpClass to get around build problems and provide a hook for
future additions to OpClass checks"""
type = 'MinorOpClass'
cxx_header = "cpu/minor/func_unit.hh"
opClass = Param.OpClass("op class to match")
class MinorOpClassSet(SimObject):
"""A set of matchable op classes"""
type = 'MinorOpClassSet'
cxx_header = "cpu/minor/func_unit.hh"
opClasses = VectorParam.MinorOpClass([], "op classes to be matched."
" An empty list means any class")
class MinorFUTiming(SimObject):
type = 'MinorFUTiming'
cxx_header = "cpu/minor/func_unit.hh"
mask = Param.UInt64(0, "mask for testing ExtMachInst")
match = Param.UInt64(0, "match value for testing ExtMachInst:"
" (ext_mach_inst & mask) == match")
suppress = Param.Bool(False, "if true, this inst. is not executed by"
" this FU")
extraCommitLat = Param.Cycles(0, "extra cycles to stall commit for"
" this inst.")
extraCommitLatExpr = Param.TimingExpr(NULL, "extra cycles as a"
" run-time evaluated expression")
extraAssumedLat = Param.Cycles(0, "extra cycles to add to scoreboard"
" retire time for this insts dest registers once it leaves the"
" functional unit. For mem refs, if this is 0, the result's time"
" is marked as unpredictable and no forwarding can take place.")
srcRegsRelativeLats = VectorParam.Cycles("the maximum number of cycles"
" after inst. issue that each src reg can be available for this"
" inst. to issue")
opClasses = Param.MinorOpClassSet(MinorOpClassSet(),
"op classes to be considered for this decode. An empty set means any"
" class")
description = Param.String('', "description string of the decoding/inst."
" class")
def minorMakeOpClassSet(op_classes):
"""Make a MinorOpClassSet from a list of OpClass enum value strings"""
def boxOpClass(op_class):
return MinorOpClass(opClass=op_class)
return MinorOpClassSet(opClasses=map(boxOpClass, op_classes))
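# For illustration only: minorMakeOpClassSet(['IntAlu', 'IntMult']) is equivalent to
# MinorOpClassSet(opClasses=[MinorOpClass(opClass='IntAlu'),
#                            MinorOpClass(opClass='IntMult')])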
class MinorFU(SimObject):
type = 'MinorFU'
cxx_header = "cpu/minor/func_unit.hh"
opClasses = Param.MinorOpClassSet(MinorOpClassSet(), "type of operations"
" allowed on this functional unit")
opLat = Param.Cycles(1, "latency in cycles")
issueLat = Param.Cycles(1, "cycles until another instruction can be"
" issued")
timings = VectorParam.MinorFUTiming([], "extra decoding rules")
cantForwardFromFUIndices = VectorParam.Unsigned([],
"list of FU indices from which this FU can't receive and early"
" (forwarded) result")
class MinorFUPool(SimObject):
type = 'MinorFUPool'
cxx_header = "cpu/minor/func_unit.hh"
funcUnits = VectorParam.MinorFU("functional units")
class MinorDefaultIntFU(MinorFU):
opClasses = minorMakeOpClassSet(['IntAlu'])
timings = [MinorFUTiming(description="Int",
srcRegsRelativeLats=[2])]
opLat = 3
class MinorDefaultIntMulFU(MinorFU):
opClasses = minorMakeOpClassSet(['IntMult'])
timings = [MinorFUTiming(description='Mul',
srcRegsRelativeLats=[0])]
opLat = 3
class MinorDefaultIntDivFU(MinorFU):
opClasses = minorMakeOpClassSet(['IntDiv'])
issueLat = 9
opLat = 9
class MinorDefaultFloatSimdFU(MinorFU):
opClasses = minorMakeOpClassSet([
'FloatAdd', 'FloatCmp', 'FloatCvt', 'FloatMisc', 'FloatMult',
'FloatMultAcc', 'FloatDiv', 'FloatSqrt',
'SimdAdd', 'SimdAddAcc', 'SimdAlu', 'SimdCmp', 'SimdCvt',
'SimdMisc', 'SimdMult', 'SimdMultAcc', 'SimdShift', 'SimdShiftAcc',
'SimdSqrt', 'SimdFloatAdd', 'SimdFloatAlu', 'SimdFloatCmp',
'SimdFloatCvt', 'SimdFloatDiv', 'SimdFloatMisc', 'SimdFloatMult',
'SimdFloatMultAcc', 'SimdFloatSqrt'])
timings = [MinorFUTiming(description='FloatSimd',
srcRegsRelativeLats=[2])]
opLat = 6
class MinorDefaultMemFU(MinorFU):
opClasses = minorMakeOpClassSet(['MemRead', 'MemWrite', 'FloatMemRead',
'FloatMemWrite'])
timings = [MinorFUTiming(description='Mem',
srcRegsRelativeLats=[1], extraAssumedLat=2)]
opLat = 1
class MinorDefaultMiscFU(MinorFU):
opClasses = minorMakeOpClassSet(['IprAccess', 'InstPrefetch'])
opLat = 1
class MinorDefaultFUPool(MinorFUPool):
funcUnits = [MinorDefaultIntFU(), MinorDefaultIntFU(),
MinorDefaultIntMulFU(), MinorDefaultIntDivFU(),
MinorDefaultFloatSimdFU(), MinorDefaultMemFU(),
MinorDefaultMiscFU()]
class ThreadPolicy(Enum): vals = ['SingleThreaded', 'RoundRobin', 'Random']
class MinorCPU(BaseCPU):
type = 'MinorCPU'
cxx_header = "cpu/minor/cpu.hh"
@classmethod
def memory_mode(cls):
return 'timing'
@classmethod
def require_caches(cls):
return True
@classmethod
def support_take_over(cls):
return True
threadPolicy = Param.ThreadPolicy('RoundRobin',
"Thread scheduling policy")
fetch1FetchLimit = Param.Unsigned(1,
"Number of line fetches allowable in flight at once")
fetch1LineSnapWidth = Param.Unsigned(0,
"Fetch1 'line' fetch snap size in bytes"
" (0 means use system cache line size)")
fetch1LineWidth = Param.Unsigned(0,
"Fetch1 maximum fetch size in bytes (0 means use system cache"
" line size)")
fetch1ToFetch2ForwardDelay = Param.Cycles(1,
"Forward cycle delay from Fetch1 to Fetch2 (1 means next cycle)")
fetch1ToFetch2BackwardDelay = Param.Cycles(1,
"Backward cycle delay from Fetch2 to Fetch1 for branch prediction"
" signalling (0 means in the same cycle, 1 mean the next cycle)")
fetch2InputBufferSize = Param.Unsigned(2,
"Size of input buffer to Fetch2 in cycles-worth of insts.")
fetch2ToDecodeForwardDelay = Param.Cycles(1,
"Forward cycle delay from Fetch2 to Decode (1 means next cycle)")
fetch2CycleInput = Param.Bool(True,
"Allow Fetch2 to cross input lines to generate full output each"
" cycle")
decodeInputBufferSize = Param.Unsigned(3,
"Size of input buffer to Decode in cycles-worth of insts.")
decodeToExecuteForwardDelay = Param.Cycles(1,
"Forward cycle delay from Decode to Execute (1 means next cycle)")
decodeInputWidth = Param.Unsigned(2,
"Width (in instructions) of input to Decode (and implicitly"
" Decode's own width)")
decodeCycleInput = Param.Bool(True,
"Allow Decode to pack instructions from more than one input cycle"
" to fill its output each cycle")
executeInputWidth = Param.Unsigned(2,
"Width (in instructions) of input to Execute")
executeCycleInput = Param.Bool(True,
"Allow Execute to use instructions from more than one input cycle"
" each cycle")
executeIssueLimit = Param.Unsigned(2,
"Number of issuable instructions in Execute each cycle")
executeMemoryIssueLimit = Param.Unsigned(1,
"Number of issuable memory instructions in Execute each cycle")
executeCommitLimit = Param.Unsigned(2,
"Number of committable instructions in Execute each cycle")
executeMemoryCommitLimit = Param.Unsigned(1,
"Number of committable memory references in Execute each cycle")
executeInputBufferSize = Param.Unsigned(7,
"Size of input buffer to Execute in cycles-worth of insts.")
executeMemoryWidth = Param.Unsigned(0,
"Width (and snap) in bytes of the data memory interface. (0 mean use"
" the system cacheLineSize)")
executeMaxAccessesInMemory = Param.Unsigned(2,
"Maximum number of concurrent accesses allowed to the memory system"
" from the dcache port")
executeLSQMaxStoreBufferStoresPerCycle = Param.Unsigned(2,
"Maximum number of stores that the store buffer can issue per cycle")
executeLSQRequestsQueueSize = Param.Unsigned(1,
"Size of LSQ requests queue (address translation queue)")
executeLSQTransfersQueueSize = Param.Unsigned(2,
"Size of LSQ transfers queue (memory transaction queue)")
executeLSQStoreBufferSize = Param.Unsigned(5,
"Size of LSQ store buffer")
executeBranchDelay = Param.Cycles(1,
"Delay from Execute deciding to branch and Fetch1 reacting"
" (1 means next cycle)")
executeFuncUnits = Param.MinorFUPool(MinorDefaultFUPool(),
"FUlines for this processor")
executeSetTraceTimeOnCommit = Param.Bool(True,
"Set inst. trace times to be commit times")
executeSetTraceTimeOnIssue = Param.Bool(False,
"Set inst. trace times to be issue times")
executeAllowEarlyMemoryIssue = Param.Bool(True,
"Allow mem refs to be issued to the LSQ before reaching the head of"
" the in flight insts queue")
enableIdling = Param.Bool(True,
"Enable cycle skipping when the processor is idle\n");
branchPred = Param.BranchPredictor(TournamentBP(
numThreads = Parent.numThreads), "Branch Predictor")
def addCheckerCpu(self):
print "Checker not yet supported by MinorCPU"
exit(1)
|
RabbitMC/Autofind
|
refs/heads/master
|
mean/node_modules/node-gyp/gyp/gyptest.py
|
1752
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner(object):
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
    elif type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
class Unbuffered(object):
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
result.sort()
return result
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print >>sys.stderr, arg, 'is not a valid gyp test name.'
sys.exit(1)
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
|
encukou/freeipa
|
refs/heads/master
|
ipatests/test_ipaserver/test_kadmin.py
|
3
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
"""
Test suite for creating principals via kadmin.local and modifying their keys
"""
import os
import pytest
import tempfile
from ipalib import api
from ipaserver.install import installutils
@pytest.fixture
def keytab():
fd, keytab_path = tempfile.mkstemp(suffix='.keytab')
os.close(fd)
try:
yield keytab_path
finally:
try:
os.remove(keytab_path)
except OSError:
pass
@pytest.fixture()
def service_in_kerberos_subtree(request):
princ = u'svc1/{0.host}@{0.realm}'.format(api.env)
installutils.kadmin_addprinc(princ)
def fin():
try:
installutils.kadmin(
'delprinc -force {}'.format(princ))
except Exception:
pass
request.addfinalizer(fin)
return princ
@pytest.fixture()
def service_in_service_subtree(request):
princ = u'svc2/{0.host}@{0.realm}'.format(api.env)
rpcclient = api.Backend.rpcclient
was_connected = rpcclient.isconnected()
if not was_connected:
rpcclient.connect()
api.Command.service_add(princ)
def fin():
try:
api.Command.service_del(princ)
except Exception:
pass
try:
if not was_connected:
rpcclient.disconnect()
except Exception:
pass
request.addfinalizer(fin)
return princ
@pytest.fixture(params=["service_in_kerberos_subtree",
"service_in_service_subtree"])
def service(request):
return request.getfixturevalue(request.param)
@pytest.mark.skipif(
    os.getuid() != 0, reason="kadmin.local is accessible only to root")
class TestKadmin:
def assert_success(self, command, *args):
"""
        Since kadmin.local also returns 0 when internal errors occur, we have
        to capture the command's stderr and check that it is empty
"""
result = command(*args)
assert not result.error_output
def test_create_keytab(self, service, keytab):
"""
tests that ktadd command works for both types of services
"""
self.assert_success(
installutils.create_keytab,
keytab,
service)
def test_change_key(self, service, keytab):
"""
tests that both types of service can have passwords changed using
kadmin
"""
self.assert_success(
installutils.create_keytab,
keytab,
service)
self.assert_success(
installutils.kadmin,
'change_password -randkey {}'.format(service))
def test_append_key(self, service, keytab):
"""
Tests that we can create a new keytab for both service types and then
append new keys to it
"""
self.assert_success(
installutils.create_keytab,
keytab,
service)
self.assert_success(
installutils.create_keytab,
keytab,
service)
def test_getprincs(self):
"""
tests that kadmin.local getprincs command returns a list of principals
"""
self.assert_success(installutils.kadmin, 'getprincs')
|
jonparrott/google-cloud-python
|
refs/heads/master
|
redis/noxfile.py
|
2
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (
os.path.join('..', 'api_core'),
os.path.join('..', 'core'),
)
def default(session):
"""Default unit test session.
"""
# Install all test dependencies, then install local packages in-place.
session.install('mock', 'pytest', 'pytest-cov')
for local_dep in LOCAL_DEPS:
session.install('-e', local_dep)
session.install('-e', '.')
# Run py.test against the unit tests.
session.run(
'py.test',
'--quiet',
'--cov=google.cloud.redis',
'--cov=google.cloud.redis_v1beta1',
'--cov=tests.unit',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=97',
'tests/unit',
*session.posargs
)
@nox.session(python=['2.7', '3.5', '3.6', '3.7'])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python='3.6')
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install('flake8', *LOCAL_DEPS)
session.install('.')
session.run('flake8', 'google', 'tests')
@nox.session(python='3.6')
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install('docutils', 'pygments')
session.run('python', 'setup.py', 'check', '--restructuredtext',
'--strict')
@nox.session(python='3.6')
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
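# Typical invocations (illustrative; parametrized sessions follow nox's
# "<name>-<python>" naming convention):
#   nox -s lint
#   nox -s unit-3.6
#   nox -s cover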
|
dlozeve/reveal_CommunityDetection
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py
|
1869
|
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies so large structure that small copy overhead ends up
taking seconds in a project the size of Chromium."""
class Error(Exception):
pass
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
"""Deep copy operation on gyp objects such as strings, ints, dicts
and lists. More than twice as fast as copy.deepcopy but much less
generic."""
try:
return _deepcopy_dispatch[type(x)](x)
except KeyError:
    raise Error(('Unsupported type %s for deepcopy. Use copy.deepcopy '
                 'or expand simple_copy support.') % type(x))
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
return x
for x in (type(None), int, long, float,
bool, str, unicode, type):
d[x] = _deepcopy_atomic
def _deepcopy_list(x):
return [deepcopy(a) for a in x]
d[list] = _deepcopy_list
def _deepcopy_dict(x):
y = {}
for key, value in x.iteritems():
y[deepcopy(key)] = deepcopy(value)
return y
d[dict] = _deepcopy_dict
del d
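# Minimal usage sketch (illustration only, not part of gyp): deepcopy() handles
# nested dicts/lists of atomic values and raises Error for unsupported types.
if __name__ == '__main__':
  original = {'targets': [{'name': 'a', 'deps': [1, 2]}]}
  duplicate = deepcopy(original)
  assert duplicate == original and duplicate is not original
  try:
    deepcopy(set([1]))  # sets are not supported by this simplified copy
  except Error:
    pass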
|
NullSoldier/django
|
refs/heads/master
|
django/contrib/gis/db/backends/oracle/features.py
|
235
|
from django.contrib.gis.db.backends.base.features import BaseSpatialFeatures
from django.db.backends.oracle.features import \
DatabaseFeatures as OracleDatabaseFeatures
class DatabaseFeatures(BaseSpatialFeatures, OracleDatabaseFeatures):
supports_add_srs_entry = False
supports_geometry_field_introspection = False
supports_geometry_field_unique_index = False
|
monash-merc/cvl-fabric-launcher
|
refs/heads/master
|
pyinstaller-2.1/tests/basic/data7.py
|
7
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import time
time.sleep(3)
x = 5
|
bsmr-eve/Pyfa
|
refs/heads/master
|
eos/effects/freightersmacapacitybonuso1.py
|
2
|
# freighterSMACapacityBonusO1
#
# Used by:
# Ship: Bowhead
type = "passive"
def handler(fit, ship, context):
# todo: stacking?
fit.ship.boostItemAttr("agility", ship.getModifiedItemAttr("freighterBonusO2"), skill="ORE Freighter",
stackingPenalties=True)
|
eugenewong/AirShare
|
refs/heads/master
|
boilerplate/external/babel/messages/pofile.py
|
67
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Reading and writing of files in the ``gettext`` PO (portable object)
format.
:see: `The Format of PO Files
<http://www.gnu.org/software/gettext/manual/gettext.html#PO-Files>`_
"""
from datetime import date, datetime
import os
import re
from babel import __version__ as VERSION
from babel.messages.catalog import Catalog, Message
from babel.util import set, wraptext, LOCALTZ
__all__ = ['read_po', 'write_po']
__docformat__ = 'restructuredtext en'
def unescape(string):
r"""Reverse `escape` the given string.
>>> print unescape('"Say:\\n \\"hello, world!\\"\\n"')
Say:
"hello, world!"
<BLANKLINE>
:param string: the string to unescape
:return: the unescaped string
:rtype: `str` or `unicode`
"""
return string[1:-1].replace('\\\\', '\\') \
.replace('\\t', '\t') \
.replace('\\r', '\r') \
.replace('\\n', '\n') \
.replace('\\"', '\"')
def denormalize(string):
r"""Reverse the normalization done by the `normalize` function.
>>> print denormalize(r'''""
... "Say:\n"
... " \"hello, world!\"\n"''')
Say:
"hello, world!"
<BLANKLINE>
>>> print denormalize(r'''""
... "Say:\n"
... " \"Lorem ipsum dolor sit "
... "amet, consectetur adipisicing"
... " elit, \"\n"''')
Say:
"Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
<BLANKLINE>
:param string: the string to denormalize
:return: the denormalized string
:rtype: `unicode` or `str`
"""
if string.startswith('""'):
lines = []
for line in string.splitlines()[1:]:
lines.append(unescape(line))
return ''.join(lines)
else:
return unescape(string)
def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False):
"""Read messages from a ``gettext`` PO (portable object) file from the given
file-like object and return a `Catalog`.
>>> from StringIO import StringIO
>>> buf = StringIO('''
... #: main.py:1
... #, fuzzy, python-format
... msgid "foo %(name)s"
... msgstr ""
...
... # A user comment
... #. An auto comment
... #: main.py:3
... msgid "bar"
... msgid_plural "baz"
... msgstr[0] ""
... msgstr[1] ""
... ''')
>>> catalog = read_po(buf)
>>> catalog.revision_date = datetime(2007, 04, 01)
>>> for message in catalog:
... if message.id:
... print (message.id, message.string)
... print ' ', (message.locations, message.flags)
... print ' ', (message.user_comments, message.auto_comments)
(u'foo %(name)s', '')
([(u'main.py', 1)], set([u'fuzzy', u'python-format']))
([], [])
((u'bar', u'baz'), ('', ''))
([(u'main.py', 3)], set([]))
([u'A user comment'], [u'An auto comment'])
:param fileobj: the file-like object to read the PO file from
:param locale: the locale identifier or `Locale` object, or `None`
if the catalog is not bound to a locale (which basically
means it's a template)
:param domain: the message domain
:param ignore_obsolete: whether to ignore obsolete messages in the input
:return: an iterator over ``(message, translation, location)`` tuples
:rtype: ``iterator``
"""
catalog = Catalog(locale=locale, domain=domain)
counter = [0]
offset = [0]
messages = []
translations = []
locations = []
flags = []
user_comments = []
auto_comments = []
obsolete = [False]
in_msgid = [False]
in_msgstr = [False]
def _add_message():
translations.sort()
if len(messages) > 1:
msgid = tuple([denormalize(m) for m in messages])
else:
msgid = denormalize(messages[0])
if isinstance(msgid, (list, tuple)):
string = []
for idx in range(catalog.num_plurals):
try:
string.append(translations[idx])
except IndexError:
string.append((idx, ''))
string = tuple([denormalize(t[1]) for t in string])
else:
string = denormalize(translations[0][1])
message = Message(msgid, string, list(locations), set(flags),
auto_comments, user_comments, lineno=offset[0] + 1)
if obsolete[0]:
if not ignore_obsolete:
catalog.obsolete[msgid] = message
else:
catalog[msgid] = message
del messages[:]; del translations[:]; del locations[:];
del flags[:]; del auto_comments[:]; del user_comments[:]
obsolete[0] = False
counter[0] += 1
def _process_message_line(lineno, line):
if line.startswith('msgid_plural'):
in_msgid[0] = True
msg = line[12:].lstrip()
messages.append(msg)
elif line.startswith('msgid'):
in_msgid[0] = True
offset[0] = lineno
txt = line[5:].lstrip()
if messages:
_add_message()
messages.append(txt)
elif line.startswith('msgstr'):
in_msgid[0] = False
in_msgstr[0] = True
msg = line[6:].lstrip()
if msg.startswith('['):
idx, msg = msg[1:].split(']', 1)
translations.append([int(idx), msg.lstrip()])
else:
translations.append([0, msg])
elif line.startswith('"'):
if in_msgid[0]:
messages[-1] += u'\n' + line.rstrip()
elif in_msgstr[0]:
translations[-1][1] += u'\n' + line.rstrip()
for lineno, line in enumerate(fileobj.readlines()):
line = line.strip()
if not isinstance(line, unicode):
line = line.decode(catalog.charset)
if line.startswith('#'):
in_msgid[0] = in_msgstr[0] = False
if messages and translations:
_add_message()
if line[1:].startswith(':'):
for location in line[2:].lstrip().split():
pos = location.rfind(':')
if pos >= 0:
try:
lineno = int(location[pos + 1:])
except ValueError:
continue
locations.append((location[:pos], lineno))
elif line[1:].startswith(','):
for flag in line[2:].lstrip().split(','):
flags.append(flag.strip())
elif line[1:].startswith('~'):
obsolete[0] = True
_process_message_line(lineno, line[2:].lstrip())
elif line[1:].startswith('.'):
# These are called auto-comments
comment = line[2:].strip()
if comment: # Just check that we're not adding empty comments
auto_comments.append(comment)
else:
# These are called user comments
user_comments.append(line[1:].strip())
else:
_process_message_line(lineno, line)
if messages:
_add_message()
# No actual messages found, but there was some info in comments, from which
# we'll construct an empty header message
elif not counter[0] and (flags or user_comments or auto_comments):
messages.append(u'')
translations.append([0, u''])
_add_message()
return catalog
WORD_SEP = re.compile('('
r'\s+|' # any whitespace
r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)' # em-dash
')')
def escape(string):
r"""Escape the given string so that it can be included in double-quoted
strings in ``PO`` files.
>>> escape('''Say:
... "hello, world!"
... ''')
'"Say:\\n \\"hello, world!\\"\\n"'
:param string: the string to escape
:return: the escaped string
:rtype: `str` or `unicode`
"""
return '"%s"' % string.replace('\\', '\\\\') \
.replace('\t', '\\t') \
.replace('\r', '\\r') \
.replace('\n', '\\n') \
.replace('\"', '\\"')
def normalize(string, prefix='', width=76):
r"""Convert a string into a format that is appropriate for .po files.
>>> print normalize('''Say:
... "hello, world!"
... ''', width=None)
""
"Say:\n"
" \"hello, world!\"\n"
>>> print normalize('''Say:
... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
... ''', width=32)
""
"Say:\n"
" \"Lorem ipsum dolor sit "
"amet, consectetur adipisicing"
" elit, \"\n"
:param string: the string to normalize
:param prefix: a string that should be prepended to every line
:param width: the maximum line width; use `None`, 0, or a negative number
to completely disable line wrapping
:return: the normalized string
:rtype: `unicode`
"""
if width and width > 0:
prefixlen = len(prefix)
lines = []
for idx, line in enumerate(string.splitlines(True)):
if len(escape(line)) + prefixlen > width:
chunks = WORD_SEP.split(line)
chunks.reverse()
while chunks:
buf = []
size = 2
while chunks:
l = len(escape(chunks[-1])) - 2 + prefixlen
if size + l < width:
buf.append(chunks.pop())
size += l
else:
if not buf:
# handle long chunks by putting them on a
# separate line
buf.append(chunks.pop())
break
lines.append(u''.join(buf))
else:
lines.append(line)
else:
lines = string.splitlines(True)
if len(lines) <= 1:
return escape(string)
# Remove empty trailing line
if lines and not lines[-1]:
del lines[-1]
lines[-1] += '\n'
return u'""\n' + u'\n'.join([(prefix + escape(l)) for l in lines])
def write_po(fileobj, catalog, width=76, no_location=False, omit_header=False,
sort_output=False, sort_by_file=False, ignore_obsolete=False,
include_previous=False):
r"""Write a ``gettext`` PO (portable object) template file for a given
message catalog to the provided file-like object.
>>> catalog = Catalog()
>>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],
... flags=('fuzzy',))
>>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> write_po(buf, catalog, omit_header=True)
>>> print buf.getvalue()
#: main.py:1
#, fuzzy, python-format
msgid "foo %(name)s"
msgstr ""
<BLANKLINE>
#: main.py:3
msgid "bar"
msgid_plural "baz"
msgstr[0] ""
msgstr[1] ""
<BLANKLINE>
<BLANKLINE>
:param fileobj: the file-like object to write to
:param catalog: the `Catalog` instance
:param width: the maximum line width for the generated output; use `None`,
0, or a negative number to completely disable line wrapping
:param no_location: do not emit a location comment for every message
:param omit_header: do not include the ``msgid ""`` entry at the top of the
output
:param sort_output: whether to sort the messages in the output by msgid
:param sort_by_file: whether to sort the messages in the output by their
locations
:param ignore_obsolete: whether to ignore obsolete messages and not include
them in the output; by default they are included as
comments
:param include_previous: include the old msgid as a comment when
updating the catalog
"""
def _normalize(key, prefix=''):
return normalize(key, prefix=prefix, width=width) \
.encode(catalog.charset, 'backslashreplace')
def _write(text):
if isinstance(text, unicode):
text = text.encode(catalog.charset)
fileobj.write(text)
def _write_comment(comment, prefix=''):
# xgettext always wraps comments even if --no-wrap is passed;
# provide the same behaviour
if width and width > 0:
_width = width
else:
_width = 76
for line in wraptext(comment, _width):
_write('#%s %s\n' % (prefix, line.strip()))
def _write_message(message, prefix=''):
if isinstance(message.id, (list, tuple)):
_write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix)))
_write('%smsgid_plural %s\n' % (
prefix, _normalize(message.id[1], prefix)
))
for idx in range(catalog.num_plurals):
try:
string = message.string[idx]
except IndexError:
string = ''
_write('%smsgstr[%d] %s\n' % (
prefix, idx, _normalize(string, prefix)
))
else:
_write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix)))
_write('%smsgstr %s\n' % (
prefix, _normalize(message.string or '', prefix)
))
messages = list(catalog)
if sort_output:
messages.sort()
elif sort_by_file:
messages.sort(lambda x,y: cmp(x.locations, y.locations))
for message in messages:
if not message.id: # This is the header "message"
if omit_header:
continue
comment_header = catalog.header_comment
if width and width > 0:
lines = []
for line in comment_header.splitlines():
lines += wraptext(line, width=width,
subsequent_indent='# ')
comment_header = u'\n'.join(lines) + u'\n'
_write(comment_header)
for comment in message.user_comments:
_write_comment(comment)
for comment in message.auto_comments:
_write_comment(comment, prefix='.')
if not no_location:
locs = u' '.join([u'%s:%d' % (filename.replace(os.sep, '/'), lineno)
for filename, lineno in message.locations])
_write_comment(locs, prefix=':')
if message.flags:
_write('#%s\n' % ', '.join([''] + list(message.flags)))
if message.previous_id and include_previous:
_write_comment('msgid %s' % _normalize(message.previous_id[0]),
prefix='|')
if len(message.previous_id) > 1:
_write_comment('msgid_plural %s' % _normalize(
message.previous_id[1]
), prefix='|')
_write_message(message)
_write('\n')
if not ignore_obsolete:
for message in catalog.obsolete.values():
for comment in message.user_comments:
_write_comment(comment)
_write_message(message, prefix='#~ ')
_write('\n')
|
MycChiu/tensorflow
|
refs/heads/master
|
tensorflow/contrib/factorization/python/kernel_tests/clustering_ops_test.py
|
73
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for clustering_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.platform import test
class KmeansPlusPlusInitializationTest(test.TestCase):
# All but one input point are close to (101, 1). With uniform random sampling,
# it is highly improbable for (-1, -1) to be selected.
def setUp(self):
self._points = np.array([[100., 0.],
[101., 2.],
[102., 0.],
[100., 1.],
[100., 2.],
[101., 0.],
[101., 0.],
[101., 1.],
[102., 0.],
[-1., -1.]]).astype(np.float32)
def runTestWithSeed(self, seed):
with self.test_session():
sampled_points = clustering_ops.kmeans_plus_plus_initialization(
self._points, 3, seed, (seed % 5) - 1)
self.assertAllClose(
sorted(sampled_points.eval().tolist()), [[-1., -1.],
[101., 1.],
[101., 1.]],
atol=1.0)
def testBasic(self):
for seed in range(100):
self.runTestWithSeed(seed)
# A simple test that can be verified by hand.
class NearestCentersTest(test.TestCase):
def setUp(self):
self._points = np.array([[100., 0.],
[101., 2.],
[99., 2.],
[1., 1.]]).astype(np.float32)
self._centers = np.array([[100., 0.],
[99., 1.],
[50., 50.],
[0., 0.],
[1., 1.]]).astype(np.float32)
def testNearest1(self):
with self.test_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 1)
self.assertAllClose(indices.eval(), [[0], [0], [1], [4]])
self.assertAllClose(distances.eval(), [[0.], [5.], [1.], [0.]])
def testNearest2(self):
with self.test_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 2)
self.assertAllClose(indices.eval(), [[0, 1], [0, 1], [1, 0], [4, 3]])
self.assertAllClose(distances.eval(),
[[0., 2.], [5., 5.], [1., 5.], [0., 2.]])
# A test with large inputs.
class NearestCentersLargeTest(test.TestCase):
def setUp(self):
num_points = 1000
num_centers = 2000
num_dim = 100
max_k = 5
# Construct a small number of random points and later tile them.
points_per_tile = 10
assert num_points % points_per_tile == 0
points = np.random.standard_normal(
[points_per_tile, num_dim]).astype(np.float32)
# Construct random centers.
self._centers = np.random.standard_normal(
[num_centers, num_dim]).astype(np.float32)
# Exhaustively compute expected nearest neighbors.
def squared_distance(x, y):
return np.linalg.norm(x - y, ord=2)**2
nearest_neighbors = [
sorted([(squared_distance(point, self._centers[j]), j)
for j in range(num_centers)])[:max_k] for point in points
]
expected_nearest_neighbor_indices = np.array(
[[i for _, i in nn] for nn in nearest_neighbors])
expected_nearest_neighbor_squared_distances = np.array(
[[dist for dist, _ in nn] for nn in nearest_neighbors])
# Tile points and expected results to reach requested size (num_points)
(self._points, self._expected_nearest_neighbor_indices,
self._expected_nearest_neighbor_squared_distances) = (
np.tile(x, (int(num_points / points_per_tile), 1))
for x in (points, expected_nearest_neighbor_indices,
expected_nearest_neighbor_squared_distances))
def testNearest1(self):
with self.test_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 1)
self.assertAllClose(indices.eval(),
self._expected_nearest_neighbor_indices[:, [0]])
self.assertAllClose(
distances.eval(),
self._expected_nearest_neighbor_squared_distances[:, [0]])
def testNearest5(self):
with self.test_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 5)
self.assertAllClose(indices.eval(),
self._expected_nearest_neighbor_indices[:, 0:5])
self.assertAllClose(
distances.eval(),
self._expected_nearest_neighbor_squared_distances[:, 0:5])
if __name__ == "__main__":
np.random.seed(0)
test.main()
|
ducthien1490/youtube-dl
|
refs/heads/master
|
docs/conf.py
|
137
|
# -*- coding: utf-8 -*-
#
# youtube-dl documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 14 21:05:43 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Allows to import youtube_dl
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'youtube-dl'
copyright = u'2014, Ricardo Garcia Gonzalez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from youtube_dl.version import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'youtube-dldoc'
|
firebitsbr/pwn_plug_sources
|
refs/heads/master
|
src/goodfet/GoodFETARM.py
|
8
|
#!/usr/bin/env python
# GoodFET Client Library
#
#
# Good luck with alpha / beta code.
# Contributions and bug reports welcome.
#
import sys, binascii, struct
import atlasutils.smartprint as asp
#Global Commands
READ = 0x00
WRITE = 0x01
PEEK = 0x02
POKE = 0x03
SETUP = 0x10
START = 0x20
STOP = 0x21
CALL = 0x30
EXEC = 0x31
NOK = 0x7E
OK = 0x7F
# ARM7TDMI JTAG commands
GET_DEBUG_CTRL = 0x80
SET_DEBUG_CTRL = 0x81
GET_PC = 0x82
SET_PC = 0x83
GET_CHIP_ID = 0x84
GET_DEBUG_STATE = 0x85
GET_WATCHPOINT = 0x86
SET_WATCHPOINT = 0x87
GET_REGISTER = 0x88
SET_REGISTER = 0x89
GET_REGISTERS = 0x8a
SET_REGISTERS = 0x8b
HALTCPU = 0x8c
RESUMECPU = 0x8d
DEBUG_INSTR = 0x8e #
STEP_INSTR = 0x8f #
STEP_REPLACE = 0x90 #
READ_CODE_MEMORY = 0x91 # ??
WRITE_FLASH_PAGE = 0x92 # ??
READ_FLASH_PAGE = 0x93 # ??
MASS_ERASE_FLASH = 0x94 # ??
PROGRAM_FLASH = 0x95
LOCKCHIP = 0x96 # ??
CHIP_ERASE = 0x97 # can do?
# Really ARM specific stuff
GET_CPSR = 0x98
SET_CPSR = 0x99
GET_SPSR = 0x9a
SET_SPSR = 0x9b
SET_MODE_THUMB = 0x9c
SET_MODE_ARM = 0x9d
from GoodFET import GoodFET
from intelhex import IntelHex
class GoodFETARM(GoodFET):
"""A GoodFET variant for use with ARM7TDMI microprocessor."""
def ARMhaltcpu(self):
"""Halt the CPU."""
self.writecmd(0x33,HALTCPU,0,self.data)
def ARMreleasecpu(self):
"""Resume the CPU."""
self.writecmd(0x33,RESUMECPU,0,self.data)
def ARMsetModeArm(self):
self.writecmd(0x33,SET_MODE_ARM,0,self.data)
def ARMtest(self):
self.ARMreleasecpu()
self.ARMhaltcpu()
print "Status: %s" % self.ARMstatusstr()
#Grab ident three times, should be equal.
ident1=self.ARMident()
ident2=self.ARMident()
ident3=self.ARMident()
if(ident1!=ident2 or ident2!=ident3):
print "Error, repeated ident attempts unequal."
print "%04x, %04x, %04x" % (ident1, ident2, ident3)
#Set and Check Registers
regs = [1024+x for x in range(1,15)]
regr = []
for x in range(len(regs)):
self.ARMset_register(x, regs[x])
for x in range(len(regs)):
regr.append(self.ARMget_register(x))
for x in range(len(regs)):
if regs[x] != regr[x]:
print "Error, R%d fail: %x != %x"%(x,regs[x],regr[x])
return
#Single step, printing PC.
print "Tracing execution at startup."
for i in range(15):
pc=self.ARMgetPC()
byte=self.ARMpeekcodebyte(i)
#print "PC=%04x, %02x" % (pc, byte)
self.ARMstep_instr()
print "Verifying that debugging a NOP doesn't affect the PC."
for i in range(1,15):
pc=self.ARMgetPC()
self.ARMdebuginstr(NOP)
if(pc!=self.ARMgetPC()):
print "ERROR: PC changed during ARMdebuginstr(NOP)!"
print "Checking pokes to XRAM."
for i in range(0xf000,0xf020):
self.ARMpokedatabyte(i,0xde)
if(self.ARMpeekdatabyte(i)!=0xde):
print "Error in DATA at 0x%04x" % i
#print "Status: %s." % self.ARMstatusstr()
#Exit debugger
self.stop()
print "Done."
def setup(self):
"""Move the FET into the JTAG ARM application."""
#print "Initializing ARM."
self.writecmd(0x33,SETUP,0,self.data)
def ARMget_dbgstate(self):
"""Read the debug state of an ARM."""
self.writecmd(0x33,GET_DEBUG_STATE,0,self.data) # assumption: request the state before decoding the response buffer
retval = struct.unpack("<L", self.data[:4])[0]
return retval
def ARMget_dbgctrl(self):
"""Read the config register of an ARM."""
self.writecmd(0x33,GET_DEBUG_CTRL,0,self.data)
retval = struct.unpack("B", self.data)[0]
return retval
def ARMset_dbgctrl(self,config):
"""Write the config register of an ARM."""
self.writecmd(0x33,SET_DEBUG_CTRL,1,[config&7])
def ARMlockchip(self):
"""Set the flash lock bit in info mem."""
self.writecmd(0x33, LOCKCHIP, 0, [])
def ARMidentstr(self):
ident=self.ARMident()
# Standard JTAG IDCODE layout: version [31:28], part number [27:12], manufacturer in the low bits.
ver = ident >> 28
partno = (ident >> 12) & 0xffff # assumption: 16-bit part number field
mfgid = ident & 0xfff
return "ver: %x\npartno: %x\nmfg: %x\n(%x)" % (ver, partno, mfgid, ident)
def ARMident(self):
"""Get an ARM's ID."""
self.writecmd(0x33,GET_CHIP_ID,0,[])
retval = struct.unpack("<L", "".join(self.data[0:4]))[0]
return retval
def ARMgetPC(self):
"""Get an ARM's PC."""
self.writecmd(0x33,GET_PC,0,[])
retval = struct.unpack("<L", "".join(self.data[0:4]))[0]
return retval
def ARMget_register(self, reg):
"""Get an ARM's Register"""
self.writecmd(0x33,GET_REGISTER,1,[reg&0xff])
retval = struct.unpack("<L", "".join(self.data[0:4]))[0]
return retval
def ARMset_register(self, reg, val):
"""Get an ARM's Register"""
self.writecmd(0x33,SET_REGISTER,8,[reg,0,0,0,val&0xff, (val>>8)&0xff, (val>>16)&0xff, val>>24])
#self.writecmd(0x33,SET_REGISTER,8,[reg,0,0,0, (val>>16)&0xff, val>>24, val&0xff, (val>>8)&0xff])
retval = struct.unpack("<L", "".join(self.data[0:4]))[0]
return retval
def ARMget_registers(self):
"""Get ARM Registers"""
self.writecmd(0x33,GET_REGISTERS,0, [])
retval = []
for x in range(0,len(self.data), 4):
retval.append(struct.unpack("<L", self.data[x:x+4])[0])
return retval
def ARMset_registers(self, regs):
"""Set ARM Registers"""
regarry = []
for reg in regs:
regarry.extend([reg&0xff, (reg>>8)&0xff, (reg>>16)&0xff, reg>>24])
self.writecmd(0x33,SET_REGISTERS,16*4,regarry)
retval = struct.unpack("<L", "".join(self.data[0:4]))[0]
return retval
def ARMcmd(self,phrase):
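"""Send a raw READ command with the given phrase, print the first response byte, and return it."""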
self.writecmd(0x33,READ,len(phrase),phrase)
val=ord(self.data[0])
print "Got %02x" % val
return val
def ARMdebuginstr(self,instr):
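"""Run an instruction through the debug channel; accepts either a packed byte sequence or a 32-bit integer."""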
if type (instr) == int:
instr = struct.pack("<L", instr)
self.writecmd(0x33,DEBUG_INSTR,len(instr),instr)
return (self.data[0])
def ARMpeekcodebyte(self,adr):
"""Read the contents of code memory at an address."""
self.data=[adr&0xff, (adr&0xff00)>>8]
self.writecmd(0x33,PEEK,2,self.data)
retval = struct.unpack("<L", "".join(self.data[0:4]))[0]
return retval
def ARMpeekdatabyte(self,adr):
"""Read the contents of data memory at an address."""
self.data=[adr&0xff, (adr&0xff00)>>8]
self.writecmd(0x33, PEEK, 2, self.data)
retval = struct.unpack("<L", "".join(self.data[0:4]))[0]
return retval
def ARMpokedatabyte(self,adr,val):
"""Write a byte to data memory."""
self.data=[adr&0xff, (adr&0xff00)>>8, val]
self.writecmd(0x33, POKE, 3, self.data)
retval = struct.unpack("<L", "".join(self.data[0:4]))[0]
return retval
def ARMchiperase(self):
"""Erase all of the target's memory."""
self.writecmd(0x33,CHIP_ERASE,0,[])
def ARMstatus(self):
"""Check the status."""
self.writecmd(0x33,GET_DEBUG_STATE,0,[])
return ord(self.data[0])
ARMstatusbits={
0x10 : "TBIT",
0x08 : "cgenL",
0x04 : "Interrupts Enabled (or not?)",
0x02 : "DBGRQ",
0x01 : "DGBACK"
}
ARMctrlbits={
0x04 : "disable interrupts",
0x02 : "force dbgrq",
0x01 : "force dbgack"
}
def ARMstatusstr(self):
"""Check the status as a string."""
status=self.ARMstatus()
out=""
i=1
while i<0x100:
if(status&i):
out="%s %s" %(self.ARMstatusbits[i],out)
i*=2
return out
def start(self):
"""Start debugging."""
self.writecmd(0x33,START,0,self.data)
#ident=self.ARMidentstr()
#print "Target identifies as %s." % ident
#print "Status: %s." % self.ARMstatusstr()
#self.ARMreleasecpu()
#self.ARMhaltcpu()
#print "Status: %s." % self.ARMstatusstr()
def stop(self):
"""Stop debugging."""
self.writecmd(0x33,STOP,0,self.data)
def ARMstep_instr(self):
"""Step one instruction."""
self.writecmd(0x33,STEP_INSTR,0,self.data)
def ARMflashpage(self,adr):
"""Flash a 2kB page of flash from the buffer at 0xF000 in XDATA."""
data=[adr&0xFF,
(adr>>8)&0xFF,
(adr>>16)&0xFF,
(adr>>24)&0xFF]
print "Flashing buffer to 0x%06x" % adr
self.writecmd(0x33,WRITE_FLASH_PAGE,4,data) # assumption: WRITE_FLASH_PAGE is the verb intended here
def writecmd(self, app, verb, count=0, data=[]):
"""Write a command and some data to the GoodFET."""
self.serialport.write(chr(app))
self.serialport.write(chr(verb))
count = len(data)
#if data!=None:
# count=len(data); #Initial count ignored.
#print "TX %02x %02x %04x" % (app,verb,count)
#little endian 16-bit length
self.serialport.write(chr(count&0xFF))
self.serialport.write(chr(count>>8))
#print "count=%02x, len(data)=%04x" % (count,len(data))
if count!=0:
if(isinstance(data,list)):
for i in range(0,count):
#print "Converting %02x at %i" % (data[i],i)
data[i]=chr(data[i])
#print type(data)
outstr=''.join(data)
self.serialport.write(outstr)
if not self.besilent:
self.readcmd()
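# --- Usage sketch (hedged; not part of the original source) -------------
# A minimal debugging session might look like the following. serInit() is
# assumed to come from the parent GoodFET class and its exact signature may
# differ in your GoodFET checkout; everything else is defined in this module.
#
# client = GoodFETARM()
# client.serInit() # assumption: opens the serial link to the GoodFET
# client.setup() # move the FET into the JTAG ARM application
# client.start() # begin the debug session
# print client.ARMidentstr() # decode the target's JTAG ID code
# client.ARMhaltcpu()
# print "PC = 0x%08x" % client.ARMgetPC()
# print "Status: %s" % client.ARMstatusstr()
# client.ARMreleasecpu()
# client.stop()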
|