| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
# A Test Program for pipeTestService.py
#
# Install and start the Pipe Test service, then run this test
# either from the same machine, or from another using the "-s" param.
#
# Eg: pipeTestServiceClient.py -s server_name Hi There
# Should work.
from win32pipe import *
from win32file import *
from win32event import *
import pywintypes
import win32api
import winerror
import sys, os, traceback
verbose = 0
#def ReadFromPipe(pipeName):
# Could (Should?) use CallNamedPipe, but this technique allows variable size
# messages (whereas you must supply a buffer size for CallNamedPipe!)
# hPipe = CreateFile(pipeName, GENERIC_WRITE, 0, None, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0)
# more = 1
# while more:
# hr = ReadFile(hPipe, 256)
# if hr==0:
# more = 0
# except win32api.error (hr, fn, desc):
# if hr==winerror.ERROR_MORE_DATA:
# data = dat
#
def CallPipe(fn, args):
ret = None
retryCount = 0
    while retryCount < 8: # Retry up to 8 times while the pipe is busy.
retryCount = retryCount + 1
try:
return fn(*args)
except win32api.error as exc:
if exc.winerror==winerror.ERROR_PIPE_BUSY:
win32api.Sleep(5000)
continue
else:
raise
raise RuntimeError("Could not make a connection to the server")
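# A minimal usage sketch (the pipe name and payload below are illustrative):
#   reply = CallPipe(CallNamedPipe,
#                    ("\\\\.\\pipe\\PyPipeTest", b"ping", 256, NMPWAIT_WAIT_FOREVER))
# CallPipe retries while the server reports ERROR_PIPE_BUSY, the normal
# condition when every pipe instance is momentarily in use.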
def testClient(server,msg):
if verbose:
print("Sending", msg)
data = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 256, NMPWAIT_WAIT_FOREVER))
if verbose:
print("Server sent back '%s'" % data)
print("Sent and received a message!")
def testLargeMessage(server, size = 4096):
if verbose:
print("Sending message of size %d" % (size))
msg = "*" * size
data = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, msg, 512, NMPWAIT_WAIT_FOREVER))
    if len(data) != size:
        print("Sizes are all wrong - sent %d, got back %d" % (size, len(data)))
def stressThread(server, numMessages, wait):
try:
try:
for i in range(numMessages):
r = CallPipe(CallNamedPipe, ("\\\\%s\\pipe\\PyPipeTest" % server, "#" * 512, 1024, NMPWAIT_WAIT_FOREVER))
except:
traceback.print_exc()
print("Failed after %d messages" % i)
finally:
SetEvent(wait)
def stressTestClient(server, numThreads, numMessages):
import _thread
thread_waits = []
for t_num in range(numThreads):
# Note I could just wait on thread handles (after calling DuplicateHandle)
# See the service itself for an example of waiting for the clients...
wait = CreateEvent(None, 0, 0, None)
thread_waits.append(wait)
_thread.start_new_thread(stressThread, (server,numMessages, wait))
# Wait for all threads to finish.
WaitForMultipleObjects(thread_waits, 1, INFINITE)
def main():
import sys, getopt
server = "."
thread_count = 0
msg_count = 500
try:
opts, args = getopt.getopt(sys.argv[1:], 's:t:m:vl')
for o,a in opts:
if o=='-s':
server = a
if o=='-m':
msg_count = int(a)
if o=='-t':
thread_count = int(a)
if o=='-v':
global verbose
verbose = 1
if o=='-l':
testLargeMessage(server)
msg = " ".join(args).encode("mbcs")
except getopt.error as msg:
print(msg)
my_name = os.path.split(sys.argv[0])[1]
print("Usage: %s [-v] [-s server] [-t thread_count=0] [-m msg_count=500] msg ..." % my_name)
print(" -v = verbose")
print(" Specifying a value for -t will stress test using that many threads.")
return
testClient(server, msg)
if thread_count > 0:
print("Spawning %d threads each sending %d messages..." % (thread_count, msg_count))
stressTestClient(server, thread_count, msg_count)
if __name__=='__main__':
main()
| sserrot/champion_relationships | venv/Lib/site-packages/win32/Demos/service/pipeTestServiceClient.py | Python | mit | 4,134 |
import datetime
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.models import Group
from django.shortcuts import render, redirect
from allianceauth.services.forms import ServicePasswordForm
from .forms import JabberBroadcastForm
from .manager import OpenfireManager, PingBotException
from .models import OpenfireUser
from .tasks import OpenfireTasks
logger = logging.getLogger(__name__)
ACCESS_PERM = 'openfire.access_openfire'
@login_required
@permission_required(ACCESS_PERM)
def activate_jabber(request):
logger.debug("activate_jabber called by user %s" % request.user)
character = request.user.profile.main_character
logger.debug("Adding jabber user for user %s with main character %s" % (request.user, character))
info = OpenfireManager.add_user(OpenfireTasks.get_username(request.user))
    # A blank username means the user already existed
    if info[0] != "":
OpenfireUser.objects.update_or_create(user=request.user, defaults={'username': info[0]})
logger.debug("Updated authserviceinfo for user %s with jabber credentials. Updating groups." % request.user)
OpenfireTasks.update_groups.delay(request.user.pk)
logger.info("Successfully activated jabber for user %s" % request.user)
messages.success(request, 'Activated jabber account.')
credentials = {
'username': info[0],
'password': info[1],
}
return render(request, 'services/service_credentials.html',
context={'credentials': credentials, 'service': 'Jabber'})
else:
logger.error("Unsuccessful attempt to activate jabber for user %s" % request.user)
messages.error(request, 'An error occurred while processing your jabber account.')
return redirect("services:services")
@login_required
@permission_required(ACCESS_PERM)
def deactivate_jabber(request):
logger.debug("deactivate_jabber called by user %s" % request.user)
if OpenfireTasks.has_account(request.user) and OpenfireTasks.delete_user(request.user):
logger.info("Successfully deactivated jabber for user %s" % request.user)
messages.success(request, 'Deactivated jabber account.')
else:
logger.error("Unsuccessful attempt to deactivate jabber for user %s" % request.user)
messages.error(request, 'An error occurred while processing your jabber account.')
return redirect("services:services")
@login_required
@permission_required(ACCESS_PERM)
def reset_jabber_password(request):
logger.debug("reset_jabber_password called by user %s" % request.user)
if OpenfireTasks.has_account(request.user):
result = OpenfireManager.update_user_pass(request.user.openfire.username)
        # A blank result means the password reset failed
if result != "":
logger.info("Successfully reset jabber password for user %s" % request.user)
messages.success(request, 'Reset jabber password.')
credentials = {
'username': request.user.openfire.username,
'password': result,
}
return render(request, 'services/service_credentials.html',
context={'credentials': credentials, 'service': 'Jabber'})
logger.error("Unsuccessful attempt to reset jabber for user %s" % request.user)
messages.error(request, 'An error occurred while processing your jabber account.')
return redirect("services:services")
@login_required
@permission_required('auth.jabber_broadcast')
def jabber_broadcast_view(request):
logger.debug("jabber_broadcast_view called by user %s" % request.user)
allchoices = []
if request.user.has_perm('auth.jabber_broadcast_all'):
allchoices.append(('all', 'all'))
for g in Group.objects.all():
allchoices.append((str(g.name), str(g.name)))
else:
for g in request.user.groups.all():
allchoices.append((str(g.name), str(g.name)))
if request.method == 'POST':
form = JabberBroadcastForm(request.POST)
form.fields['group'].choices = allchoices
logger.debug("Received POST request containing form, valid: %s" % form.is_valid())
if form.is_valid():
main_char = request.user.profile.main_character
logger.debug("Processing jabber broadcast for user %s with main character %s" % (request.user, main_char))
try:
if main_char is not None:
                    message_to_send = (
                        form.cleaned_data['message']
                        + "\n##### SENT BY: [" + main_char.corporation_ticker + "]"
                        + main_char.character_name
                        + " TO: " + form.cleaned_data['group']
                        + " WHEN: " + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
                        + " #####\n##### Replies are NOT monitored #####\n"
                    )
group_to_send = form.cleaned_data['group']
else:
                    message_to_send = (
                        form.cleaned_data['message']
                        + "\n##### SENT BY: No character but can send pings?"
                        + " TO: " + form.cleaned_data['group']
                        + " WHEN: " + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
                        + " #####\n##### Replies are NOT monitored #####\n"
                    )
group_to_send = form.cleaned_data['group']
OpenfireManager.send_broadcast_message(group_to_send, message_to_send)
messages.success(request, 'Sent jabber broadcast to %s' % group_to_send)
logger.info("Sent jabber broadcast on behalf of user %s" % request.user)
except PingBotException as e:
messages.error(request, e)
else:
form = JabberBroadcastForm()
form.fields['group'].choices = allchoices
logger.debug("Generated broadcast form for user %s containing %s groups" % (
request.user, len(form.fields['group'].choices)))
context = {'form': form}
return render(request, 'services/openfire/broadcast.html', context=context)
@login_required
@permission_required(ACCESS_PERM)
def set_jabber_password(request):
logger.debug("set_jabber_password called by user %s" % request.user)
if request.method == 'POST':
logger.debug("Received POST request with form.")
form = ServicePasswordForm(request.POST)
logger.debug("Form is valid: %s" % form.is_valid())
if form.is_valid() and OpenfireTasks.has_account(request.user):
password = form.cleaned_data['password']
logger.debug("Form contains password of length %s" % len(password))
result = OpenfireManager.update_user_pass(request.user.openfire.username, password=password)
if result != "":
logger.info("Successfully set jabber password for user %s" % request.user)
messages.success(request, 'Set jabber password.')
else:
logger.error("Failed to install custom jabber password for user %s" % request.user)
messages.error(request, 'An error occurred while processing your jabber account.')
return redirect("services:services")
else:
logger.debug("Request is not type POST - providing empty form.")
form = ServicePasswordForm()
logger.debug("Rendering form for user %s" % request.user)
context = {'form': form, 'service': 'Jabber'}
return render(request, 'services/service_password.html', context=context)
| Kaezon/allianceauth | allianceauth/services/modules/openfire/views.py | Python | gpl-2.0 | 7,676 |
from chimera.util.votable import VOTable
from httplib import HTTPConnection
import tempfile
import os
import urllib
class VizQuery(object):
"""
Queries A catalog in Vizier
within a given radius or box of the zenith
"""
def __init__(self):
self.args = {}
self.args["-mime"] = "xml"
self.columns = None
def useCat(self, catName):
"""
@param catName: the catalog's name in Vizier
@type catName: str
Simply sets the catalog's name
"""
self.args["-source"] = catName
def useColumns(self, columns, sortBy, reverse=False):
"""
@param columns: list of catalog's columns to use
@type columns: list
@param sortBy: define which column to sort by
@type sortBy: str
        @param reverse: decide to reverse sort
        @type reverse: bool
Define which columns will be fetched and which column will be used
for sorting.
"""
self.columns = columns.split(",")
self.args["-out"] = columns
if reverse:
self.args["-sort"] = "-" + sortBy
else:
self.args["-sort"] = sortBy
    def sortBy(self, column):
        """
        Sets which column to sort by
        @param column: name of column to sort by
        @type column: str
        """
        self.args["-sort"] = column
def constrainColumns(self, columns):
"""
Use this to add constraints to any of the columns
@param columns: list of dictionaries {COLUMN:condition}
@type columns: list
"""
self.args.update(columns)
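    # A hedged example of a constraint mapping (the column name and condition
    # are illustrative, not taken from a real catalog):
    #   query.constrainColumns({"Vmag": "<8"})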
def useTarget(self, center, radius=None, box=None):
"""
@param center: center of search in catalog
@type center: L{Position}
@param radius: radius of search
@type radius: float
@param box: box size, if you want a square use an integer
if you want a rectangle use a tuple (ww,hh)
@type box: int | tuple
"""
self.args["-c"] = str(center)
self.args["-c.eq"] = "J2000"
        if radius:
            self.args["-c.rd"] = radius
        elif box:
            try:
                self.args["-c.bd"] = "=%fx%f" % box
            except TypeError:
                self.args["-c.bd"] = box
else:
raise TypeError("You must specify either radius or box size")
def find(self, limit=9999):
"""
@param limit: Number of stars to return from Vizier
@type limit: int
"""
assert "-c.rd" in self.args or "-c.bd" in self.args, "No target selected, use useTarget method first."
self.args["-out.max"] = limit
results = tempfile.NamedTemporaryFile(mode='w+',
prefix="chimera.vizquery",
dir=tempfile.gettempdir())
# query the catalog in Vizier's database
conn = HTTPConnection("webviz.u-strasbg.fr")
s = urllib.urlencode(self.args)
conn.request("POST", "/viz-bin/votable", s)
resp = conn.getresponse()
ret = resp.read()
f = open(results.name, "w")
f.write(ret)
f.close()
obj = []
votable = VOTable(results.name)
for linha in votable.getDataRows():
v = [c.getContent() for c in linha.getNodeList()]
obj.append(dict(zip(self.columns, v)))
return obj
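# A minimal, hedged usage sketch. The catalog id, columns and target below are
# illustrative assumptions; a real call needs a chimera Position object and
# network access to Vizier.
#
#   query = VizQuery()
#   query.useCat("I/322A")
#   query.useColumns("UCAC4,RAJ2000,DEJ2000,f.mag", sortBy="f.mag")
#   query.useTarget("12:00:00 -30:00:00", radius=0.5)
#   stars = query.find(limit=10)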
| ankanaan/chimera | src/chimera/util/vizquery.py | Python | gpl-2.0 | 3,452 |
# -*- coding: utf-8 -*-
from core.toad.generictask import GenericTask
from lib.images import Images
__author__ = "Mathieu Desrosiers, Arnaud Bore"
__copyright__ = "Copyright (C) 2016, TOAD"
__credits__ = ["Mathieu Desrosiers", "Arnaud Bore"]
class TensorMrtrix(GenericTask):
def __init__(self, subject):
GenericTask.__init__(self, subject, 'upsampling', 'registration', 'masking', 'qa')
def implement(self):
dwi = self.getUpsamplingImage('dwi', 'upsample')
bFile = self.getUpsamplingImage('grad', None, 'b')
mask = self.getRegistrationImage('mask', 'resample')
iterWLS = self.get('iter') # Number of iteration for tensor estimations
tensorsMrtrix = self.__produceTensors(dwi, bFile, iterWLS, mask)
self.__produceMetrics(tensorsMrtrix, mask, dwi)
# convert diffusion-weighted images to tensor images.
def __produceTensors(self, source, encodingFile, iterWLS, mask=None):
self.info("Starting DWI2Tensor from mrtrix using weighted linear least squares estimator.")
tmp = self.buildName(source, "tmp")
target = self.buildName(source, "tensor")
cmd = "dwi2tensor {} {} -iter {} -grad {} -nthreads {} -quiet ".format(source, tmp, iterWLS, encodingFile, self.getNTreadsMrtrix())
if mask is not None:
cmd += "-mask {}".format(mask)
self.launchCommand(cmd)
return self.rename(tmp, target)
def __produceMetrics(self, source, mask, target):
self.info("Launch tensor2metric from mrtrix.\n")
adc = self.buildName(target, "adc")
fa = self.buildName(target, "fa")
vector = self.buildName(target, "vector")
adImage = self.buildName(target, "ad")
rdImage = self.buildName(target, "rd")
mdImage = self.buildName(target, "md")
value2 = self.buildName(target, "l2")
value3 = self.buildName(target, "l3")
modulate = self.get('modulate')
cmd1 = "tensor2metric {} -adc {} -fa {} -num 1 -vector {} -value {} -modulate {} -nthreads {} -quiet "\
.format(source, adc, fa, vector, adImage , modulate, self.getNTreadsMrtrix())
cmd2 = "tensor2metric {} -num 2 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value2, modulate, self.getNTreadsMrtrix())
cmd3 = "tensor2metric {} -num 3 -value {} -modulate {} -nthreads {} -quiet "\
.format(source, value3, modulate, self.getNTreadsMrtrix())
for cmd in [cmd1, cmd2, cmd3]:
if mask is not None:
cmd += "-mask {} ".format(mask)
self.launchCommand(cmd)
cmd = "mrmath {} {} mean {} -nthreads {} -quiet ".format(value2, value3, rdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
cmd = "mrmath {} {} {} mean {} -nthreads {} -quiet ".format(adImage, value2, value3, mdImage, self.getNTreadsMrtrix())
self.launchCommand(cmd)
def isIgnore(self):
return self.get("ignore")
def meetRequirement(self):
return Images((self.getUpsamplingImage('dwi', 'upsample'), "upsampled diffusion"),
(self.getUpsamplingImage('grad', None, 'b'), "gradient encoding b file"),
(self.getRegistrationImage('mask', 'resample'), 'brain mask'))
def isDirty(self):
return Images((self.getImage("dwi", "tensor"), "mrtrix tensor"),
(self.getImage('dwi', 'adc'), "mean apparent diffusion coefficient (ADC)"),
(self.getImage('dwi', 'vector'), "selected eigenvector(s)"),
(self.getImage('dwi', 'fa'), "fractional anisotropy"),
(self.getImage('dwi', 'ad'), "selected eigenvalue(s) AD" ),
(self.getImage('dwi', 'rd'), "selected eigenvalue(s) RD"),
(self.getImage('dwi', 'md'), "mean diffusivity"))
def qaSupplier(self):
"""Create and supply images for the report generated by qa task
"""
qaImages = Images()
softwareName = 'mrtrix'
#Set information
information = "Estimation using WLS with {} iteration(s)".format(self.get('iter'))
qaImages.setInformation(information)
#Get images
mask = self.getRegistrationImage('mask', 'resample')
#Build qa images
tags = (
('fa', 0.7, 'Fractional anisotropy'),
('ad', 0.005, 'Axial Diffusivity'),
('md', 0.005, 'Mean Diffusivity'),
('rd', 0.005, 'Radial Diffusivity'),
)
for postfix, vmax, description in tags:
image = self.getImage('dwi', postfix)
if image:
imageQa = self.plot3dVolume(
image, fov=mask, vmax=vmax,
colorbar=True, postfix=softwareName)
qaImages.append((imageQa, description))
return qaImages
| mathieudesro/toad | tasks/11-tensormrtrix.py | Python | gpl-2.0 | 4,911 |
"""create and manipulate C data types in Python"""
import os as _os, sys as _sys
__version__ = "1.1.0"
from _ctypes import Union, Structure, Array
from _ctypes import _Pointer
from _ctypes import CFuncPtr as _CFuncPtr
from _ctypes import __version__ as _ctypes_version
from _ctypes import RTLD_LOCAL, RTLD_GLOBAL
from _ctypes import ArgumentError
from struct import calcsize as _calcsize
if __version__ != _ctypes_version:
raise Exception("Version number mismatch", __version__, _ctypes_version)
if _os.name in ("nt", "ce"):
from _ctypes import FormatError
DEFAULT_MODE = RTLD_LOCAL
if _os.name == "posix" and _sys.platform == "darwin":
# On OS X 10.3, we use RTLD_GLOBAL as default mode
# because RTLD_LOCAL does not work at least on some
# libraries. OS X 10.3 is Darwin 7, so we check for
# that.
if int(_os.uname().release.split('.')[0]) < 8:
DEFAULT_MODE = RTLD_GLOBAL
from _ctypes import FUNCFLAG_CDECL as _FUNCFLAG_CDECL, \
FUNCFLAG_PYTHONAPI as _FUNCFLAG_PYTHONAPI, \
FUNCFLAG_USE_ERRNO as _FUNCFLAG_USE_ERRNO, \
FUNCFLAG_USE_LASTERROR as _FUNCFLAG_USE_LASTERROR
# WINOLEAPI -> HRESULT
# WINOLEAPI_(type)
#
# STDMETHODCALLTYPE
#
# STDMETHOD(name)
# STDMETHOD_(type, name)
#
# STDAPICALLTYPE
def create_string_buffer(init, size=None):
"""create_string_buffer(aBytes) -> character array
create_string_buffer(anInteger) -> character array
create_string_buffer(aString, anInteger) -> character array
"""
if isinstance(init, bytes):
if size is None:
size = len(init)+1
buftype = c_char * size
buf = buftype()
buf.value = init
return buf
elif isinstance(init, int):
buftype = c_char * init
buf = buftype()
return buf
raise TypeError(init)
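# Usage sketch (values are illustrative):
#   create_string_buffer(b"hello")    # sized from the bytes, NUL appended
#   create_string_buffer(16)          # 16 zeroed bytes
#   create_string_buffer(b"hi", 8)    # explicit size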
def c_buffer(init, size=None):
## "deprecated, use create_string_buffer instead"
## import warnings
## warnings.warn("c_buffer is deprecated, use create_string_buffer instead",
## DeprecationWarning, stacklevel=2)
return create_string_buffer(init, size)
_c_functype_cache = {}
def CFUNCTYPE(restype, *argtypes, **kw):
"""CFUNCTYPE(restype, *argtypes,
use_errno=False, use_last_error=False) -> function prototype.
restype: the result type
argtypes: a sequence specifying the argument types
The function prototype can be called in different ways to create a
callable object:
prototype(integer address) -> foreign function
prototype(callable) -> create and return a C callable function from callable
prototype(integer index, method name[, paramflags]) -> foreign function calling a COM method
prototype((ordinal number, dll object)[, paramflags]) -> foreign function exported by ordinal
prototype((function name, dll object)[, paramflags]) -> foreign function exported by name
"""
flags = _FUNCFLAG_CDECL
if kw.pop("use_errno", False):
flags |= _FUNCFLAG_USE_ERRNO
if kw.pop("use_last_error", False):
flags |= _FUNCFLAG_USE_LASTERROR
if kw:
raise ValueError("unexpected keyword argument(s) %s" % kw.keys())
try:
return _c_functype_cache[(restype, argtypes, flags)]
except KeyError:
class CFunctionType(_CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
_flags_ = flags
_c_functype_cache[(restype, argtypes, flags)] = CFunctionType
return CFunctionType
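# Usage sketch (the comparison callback below is illustrative):
#   CMPFUNC = CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
#   def py_cmp(a, b):
#       return a[0] - b[0]
#   cmp_callback = CMPFUNC(py_cmp)    # C-callable wrapper suitable for qsort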
if _os.name in ("nt", "ce"):
from _ctypes import LoadLibrary as _dlopen
from _ctypes import FUNCFLAG_STDCALL as _FUNCFLAG_STDCALL
if _os.name == "ce":
# 'ce' doesn't have the stdcall calling convention
_FUNCFLAG_STDCALL = _FUNCFLAG_CDECL
_win_functype_cache = {}
def WINFUNCTYPE(restype, *argtypes, **kw):
# docstring set later (very similar to CFUNCTYPE.__doc__)
flags = _FUNCFLAG_STDCALL
if kw.pop("use_errno", False):
flags |= _FUNCFLAG_USE_ERRNO
if kw.pop("use_last_error", False):
flags |= _FUNCFLAG_USE_LASTERROR
if kw:
raise ValueError("unexpected keyword argument(s) %s" % kw.keys())
try:
return _win_functype_cache[(restype, argtypes, flags)]
except KeyError:
class WinFunctionType(_CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
_flags_ = flags
_win_functype_cache[(restype, argtypes, flags)] = WinFunctionType
return WinFunctionType
if WINFUNCTYPE.__doc__:
WINFUNCTYPE.__doc__ = CFUNCTYPE.__doc__.replace("CFUNCTYPE", "WINFUNCTYPE")
elif _os.name == "posix":
from _ctypes import dlopen as _dlopen
from _ctypes import sizeof, byref, addressof, alignment, resize
from _ctypes import get_errno, set_errno
from _ctypes import _SimpleCData
def _check_size(typ, typecode=None):
    # Check sizeof(ctypes_type) against struct.calcsize. This
# should protect somewhat against a misconfigured libffi.
from struct import calcsize
if typecode is None:
# Most _type_ codes are the same as used in struct
typecode = typ._type_
actual, required = sizeof(typ), calcsize(typecode)
if actual != required:
raise SystemError("sizeof(%s) wrong: %d instead of %d" % \
(typ, actual, required))
class py_object(_SimpleCData):
_type_ = "O"
def __repr__(self):
try:
return super().__repr__()
except ValueError:
return "%s(<NULL>)" % type(self).__name__
_check_size(py_object, "P")
class c_short(_SimpleCData):
_type_ = "h"
_check_size(c_short)
class c_ushort(_SimpleCData):
_type_ = "H"
_check_size(c_ushort)
class c_long(_SimpleCData):
_type_ = "l"
_check_size(c_long)
class c_ulong(_SimpleCData):
_type_ = "L"
_check_size(c_ulong)
if _calcsize("i") == _calcsize("l"):
# if int and long have the same size, make c_int an alias for c_long
c_int = c_long
c_uint = c_ulong
else:
class c_int(_SimpleCData):
_type_ = "i"
_check_size(c_int)
class c_uint(_SimpleCData):
_type_ = "I"
_check_size(c_uint)
class c_float(_SimpleCData):
_type_ = "f"
_check_size(c_float)
class c_double(_SimpleCData):
_type_ = "d"
_check_size(c_double)
class c_longdouble(_SimpleCData):
_type_ = "g"
if sizeof(c_longdouble) == sizeof(c_double):
c_longdouble = c_double
if _calcsize("l") == _calcsize("q"):
# if long and long long have the same size, make c_longlong an alias for c_long
c_longlong = c_long
c_ulonglong = c_ulong
else:
class c_longlong(_SimpleCData):
_type_ = "q"
_check_size(c_longlong)
class c_ulonglong(_SimpleCData):
_type_ = "Q"
## def from_param(cls, val):
## return ('d', float(val), val)
## from_param = classmethod(from_param)
_check_size(c_ulonglong)
class c_ubyte(_SimpleCData):
_type_ = "B"
c_ubyte.__ctype_le__ = c_ubyte.__ctype_be__ = c_ubyte
# backward compatibility:
##c_uchar = c_ubyte
_check_size(c_ubyte)
class c_byte(_SimpleCData):
_type_ = "b"
c_byte.__ctype_le__ = c_byte.__ctype_be__ = c_byte
_check_size(c_byte)
class c_char(_SimpleCData):
_type_ = "c"
c_char.__ctype_le__ = c_char.__ctype_be__ = c_char
_check_size(c_char)
class c_char_p(_SimpleCData):
_type_ = "z"
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, c_void_p.from_buffer(self).value)
_check_size(c_char_p, "P")
class c_void_p(_SimpleCData):
_type_ = "P"
c_voidp = c_void_p # backwards compatibility (to a bug)
_check_size(c_void_p)
class c_bool(_SimpleCData):
_type_ = "?"
from _ctypes import POINTER, pointer, _pointer_type_cache
class c_wchar_p(_SimpleCData):
_type_ = "Z"
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, c_void_p.from_buffer(self).value)
class c_wchar(_SimpleCData):
_type_ = "u"
def _reset_cache():
_pointer_type_cache.clear()
_c_functype_cache.clear()
if _os.name in ("nt", "ce"):
_win_functype_cache.clear()
# _SimpleCData.c_wchar_p_from_param
POINTER(c_wchar).from_param = c_wchar_p.from_param
# _SimpleCData.c_char_p_from_param
POINTER(c_char).from_param = c_char_p.from_param
_pointer_type_cache[None] = c_void_p
# XXX for whatever reasons, creating the first instance of a callback
# function is needed for the unittests on Win64 to succeed. This MAY
# be a compiler bug, since the problem occurs only when _ctypes is
# compiled with the MS SDK compiler. Or an uninitialized variable?
CFUNCTYPE(c_int)(lambda: None)
def create_unicode_buffer(init, size=None):
"""create_unicode_buffer(aString) -> character array
create_unicode_buffer(anInteger) -> character array
create_unicode_buffer(aString, anInteger) -> character array
"""
if isinstance(init, str):
if size is None:
size = len(init)+1
buftype = c_wchar * size
buf = buftype()
buf.value = init
return buf
elif isinstance(init, int):
buftype = c_wchar * init
buf = buftype()
return buf
raise TypeError(init)
# XXX Deprecated
def SetPointerType(pointer, cls):
if _pointer_type_cache.get(cls, None) is not None:
raise RuntimeError("This type already exists in the cache")
if id(pointer) not in _pointer_type_cache:
raise RuntimeError("What's this???")
pointer.set_type(cls)
_pointer_type_cache[cls] = pointer
del _pointer_type_cache[id(pointer)]
# XXX Deprecated
def ARRAY(typ, len):
return typ * len
################################################################
class CDLL(object):
"""An instance of this class represents a loaded dll/shared
library, exporting functions using the standard C calling
convention (named 'cdecl' on Windows).
The exported functions can be accessed as attributes, or by
indexing with the function name. Examples:
<obj>.qsort -> callable object
<obj>['qsort'] -> callable object
Calling the functions releases the Python GIL during the call and
reacquires it afterwards.
"""
_func_flags_ = _FUNCFLAG_CDECL
_func_restype_ = c_int
def __init__(self, name, mode=DEFAULT_MODE, handle=None,
use_errno=False,
use_last_error=False):
self._name = name
flags = self._func_flags_
if use_errno:
flags |= _FUNCFLAG_USE_ERRNO
if use_last_error:
flags |= _FUNCFLAG_USE_LASTERROR
class _FuncPtr(_CFuncPtr):
_flags_ = flags
_restype_ = self._func_restype_
self._FuncPtr = _FuncPtr
if handle is None:
self._handle = _dlopen(self._name, mode)
else:
self._handle = handle
def __repr__(self):
return "<%s '%s', handle %x at %#x>" % \
(self.__class__.__name__, self._name,
(self._handle & (_sys.maxsize*2 + 1)),
id(self) & (_sys.maxsize*2 + 1))
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
raise AttributeError(name)
func = self.__getitem__(name)
setattr(self, name, func)
return func
def __getitem__(self, name_or_ordinal):
func = self._FuncPtr((name_or_ordinal, self))
if not isinstance(name_or_ordinal, int):
func.__name__ = name_or_ordinal
return func
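# Usage sketch (the library name is platform-dependent and illustrative):
#   libc = CDLL("libc.so.6")          # cdll.msvcrt on Windows
#   libc.printf(b"%d\n", 42)          # attribute access returns a _FuncPtr
#   libc["printf"]                    # equivalent item access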
class PyDLL(CDLL):
"""This class represents the Python library itself. It allows to
access Python API functions. The GIL is not released, and
Python exceptions are handled correctly.
"""
_func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
if _os.name in ("nt", "ce"):
class WinDLL(CDLL):
"""This class represents a dll exporting functions using the
Windows stdcall calling convention.
"""
_func_flags_ = _FUNCFLAG_STDCALL
# XXX Hm, what about HRESULT as normal parameter?
# Mustn't it derive from c_long then?
from _ctypes import _check_HRESULT, _SimpleCData
class HRESULT(_SimpleCData):
_type_ = "l"
# _check_retval_ is called with the function's result when it
# is used as restype. It checks for the FAILED bit, and
# raises an OSError if it is set.
#
# The _check_retval_ method is implemented in C, so that the
# method definition itself is not included in the traceback
# when it raises an error - that is what we want (and Python
# doesn't have a way to raise an exception in the caller's
# frame).
_check_retval_ = _check_HRESULT
class OleDLL(CDLL):
"""This class represents a dll exporting functions using the
Windows stdcall calling convention, and returning HRESULT.
HRESULT error values are automatically raised as OSError
exceptions.
"""
_func_flags_ = _FUNCFLAG_STDCALL
_func_restype_ = HRESULT
class LibraryLoader(object):
def __init__(self, dlltype):
self._dlltype = dlltype
def __getattr__(self, name):
if name[0] == '_':
raise AttributeError(name)
dll = self._dlltype(name)
setattr(self, name, dll)
return dll
def __getitem__(self, name):
return getattr(self, name)
def LoadLibrary(self, name):
return self._dlltype(name)
cdll = LibraryLoader(CDLL)
pydll = LibraryLoader(PyDLL)
if _os.name in ("nt", "ce"):
pythonapi = PyDLL("python dll", None, _sys.dllhandle)
elif _sys.platform == "cygwin":
pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2])
else:
pythonapi = PyDLL(None)
if _os.name in ("nt", "ce"):
windll = LibraryLoader(WinDLL)
oledll = LibraryLoader(OleDLL)
if _os.name == "nt":
GetLastError = windll.kernel32.GetLastError
else:
GetLastError = windll.coredll.GetLastError
from _ctypes import get_last_error, set_last_error
def WinError(code=None, descr=None):
if code is None:
code = GetLastError()
if descr is None:
descr = FormatError(code).strip()
return OSError(None, descr, None, code)
if sizeof(c_uint) == sizeof(c_void_p):
c_size_t = c_uint
c_ssize_t = c_int
elif sizeof(c_ulong) == sizeof(c_void_p):
c_size_t = c_ulong
c_ssize_t = c_long
elif sizeof(c_ulonglong) == sizeof(c_void_p):
c_size_t = c_ulonglong
c_ssize_t = c_longlong
# functions
from _ctypes import _memmove_addr, _memset_addr, _string_at_addr, _cast_addr
## void *memmove(void *, const void *, size_t);
memmove = CFUNCTYPE(c_void_p, c_void_p, c_void_p, c_size_t)(_memmove_addr)
## void *memset(void *, int, size_t)
memset = CFUNCTYPE(c_void_p, c_void_p, c_int, c_size_t)(_memset_addr)
def PYFUNCTYPE(restype, *argtypes):
class CFunctionType(_CFuncPtr):
_argtypes_ = argtypes
_restype_ = restype
_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
return CFunctionType
_cast = PYFUNCTYPE(py_object, c_void_p, py_object, py_object)(_cast_addr)
def cast(obj, typ):
return _cast(obj, obj, typ)
_string_at = PYFUNCTYPE(py_object, c_void_p, c_int)(_string_at_addr)
def string_at(ptr, size=-1):
"""string_at(addr[, size]) -> string
Return the string at addr."""
return _string_at(ptr, size)
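# Usage sketch (ptr stands for an integer address or a ctypes pointer):
#   string_at(ptr)       # bytes up to the first NUL terminator
#   string_at(ptr, 4)    # exactly 4 bytes, NULs included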
try:
from _ctypes import _wstring_at_addr
except ImportError:
pass
else:
_wstring_at = PYFUNCTYPE(py_object, c_void_p, c_int)(_wstring_at_addr)
def wstring_at(ptr, size=-1):
"""wstring_at(addr[, size]) -> string
Return the string at addr."""
return _wstring_at(ptr, size)
if _os.name in ("nt", "ce"): # COM stuff
def DllGetClassObject(rclsid, riid, ppv):
try:
ccom = __import__("comtypes.server.inprocserver", globals(), locals(), ['*'])
except ImportError:
return -2147221231 # CLASS_E_CLASSNOTAVAILABLE
else:
return ccom.DllGetClassObject(rclsid, riid, ppv)
def DllCanUnloadNow():
try:
ccom = __import__("comtypes.server.inprocserver", globals(), locals(), ['*'])
except ImportError:
return 0 # S_OK
return ccom.DllCanUnloadNow()
from ctypes._endian import BigEndianStructure, LittleEndianStructure
# Fill in specifically-sized types
c_int8 = c_byte
c_uint8 = c_ubyte
for kind in [c_short, c_int, c_long, c_longlong]:
if sizeof(kind) == 2: c_int16 = kind
elif sizeof(kind) == 4: c_int32 = kind
elif sizeof(kind) == 8: c_int64 = kind
for kind in [c_ushort, c_uint, c_ulong, c_ulonglong]:
if sizeof(kind) == 2: c_uint16 = kind
elif sizeof(kind) == 4: c_uint32 = kind
elif sizeof(kind) == 8: c_uint64 = kind
del(kind)
_reset_cache()
| Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/ctypes/__init__.py | Python | gpl-3.0 | 16,848 |
# -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*-
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import six
from libreoffice.util import printing
from libreoffice.util.uno import TypeClass, make_uno_type, uno_cast
class UnoAnyPrinter(object):
'''Prints UNO any'''
def __init__(self, typename, value):
self.value = value
self.typename = typename.replace('com::sun::star::', '')
def to_string(self):
type_desc = self.value['pType']
assert type_desc
type = make_uno_type(type_desc.dereference())
assert type
if type_desc.dereference()['eTypeClass'] == TypeClass.VOID:
return ('%s(%s)' % (self.typename, type.tag))
else:
ptr = self.value['pData']
assert ptr
return ('%s(%s: %s)' % (self.typename, type.tag, str(uno_cast(type, ptr).dereference())))
class UnoReferencePrinter(object):
'''Prints reference to a UNO interface'''
def __init__(self, typename, value):
self.value = value
self.typename = typename.replace('com::sun::star::', '')
def to_string(self):
iface = self.value['_pInterface']
if iface:
try:
return '%s to (%s) %s' % (self.typename, str(iface.dynamic_type), str(iface))
except:
# fallback for potential problem:
# base class 'com::sun::star::uno::XInterface' is ambiguous
return '%s to (XInterface) %s' % (self.typename, str(iface))
else:
return "empty %s" % self.typename
class UnoSequencePrinter(object):
'''Prints UNO Sequence'''
class iterator(six.Iterator):
'''Sequence iterator'''
def __init__(self, first, size):
self.item = first
self.size = size
self.count = 0
def __iter__(self):
return self
def __next__(self):
if self.count == self.size:
raise StopIteration
count = self.count
self.count = self.count + 1
elem = self.item.dereference()
self.item = self.item + 1
return ('[%d]' % count, elem)
def __init__(self, typename, value):
self.value = value
self.typename = typename.replace('com::sun::star::', '')
def to_string(self):
pimpl = self.value['_pSequence']
if pimpl:
impl = pimpl.dereference()
elems = impl['nElements']
if elems == 0:
return "empty %s" % self.typename
else:
return "%s of length %d" % (self.typename, elems)
else:
return "uninitialized %s" % self.typename
def children(self):
pimpl = self.value['_pSequence']
if pimpl:
impl = pimpl.dereference()
elemtype = self.value.type.template_argument(0)
elements = impl['elements'].cast(elemtype.pointer())
return self.iterator(elements, int(impl['nElements']))
else:
# TODO is that the best thing to do here?
return None
def display_hint(self):
if self.value['_pSequence']:
return 'array'
else:
return None
class UnoTypePrinter(object):
'''Prints UNO Type'''
def __init__(self, typename, value):
self.value = value
self.typename = typename.replace('com::sun::star::', '')
def to_string(self):
uno = make_uno_type(self.value)
if uno:
return "%s %s" % (self.typename, uno.tag)
# return "%s %s" % (self.typename, uno.typename)
else:
return "invalid %s" % self.typename
class CppuThreadpoolThreadPoolPrinter(object):
    '''Prints cppu_threadpool::ThreadPool objects (a hack to avoid infinite
    recursion through sal.RtlReferencePrinter when printing an
    rtl::Reference<cppu_threadpool::ThreadPool> whose
    std::list<cppu_threadpool::WaitingThread*> m_lstThreads member, via
    rtl::Reference<cppu_threadpool::ORequestThread> thread member, via
    rtl::Reference<cppu_threadpool::ThreadPool> m_aThreadPool member, has a
    circular reference back)'''
def __init__(self, typename, value):
self.typename = typename
self.value = value
def to_string(self):
return '%s@%s' % (self.typename, self.value.address)
printer = None
def build_pretty_printers():
global printer
printer = printing.Printer("libreoffice/cppu")
# basic UNO stuff
printer.add('_uno_Any', UnoAnyPrinter)
printer.add('com::sun::star::uno::Any', UnoAnyPrinter)
printer.add('com::sun::star::uno::Reference', UnoReferencePrinter)
printer.add('com::sun::star::uno::Sequence', UnoSequencePrinter)
printer.add('com::sun::star::uno::Type', UnoTypePrinter)
printer.add('cppu_threadpool::ThreadPool', CppuThreadpoolThreadPoolPrinter)
def register_pretty_printers(obj):
printing.register_pretty_printer(printer, obj)
build_pretty_printers()
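# A hedged usage note (the gdb commands below are illustrative): from a gdb
# session with this package on sys.path, the printers can be registered with
#   (gdb) python import libreoffice.cppu as cppu
#   (gdb) python cppu.register_pretty_printers(gdb.current_objfile())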
# vim:set shiftwidth=4 softtabstop=4 expandtab:
| beppec56/core | solenv/gdb/libreoffice/cppu.py | Python | gpl-3.0 | 5,260 |
#!/usr/bin/env python
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import os
from string import upper
import sys
import tempfile
import smac_to_spearmint
import tpe_to_smac
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
def smac_to_spearmint_helper(space, save=""):
# print "Convert %s from SMAC to SPEARMINT" % space
return smac_to_spearmint.convert_smac_to_spearmint(space)
def smac_to_tpe_helper(space, save=""):
print "This is not yet implemented"
def spearmint_to_smac_helper(space, save=""):
print "This is not yet implemented"
def spearmint_to_tpe_helper(space, save=""):
print "This is not yet implemented"
def tpe_to_spearmint_helper(space, save=""):
try:
import hyperopt
except ImportError:
print "Cannot find hyperopt. To use this converter, modify $PYTHONPATH to contain a hyperopt installation"
# First convert to smac
tmp = tpe_to_smac.convert_tpe_to_smac_from_file(space)
handle, tmp_file_name = tempfile.mkstemp()
fh = open(tmp_file_name, 'w')
fh.write(tmp)
fh.close()
# From smac convert to spearmint
new_space = smac_to_spearmint.convert_smac_to_spearmint(tmp_file_name)
os.remove(tmp_file_name)
return new_space
def tpe_to_smac_helper(space, save=""):
try:
import hyperopt
except ImportError:
print "Cannot find hyperopt. To use this converter, modify $PYTHONPATH to contain a hyperopt installation"
return tpe_to_smac.convert_tpe_to_smac_from_file(space)
def main():
# python convert.py --from SMAC --to TPE -f space.any -s space.else
prog = "python convert.py"
description = "Automatically convert a searchspace from one format to another"
parser = ArgumentParser(description=description, prog=prog)
parser.add_argument("--from", dest="conv_from", choices=['SMAC', 'Smac', 'smac',
'TPE', 'Tpe', 'tpe', 'hyperopt',
'SPEARMINT', 'Spearmint', 'spearmint'],
default="", help="Convert from which format?", required=True)
parser.add_argument("--to", dest="conv_to", choices=['SMAC', 'Smac', 'smac',
'TPE', 'Tpe', 'tpe', 'hyperopt',
'SPEARMINT', 'Spearmint', 'spearmint'],
default="", help="Convert to which format?", required=True)
parser.add_argument("-f", "--file", dest="space",
default="", help="Where is the searchspace to be converted?", required=True)
parser.add_argument("-s", "--save", dest="save",
default="", help="Where to save the new searchspace?")
args, unknown = parser.parse_known_args()
space = os.path.abspath(args.space)
if not os.path.isfile(space):
print "%s is not a valid path" % space
sys.exit(1)
# Unifying strings
args.conv_to = upper(args.conv_to)
args.conv_from = upper(args.conv_from)
if args.conv_from == "HYPEROPT":
args.conv_from = "TPE"
if args.conv_to == "HYPEROPT":
        args.conv_to = "TPE"
    if args.conv_to == args.conv_from:
        print "Converting from %s to %s makes no sense" % (args.conv_from, args.conv_to)
        sys.exit(1)
# This is like a switch statement
options = {'SMAC': {'SPEARMINT': smac_to_spearmint_helper,
'TPE': smac_to_tpe_helper},
'SPEARMINT': {'SMAC': spearmint_to_smac_helper,
'TPE': spearmint_to_tpe_helper},
'TPE': {'SPEARMINT': tpe_to_spearmint_helper,
'SMAC': tpe_to_smac_helper}
}
new_space = options[args.conv_from][args.conv_to](space, args.save)
if args.save != "":
fh = open(args.save, 'w')
fh.write(new_space)
fh.close()
else:
print new_space
if __name__ == "__main__":
main() | claesenm/HPOlib | HPOlib/format_converter/convert.py | Python | gpl-3.0 | 4,811 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from unittest.mock import Mock, patch
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.urls import re_path
import pytest
from bedrock.base.urlresolvers import Prefixer, find_supported, reverse, split_path
@pytest.mark.parametrize(
"path, result",
[
# Basic
("en-US/some/action", ("en-US", "some/action")),
# First slash doesn't matter
("/en-US/some/action", ("en-US", "some/action")),
# Nor does capitalization
("En-uS/some/action", ("en-US", "some/action")),
# Unsupported languages return a blank language
("unsupported/some/action", ("", "unsupported/some/action")),
],
)
def test_split_path(path, result):
res = split_path(path)
assert res == result
# Test urlpatterns
urlpatterns = [re_path(r"^test/$", lambda r: None, name="test.view")]
class FakePrefixer:
def __init__(self, fix):
self.fix = fix
@patch("bedrock.base.urlresolvers.get_url_prefix")
@override_settings(ROOT_URLCONF="bedrock.base.tests.test_urlresolvers")
class TestReverse(TestCase):
def test_unicode_url(self, get_url_prefix):
# If the prefixer returns a unicode URL it should be escaped and cast
# as a str object.
get_url_prefix.return_value = FakePrefixer(lambda p: f"/Françoi{p}")
result = reverse("test.view")
# Ensure that UTF-8 characters are escaped properly.
self.assertEqual(result, "/Fran%C3%A7oi/test/")
self.assertEqual(type(result), str)
class TestPrefixer(TestCase):
def setUp(self):
self.factory = RequestFactory()
@override_settings(LANGUAGE_CODE="en-US")
def test_get_language_default_language_code(self):
"""
Should return default set by settings.LANGUAGE_CODE if no 'lang'
url parameter and no Accept-Language header
"""
request = self.factory.get("/")
self.assertFalse("lang" in request.GET)
self.assertFalse(request.headers.get("Accept-Language"))
prefixer = Prefixer(request)
assert prefixer.get_language() == "en-US"
def test_get_language_returns_best(self):
"""
Should pass Accept-Language header value to get_best_language
and return result
"""
request = self.factory.get("/")
request.META["HTTP_ACCEPT_LANGUAGE"] = "de, es"
prefixer = Prefixer(request)
prefixer.get_best_language = Mock(return_value="de")
assert prefixer.get_language() == "de"
prefixer.get_best_language.assert_called_once_with("de, es")
@override_settings(LANGUAGE_CODE="en-US")
def test_get_language_no_best(self):
"""
Should return default set by settings.LANGUAGE_CODE if
get_best_language return value is None
"""
request = self.factory.get("/")
request.META["HTTP_ACCEPT_LANGUAGE"] = "de, es"
prefixer = Prefixer(request)
prefixer.get_best_language = Mock(return_value=None)
assert prefixer.get_language() == "en-US"
prefixer.get_best_language.assert_called_once_with("de, es")
@override_settings(LANGUAGE_URL_MAP={"en-us": "en-US", "de": "de"})
def test_get_best_language_exact_match(self):
"""
Should return exact match if it is in settings.LANGUAGE_URL_MAP
"""
request = self.factory.get("/")
prefixer = Prefixer(request)
assert prefixer.get_best_language("de, es") == "de"
@override_settings(LANGUAGE_URL_MAP={"en-gb": "en-GB", "en-us": "en-US", "es-ar": "es-AR"}, CANONICAL_LOCALES={"es": "es-ES", "en": "en-US"})
def test_get_best_language_prefix_match(self):
"""
Should return a language with a matching prefix from
settings.LANGUAGE_URL_MAP + settings.CANONICAL_LOCALES if it exists but
no exact match does
"""
request = self.factory.get("/")
prefixer = Prefixer(request)
assert prefixer.get_best_language("en") == "en-US"
assert prefixer.get_best_language("en-CA") == "en-US"
assert prefixer.get_best_language("en-GB") == "en-GB"
assert prefixer.get_best_language("en-US") == "en-US"
assert prefixer.get_best_language("es") == "es-ES"
assert prefixer.get_best_language("es-AR") == "es-AR"
assert prefixer.get_best_language("es-CL") == "es-ES"
assert prefixer.get_best_language("es-MX") == "es-ES"
@override_settings(LANGUAGE_URL_MAP={"en-us": "en-US"})
def test_get_best_language_no_match(self):
"""
Should return None if there is no exact match or matching
prefix
"""
request = self.factory.get("/")
prefixer = Prefixer(request)
assert prefixer.get_best_language("de") is None
@override_settings(LANGUAGE_URL_MAP={"en-ar": "en-AR", "en-gb": "en-GB", "en-us": "en-US"}, CANONICAL_LOCALES={"en": "en-US"})
def test_prefixer_with_non_supported_locale(self):
"""
Should set prefixer.locale to a supported locale that repects CANONICAL_LOCALES
when given a URL with a non-supported locale.
"""
request = self.factory.get("/en-CA/")
prefixer = Prefixer(request)
assert prefixer.locale == "en-US"
@override_settings(LANGUAGE_URL_MAP={"es-ar": "es-AR", "en-gb": "en-GB", "es-us": "es-US"}, CANONICAL_LOCALES={"es": "es-ES", "en": "en-US"})
class TestFindSupported(TestCase):
def test_find_supported(self):
assert find_supported("en-CA") == "en-US"
assert find_supported("en-US") == "en-US"
assert find_supported("en-GB") == "en-GB"
assert find_supported("en") == "en-US"
assert find_supported("es-MX") == "es-ES"
assert find_supported("es-AR") == "es-AR"
assert find_supported("es") == "es-ES"
def test_find_supported_none(self):
"""
Should return None if it can't find any supported locale.
"""
assert find_supported("de") is None
assert find_supported("fr") is None
assert find_supported("dude") is None
| mozilla/bedrock | bedrock/base/tests/test_urlresolvers.py | Python | mpl-2.0 | 6,341 |
# -*- coding: utf-8 -*-
"""
sphinx.websupport.search.whooshsearch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Whoosh search adapter.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from whoosh import index
from whoosh.fields import Schema, ID, TEXT
from whoosh.qparser import QueryParser
from whoosh.analysis import StemmingAnalyzer
from sphinx.util.osutil import ensuredir
from sphinx.websupport.search import BaseSearch
class WhooshSearch(BaseSearch):
"""The whoosh search adapter for sphinx web support."""
# Define the Whoosh Schema for the search index.
schema = Schema(path=ID(stored=True, unique=True),
title=TEXT(field_boost=2.0, stored=True),
text=TEXT(analyzer=StemmingAnalyzer(), stored=True))
def __init__(self, db_path):
ensuredir(db_path)
if index.exists_in(db_path):
self.index = index.open_dir(db_path)
else:
self.index = index.create_in(db_path, schema=self.schema)
self.qparser = QueryParser('text', self.schema)
    def init_indexing(self, changed=None):
        for changed_path in changed or []:
self.index.delete_by_term('path', changed_path)
self.index_writer = self.index.writer()
def finish_indexing(self):
self.index_writer.commit()
def add_document(self, pagename, title, text):
self.index_writer.add_document(path=unicode(pagename),
title=title,
text=text)
def handle_query(self, q):
searcher = self.index.searcher()
whoosh_results = searcher.search(self.qparser.parse(q))
results = []
for result in whoosh_results:
context = self.extract_context(result['text'])
results.append((result['path'],
result.get('title', ''),
context))
return results
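# A hedged usage sketch (paths and documents are illustrative):
#   search = WhooshSearch('/tmp/searchindex')
#   search.init_indexing(changed=['contents'])
#   search.add_document(u'contents', u'Contents', u'full text of the page')
#   search.finish_indexing()
#   results = search.handle_query(u'text')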
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sphinx/websupport/search/whooshsearch.py | Python | agpl-3.0 | 2,010 |
"""
Support tool for disabling user accounts.
"""
from django.contrib.auth import get_user_model
from django.db.models import Q
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic import View
from rest_framework.generics import GenericAPIView
from edxmako.shortcuts import render_to_response
from lms.djangoapps.support.decorators import require_support_permission
from openedx.core.djangoapps.user_api.accounts.serializers import AccountUserSerializer
from openedx.core.djangoapps.user_authn.utils import generate_password
from util.json_request import JsonResponse
class ManageUserSupportView(View):
"""
View for viewing and managing user accounts, used by the
support team.
"""
@method_decorator(require_support_permission)
def get(self, request):
"""Render the manage user support tool view."""
return render_to_response('support/manage_user.html', {
_('username'): request.GET.get('user', ''),
_('user_support_url'): reverse('support:manage_user'),
_('user_detail_url'): reverse('support:manage_user_detail')
})
class ManageUserDetailView(GenericAPIView):
"""
Allows viewing and disabling learner accounts by support
staff.
"""
# TODO: ARCH-91
# This view is excluded from Swagger doc generation because it
# does not specify a serializer class.
exclude_from_schema = True
@method_decorator(require_support_permission)
def get(self, request, username_or_email):
"""
Returns details for the given user, along with
information about its username and joining date.
"""
try:
user = get_user_model().objects.get(
Q(username=username_or_email) | Q(email=username_or_email)
)
data = AccountUserSerializer(user, context={'request': request}).data
data['status'] = _('Usable') if user.has_usable_password() else _('Unusable')
return JsonResponse(data)
except get_user_model().DoesNotExist:
return JsonResponse([])
@method_decorator(require_support_permission)
def post(self, request, username_or_email):
"""Allows support staff to disable a user's account."""
user = get_user_model().objects.get(
Q(username=username_or_email) | Q(email=username_or_email)
)
if user.has_usable_password():
user.set_unusable_password()
else:
user.set_password(generate_password(length=25))
user.save()
if user.has_usable_password():
password_status = _('Usable')
msg = _('User Enabled Successfully')
else:
password_status = _('Unusable')
msg = _('User Disabled Successfully')
return JsonResponse({'success_msg': msg, 'status': password_status})
| cpennington/edx-platform | lms/djangoapps/support/views/manage_user.py | Python | agpl-3.0 | 2,968 |
# Copyright 2014 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from trove.guestagent.strategy import Strategy
from trove.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def get_replication_strategy(replication_driver, ns=__name__):
LOG.debug("Getting replication strategy: %s.", replication_driver)
return Strategy.get_strategy(replication_driver, ns)
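# A hedged usage sketch (the driver class name and namespace below are
# hypothetical):
#   cls = get_replication_strategy("MysqlBinlogReplication",
#                                  "trove.guestagent.strategies.replication.mysql_binlog")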
| CMSS-BCRDB/RDSV1.0 | trove/guestagent/strategies/replication/__init__.py | Python | apache-2.0 | 955 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import errno
import io
import os
import shutil
import sys
import unittest
from tempfile import mkdtemp
from unittest import mock
import boto3
import pytest
from moto import mock_s3
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.operators.s3 import S3FileTransformOperator
class TestS3FileTransformOperator(unittest.TestCase):
def setUp(self):
self.content = b"input"
self.bucket = "bucket"
self.input_key = "foo"
self.output_key = "bar"
self.bio = io.BytesIO(self.content)
self.tmp_dir = mkdtemp(prefix='test_tmpS3FileTransform_')
self.transform_script = os.path.join(self.tmp_dir, "transform.py")
os.mknod(self.transform_script)
def tearDown(self):
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
# ENOENT - no such file or directory
if e.errno != errno.ENOENT:
raise e
@mock.patch('subprocess.Popen')
@mock.patch.object(S3FileTransformOperator, 'log')
@mock_s3
def test_execute_with_transform_script(self, mock_log, mock_popen):
process_output = [b"Foo", b"Bar", b"Baz"]
self.mock_process(mock_popen, process_output=process_output)
input_path, output_path = self.s3_paths()
op = S3FileTransformOperator(
source_s3_key=input_path,
dest_s3_key=output_path,
transform_script=self.transform_script,
replace=True,
task_id="task_id",
)
op.execute(None)
mock_log.info.assert_has_calls(
[mock.call(line.decode(sys.getdefaultencoding())) for line in process_output]
)
@mock.patch('subprocess.Popen')
@mock_s3
def test_execute_with_failing_transform_script(self, mock_popen):
self.mock_process(mock_popen, return_code=42)
input_path, output_path = self.s3_paths()
op = S3FileTransformOperator(
source_s3_key=input_path,
dest_s3_key=output_path,
transform_script=self.transform_script,
replace=True,
task_id="task_id",
)
with pytest.raises(AirflowException) as ctx:
op.execute(None)
assert 'Transform script failed: 42' == str(ctx.value)
@mock.patch('subprocess.Popen')
@mock_s3
def test_execute_with_transform_script_args(self, mock_popen):
self.mock_process(mock_popen, process_output=[b"Foo", b"Bar", b"Baz"])
input_path, output_path = self.s3_paths()
script_args = ['arg1', 'arg2']
op = S3FileTransformOperator(
source_s3_key=input_path,
dest_s3_key=output_path,
transform_script=self.transform_script,
script_args=script_args,
replace=True,
task_id="task_id",
)
op.execute(None)
assert script_args == mock_popen.call_args[0][0][3:]
@mock.patch('airflow.providers.amazon.aws.hooks.s3.S3Hook.select_key', return_value="input")
@mock_s3
def test_execute_with_select_expression(self, mock_select_key):
input_path, output_path = self.s3_paths()
select_expression = "SELECT * FROM s3object s"
op = S3FileTransformOperator(
source_s3_key=input_path,
dest_s3_key=output_path,
select_expression=select_expression,
replace=True,
task_id="task_id",
)
op.execute(None)
mock_select_key.assert_called_once_with(key=input_path, expression=select_expression)
conn = boto3.client('s3')
result = conn.get_object(Bucket=self.bucket, Key=self.output_key)
assert self.content == result['Body'].read()
@staticmethod
def mock_process(mock_popen, return_code=0, process_output=None):
mock_proc = mock.MagicMock()
mock_proc.returncode = return_code
mock_proc.stdout.readline.side_effect = process_output or []
mock_proc.wait.return_value = None
mock_popen.return_value.__enter__.return_value = mock_proc
def s3_paths(self):
conn = boto3.client('s3')
conn.create_bucket(Bucket=self.bucket)
conn.upload_fileobj(Bucket=self.bucket, Key=self.input_key, Fileobj=self.bio)
s3_url = "s3://{0}/{1}"
input_path = s3_url.format(self.bucket, self.input_key)
output_path = s3_url.format(self.bucket, self.output_key)
return input_path, output_path
| Acehaidrey/incubator-airflow | tests/providers/amazon/aws/operators/test_s3_file_transform.py | Python | apache-2.0 | 5,277 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
class VariablesTestCase(test.TestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def testDistributeStrategy(self):
v = variables.VariableV1(0.0)
self.assertIsNone(v._distribute_strategy)
@test_util.run_v1_only("b/120545219")
def testInitialization(self):
with self.cached_session():
var0 = variables.VariableV1(0.0)
self.assertEqual("Variable:0", var0.name)
self.assertEqual("Variable", var0._shared_name)
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.shape)
var1 = variables.VariableV1(1.1)
self.assertEqual("Variable_1:0", var1.name)
self.assertEqual("Variable_1", var1._shared_name)
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.shape)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var0)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var0))
self.assertAllClose(1.1, self.evaluate(var1))
@test_util.run_v1_only("b/120545219")
def testInitializationOrder(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
self.assertEqual("rnd:0", rnd.name)
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.shape)
dep = variables.Variable(rnd.initialized_value(), name="dep")
self.assertEqual("dep:0", dep.name)
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.shape)
# Currently have to set the shape manually for Add.
added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
added_val.set_shape(rnd.get_shape())
depdep = variables.Variable(added_val, name="depdep")
self.assertEqual("depdep:0", depdep.name)
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.shape)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(rnd), self.evaluate(dep))
self.assertAllClose(
self.evaluate(rnd) + self.evaluate(dep) + 2.0, self.evaluate(depdep))
@test_util.run_deprecated_v1
def testCyclicInitializer(self):
with self.cached_session():
cyclic = control_flow_ops.while_loop(
cond=lambda i: i < 10,
body=lambda i: i + 1,
loop_vars=(constant_op.constant(0),))
initial_value = variables._try_guard_against_uninitialized_dependencies(
"test", cyclic)
self.assertIs(initial_value, cyclic)
def testIterable(self):
with self.assertRaisesRegex(TypeError, "not iterable"):
for _ in variables.Variable(0.0):
pass
with self.assertRaisesRegex(TypeError, "not iterable"):
for _ in variables.Variable([0.0, 1.0]):
pass
@test_util.run_deprecated_v1
def testAssignments(self):
with self.cached_session():
var = variables.Variable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.assertAllClose(1.0, self.evaluate(plus_one))
self.assertAllClose(1.0, self.evaluate(var))
self.assertAllClose(-1.0, self.evaluate(minus_one))
self.assertAllClose(-1.0, self.evaluate(var))
self.assertAllClose(4.0, self.evaluate(four))
self.assertAllClose(4.0, self.evaluate(var))
@test_util.run_deprecated_v1
def testResourceAssignments(self):
with self.session():
var = resource_variable_ops.ResourceVariable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.evaluate(plus_one)
self.assertAllClose(1.0, self.evaluate(var))
self.evaluate(minus_one)
self.assertAllClose(-1.0, self.evaluate(var))
self.evaluate(four)
self.assertAllClose(4.0, self.evaluate(var))
def testAssignDifferentShapesEagerNotAllowed(self):
with context.eager_mode():
var = variables.Variable(np.zeros(shape=[1, 1]))
with self.assertRaisesRegex(ValueError, "shape.*and.*are incompatible"):
var.assign(np.zeros(shape=[2, 2]))
@test_util.run_in_graph_and_eager_modes
def testAssignDifferentShapesAllowed(self):
var = variables.Variable(np.zeros(shape=[1, 1]),
shape=tensor_shape.TensorShape(None))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(np.zeros(shape=[1, 1]), var.read_value())
self.evaluate(var.assign(np.zeros(shape=[2, 2])))
self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
def testZeroSizeStringAssign(self):
with self.cached_session() as sess:
array = variables.VariableV1(
initial_value=array_ops.zeros((0,), dtype=dtypes.string),
name="foo",
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
self.evaluate(variables.local_variables_initializer())
old_value = array.value()
copy_op = array.assign(old_value)
self.assertEqual([], list(self.evaluate(copy_op)))
def _countUpToTest(self, dtype):
with self.cached_session():
zero = constant_op.constant(0, dtype=dtype)
var = variables.Variable(zero)
count_up_to = var.count_up_to(3)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(var))
self.assertEqual(0, self.evaluate(count_up_to))
self.assertEqual(1, self.evaluate(var))
self.assertEqual(1, self.evaluate(count_up_to))
self.assertEqual(2, self.evaluate(var))
self.assertEqual(2, self.evaluate(count_up_to))
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
@test_util.run_deprecated_v1
def testCountUpToInt32(self):
self._countUpToTest(dtypes.int32)
@test_util.run_deprecated_v1
def testCountUpToInt64(self):
self._countUpToTest(dtypes.int64)
@test_util.run_v1_only("b/120545219")
def testControlDepsNone(self):
with self.cached_session():
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
        # d gets the control dep.
        d = constant_op.constant(2.0)
        # Variables do not.
var_x = variables.VariableV1(2.0)
self.assertEqual([c.op], d.op.control_inputs)
self.assertEqual([], var_x.initializer.control_inputs)
self.assertEqual([], var_x.value().op.control_inputs)
self.assertEqual([], var_x._ref().op.control_inputs) # pylint: disable=protected-access
@test_util.run_v1_only("b/120545219")
def testControlFlow(self):
with self.cached_session() as sess:
v0 = variables.Variable(0, name="v0")
var_dict = {}
      # Create a variable in each of the cond clauses.
def var_in_then_clause():
v1 = variables.Variable(1, name="v1")
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variables.Variable(2, name="v2")
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
self.evaluate(v1.initializer)
self.assertEqual([1], self.evaluate(v1))
self.evaluate(v2.initializer)
self.assertEqual([2], self.evaluate(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
self.evaluate(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
self.evaluate(add)
# If we initialize v0 we should be able to run 'add'.
self.evaluate(v0.initializer)
self.evaluate(add)
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = variables.Variable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegex(ValueError, "inside a control-flow"):
control_flow_ops.while_loop(cond, body, [0, 0])
@test_util.run_deprecated_v1
def testUseVariableAsTensor(self):
with self.cached_session():
var_x = variables.Variable(2.0)
var_y = variables.Variable(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(2.0, self.evaluate(var_x))
self.assertAllClose(3.0, self.evaluate(var_y))
self.assertAllClose(5.0, self.evaluate(math_ops.add(var_x, var_y)))
@test_util.run_deprecated_v1
def testZeroSizeVarSameAsConst(self):
with self.cached_session():
zero_size_var = variables.Variable(array_ops.zeros([0, 2]))
zero_size_const = array_ops.ones([2, 0])
variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
const_mul = math_ops.matmul(
zero_size_const, zero_size_const, transpose_b=True)
self.evaluate(variables.global_variables_initializer())
variable_output = self.evaluate(variable_mul)
self.assertAllClose(self.evaluate(const_mul), variable_output)
self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
@test_util.run_deprecated_v1
def testCachingDevice(self):
with self.cached_session():
var = variables.Variable(2.0)
self.assertEqual(var.device, var.initialized_value().device)
var_cached = variables.Variable(2.0, caching_device="/job:foo")
self.assertFalse(var_cached.device.startswith("/job:foo"))
self.assertTrue(var_cached.value().device.startswith("/job:foo"))
@test_util.run_deprecated_v1
def testCollections(self):
with self.cached_session():
var_x = variables.VariableV1(2.0)
var_y = variables.VariableV1(2.0, trainable=False)
var_z = variables.VariableV1(2.0, trainable=True)
var_t = variables.VariableV1(
2.0,
trainable=True,
collections=[
ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES
])
self.assertEqual([var_x, var_y, var_z, var_t],
variables.global_variables())
self.assertEqual([var_x, var_z, var_t], variables.trainable_variables())
@test_util.run_deprecated_v1
def testCollectionsWithScope(self):
with self.cached_session():
with ops.name_scope("scope_1"):
var_x = variables.VariableV1(2.0)
with ops.name_scope("scope_2"):
var_y = variables.VariableV1(2.0)
self.assertEqual([var_x, var_y], variables.global_variables())
self.assertEqual([var_x], variables.global_variables("scope_1"))
self.assertEqual([var_y], variables.global_variables("scope_2"))
self.assertEqual([var_x, var_y], variables.trainable_variables())
self.assertEqual([var_x], variables.trainable_variables("scope_1"))
self.assertEqual([var_y], variables.trainable_variables("scope_2"))
def testOperatorWrapping(self):
for attr in functools.WRAPPER_ASSIGNMENTS:
self.assertEqual(
getattr(variables.Variable.__add__, attr),
getattr(ops.Tensor.__add__, attr))
@test_util.run_deprecated_v1
def testOperators(self):
with self.cached_session():
var_f = variables.Variable([2.0])
add = var_f + 0.0
radd = 1.0 + var_f
sub = var_f - 1.0
rsub = 1.0 - var_f
mul = var_f * 10.0
rmul = 10.0 * var_f
div = var_f / 10.0
rdiv = 10.0 / var_f
lt = var_f < 3.0
rlt = 3.0 < var_f
le = var_f <= 2.0
rle = 2.0 <= var_f
gt = var_f > 3.0
rgt = 3.0 > var_f
ge = var_f >= 2.0
rge = 2.0 >= var_f
neg = -var_f
abs_v = abs(var_f)
var_i = variables.Variable([20])
mod = var_i % 7
rmod = 103 % var_i
var_b = variables.Variable([True, False])
and_v = operator.and_(var_b, [True, True])
or_v = operator.or_(var_b, [False, True])
xor_v = operator.xor(var_b, [False, False])
invert_v = ~var_b
rnd = np.random.rand(4, 4).astype("f")
var_t = variables.Variable(rnd)
slice_v = var_t[2, 0:0]
var_m = variables.Variable([[2.0, 3.0]])
matmul = var_m.__matmul__([[10.0], [20.0]])
rmatmul = var_m.__rmatmul__([[10.0], [20.0]])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([2.0], self.evaluate(add))
self.assertAllClose([3.0], self.evaluate(radd))
self.assertAllClose([1.0], self.evaluate(sub))
self.assertAllClose([-1.0], self.evaluate(rsub))
self.assertAllClose([20.0], self.evaluate(mul))
self.assertAllClose([20.0], self.evaluate(rmul))
self.assertAllClose([0.2], self.evaluate(div))
self.assertAllClose([5.0], self.evaluate(rdiv))
self.assertAllClose([-2.0], self.evaluate(neg))
self.assertAllClose([2.0], self.evaluate(abs_v))
self.assertAllClose([True], self.evaluate(lt))
self.assertAllClose([False], self.evaluate(rlt))
self.assertAllClose([True], self.evaluate(le))
self.assertAllClose([True], self.evaluate(rle))
self.assertAllClose([False], self.evaluate(gt))
self.assertAllClose([True], self.evaluate(rgt))
self.assertAllClose([True], self.evaluate(ge))
self.assertAllClose([True], self.evaluate(rge))
self.assertAllClose([6], self.evaluate(mod))
self.assertAllClose([3], self.evaluate(rmod))
self.assertAllClose([True, False], self.evaluate(and_v))
self.assertAllClose([True, True], self.evaluate(or_v))
self.assertAllClose([True, False], self.evaluate(xor_v))
self.assertAllClose([False, True], self.evaluate(invert_v))
self.assertAllClose(rnd[2, 0:0], self.evaluate(slice_v))
self.assertAllClose([[80.0]], self.evaluate(matmul))
self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], self.evaluate(rmatmul))
@test_util.run_deprecated_v1
def testSession(self):
with self.cached_session() as sess:
var = variables.Variable([1, 12])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1, 12], self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testColocation(self):
with ops.device("/job:ps"):
var = variables.VariableV1(0, name="v")
with ops.device("/job:worker/task:7"):
assign_op = var.assign(1)
self.assertDeviceEqual("/job:ps", assign_op.device)
self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testInitializerFunction(self):
value = [[-42], [133.7]]
shape = [2, 1]
with self.cached_session():
initializer = lambda: constant_op.constant(value)
v1 = variables.Variable(initializer, dtype=dtypes.float32)
self.assertEqual(shape, v1.get_shape())
self.assertEqual(shape, v1.shape)
self.assertAllClose(value, self.evaluate(v1.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v1)
v2 = variables.Variable(
math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
self.assertEqual(v1.get_shape(), v2.get_shape())
self.assertEqual(v1.shape, v2.shape)
self.assertAllClose(np.negative(value), self.evaluate(v2.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v2)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(np.negative(value), self.evaluate(v2))
def testConstraintArg(self):
constraint = lambda x: x
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
@test_util.run_v1_only("b/120545219")
def testNoRefDataRace(self):
with self.cached_session():
a = variables.Variable([1, 2, 3], dtype=dtypes.float32)
b = variables.Variable(a.initialized_value() + 2)
c = variables.Variable(b.initialized_value() + 2)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(a), [1, 2, 3])
self.assertAllEqual(self.evaluate(b), [3, 4, 5])
self.assertAllEqual(self.evaluate(c), [5, 6, 7])
@test_util.run_deprecated_v1
def testInitializerFunctionDevicePlacement(self):
with self.cached_session():
initializer = lambda: constant_op.constant(42.0)
with ops.device("/cpu:100"):
v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1")
expected_device = "/device:CPU:100"
expected_group_v1 = [b"loc:@v1"]
self.assertEqual(expected_device, v1.op.device)
self.assertEqual(expected_group_v1, v1.op.colocation_groups())
for i in v1.initializer.inputs:
self.assertEqual(expected_group_v1, i.op.colocation_groups())
v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2")
expected_group_v2 = [b"loc:@v2"]
self.assertEqual(expected_group_v2, v2.op.colocation_groups())
for i in v2.initializer.inputs:
self.assertEqual(expected_group_v2, i.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v_def = variables.Variable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session() as sess:
# v describes a VariableDef-based variable without an initial value.
v = variables.Variable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, self.evaluate(v.initialized_value()))
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session() as sess:
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = variables.Variable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = variables.Variable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
variables.Variable(variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = variables.Variable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
variables.Variable(variable_def=trainable_variable.to_proto())
.trainable)
def testSynchronizationAndAggregationSaved(self):
with ops.Graph().as_default():
original_variable = variables.Variable(
initial_value=constant_op.constant(10.0),
synchronization=variables.VariableSynchronization.NONE,
aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA)
self.assertEqual(variables.VariableSynchronization.NONE,
original_variable.synchronization)
self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA,
original_variable.aggregation)
laundered = variables.Variable(
variable_def=original_variable.to_proto())
self.assertEqual(
variables.VariableSynchronization.NONE,
laundered.synchronization)
self.assertEqual(variables.VariableAggregationV2.ONLY_FIRST_REPLICA,
laundered.aggregation)
@test_util.run_deprecated_v1
def testLoad(self):
with self.cached_session():
var = variables.Variable(np.zeros((5, 5), np.float32))
self.evaluate(variables.global_variables_initializer())
var.load(np.ones((5, 5), np.float32))
self.assertAllClose(np.ones((5, 5), np.float32), self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testRepr(self):
var = variables.VariableV1(np.zeros((5, 5), np.float32), name="noop")
self.assertEqual(
"<tf.Variable 'noop:0' shape=(5, 5) dtype=float32_ref>",
repr(var))
def testVariableNamesPreserveNameScopesWithDefun(self):
@function.defun
def create_variable():
with ops.name_scope("foo"):
v = variables.Variable(0.0, name="bar")
self.assertEqual(v.name, "foo/bar:0")
with ops.get_default_graph().as_default():
create_variable()
@parameterized.parameters(variables.VariableV1, variables.Variable)
def testTrainableVariable(self, cls):
v1 = cls(1.0)
self.assertEqual(True, v1.trainable)
v2 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ)
self.assertEqual(False, v2.trainable)
v3 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=True)
self.assertEqual(True, v3.trainable)
v4 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=False)
self.assertEqual(False, v4.trainable)
class IsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default(), self.cached_session() as sess:
uninited = variables.report_uninitialized_variables()
self.assertEqual(0, self.evaluate(uninited).size)
def testAssertVariablesInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable([1, 2], name="v")
w = variables.Variable([3, 4], name="w")
_ = v, w
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(uninited).size)
@test_util.run_v1_only("b/120545219")
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2], name="v")
w = variables.VariableV1([3, 4], name="w")
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(w.initializer)
self.assertAllEqual(np.array([b"v"]), self.evaluate(uninited))
self.evaluate(v.initializer)
self.assertEqual(0, self.evaluate(uninited).size)
def testZeroSizeVarInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable(array_ops.zeros([0, 2]), name="v")
uninited = variables.report_uninitialized_variables()
self.evaluate(v.initializer) # not strictly necessary
self.assertEqual(0, self.evaluate(uninited).size)
def testTrainingWithZeroSizeVar(self):
with ops.Graph().as_default(), self.cached_session() as sess:
a = variables.Variable(array_ops.zeros([0, 2]))
b = variables.Variable(array_ops.ones([2, 2]))
objective = math_ops.reduce_sum(b + math_ops.matmul(
a, a, transpose_a=True))
self.evaluate(variables.global_variables_initializer())
do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
objective)
self.evaluate([do_opt])
self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], self.evaluate(b))
@test_util.run_v1_only("b/120545219")
class ObsoleteIsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default():
self.assertEqual(None, variables.assert_variables_initialized())
def testVariables(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
_ = v, w
inited = variables.assert_variables_initialized()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(inited)
self.evaluate(variables.global_variables_initializer())
self.evaluate(inited)
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
inited = variables.assert_variables_initialized([v])
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
self.evaluate(w.initializer)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
self.evaluate(v.initializer)
inited.op.run()
class PartitionedVariableTest(test.TestCase):
def testPartitionedVariable(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
# Pass variable_list as [v1, v0] to ensure they are properly
# re-sorted to [v0, v1] based on their slice info offsets.
partitioned_variable = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
concatenated = ops.convert_to_tensor(partitioned_variable)
num_partitions = len(partitioned_variable)
iterated_partitions = list(partitioned_variable)
self.assertEqual(2, num_partitions)
self.assertEqual([v0, v1], iterated_partitions)
self.assertEqual([2], partitioned_variable.get_shape())
self.assertEqual([2], partitioned_variable.shape)
self.assertEqual([2], concatenated.get_shape())
self.assertEqual([2], concatenated.shape)
def testPartitionedVariableFailures(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError, "empty"):
variables.PartitionedVariable(
name="fail",
shape=2,
dtype=dtypes.int32,
variable_list=[],
partitions=[])
with self.assertRaisesRegex(ValueError, "must have a save_slice_info"):
v0 = variables.Variable([0])
partitions = [1]
variables.PartitionedVariable(
name="two_vars",
shape=[1],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
with self.assertRaisesRegex(ValueError, "full shapes must match"):
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
variables.PartitionedVariable(
name="two_vars",
shape=[3],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
with self.assertRaisesRegex(ValueError, "must be positive"):
v0 = variables.Variable([0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
partitions = [0]
variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
def testPartitionedVariableAssignments(self):
with ops.Graph().as_default(), self.cached_session():
v0 = variables.Variable(initial_value=[0.0])
v1 = variables.Variable(initial_value=[1.0])
v2 = variables.Variable(initial_value=[20.0])
v3 = variables.Variable(initial_value=[30.0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v1.name, [2], [1], [1]))
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo(v2.name, [2], [0], [1]))
v3._set_save_slice_info(
variables.Variable.SaveSliceInfo(v3.name, [2], [1], [1]))
partitions = [2]
      # variable_list is passed as [v0, v1] and [v2, v3], already ordered to
      # match their slice info offsets (unlike the test above, no re-sorting
      # is exercised here).
pv_0 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0, v1],
partitions=partitions)
pv_1 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v2, v3],
partitions=partitions)
deltas_a = constant_op.constant([1.0, 2.0])
deltas_b = constant_op.constant([3.0, 4.0])
ones = array_ops.ones([2])
plus_delta = pv_0.assign_add(deltas_a)
minus_delta = pv_0.assign_sub(deltas_b)
assign_ones = pv_0.assign(ones)
c_0 = constant_op.constant([2.0])
c_1 = constant_op.constant([3.0])
assign_list = pv_1.assign([c_0, c_1])
assign_part_value = pv_1.assign_add(assign_ones)
assign_part_var = pv_1.assign_sub(pv_0)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([1.0], self.evaluate(plus_delta[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([3.0], self.evaluate(plus_delta[1]))
self.assertEqual([3.0], self.evaluate(v1))
self.assertEqual([-2.0], self.evaluate(minus_delta[0]))
self.assertEqual([-2.0], self.evaluate(v0))
self.assertEqual([-1.0], self.evaluate(minus_delta[1]))
self.assertEqual([-1.0], self.evaluate(v1))
self.assertEqual([1.0], self.evaluate(assign_ones[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([1.0], self.evaluate(assign_ones[1]))
self.assertEqual([1.0], self.evaluate(v1))
self.assertEqual([2.0], self.evaluate(assign_list[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_list[1]))
self.assertEqual([3.0], self.evaluate(v3))
self.assertEqual([3.0], self.evaluate(assign_part_value[0]))
self.assertEqual([3.0], self.evaluate(v2))
self.assertEqual([4.0], self.evaluate(assign_part_value[1]))
self.assertEqual([4.0], self.evaluate(v3))
self.assertEqual([2.0], self.evaluate(assign_part_var[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_part_var[1]))
self.assertEqual([3.0], self.evaluate(v3))
class VariableContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
with ops.container("l1"):
v1 = variables.Variable([1])
with ops.container("l2"):
v2 = variables.Variable([2])
special_v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="VariableInL3",
container="l3",
shared_name="")
v3 = variables.Variable([3])
v4 = variables.Variable([4])
self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
class AggregationModesTest(test.TestCase):
def testV1V2Equal(self):
v1 = variables.VariableAggregation
v2 = variables.VariableAggregationV2
self.assertEqual(v1.NONE, v2.NONE)
self.assertEqual(v1.SUM, v2.SUM)
self.assertEqual(v1.MEAN, v2.MEAN)
self.assertEqual(v1.ONLY_FIRST_REPLICA, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v1.ONLY_FIRST_TOWER, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v2.NONE, v1.NONE)
self.assertEqual(v2.SUM, v1.SUM)
self.assertEqual(v2.MEAN, v1.MEAN)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_REPLICA)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_TOWER)
self.assertEqual(hash(v1.NONE), hash(v2.NONE))
self.assertEqual(hash(v1.SUM), hash(v2.SUM))
self.assertEqual(hash(v1.MEAN), hash(v2.MEAN))
self.assertEqual(hash(v1.ONLY_FIRST_REPLICA), hash(v2.ONLY_FIRST_REPLICA))
self.assertEqual(hash(v1.ONLY_FIRST_TOWER), hash(v2.ONLY_FIRST_REPLICA))
if __name__ == "__main__":
test.main()
| frreiss/tensorflow-fred | tensorflow/python/kernel_tests/variables_test.py | Python | apache-2.0 | 35,743 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import codecs
import functools
import os.path
import re
import sys
import weakref
import ldap.filter
import ldappool
from oslo_log import log
import six
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LW
LOG = log.getLogger(__name__)
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
'sub': ldap.SCOPE_SUBTREE}
LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
'default': None,
'finding': ldap.DEREF_FINDING,
'never': ldap.DEREF_NEVER,
'searching': ldap.DEREF_SEARCHING}
LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
'demand': ldap.OPT_X_TLS_DEMAND,
'allow': ldap.OPT_X_TLS_ALLOW}
# RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to
# indicate that no attributes should be returned besides the DN.
DN_ONLY = ['1.1']
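# Illustrative use of DN_ONLY (an assumed call, not a call site from this
# module): passing it as the attrlist of a search asks the server to return
# entries with no attributes, only their DNs, e.g.:
#   conn.search_s('ou=users,dc=example,dc=org', ldap.SCOPE_SUBTREE,
#                 '(objectClass=person)', DN_ONLY)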
_utf8_encoder = codecs.getencoder('utf-8')
def utf8_encode(value):
"""Encode a basestring to UTF-8.
    If the string is unicode, encode it to UTF-8; if the string is
    str, assume it's already encoded. Otherwise raise a TypeError.
:param value: A basestring
:returns: UTF-8 encoded version of value
:raises: TypeError if value is not basestring
"""
if isinstance(value, six.text_type):
return _utf8_encoder(value)[0]
elif isinstance(value, six.binary_type):
return value
else:
raise TypeError("value must be basestring, "
"not %s" % value.__class__.__name__)
_utf8_decoder = codecs.getdecoder('utf-8')
def utf8_decode(value):
"""Decode a from UTF-8 into unicode.
If the value is a binary string assume it's UTF-8 encoded and decode
it into a unicode string. Otherwise convert the value from its
type into a unicode string.
:param value: value to be returned as unicode
:returns: value as unicode
:raises: UnicodeDecodeError for invalid UTF-8 encoding
"""
if isinstance(value, six.binary_type):
return _utf8_decoder(value)[0]
return six.text_type(value)
def py2ldap(val):
"""Type convert a Python value to a type accepted by LDAP (unicode).
The LDAP API only accepts strings for values therefore convert
the value's type to a unicode string. A subsequent type conversion
will encode the unicode as UTF-8 as required by the python-ldap API,
but for now we just want a string representation of the value.
    :param val: The value to convert to an LDAP string representation
:returns: unicode string representation of value.
"""
if isinstance(val, bool):
return u'TRUE' if val else u'FALSE'
else:
return six.text_type(val)
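# Illustrative conversions performed by py2ldap:
#   py2ldap(True)  -> u'TRUE'
#   py2ldap(False) -> u'FALSE'
#   py2ldap(1000)  -> u'1000'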
def enabled2py(val):
"""Similar to ldap2py, only useful for the enabled attribute."""
try:
return LDAP_VALUES[val]
except KeyError:
pass
try:
return int(val)
except ValueError:
pass
return utf8_decode(val)
def ldap2py(val):
"""Convert an LDAP formatted value to Python type used by OpenStack.
Virtually all LDAP values are stored as UTF-8 encoded strings.
OpenStack prefers values which are unicode friendly.
:param val: LDAP formatted value
:returns: val converted to preferred Python type
"""
return utf8_decode(val)
def convert_ldap_result(ldap_result):
"""Convert LDAP search result to Python types used by OpenStack.
Each result tuple is of the form (dn, attrs), where dn is a string
containing the DN (distinguished name) of the entry, and attrs is
a dictionary containing the attributes associated with the
entry. The keys of attrs are strings, and the associated values
are lists of strings.
OpenStack wants to use Python types of its choosing. Strings will
be unicode, truth values boolean, whole numbers int's, etc. DN's will
also be decoded from UTF-8 to unicode.
:param ldap_result: LDAP search result
:returns: list of 2-tuples containing (dn, attrs) where dn is unicode
and attrs is a dict whose values are type converted to
OpenStack preferred types.
"""
py_result = []
at_least_one_referral = False
for dn, attrs in ldap_result:
ldap_attrs = {}
if dn is None:
# this is a Referral object, rather than an Entry object
at_least_one_referral = True
continue
for kind, values in six.iteritems(attrs):
try:
val2py = enabled2py if kind == 'enabled' else ldap2py
ldap_attrs[kind] = [val2py(x) for x in values]
except UnicodeDecodeError:
LOG.debug('Unable to decode value for attribute %s', kind)
py_result.append((utf8_decode(dn), ldap_attrs))
if at_least_one_referral:
LOG.debug(('Referrals were returned and ignored. Enable referral '
'chasing in keystone.conf via [ldap] chase_referrals'))
return py_result
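# Sketch of the conversion performed above (hypothetical data): a python-ldap
# result such as
#   [('cn=joe,dc=example,dc=org', {'enabled': ['TRUE'], 'cn': ['joe']})]
# becomes
#   [(u'cn=joe,dc=example,dc=org', {'enabled': [True], 'cn': [u'joe']})]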
def safe_iter(attrs):
if attrs is None:
return
elif isinstance(attrs, list):
for e in attrs:
yield e
else:
yield attrs
def parse_deref(opt):
try:
return LDAP_DEREF[opt]
except KeyError:
raise ValueError(_('Invalid LDAP deref option: %(option)s. '
'Choose one of: %(options)s') %
{'option': opt,
'options': ', '.join(LDAP_DEREF.keys()), })
def parse_tls_cert(opt):
try:
return LDAP_TLS_CERTS[opt]
except KeyError:
raise ValueError(_(
'Invalid LDAP TLS certs option: %(option)s. '
'Choose one of: %(options)s') % {
'option': opt,
'options': ', '.join(LDAP_TLS_CERTS.keys())})
def ldap_scope(scope):
try:
return LDAP_SCOPES[scope]
except KeyError:
raise ValueError(
_('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
'scope': scope,
'options': ', '.join(LDAP_SCOPES.keys())})
def prep_case_insensitive(value):
"""Prepare a string for case-insensitive comparison.
This is defined in RFC4518. For simplicity, all this function does is
lowercase all the characters, strip leading and trailing whitespace,
and compress sequences of spaces to a single space.
"""
value = re.sub(r'\s+', ' ', value.strip().lower())
return value
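# For example (illustrative): prep_case_insensitive('  Foo   BAR ') returns
# 'foo bar'.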
def is_ava_value_equal(attribute_type, val1, val2):
"""Returns True if and only if the AVAs are equal.
When comparing AVAs, the equality matching rule for the attribute type
should be taken into consideration. For simplicity, this implementation
does a case-insensitive comparison.
    Note that this function uses prep_case_insensitive so the limitations of
that function apply here.
"""
return prep_case_insensitive(val1) == prep_case_insensitive(val2)
def is_rdn_equal(rdn1, rdn2):
"""Returns True if and only if the RDNs are equal.
* RDNs must have the same number of AVAs.
* Each AVA of the RDNs must be the equal for the same attribute type. The
order isn't significant. Note that an attribute type will only be in one
AVA in an RDN, otherwise the DN wouldn't be valid.
    * Attribute types aren't case sensitive. Note that attribute type
      comparison is more complicated than implemented here. This function
      only compares case-insensitively. The code should also handle multiple
      names for an attribute type (e.g., cn, commonName, and 2.5.4.3 are the
      same).
Note that this function uses is_ava_value_equal to compare AVAs so the
limitations of that function apply here.
"""
if len(rdn1) != len(rdn2):
return False
for attr_type_1, val1, dummy in rdn1:
found = False
for attr_type_2, val2, dummy in rdn2:
if attr_type_1.lower() != attr_type_2.lower():
continue
found = True
if not is_ava_value_equal(attr_type_1, val1, val2):
return False
break
if not found:
return False
return True
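# Illustrative comparison (AVA tuples as produced by ldap.dn.str2dn; the
# third element is python-ldap's flags field):
#   rdn1 = [('cn', 'Joe', 4), ('ou', 'users', 4)]
#   rdn2 = [('OU', 'users', 4), ('CN', 'joe', 4)]
#   is_rdn_equal(rdn1, rdn2) -> True   (AVA order and case do not matter)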
def is_dn_equal(dn1, dn2):
"""Returns True if and only if the DNs are equal.
Two DNs are equal if they've got the same number of RDNs and if the RDNs
are the same at each position. See RFC4517.
Note that this function uses is_rdn_equal to compare RDNs so the
limitations of that function apply here.
:param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn.
:param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
if not isinstance(dn1, list):
dn1 = ldap.dn.str2dn(utf8_encode(dn1))
if not isinstance(dn2, list):
dn2 = ldap.dn.str2dn(utf8_encode(dn2))
if len(dn1) != len(dn2):
return False
for rdn1, rdn2 in zip(dn1, dn2):
if not is_rdn_equal(rdn1, rdn2):
return False
return True
def dn_startswith(descendant_dn, dn):
"""Returns True if and only if the descendant_dn is under the dn.
:param descendant_dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
:param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
if not isinstance(descendant_dn, list):
descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
if not isinstance(dn, list):
dn = ldap.dn.str2dn(utf8_encode(dn))
if len(descendant_dn) <= len(dn):
return False
# Use the last len(dn) RDNs.
return is_dn_equal(descendant_dn[-len(dn):], dn)
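# For example (illustrative DNs):
#   dn_startswith('cn=joe,ou=users,dc=example,dc=org',
#                 'dc=example,dc=org')                      -> True
#   dn_startswith('dc=example,dc=org', 'dc=example,dc=org') -> False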
@six.add_metaclass(abc.ABCMeta)
class LDAPHandler(object):
    '''Abstract class which defines methods for an LDAP API provider.
Native Keystone values cannot be passed directly into and from the
python-ldap API. Type conversion must occur at the LDAP API
    boundary; examples of type conversions are:
* booleans map to the strings 'TRUE' and 'FALSE'
* integer values map to their string representation.
* unicode strings are encoded in UTF-8
In addition to handling type conversions at the API boundary we
have the requirement to support more than one LDAP API
provider. Currently we have:
* python-ldap, this is the standard LDAP API for Python, it
requires access to a live LDAP server.
* Fake LDAP which emulates python-ldap. This is used for
testing without requiring a live LDAP server.
To support these requirements we need a layer that performs type
conversions and then calls another LDAP API which is configurable
(e.g. either python-ldap or the fake emulation).
We have an additional constraint at the time of this writing due to
limitations in the logging module. The logging module is not
capable of accepting UTF-8 encoded strings, it will throw an
encoding exception. Therefore all logging MUST be performed prior
to UTF-8 conversion. This means no logging can be performed in the
ldap APIs that implement the python-ldap API because those APIs
are defined to accept only UTF-8 strings. Thus the layer which
performs type conversions must also do the logging. We do the type
conversions in two steps, once to convert all Python types to
unicode strings, then log, then convert the unicode strings to
UTF-8.
There are a variety of ways one could accomplish this, we elect to
use a chaining technique whereby instances of this class simply
call the next member in the chain via the "conn" attribute. The
chain is constructed by passing in an existing instance of this
class as the conn attribute when the class is instantiated.
Here is a brief explanation of why other possible approaches were
not used:
subclassing
To perform the wrapping operations in the correct order
the type convesion class would have to subclass each of
the API providers. This is awkward, doubles the number of
classes, and does not scale well. It requires the type
conversion class to be aware of all possible API
providers.
decorators
Decorators provide an elegant solution to wrap methods and
would be an ideal way to perform type conversions before
calling the wrapped function and then converting the
values returned from the wrapped function. However
            decorators need to be aware of the method signature; they
            have to know what input parameters need conversion and how
to convert the result. For an API like python-ldap which
has a large number of different method signatures it would
require a large number of specialized
decorators. Experience has shown it's very easy to apply
the wrong decorator due to the inherent complexity and
tendency to cut-n-paste code. Another option is to
parameterize the decorator to make it "smart". Experience
has shown such decorators become insanely complicated and
difficult to understand and debug. Also decorators tend to
            hide what's really going on when a method is called; the
            operations being performed are not visible when looking at
            the implementation of a decorated method, and this too,
            experience has shown, leads to mistakes.
Chaining simplifies both wrapping to perform type conversion as
well as the substitution of alternative API providers. One simply
    creates a new instance of the API interface and inserts it at the
front of the chain. Type conversions are explicit and obvious.
If a new method needs to be added to the API interface one adds it
to the abstract class definition. Should one miss adding the new
method to any derivations of the abstract class the code will fail
to load and run making it impossible to forget updating all the
derived classes.
'''
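    # A minimal sketch of the chaining described above (assumed usage, not a
    # verbatim call site from this module): the outermost handler converts
    # types and logs, then delegates each call to the next link via `conn`:
    #   handler = KeystoneLDAPHandler(conn=PythonLDAPHandler())
    #   handler.connect(url)
    #   handler.simple_bind_s(who, cred)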
@abc.abstractmethod
def __init__(self, conn=None):
self.conn = conn
@abc.abstractmethod
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def set_option(self, option, invalue):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_option(self, option):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def unbind_s(self):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def add_s(self, dn, modlist):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def modify_s(self, dn, modlist):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_s(self, dn):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
raise exception.NotImplemented() # pragma: no cover
class PythonLDAPHandler(LDAPHandler):
'''Implementation of the LDAPHandler interface which calls the
python-ldap API.
Note, the python-ldap API requires all string values to be UTF-8
encoded. The KeystoneLDAPHandler enforces this prior to invoking
the methods in this class.
'''
def __init__(self, conn=None):
super(PythonLDAPHandler, self).__init__(conn=conn)
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level)
self.conn = ldap.initialize(url)
self.conn.protocol_version = ldap.VERSION3
if alias_dereferencing is not None:
self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
self.page_size = page_size
if use_tls:
self.conn.start_tls_s()
if chase_referrals is not None:
self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)
def unbind_s(self):
return self.conn.unbind_s()
def add_s(self, dn, modlist):
return self.conn.add_s(dn, modlist)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return self.conn.search_s(base, scope, filterstr,
attrlist, attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
# The resp_ctrl_classes parameter is a recent addition to the
# API. It defaults to None. We do not anticipate using it.
# To run with older versions of python-ldap we do not pass it.
return self.conn.result3(msgid, all, timeout)
def modify_s(self, dn, modlist):
return self.conn.modify_s(dn, modlist)
def delete_s(self, dn):
return self.conn.delete_s(dn)
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
return self.conn.delete_ext_s(dn, serverctrls, clientctrls)
def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
tls_cacertdir=None, tls_req_cert=None,
debug_level=None):
    '''Common LDAP initialization shared by PythonLDAPHandler and
    PooledLDAPHandler.
'''
LOG.debug("LDAP init: url=%s", url)
LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
'tls_req_cert=%s tls_avail=%s',
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, ldap.TLS_AVAIL)
if debug_level is not None:
ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)
using_ldaps = url.lower().startswith("ldaps")
if use_tls and using_ldaps:
raise AssertionError(_('Invalid TLS / LDAPS combination'))
# The certificate trust options apply for both LDAPS and TLS.
if use_tls or using_ldaps:
if not ldap.TLS_AVAIL:
raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
'not available') % ldap.TLS_AVAIL)
if tls_cacertfile:
# NOTE(topol)
# python ldap TLS does not verify CACERTFILE or CACERTDIR
# so we add some extra simple sanity check verification
# Also, setting these values globally (i.e. on the ldap object)
# works but these values are ignored when setting them on the
# connection
if not os.path.isfile(tls_cacertfile):
raise IOError(_("tls_cacertfile %s not found "
"or is not a file") %
tls_cacertfile)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
elif tls_cacertdir:
# NOTE(topol)
# python ldap TLS does not verify CACERTFILE or CACERTDIR
# so we add some extra simple sanity check verification
# Also, setting these values globally (i.e. on the ldap object)
# works but these values are ignored when setting them on the
# connection
if not os.path.isdir(tls_cacertdir):
raise IOError(_("tls_cacertdir %s not found "
"or is not a directory") %
tls_cacertdir)
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
if tls_req_cert in LDAP_TLS_CERTS.values():
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
else:
LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
tls_req_cert)
class MsgId(list):
'''Wrapper class to hold connection and msgid.'''
pass
def use_conn_pool(func):
    '''Use this only for connection-pool-specific LDAP APIs.
    This adds a connection object to the decorated API as the next argument
    after self.
'''
def wrapper(self, *args, **kwargs):
# assert isinstance(self, PooledLDAPHandler)
with self._get_pool_connection() as conn:
self._apply_options(conn)
return func(self, conn, *args, **kwargs)
return wrapper
class PooledLDAPHandler(LDAPHandler):
    '''Implementation of the LDAPHandler interface which uses a pooled
    connection manager.
Pool specific configuration is defined in [ldap] section.
All other LDAP configuration is still used from [ldap] section
    Keystone LDAP authentication logic authenticates an end user using its DN
    and password via an LDAP bind, to establish that the supplied password is
    correct.
    This can fill up the pool quickly (as the pool re-uses an existing
    connection based on its bind data) and would not leave space in the pool
    for connection re-use by other LDAP operations.
    A separate pool can therefore be established for those requests when the
    related flag 'use_auth_pool' is enabled. That pool can have its own size
    and connection lifetime. Other pool attributes are shared between the two
    pools.
    If 'use_pool' is disabled, then 'use_auth_pool' does not matter.
    If 'use_auth_pool' is not enabled, then connection pooling is not used for
    those LDAP operations.
Note, the python-ldap API requires all string values to be UTF-8
encoded. The KeystoneLDAPHandler enforces this prior to invoking
the methods in this class.
'''
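    # Illustrative keystone.conf settings for this handler (option names
    # assumed from the docstring above; consult the sample configuration for
    # the authoritative set):
    #   [ldap]
    #   use_pool = true
    #   pool_size = 10
    #   use_auth_pool = true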
# Added here to allow override for testing
Connector = ldappool.StateConnector
auth_pool_prefix = 'auth_pool_'
connection_pools = {} # static connector pool dict
def __init__(self, conn=None, use_auth_pool=False):
super(PooledLDAPHandler, self).__init__(conn=conn)
self.who = ''
self.cred = ''
self.conn_options = {} # connection specific options
self.page_size = None
self.use_auth_pool = use_auth_pool
self.conn_pool = None
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level)
self.page_size = page_size
        # The following two options are not set in the common initialization
        # as they need to follow a specific sequence, mirroring the
        # PythonLDAPHandler code.
if alias_dereferencing is not None:
self.set_option(ldap.OPT_DEREF, alias_dereferencing)
if chase_referrals is not None:
self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
if self.use_auth_pool: # separate pool when use_auth_pool enabled
pool_url = self.auth_pool_prefix + url
else:
pool_url = url
try:
self.conn_pool = self.connection_pools[pool_url]
except KeyError:
self.conn_pool = ldappool.ConnectionManager(
url,
size=pool_size,
retry_max=pool_retry_max,
retry_delay=pool_retry_delay,
timeout=pool_conn_timeout,
connector_cls=self.Connector,
use_tls=use_tls,
max_lifetime=pool_conn_lifetime)
self.connection_pools[pool_url] = self.conn_pool
def set_option(self, option, invalue):
self.conn_options[option] = invalue
def get_option(self, option):
value = self.conn_options.get(option)
        # If the option was not specified explicitly, then use the
        # connection's default value for that option, if one exists.
if value is None:
with self._get_pool_connection() as conn:
value = conn.get_option(option)
return value
def _apply_options(self, conn):
# if connection has a lifetime, then it already has options specified
if conn.get_lifetime() > 30:
return
for option, invalue in six.iteritems(self.conn_options):
conn.set_option(option, invalue)
def _get_pool_connection(self):
return self.conn_pool.connection(self.who, self.cred)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
        '''Not using the use_conn_pool decorator here, as this API takes
        cred as input.
'''
self.who = who
self.cred = cred
with self._get_pool_connection() as conn:
self._apply_options(conn)
def unbind_s(self):
        # After the connection generator's `with` statement block finishes,
        # the connection is always released via the finally block in
        # ldappool, so this unbind is a no-op.
pass
@use_conn_pool
def add_s(self, conn, dn, modlist):
return conn.add_s(dn, modlist)
@use_conn_pool
def search_s(self, conn, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return conn.search_s(base, scope, filterstr, attrlist,
attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
        '''This is an asynchronous API which returns a MsgId instance to be
        used in a later result3 call.
        To work with the result3 API in a predictable manner, the same LDAP
        connection which provided the msgid is needed, so the connection and
        msgid are wrapped together in a MsgId instance. The connection
        associated with search_ext is released once the last hard reference
        to the MsgId object is freed. This happens when the caller is done
        using the returned MsgId.
'''
conn_ctxt = self._get_pool_connection()
conn = conn_ctxt.__enter__()
try:
msgid = conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
except Exception:
conn_ctxt.__exit__(*sys.exc_info())
raise
res = MsgId((conn, msgid))
weakref.ref(res, functools.partial(conn_ctxt.__exit__,
None, None, None))
return res
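    # Illustrative flow (assumed caller code): the MsgId returned above keeps
    # the pooled connection alive until result3 consumes it:
    #   msgid = handler.search_ext(base, ldap.SCOPE_SUBTREE, filterstr)
    #   rtype, data, rmsgid, serverctrls = handler.result3(msgid)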
def result3(self, msgid, all=1, timeout=None,
resp_ctrl_classes=None):
        '''Wait for and return the result of an operation previously
        initiated by one of the LDAP asynchronous operation routines
        (e.g. search_ext()), which returned an invocation identifier (a
        message id) upon successful initiation of the operation.
        The input msgid is expected to be an instance of class MsgId, which
        holds the LDAP session/connection used to execute search_ext and the
        message identifier.
        The connection associated with search_ext is released once the last
        hard reference to the MsgId object is freed. This happens when the
        function which requested the msgid and used it in result3 exits.
'''
conn, msg_id = msgid
return conn.result3(msg_id, all, timeout)
@use_conn_pool
def modify_s(self, conn, dn, modlist):
return conn.modify_s(dn, modlist)
@use_conn_pool
def delete_s(self, conn, dn):
return conn.delete_s(dn)
@use_conn_pool
def delete_ext_s(self, conn, dn, serverctrls=None, clientctrls=None):
return conn.delete_ext_s(dn, serverctrls, clientctrls)
class KeystoneLDAPHandler(LDAPHandler):
'''Convert data types and perform logging.
    This LDAP interface wraps the python-ldap based interfaces. The
python-ldap interfaces require string values encoded in UTF-8. The
OpenStack logging framework at the time of this writing is not
capable of accepting strings encoded in UTF-8, the log functions
will throw decoding errors if a non-ascii character appears in a
string.
Prior to the call Python data types are converted to a string
representation as required by the LDAP APIs.
Then logging is performed so we can track what is being
sent/received from LDAP. Also the logging filters security
sensitive items (i.e. passwords).
Then the string values are encoded into UTF-8.
Then the LDAP API entry point is invoked.
Data returned from the LDAP call is converted back from UTF-8
encoded strings into the Python data type used internally in
OpenStack.
'''
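    # Hypothetical usage as a context manager (the class defines __enter__
    # below; the matching __exit__ is expected to unbind on exit):
    #   with KeystoneLDAPHandler(conn=PythonLDAPHandler()) as conn:
    #       conn.connect(url)
    #       conn.simple_bind_s(who, cred)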
def __init__(self, conn=None):
super(KeystoneLDAPHandler, self).__init__(conn=conn)
self.page_size = 0
def __enter__(self):
return self
def _disable_paging(self):
# Disable the pagination from now on
self.page_size = 0
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None,
pool_retry_max=None, pool_retry_delay=None,
pool_conn_timeout=None, pool_conn_lifetime=None):
self.page_size = page_size
return self.conn.connect(url, page_size, alias_dereferencing,
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, chase_referrals,
debug_level=debug_level,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=pool_retry_max,
pool_retry_delay=pool_retry_delay,
pool_conn_timeout=pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime)
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
LOG.debug("LDAP bind: who=%s", who)
who_utf8 = utf8_encode(who)
cred_utf8 = utf8_encode(cred)
return self.conn.simple_bind_s(who_utf8, cred_utf8,
serverctrls=serverctrls,
clientctrls=clientctrls)
def unbind_s(self):
LOG.debug("LDAP unbind")
return self.conn.unbind_s()
def add_s(self, dn, modlist):
ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
for kind, values in modlist]
logging_attrs = [(kind, values
if kind != 'userPassword'
else ['****'])
for kind, values in ldap_attrs]
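        # For example, a modlist entry ('userPassword', ['secret']) is
        # logged as ('userPassword', ['****']), while all other attributes
        # are logged unchanged.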
LOG.debug('LDAP add: dn=%s attrs=%s',
dn, logging_attrs)
dn_utf8 = utf8_encode(dn)
ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)])
for kind, values in ldap_attrs]
return self.conn.add_s(dn_utf8, ldap_attrs_utf8)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
# NOTE(morganfainberg): Remove "None" singletons from this list, which
# allows us to set mapped attributes to "None" as defaults in config.
# Without this filtering, the ldap query would raise a TypeError since
# attrlist is expected to be an iterable of strings.
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s',
base, scope, filterstr, attrlist, attrsonly)
if self.page_size:
ldap_result = self._paged_search_s(base, scope,
filterstr, attrlist)
else:
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist_utf8 = map(utf8_encode, attrlist)
ldap_result = self.conn.search_s(base_utf8, scope,
filterstr_utf8,
attrlist_utf8, attrsonly)
py_result = convert_ldap_result(ldap_result)
return py_result
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
        LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
                  'attrs=%s attrsonly=%s '
                  'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
base, scope, filterstr, attrlist, attrsonly,
serverctrls, clientctrls, timeout, sizelimit)
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def _paged_search_s(self, base, scope, filterstr, attrlist=None):
res = []
use_old_paging_api = False
# The API for the simple paged results control changed between
# python-ldap 2.3 and 2.4. We need to detect the capabilities
# of the python-ldap version we are using.
if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
use_old_paging_api = True
lc = ldap.controls.SimplePagedResultsControl(
controlType=ldap.LDAP_CONTROL_PAGE_OID,
criticality=True,
controlValue=(self.page_size, ''))
page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
else:
lc = ldap.controls.libldap.SimplePagedResultsControl(
criticality=True,
size=self.page_size,
cookie='')
page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist = [attr for attr in attrlist if attr is not None]
attrlist_utf8 = map(utf8_encode, attrlist)
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
        # Loop requesting pages from the LDAP server until it has no more data
while True:
            # Request a page of up to 'page_size' entries from the LDAP server
rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
# Receive the data
res.extend(rdata)
pctrls = [c for c in serverctrls
if c.controlType == page_ctrl_oid]
if pctrls:
# LDAP server supports pagination
if use_old_paging_api:
est, cookie = pctrls[0].controlValue
lc.controlValue = (self.page_size, cookie)
else:
cookie = lc.cookie = pctrls[0].cookie
if cookie:
# There is more data still on the server
# so we request another page
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
else:
                    # Exit condition: no more data on the server
break
else:
LOG.warning(_LW('LDAP Server does not support paging. '
'Disable paging in keystone.conf to '
'avoid this message.'))
self._disable_paging()
break
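        # Example of the overall behavior (hypothetical numbers): with
        # page_size=100 and 250 matching entries, three search_ext/result3
        # round trips are made above, returning 100, 100 and 50 entries.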
return res
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes)
LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
'resp_ctrl_classes=%s ldap_result=%s',
msgid, all, timeout, resp_ctrl_classes, ldap_result)
py_result = convert_ldap_result(ldap_result)
return py_result
def modify_s(self, dn, modlist):
ldap_modlist = [
(op, kind, (None if values is None
else [py2ldap(x) for x in safe_iter(values)]))
for op, kind, values in modlist]
logging_modlist = [(op, kind, (values if kind != 'userPassword'
else ['****']))
for op, kind, values in ldap_modlist]
LOG.debug('LDAP modify: dn=%s modlist=%s',
dn, logging_modlist)
dn_utf8 = utf8_encode(dn)
ldap_modlist_utf8 = [
(op, kind, (None if values is None
else [utf8_encode(x) for x in safe_iter(values)]))
for op, kind, values in ldap_modlist]
return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)
def delete_s(self, dn):
LOG.debug("LDAP delete: dn=%s", dn)
dn_utf8 = utf8_encode(dn)
return self.conn.delete_s(dn_utf8)
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
LOG.debug('LDAP delete_ext: dn=%s serverctrls=%s clientctrls=%s',
dn, serverctrls, clientctrls)
dn_utf8 = utf8_encode(dn)
return self.conn.delete_ext_s(dn_utf8, serverctrls, clientctrls)
def __exit__(self, exc_type, exc_val, exc_tb):
self.unbind_s()
_HANDLERS = {}
def register_handler(prefix, handler):
_HANDLERS[prefix] = handler
def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
for prefix, handler in six.iteritems(_HANDLERS):
if conn_url.startswith(prefix):
return handler()
if use_pool:
return PooledLDAPHandler(use_auth_pool=use_auth_pool)
else:
return PythonLDAPHandler()
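# Illustrative sketch (hypothetical names): tests can register a fake
# handler class for a custom URL scheme so that _get_connection returns
# it instead of a real python-ldap based handler:
#
#     register_handler('fake://', FakeLdap)
#     conn = _get_connection('fake://memory')   # -> FakeLdap()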
def filter_entity(entity_ref):
"""Filter out private items in an entity dict.
:param entity_ref: the entity dictionary. The 'dn' field will be removed.
'dn' is used in LDAP, but should not be returned to the user. This
value may be modified.
:returns: entity_ref
"""
if entity_ref:
entity_ref.pop('dn', None)
return entity_ref
class BaseLdap(object):
DEFAULT_SUFFIX = "dc=example,dc=com"
DEFAULT_OU = None
DEFAULT_STRUCTURAL_CLASSES = None
DEFAULT_ID_ATTR = 'cn'
DEFAULT_OBJECTCLASS = None
DEFAULT_FILTER = None
DEFAULT_EXTRA_ATTR_MAPPING = []
DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
NotFound = None
notfound_arg = None
options_name = None
model = None
attribute_options_names = {}
immutable_attrs = []
attribute_ignore = []
tree_dn = None
def __init__(self, conf):
self.LDAP_URL = conf.ldap.url
self.LDAP_USER = conf.ldap.user
self.LDAP_PASSWORD = conf.ldap.password
self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
self.page_size = conf.ldap.page_size
self.use_tls = conf.ldap.use_tls
self.tls_cacertfile = conf.ldap.tls_cacertfile
self.tls_cacertdir = conf.ldap.tls_cacertdir
self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
self.attribute_mapping = {}
self.chase_referrals = conf.ldap.chase_referrals
self.debug_level = conf.ldap.debug_level
# LDAP Pool specific attribute
self.use_pool = conf.ldap.use_pool
self.pool_size = conf.ldap.pool_size
self.pool_retry_max = conf.ldap.pool_retry_max
self.pool_retry_delay = conf.ldap.pool_retry_delay
self.pool_conn_timeout = conf.ldap.pool_connection_timeout
self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime
# End user authentication pool specific config attributes
self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
self.auth_pool_size = conf.ldap.auth_pool_size
self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime
if self.options_name is not None:
self.suffix = conf.ldap.suffix
if self.suffix is None:
self.suffix = self.DEFAULT_SUFFIX
dn = '%s_tree_dn' % self.options_name
self.tree_dn = (getattr(conf.ldap, dn)
or '%s,%s' % (self.DEFAULT_OU, self.suffix))
idatt = '%s_id_attribute' % self.options_name
self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
objclass = '%s_objectclass' % self.options_name
self.object_class = (getattr(conf.ldap, objclass)
or self.DEFAULT_OBJECTCLASS)
for k, v in six.iteritems(self.attribute_options_names):
v = '%s_%s_attribute' % (self.options_name, v)
self.attribute_mapping[k] = getattr(conf.ldap, v)
attr_mapping_opt = ('%s_additional_attribute_mapping' %
self.options_name)
attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
or self.DEFAULT_EXTRA_ATTR_MAPPING)
self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)
ldap_filter = '%s_filter' % self.options_name
self.ldap_filter = getattr(conf.ldap,
ldap_filter) or self.DEFAULT_FILTER
allow_create = '%s_allow_create' % self.options_name
self.allow_create = getattr(conf.ldap, allow_create)
allow_update = '%s_allow_update' % self.options_name
self.allow_update = getattr(conf.ldap, allow_update)
allow_delete = '%s_allow_delete' % self.options_name
self.allow_delete = getattr(conf.ldap, allow_delete)
member_attribute = '%s_member_attribute' % self.options_name
self.member_attribute = getattr(conf.ldap, member_attribute, None)
self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
if self.notfound_arg is None:
self.notfound_arg = self.options_name + '_id'
attribute_ignore = '%s_attribute_ignore' % self.options_name
self.attribute_ignore = getattr(conf.ldap, attribute_ignore)
self.use_dumb_member = conf.ldap.use_dumb_member
self.dumb_member = (conf.ldap.dumb_member or
self.DUMB_MEMBER_DN)
self.subtree_delete_enabled = conf.ldap.allow_subtree_delete
def _not_found(self, object_id):
if self.NotFound is None:
return exception.NotFound(target=object_id)
else:
return self.NotFound(**{self.notfound_arg: object_id})
def _parse_extra_attrs(self, option_list):
mapping = {}
for item in option_list:
try:
ldap_attr, attr_map = item.split(':')
except Exception:
LOG.warn(_LW(
'Invalid additional attribute mapping: "%s". '
'Format must be <ldap_attribute>:<keystone_attribute>'),
item)
continue
mapping[ldap_attr] = attr_map
return mapping
def _is_dumb_member(self, member_dn):
"""Checks that member is a dumb member.
:param member_dn: DN of member to be checked.
"""
return (self.use_dumb_member
and is_dn_equal(member_dn, self.dumb_member))
def get_connection(self, user=None, password=None, end_user_auth=False):
use_pool = self.use_pool
pool_size = self.pool_size
pool_conn_lifetime = self.pool_conn_lifetime
if end_user_auth:
if not self.use_auth_pool:
use_pool = False
else:
pool_size = self.auth_pool_size
pool_conn_lifetime = self.auth_pool_conn_lifetime
conn = _get_connection(self.LDAP_URL, use_pool,
use_auth_pool=end_user_auth)
conn = KeystoneLDAPHandler(conn=conn)
conn.connect(self.LDAP_URL,
page_size=self.page_size,
alias_dereferencing=self.alias_dereferencing,
use_tls=self.use_tls,
tls_cacertfile=self.tls_cacertfile,
tls_cacertdir=self.tls_cacertdir,
tls_req_cert=self.tls_req_cert,
chase_referrals=self.chase_referrals,
debug_level=self.debug_level,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=self.pool_retry_max,
pool_retry_delay=self.pool_retry_delay,
pool_conn_timeout=self.pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime
)
if user is None:
user = self.LDAP_USER
if password is None:
password = self.LDAP_PASSWORD
# not all LDAP servers require authentication, so we don't bind
# if we don't have any user/pass
if user and password:
conn.simple_bind_s(user, password)
return conn
def _id_to_dn_string(self, object_id):
return u'%s=%s,%s' % (self.id_attr,
ldap.dn.escape_dn_chars(
six.text_type(object_id)),
self.tree_dn)
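    # For example (hypothetical values): with id_attr='cn' and
    # tree_dn='ou=Groups,dc=example,dc=com', _id_to_dn_string('proj1')
    # returns u'cn=proj1,ou=Groups,dc=example,dc=com'.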
def _id_to_dn(self, object_id):
if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
return self._id_to_dn_string(object_id)
with self.get_connection() as conn:
search_result = conn.search_s(
self.tree_dn, self.LDAP_SCOPE,
u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
{'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(
six.text_type(object_id)),
'objclass': self.object_class},
attrlist=DN_ONLY)
if search_result:
dn, attrs = search_result[0]
return dn
else:
return self._id_to_dn_string(object_id)
@staticmethod
def _dn_to_id(dn):
return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])
def _ldap_res_to_model(self, res):
# LDAP attribute names may be returned in a different case than
# they are defined in the mapping, so we need to check for keys
# in a case-insensitive way. We use the case specified in the
# mapping for the model to ensure we have a predictable way of
# retrieving values later.
lower_res = {k.lower(): v for k, v in six.iteritems(res[1])}
id_attrs = lower_res.get(self.id_attr.lower())
if not id_attrs:
message = _('ID attribute %(id_attr)s not found in LDAP '
'object %(dn)s') % ({'id_attr': self.id_attr,
'dn': res[0]})
raise exception.NotFound(message=message)
if len(id_attrs) > 1:
# FIXME(gyee): if this is a multi-value attribute and it has
# multiple values, we can't use it as ID. Retain the dn_to_id
# logic here so it does not potentially break existing
# deployments. We need to fix our read-write LDAP logic so
# it does not get the ID from DN.
message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
'has multiple values and therefore cannot be used '
'as an ID. Will get the ID from DN instead') % (
{'id_attr': self.id_attr,
'dn': res[0]})
LOG.warn(message)
id_val = self._dn_to_id(res[0])
else:
id_val = id_attrs[0]
obj = self.model(id=id_val)
for k in obj.known_keys:
if k in self.attribute_ignore:
continue
try:
map_attr = self.attribute_mapping.get(k, k)
if map_attr is None:
# Ignore attributes that are mapped to None.
continue
v = lower_res[map_attr.lower()]
except KeyError:
pass
else:
try:
obj[k] = v[0]
except IndexError:
obj[k] = None
return obj
def check_allow_create(self):
if not self.allow_create:
action = _('LDAP %s create') % self.options_name
raise exception.ForbiddenAction(action=action)
def check_allow_update(self):
if not self.allow_update:
action = _('LDAP %s update') % self.options_name
raise exception.ForbiddenAction(action=action)
def check_allow_delete(self):
if not self.allow_delete:
action = _('LDAP %s delete') % self.options_name
raise exception.ForbiddenAction(action=action)
def affirm_unique(self, values):
if values.get('name') is not None:
try:
self.get_by_name(values['name'])
except exception.NotFound:
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate name, %s.') %
values['name'])
if values.get('id') is not None:
try:
self.get(values['id'])
except exception.NotFound:
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate ID, %s.') %
values['id'])
def create(self, values):
self.affirm_unique(values)
object_classes = self.structural_classes + [self.object_class]
attrs = [('objectClass', object_classes)]
for k, v in six.iteritems(values):
if k in self.attribute_ignore:
continue
if k == 'id':
# no need to check if v is None as 'id' will always have
# a value
attrs.append((self.id_attr, [v]))
elif v is not None:
attr_type = self.attribute_mapping.get(k, k)
if attr_type is not None:
attrs.append((attr_type, [v]))
extra_attrs = [attr for attr, name
in six.iteritems(self.extra_attr_mapping)
if name == k]
for attr in extra_attrs:
attrs.append((attr, [v]))
if 'groupOfNames' in object_classes and self.use_dumb_member:
attrs.append(('member', [self.dumb_member]))
with self.get_connection() as conn:
conn.add_s(self._id_to_dn(values['id']), attrs)
return values
def _ldap_get(self, object_id, ldap_filter=None):
query = (u'(&(%(id_attr)s=%(id)s)'
u'%(filter)s'
u'(objectClass=%(object_class)s))'
% {'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(
six.text_type(object_id)),
'filter': (ldap_filter or self.ldap_filter or ''),
'object_class': self.object_class})
with self.get_connection() as conn:
try:
                attrs = list(set([self.id_attr] +
                                 list(self.attribute_mapping.values()) +
                                 list(self.extra_attr_mapping.keys())))
res = conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs)
except ldap.NO_SUCH_OBJECT:
return None
try:
return res[0]
except IndexError:
return None
def _ldap_get_all(self, ldap_filter=None):
query = u'(&%s(objectClass=%s))' % (ldap_filter or
self.ldap_filter or
'', self.object_class)
with self.get_connection() as conn:
try:
                attrs = list(set([self.id_attr] +
                                 list(self.attribute_mapping.values()) +
                                 list(self.extra_attr_mapping.keys())))
return conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs)
except ldap.NO_SUCH_OBJECT:
return []
def _ldap_get_list(self, search_base, scope, query_params=None,
attrlist=None):
query = u'(objectClass=%s)' % self.object_class
if query_params:
def calc_filter(attrname, value):
val_esc = ldap.filter.escape_filter_chars(value)
return '(%s=%s)' % (attrname, val_esc)
query = (u'(&%s%s)' %
(query, ''.join([calc_filter(k, v) for k, v in
six.iteritems(query_params)])))
with self.get_connection() as conn:
return conn.search_s(search_base, scope, query, attrlist)
def get(self, object_id, ldap_filter=None):
res = self._ldap_get(object_id, ldap_filter)
if res is None:
raise self._not_found(object_id)
else:
return self._ldap_res_to_model(res)
def get_by_name(self, name, ldap_filter=None):
query = (u'(%s=%s)' % (self.attribute_mapping['name'],
ldap.filter.escape_filter_chars(
six.text_type(name))))
res = self.get_all(query)
try:
return res[0]
except IndexError:
raise self._not_found(name)
def get_all(self, ldap_filter=None):
return [self._ldap_res_to_model(x)
for x in self._ldap_get_all(ldap_filter)]
def update(self, object_id, values, old_obj=None):
if old_obj is None:
old_obj = self.get(object_id)
modlist = []
for k, v in six.iteritems(values):
if k == 'id':
# id can't be modified.
continue
if k in self.attribute_ignore:
# Handle 'enabled' specially since can't disable if ignored.
if k == 'enabled' and (not v):
action = _("Disabling an entity where the 'enable' "
"attribute is ignored by configuration.")
raise exception.ForbiddenAction(action=action)
continue
# attribute value has not changed
if k in old_obj and old_obj[k] == v:
continue
if k in self.immutable_attrs:
msg = (_("Cannot change %(option_name)s %(attr)s") %
{'option_name': self.options_name, 'attr': k})
raise exception.ValidationError(msg)
if v is None:
if old_obj.get(k) is not None:
modlist.append((ldap.MOD_DELETE,
self.attribute_mapping.get(k, k),
None))
continue
current_value = old_obj.get(k)
if current_value is None:
op = ldap.MOD_ADD
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
elif current_value != v:
op = ldap.MOD_REPLACE
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
if modlist:
with self.get_connection() as conn:
try:
conn.modify_s(self._id_to_dn(object_id), modlist)
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
return self.get(object_id)
def delete(self, object_id):
with self.get_connection() as conn:
try:
conn.delete_s(self._id_to_dn(object_id))
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
def deleteTree(self, object_id):
tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
0,
None)
with self.get_connection() as conn:
try:
conn.delete_ext_s(self._id_to_dn(object_id),
serverctrls=[tree_delete_control])
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
except ldap.NOT_ALLOWED_ON_NONLEAF:
# Most LDAP servers do not support the tree_delete_control.
# In these servers, the usual idiom is to first perform a
                # search to get the entries to delete, then delete them
                # in order of child to parent, since LDAP forbids the
# deletion of a parent entry before deleting the children
# of that parent. The simplest way to do that is to delete
# the entries in order of the length of the DN, from longest
# to shortest DN.
dn = self._id_to_dn(object_id)
scope = ldap.SCOPE_SUBTREE
# With some directory servers, an entry with objectclass
# ldapsubentry will not be returned unless it is explicitly
# requested, by specifying the objectclass in the search
# filter. We must specify this, with objectclass=*, in an
# LDAP filter OR clause, in order to return all entries
filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
# We only need the DNs of the entries. Since no attributes
# will be returned, we do not have to specify attrsonly=1.
entries = conn.search_s(dn, scope, filt, attrlist=DN_ONLY)
if entries:
for dn in sorted((e[0] for e in entries),
key=len, reverse=True):
conn.delete_s(dn)
else:
LOG.debug('No entries in LDAP subtree %s', dn)
def add_member(self, member_dn, member_list_dn):
"""Add member to the member list.
:param member_dn: DN of member to be added.
:param member_list_dn: DN of group to which the
member will be added.
:raises: exception.Conflict: If the user was already a member.
self.NotFound: If the group entry didn't exist.
"""
with self.get_connection() as conn:
try:
mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
conn.modify_s(member_list_dn, [mod])
except ldap.TYPE_OR_VALUE_EXISTS:
raise exception.Conflict(_('Member %(member)s '
'is already a member'
' of group %(group)s') % {
'member': member_dn,
'group': member_list_dn})
except ldap.NO_SUCH_OBJECT:
raise self._not_found(member_list_dn)
def remove_member(self, member_dn, member_list_dn):
"""Remove member from the member list.
:param member_dn: DN of member to be removed.
:param member_list_dn: DN of group from which the
member will be removed.
:raises: self.NotFound: If the group entry didn't exist.
ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
"""
with self.get_connection() as conn:
try:
mod = (ldap.MOD_DELETE, self.member_attribute, member_dn)
conn.modify_s(member_list_dn, [mod])
except ldap.NO_SUCH_OBJECT:
raise self._not_found(member_list_dn)
def _delete_tree_nodes(self, search_base, scope, query_params=None):
query = u'(objectClass=%s)' % self.object_class
if query_params:
query = (u'(&%s%s)' %
(query, ''.join(['(%s=%s)'
% (k, ldap.filter.escape_filter_chars(v))
for k, v in
six.iteritems(query_params)])))
not_deleted_nodes = []
with self.get_connection() as conn:
try:
nodes = conn.search_s(search_base, scope, query,
attrlist=DN_ONLY)
except ldap.NO_SUCH_OBJECT:
LOG.debug('Could not find entry with dn=%s', search_base)
raise self._not_found(self._dn_to_id(search_base))
else:
for node_dn, _t in nodes:
try:
conn.delete_s(node_dn)
except ldap.NO_SUCH_OBJECT:
not_deleted_nodes.append(node_dn)
if not_deleted_nodes:
LOG.warn(_LW("When deleting entries for %(search_base)s, could not"
" delete nonexistent entries %(entries)s%(dots)s"),
{'search_base': search_base,
'entries': not_deleted_nodes[:3],
'dots': '...' if len(not_deleted_nodes) > 3 else ''})
def filter_query(self, hints, query=None):
"""Applies filtering to a query.
:param hints: contains the list of filters, which may be None,
indicating that there are no filters to be applied.
If it's not None, then any filters satisfied here will be
removed so that the caller will know if any filters
remain to be applied.
:param query: LDAP query into which to include filters
:returns query: LDAP query, updated with any filters satisfied
"""
def build_filter(filter_, hints):
"""Build a filter for the query.
:param filter_: the dict that describes this filter
:param hints: contains the list of filters yet to be satisfied.
:returns query: LDAP query term to be added
"""
ldap_attr = self.attribute_mapping[filter_['name']]
val_esc = ldap.filter.escape_filter_chars(filter_['value'])
if filter_['case_sensitive']:
# NOTE(henry-nash): Although dependent on the schema being
# used, most LDAP attributes are configured with case
# insensitive matching rules, so we'll leave this to the
# controller to filter.
return
if filter_['name'] == 'enabled':
                # NOTE(henry-nash): Due to the different options for storing
                # the enabled attribute (e.g. emulated or not), for now we
                # don't try to filter this at the driver level - we simply
                # leave the filter to be handled by the controller. It seems
                # unlikely that this will cause a significant performance
                # issue.
return
# TODO(henry-nash): Currently there are no booleans (other than
# 'enabled' that is handled above) on which you can filter. If
# there were, we would need to add special handling here to
            # convert the boolean values to 'TRUE' and 'FALSE'. To do that
# we would also need to know which filter keys were actually
# booleans (this is related to bug #1411478).
if filter_['comparator'] == 'equals':
query_term = (u'(%(attr)s=%(val)s)'
% {'attr': ldap_attr, 'val': val_esc})
elif filter_['comparator'] == 'contains':
query_term = (u'(%(attr)s=*%(val)s*)'
% {'attr': ldap_attr, 'val': val_esc})
elif filter_['comparator'] == 'startswith':
query_term = (u'(%(attr)s=%(val)s*)'
% {'attr': ldap_attr, 'val': val_esc})
elif filter_['comparator'] == 'endswith':
query_term = (u'(%(attr)s=*%(val)s)'
% {'attr': ldap_attr, 'val': val_esc})
else:
# It's a filter we don't understand, so let the caller
# work out if they need to do something with it.
return
return query_term
if hints is None:
return query
filter_list = []
satisfied_filters = []
for filter_ in hints.filters:
if filter_['name'] not in self.attribute_mapping:
continue
new_filter = build_filter(filter_, hints)
if new_filter is not None:
filter_list.append(new_filter)
satisfied_filters.append(filter_)
if filter_list:
query = u'(&%s%s)' % (query, ''.join(filter_list))
        # Remove satisfied filters so the caller knows which filters remain
for filter_ in satisfied_filters:
hints.filters.remove(filter_)
return query
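    # Illustrative mapping (hypothetical filter dicts) of the comparators
    # handled by filter_query above, assuming 'name' maps to LDAP 'cn':
    #
    #     {'name': 'name', 'comparator': 'equals',     'value': 'demo'} -> (cn=demo)
    #     {'name': 'name', 'comparator': 'contains',   'value': 'demo'} -> (cn=*demo*)
    #     {'name': 'name', 'comparator': 'startswith', 'value': 'demo'} -> (cn=demo*)
    #     {'name': 'name', 'comparator': 'endswith',   'value': 'demo'} -> (cn=*demo)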
class EnabledEmuMixIn(BaseLdap):
"""Emulates boolean 'enabled' attribute if turned on.
    Creates a groupOfNames entry holding all enabled objects of this class;
    objects missing from the group are considered disabled.
Options:
* $name_enabled_emulation - boolean, on/off
* $name_enabled_emulation_dn - DN of that groupOfNames, default is
cn=enabled_${name}s,${tree_dn}
Where ${name}s is the plural of self.options_name ('users' or 'tenants'),
${tree_dn} is self.tree_dn.
"""
def __init__(self, conf):
super(EnabledEmuMixIn, self).__init__(conf)
enabled_emulation = '%s_enabled_emulation' % self.options_name
self.enabled_emulation = getattr(conf.ldap, enabled_emulation)
enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)
if not self.enabled_emulation_dn:
naming_attr_name = 'cn'
naming_attr_value = 'enabled_%ss' % self.options_name
sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
naming_attr = (naming_attr_name, [naming_attr_value])
else:
# Extract the attribute name and value from the configured DN.
naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
naming_rdn = naming_dn[0][0]
naming_attr = (utf8_decode(naming_rdn[0]),
utf8_decode(naming_rdn[1]))
self.enabled_emulation_naming_attr = naming_attr
def _get_enabled(self, object_id):
dn = self._id_to_dn(object_id)
query = '(member=%s)' % dn
with self.get_connection() as conn:
try:
enabled_value = conn.search_s(self.enabled_emulation_dn,
ldap.SCOPE_BASE,
query, ['cn'])
except ldap.NO_SUCH_OBJECT:
return False
else:
return bool(enabled_value)
def _add_enabled(self, object_id):
if not self._get_enabled(object_id):
modlist = [(ldap.MOD_ADD,
'member',
[self._id_to_dn(object_id)])]
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except ldap.NO_SUCH_OBJECT:
attr_list = [('objectClass', ['groupOfNames']),
('member', [self._id_to_dn(object_id)]),
self.enabled_emulation_naming_attr]
if self.use_dumb_member:
attr_list[1][1].append(self.dumb_member)
conn.add_s(self.enabled_emulation_dn, attr_list)
def _remove_enabled(self, object_id):
modlist = [(ldap.MOD_DELETE,
'member',
[self._id_to_dn(object_id)])]
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
pass
def create(self, values):
if self.enabled_emulation:
enabled_value = values.pop('enabled', True)
ref = super(EnabledEmuMixIn, self).create(values)
if 'enabled' not in self.attribute_ignore:
if enabled_value:
self._add_enabled(ref['id'])
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).create(values)
def get(self, object_id, ldap_filter=None):
ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
ref['enabled'] = self._get_enabled(object_id)
return ref
def get_all(self, ldap_filter=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
            # had to copy BaseLdap.get_all here to filter results by DN
tenant_list = [self._ldap_res_to_model(x)
for x in self._ldap_get_all(ldap_filter)
if x[0] != self.enabled_emulation_dn]
for tenant_ref in tenant_list:
tenant_ref['enabled'] = self._get_enabled(tenant_ref['id'])
return tenant_list
else:
return super(EnabledEmuMixIn, self).get_all(ldap_filter)
def update(self, object_id, values, old_obj=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
data = values.copy()
enabled_value = data.pop('enabled', None)
ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
if enabled_value is not None:
if enabled_value:
self._add_enabled(object_id)
else:
self._remove_enabled(object_id)
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).update(
object_id, values, old_obj)
def delete(self, object_id):
if self.enabled_emulation:
self._remove_enabled(object_id)
super(EnabledEmuMixIn, self).delete(object_id)
class ProjectLdapStructureMixin(object):
"""Project LDAP Structure shared between LDAP backends.
This is shared between the resource and assignment LDAP backends.
"""
DEFAULT_OU = 'ou=Groups'
DEFAULT_STRUCTURAL_CLASSES = []
DEFAULT_OBJECTCLASS = 'groupOfNames'
DEFAULT_ID_ATTR = 'cn'
NotFound = exception.ProjectNotFound
notfound_arg = 'project_id' # NOTE(yorik-sar): while options_name = tenant
options_name = 'project'
attribute_options_names = {'name': 'name',
'description': 'desc',
'enabled': 'enabled',
'domain_id': 'domain_id'}
immutable_attrs = ['name']
| jumpstarter-io/keystone | keystone/common/ldap/core.py | Python | apache-2.0 | 76,368 |
import logging
import re
from airflow.hooks import PigCliHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class PigOperator(BaseOperator):
"""
    Executes a pig script.
:param pig: the pig latin script to be executed
:type pig: string
:param pig_cli_conn_id: reference to the Hive database
:type pig_cli_conn_id: string
:param pigparams_jinja_translate: when True, pig params-type templating
${var} gets translated into jinja-type templating {{ var }}. Note that
you may want to use this along with the
``DAG(user_defined_macros=myargs)`` parameter. View the DAG
object documentation for more details.
:type pigparams_jinja_translate: boolean
"""
template_fields = ('pig',)
template_ext = ('.pig', '.piglatin',)
ui_color = '#f0e4ec'
@apply_defaults
def __init__(
self, pig,
pig_cli_conn_id='pig_cli_default',
pigparams_jinja_translate=False,
*args, **kwargs):
super(PigOperator, self).__init__(*args, **kwargs)
self.pigparams_jinja_translate = pigparams_jinja_translate
self.pig = pig
self.pig_cli_conn_id = pig_cli_conn_id
def get_hook(self):
return PigCliHook(pig_cli_conn_id=self.pig_cli_conn_id)
def prepare_template(self):
if self.pigparams_jinja_translate:
            self.pig = re.sub(
                r"(\$([a-zA-Z_][a-zA-Z0-9_]*))", r"{{ \g<2> }}", self.pig)
def execute(self, context):
logging.info('Executing: ' + self.pig)
self.hook = self.get_hook()
self.hook.run_cli(pig=self.pig)
def on_kill(self):
self.hook.kill()
| dud225/incubator-airflow | airflow/operators/pig_operator.py | Python | apache-2.0 | 1,716 |
# Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
# - Copyright 2013 Arnaud Fabre
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from testscenarios.testcase import TestWithScenarios
from testtools import TestCase
from jenkins_jobs.modules import general
from tests.base import BaseTestCase
from tests.base import get_scenarios
class TestCaseModuleGeneral(TestWithScenarios, BaseTestCase, TestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures')
scenarios = get_scenarios(fixtures_path)
klass = general.General
| joostvdg/jenkins-job-builder | tests/general/test_general.py | Python | apache-2.0 | 1,118 |
"""
@package mi.dataset.parser
@file marine-integrations/mi/dataset/parser/ctdav_n_auv.py
@author Jeff Roy
@brief Parser and particle Classes and tools for the ctdav_n_auv data
Release notes:
initial release
"""
__author__ = 'Jeff Roy'
__license__ = 'Apache 2.0'
from mi.core.log import get_logger
log = get_logger()
from mi.dataset.parser.auv_common import \
AuvCommonParticle, \
AuvCommonParser, \
compute_timestamp
# The structure below is a list of tuples
# Each tuple consists of
# parameter name, index into raw data parts list, encoding function
CTDAV_N_AUV_PARAM_MAP = [
# message ID is typically index 0
('mission_epoch', 1, int),
('auv_latitude', 2, float),
('auv_longitude', 3, float),
('mission_time', 4, int),
('m_depth', 5, float),
('ctdav_n_auv_conductivity', 6, float),
('temperature', 7, float),
('salinity', 8, float),
('speed_of_sound', 9, float),
('dissolved_oxygen', 10, float),
('powered_on', 11, int)
]
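# For example (hypothetical raw record): given parts split from a ctdav_n
# message such as ['1181', '1397773188', '40.1', '-70.9', ...], the tuple
# ('mission_epoch', 1, int) above yields mission_epoch = int(parts[1]).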
class CtdavNAuvInstrumentParticle(AuvCommonParticle):
_auv_param_map = CTDAV_N_AUV_PARAM_MAP
# must provide a parameter map for _build_parsed_values
class CtdavNAuvTelemeteredParticle(CtdavNAuvInstrumentParticle):
# set the data_particle_type for the DataParticle class
_data_particle_type = "ctdav_n_auv_instrument"
class CtdavNAuvRecoveredParticle(CtdavNAuvInstrumentParticle):
# set the data_particle_type for the DataParticle class
_data_particle_type = "ctdav_n_auv_instrument_recovered"
CTDAV_N_AUV_ID = '1181' # message ID of ctdav_n records
CTDAV_N_AUV_FIELD_COUNT = 12 # number of expected fields in an ctdav_n record
CTDAV_N_AUV_TELEMETERED_MESSAGE_MAP = [(CTDAV_N_AUV_ID,
CTDAV_N_AUV_FIELD_COUNT,
compute_timestamp,
CtdavNAuvTelemeteredParticle)]
CTDAV_N_AUV_RECOVERED_MESSAGE_MAP = [(CTDAV_N_AUV_ID,
CTDAV_N_AUV_FIELD_COUNT,
compute_timestamp,
CtdavNAuvRecoveredParticle)]
class CtdavNAuvParser(AuvCommonParser):
def __init__(self,
stream_handle,
exception_callback,
is_telemetered):
if is_telemetered:
message_map = CTDAV_N_AUV_TELEMETERED_MESSAGE_MAP
else:
message_map = CTDAV_N_AUV_RECOVERED_MESSAGE_MAP
# provide message ID and # of fields to parent class
super(CtdavNAuvParser, self).__init__(stream_handle,
exception_callback,
message_map)
| janeen666/mi-instrument | mi/dataset/parser/ctdav_n_auv.py | Python | bsd-2-clause | 2,741 |
# Copyright 2009-2015 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT, arg_user
class DeactivateMFADevice(IAMRequest):
DESCRIPTION = 'Deactivate an MFA device'
ARGS = [arg_user(
help='user owning the MFA device to deactivate (required)'),
Arg('-s', '--serial-number', dest='SerialNumber', metavar='SERIAL',
required=True, help='''serial number of the MFA device to
deactivate (required)'''),
AS_ACCOUNT]
| vasiliykochergin/euca2ools | euca2ools/commands/iam/deactivatemfadevice.py | Python | bsd-2-clause | 1,876 |
"""scanf.py: scanf-style input for Python.
Danny Yoo (dyoo@hkn.eecs.berkeley.edu)
The initial motivation for this module was based on a posting on
Python-tutor:
http://mail.python.org/pipermail/tutor/2004-July/030480.html
I haven't been able to find a nice module to do scanf-style input.
Even the Library Reference recommends regular expressions as a
substitute:
http://docs.python.org/lib/node109.html
But there appears to have been activity about this on python-list:
http://aspn.activestate.com/ASPN/Mail/Message/python-list/785450
Still, let's see if we can get a close equivalent scanf() in place.
At the least, it'll be fun for me, and it might be useful for people
who are still recovering from C. *grin*
Functions provided:
scanf(formatString) -- formatted scanning across stdin
sscanf(sourceString, formatString) -- formatted scanning across strings
fscanf(sourceFile, formatString) -- formatted scanning across files
The behavior of this scanf() will be slightly different from that
defined in C, because, in truth, I'm a little lazy, and am not quite
sure if people will need all of scanf's features in typical Python
programming.
But let's first show what conversions this scanf() will support.
Format strings are of the following form:
% [*] [width] [format]
where [*] and [width] are optional, and [format] is mandatory. The
optional flags modify the format.
* suppresses variable capture.
width maximum character width.
We support the following scanf conversion formats (copied from K&R):
d decimal integer.
i integer. The integer may be in octal (leading zero) or
hexadecimal (leading 0x or 0X). ## fixme
o octal integer (with or without leading zero). ## fixme
x hexadecimal integer (with or without leading 0x or 0X) ## fixme
c characters. The next input characters (default 1) are
placed at the indicated spot. The normal skip over white space
is suppressed; to read the next non-white space character, use
%1s.
s character string (not quoted).
f floating-point number with optional sign and optional decimal point.
% literal %; no assignment is made.
Literal characters can appear in the scanf format string: they must
match the same characters in the input.
There is no guarantee of what happens if calls to scanf are mixed with
other input functions. See the BUGS section below for details on this.
If the input doesn't conform to the format string, a FormatError is
raised.
Example format strings:
"%d %d" Two decimal integers.
"%d.%d.%d.%d" Four decimal integers, separated by literal periods.
The periods won't be captured.
"hello %s" Literally matches "hello" followed by any number of
spaces, followed by a captured word.
There's also an interface for calling the internal function bscanf()
that works on CharacterBuffer types, if in the future there is
something that supports getc() and ungetc() natively. There's also an
undocumented compile() function that takes format strings and returns
a function that can scan through CharacterBuffers. Ooops, I guess I
just documented it. *grin*
######################################################################
BUGS and GOTCHAS:
One major problem that I'm running into is a lack of ungetc(); it
would be nice if there were such a function in Python, but I can't
find it. I have to simulate it by using a CharacterBuffer object, but
it's not an ideal solution.
So at most, you may lose a single character to the internal buffers
maintained by this module if you use scanf(). The other two *scanf()
functions, thankfully, aren't effected by this problem, since I can
simulate ungetc() more accurately by using seek() in the other two
cases.
If you really need to get that buffered character back, you can grab
it through _STDIN.lastChar, though manually fiddling with this is not
recommended.
So use scanf() with the following caveat: unlike C's stdin(), this
version scanf() can't be interchanged with calls to other input
functions without some kind of weird side effect. We keep a
one-character buffer into stdin, so at most you might lose one
character to the internal buffers.
fscanf() is only allowed to work on things that support both read(1)
and seek(1, -1), since then I can reliably do an ungetch-like thing.
scanf("%s") can be dangerous in a hostile environment, since it's very
possible for something to pass in a huge string without spaces. So use
an explicit width instead if you can help it."""
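# Illustrative examples (a sketch of the behavior documented above):
#
#     >>> sscanf("1.2.3.4", "%d.%d.%d.%d")
#     (1, 2, 3, 4)
#     >>> sscanf("hello world", "hello %s")
#     ('world',)
#     >>> sscanf("ab", "%1s%1s")
#     ('a', 'b')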
import sys
from string import whitespace as WHITESPACE
from string import digits as DIGITS
__all__ = ['scanf', 'sscanf', 'fscanf']
__version__ = '1.0'
class CharacterBuffer(object):
"""A CharacterBuffer allows us to get a character, and to "unget" a
character. Abstract class"""
def getch(self):
"""Returns the next character. If there are no more characters
left in the stream, returns the empty string."""
pass # implement me!
def ungetch(self, ch):
"""Tries to put back a character. Can be called at most once
between calls to getch()."""
pass # implement me!
def scanCharacterSet(self, characterSet, maxChars=0):
"""Support function that scans across a buffer till we hit
something outside the allowable characterSet."""
return self.scanPredicate(lambda ch: ch in characterSet, maxChars)
def scanPredicate(self, predicate, maxChars=0):
"""Support function that scans across a buffer till we hit
something outside what's allowable by the predicate."""
chars = []
countChars = 0
while True:
if (maxChars != 0 and countChars >= maxChars):
break
ch = self.getch()
if ch != '' and predicate(ch):
chars.append(ch)
countChars += 1
else:
self.ungetch(ch)
break
return ''.join(chars)
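    # For example (a sketch): buffered over the string "123abc",
    # scanCharacterSet(DIGITS) consumes and returns '123', leaving 'abc'
    # for subsequent getch() calls.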
class CharacterBufferFromIterable(CharacterBuffer):
"""Implementation of CharacterBuffers for iterable things.
We keep a 'lastChar' attribute to simulate ungetc()."""
def __init__(self, iterable):
self.iterator = iter(iterable)
self.lastChar = ''
def getch(self):
if self.lastChar == '':
try:
return self.iterator.next()
except StopIteration:
return ''
else:
(ch, self.lastChar) = (self.lastChar, '')
return ch
def ungetch(self, ch):
self.lastChar = ch
class CharacterBufferFromFile(CharacterBuffer):
"""Implementation of CharacterBuffers for files. We use the native
read(1) and seek() calls, so we don't have to do so much magic."""
def __init__(self, myfile):
self.myfile = myfile
def getch(self):
return self.myfile.read(1)
def ungetch(self, ch):
self.myfile.seek(- len(ch), 1)
def readiter(inputFile, *args):
"""Returns an iterator that calls read(*args) on the inputFile."""
while True:
ch = inputFile.read(*args)
if ch:
yield ch
else:
raise StopIteration
def isIterable(thing):
"""Returns true if 'thing' looks iterable."""
try:
iter(thing)
except TypeError:
return False
return True
def isFileLike(thing):
"""Returns true if thing looks like a file."""
if hasattr(thing, "read") and hasattr(thing, "seek"):
try:
thing.seek(1, 1)
thing.seek(-1, 1)
return True
except IOError:
pass
return False
def makeCharBuffer(thing):
"""Try to coerse 'thing' into a CharacterBuffer. 'thing' can be
an instance of:
1. CharacterBuffer
2. A file-like object,
3. An iterable.
makeCharBuffer() will make guesses in that order.
"""
if isinstance(thing, CharacterBuffer):
return thing
elif isFileLike(thing):
# this check must come before isIterable, since files
# provide a line-based iterator that we don't want to use.
# Plus we want to take advantage of file.seek()
return CharacterBufferFromFile(thing)
elif isIterable(thing):
return CharacterBufferFromIterable(thing)
else:
raise ValueError, ("Can't coerse %r to CharacterBuffer" % thing)
class CappedBuffer(CharacterBuffer):
"""Implementation of a buffer that caps the number of bytes we can
getch(). The cap may or may not include whitespace characters."""
def __init__(self, buffer, width, ignoreWhitespace=False):
self.buffer = buffer
self.bytesRead = 0
self.width = width
self.ignoreWhitespace = ignoreWhitespace
def getch(self):
if self.bytesRead < self.width:
nextChar = self.buffer.getch()
if not self.isIgnoredChar(nextChar):
self.bytesRead += len(nextChar)
return nextChar
else:
return ''
def isIgnoredChar(self, ch):
return self.ignoreWhitespace and isWhitespaceChar(ch)
def ungetch(self, ch):
self.buffer.ungetch(ch)
if not self.isIgnoredChar(ch):
self.bytesRead -= len(ch)
# make sure wacky things don't happen when ungetch()ing.
assert self.bytesRead >= 0
class FormatError(ValueError):
"""A FormatError is raised if we run into errors while scanning
for input."""
pass
class IncompleteCaptureError(ValueError):
"""The *scanf() functions raise IncompleteCaptureError if a problem
occurs doing scanning."""
pass
try:
    # We keep a module-level STDIN CharacterBuffer, so that we can call
    # scanf() several times and not lose characters between invocations.
    _STDIN = CharacterBufferFromIterable(sys.stdin)
    def scanf(formatString):
        """scanf(formatString) -> tuple
        Scans standard input for formats specified in the formatString. See
        module's docs for list of supported format characters."""
        return bscanf(_STDIN, formatString)
except TypeError:
    # sys.stdin could not be wrapped (e.g. it has been replaced by a
    # non-iterable object); in that case scanf() is simply unavailable.
    pass
def sscanf(inputString, formatString):
"""sscanf(inputString, formatString) -> tuple
Scans inputString for formats specified in the formatString. See
module's docs for list of supported format characters."""
return bscanf(CharacterBufferFromIterable(inputString), formatString)
def fscanf(inputFile, formatString):
"""fscanf(inputFile, formatString) -> tuple
Scans inputFile for formats specified in the formatString. See
module's docs for list of supported format characters."""
buffer = CharacterBufferFromFile(inputFile)
return bscanf(buffer, formatString)
def bscanf(buffer, formatString):
"""fscanf(buffer, formatString) -> tuple
Scans a CharacterBuffer 'buffer' for formats specified in the
formatString. See scanf module's docs for list of supported format
characters."""
# TODO: we may want to do some caching here of compiled formatStrings,
# similar to that of the 're' module.
parser = compile(formatString)
return parser(buffer)
def isWhitespaceChar(ch, _set=set(WHITESPACE)):
"""Returns true if the charcter looks like whitespace.
We follow the definition of C's isspace() function.
"""
return ch in _set
def handleWhitespace(buffer):
"""Scans for whitespace. Returns all the whitespace it collects."""
chars = []
while True:
ch = buffer.getch()
if isWhitespaceChar(ch):
chars.append(ch)
else:
buffer.ungetch(ch)
break
return ''.join(chars)
# We keep a few sets as module variables just to incur the cost of
# constructing them just once.
_PLUS_MINUS_SET = set("+-")
_DIGIT_SET = set(DIGITS)
_OCT_SET = set("01234567")
_HEX_SET = set("0123456789ABCDEFabcdef")
def handleDecimalInt(buffer, optional=False, allowLeadingWhitespace=True):
"""Tries to scan for an integer. If 'optional' is set to False,
returns None if an integer can't be successfully scanned."""
if allowLeadingWhitespace:
handleWhitespace(buffer) # eat leading spaces
chars = []
chars += buffer.scanCharacterSet(_PLUS_MINUS_SET, 1)
chars += buffer.scanCharacterSet(_DIGIT_SET)
try:
return int(''.join(chars), 10)
except ValueError:
if optional:
return None
raise FormatError, ("invalid literal characters: %s" % ''.join(chars))
def handleOct(buffer):
chars = []
chars += buffer.scanCharacterSet(_PLUS_MINUS_SET)
chars += buffer.scanCharacterSet(_OCT_SET)
try:
return int(''.join(chars), 8)
except ValueError:
raise FormatError, ("invalid literal characters: %s" % ''.join(chars))
def handleInt(buffer, base=0):
chars = []
chars += buffer.scanCharacterSet(_PLUS_MINUS_SET)
chars += buffer.scanCharacterSet("0")
if chars and chars[-1] == '0':
chars += buffer.scanCharacterSet("xX")
chars += buffer.scanCharacterSet(_HEX_SET)
try:
return int(''.join(chars), base)
except ValueError:
raise FormatError, ("invalid literal characters: %s" % ''.join(chars))
def handleHex(buffer):
return handleInt(buffer, 16)
def handleFloat(buffer, allowLeadingWhitespace=True):
if allowLeadingWhitespace:
handleWhitespace(buffer) # eat leading whitespace
chars = []
chars += buffer.scanCharacterSet(_PLUS_MINUS_SET)
chars += buffer.scanCharacterSet(_DIGIT_SET)
chars += buffer.scanCharacterSet(".")
chars += buffer.scanCharacterSet(_DIGIT_SET)
chars += buffer.scanCharacterSet("eE")
chars += buffer.scanCharacterSet(_PLUS_MINUS_SET)
chars += buffer.scanCharacterSet(_DIGIT_SET)
try:
return float(''.join(chars))
except ValueError:
raise FormatError, ("invalid literal characters: %s" % ''.join(chars))
def handleChars(buffer,
allowLeadingWhitespace=False,
isBadCharacter=lambda ch: False,
optional=False):
"""Read as many characters are there are in the buffer."""
if allowLeadingWhitespace:
handleWhitespace(buffer)
chars = []
chars += buffer.scanPredicate(lambda ch: not isBadCharacter(ch))
if chars:
return ''.join(chars)
else:
if optional:
return None
raise FormatError, ("Empty buffer.")
def handleString(buffer, allowLeadingWhitespace=True):
"""Reading a string format is just an application of reading
characters (skipping leading spaces, and reading up to space)."""
return handleChars(buffer,
allowLeadingWhitespace=allowLeadingWhitespace,
isBadCharacter=isWhitespaceChar)
def makeHandleLiteral(literal):
def f(buffer, optional=False):
ch = buffer.getch()
if ch == literal:
return ch
else:
buffer.ungetch(ch)
if optional:
return None
raise FormatError, ("%s != %s" % (literal, ch))
return f
def makeWidthLimitedHandler(handler, width, ignoreWhitespace=False):
"""Constructs a Handler that caps the number of bytes that can be read
from the byte buffer."""
def f(buffer):
return handler(CappedBuffer(buffer, width, ignoreWhitespace))
return f
"""Just for kicks: handleChar is a handler for a single character."""
handleChar = makeWidthLimitedHandler(handleChars, 1, ignoreWhitespace=False)
def makeIgnoredHandler(handler):
def f(buffer):
handler(buffer)
return None
return f
class CompiledPattern:
def __init__(self, handlers, formatString):
self.handlers = handlers
self.formatString = formatString
def __call__(self, buffer):
results = []
try:
for h in self.handlers:
value = h(buffer)
# We use None as the sentinel value that ignored handlers
# will emit.
if value is not None:
results.append(value)
return tuple(results)
except FormatError, e:
raise IncompleteCaptureError, (e, tuple(results))
def __repr__(self):
return "compile(%r)" % self.formatString
def compile(formatString):
"""Given a format string, emits a new CompiledPattern that eats
CharacterBuffers and returns captured values as a tuple.
If there's a failure during scanning, raises IncompleteCaptureError,
with args being a two-tuple of the FormatError, and the results that
were captured before the error occurred.
"""
handlers = []
formatBuffer = CharacterBufferFromIterable(formatString)
while True:
ch = formatBuffer.getch()
if ch == '':
break
if isWhitespaceChar(ch):
handleWhitespace(formatBuffer)
handlers.append(makeIgnoredHandler(handleWhitespace))
elif ch == '%':
handlers.append(_compileFormat(formatBuffer))
else:
handlers.append(makeIgnoredHandler(makeHandleLiteral(ch)))
return CompiledPattern(handlers, formatString)
def _compileFormat(formatBuffer):
def readOptionalSuppression():
f = makeHandleLiteral("*")
return f(formatBuffer, optional=True) == "*"
def readOptionalWidth():
return handleDecimalInt(formatBuffer,
optional=True,
allowLeadingWhitespace=False)
def readFormat():
return formatBuffer.getch() # Finally, read the format
suppression = readOptionalSuppression()
width = readOptionalWidth()
formatCh = readFormat()
handler = makeFormattedHandler(suppression, width, formatCh)
if handler:
return handler
else:
# At this point, since we couldn't figure out the format, die loudly.
raise FormatError, ("Invalid format character %s" % formatCh)
_FORMAT_HANDLERS = {'d': handleDecimalInt,
'i': handleInt,
'x': handleHex,
'o': handleOct,
's': handleString,
'f': handleFloat,
'%': makeIgnoredHandler(makeHandleLiteral('%'))
}
def makeFormattedHandler(suppression, width, formatCh):
"""Given suppression, width, and a formatType, returns a function
that eats a buffer and returns that thing."""
def applySuppression(handler):
if suppression:
return makeIgnoredHandler(handler)
return handler
def applyWidth(handler):
if width != None:
return makeWidthLimitedHandler(handler, width,
ignoreWhitespace=True)
return handler
# 'c' is a special case: it's the only handler that can't ignore
# whitespace.
if formatCh == 'c':
if width == None:
return applySuppression(handleChar)
else:
return applySuppression(
makeWidthLimitedHandler(handleChars, width,
ignoreWhitespace=False))
if formatCh in _FORMAT_HANDLERS:
return applySuppression(applyWidth(_FORMAT_HANDLERS[formatCh]))
else:
return None
| assumptionsoup/pymel | pymel/util/scanf.py | Python | bsd-3-clause | 19,439 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..mesh import MeshWarpMaths
def test_MeshWarpMaths_inputs():
input_map = dict(float_trait=dict(),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_surf=dict(mandatory=True,
),
operation=dict(usedefault=True,
),
operator=dict(mandatory=True,
),
out_file=dict(usedefault=True,
),
out_warp=dict(usedefault=True,
),
)
inputs = MeshWarpMaths.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_MeshWarpMaths_outputs():
output_map = dict(out_file=dict(),
out_warp=dict(),
)
outputs = MeshWarpMaths.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| mick-d/nipype | nipype/algorithms/tests/test_auto_MeshWarpMaths.py | Python | bsd-3-clause | 1,014 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" An extensible ASCII table reader and writer.
"""
from .core import (InconsistentTableError,
ParameterError,
NoType, StrType, NumType, FloatType, IntType, AllType,
Column,
BaseInputter, ContinuationLinesInputter,
BaseHeader,
BaseData,
BaseOutputter, TableOutputter,
BaseReader,
BaseSplitter, DefaultSplitter, WhitespaceSplitter,
convert_numpy,
masked
)
from .basic import (Basic, BasicHeader, BasicData,
Rdb,
Csv,
Tab,
NoHeader,
CommentedHeader)
from .fastbasic import (FastBasic,
FastCsv,
FastTab,
FastNoHeader,
FastCommentedHeader,
FastRdb)
from .cds import Cds
from .ecsv import Ecsv
from .latex import Latex, AASTex, latexdicts
from .html import HTML
from .ipac import Ipac
from .daophot import Daophot
from .sextractor import SExtractor
from .fixedwidth import (FixedWidth, FixedWidthNoHeader,
FixedWidthTwoLine, FixedWidthSplitter,
FixedWidthHeader, FixedWidthData)
from .rst import RST
from .ui import (set_guess, get_reader, read, get_writer, write, get_read_trace)
from . import connect
| stargaser/astropy | astropy/io/ascii/__init__.py | Python | bsd-3-clause | 1,565 |
import os
import json
import glob
def main():
prefix = os.environ['PREFIX']
info_files = glob.glob(os.path.join(prefix, 'conda-meta',
'conda-build-test-numpy-run-1.0-py*0.json'))
assert len(info_files) == 1
info_file = info_files[0]
with open(info_file, 'r') as fh:
info = json.load(fh)
assert len(info['depends']) == 2
depends = sorted(info['depends'])
# With no version
assert depends[0] == 'numpy'
assert depends[1].startswith('python ')
if __name__ == '__main__':
main()
| sandhujasmine/conda-build | tests/test-recipes/metadata/numpy_run/run_test.py | Python | bsd-3-clause | 559 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def forwards(apps, schema_editor):
"""Create Peers group."""
Group = apps.get_model('auth', 'Group')
Group.objects.create(name='Peers')
def backwards(apps, schema_editor):
"""Delete Peers group."""
Group = apps.get_model('auth', 'Group')
Group.objects.filter(name='Peers').delete()
class Migration(migrations.Migration):
dependencies = [
('profiles', '0002_auto_20160908_1534'),
]
operations = [
migrations.RunPython(forwards, backwards)
]
| tsmrachel/remo | remo/profiles/migrations/0003_auto_20160921_1608.py | Python | bsd-3-clause | 610 |
"""
@author: Deniz Altinbuken, Emin Gun Sirer
@note: Queue proxy
@copyright: See LICENSE
"""
from concoord.clientproxy import ClientProxy
class Queue:
def __init__(self, bootstrap, timeout=60, debug=False, token=None):
self.proxy = ClientProxy(bootstrap, timeout, debug, token)
def __concoordinit__(self):
return self.proxy.invoke_command('__init__')
def append(self, item):
return self.proxy.invoke_command('append', item)
def remove(self):
return self.proxy.invoke_command('remove')
def get_size(self):
return self.proxy.invoke_command('get_size')
def get_queue(self):
return self.proxy.invoke_command('get_queue')
def __str__(self):
return self.proxy.invoke_command('__str__')
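# Example usage (illustrative; the bootstrap argument is assumed to be a
# 'host:port' replica address, as expected by ClientProxy):
#
#     q = Queue('127.0.0.1:14000')
#     q.append('job-1')
#     size = q.get_size()
#     item = q.remove()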
| tempbottle/concoord | concoord/proxy/queue.py | Python | bsd-3-clause | 770 |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Includes wrappers for commands.
"""
import time
import types
import getopt
import inspect
import threading
import supybot.log as log
import supybot.conf as conf
import supybot.utils as utils
import supybot.world as world
import supybot.ircdb as ircdb
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
###
# Non-arg wrappers -- these just change the behavior of a command without
# changing the arguments given to it.
###
# Thread has to be a non-arg wrapper because by the time we're parsing and
# validating arguments, we're inside the function we'd want to thread.
def thread(f):
"""Makes sure a command spawns a thread when called."""
def newf(self, irc, msg, args, *L, **kwargs):
if world.isMainThread():
targetArgs = (self.callingCommand, irc, msg, args) + tuple(L)
t = callbacks.CommandThread(target=self._callCommand,
args=targetArgs, kwargs=kwargs)
t.start()
else:
f(self, irc, msg, args, *L, **kwargs)
return utils.python.changeFunctionName(newf, f.func_name, f.__doc__)
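# Illustrative, self-contained sketch (not part of supybot) of the same
# pattern: run the wrapped function in a fresh thread when called from the
# main thread, inline otherwise.
def _threadedSketch(f):
    def newf(*args, **kwargs):
        if threading.currentThread().getName() == 'MainThread':
            threading.Thread(target=f, args=args, kwargs=kwargs).start()
        else:
            f(*args, **kwargs)
    return newf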
class UrlSnarfThread(world.SupyThread):
def __init__(self, *args, **kwargs):
assert 'url' in kwargs
kwargs['name'] = 'Thread #%s (for snarfing %s)' % \
(world.threadsSpawned, kwargs.pop('url'))
super(UrlSnarfThread, self).__init__(*args, **kwargs)
self.setDaemon(True)
def run(self):
try:
super(UrlSnarfThread, self).run()
except utils.web.Error, e:
log.debug('Exception in urlSnarfer: %s', utils.exnToString(e))
class SnarfQueue(ircutils.FloodQueue):
timeout = conf.supybot.snarfThrottle
def key(self, channel):
return channel
_snarfed = SnarfQueue()
class SnarfIrc(object):
def __init__(self, irc, channel, url):
self.irc = irc
self.url = url
self.channel = channel
def __getattr__(self, attr):
return getattr(self.irc, attr)
def reply(self, *args, **kwargs):
_snarfed.enqueue(self.channel, self.url)
return self.irc.reply(*args, **kwargs)
# This lock is used to serialize the calls to snarfers, so
# earlier snarfers are guaranteed to beat out later snarfers.
_snarfLock = threading.Lock()
def urlSnarfer(f):
"""Protects the snarfer from loops (with other bots) and whatnot."""
def newf(self, irc, msg, match, *L, **kwargs):
url = match.group(0)
channel = msg.args[0]
if not irc.isChannel(channel):
return
if ircdb.channels.getChannel(channel).lobotomized:
self.log.info('Not snarfing in %s: lobotomized.', channel)
return
if _snarfed.has(channel, url):
self.log.info('Throttling snarf of %s in %s.', url, channel)
return
irc = SnarfIrc(irc, channel, url)
def doSnarf():
_snarfLock.acquire()
try:
# This has to be *after* we've acquired the lock so we can be
# sure that all previous urlSnarfers have already run to
# completion.
if msg.repliedTo:
self.log.debug('Not snarfing, msg is already repliedTo.')
return
f(self, irc, msg, match, *L, **kwargs)
finally:
_snarfLock.release()
if threading.currentThread() is not world.mainThread:
doSnarf()
else:
L = list(L)
t = UrlSnarfThread(target=doSnarf, url=url)
t.start()
newf = utils.python.changeFunctionName(newf, f.func_name, f.__doc__)
return newf
###
# Converters, which take irc, msg, args, and a state object, and build up the
# validated and converted args for the method in state.args.
###
# This is just so we can centralize this, since it may change.
def _int(s):
base = 10
if s.startswith('0x'):
base = 16
s = s[2:]
elif s.startswith('0b'):
base = 2
s = s[2:]
elif s.startswith('0') and len(s) > 1:
base = 8
s = s[1:]
try:
return int(s, base)
except ValueError:
if base == 10:
return int(float(s))
else:
raise
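# Worked examples for _int (illustrative, not part of the original module):
#     _int('0x1f')  -> 31   (hexadecimal)
#     _int('0b101') -> 5    (binary)
#     _int('017')   -> 15   (octal, via the leading-zero rule)
#     _int('3.7')   -> 3    (base-10 fallback through int(float(s)))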
def getInt(irc, msg, args, state, type='integer', p=None):
try:
i = _int(args[0])
if p is not None:
if not p(i):
state.errorInvalid(type, args[0])
state.args.append(i)
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getNonInt(irc, msg, args, state, type='non-integer value'):
    try:
        # If the argument parses as an integer, it's invalid here.
        _int(args[0])
        state.errorInvalid(type, args[0])
    except ValueError:
        state.args.append(args.pop(0))
def getLong(irc, msg, args, state, type='long'):
getInt(irc, msg, args, state, type)
state.args[-1] = long(state.args[-1])
def getFloat(irc, msg, args, state, type='floating point number'):
try:
state.args.append(float(args[0]))
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getPositiveInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>0, type='positive integer', *L)
def getNonNegativeInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>=0, type='non-negative integer', *L)
def getIndex(irc, msg, args, state):
getInt(irc, msg, args, state, type='index')
if state.args[-1] > 0:
state.args[-1] -= 1
def getId(irc, msg, args, state, kind=None):
type = 'id'
if kind is not None and not kind.endswith('id'):
type = kind + ' id'
original = args[0]
try:
args[0] = args[0].lstrip('#')
getInt(irc, msg, args, state, type=type)
except Exception, e:
args[0] = original
raise
def getExpiry(irc, msg, args, state):
now = int(time.time())
try:
expires = _int(args[0])
if expires:
expires += now
state.args.append(expires)
del args[0]
except ValueError:
state.errorInvalid('number of seconds', args[0])
def getBoolean(irc, msg, args, state):
try:
state.args.append(utils.str.toBool(args[0]))
del args[0]
except ValueError:
state.errorInvalid('boolean', args[0])
def getNetworkIrc(irc, msg, args, state, errorIfNoMatch=False):
if args:
for otherIrc in world.ircs:
if otherIrc.network.lower() == args[0].lower():
state.args.append(otherIrc)
del args[0]
return
if errorIfNoMatch:
raise callbacks.ArgumentError
else:
state.args.append(irc)
def getHaveOp(irc, msg, args, state, action='do that'):
if not state.channel:
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error('I\'m not even in %s.' % state.channel, Raise=True)
if not irc.state.channels[state.channel].isOp(irc.nick):
state.error('I need to be opped to %s.' % action, Raise=True)
def validChannel(irc, msg, args, state):
if irc.isChannel(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('channel', args[0])
def getHostmask(irc, msg, args, state):
if ircutils.isUserHostmask(args[0]):
state.args.append(args.pop(0))
else:
try:
hostmask = irc.state.nickToHostmask(args[0])
state.args.append(hostmask)
del args[0]
except KeyError:
state.errorInvalid('nick or hostmask', args[0])
def getBanmask(irc, msg, args, state):
getHostmask(irc, msg, args, state)
if not state.channel:
getChannel(irc, msg, args, state)
banmaskstyle = conf.supybot.protocols.irc.banmask
state.args[-1] = banmaskstyle.makeBanmask(state.args[-1])
def getUser(irc, msg, args, state):
try:
state.args.append(ircdb.users.getUser(msg.prefix))
except KeyError:
state.errorNotRegistered(Raise=True)
def getOtherUser(irc, msg, args, state):
if ircutils.isUserHostmask(args[0]):
state.errorNoUser(args[0])
try:
state.args.append(ircdb.users.getUser(args[0]))
del args[0]
except KeyError:
try:
getHostmask(irc, msg, [args[0]], state)
hostmask = state.args.pop()
state.args.append(ircdb.users.getUser(hostmask))
del args[0]
except (KeyError, callbacks.Error):
state.errorNoUser(name=args[0])
def _getRe(f):
def get(irc, msg, args, state, convert=True):
original = args[:]
s = args.pop(0)
def isRe(s):
try:
_ = f(s)
return True
except ValueError:
return False
try:
while len(s) < 512 and not isRe(s):
s += ' ' + args.pop(0)
if len(s) < 512:
if convert:
state.args.append(f(s))
else:
state.args.append(s)
else:
state.errorInvalid('regular expression', s)
except IndexError:
args[:] = original
state.errorInvalid('regular expression', s)
return get
getMatcher = _getRe(utils.str.perlReToPythonRe)
getReplacer = _getRe(utils.str.perlReToReplacer)
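# Illustrative behavior of the regexp converters above (not part of the
# original module): because command arguments arrive split on whitespace,
# the converter keeps joining tokens until a valid expression forms; e.g.
# for args ['m/hello', 'world/'], getMatcher consumes both tokens and
# appends the compiled regexp for 'm/hello world/' to state.args.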
def getNick(irc, msg, args, state):
if ircutils.isNick(args[0]):
if 'nicklen' in irc.state.supported:
if len(args[0]) > irc.state.supported['nicklen']:
state.errorInvalid('nick', args[0],
'That nick is too long for this server.')
state.args.append(args.pop(0))
else:
state.errorInvalid('nick', args[0])
def getSeenNick(irc, msg, args, state, errmsg=None):
try:
_ = irc.state.nickToHostmask(args[0])
state.args.append(args.pop(0))
except KeyError:
if errmsg is None:
errmsg = 'I haven\'t seen %s.' % args[0]
state.error(errmsg, Raise=True)
def getChannel(irc, msg, args, state):
if args and irc.isChannel(args[0]):
channel = args.pop(0)
elif irc.isChannel(msg.args[0]):
channel = msg.args[0]
else:
state.log.debug('Raising ArgumentError because there is no channel.')
raise callbacks.ArgumentError
state.channel = channel
state.args.append(channel)
def getChannelDb(irc, msg, args, state, **kwargs):
channelSpecific = conf.supybot.databases.plugins.channelSpecific
try:
getChannel(irc, msg, args, state, **kwargs)
channel = channelSpecific.getChannelLink(state.channel)
state.channel = channel
state.args[-1] = channel
except (callbacks.ArgumentError, IndexError):
if channelSpecific():
raise
channel = channelSpecific.link()
if not conf.get(channelSpecific.link.allow, channel):
log.warning('channelSpecific.link is globally set to %s, but '
'%s disallowed linking to its db.', channel, channel)
raise
else:
channel = channelSpecific.getChannelLink(channel)
state.channel = channel
state.args.append(channel)
def inChannel(irc, msg, args, state):
if not state.channel:
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error('I\'m not in %s.' % state.channel, Raise=True)
def onlyInChannel(irc, msg, args, state):
if not (irc.isChannel(msg.args[0]) and msg.args[0] in irc.state.channels):
state.error('This command may only be given in a channel that I am in.',
Raise=True)
else:
state.channel = msg.args[0]
state.args.append(state.channel)
def callerInGivenChannel(irc, msg, args, state):
channel = args[0]
if irc.isChannel(channel):
if channel in irc.state.channels:
if msg.nick in irc.state.channels[channel].users:
state.args.append(args.pop(0))
else:
state.error('You must be in %s.' % channel, Raise=True)
else:
state.error('I\'m not in %s.' % channel, Raise=True)
else:
state.errorInvalid('channel', args[0])
def nickInChannel(irc, msg, args, state):
originalArgs = state.args[:]
inChannel(irc, msg, args, state)
state.args = originalArgs
if args[0] not in irc.state.channels[state.channel].users:
state.error('%s is not in %s.' % (args[0], state.channel), Raise=True)
state.args.append(args.pop(0))
def getChannelOrNone(irc, msg, args, state):
try:
getChannel(irc, msg, args, state)
except callbacks.ArgumentError:
state.args.append(None)
def checkChannelCapability(irc, msg, args, state, cap):
if not state.channel:
getChannel(irc, msg, args, state)
cap = ircdb.canonicalCapability(cap)
cap = ircdb.makeChannelCapability(state.channel, cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def getOp(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'op')
def getHalfop(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'halfop')
def getVoice(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'voice')
def getLowered(irc, msg, args, state):
state.args.append(ircutils.toLower(args.pop(0)))
def getSomething(irc, msg, args, state, errorMsg=None, p=None):
if p is None:
p = lambda _: True
if not args[0] or not p(args[0]):
if errorMsg is None:
errorMsg = 'You must not give the empty string as an argument.'
state.error(errorMsg, Raise=True)
else:
state.args.append(args.pop(0))
def getSomethingNoSpaces(irc, msg, args, state, *L):
def p(s):
return len(s.split(None, 1)) == 1
getSomething(irc, msg, args, state, p=p, *L)
def private(irc, msg, args, state):
if irc.isChannel(msg.args[0]):
state.errorRequiresPrivacy(Raise=True)
def public(irc, msg, args, state, errmsg=None):
if not irc.isChannel(msg.args[0]):
if errmsg is None:
errmsg = 'This message must be sent in a channel.'
state.error(errmsg, Raise=True)
def checkCapability(irc, msg, args, state, cap):
cap = ircdb.canonicalCapability(cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def owner(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'owner')
def admin(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'admin')
def anything(irc, msg, args, state):
state.args.append(args.pop(0))
def getGlob(irc, msg, args, state):
glob = args.pop(0)
if '*' not in glob and '?' not in glob:
glob = '*%s*' % glob
state.args.append(glob)
def getUrl(irc, msg, args, state):
if utils.web.urlRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('url', args[0])
def getEmail(irc, msg, args, state):
if utils.net.emailRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('email', args[0])
def getHttpUrl(irc, msg, args, state):
if utils.web.httpUrlRe.match(args[0]):
state.args.append(args.pop(0))
elif utils.web.httpUrlRe.match('http://' + args[0]):
state.args.append('http://' + args.pop(0))
else:
state.errorInvalid('http url', args[0])
def getNow(irc, msg, args, state):
state.args.append(int(time.time()))
def getCommandName(irc, msg, args, state):
if ' ' in args[0]:
state.errorInvalid('command name', args[0])
else:
state.args.append(callbacks.canonicalName(args.pop(0)))
def getIp(irc, msg, args, state):
if utils.net.isIP(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('ip', args[0])
def getLetter(irc, msg, args, state):
if len(args[0]) == 1:
state.args.append(args.pop(0))
else:
state.errorInvalid('letter', args[0])
def getMatch(irc, msg, args, state, regexp, errmsg):
m = regexp.search(args[0])
if m is not None:
state.args.append(m)
del args[0]
else:
state.error(errmsg, Raise=True)
def getLiteral(irc, msg, args, state, literals, errmsg=None):
# ??? Should we allow abbreviations?
if isinstance(literals, basestring):
literals = (literals,)
abbrevs = utils.abbrev(literals)
if args[0] in abbrevs:
state.args.append(abbrevs[args.pop(0)])
elif errmsg is not None:
state.error(errmsg, Raise=True)
else:
raise callbacks.ArgumentError
def getTo(irc, msg, args, state):
if args[0].lower() == 'to':
args.pop(0)
def getPlugin(irc, msg, args, state, require=True):
cb = irc.getCallback(args[0])
if cb is not None:
state.args.append(cb)
del args[0]
elif require:
state.errorInvalid('plugin', args[0])
else:
state.args.append(None)
def getIrcColor(irc, msg, args, state):
if args[0] in ircutils.mircColors:
state.args.append(ircutils.mircColors[args.pop(0)])
else:
state.errorInvalid('irc color')
def getText(irc, msg, args, state):
if args:
state.args.append(' '.join(args))
args[:] = []
else:
raise IndexError
wrappers = ircutils.IrcDict({
'id': getId,
'ip': getIp,
'int': getInt,
'index': getIndex,
'color': getIrcColor,
'now': getNow,
'url': getUrl,
'email': getEmail,
'httpUrl': getHttpUrl,
'long': getLong,
'float': getFloat,
'nonInt': getNonInt,
'positiveInt': getPositiveInt,
'nonNegativeInt': getNonNegativeInt,
'letter': getLetter,
'haveOp': getHaveOp,
'expiry': getExpiry,
'literal': getLiteral,
'to': getTo,
'nick': getNick,
'seenNick': getSeenNick,
'channel': getChannel,
'inChannel': inChannel,
'onlyInChannel': onlyInChannel,
'nickInChannel': nickInChannel,
'networkIrc': getNetworkIrc,
'callerInGivenChannel': callerInGivenChannel,
'plugin': getPlugin,
'boolean': getBoolean,
'lowered': getLowered,
'anything': anything,
'something': getSomething,
'filename': getSomething, # XXX Check for validity.
'commandName': getCommandName,
'text': getText,
'glob': getGlob,
'somethingWithoutSpaces': getSomethingNoSpaces,
'capability': getSomethingNoSpaces,
'channelDb': getChannelDb,
'hostmask': getHostmask,
'banmask': getBanmask,
'user': getUser,
'matches': getMatch,
'public': public,
'private': private,
'otherUser': getOtherUser,
'regexpMatcher': getMatcher,
'validChannel': validChannel,
'regexpReplacer': getReplacer,
'owner': owner,
'admin': admin,
'checkCapability': checkCapability,
'checkChannelCapability': checkChannelCapability,
'op': getOp,
'halfop': getHalfop,
'voice': getVoice,
})
def addConverter(name, wrapper):
wrappers[name] = wrapper
class UnknownConverter(KeyError):
pass
def getConverter(name):
try:
return wrappers[name]
except KeyError, e:
raise UnknownConverter, str(e)
def callConverter(name, irc, msg, args, state, *L):
getConverter(name)(irc, msg, args, state, *L)
###
# Contexts. These determine what the nature of conversions is; whether they're
# defaulted, or many of them are allowed, etc. Contexts should be reusable;
# i.e., they should not maintain state between calls.
###
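# Illustrative spec forms accepted by contextify/context (not part of the
# original module):
#     'int'                          # a converter name
#     ('literal', ('on', 'off'))     # tuple: converter name plus its args
#     None                           # shorthand for 'anything'
#     additional('int', default=5)   # an already-built context instance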
def contextify(spec):
if not isinstance(spec, context):
spec = context(spec)
return spec
def setDefault(state, default):
if callable(default):
state.args.append(default())
else:
state.args.append(default)
class context(object):
def __init__(self, spec):
self.args = ()
self.spec = spec # for repr
if isinstance(spec, tuple):
assert spec, 'tuple spec must not be empty.'
self.args = spec[1:]
self.converter = getConverter(spec[0])
elif spec is None:
self.converter = getConverter('anything')
elif isinstance(spec, basestring):
self.args = ()
self.converter = getConverter(spec)
else:
assert isinstance(spec, context)
self.converter = spec
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
self.converter(irc, msg, args, state, *self.args)
log.debug('args after %r: %r', self, args)
def __repr__(self):
return '<%s for %s>' % (self.__class__.__name__, self.spec)
class rest(context):
def __call__(self, irc, msg, args, state):
if args:
original = args[:]
args[:] = [' '.join(args)]
try:
super(rest, self).__call__(irc, msg, args, state)
except Exception, e:
args[:] = original
else:
raise IndexError
# additional means: Look for this (and make sure it's of this type). If
# there are no arguments for us to check, then use our default.
class additional(context):
def __init__(self, spec, default=None):
self.__parent = super(additional, self)
self.__parent.__init__(spec)
self.default = default
def __call__(self, irc, msg, args, state):
try:
self.__parent.__call__(irc, msg, args, state)
except IndexError:
log.debug('Got IndexError, returning default.')
setDefault(state, self.default)
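# For example (illustrative): additional('int', default=5) converts one
# more argument to an integer when one is present, appends 5 when the
# argument list has run out, but still errors when an argument is present
# and fails to convert.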
# optional means: Look for this, but if it's not the type I'm expecting or
# there are no arguments for us to check, then use the default value.
class optional(additional):
def __call__(self, irc, msg, args, state):
try:
super(optional, self).__call__(irc, msg, args, state)
except (callbacks.ArgumentError, callbacks.Error), e:
log.debug('Got %s, returning default.', utils.exnToString(e))
state.errored = False
setDefault(state, self.default)
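# For example (illustrative): optional('int', default=5) also appends 5
# when the next argument fails to convert, since conversion errors are
# swallowed and state.errored is reset.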
class any(context):
def __init__(self, spec, continueOnError=False):
self.__parent = super(any, self)
self.__parent.__init__(spec)
self.continueOnError = continueOnError
def __call__(self, irc, msg, args, state):
st = state.essence()
try:
while args:
self.__parent.__call__(irc, msg, args, st)
except IndexError:
pass
except (callbacks.ArgumentError, callbacks.Error), e:
if not self.continueOnError:
raise
else:
log.debug('Got %s, returning default.', utils.exnToString(e))
pass
state.args.append(st.args)
class many(any):
def __call__(self, irc, msg, args, state):
super(many, self).__call__(irc, msg, args, state)
if not state.args[-1]:
state.args.pop()
raise callbacks.ArgumentError
class first(context):
def __init__(self, *specs, **kw):
if 'default' in kw:
self.default = kw.pop('default')
assert not kw, 'Bad kwargs for first.__init__'
self.spec = specs # for __repr__
self.specs = map(contextify, specs)
def __call__(self, irc, msg, args, state):
errored = False
for spec in self.specs:
try:
spec(irc, msg, args, state)
return
except Exception, e:
errored = state.errored
state.errored = False
continue
if hasattr(self, 'default'):
state.args.append(self.default)
else:
state.errored = errored
raise e
class reverse(context):
def __call__(self, irc, msg, args, state):
args[:] = args[::-1]
super(reverse, self).__call__(irc, msg, args, state)
args[:] = args[::-1]
class commalist(context):
def __call__(self, irc, msg, args, state):
original = args[:]
st = state.essence()
trailingComma = True
try:
while trailingComma:
arg = args.pop(0)
if not arg.endswith(','):
trailingComma = False
for part in arg.split(','):
if part: # trailing commas
super(commalist, self).__call__(irc, msg, [part], st)
state.args.append(st.args)
except Exception, e:
args[:] = original
raise
class getopts(context):
"""The empty string indicates that no argument is taken; None indicates
that there is no converter for the argument."""
def __init__(self, getopts):
self.spec = getopts # for repr
self.getopts = {}
self.getoptL = []
for (name, spec) in getopts.iteritems():
if spec == '':
self.getoptL.append(name)
self.getopts[name] = None
else:
self.getoptL.append(name + '=')
self.getopts[name] = contextify(spec)
log.debug('getopts: %r', self.getopts)
log.debug('getoptL: %r', self.getoptL)
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
(optlist, rest) = getopt.getopt(args, '', self.getoptL)
getopts = []
for (opt, arg) in optlist:
opt = opt[2:] # Strip --
log.debug('opt: %r, arg: %r', opt, arg)
context = self.getopts[opt]
if context is not None:
st = state.essence()
context(irc, msg, [arg], st)
assert len(st.args) == 1
getopts.append((opt, st.args[0]))
else:
getopts.append((opt, True))
state.args.append(getopts)
args[:] = rest
log.debug('args after %r: %r', self, args)
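# Illustrative example (not part of the original module): with
#     getopts({'expiry': 'int', 'force': ''})
# the argument list ['--force', '--expiry', '3600', 'rest'] appends
# [('force', True), ('expiry', 3600)] to state.args and leaves
# args == ['rest'].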
###
# This is our state object, passed to converters along with irc, msg, and args.
###
class State(object):
log = log
def __init__(self, types):
self.args = []
self.kwargs = {}
self.types = types
self.channel = None
self.errored = False
def __getattr__(self, attr):
if attr.startswith('error'):
self.errored = True
return getattr(dynamic.irc, attr)
else:
raise AttributeError, attr
def essence(self):
st = State(self.types)
for (attr, value) in self.__dict__.iteritems():
if attr not in ('args', 'kwargs'):
setattr(st, attr, value)
return st
def __repr__(self):
return '%s(args=%r, kwargs=%r, channel=%r)' % (self.__class__.__name__,
self.args, self.kwargs,
self.channel)
###
# This is a compiled Spec object.
###
class Spec(object):
def _state(self, types, attrs={}):
st = State(types)
st.__dict__.update(attrs)
st.allowExtra = self.allowExtra
return st
def __init__(self, types, allowExtra=False):
self.types = types
self.allowExtra = allowExtra
utils.seq.mapinto(contextify, self.types)
def __call__(self, irc, msg, args, stateAttrs={}):
state = self._state(self.types[:], stateAttrs)
while state.types:
context = state.types.pop(0)
try:
context(irc, msg, args, state)
except IndexError:
raise callbacks.ArgumentError
if args and not state.allowExtra:
log.debug('args and not self.allowExtra: %r', args)
raise callbacks.ArgumentError
return state
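# Illustrative example (not part of the original module):
#     Spec(['int', additional('text')])
# called with args ['3', 'foo', 'bar'] leaves state.args == [3, 'foo bar'];
# it raises callbacks.ArgumentError when a required converter runs out of
# arguments, or when extra arguments remain and allowExtra is False.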
def wrap(f, specList=[], name=None, **kw):
name = name or f.func_name
spec = Spec(specList, **kw)
def newf(self, irc, msg, args, **kwargs):
state = spec(irc, msg, args, stateAttrs={'cb': self, 'log': self.log})
self.log.debug('State before call: %s', state)
if state.errored:
self.log.debug('Refusing to call %s due to state.errored.', f)
else:
try:
f(self, irc, msg, args, *state.args, **state.kwargs)
except TypeError:
self.log.error('Spec: %s', specList)
self.log.error('Received args: %s', args)
code = f.func_code
funcArgs = inspect.getargs(code)[0][len(self.commandArgs):]
self.log.error('Extra args: %s', funcArgs)
raise
return utils.python.changeFunctionName(newf, name, f.__doc__)
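# Canonical usage sketch (illustrative): a command method takes the
# converted values after (self, irc, msg, args) and is then wrapped:
#
#     def repeat(self, irc, msg, args, count, text):
#         """<count> <text>
#
#         Repeats <text> <count> times."""
#         irc.reply(' '.join([text] * count))
#     repeat = wrap(repeat, ['int', 'text'])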
__all__ = [
# Contexts.
'any', 'many',
'optional', 'additional',
'rest', 'getopts',
'first', 'reverse',
'commalist',
# Converter helpers.
'getConverter', 'addConverter', 'callConverter',
# Decorators.
'urlSnarfer', 'thread',
# Functions.
'wrap',
# Stuff for testing.
'Spec',
]
# This doesn't work. Suck.
## if world.testing:
## __all__.append('Spec')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| kblin/supybot-gsoc | src/commands.py | Python | bsd-3-clause | 30,909 |
# coding: utf-8
from __future__ import unicode_literals
import asyncore
import email
import os
import shutil
import smtpd
import sys
import tempfile
import threading
from django.core import mail
from django.core.mail import (EmailMessage, mail_admins, mail_managers,
EmailMultiAlternatives, send_mail, send_mass_mail)
from django.core.mail.backends import console, dummy, locmem, filebased, smtp
from django.core.mail.message import BadHeaderError
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_str, force_text
from django.utils.six import PY3, StringIO
from django.utils.translation import ugettext_lazy
class MailTests(TestCase):
"""
Non-backend specific tests.
"""
def test_ascii(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], 'from@example.com')
self.assertEqual(message['To'], 'to@example.com')
def test_multiple_recipients(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'])
message = email.message()
self.assertEqual(message['Subject'], 'Subject')
self.assertEqual(message.get_payload(), 'Content')
self.assertEqual(message['From'], 'from@example.com')
self.assertEqual(message['To'], 'to@example.com, other@example.com')
def test_cc(self):
"""Regression test for #7722"""
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
message = email.message()
self.assertEqual(message['Cc'], 'cc@example.com')
self.assertEqual(email.recipients(), ['to@example.com', 'cc@example.com'])
# Test multiple CC with multiple To
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'], cc=['cc@example.com', 'cc.other@example.com'])
message = email.message()
self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com'])
# Testing with Bcc
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'], cc=['cc@example.com', 'cc.other@example.com'], bcc=['bcc@example.com'])
message = email.message()
self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com'])
def test_recipients_as_tuple(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ('to@example.com', 'other@example.com'), cc=('cc@example.com', 'cc.other@example.com'), bcc=('bcc@example.com',))
message = email.message()
self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com'])
def test_header_injection(self):
email = EmailMessage('Subject\nInjection Test', 'Content', 'from@example.com', ['to@example.com'])
self.assertRaises(BadHeaderError, email.message)
email = EmailMessage(ugettext_lazy('Subject\nInjection Test'), 'Content', 'from@example.com', ['to@example.com'])
self.assertRaises(BadHeaderError, email.message)
def test_space_continuation(self):
"""
Test for space continuation character in long (ascii) subject headers (#7747)
"""
email = EmailMessage('Long subject lines that get wrapped should contain a space continuation character to get expected behavior in Outlook and Thunderbird', 'Content', 'from@example.com', ['to@example.com'])
message = email.message()
# Note that in Python 3, maximum line length has increased from 76 to 78
self.assertEqual(message['Subject'].encode(), b'Long subject lines that get wrapped should contain a space continuation\n character to get expected behavior in Outlook and Thunderbird')
def test_message_header_overrides(self):
"""
Specifying dates or message-ids in the extra headers overrides the
default values (#9233)
"""
headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
email = EmailMessage('subject', 'content', 'from@example.com', ['to@example.com'], headers=headers)
self.assertEqual(sorted(email.message().items()), [
('Content-Transfer-Encoding', '7bit'),
('Content-Type', 'text/plain; charset="utf-8"'),
('From', 'from@example.com'),
('MIME-Version', '1.0'),
('Message-ID', 'foo'),
('Subject', 'subject'),
('To', 'to@example.com'),
('date', 'Fri, 09 Nov 2001 01:08:47 -0000'),
])
def test_from_header(self):
"""
Make sure we can manually set the From header (#9214)
"""
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
message = email.message()
self.assertEqual(message['From'], 'from@example.com')
def test_to_header(self):
"""
Make sure we can manually set the To header (#17444)
"""
email = EmailMessage('Subject', 'Content', 'bounce@example.com',
['list-subscriber@example.com', 'list-subscriber2@example.com'],
headers={'To': 'mailing-list@example.com'})
message = email.message()
self.assertEqual(message['To'], 'mailing-list@example.com')
self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])
# If we don't set the To header manually, it should default to the `to` argument to the constructor
email = EmailMessage('Subject', 'Content', 'bounce@example.com',
['list-subscriber@example.com', 'list-subscriber2@example.com'])
message = email.message()
self.assertEqual(message['To'], 'list-subscriber@example.com, list-subscriber2@example.com')
self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])
def test_multiple_message_call(self):
"""
Regression for #13259 - Make sure that headers are not changed when
calling EmailMessage.message()
"""
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
message = email.message()
self.assertEqual(message['From'], 'from@example.com')
message = email.message()
self.assertEqual(message['From'], 'from@example.com')
def test_unicode_address_header(self):
"""
Regression for #11144 - When a to/from/cc header contains unicode,
make sure the email addresses are parsed correctly (especially with
        regard to commas)
"""
email = EmailMessage('Subject', 'Content', 'from@example.com', ['"Firstname Sürname" <to@example.com>', 'other@example.com'])
self.assertEqual(email.message()['To'], '=?utf-8?q?Firstname_S=C3=BCrname?= <to@example.com>, other@example.com')
email = EmailMessage('Subject', 'Content', 'from@example.com', ['"Sürname, Firstname" <to@example.com>', 'other@example.com'])
self.assertEqual(email.message()['To'], '=?utf-8?q?S=C3=BCrname=2C_Firstname?= <to@example.com>, other@example.com')
def test_unicode_headers(self):
email = EmailMessage("Gżegżółka", "Content", "from@example.com", ["to@example.com"],
headers={"Sender": '"Firstname Sürname" <sender@example.com>',
"Comments": 'My Sürname is non-ASCII'})
message = email.message()
self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=')
self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <sender@example.com>')
self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=')
def test_safe_mime_multipart(self):
"""
Make sure headers can be set with a different encoding than utf-8 in
SafeMIMEMultipart as well
"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', 'from@example.com', '"Sürname, Firstname" <to@example.com>'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.encoding = 'iso-8859-1'
self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <to@example.com>')
self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=')
def test_encoding(self):
"""
Regression for #12791 - Encode body correctly with other encodings
than utf-8
"""
email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', 'from@example.com', ['other@example.com'])
email.encoding = 'iso-8859-1'
message = email.message()
self.assertTrue(message.as_string().startswith('Content-Type: text/plain; charset="iso-8859-1"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\nSubject: Subject\nFrom: from@example.com\nTo: other@example.com'))
self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.')
# Make sure MIME attachments also works correctly with other encodings than utf-8
text_content = 'Firstname Sürname is a great guy.'
html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>'
msg = EmailMultiAlternatives('Subject', text_content, 'from@example.com', ['to@example.com'])
msg.encoding = 'iso-8859-1'
msg.attach_alternative(html_content, "text/html")
self.assertEqual(msg.message().get_payload(0).as_string(), 'Content-Type: text/plain; charset="iso-8859-1"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\n\nFirstname S=FCrname is a great guy.')
self.assertEqual(msg.message().get_payload(1).as_string(), 'Content-Type: text/html; charset="iso-8859-1"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>')
def test_attachments(self):
"""Regression test for #9367"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_str = msg.message().as_string()
message = email.message_from_string(msg_str)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_content_type(), 'multipart/mixed')
self.assertEqual(message.get_default_type(), 'text/plain')
payload = message.get_payload()
self.assertEqual(payload[0].get_content_type(), 'multipart/alternative')
self.assertEqual(payload[1].get_content_type(), 'application/pdf')
def test_non_ascii_attachment_filename(self):
"""Regression test for #14964"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
content = 'This is the message.'
msg = EmailMessage(subject, content, from_email, [to], headers=headers)
# Unicode in file name
msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_str = msg.message().as_string()
message = email.message_from_string(msg_str)
payload = message.get_payload()
self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf')
def test_dummy_backend(self):
"""
Make sure that dummy backends returns correct number of sent messages
"""
connection = dummy.EmailBackend()
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
self.assertEqual(connection.send_messages([email, email, email]), 3)
def test_arbitrary_keyword(self):
"""
Make sure that get_connection() accepts arbitrary keyword that might be
used with custom backends.
"""
c = mail.get_connection(fail_silently=True, foo='bar')
self.assertTrue(c.fail_silently)
def test_custom_backend(self):
"""Test custom backend defined in this suite."""
conn = mail.get_connection('mail.custombackend.EmailBackend')
self.assertTrue(hasattr(conn, 'test_outbox'))
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
conn.send_messages([email])
self.assertEqual(len(conn.test_outbox), 1)
def test_backend_arg(self):
"""Test backend argument of mail.get_connection()"""
self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.locmem.EmailBackend'), locmem.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.console.EmailBackend'), console.EmailBackend)
tmp_dir = tempfile.mkdtemp()
try:
self.assertIsInstance(mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir), filebased.EmailBackend)
finally:
shutil.rmtree(tmp_dir)
self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
@override_settings(
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
ADMINS=[('nobody', 'nobody@example.com')],
MANAGERS=[('nobody', 'nobody@example.com')])
def test_connection_arg(self):
"""Test connection argument to send_mail(), et. al."""
mail.outbox = []
# Send using non-default connection
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, 'Subject')
connection = mail.get_connection('mail.custombackend.EmailBackend')
send_mass_mail([
('Subject1', 'Content1', 'from1@example.com', ['to1@example.com']),
('Subject2', 'Content2', 'from2@example.com', ['to2@example.com']),
], connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 2)
self.assertEqual(connection.test_outbox[0].subject, 'Subject1')
self.assertEqual(connection.test_outbox[1].subject, 'Subject2')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_admins('Admin message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message')
connection = mail.get_connection('mail.custombackend.EmailBackend')
mail_managers('Manager message', 'Content', connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message')
def test_dont_mangle_from_in_body(self):
# Regression for #13433 - Make sure that EmailMessage doesn't mangle
# 'From ' in message body.
email = EmailMessage('Subject', 'From the future', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
self.assertFalse('>From the future' in email.message().as_string())
def test_dont_base64_encode(self):
# Ticket #3472
# Shouldn't use Base64 encoding at all
msg = EmailMessage('Subject', 'UTF-8 encoded body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
self.assertFalse('Content-Transfer-Encoding: base64' in msg.message().as_string())
# Ticket #11212
# Shouldn't use quoted printable, should detect it can represent content with 7 bit data
msg = EmailMessage('Subject', 'Body with only ASCII characters.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
s = msg.message().as_string()
self.assertFalse('Content-Transfer-Encoding: quoted-printable' in s)
self.assertTrue('Content-Transfer-Encoding: 7bit' in s)
# Shouldn't use quoted printable, should detect it can represent content with 8 bit data
msg = EmailMessage('Subject', 'Body with latin characters: àáä.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
s = msg.message().as_string()
self.assertFalse(str('Content-Transfer-Encoding: quoted-printable') in s)
self.assertTrue(str('Content-Transfer-Encoding: 8bit') in s)
msg = EmailMessage('Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
s = msg.message().as_string()
self.assertFalse(str('Content-Transfer-Encoding: quoted-printable') in s)
self.assertTrue(str('Content-Transfer-Encoding: 8bit') in s)
class BaseEmailBackendTests(object):
email_backend = None
def setUp(self):
self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def assertStartsWith(self, first, second):
if not first.startswith(second):
self.longMessage = True
self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.")
def get_mailbox_content(self):
raise NotImplementedError
def flush_mailbox(self):
raise NotImplementedError
def get_the_message(self):
mailbox = self.get_mailbox_content()
self.assertEqual(len(mailbox), 1,
"Expected exactly one message, got %d.\n%r" % (len(mailbox), [
m.as_string() for m in mailbox]))
return mailbox[0]
def test_send(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "from@example.com")
self.assertEqual(message.get_all("to"), ["to@example.com"])
def test_send_unicode(self):
email = EmailMessage('Chère maman', 'Je t\'aime très fort', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=')
self.assertEqual(force_text(message.get_payload()), 'Je t\'aime très fort')
def test_send_many(self):
email1 = EmailMessage('Subject', 'Content1', 'from@example.com', ['to@example.com'])
email2 = EmailMessage('Subject', 'Content2', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email1, email2])
self.assertEqual(num_sent, 2)
messages = self.get_mailbox_content()
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].get_payload(), "Content1")
self.assertEqual(messages[1].get_payload(), "Content2")
def test_send_verbose_name(self):
email = EmailMessage("Subject", "Content", '"Firstname Sürname" <from@example.com>',
["to@example.com"])
email.send()
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <from@example.com>")
@override_settings(MANAGERS=[('nobody', 'nobody@example.com')])
def test_html_mail_managers(self):
"""Test html_message argument to mail_managers"""
mail_managers('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(ADMINS=[('nobody', 'nobody@example.com')])
def test_html_mail_admins(self):
"""Test html_message argument to mail_admins """
mail_admins('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(
ADMINS=[('nobody', 'nobody+admin@example.com')],
MANAGERS=[('nobody', 'nobody+manager@example.com')])
def test_manager_and_admin_mail_prefix(self):
"""
String prefix + lazy translated subject = bad output
Regression for #13494
"""
mail_managers(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.flush_mailbox()
mail_admins(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
@override_settings(ADMINS=(), MANAGERS=())
def test_empty_admins(self):
"""
Test that mail_admins/mail_managers doesn't connect to the mail server
if there are no recipients (#9383)
"""
mail_admins('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
mail_managers('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
def test_message_cc_header(self):
"""
Regression test for #7722
"""
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
mail.get_connection().send_messages([email])
message = self.get_the_message()
self.assertStartsWith(message.as_string(), 'Content-Type: text/plain; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: 7bit\nSubject: Subject\nFrom: from@example.com\nTo: to@example.com\nCc: cc@example.com\nDate: ')
def test_idn_send(self):
"""
Regression test for #14301
"""
self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com']))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), 'from@xn--4ca9at.com')
self.assertEqual(message.get('to'), 'to@xn--4ca9at.com')
self.flush_mailbox()
m = EmailMessage('Subject', 'Content', 'from@öäü.com',
['to@öäü.com'], cc=['cc@öäü.com'])
m.send()
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), 'from@xn--4ca9at.com')
self.assertEqual(message.get('to'), 'to@xn--4ca9at.com')
self.assertEqual(message.get('cc'), 'cc@xn--4ca9at.com')
def test_recipient_without_domain(self):
"""
Regression test for #15042
"""
self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
message = self.get_the_message()
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), "tester")
self.assertEqual(message.get('to'), "django")
def test_close_connection(self):
"""
        Test that connection can be closed (even when not explicitly opened)
"""
conn = mail.get_connection(username='', password='')
try:
conn.close()
except Exception as e:
self.fail("close() unexpectedly raised an exception: %s" % e)
class LocmemBackendTests(BaseEmailBackendTests, TestCase):
email_backend = 'django.core.mail.backends.locmem.EmailBackend'
def get_mailbox_content(self):
return [m.message() for m in mail.outbox]
def flush_mailbox(self):
mail.outbox = []
def tearDown(self):
super(LocmemBackendTests, self).tearDown()
mail.outbox = []
def test_locmem_shared_messages(self):
"""
        Make sure that the locmem backend populates the outbox.
"""
connection = locmem.EmailBackend()
connection2 = locmem.EmailBackend()
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
connection.send_messages([email])
connection2.send_messages([email])
self.assertEqual(len(mail.outbox), 2)
def test_validate_multiline_headers(self):
# Ticket #18861 - Validate emails when using the locmem backend
with self.assertRaises(BadHeaderError):
send_mail('Subject\nMultiline', 'Content', 'from@example.com', ['to@example.com'])
class FileBackendTests(BaseEmailBackendTests, TestCase):
email_backend = 'django.core.mail.backends.filebased.EmailBackend'
def setUp(self):
super(FileBackendTests, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp_dir)
self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
self._settings_override.enable()
def tearDown(self):
self._settings_override.disable()
super(FileBackendTests, self).tearDown()
def flush_mailbox(self):
for filename in os.listdir(self.tmp_dir):
os.unlink(os.path.join(self.tmp_dir, filename))
def get_mailbox_content(self):
messages = []
for filename in os.listdir(self.tmp_dir):
with open(os.path.join(self.tmp_dir, filename), 'r') as fp:
session = force_text(fp.read()).split('\n' + ('-' * 79) + '\n')
messages.extend(email.message_from_string(force_str(m)) for m in session if m)
return messages
def test_file_sessions(self):
"""Make sure opening a connection creates a new file"""
msg = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
connection = mail.get_connection()
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0])) as fp:
message = email.message_from_file(fp)
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Subject')
self.assertEqual(message.get('from'), 'from@example.com')
self.assertEqual(message.get('to'), 'to@example.com')
connection2 = mail.get_connection()
connection2.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
msg.connection = mail.get_connection()
self.assertTrue(connection.open())
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
connection.close()
class ConsoleBackendTests(BaseEmailBackendTests, TestCase):
email_backend = 'django.core.mail.backends.console.EmailBackend'
def setUp(self):
super(ConsoleBackendTests, self).setUp()
self.__stdout = sys.stdout
self.stream = sys.stdout = StringIO()
def tearDown(self):
del self.stream
sys.stdout = self.__stdout
del self.__stdout
super(ConsoleBackendTests, self).tearDown()
def flush_mailbox(self):
self.stream = sys.stdout = StringIO()
def get_mailbox_content(self):
messages = force_text(self.stream.getvalue()).split('\n' + ('-' * 79) + '\n')
return [email.message_from_string(force_str(m)) for m in messages if m]
def test_console_stream_kwarg(self):
"""
Test that the console backend can be pointed at an arbitrary stream.
"""
s = StringIO()
connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s)
send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
self.assertTrue(s.getvalue().startswith('Content-Type: text/plain; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: 7bit\nSubject: Subject\nFrom: from@example.com\nTo: to@example.com\nDate: '))
class FakeSMTPServer(smtpd.SMTPServer, threading.Thread):
"""
Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from:
http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
"""
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
smtpd.SMTPServer.__init__(self, *args, **kwargs)
self._sink = []
self.active = False
self.active_lock = threading.Lock()
self.sink_lock = threading.Lock()
def process_message(self, peer, mailfrom, rcpttos, data):
m = email.message_from_string(data)
if PY3:
maddr = email.utils.parseaddr(m.get('from'))[1]
else:
maddr = email.Utils.parseaddr(m.get('from'))[1]
if mailfrom != maddr:
return "553 '%s' != '%s'" % (mailfrom, maddr)
with self.sink_lock:
self._sink.append(m)
def get_sink(self):
with self.sink_lock:
return self._sink[:]
def flush_sink(self):
with self.sink_lock:
self._sink[:] = []
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
with self.active_lock:
asyncore.loop(timeout=0.1, count=1)
asyncore.close_all()
def stop(self):
if self.active:
self.active = False
self.join()
class SMTPBackendTests(BaseEmailBackendTests, TestCase):
email_backend = 'django.core.mail.backends.smtp.EmailBackend'
@classmethod
def setUpClass(cls):
cls.server = FakeSMTPServer(('127.0.0.1', 0), None)
cls._settings_override = override_settings(
EMAIL_HOST="127.0.0.1",
EMAIL_PORT=cls.server.socket.getsockname()[1])
cls._settings_override.enable()
cls.server.start()
@classmethod
def tearDownClass(cls):
cls._settings_override.disable()
cls.server.stop()
def setUp(self):
super(SMTPBackendTests, self).setUp()
self.server.flush_sink()
def tearDown(self):
self.server.flush_sink()
super(SMTPBackendTests, self).tearDown()
def flush_mailbox(self):
self.server.flush_sink()
def get_mailbox_content(self):
return self.server.get_sink()
@override_settings(EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_authentication_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.username, 'not empty username')
self.assertEqual(backend.password, 'not empty password')
@override_settings(EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_authentication_override_settings(self):
backend = smtp.EmailBackend(username='username', password='password')
self.assertEqual(backend.username, 'username')
self.assertEqual(backend.password, 'password')
@override_settings(EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_disabled_authentication(self):
backend = smtp.EmailBackend(username='', password='')
self.assertEqual(backend.username, '')
self.assertEqual(backend.password, '')
def test_server_stopped(self):
"""
Test that closing the backend while the SMTP server is stopped doesn't
raise an exception.
"""
backend = smtp.EmailBackend(username='', password='')
backend.open()
self.server.stop()
try:
backend.close()
except Exception as e:
self.fail("close() unexpectedly raised an exception: %s" % e)
| makinacorpus/django | tests/mail/tests.py | Python | bsd-3-clause | 35,289 |
"""
showspec is my homegrown spectrum plotter, meant to somewhat follow STARLINK's
SPLAT and have functionality similar to GAIA, but with an emphasis on producing
publication-quality plots (which SPLAT may be able to do, but not reproducibly)
.. todo::
-add spectrum arithmetic tools
(as is, you can use numpy.interp with sp.vind and sp.spectrum pretty
easily)
-implement other fitters
-e.g., NH3 hyperfine, Voigt
-move to object-oriented pylab/pyplot implementation (for bulk / speedup work)
-allow for non-plotting fitting work (curious... I've never needed that yet)
-Equivalent Width measurement without gaussian fitting
-probably should be part of the baseline code
-write documentation other people can read
12/21/2011 - ALL of the above to-do items ARE DONE! It's now hosted at <http://pyspeckit.bitbucket.org>
"""
import math
import pylab
from pylab import *
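# Work around pylab callables whose __module__ attribute is unset (it confuses
# some introspection and documentation tools); tag them as belonging to pylab.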
for k,v in pylab.__dict__.iteritems():
if hasattr(v,'__module__'):
if v.__module__ is None:
locals()[k].__module__ = 'pylab'
import matplotlib
from agpy.mpfit import mpfit
from collapse_gaussfit import *
from ratosexagesimal import *
import pyfits
import gaussfitter
import numpy
from numpy import isnan
from mad import MAD,nanmedian
def steppify(arr,isX=False,interval=0,sign=+1.0):
"""
*support function*
Converts an array to double-length for step plotting
"""
if isX and interval==0:
interval = numpy.abs(arr[1]-arr[0]) / 2.0
newarr = pylab.array(zip(arr-sign*interval,arr+sign*interval)).ravel()
return newarr
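# For illustration, with the default arguments steppify simply repeats each
# sample:
#   steppify(numpy.array([1., 2., 3.]))  ->  array([1., 1., 2., 2., 3., 3.])
# so that plot() draws flat steps between points.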
class SpecPlotter:
"""
SpecPlotter class. Takes in a spectrum or data cube, plotting properties,
and a velocity axis determination function. Look at splat_1d for a wrapper
that might actually be useful.
Whew, needs more documentation
"""
def __init__(self, cube, axis=None, xtol=None, ytol=None, vconv=lambda
x: x, xtora=lambda x: x, ytodec=lambda x: x, specname=None,
dv=None, color='k', hdr=None, errspec=None, maskspec=None,
fig=None, fignum=1, clear=True, title=None, xunits='km/s',
erralpha=0.2, ivconv=None, autorefresh=True, reffreq=None,
gaiafignum=0, gaiafig=None, clickid=None, **kwargs ):
self.vconv = vconv
self.xtora = xtora
self.ytodec = ytodec
self.cube = cube # where(numpy.isnan(cube),0,cube)
if len(self.cube.shape) > 1:
self.spectrum = cube[:,0,0] # spectrum is what's plotted; cube is the "raw data"
else:
self.spectrum = cube # spectrum is what's plotted; cube is the "raw data"
self.specname=specname
self.dv=dv
self.reffreq=reffreq
self.scale=1.0
self.units='K'
self.xunits=xunits
self.voff=0.0
self.offset=0.0
self.continuum=0.0
self.errspec = errspec
self.erralpha=erralpha
self.plotcolor=color
self.specfit = Specfit(self)
self.fitspec = self.specfit
self.baseline = Baseline(self)
#self.fft = FFT(self)
#self.psd = self.fft.psd
self.vmin=None
self.vmax=None
self.title=title
self.ivconv=ivconv
self.autorefresh=autorefresh
self.spectrumplot=None
self.errorplot=None
self.gaiafignum = gaiafignum
self.gaiafig = gaiafig
self.clickid = clickid
self.plotkwargs = kwargs
if maskspec is not None:
self.maskspec = maskspec
else:
self.maskspec = zeros(self.cube.shape)
self.linecollections =[]
self.texts =[]
if hdr: self.header = hdr
# figure out where to put the plot
        if fig is None and axis is None:
            fig=figure(fignum)
            if clear: fig.clf()
            self.axis = fig.gca()
        elif fig is not None and axis is None:
            if clear: fig.clf()
            self.axis = fig.gca()
        elif fig is None and axis is not None:
            self.axis = axis
        else: # if figure and axis are both set, just use axis
            self.axis = axis
        if clear: self.axis.clear()
def __call__(self, event):
"""
Connects map cube to specplotter...
"""
if event.inaxes:
clickX = event.xdata
clickY = event.ydata
tb = get_current_fig_manager().toolbar
#if ((self.axis is None) or (self.axis==event.inaxes)) and tb.mode=='':
if event.button==1 and tb.mode=='':
print "OverPlotting spectrum from point %i,%i" % (clickX,clickY)
self.plotspec(clickY,clickX,button=event.button,cube=True)
elif event.button==2:
print "Plotting spectrum from point %i,%i" % (clickX,clickY)
self.plotspec(clickY,clickX,button=event.button,cube=True,clear=True)
elif event.button==3:
print "Disconnecting GAIA-like tool"
self.gaiafig.canvas.mpl_disconnect(self.clickid)
else:
print "Call failed for some reason: "
print "event: ",event
def plotspec(self, i=0, j=0, cube=False, title=None,
clear=False, color=None, continuum=None,
axis=None, offset=None, scale=None, voff=None, vmin=None,
vmax=None, units=None, xunits=None, erralpha=None, plotpix=False,
errstyle='fill', autorefresh=None, button=None, **kwargs):
"""
Plot a spectrum
Originally written to plot spectra from data cubes, hence the i,j parameter
to specify the location in the cube
Now, cube defaults to False, but you can still pass in a data cube.
Inputs:
        title, color, kwargs - semi-obvious plot-related commands
axis - You can pass in a Matplotlib axis instance and it will plot on that
clear - Clear the axis before plotting?
continuum - if you've already subtracted out a continuum, you can add
it back in (only if it is a constant offset). It will be included in
the spectrum
offset - Like continuum, but ONLY for plotting purposes. Will move the
plot vertically but will NOT include values in the .spectrum
scale - multiplicative factor to scale the data by (NOT for plotting
purposes; modifies spectrum)
voff - Shift the spectrum on the velocity axis by this amount
vmin,vmax - only plot within this range
(note that these keywords passed to splat_1d MAY crop the spectrum)
units - units of the data. At the moment, no conversions are done
        xunits - units of the X axis. Can affect other procedures, like showlines,
          and some unit conversion (Hz to GHz) is done
erralpha - Transparency of the errorbars if plotted
errstyle - style of errorbars if plotted
plotpix - if set, will plot against a pixel (channel) axis instead of a
physical axis
autorefresh - automatically update the plot when fitting gaussians, labeling,
etc?
"""
if kwargs.has_key('fignum'): kwargs.pop('fignum') # HACK because I want __init__ to accept different kwargs
if kwargs.has_key('fig'): kwargs.pop('fig') # is there a better workaround?
if scale is not None: self.scale = scale
if units is not None: self.units = units
if xunits is not None: self.xunits= xunits
if voff is not None: self.voff = voff
if offset is not None: self.offset= offset
if continuum is not None: self.continuum= continuum
if color is not None: self.plotcolor=color
if erralpha is not None: self.erralpha= erralpha
if vmax is not None: self.vmax = vmax
if vmin is not None: self.vmin = vmin
if title is not None: self.title = title
if autorefresh is not None: self.autorefresh = autorefresh
if axis is None: axis=self.axis # allow spectrum to be plotted on other axis
if clear: axis.clear()
if plotpix:
self.vind = arange(self.cube.shape[0])
else:
self.vind = self.vconv(arange(self.cube.shape[0])) + self.voff
if kwargs.has_key('linewidth'):
linewidth = kwargs.pop('linewidth')
else:
linewidth=0.5
if cube or len(self.cube.shape) == 3:
self.spectrum = self.cube[:,i,j]*self.scale+self.continuum-self.baseline.basespec
self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
linestyle='steps-mid',linewidth=linewidth,
**kwargs)
else:
if self.maskspec.sum() > 0:
nanmask = where(self.maskspec,numpy.nan,1)
self.spectrum = self.cube*self.scale*nanmask+self.continuum-self.baseline.basespec
self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
linestyle='steps-mid',linewidth=linewidth,
**kwargs)
else:
self.spectrum = self.cube*self.scale+self.continuum-self.baseline.basespec
self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
linestyle='steps-mid',linewidth=linewidth,
**kwargs)
if self.errspec is not None:
if errstyle == 'fill':
self.errorplot = [axis.fill_between(steppify(self.vind,isX=True,sign=sign(self.dv)),
steppify(self.spectrum+self.offset-self.errspec*self.scale),
steppify(self.spectrum+self.offset+self.errspec*self.scale),
facecolor=self.plotcolor, alpha=self.erralpha, **kwargs)]
elif errstyle == 'bars':
self.errorplot = axis.errorbar(self.vind, self.spectrum+self.offset,
yerr=self.errspec*self.scale, ecolor=self.plotcolor, fmt=None,
**kwargs)
if vmin is not None: xlo = self.vmin
else: xlo=self.vind.min()
if vmax is not None: xhi = self.vmax
else: xhi=self.vind.max()
axis.set_xlim(xlo,xhi)
if self.title is not None:
axis.set_title(self.title)
elif self.xtora and self.ytodec:
axis.set_title("Spectrum at %s %s" %
(ratos(self.xtora(i)),dectos(self.ytodec(j))))
elif self.specname:
axis.set_title("Spectrum of %s" % self.specname)
if isinstance(self.xunits,str):
axis.set_xlabel(self.xunits)
else:
axis.set_xlabel("V$_{LSR}$ (km s$^{-1}$)")
self.xunits = 'km/s'
if units in ['Ta*','Tastar','K']:
axis.set_ylabel("$T_A^*$ (K)")
elif units == 'mJy':
axis.set_ylabel("$S_\\nu$ (mJy)")
elif units == 'Jy':
axis.set_ylabel("$S_\\nu$ (Jy)")
else:
axis.set_ylabel(self.units)
if self.autorefresh: self.refresh()
def save(self,fname,**kwargs):
"""
Save the current spectrum (useful for saving baselined data)
"""
newfile = pyfits.PrimaryHDU(data=self.cube,header=self.header)
newfile.writeto(fname,**kwargs)
def savefig(self,fname,bbox_inches='tight',**kwargs):
"""
        Simple wrapper of matplotlib's savefig.
"""
self.axis.figure.savefig(fname,bbox_inches=bbox_inches,**kwargs)
def showlines(self,linefreqs,linenames,ctype='freq',cunit='hz',yscale=0.8,vofflines=0.0,
voffunit='km/s',**kwargs):
"""
Overplot vertical lines and labels at the frequencies (or velocities) of each line
yscale - fraction of maximum at which to label
"""
self.clearlines()
if ctype != 'freq':
print "Sorry, non-frequency units not implemented yet."
return
speedoflight=2.99792458e5
if self.reffreq and self.xunits in ('km/s','m/s'):
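            # radio-convention Doppler shift: v = c*(f0 - f)/f0, so lines above
            # the reference frequency land at negative velocities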
linefreqs = -(array(linefreqs)-self.reffreq)/self.reffreq * speedoflight
if 'hz' in cunit or 'Hz' in cunit:
linefreqs *= (1.0 + vofflines / speedoflight)
else:
linefreqs += vofflines
ymax = (self.spectrum[self.spectrum==self.spectrum]).max()
for lf,ln in zip(linefreqs,linenames):
if lf < self.vind.max() and lf > self.vind.min():
self.linecollections.append(vlines(lf,0,ymax,**kwargs))
self.texts.append(text(lf,ymax*yscale,ln,rotation='vertical',**kwargs))
if self.autorefresh: self.refresh()
def clearlines(self):
if len(self.texts) > 0:
for T in self.texts:
if T in self.axis.texts:
self.axis.texts.remove(T)
if len(self.linecollections) > 0:
for LC in self.linecollections:
if LC in self.axis.collections:
self.axis.collections.remove(LC)
def refresh(self):
self.axis.figure.canvas.draw()
class FFT:
def __init__(self,specplotter,fignum=3,axis=None, color='k'):
self.specplotter=specplotter
if axis is None:
self.fignum=fignum
self.figure=figure(self.fignum)
self.axis=gca()
else:
self.axis=axis
self.figure=self.axis.figure
self.fignum=None
#self.axis.clear()
self.color=color
self.fftplot=None
self.setspec()
self.setshift()
self.clear()
def __call__(self,psd=False,shift=True):
self.setspec()
if psd:
self.psd(shift=shift)
else:
self.fft(shift=shift)
def fft(self,shift=True,logplot=False,**kwargs):
self.clear()
self.setshift(shift)
if logplot: self.axis.set_yscale('log')
else: self.axis.set_yscale('linear')
self.fftspec = fft(self.spectofft)
self.realfft = self.fftspec.real
self.imagfft = self.fftspec.imag
self.fftplot = self.axis.plot(self.shiftfunc(self.realfft),
drawstyle='steps-mid',color=self.color,**kwargs)
self.refresh()
def psd(self,logplot=True,shift=True,**kwargs):
self.clear()
if logplot: self.axis.set_yscale('log')
else: self.axis.set_yscale('linear')
self.setshift(shift)
self.psdspec = fft(self.spectofft) * fft(self.spectofft[::-1])
self.psdreal = abs(self.psdspec)
self.fftplot = self.axis.plot(self.shiftfunc(self.psdreal),
drawstyle='steps-mid',color=self.color,**kwargs)
if logplot: self.axis.set_yscale('log')
else: self.axis.set_yscale('linear')
self.refresh()
def setshift(self,shift=True):
if shift: self.shiftfunc = fftshift
else: self.shiftfunc = lambda x: x
def setspec(self):
self.spectofft = copy(self.specplotter.spectrum)
OKmask = (self.spectofft==self.spectofft)
        self.spectofft[~OKmask] = 0
def clear(self):
if self.fftplot is not None:
for p in self.fftplot:
p.set_visible(False)
if p in self.axis.lines: self.axis.lines.remove(p)
self.axis.clear()
self.refresh()
def refresh(self):
self.axis.figure.canvas.draw()
class PSD(FFT):
def __call__(self,shift=True):
self.setspec()
self.setshift(shift)
self.clear()
self.psd()
self.refresh()
class Baseline:
def __init__(self,specplotter):
self.baselinepars = None
self.order = None
self.basespec = zeros(specplotter.spectrum.shape[0])
self.excludemask = zeros(specplotter.spectrum.shape[0],dtype='bool')
self.OKmask = ones(specplotter.spectrum.shape[0],dtype='bool')
self.specplotter = specplotter
self.blleg = None
self.click = 0
self.nclicks_b1 = 0
self.nclicks_b2 = 0
self.fitregion=[]
self.excludevelo = []
self.excludepix = []
def __call__(self, order=1, annotate=False, excludefit=False, save=True,
exclude=None, exclusionlevel=0.01,
interactive=False, **kwargs):
"""
        Fit and remove a polynomial baseline from the spectrum. The baseline
        is saved in "self.basespec", the polynomial coefficients in
        "self.baselinepars", and the fit order in "self.order".
        This wraps the module-level baseline() function:
        if xmin,xmax are not specified, defaults to ignoring first and last 10% of spectrum
        exclude is a set of start/end indices to ignore when baseline fitting
        (ignored by setting error to infinite in fitting procedure)
excludefit creates a mask based on the fitted gaussian model (assuming
that it has a zero-height) using an exclusion level of (exclusionlevel)
* the smallest gaussian peak that was fit
"basespec" is added back to the spectrum before fitting so you can run this
procedure multiple times without losing information
"""
specfit = self.specplotter.specfit
self.order = order
fitp = zeros(self.order+1)
self.spectofit = self.specplotter.spectrum+self.basespec
self.OKmask = (self.spectofit==self.spectofit)
if exclude == 'interactive' or interactive:
self.excludemask[:] = True
self.excludevelo = []
self.excludepix = []
self.click = self.specplotter.axis.figure.canvas.mpl_connect('button_press_event',self.selectregion)
else:
if excludefit and specfit.modelpars is not None:
#vlo = self.specplotter.specfit.modelpars[1] - 2*self.specplotter.specfit.modelpars[2]
#vhi = self.specplotter.specfit.modelpars[1] + 2*self.specplotter.specfit.modelpars[2]
#exclude = [argmin(abs(self.specplotter.vind-vlo)),argmin(abs(self.specplotter.vind-vhi))]
specfit.fullsizemodel() # make sure the spectrum is the right size
self.excludemask = abs(specfit.model) > exclusionlevel*abs(min(specfit.modelpars[0::3]))
else:
self.excludemask[:] = False
self.dofit(exclude=exclude,annotate=annotate,**kwargs)
if save: self.savefit()
def dofit(self, exclude=None, excludeunits='velo', annotate=False,
**kwargs):
"""
Do the baseline fitting and save and plot the results.
Can specify a region to exclude using velocity units or pixel units
"""
if exclude is not None and excludeunits in ['velo','km/s']:
if len(exclude) % 2 == 0:
self.excludevelo = exclude
self.excludepix = []
for vl,vu in zip(exclude[::2],exclude[1::2]):
xl = argmin(abs(self.specplotter.vind-vl))
xu = argmin(abs(self.specplotter.vind-vu))
if xl > xu: xl,xu=xu,xl
self.excludemask[xl:xu] = True
self.excludepix += [xl,xu]
elif excludeunits in ['pix','pixel','chan','channel']:
if len(exclude) % 2 == 0:
self.excludepix = []
for xl,xu in zip(exclude[::2],exclude[1::2]):
if xl > xu: xl,xu=xu,xl
self.excludemask[xl:xu] = True
self.excludepix += [xl,xu]
self.specplotter.spectrum, self.baselinepars = baseline(
self.spectofit,
xarr=self.specplotter.vind,
order=self.order, exclude=None,
                mask=(~self.OKmask)|self.excludemask,
**kwargs)
self.basespec = poly1d(self.baselinepars)(self.specplotter.vind)
if self.specplotter.spectrumplot is not None:
[self.specplotter.axis.lines.remove(p) for p in self.specplotter.spectrumplot]
if self.specplotter.errorplot is not None:
[self.specplotter.axis.collections.remove(p) for p in self.specplotter.errorplot if isinstance(p,matplotlib.collections.PolyCollection)]
[self.specplotter.axis.lines.remove(p) for p in self.specplotter.errorplot if isinstance(p,matplotlib.lines.Line2D)]
self.specplotter.plotspec(**self.specplotter.plotkwargs)
self.specplotter.axis.set_ylim(
abs(self.specplotter.spectrum[self.OKmask].min())*1.1*sign(self.specplotter.spectrum[self.OKmask].min()),
abs(self.specplotter.spectrum[self.OKmask].max())*1.1*sign(self.specplotter.spectrum[self.OKmask].max()))
if annotate: self.annotate() # refreshes automatically
elif self.specplotter.autorefresh: self.specplotter.refresh()
def selectregion(self,event,annotate=False):
"""
select regions for baseline fitting
"""
if event.button == 1:
if self.nclicks_b1 == 0:
self.bx1 = argmin(abs(event.xdata-self.specplotter.vind))
self.excludevelo += [self.specplotter.vind]
self.excludepix += [self.bx1]
self.nclicks_b1 += 1
elif self.nclicks_b1 == 1:
self.bx2 = argmin(abs(event.xdata-self.specplotter.vind))
self.nclicks_b1 -= 1
if self.bx1 > self.bx2: self.bx1,self.bx2 = self.bx2,self.bx1
self.fitregion += self.specplotter.axis.plot(
self.specplotter.vind[self.bx1:self.bx2],
self.specplotter.spectrum[self.bx1:self.bx2]+self.specplotter.offset,
drawstyle='steps-mid',
color='g',alpha=0.5)
self.specplotter.refresh()
self.excludemask[self.bx1:self.bx2] = False
self.excludevelo += [self.specplotter.vind]
self.excludepix += [self.bx2]
if event.button in [2,3]:
disconnect(self.click)
self.dofit(exclude=None,annotate=annotate)
for p in self.fitregion:
p.set_visible(False)
self.specplotter.axis.lines.remove(p)
self.fitregion=[] # I should be able to just remove from the list... but it breaks the loop...
self.specplotter.refresh()
def annotate(self,loc='upper left'):
bltext = "bl: $y=$"+"".join(["$%+6.3gx^{%i}$" % (f,self.order-i)
for i,f in enumerate(self.baselinepars)])
#self.blleg = text(xloc,yloc ,bltext,transform = self.specplotter.axis.transAxes)
self.clearlegend()
pl = matplotlib.collections.CircleCollection([0],edgecolors=['k'])
self.blleg = self.specplotter.axis.legend(
(pl,),
(bltext,),loc=loc,markerscale=0.001,
borderpad=0.1, handlelength=0.1, handletextpad=0.1
)
self.specplotter.axis.add_artist(self.blleg)
if self.specplotter.autorefresh: self.specplotter.refresh()
def clearlegend(self):
if self.blleg is not None:
self.blleg.set_visible(False)
if self.blleg in self.specplotter.axis.artists:
self.specplotter.axis.artists.remove(self.blleg)
if self.specplotter.autorefresh: self.specplotter.refresh()
def savefit(self):
if self.baselinepars is not None:
for ii,p in enumerate(self.baselinepars):
                self.specplotter.header.update('BLCOEF%0.2i' % (ii),p,comment="Baseline polynomial best-fit coefficient x^%i" % (self.order-ii-1))
class Specfit:
def __init__(self,specplotter):
self.model = None
self.modelpars = None
self.modelerrs = None
self.modelplot = None
self.guessplot = []
self.fitregion = []
self.ngauss = 0
self.nclicks_b1 = 0
self.nclicks_b2 = 0
self.gx1 = 0
self.gx2 = specplotter.spectrum.shape[0]
self.guesses = []
self.click = 0
self.fitkwargs = {}
self.auto = False
self.autoannotate = True
self.specplotter = specplotter
self.gaussleg=None
self.residuals=None
self.setfitspec()
#self.seterrspec()
def __call__(self, interactive=False, usemoments=True, fitcolor='r',
multifit=False, guesses=None, annotate=True, save=True,
**kwargs):
"""
Fit gaussians to a spectrum
guesses = [height,amplitude,center,width]
"""
self.fitcolor = fitcolor
self.clear()
self.ngauss = 0
self.fitkwargs = kwargs
if interactive:
print "Left-click twice to select a fitting range, then middle-click twice to select a peak and width"
self.nclicks_b1 = 0
self.nclicks_b2 = 0
self.guesses = []
self.click = self.specplotter.axis.figure.canvas.mpl_connect('button_press_event',self.makeguess)
self.autoannotate = annotate
elif multifit:
if guesses is None:
print "You must input guesses when using multifit. Also, baseline first!"
else:
self.guesses = guesses
self.multifit()
self.autoannotate = annotate
else:
#print "Non-interactive, 1D fit with automatic guessing"
if self.specplotter.baseline.order is None:
self.specplotter.baseline.order=0
self.onedfit(usemoments=usemoments,annotate=annotate,**kwargs)
else:
self.onedfit(usemoments=usemoments,annotate=annotate,
vheight=False,height=0.0,**kwargs)
if self.specplotter.autorefresh: self.specplotter.refresh()
if save: self.savefit()
def seterrspec(self,usestd=None,useresiduals=True):
if self.specplotter.errspec is not None and not usestd:
self.errspec = self.specplotter.errspec
elif self.residuals is not None and useresiduals:
self.errspec = ones(self.spectofit.shape[0]) * self.residuals.std()
else: self.errspec = ones(self.spectofit.shape[0]) * self.spectofit.std()
def setfitspec(self):
self.spectofit = copy(self.specplotter.spectrum)
OKmask = (self.spectofit==self.spectofit)
        self.spectofit[~OKmask] = 0
        self.seterrspec()
        self.errspec[~OKmask] = 1e10
def multifit(self):
self.ngauss = len(self.guesses)/3
self.setfitspec()
if self.fitkwargs.has_key('negamp'): self.fitkwargs.pop('negamp')
mpp,model,mpperr,chi2 = gaussfitter.multigaussfit(
self.specplotter.vind[self.gx1:self.gx2],
self.spectofit[self.gx1:self.gx2],
err=self.errspec[self.gx1:self.gx2],
ngauss=self.ngauss,
params=self.guesses,
**self.fitkwargs)
self.chi2 = chi2
self.dof = self.gx2-self.gx1-self.ngauss*3
self.model = model
self.modelpars = mpp.tolist()
self.modelerrs = mpperr.tolist()
self.modelplot = self.specplotter.axis.plot(
self.specplotter.vind[self.gx1:self.gx2],
self.model+self.specplotter.offset, color=self.fitcolor, linewidth=0.5)
self.residuals = self.spectofit[self.gx1:self.gx2] - self.model
if self.autoannotate:
self.annotate()
def onedfit(self, usemoments=True, annotate=True, vheight=True, height=0, negamp=None,**kwargs):
self.ngauss = 1
self.auto = True
self.setfitspec()
if usemoments: # this can be done within gaussfit but I want to save them
self.guesses = gaussfitter.onedmoments(
self.specplotter.vind[self.gx1:self.gx2],
self.spectofit[self.gx1:self.gx2],
vheight=vheight,negamp=negamp,**kwargs)
if vheight is False: self.guesses = [height]+self.guesses
else:
if negamp: self.guesses = [height,-1,0,1]
else: self.guesses = [height,1,0,1]
mpp,model,mpperr,chi2 = gaussfitter.onedgaussfit(
self.specplotter.vind[self.gx1:self.gx2],
self.spectofit[self.gx1:self.gx2],
err=self.errspec[self.gx1:self.gx2],
vheight=vheight,
params=self.guesses,
**self.fitkwargs)
self.chi2 = chi2
self.dof = self.gx2-self.gx1-self.ngauss*3-vheight
if vheight:
self.specplotter.baseline.baselinepars = mpp[:1] # first item in list form
self.model = model - mpp[0]
else: self.model = model
self.residuals = self.spectofit[self.gx1:self.gx2] - self.model
self.modelpars = mpp[1:].tolist()
self.modelerrs = mpperr[1:].tolist()
self.modelplot = self.specplotter.axis.plot(
self.specplotter.vind[self.gx1:self.gx2],
self.model+self.specplotter.offset, color=self.fitcolor, linewidth=0.5)
if annotate:
self.annotate()
if vheight: self.specplotter.baseline.annotate()
def fullsizemodel(self):
"""
If the gaussian was fit to a sub-region of the spectrum,
expand it (with zeros) to fill the spectrum. You can
always recover the original by:
origmodel = model[gx1:gx2]
"""
if self.model.shape != self.specplotter.spectrum.shape:
temp = zeros(self.specplotter.spectrum.shape)
temp[self.gx1:self.gx2] = self.model
self.model = temp
self.residuals = self.spectofit - self.model
def plotresiduals(self,fig=None,axis=None,clear=True,**kwargs):
"""
Plot residuals of the fit. Specify a figure or
axis; defaults to figure(2).
kwargs are passed to matplotlib plot
"""
if axis is None:
fig=figure(2)
self.residualaxis = gca()
if clear: self.residualaxis.clear()
else:
self.residualaxis = axis
if clear: self.residualaxis.clear()
self.residualplot = self.residualaxis.plot(self.specplotter.vind[self.gx1:self.gx2],
self.residuals,drawstyle='steps-mid',
linewidth=0.5, color='k', **kwargs)
if self.specplotter.vmin is not None and self.specplotter.vmax is not None:
self.residualaxis.set_xlim(self.specplotter.vmin,self.specplotter.vmax)
self.residualaxis.figure.canvas.draw()
def annotate(self,loc='upper right'):
#text(xloc,yloc ,"c=%g" % self.modelpars[1],transform = self.specplotter.axis.transAxes)
#text(xloc,yloc-0.05,"w=%g" % self.modelpars[2],transform = self.specplotter.axis.transAxes)
#text(xloc,yloc-0.10,"a=%g" % self.modelpars[0],transform = self.specplotter.axis.transAxes)
self.clearlegend()
pl = matplotlib.collections.CircleCollection([0],edgecolors=['k'])
self.gaussleg = self.specplotter.axis.legend(
tuple([pl]*3*self.ngauss),
tuple(flatten(
[("c%i=%6.4g $\\pm$ %6.4g" % (jj,self.modelpars[1+jj*3],self.modelerrs[1+jj*3]),
"w%i=%6.4g $\\pm$ %6.4g" % (jj,self.modelpars[2+jj*3],self.modelerrs[2+jj*3]),
"a%i=%6.4g $\\pm$ %6.4g" % (jj,self.modelpars[0+jj*3],self.modelerrs[0+jj*3]))
for jj in range(self.ngauss)])),
loc=loc,markerscale=0.01,
borderpad=0.1, handlelength=0.1, handletextpad=0.1
)
self.gaussleg.draggable(True)
self.specplotter.axis.add_artist(self.gaussleg)
if self.specplotter.autorefresh: self.specplotter.refresh()
def selectregion(self,event):
if self.nclicks_b1 == 0:
self.gx1 = argmin(abs(event.xdata-self.specplotter.vind))
self.nclicks_b1 += 1
elif self.nclicks_b1 == 1:
self.gx2 = argmin(abs(event.xdata-self.specplotter.vind))
self.nclicks_b1 -= 1
if self.gx1 > self.gx2: self.gx1,self.gx2 = self.gx2,self.gx1
if abs(self.gx1-self.gx2) > 3: # can't fit w/ fewer data than pars
self.fitregion = self.specplotter.axis.plot(
self.specplotter.vind[self.gx1:self.gx2],
self.specplotter.spectrum[self.gx1:self.gx2]+self.specplotter.offset,
drawstyle='steps-mid',
color='c')
if self.guesses == []:
self.guesses = gaussfitter.onedmoments(
self.specplotter.vind[self.gx1:self.gx2],
self.spectofit[self.gx1:self.gx2],
vheight=0)
self.ngauss = 1
self.auto = True
else:
print "Fitting region is too small (channels %i:%i). Try again." % (self.gx1,self.gx2)
def guesspeakwidth(self,event):
if self.nclicks_b2 % 2 == 0:
if self.auto:
self.guesses[:2] = [event.ydata,event.xdata]
else:
self.guesses += [event.ydata,event.xdata,1]
self.ngauss += 1
self.nclicks_b2 += 1
self.guessplot += [self.specplotter.axis.scatter(event.xdata,event.ydata,marker='x',c='r')]
elif self.nclicks_b2 % 2 == 1:
self.guesses[-1] = abs(event.xdata-self.guesses[-2])
self.nclicks_b2 += 1
self.guessplot += self.specplotter.axis.plot([event.xdata,
2*self.guesses[-2]-event.xdata],[event.ydata]*2,
color='r')
if self.auto:
self.auto = False
if self.nclicks_b2 / 2 > self.ngauss:
print "There have been %i middle-clicks but there are only %i gaussians" % (self.nclicks_b2,self.ngauss)
self.ngauss += 1
def clear(self,legend=True):
if self.modelplot is not None:
for p in self.modelplot:
p.set_visible(False)
if legend: self.clearlegend()
def makeguess(self,event):
if event.button == 1:
self.selectregion(event)
elif event.button == 2:
self.guesspeakwidth(event)
elif event.button == 3:
disconnect(self.click)
if self.ngauss > 0:
print len(self.guesses)/3," Guesses: ",self.guesses," X channel range: ",self.gx1,self.gx2
if len(self.guesses) % 3 == 0:
self.multifit()
for p in self.guessplot + self.fitregion:
p.set_visible(False)
else:
print "error, wrong # of pars"
if self.specplotter.autorefresh: self.specplotter.refresh()
def clearlegend(self):
if self.gaussleg is not None:
self.gaussleg.set_visible(False)
if self.gaussleg in self.specplotter.axis.artists:
self.specplotter.axis.artists.remove(self.gaussleg)
if self.specplotter.autorefresh: self.specplotter.refresh()
def savefit(self):
if self.modelpars is not None:
for ii,p in enumerate(self.modelpars):
if ii % 3 == 0: self.specplotter.header.update('AMP%1i' % (ii/3),p,comment="Gaussian best fit amplitude #%i" % (ii/3))
if ii % 3 == 1: self.specplotter.header.update('CEN%1i' % (ii/3),p,comment="Gaussian best fit center #%i" % (ii/3))
if ii % 3 == 2: self.specplotter.header.update('WID%1i' % (ii/3),p,comment="Gaussian best fit width #%i" % (ii/3))
def mapplot(plane,cube,vconv=lambda x: x,xtora=lambda x: x,ytodec=lambda x: x, gaiafignum=0, specfignum=1):
gaiafig = figure(gaiafignum)
gaiafig.clf()
gaiaax = gaiafig.add_subplot(111)
gaiaax.imshow(plane)
sp = SpecPlotter(cube, vconv=vconv, xtora=xtora, ytodec=ytodec,
gaiafignum=gaiafignum, fignum=specfignum, gaiafig=gaiafig)
sp.clickid = gaiafig.canvas.mpl_connect('button_press_event',sp)
#connect('button_press_event',sp)
def splat_3d(filename,xi=0,yi=0,vmin=None,vmax=None,button=1,dobaseline=False,exclude=None,
smooth=None,smoothto=None,smoothtype='gaussian',order=1,savepre=None,**kwargs):
"""
Inputs:
    vmin,vmax - range over which to baseline and plot
exclude - (internal) range to exclude from baseline fit
"""
dv,v0,p3,hdr,cube,xtora,ytodec,vconv,xunits,conversion_factor,units = open_3d(filename)
if units is None: units="UNITS"
if xunits is None: xunits="km/s"
if conversion_factor == 0 or conversion_factor is None: conversion_factor=1.0
sp = splat_1d(vpars=[dv, v0, p3], hdr=hdr, spec=cube[:, yi, xi],
xtora=xtora, ytodec=ytodec, vconv=vconv, units=units,
conversion_factor=conversion_factor, xunits=xunits, **kwargs)
sp.cube = cube
return sp
def gaia(filename,estimator='max',axis=0):
f = pyfits.open(filename)
hdr = f[0].header
cube = f[0].data
dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
if dv is None: dv = hdr.get('CDELT3')
if dr is None: dr = hdr.get('CDELT1')
if dd is None: dd = hdr.get('CDELT2')
xtora = lambda x: (x-p1+1)*dr+r0 # convert pixel coordinates to RA/Dec/Velocity
ytodec = lambda y: (y-p2+1)*dd+d0
vconv = lambda v: (v-p3+1)*dv+v0
if axis > 0:
cube = cube.swapaxes(0,axis)
if estimator == 'max':
p = where(isnan(cube),0,cube).max(axis=0)
elif estimator == 'int':
p = where(isnan(cube),0,cube).sum(axis=0) * dv
elif estimator == 'intdivmax':
cut = MAD(cube.ravel()) + nanmedian(cube.ravel())
if cut < 0:
cut = 0
m = where(isnan(cube),0,cube).max(axis=0)
i = where(isnan(cube),0,cube).sum(axis=0) * dv
p = where(i<0,0,i)/where(m<=cut,numpy.inf,m)
elif estimator[-5:] == ".fits":
p = pyfits.open(estimator)[0].data
mapplot(p,cube,vconv,xtora,ytodec)
def baseline_file(filename,outfilename,vmin=None,vmax=None,order=1,crop=False):
f = pyfits.open(filename)
hdr = f[0].header
cube = f[0].data.squeeze()
dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
if dv is None: dv = hdr.get('CDELT3')
if dr is None: dr = hdr.get('CDELT1')
if dd is None: dd = hdr.get('CDELT2')
vconv = lambda v: (v-p3+1)*dv+v0
varr = vconv(arange(cube.shape[-1]))
if vmin is None: argvmin = None
else: argvmin = argmin(abs(varr-vmin))
if vmax is None: argvmax = None
else: argvmax = argmin(abs(varr-vmax))
    bspec,bfit = baseline(cube,xmin=argvmin,xmax=argvmax,order=order)
    # write the baseline-subtracted data back out, mirroring SpecPlotter.save
    pyfits.PrimaryHDU(data=bspec,header=hdr).writeto(outfilename)
def baseline(spectrum,xarr=None,xmin='default',xmax='default',order=1,quiet=True,exclude=None,
mask=None):
"""
Subtract a baseline from a spectrum
If xmin,xmax are not specified, defaults to ignoring first and last 10% of spectrum
*unless* order > 1, in which case ignoring the ends tends to cause strange effects
exclude is a set of start/end indices to ignore when baseline fitting
(ignored by setting error to infinite in fitting procedure)
"""
if xmin == 'default':
        if order <= 1: xmin = int( floor( spectrum.shape[-1]*0.1 ) )
else: xmin = 0
elif xmin is None:
xmin = 0
if xmax == 'default':
        if order <= 1: xmax = int( ceil( spectrum.shape[-1]*0.9 ) )
else: xmax = spectrum.shape[-1]
elif xmax is None:
xmax = spectrum.shape[-1]
pguess = [1]*(order+1)
if xarr is None:
xarr = indices(spectrum.shape).squeeze()
subxarr = xarr[xmin:xmax]
def mpfitfun(data,err):
def f(p,fjac=None): return [0,numpy.ravel((poly1d(p)(subxarr)-data)/err)]
return f
err = ones(spectrum.shape)
if exclude is not None:
err[exclude[0]:exclude[1]] = 1e10
if mask is not None:
if mask.dtype.name != 'bool': mask = mask.astype('bool')
err[mask] = 1e10
spectrum[mask] = 0
if (spectrum!=spectrum).sum() > 0:
print "There is an error in baseline: some values are NaN"
import pdb; pdb.set_trace()
mp = mpfit(mpfitfun(spectrum[xmin:xmax],err[xmin:xmax]),xall=pguess,quiet=quiet)
fitp = mp.params
bestfit = poly1d(fitp)(xarr).squeeze()
return (spectrum-bestfit),fitp
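# Minimal usage sketch for baseline(), assuming `spec` is a 1D numpy array:
#   cleaned, coeffs = baseline(spec, order=2, exclude=(40, 60))
# `cleaned` is spec minus the best-fit polynomial evaluated over the full x
# range; `coeffs` are poly1d coefficients, highest order first.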
def open_3d(filename):
f = pyfits.open(filename)
hdr = f[0].header
cube = f[0].data
if len(cube.shape) == 4: cube=cube[0,:,:,:]
#cube = reshape(cube.mean(axis=2).mean(axis=1),[cube.shape[0],1,1])
dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
if dv is None: dv = hdr.get('CDELT3')
if dr is None: dr = hdr.get('CDELT1')
if dd is None: dd = hdr.get('CDELT2')
xtora = lambda x: (x-p1+1)*dr+r0 # convert pixel coordinates to RA/Dec/Velocity
ytodec = lambda y: (y-p2+1)*dd+d0
vconv = lambda v: (v-p3+1)*dv+v0
if hdr.get('CUNIT3') in ['m/s','M/S']:
conversion_factor = 1000.0
xunits = 'km/s' # change to km/s because you're converting units
else:
xunits = hdr.get('CUNIT3')
if xunits in ("hz","Hz"):
print "Converting from Hz to GHz"
xunits = "GHz"
conversion_factor = 1.0e9
else:
conversion_factor = 1.0
units = hdr.get('BUNIT')
return dv,v0,p3,hdr,cube,xtora,ytodec,vconv,xunits,conversion_factor,units
def open_1d(filename,specnum=0,wcstype='',errspecnum=None,maskspecnum=None):
"""
Grabs all the relevant pieces of a 1d spectrum for plotting
wcstype is the suffix on the WCS type to get to velocity/frequency/whatever
"""
f = pyfits.open(filename)
hdr = f[0].header
spec = f[0].data
errspec = None
maskspec = None
if hdr.get('NAXIS') == 2:
if errspecnum is not None:
errspec = spec[errspecnum,:]
if maskspecnum is not None:
maskspec = spec[maskspecnum,:]
if isinstance(specnum,list):
spec = spec[specnum,:].mean(axis=0)
elif isinstance(specnum,int):
spec = spec[specnum,:]
else:
raise TypeError("Specnum is of wrong type (not a list of integers or an integer). Type: %s" %
str(type(specnum)))
elif hdr.get('NAXIS') > 2:
raise ValueError("Too many axes for open_1d (splat_1d) - use cube instead")
if hdr.get('CD1_1'+wcstype):
dv,v0,p3 = hdr['CD1_1'+wcstype],hdr['CRVAL1'+wcstype],hdr['CRPIX1'+wcstype]
else:
dv,v0,p3 = hdr['CDELT1'+wcstype],hdr['CRVAL1'+wcstype],hdr['CRPIX1'+wcstype]
if hdr.get('OBJECT'):
specname = hdr.get('OBJECT')
elif hdr.get('GLON') and hdr.get('GLAT'):
specname = "%s %s" % (hdr.get('GLON'),hdr.get('GLAT'))
else:
        # str.rstrip strips a *character set*, not a suffix, so use endswith
        specname = filename[:-5] if filename.endswith(".fits") else filename
if hdr.get('CUNIT1'+wcstype) in ['m/s','M/S']:
conversion_factor = 1000.0
xunits = 'km/s' # change to km/s because you're converting units
else:
xunits = hdr.get('CUNIT1'+wcstype)
if xunits in ("hz","Hz"):
print "Converting from Hz to GHz"
xunits = "GHz"
conversion_factor = 1.0e9
else:
conversion_factor = 1.0
vconv = lambda v: ((v-p3+1)*dv+v0)/conversion_factor
xtora=None
ytodec=None
units = hdr.get('BUNIT').strip()
if hdr.get('CTYPE1'+wcstype):
xtype = hdr.get('CTYPE1'+wcstype)
else:
xtype = 'VLSR'
if hdr.get('REFFREQ'+wcstype):
reffreq = hdr.get('REFFREQ'+wcstype)
else:
reffreq = None
return dv,v0,p3,conversion_factor,hdr,spec,vconv,xtora,ytodec,specname,units,xunits,errspec,maskspec,reffreq
def splat_1d(filename=None,vmin=None,vmax=None,button=1,dobaseline=False,
exclude=None,smooth=None,order=1,savepre=None,vcrop=True,
vconv=None,vpars=None,hdr=None,spec=None,xtora=None,ytodec=None,
specname=None,quiet=True,specnum=0,errspecnum=None,wcstype='',
offset=0.0, continuum=0.0, annotatebaseline=False, plotspectrum=True,
smoothto=None, xunits=None, units=None, conversion_factor=None,
smoothtype='gaussian',convmode='valid',maskspecnum=None,**kwargs):
"""
Wrapper for specplotter creation. Works nicely with 1D spectra with well-defined
FITS headers (i.e., CRVAL1, CRPIX1, CDELT1, and optionally CUNIT1 and CTYPE1)
This documentation needs to be updated a lot... I implemented a lot of features
without documenting them, which was a mistake
Inputs:
vmin,vmax - range over which to baseline and plot
exclude - (internal) range to exclude from baseline fit
vcrop - will vmin/vmax crop out data, or just set the plot limits?
"""
if (vpars and vconv and hdr and spec is not None and xtora and ytodec
and units and xunits and conversion_factor):
dv,v0,p3 = vpars
errspec = None
maskspec = None
reffreq = None
if units is None and kwargs.has_key('units'): units = kwargs.pop('units')
else:
dv,v0,p3,conversion_factor,hdr,spec,vconv,xtora,ytodec,specname_file,units,xunits,errspec,maskspec,reffreq = \
open_1d(filename,specnum=specnum,wcstype=wcstype,errspecnum=errspecnum,maskspecnum=maskspecnum)
if specname is None: specname=specname_file
if units is None and kwargs.has_key('units'): units = kwargs.pop('units')
    if isinstance(continuum,str):
if hdr.get(continuum) is not None:
continuum = hdr.get(continuum)
else:
raise ValueError("Continuum specified but none present.")
varr = vconv(arange(spec.shape[0]))
if vmin is None or vcrop==False: argvmin = 0
else:
argvmin = argmin(abs(varr-vmin))
if dv > 0:
hdr.update('CRPIX1'+wcstype,p3-argvmin)
if vmax is None or vcrop==False: argvmax = spec.shape[0]
else:
argvmax = argmin(abs(varr-vmax))
if dv < 0:
hdr.update('CRPIX1'+wcstype,p3-argvmax)
if argvmin > argvmax:
argvmin,argvmax = argvmax,argvmin
#if exclude is not None: exclude = exclude[::-1]
elif argvmin == argvmax:
raise Exception("Error: no data in velocity range %g:%g for source %s."
% (vmin,vmax,filename))
# these lines were meant to automatically put "exclude" into velocity
# units; this is now done in the baseline code
#if exclude is not None:
# exclude[0] = argmin(abs(varr-exclude[0]))
# exclude[1] = argmin(abs(varr-exclude[1]))
# exclude = array(exclude) - argvmin
vconv = lambda v: ((v-p3+argvmin+1)*dv+v0) / conversion_factor
ivconv = lambda V: p3-1-argvmin+(V*conversion_factor-v0)/dv
specplot = spec[argvmin:argvmax]
if errspec is not None: errspec=errspec[argvmin:argvmax]
if maskspec is not None: maskspec=maskspec[argvmin:argvmax]
if smoothto:
smooth = abs(smoothto/dv)
if smooth:
roundsmooth = round(smooth) # can only downsample by integers
# change fitter first
if smoothtype == 'hanning':
            specplot = convolve(specplot,hanning(2+roundsmooth)/hanning(2+roundsmooth).sum(),convmode)[::roundsmooth]
            kernsize = roundsmooth+2  # length of the hanning kernel used above
            ones_sameshape = zeros(roundsmooth+2)
            ones_sameshape[1:-1] = 1
elif smoothtype == 'boxcar':
specplot = convolve(specplot,ones(roundsmooth)/float(roundsmooth),convmode)[::roundsmooth]
kernsize = roundsmooth
ones_sameshape = ones(roundsmooth)
elif smoothtype == 'gaussian':
speclen = specplot.shape[0]
xkern = linspace(-1*smooth,smooth,smooth*3)
kernel = exp(-xkern**2/(2*(smooth/sqrt(8*log(2)))**2))
kernel /= kernel.sum()
kernsize = len(kernel)
specplot = convolve(specplot,kernel,convmode)[::roundsmooth]
ones_sameshape = zeros(roundsmooth*3)
ones_sameshape[roundsmooth:-roundsmooth] = 1
if errspec is not None:
errspec = sqrt(convolve(errspec**2,ones_sameshape,convmode)[::roundsmooth]) / float(roundsmooth)
if maskspec is not None:
maskspec = array(convolve(maskspec,ones_sameshape,convmode)[::roundsmooth],dtype='bool')
if maskspec.shape != specplot.shape: import pdb; pdb.set_trace()
# this bit of code may also make sense, but I'm shifting the center pixel instead
# b/c it's easier (?) to deal with velocity range
#v0 += (abs(dv)*smooth - abs(dv))/2.0 # pixel center moves by half the original pixel size
dv *= roundsmooth
if convmode == 'same':
newrefpix = (p3-argvmin)/roundsmooth
elif convmode == 'full':
newrefpix = (p3-0.5-argvmin+kernsize/2.0)/roundsmooth
elif convmode == 'valid':
newrefpix = (p3-0.5-argvmin-kernsize/2.0)/roundsmooth
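        # worked example for 'valid': with p3=10.0, argvmin=0, kernsize=2 and
        # roundsmooth=2, newrefpix = (10.0 - 0.5 - 0 - 1.0)/2 = 4.25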
# this was resolved by advanced guess-and check
# but also, sort of makes sense: FITS refers to the *center* of a pixel. You want to
# shift 1/2 pixel to the right so that the first pixel goes from 0 to 1
vconv = lambda v: ((v-newrefpix)*dv+v0)/conversion_factor
ivconv = lambda V: newrefpix+(V*conversion_factor-v0)/dv
hdr.update('CRPIX1'+wcstype,newrefpix+1)
hdr.update('CDELT1'+wcstype,dv)
sp = SpecPlotter(specplot, vconv=vconv, xtora=xtora, ytodec=ytodec,
specname=specname, dv=dv/conversion_factor, hdr=hdr, reffreq=reffreq,
errspec=errspec, maskspec=maskspec, xunits=xunits, **kwargs)
if plotspectrum:
sp.plotspec(button=button, cube=False, vmin=vmin, vmax=vmax,
units=units, offset=offset, continuum=continuum,
**kwargs)
if dobaseline:
sp.baseline(exclude=exclude,order=order,quiet=quiet,annotate=annotatebaseline)
if plotspectrum: sp.refresh()
if hdr.get('GLON') and hdr.get('GLAT'):
sp.glon = hdr.get('GLON')
sp.glat = hdr.get('GLAT')
if savepre is not None:
glon,glat = sp.glon,sp.glat
if glat < 0: pm=""
else: pm = "+"
savename = savepre + "G%07.3f%0s%07.3f_" % (glon,pm,glat) + hdr.get('MOLECULE').replace(' ','') + hdr.get('TRANSITI').replace(' ','')
savefig(savename+'.png')
return sp
def splat_tspec(filename,specnum=0,**kwargs):
"""
Same as splat_1d for tspec data
"""
tdata = pyfits.getdata(filename)
theader = pyfits.getheader(filename)
if len(tdata.shape) == 3:
tdata = tdata[specnum,:,:]
wavelength = tdata[0,:]
spectrum = tdata[1,:]
error = tdata[2,:]
vconv = lambda x: wavelength[x]
ivconv = lambda x: argmin(abs(wavelength-x))
specname='TSPEC'
dv = median(wavelength[1:] - wavelength[:-1])
sp = SpecPlotter(spectrum,vconv=vconv,specname=specname,dv=dv,hdr=theader)
sp.plotspec(cube=False,units=theader.get('YUNITS'),xunits=theader.get('XUNITS'),**kwargs)
return sp
| keflavich/agpy | agpy/showspec.py | Python | mit | 51,670 |
# This file is part of the REMOTE API
#
# Copyright 2006-2016 Coppelia Robotics GmbH. All rights reserved.
# marc@coppeliarobotics.com
# www.coppeliarobotics.com
#
# The REMOTE API is licensed under the terms of GNU GPL:
#
# -------------------------------------------------------------------
# The REMOTE API is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# THE REMOTE API IS DISTRIBUTED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
# WARRANTY. THE USER WILL USE IT AT HIS/HER OWN RISK. THE ORIGINAL
# AUTHORS AND COPPELIA ROBOTICS GMBH WILL NOT BE LIABLE FOR DATA LOSS,
# DAMAGES, LOSS OF PROFITS OR ANY OTHER KIND OF LOSS WHILE USING OR
# MISUSING THIS SOFTWARE.
#
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the REMOTE API. If not, see <http://www.gnu.org/licenses/>.
# -------------------------------------------------------------------
#
# This file was automatically created for V-REP release V3.3.2 on August 29th 2016
#constants
#Scene object types. Values are serialized
sim_object_shape_type =0
sim_object_joint_type =1
sim_object_graph_type =2
sim_object_camera_type =3
sim_object_dummy_type =4
sim_object_proximitysensor_type =5
sim_object_reserved1 =6
sim_object_reserved2 =7
sim_object_path_type =8
sim_object_visionsensor_type =9
sim_object_volume_type =10
sim_object_mill_type =11
sim_object_forcesensor_type =12
sim_object_light_type =13
sim_object_mirror_type =14
#General object types. Values are serialized
sim_appobj_object_type =109
sim_appobj_collision_type =110
sim_appobj_distance_type =111
sim_appobj_simulation_type =112
sim_appobj_ik_type =113
sim_appobj_constraintsolver_type=114
sim_appobj_collection_type =115
sim_appobj_ui_type =116
sim_appobj_script_type =117
sim_appobj_pathplanning_type =118
sim_appobj_RESERVED_type =119
sim_appobj_texture_type =120
# Ik calculation methods. Values are serialized
sim_ik_pseudo_inverse_method =0
sim_ik_damped_least_squares_method =1
sim_ik_jacobian_transpose_method =2
# Ik constraints. Values are serialized
sim_ik_x_constraint =1
sim_ik_y_constraint =2
sim_ik_z_constraint =4
sim_ik_alpha_beta_constraint=8
sim_ik_gamma_constraint =16
sim_ik_avoidance_constraint =64
# Ik calculation results
sim_ikresult_not_performed =0
sim_ikresult_success =1
sim_ikresult_fail =2
# Scene object sub-types. Values are serialized
# Light sub-types
sim_light_omnidirectional_subtype =1
sim_light_spot_subtype =2
sim_light_directional_subtype =3
# Joint sub-types
sim_joint_revolute_subtype =10
sim_joint_prismatic_subtype =11
sim_joint_spherical_subtype =12
# Shape sub-types
sim_shape_simpleshape_subtype =20
sim_shape_multishape_subtype =21
# Proximity sensor sub-types
sim_proximitysensor_pyramid_subtype =30
sim_proximitysensor_cylinder_subtype=31
sim_proximitysensor_disc_subtype =32
sim_proximitysensor_cone_subtype =33
sim_proximitysensor_ray_subtype =34
# Mill sub-types
sim_mill_pyramid_subtype =40
sim_mill_cylinder_subtype =41
sim_mill_disc_subtype =42
sim_mill_cone_subtype =43
# No sub-type
sim_object_no_subtype =200
#Scene object main properties (serialized)
sim_objectspecialproperty_collidable =0x0001
sim_objectspecialproperty_measurable =0x0002
#reserved =0x0004
#reserved =0x0008
sim_objectspecialproperty_detectable_ultrasonic =0x0010
sim_objectspecialproperty_detectable_infrared =0x0020
sim_objectspecialproperty_detectable_laser =0x0040
sim_objectspecialproperty_detectable_inductive =0x0080
sim_objectspecialproperty_detectable_capacitive =0x0100
sim_objectspecialproperty_renderable =0x0200
sim_objectspecialproperty_detectable_all =sim_objectspecialproperty_detectable_ultrasonic|sim_objectspecialproperty_detectable_infrared|sim_objectspecialproperty_detectable_laser|sim_objectspecialproperty_detectable_inductive|sim_objectspecialproperty_detectable_capacitive
sim_objectspecialproperty_cuttable =0x0400
sim_objectspecialproperty_pathplanning_ignored =0x0800
# Model properties (serialized)
sim_modelproperty_not_collidable =0x0001
sim_modelproperty_not_measurable =0x0002
sim_modelproperty_not_renderable =0x0004
sim_modelproperty_not_detectable =0x0008
sim_modelproperty_not_cuttable =0x0010
sim_modelproperty_not_dynamic =0x0020
sim_modelproperty_not_respondable =0x0040 # cannot be selected if sim_modelproperty_not_dynamic is not selected
sim_modelproperty_not_reset =0x0080 # Model is not reset at simulation end. This flag is cleared at simulation end
sim_modelproperty_not_visible =0x0100 # Whole model is invisible independent of local visibility settings
sim_modelproperty_not_model =0xf000 # object is not a model
# Check the documentation instead of comments below!!
# Following messages are dispatched to the Lua-message container
sim_message_ui_button_state_change =0 # a UI button, slider, etc. changed (due to a user's action). aux[0]=UI handle, aux[1]=button handle, aux[2]=button attributes, aux[3]=slider position (if slider)
sim_message_reserved9 =1 # Do not use
sim_message_object_selection_changed=2
sim_message_reserved10 =3 # do not use
sim_message_model_loaded =4
sim_message_reserved11 =5 # do not use
sim_message_keypress =6 # a key was pressed while the focus was on a page (aux[0]=key aux[1]=ctrl and shift key state)
sim_message_bannerclicked =7 # a banner was clicked (aux[0]=banner ID)
# Following messages are dispatched only to the C-API (not available from Lua)
sim_message_for_c_api_only_start =0x100 # Do not use
sim_message_reserved1 =0x101 # Do not use
sim_message_reserved2 =0x102 # Do not use
sim_message_reserved3 =0x103 # Do not use
sim_message_eventcallback_scenesave =0x104 # about to save a scene
sim_message_eventcallback_modelsave =0x105 # about to save a model (current selection will be saved)
sim_message_eventcallback_moduleopen =0x106 # called when simOpenModule in Lua is called
sim_message_eventcallback_modulehandle =0x107 # called when simHandleModule in Lua is called with argument false
sim_message_eventcallback_moduleclose =0x108 # called when simCloseModule in Lua is called
sim_message_reserved4 =0x109 # Do not use
sim_message_reserved5 =0x10a # Do not use
sim_message_reserved6 =0x10b # Do not use
sim_message_reserved7 =0x10c # Do not use
sim_message_eventcallback_instancepass =0x10d # Called once every main application loop pass. auxiliaryData[0] contains event flags of events that happened since last time
sim_message_eventcallback_broadcast =0x10e
sim_message_eventcallback_imagefilter_enumreset =0x10f
sim_message_eventcallback_imagefilter_enumerate =0x110
sim_message_eventcallback_imagefilter_adjustparams =0x111
sim_message_eventcallback_imagefilter_reserved =0x112
sim_message_eventcallback_imagefilter_process =0x113
sim_message_eventcallback_reserved1 =0x114 # do not use
sim_message_eventcallback_reserved2 =0x115 # do not use
sim_message_eventcallback_reserved3 =0x116 # do not use
sim_message_eventcallback_reserved4 =0x117 # do not use
sim_message_eventcallback_abouttoundo =0x118 # the undo button was hit and a previous state is about to be restored
sim_message_eventcallback_undoperformed =0x119 # the undo button was hit and a previous state restored
sim_message_eventcallback_abouttoredo =0x11a # the redo button was hit and a future state is about to be restored
sim_message_eventcallback_redoperformed =0x11b # the redo button was hit and a future state restored
sim_message_eventcallback_scripticondblclick =0x11c # script icon was double clicked. (aux[0]=object handle associated with script; set replyData[0] to 1 if script should not be opened)
sim_message_eventcallback_simulationabouttostart =0x11d
sim_message_eventcallback_simulationended =0x11e
sim_message_eventcallback_reserved5 =0x11f # do not use
sim_message_eventcallback_keypress =0x120 # a key was pressed while the focus was on a page (aux[0]=key aux[1]=ctrl and shift key state)
sim_message_eventcallback_modulehandleinsensingpart =0x121 # called when simHandleModule in Lua is called with argument true
sim_message_eventcallback_renderingpass =0x122 # called just before the scene is rendered
sim_message_eventcallback_bannerclicked =0x123 # called when a banner was clicked (aux[0]=banner ID)
sim_message_eventcallback_menuitemselected =0x124 # auxiliaryData[0] indicates the handle of the item auxiliaryData[1] indicates the state of the item
sim_message_eventcallback_refreshdialogs =0x125 # aux[0]=refresh degree (0=light 1=medium 2=full)
sim_message_eventcallback_sceneloaded =0x126
sim_message_eventcallback_modelloaded =0x127
sim_message_eventcallback_instanceswitch =0x128
sim_message_eventcallback_guipass =0x129
sim_message_eventcallback_mainscriptabouttobecalled =0x12a
sim_message_eventcallback_rmlposition =0x12b #the command simRMLPosition was called. The appropriate plugin should handle the call
sim_message_eventcallback_rmlvelocity =0x12c # the command simRMLVelocity was called. The appropriate plugin should handle the call
sim_message_simulation_start_resume_request =0x1000
sim_message_simulation_pause_request =0x1001
sim_message_simulation_stop_request =0x1002
# Scene object properties. Combine with the | operator
sim_objectproperty_reserved1 =0x0000
sim_objectproperty_reserved2 =0x0001
sim_objectproperty_reserved3 =0x0002
sim_objectproperty_reserved4 =0x0003
sim_objectproperty_reserved5 =0x0004 # formerly sim_objectproperty_visible
sim_objectproperty_reserved6 =0x0008 # formerly sim_objectproperty_wireframe
sim_objectproperty_collapsed =0x0010
sim_objectproperty_selectable =0x0020
sim_objectproperty_reserved7 =0x0040
sim_objectproperty_selectmodelbaseinstead =0x0080
sim_objectproperty_dontshowasinsidemodel =0x0100
# reserved =0x0200
sim_objectproperty_canupdatedna =0x0400
sim_objectproperty_selectinvisible =0x0800
sim_objectproperty_depthinvisible =0x1000
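# e.g., properties are combined with the | operator as noted above:
#   props = sim_objectproperty_selectable | sim_objectproperty_canupdatedna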
# type of arguments (input and output) for custom lua commands
sim_lua_arg_nil =0
sim_lua_arg_bool =1
sim_lua_arg_int =2
sim_lua_arg_float =3
sim_lua_arg_string =4
sim_lua_arg_invalid =5
sim_lua_arg_table =8
# custom user interface properties. Values are serialized.
sim_ui_property_visible =0x0001
sim_ui_property_visibleduringsimulationonly =0x0002
sim_ui_property_moveable =0x0004
sim_ui_property_relativetoleftborder =0x0008
sim_ui_property_relativetotopborder =0x0010
sim_ui_property_fixedwidthfont =0x0020
sim_ui_property_systemblock =0x0040
sim_ui_property_settocenter =0x0080
sim_ui_property_rolledup =0x0100
sim_ui_property_selectassociatedobject =0x0200
sim_ui_property_visiblewhenobjectselected =0x0400
# button properties. Values are serialized.
sim_buttonproperty_button =0x0000
sim_buttonproperty_label =0x0001
sim_buttonproperty_slider =0x0002
sim_buttonproperty_editbox =0x0003
sim_buttonproperty_staydown =0x0008
sim_buttonproperty_enabled =0x0010
sim_buttonproperty_borderless =0x0020
sim_buttonproperty_horizontallycentered =0x0040
sim_buttonproperty_ignoremouse =0x0080
sim_buttonproperty_isdown =0x0100
sim_buttonproperty_transparent =0x0200
sim_buttonproperty_nobackgroundcolor =0x0400
sim_buttonproperty_rollupaction =0x0800
sim_buttonproperty_closeaction =0x1000
sim_buttonproperty_verticallycentered =0x2000
sim_buttonproperty_downupevent =0x4000
# Simulation status
sim_simulation_stopped =0x00 # Simulation is stopped
sim_simulation_paused =0x08 # Simulation is paused
sim_simulation_advancing =0x10 # Simulation is advancing
sim_simulation_advancing_firstafterstop =sim_simulation_advancing|0x00 # First simulation pass (1x)
sim_simulation_advancing_running =sim_simulation_advancing|0x01 # Normal simulation pass (>=1x)
# reserved =sim_simulation_advancing|0x02
sim_simulation_advancing_lastbeforepause =sim_simulation_advancing|0x03 # Last simulation pass before pause (1x)
sim_simulation_advancing_firstafterpause =sim_simulation_advancing|0x04 # First simulation pass after pause (1x)
sim_simulation_advancing_abouttostop =sim_simulation_advancing|0x05 # "Trying to stop" simulation pass (>=1x)
sim_simulation_advancing_lastbeforestop =sim_simulation_advancing|0x06 # Last simulation pass (1x)
# Script execution result (first return value)
sim_script_no_error =0
sim_script_main_script_nonexistent =1
sim_script_main_script_not_called =2
sim_script_reentrance_error =4
sim_script_lua_error =8
sim_script_call_error =16
# Script types (serialized!)
sim_scripttype_mainscript =0
sim_scripttype_childscript =1
sim_scripttype_jointctrlcallback =4
sim_scripttype_contactcallback =5
sim_scripttype_customizationscript =6
sim_scripttype_generalcallback =7
# API call error messages
sim_api_errormessage_ignore =0 # does not memorize nor output errors
sim_api_errormessage_report =1 # memorizes errors (default for C-API calls)
sim_api_errormessage_output =2 # memorizes and outputs errors (default for Lua-API calls)
# special argument of some functions
sim_handle_all =-2
sim_handle_all_except_explicit =-3
sim_handle_self =-4
sim_handle_main_script =-5
sim_handle_tree =-6
sim_handle_chain =-7
sim_handle_single =-8
sim_handle_default =-9
sim_handle_all_except_self =-10
sim_handle_parent =-11
# special handle flags
sim_handleflag_assembly =0x400000
sim_handleflag_model =0x800000
# distance calculation methods (serialized)
sim_distcalcmethod_dl =0
sim_distcalcmethod_dac =1
sim_distcalcmethod_max_dl_dac =2
sim_distcalcmethod_dl_and_dac =3
sim_distcalcmethod_sqrt_dl2_and_dac2=4
sim_distcalcmethod_dl_if_nonzero =5
sim_distcalcmethod_dac_if_nonzero =6
# Generic dialog styles
sim_dlgstyle_message =0
sim_dlgstyle_input =1
sim_dlgstyle_ok =2
sim_dlgstyle_ok_cancel =3
sim_dlgstyle_yes_no =4
sim_dlgstyle_dont_center =32 # can be combined with one of above values. Only with this flag can the position of the related UI be set just after dialog creation
# Generic dialog return values
sim_dlgret_still_open =0
sim_dlgret_ok =1
sim_dlgret_cancel =2
sim_dlgret_yes =3
sim_dlgret_no =4
# Path properties
sim_pathproperty_show_line =0x0001
sim_pathproperty_show_orientation =0x0002
sim_pathproperty_closed_path =0x0004
sim_pathproperty_automatic_orientation =0x0008
sim_pathproperty_invert_velocity =0x0010
sim_pathproperty_infinite_acceleration =0x0020
sim_pathproperty_flat_path =0x0040
sim_pathproperty_show_position =0x0080
sim_pathproperty_auto_velocity_profile_translation =0x0100
sim_pathproperty_auto_velocity_profile_rotation =0x0200
sim_pathproperty_endpoints_at_zero =0x0400
sim_pathproperty_keep_x_up =0x0800
# drawing objects
# following are mutually exclusive
sim_drawing_points =0 # 3 values per point (point size in pixels)
sim_drawing_lines =1 # 6 values per line (line size in pixels)
sim_drawing_triangles =2 # 9 values per triangle
sim_drawing_trianglepoints =3 # 6 values per point (3 for triangle position 3 for triangle normal vector) (triangle size in meters)
sim_drawing_quadpoints =4 # 6 values per point (3 for quad position 3 for quad normal vector) (quad size in meters)
sim_drawing_discpoints =5 # 6 values per point (3 for disc position 3 for disc normal vector) (disc size in meters)
sim_drawing_cubepoints =6 # 6 values per point (3 for cube position 3 for cube normal vector) (cube size in meters)
sim_drawing_spherepoints =7 # 3 values per point (sphere size in meters)
# following can be or-combined
sim_drawing_itemcolors =0x00020 # +3 values per item (each item has its own ambient color (rgb values)).
# Mutually exclusive with sim_drawing_vertexcolors
sim_drawing_vertexcolors =0x00040 # +3 values per vertex (each vertex has its own ambient color (rgb values). Only for sim_drawing_lines (+6) and for sim_drawing_triangles(+9)). Mutually exclusive with sim_drawing_itemcolors
sim_drawing_itemsizes =0x00080 # +1 value per item (each item has its own size). Not for sim_drawing_triangles
sim_drawing_backfaceculling =0x00100 # back faces are not displayed for all items
sim_drawing_wireframe =0x00200 # all items displayed in wireframe
sim_drawing_painttag =0x00400 # all items are tagged as paint (for additional processing at a later stage)
sim_drawing_followparentvisibility =0x00800 # if the object is associated with a scene object then it follows that visibility otherwise it is always visible
sim_drawing_cyclic =0x01000 # if the max item count was reached then the first items are overwritten.
sim_drawing_50percenttransparency =0x02000 # the drawing object will be 50% transparent
sim_drawing_25percenttransparency =0x04000 # the drawing object will be 25% transparent
sim_drawing_12percenttransparency =0x08000 # the drawing object will be 12.5% transparent
sim_drawing_emissioncolor =0x10000 # When used in combination with sim_drawing_itemcolors or sim_drawing_vertexcolors then the specified colors will be for the emissive component
sim_drawing_facingcamera =0x20000 # Only for trianglepoints quadpoints discpoints and cubepoints. If specified the normal vector is calculated to face the camera (each item data requires 3 values less)
sim_drawing_overlay =0x40000 # When specified objects are always drawn on top of "regular objects"
sim_drawing_itemtransparency =0x80000 # +1 value per item (each item has its own transparency value (0-1)). Not compatible with sim_drawing_vertexcolors
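# Example (illustrative only, not part of the original constant list): a
# drawing object type combines exactly one of the mutually exclusive base
# types above with any number of the or-combinable flags. The name below is
# hypothetical.
_example_drawing_mode = (sim_drawing_spherepoints | sim_drawing_itemcolors |
    sim_drawing_itemsizes | sim_drawing_cyclic)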
# banner values
# following can be or-combined
sim_banner_left =0x00001 # Banners display on the left of the specified point
sim_banner_right =0x00002 # Banners display on the right of the specified point
sim_banner_nobackground =0x00004 # Banners have no background rectangle
sim_banner_overlay =0x00008 # When specified banners are always drawn on top of "regular objects"
sim_banner_followparentvisibility =0x00010 # if the object is associated with a scene object then it follows that visibility otherwise it is always visible
sim_banner_clickselectsparent =0x00020 # if the object is associated with a scene object then clicking the banner will select the scene object
sim_banner_clicktriggersevent =0x00040 # if the banner is clicked an event is triggered (sim_message_eventcallback_bannerclicked and sim_message_bannerclicked are generated)
sim_banner_facingcamera =0x00080 # If specified the banner will always face the camera by rotating around the banner's vertical axis (y-axis)
sim_banner_fullyfacingcamera =0x00100 # If specified the banner will always fully face the camera (the banner's orientation is same as the camera looking at it)
sim_banner_backfaceculling =0x00200 # If specified the banner will only be visible from one side
sim_banner_keepsamesize =0x00400 # If specified the banner will always appear in the same size. In that case size represents the character height in pixels
sim_banner_bitmapfont =0x00800 # If specified a fixed-size bitmap font is used. The text will also always fully face the camera and be displayed
# to the right of the specified position. Bitmap fonts are not clickable
# particle objects following are mutually exclusive
sim_particle_points1 =0 # 6 values per point (pt1 and pt2. Pt1 is start position pt2-pt1 is the initial velocity vector).
# Point is 1 pixel big. Only appearance is a point internally handled as a perfect sphere
sim_particle_points2 =1 # 6 values per point. Point is 2 pixel big. Only appearance is a point internally handled as a perfect sphere
sim_particle_points4 =2 # 6 values per point. Point is 4 pixel big. Only appearance is a point internally handled as a perfect sphere
sim_particle_roughspheres =3 # 6 values per sphere. Only appearance is rough. Internally a perfect sphere
sim_particle_spheres =4 # 6 values per sphere. Internally a perfect sphere
# following can be or-combined
sim_particle_respondable1to4 =0x0020 # the particles are respondable against shapes (against all objects that have at least one bit 1-4 activated in the global respondable mask)
sim_particle_respondable5to8 =0x0040 # the particles are respondable against shapes (against all objects that have at least one bit 5-8 activated in the global respondable mask)
sim_particle_particlerespondable =0x0080 # the particles are respondable against each other
sim_particle_ignoresgravity =0x0100 # the particles ignore the effect of gravity. Not compatible with sim_particle_water
sim_particle_invisible =0x0200 # the particles are invisible
sim_particle_itemsizes =0x0400 # +1 value per particle (each particle can have a different size)
sim_particle_itemdensities =0x0800 # +1 value per particle (each particle can have a different density)
sim_particle_itemcolors =0x1000 # +3 values per particle (each particle can have a different color)
sim_particle_cyclic =0x2000 # if the max item count was reached then the first items are overwritten.
sim_particle_emissioncolor =0x4000 # When used in combination with sim_particle_itemcolors then the specified colors will be for the emissive component
sim_particle_water =0x8000 # the particles are water particles (no weight in the water (i.e. when z<0)). Not compatible with sim_particle_ignoresgravity
sim_particle_painttag =0x10000 # The particles can be seen by vision sensors (sim_particle_invisible must not be set)
# custom user interface menu attributes
sim_ui_menu_title =1
sim_ui_menu_minimize =2
sim_ui_menu_close =4
sim_ui_menu_systemblock =8
# Boolean parameters
sim_boolparam_hierarchy_visible =0
sim_boolparam_console_visible =1
sim_boolparam_collision_handling_enabled =2
sim_boolparam_distance_handling_enabled =3
sim_boolparam_ik_handling_enabled =4
sim_boolparam_gcs_handling_enabled =5
sim_boolparam_dynamics_handling_enabled =6
sim_boolparam_joint_motion_handling_enabled =7
sim_boolparam_path_motion_handling_enabled =8
sim_boolparam_proximity_sensor_handling_enabled =9
sim_boolparam_vision_sensor_handling_enabled =10
sim_boolparam_mill_handling_enabled =11
sim_boolparam_browser_visible =12
sim_boolparam_scene_and_model_load_messages =13
sim_reserved0 =14
sim_boolparam_shape_textures_are_visible =15
sim_boolparam_display_enabled =16
sim_boolparam_infotext_visible =17
sim_boolparam_statustext_open =18
sim_boolparam_fog_enabled =19
sim_boolparam_rml2_available =20
sim_boolparam_rml4_available =21
sim_boolparam_mirrors_enabled =22
sim_boolparam_aux_clip_planes_enabled =23
sim_boolparam_full_model_copy_from_api =24
sim_boolparam_realtime_simulation =25
sim_boolparam_force_show_wireless_emission =27
sim_boolparam_force_show_wireless_reception =28
sim_boolparam_video_recording_triggered =29
sim_boolparam_threaded_rendering_enabled =32
sim_boolparam_fullscreen =33
sim_boolparam_headless =34
sim_boolparam_hierarchy_toolbarbutton_enabled =35
sim_boolparam_browser_toolbarbutton_enabled =36
sim_boolparam_objectshift_toolbarbutton_enabled =37
sim_boolparam_objectrotate_toolbarbutton_enabled=38
sim_boolparam_force_calcstruct_all_visible =39
sim_boolparam_force_calcstruct_all =40
sim_boolparam_exit_request =41
sim_boolparam_play_toolbarbutton_enabled =42
sim_boolparam_pause_toolbarbutton_enabled =43
sim_boolparam_stop_toolbarbutton_enabled =44
sim_boolparam_waiting_for_trigger =45
# Integer parameters
sim_intparam_error_report_mode =0 # Check sim_api_errormessage_... constants above for valid values
sim_intparam_program_version =1 # e.g. Version 2.1.4 --> 20104. Can only be read
sim_intparam_instance_count =2 # do not use anymore (always returns 1 since V-REP 2.5.11)
sim_intparam_custom_cmd_start_id =3 # can only be read
sim_intparam_compilation_version =4 # 0=evaluation version 1=full version 2=player version. Can only be read
sim_intparam_current_page =5
sim_intparam_flymode_camera_handle =6 # can only be read
sim_intparam_dynamic_step_divider =7 # can only be read
sim_intparam_dynamic_engine =8 # 0=Bullet 1=ODE. 2=Vortex.
sim_intparam_server_port_start =9 # can only be read
sim_intparam_server_port_range =10 # can only be read
sim_intparam_visible_layers =11
sim_intparam_infotext_style =12
sim_intparam_settings =13
sim_intparam_edit_mode_type =14 # can only be read
sim_intparam_server_port_next =15 # is initialized at sim_intparam_server_port_start
sim_intparam_qt_version =16 # version of the used Qt framework
sim_intparam_event_flags_read =17 # can only be read
sim_intparam_event_flags_read_clear =18 # can only be read
sim_intparam_platform =19 # can only be read
sim_intparam_scene_unique_id =20 # can only be read
sim_intparam_work_thread_count =21
sim_intparam_mouse_x =22
sim_intparam_mouse_y =23
sim_intparam_core_count =24
sim_intparam_work_thread_calc_time_ms =25
sim_intparam_idle_fps =26
sim_intparam_prox_sensor_select_down =27
sim_intparam_prox_sensor_select_up =28
sim_intparam_stop_request_counter =29
sim_intparam_program_revision =30
sim_intparam_mouse_buttons =31
sim_intparam_dynamic_warning_disabled_mask =32
sim_intparam_simulation_warning_disabled_mask =33
sim_intparam_scene_index =34
sim_intparam_motionplanning_seed =35
sim_intparam_speedmodifier =36
# Float parameters
sim_floatparam_rand=0 # random value (0.0-1.0)
sim_floatparam_simulation_time_step =1
sim_floatparam_stereo_distance =2
# String parameters
sim_stringparam_application_path=0 # path of V-REP's executable
sim_stringparam_video_filename=1
sim_stringparam_app_arg1 =2
sim_stringparam_app_arg2 =3
sim_stringparam_app_arg3 =4
sim_stringparam_app_arg4 =5
sim_stringparam_app_arg5 =6
sim_stringparam_app_arg6 =7
sim_stringparam_app_arg7 =8
sim_stringparam_app_arg8 =9
sim_stringparam_app_arg9 =10
sim_stringparam_scene_path_and_name =13
# Array parameters
sim_arrayparam_gravity =0
sim_arrayparam_fog =1
sim_arrayparam_fog_color =2
sim_arrayparam_background_color1=3
sim_arrayparam_background_color2=4
sim_arrayparam_ambient_light =5
sim_arrayparam_random_euler =6
sim_objintparam_visibility_layer= 10
sim_objfloatparam_abs_x_velocity= 11
sim_objfloatparam_abs_y_velocity= 12
sim_objfloatparam_abs_z_velocity= 13
sim_objfloatparam_abs_rot_velocity= 14
sim_objfloatparam_objbbox_min_x= 15
sim_objfloatparam_objbbox_min_y= 16
sim_objfloatparam_objbbox_min_z= 17
sim_objfloatparam_objbbox_max_x= 18
sim_objfloatparam_objbbox_max_y= 19
sim_objfloatparam_objbbox_max_z= 20
sim_objfloatparam_modelbbox_min_x= 21
sim_objfloatparam_modelbbox_min_y= 22
sim_objfloatparam_modelbbox_min_z= 23
sim_objfloatparam_modelbbox_max_x= 24
sim_objfloatparam_modelbbox_max_y= 25
sim_objfloatparam_modelbbox_max_z= 26
sim_objintparam_collection_self_collision_indicator= 27
sim_objfloatparam_transparency_offset= 28
sim_objintparam_child_role= 29
sim_objintparam_parent_role= 30
sim_objintparam_manipulation_permissions= 31
sim_objintparam_illumination_handle= 32
sim_visionfloatparam_near_clipping= 1000
sim_visionfloatparam_far_clipping= 1001
sim_visionintparam_resolution_x= 1002
sim_visionintparam_resolution_y= 1003
sim_visionfloatparam_perspective_angle= 1004
sim_visionfloatparam_ortho_size= 1005
sim_visionintparam_disabled_light_components= 1006
sim_visionintparam_rendering_attributes= 1007
sim_visionintparam_entity_to_render= 1008
sim_visionintparam_windowed_size_x= 1009
sim_visionintparam_windowed_size_y= 1010
sim_visionintparam_windowed_pos_x= 1011
sim_visionintparam_windowed_pos_y= 1012
sim_visionintparam_pov_focal_blur= 1013
sim_visionfloatparam_pov_blur_distance= 1014
sim_visionfloatparam_pov_aperture= 1015
sim_visionintparam_pov_blur_sampled= 1016
sim_visionintparam_render_mode= 1017
sim_jointintparam_motor_enabled= 2000
sim_jointintparam_ctrl_enabled= 2001
sim_jointfloatparam_pid_p= 2002
sim_jointfloatparam_pid_i= 2003
sim_jointfloatparam_pid_d= 2004
sim_jointfloatparam_intrinsic_x= 2005
sim_jointfloatparam_intrinsic_y= 2006
sim_jointfloatparam_intrinsic_z= 2007
sim_jointfloatparam_intrinsic_qx= 2008
sim_jointfloatparam_intrinsic_qy= 2009
sim_jointfloatparam_intrinsic_qz= 2010
sim_jointfloatparam_intrinsic_qw= 2011
sim_jointfloatparam_velocity= 2012
sim_jointfloatparam_spherical_qx= 2013
sim_jointfloatparam_spherical_qy= 2014
sim_jointfloatparam_spherical_qz= 2015
sim_jointfloatparam_spherical_qw= 2016
sim_jointfloatparam_upper_limit= 2017
sim_jointfloatparam_kc_k= 2018
sim_jointfloatparam_kc_c= 2019
sim_jointfloatparam_ik_weight= 2021
sim_jointfloatparam_error_x= 2022
sim_jointfloatparam_error_y= 2023
sim_jointfloatparam_error_z= 2024
sim_jointfloatparam_error_a= 2025
sim_jointfloatparam_error_b= 2026
sim_jointfloatparam_error_g= 2027
sim_jointfloatparam_error_pos= 2028
sim_jointfloatparam_error_angle= 2029
sim_jointintparam_velocity_lock= 2030
sim_jointintparam_vortex_dep_handle= 2031
sim_jointfloatparam_vortex_dep_multiplication= 2032
sim_jointfloatparam_vortex_dep_offset= 2033
sim_shapefloatparam_init_velocity_x= 3000
sim_shapefloatparam_init_velocity_y= 3001
sim_shapefloatparam_init_velocity_z= 3002
sim_shapeintparam_static= 3003
sim_shapeintparam_respondable= 3004
sim_shapefloatparam_mass= 3005
sim_shapefloatparam_texture_x= 3006
sim_shapefloatparam_texture_y= 3007
sim_shapefloatparam_texture_z= 3008
sim_shapefloatparam_texture_a= 3009
sim_shapefloatparam_texture_b= 3010
sim_shapefloatparam_texture_g= 3011
sim_shapefloatparam_texture_scaling_x= 3012
sim_shapefloatparam_texture_scaling_y= 3013
sim_shapeintparam_culling= 3014
sim_shapeintparam_wireframe= 3015
sim_shapeintparam_compound= 3016
sim_shapeintparam_convex= 3017
sim_shapeintparam_convex_check= 3018
sim_shapeintparam_respondable_mask= 3019
sim_shapefloatparam_init_velocity_a= 3020
sim_shapefloatparam_init_velocity_b= 3021
sim_shapefloatparam_init_velocity_g= 3022
sim_shapestringparam_color_name= 3023
sim_shapeintparam_edge_visibility= 3024
sim_shapefloatparam_shading_angle= 3025
sim_shapefloatparam_edge_angle= 3026
sim_shapeintparam_edge_borders_hidden= 3027
sim_proxintparam_ray_invisibility= 4000
sim_forcefloatparam_error_x= 5000
sim_forcefloatparam_error_y= 5001
sim_forcefloatparam_error_z= 5002
sim_forcefloatparam_error_a= 5003
sim_forcefloatparam_error_b= 5004
sim_forcefloatparam_error_g= 5005
sim_forcefloatparam_error_pos= 5006
sim_forcefloatparam_error_angle= 5007
sim_lightintparam_pov_casts_shadows= 8000
sim_cameraintparam_disabled_light_components= 9000
sim_camerafloatparam_perspective_angle= 9001
sim_camerafloatparam_ortho_size= 9002
sim_cameraintparam_rendering_attributes= 9003
sim_cameraintparam_pov_focal_blur= 9004
sim_camerafloatparam_pov_blur_distance= 9005
sim_camerafloatparam_pov_aperture= 9006
sim_cameraintparam_pov_blur_samples= 9007
sim_dummyintparam_link_type= 10000
sim_mirrorfloatparam_width= 12000
sim_mirrorfloatparam_height= 12001
sim_mirrorfloatparam_reflectance= 12002
sim_mirrorintparam_enable= 12003
sim_pplanfloatparam_x_min= 20000
sim_pplanfloatparam_x_range= 20001
sim_pplanfloatparam_y_min= 20002
sim_pplanfloatparam_y_range= 20003
sim_pplanfloatparam_z_min= 20004
sim_pplanfloatparam_z_range= 20005
sim_pplanfloatparam_delta_min= 20006
sim_pplanfloatparam_delta_range= 20007
sim_mplanintparam_nodes_computed= 25000
sim_mplanintparam_prepare_nodes= 25001
sim_mplanintparam_clear_nodes= 25002
# User interface elements
sim_gui_menubar =0x0001
sim_gui_popups =0x0002
sim_gui_toolbar1 =0x0004
sim_gui_toolbar2 =0x0008
sim_gui_hierarchy =0x0010
sim_gui_infobar =0x0020
sim_gui_statusbar =0x0040
sim_gui_scripteditor =0x0080
sim_gui_scriptsimulationparameters =0x0100
sim_gui_dialogs =0x0200
sim_gui_browser =0x0400
sim_gui_all =0xffff
# Joint modes
sim_jointmode_passive =0
sim_jointmode_motion =1
sim_jointmode_ik =2
sim_jointmode_ikdependent =3
sim_jointmode_dependent =4
sim_jointmode_force =5
# Navigation and selection modes with the mouse. Lower byte values are mutually exclusive upper byte bits can be combined
sim_navigation_passive =0x0000
sim_navigation_camerashift =0x0001
sim_navigation_camerarotate =0x0002
sim_navigation_camerazoom =0x0003
sim_navigation_cameratilt =0x0004
sim_navigation_cameraangle =0x0005
sim_navigation_camerafly =0x0006
sim_navigation_objectshift =0x0007
sim_navigation_objectrotate =0x0008
sim_navigation_reserved2 =0x0009
sim_navigation_reserved3 =0x000A
sim_navigation_jointpathtest =0x000B
sim_navigation_ikmanip =0x000C
sim_navigation_objectmultipleselection =0x000D
# Bit-combine following values and add them to one of above's values for a valid navigation mode
sim_navigation_reserved4 =0x0100
sim_navigation_clickselection =0x0200
sim_navigation_ctrlselection =0x0400
sim_navigation_shiftselection =0x0800
sim_navigation_camerazoomwheel =0x1000
sim_navigation_camerarotaterightbutton =0x2000
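# Example (illustrative only, hypothetical name): a complete navigation mode
# is one lower-byte value bit-combined with any of the upper-byte flags above.
_example_navigation_mode = (sim_navigation_camerashift |
    sim_navigation_clickselection | sim_navigation_camerazoomwheel)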
#Remote API constants
SIMX_VERSION =0
# Remote API message header structure
SIMX_HEADER_SIZE =18
simx_headeroffset_crc =0 # 1 simxUShort. Generated by the client or server. The CRC for the message
simx_headeroffset_version =2 # 1 byte. Generated by the client or server. The version of the remote API software
simx_headeroffset_message_id =3 # 1 simxInt. Generated by the client (and used in a reply by the server)
simx_headeroffset_client_time =7 # 1 simxInt. Client time stamp generated by the client (and sent back by the server)
simx_headeroffset_server_time =11 # 1 simxInt. Generated by the server when a reply is generated. The server timestamp
simx_headeroffset_scene_id =15 # 1 simxUShort. Generated by the server. A unique ID identifying the scene currently displayed
simx_headeroffset_server_state =17 # 1 byte. Generated by the server. Bit coded 0 set --> simulation not stopped 1 set --> simulation paused 2 set --> real-time switch on 3-5 edit mode type (0=no edit mode 1=triangle 2=vertex 3=edge 4=path 5=UI)
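# Sketch (assumption: the header fields are packed little-endian with no
# padding, which is consistent with the offsets and SIMX_HEADER_SIZE above;
# the format string and helper name are illustrative, not part of the
# official bindings).
import struct
_SIMX_HEADER_FORMAT = "<HBiiiHB" # crc, version, message id, client time, server time, scene id, server state
assert struct.calcsize(_SIMX_HEADER_FORMAT) == SIMX_HEADER_SIZE
def _example_parse_simx_header(raw_bytes):
    # returns (crc, version, message_id, client_time, server_time,
    # scene_id, server_state)
    return struct.unpack(_SIMX_HEADER_FORMAT, raw_bytes[:SIMX_HEADER_SIZE])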
# Remote API command header
SIMX_SUBHEADER_SIZE =26
simx_cmdheaderoffset_mem_size =0 # 1 simxInt. Generated by the client or server. The buffer size of the command.
simx_cmdheaderoffset_full_mem_size =4 # 1 simxInt. Generated by the client or server. The full buffer size of the command (applies to split chunks).
simx_cmdheaderoffset_pdata_offset0 =8 # 1 simxUShort. Generated by the client or server. The amount of data that is part of the command identification.
simx_cmdheaderoffset_pdata_offset1 =10 # 1 simxInt. Generated by the client or server. The amount of shift of the pure data buffer (applies to split chunks).
simx_cmdheaderoffset_cmd=14 # 1 simxInt. Generated by the client (and used in a reply by the server). The command combined with the operation mode of the command.
simx_cmdheaderoffset_delay_or_split =18 # 1 simxUShort. Generated by the client or server. The amount of delay in ms of a continuous command or the max. pure data size to send at once (applies to split commands).
simx_cmdheaderoffset_sim_time =20 # 1 simxInt. Generated by the server. The simulation time (in ms) when the command was executed (or 0 if simulation is not running)
simx_cmdheaderoffset_status =24 # 1 byte. Generated by the server. (1 bit 0 is set --> error in function execution on server side). The client writes bit 1 if command cannot be overwritten
simx_cmdheaderoffset_reserved =25 # 1 byte. Not yet used
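# Example (illustrative only, hypothetical helper; assumes a Python 3 bytes
# object): bit 0 of the status byte signals an error during function
# execution on the server side, as documented above.
def _example_command_failed(raw_cmd_header):
    return bool(raw_cmd_header[simx_cmdheaderoffset_status] & 1)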
# Regular operation modes
simx_opmode_oneshot =0x000000 # sends command as one chunk. Reply will also come as one chunk. Doesn't wait for the reply.
simx_opmode_blocking =0x010000 # sends command as one chunk. Reply will also come as one chunk. Waits for the reply (_REPLY_WAIT_TIMEOUT_IN_MS is the timeout).
simx_opmode_oneshot_wait =0x010000 # sends command as one chunk. Reply will also come as one chunk. Waits for the reply (_REPLY_WAIT_TIMEOUT_IN_MS is the timeout).
simx_opmode_continuous =0x020000
simx_opmode_streaming =0x020000 # sends command as one chunk. Command will be stored on the server and always executed
#(every x ms (as far as possible) where x can be 0-65535. just add x to opmode_continuous).
# A reply will be sent continuously each time as one chunk. Doesn't wait for the reply.
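# Example (illustrative only): as described above, the execution interval of
# a streaming command is requested by adding the desired number of
# milliseconds (0-65535) to the base operation mode.
_example_streaming_every_50ms = simx_opmode_streaming + 50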
# Operation modes for heavy data
simx_opmode_oneshot_split =0x030000 # sends command as several chunks (max chunk size is x bytes where x can be _MIN_SPLIT_AMOUNT_IN_BYTES-65535. Just add x to opmode_oneshot_split). Reply will also come as several chunks. Doesn't wait for the reply.
simx_opmode_continuous_split =0x040000
simx_opmode_streaming_split =0x040000 # sends command as several chunks (max chunk size is x bytes where x can be _MIN_SPLIT_AMOUNT_IN_BYTES-65535. Just add x to opmode_continuous_split). Command will be stored on the server and always executed. A reply will be sent continuously each time as several chunks. Doesn't wait for the reply.
# Special operation modes
simx_opmode_discontinue =0x050000 # removes and cancels all commands stored on the client or server side (also continuous commands)
simx_opmode_buffer =0x060000 # doesn't send anything but checks if a reply for the given command is available in the input buffer (i.e. previously received from the server)
simx_opmode_remove =0x070000 # doesn't send anything and doesn't return any specific value. It just erases a similar command reply in the inbox (to free some memory)
# Command return codes
simx_return_ok =0x000000
simx_return_novalue_flag =0x000001 # input buffer doesn't contain the specified command
simx_return_timeout_flag =0x000002 # command reply not received in time for opmode_oneshot_wait operation mode
simx_return_illegal_opmode_flag =0x000004 # command doesn't support the specified operation mode
simx_return_remote_error_flag =0x000008 # command caused an error on the server side
simx_return_split_progress_flag =0x000010 # previous similar command not yet fully processed (applies to opmode_oneshot_split operation modes)
simx_return_local_error_flag =0x000020 # command caused an error on the client side
simx_return_initialize_error_flag =0x000040 # simxStart was not yet called
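# Example (illustrative only, hypothetical helper): the return codes are bit
# flags, so replies should be tested bitwise rather than by equality (except
# against simx_return_ok itself).
def _example_describe_return_code(code):
    if code == simx_return_ok:
        return "ok"
    if code & simx_return_timeout_flag:
        return "timeout"
    if code & simx_return_remote_error_flag:
        return "server-side error"
    return "other flags set: 0x%06x" % (code,)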
# Following for backward compatibility (same as above)
simx_error_noerror =0x000000
simx_error_novalue_flag =0x000001 # input buffer doesn't contain the specified command
simx_error_timeout_flag =0x000002 # command reply not received in time for opmode_oneshot_wait operation mode
simx_error_illegal_opmode_flag =0x000004 # command doesn't support the specified operation mode
simx_error_remote_error_flag =0x000008 # command caused an error on the server side
simx_error_split_progress_flag =0x000010 # previous similar command not yet fully processed (applies to opmode_oneshot_split operation modes)
simx_error_local_error_flag =0x000020 # command caused an error on the client side
simx_error_initialize_error_flag =0x000040 # simxStart was not yet called
| jpedrorl/Robotics-AI | vrep/vrepConst.py | Python | mit | 45,009 |
import pytest
def test_unknown_virtual_host(docker_compose, nginxproxy):
r = nginxproxy.get("http://nginx-proxy/")
assert r.status_code == 503
def test_forwards_to_web1(docker_compose, nginxproxy):
r = nginxproxy.get("http://web1.nginx-proxy.local/port")
assert r.status_code == 200
assert r.text == "answer from port 81\n"
def test_forwards_to_web2(docker_compose, nginxproxy):
r = nginxproxy.get("http://web2.nginx-proxy.local/port")
assert r.status_code == 200
assert r.text == "answer from port 82\n" | jwilder/nginx-proxy | test/test_multiple-networks.py | Python | mit | 545 |
# -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <lxnay@sabayon.org>
@contact: lxnay@sabayon.org
@copyright: Fabio Erculiani
@license: GPL-2
I{EntropyRepository} is the MySQL implementation of the repository
interface.
"""
import os
import hashlib
import time
try:
import thread
except ImportError:
import _thread as thread
import threading
import subprocess
from entropy.const import etpConst, const_debug_write, \
const_convert_to_unicode, const_pid_exists
import entropy.dep
import entropy.tools
from entropy.db.sql import EntropySQLRepository, SQLConnectionWrapper, \
SQLCursorWrapper
from entropy.db.exceptions import Warning, Error, InterfaceError, \
DatabaseError, DataError, OperationalError, IntegrityError, \
InternalError, ProgrammingError, NotSupportedError, RestartTransaction
class MySQLCursorWrapper(SQLCursorWrapper):
"""
This class wraps a MySQL cursor and
makes execute(), executemany() return
the cursor itself.
"""
def __init__(self, cursor, exceptions, errno):
self._errno = errno
self._conn_wr = cursor.connection
super(MySQLCursorWrapper, self).__init__(cursor, exceptions)
def _proxy_call(self, *args, **kwargs):
"""
Reimplemented from SQLCursorWrapper.
Raise RestartTransaction if MySQL fails to execute
the query due to a detected deadlock.
"""
try:
return super(MySQLCursorWrapper, self)._proxy_call(
*args, **kwargs)
except ProgrammingError as err:
tx_errnos = (
self._errno['ER_LOCK_WAIT_TIMEOUT'],
self._errno['ER_LOCK_DEADLOCK'])
if err.args[0].errno in tx_errnos:
const_debug_write(
__name__,
"deadlock detected, asking to restart transaction")
# rollback, is it needed?
self._conn_wr.rollback()
raise RestartTransaction(err.args[0])
raise
def wrap(self, method, *args, **kwargs):
return self._proxy_call(method, *args, **kwargs)
def execute(self, *args, **kwargs):
# force oursql to empty the resultset
self._cur = self._cur.connection.cursor()
self._proxy_call(self._cur.execute, *args, **kwargs)
return self
def executemany(self, *args, **kwargs):
# force oursql to empty the resultset
self._cur = self._cur.connection.cursor()
self._proxy_call(self._cur.executemany, *args, **kwargs)
return self
def close(self, *args, **kwargs):
return self._proxy_call(self._cur.close, *args, **kwargs)
def fetchone(self, *args, **kwargs):
return self._proxy_call(self._cur.fetchone, *args, **kwargs)
def fetchall(self, *args, **kwargs):
return self._proxy_call(self._cur.fetchall, *args, **kwargs)
def fetchmany(self, *args, **kwargs):
return self._proxy_call(self._cur.fetchmany, *args, **kwargs)
def executescript(self, script):
for sql in script.split(";"):
if not sql.strip():
continue
self.execute(sql)
return self
def callproc(self, *args, **kwargs):
return self._proxy_call(self._cur.callproc, *args, **kwargs)
def nextset(self, *args, **kwargs):
return self._proxy_call(self._cur.nextset, *args, **kwargs)
def __iter__(self):
cur = iter(self._cur)
return MySQLCursorWrapper(cur, self._excs, self._errno)
def __next__(self):
return self.wrap(next, self._cur)
def next(self):
return self.wrap(self._cur.next)
class MySQLConnectionWrapper(SQLConnectionWrapper):
"""
This class wraps a MySQL connection and
makes execute(), executemany() return
the connection itself.
"""
def __init__(self, connection, exceptions):
SQLConnectionWrapper.__init__(self, connection, exceptions)
def interrupt(self):
"""
Reimplemented from SQLConnectionWrapper.
"""
# Not supported by MySQL, NO-OP
return
def ping(self):
"""
Reimplemented from SQLConnectionWrapper.
"""
return self._proxy_call(self._excs, self._con.ping)
def unicode(self):
"""
Reimplemented from SQLConnectionWrapper.
"""
# This is a NO-OP, we are always unicode
return
class EntropyMySQLRepository(EntropySQLRepository):
"""
EntropyMySQLRepository implements MySQL based storage. In a Model-View based
design pattern, this can be considered the "model".
"""
# bump this every time schema changes and databaseStructureUpdate
# should be triggered
_SCHEMA_REVISION = 1
_INSERT_OR_REPLACE = "REPLACE"
_INSERT_OR_IGNORE = "INSERT IGNORE"
_UPDATE_OR_REPLACE = None
class MySQLSchema(object):
def get_init(self):
data = """
CREATE TABLE baseinfo (
idpackage INTEGER(10) UNSIGNED NOT NULL
AUTO_INCREMENT PRIMARY KEY,
atom VARCHAR(75) NOT NULL,
category VARCHAR(128) NOT NULL,
name VARCHAR(75) NOT NULL,
version VARCHAR(75) NOT NULL,
versiontag VARCHAR(75) NOT NULL,
revision INTEGER(10) NOT NULL,
branch VARCHAR(75) NOT NULL,
slot VARCHAR(75) NOT NULL,
license VARCHAR(256) NOT NULL,
etpapi INTEGER(10) NOT NULL,
`trigger` INTEGER(10) NOT NULL
);
CREATE TABLE extrainfo (
idpackage INTEGER(10) UNSIGNED PRIMARY KEY NOT NULL,
description VARCHAR(256) NOT NULL,
homepage VARCHAR(1024) NOT NULL,
download VARCHAR(512) NOT NULL,
size VARCHAR(128) NOT NULL,
chost VARCHAR(256) NOT NULL,
cflags VARCHAR(512) NOT NULL,
cxxflags VARCHAR(512) NOT NULL,
digest CHAR(32) NOT NULL,
datecreation VARCHAR(32) NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE content (
idpackage INTEGER(10) UNSIGNED NOT NULL,
file VARCHAR(512) NOT NULL,
type VARCHAR(3) NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE contentsafety (
idpackage INTEGER(10) UNSIGNED NOT NULL,
file VARCHAR(512) NOT NULL,
mtime REAL,
sha256 CHAR(64) NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE provide (
idpackage INTEGER(10) UNSIGNED NOT NULL,
atom VARCHAR(75) NOT NULL,
is_default INTEGER(10) NOT NULL DEFAULT 0,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE dependencies (
idpackage INTEGER(10) UNSIGNED NOT NULL,
iddependency INTEGER(10) UNSIGNED NOT NULL,
type INTEGER(10) NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE dependenciesreference (
iddependency INTEGER(10) UNSIGNED NOT NULL
AUTO_INCREMENT PRIMARY KEY,
dependency VARCHAR(1024) NOT NULL
);
CREATE TABLE conflicts (
idpackage INTEGER(10) UNSIGNED NOT NULL,
conflict VARCHAR(128) NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE mirrorlinks (
mirrorname VARCHAR(75) NOT NULL,
mirrorlink VARCHAR(512) NOT NULL
);
CREATE TABLE sources (
idpackage INTEGER(10) UNSIGNED NOT NULL,
idsource INTEGER(10) UNSIGNED NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE sourcesreference (
idsource INTEGER(10) UNSIGNED NOT NULL
AUTO_INCREMENT PRIMARY KEY,
source VARCHAR(512) NOT NULL
);
CREATE TABLE useflags (
idpackage INTEGER(10) UNSIGNED NOT NULL,
idflag INTEGER(10) UNSIGNED NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE useflagsreference (
idflag INTEGER(10) UNSIGNED NOT NULL
AUTO_INCREMENT PRIMARY KEY,
flagname VARCHAR(75) NOT NULL
);
CREATE TABLE keywords (
idpackage INTEGER(10) UNSIGNED NOT NULL,
idkeyword INTEGER(10) UNSIGNED NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE keywordsreference (
idkeyword INTEGER(10) UNSIGNED NOT NULL
AUTO_INCREMENT PRIMARY KEY,
keywordname VARCHAR(75) NOT NULL
);
CREATE TABLE configprotect (
idpackage INTEGER(10) UNSIGNED NOT NULL PRIMARY KEY,
idprotect INTEGER(10) UNSIGNED NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE configprotectmask (
idpackage INTEGER(10) UNSIGNED NOT NULL PRIMARY KEY,
idprotect INTEGER(10) UNSIGNED NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE configprotectreference (
idprotect INTEGER(10) UNSIGNED NOT NULL
AUTO_INCREMENT PRIMARY KEY,
protect VARCHAR(512) NOT NULL
);
CREATE TABLE systempackages (
idpackage INTEGER(10) UNSIGNED NOT NULL PRIMARY KEY,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE injected (
idpackage INTEGER(10) UNSIGNED NOT NULL PRIMARY KEY,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE installedtable (
idpackage INTEGER(10) UNSIGNED NOT NULL PRIMARY KEY,
repositoryname VARCHAR(75) NOT NULL,
source INTEGER(10) NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE sizes (
idpackage INTEGER(10) UNSIGNED NOT NULL PRIMARY KEY,
size BIGINT UNSIGNED NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE counters (
counter INTEGER(10) NOT NULL,
idpackage INTEGER(10) UNSIGNED NOT NULL,
branch VARCHAR(75) NOT NULL,
PRIMARY KEY(idpackage, branch),
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE trashedcounters (
counter INTEGER(10) NOT NULL
);
CREATE TABLE needed_libs (
idpackage INTEGER(10) UNSIGNED NOT NULL,
lib_user_path VARCHAR(512) NOT NULL,
lib_user_soname VARCHAR(75) NOT NULL,
soname VARCHAR(75) NOT NULL,
elfclass INTEGER(10) NOT NULL,
rpath VARCHAR(1024) NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE provided_libs (
idpackage INTEGER(10) UNSIGNED NOT NULL,
library VARCHAR(75) NOT NULL,
path VARCHAR(75) NOT NULL,
elfclass INTEGER(10) NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE treeupdates (
repository VARCHAR(75) NOT NULL PRIMARY KEY,
digest CHAR(32) NOT NULL
);
CREATE TABLE treeupdatesactions (
idupdate INTEGER(10) UNSIGNED NOT NULL
AUTO_INCREMENT PRIMARY KEY,
repository VARCHAR(75) NOT NULL,
command VARCHAR(256) NOT NULL,
branch VARCHAR(75) NOT NULL,
date VARCHAR(75) NOT NULL
);
CREATE TABLE licensedata (
licensename VARCHAR(75) NOT NULL UNIQUE,
`text` MEDIUMBLOB,
compressed INTEGER(10) NOT NULL
);
CREATE TABLE licenses_accepted (
licensename VARCHAR(75) NOT NULL UNIQUE
);
CREATE TABLE triggers (
idpackage INTEGER(10) UNSIGNED NOT NULL PRIMARY KEY,
data MEDIUMBLOB,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE entropy_misc_counters (
idtype INTEGER(10) NOT NULL PRIMARY KEY,
counter INTEGER(10) NOT NULL
);
CREATE TABLE categoriesdescription (
category VARCHAR(128) NOT NULL,
locale VARCHAR(75) NOT NULL,
description VARCHAR(256) NOT NULL
);
CREATE TABLE packagesets (
setname VARCHAR(75) NOT NULL,
dependency VARCHAR(1024) NOT NULL
);
CREATE TABLE packagechangelogs (
category VARCHAR(75) NOT NULL,
name VARCHAR(75) NOT NULL,
changelog MEDIUMBLOB NOT NULL,
PRIMARY KEY (category, name)
);
CREATE TABLE automergefiles (
idpackage INTEGER(10) UNSIGNED NOT NULL,
configfile VARCHAR(512) NOT NULL,
`md5` CHAR(32) NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE packagedesktopmime (
idpackage INTEGER(10) UNSIGNED NOT NULL,
name VARCHAR(75),
mimetype VARCHAR(4096),
executable VARCHAR(128),
icon VARCHAR(75),
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE packagedownloads (
idpackage INTEGER(10) UNSIGNED NOT NULL,
download VARCHAR(512) NOT NULL,
type VARCHAR(75) NOT NULL,
size BIGINT UNSIGNED NOT NULL,
disksize BIGINT UNSIGNED NOT NULL,
`md5` CHAR(32) NOT NULL,
`sha1` CHAR(40) NOT NULL,
`sha256` CHAR(64) NOT NULL,
`sha512` CHAR(128) NOT NULL,
`gpg` BLOB,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE provided_mime (
mimetype VARCHAR(640) NOT NULL,
idpackage INTEGER(10) UNSIGNED NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE packagesignatures (
idpackage INTEGER(10) UNSIGNED NOT NULL PRIMARY KEY,
sha1 CHAR(40),
sha256 CHAR(64),
sha512 CHAR(128),
gpg BLOB,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE packagespmphases (
idpackage INTEGER(10) UNSIGNED NOT NULL PRIMARY KEY,
phases VARCHAR(512) NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE packagespmrepository (
idpackage INTEGER(10) UNSIGNED NOT NULL PRIMARY KEY,
repository VARCHAR(75) NOT NULL,
FOREIGN KEY(idpackage)
REFERENCES baseinfo(idpackage) ON DELETE CASCADE
);
CREATE TABLE entropy_branch_migration (
repository VARCHAR(75) NOT NULL,
from_branch VARCHAR(75) NOT NULL,
to_branch VARCHAR(75) NOT NULL,
post_migration_md5sum CHAR(32) NOT NULL,
post_upgrade_md5sum CHAR(32) NOT NULL,
PRIMARY KEY (repository, from_branch, to_branch)
);
CREATE TABLE xpakdata (
idpackage INTEGER(10) UNSIGNED NOT NULL PRIMARY KEY,
data LONGBLOB NOT NULL
);
CREATE TABLE settings (
setting_name VARCHAR(75) NOT NULL,
setting_value VARCHAR(75) NOT NULL,
PRIMARY KEY(setting_name)
);
"""
return data
class MySQLProxy(object):
_mod = None
_excs = None
_errnos = None
_lock = threading.Lock()
PORT = 3306
@staticmethod
def get():
"""
Lazily load the MySQL module.
"""
if EntropyMySQLRepository.MySQLProxy._mod is None:
with EntropyMySQLRepository.MySQLProxy._lock:
if EntropyMySQLRepository.MySQLProxy._mod is None:
import oursql
EntropyMySQLRepository.MySQLProxy._excs = oursql
EntropyMySQLRepository.MySQLProxy._mod = oursql
EntropyMySQLRepository.MySQLProxy._errnos = \
oursql.errnos
return EntropyMySQLRepository.MySQLProxy._mod
@staticmethod
def exceptions():
"""
Get the MySQL exceptions module.
"""
_mod = EntropyMySQLRepository.MySQLProxy.get()
return EntropyMySQLRepository.MySQLProxy._excs
@staticmethod
def errno():
"""
Get the MySQL errno module.
"""
_mod = EntropyMySQLRepository.MySQLProxy.get()
return EntropyMySQLRepository.MySQLProxy._errnos
Schema = MySQLSchema
ModuleProxy = MySQLProxy
def __init__(self, uri, readOnly = False, xcache = False,
name = None, indexing = True, skipChecks = False,
direct = False):
"""
EntropyMySQLRepository constructor.
@param uri: the connection URI
@type uri: string
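Example: mysql://username:password@db.example.com/entropy:3306
(the ":port" suffix after the database name is optional; the host
and credentials above are placeholders)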
@keyword readOnly: open file in read-only mode
@type readOnly: bool
@keyword xcache: enable on-disk cache
@type xcache: bool
@keyword name: repository identifier
@type name: string
@keyword indexing: enable database indexes
@type indexing: bool
@keyword skipChecks: if True, skip integrity checks
@type skipChecks: bool
@keyword direct: True, if direct mode should be always enabled
@type direct: bool
"""
self._mysql = self.ModuleProxy.get()
# setup uri mysql://user:pass@host/database
split_url = entropy.tools.spliturl(uri)
if split_url is None:
raise DatabaseError("Invalid URI")
if split_url.scheme != "mysql":
raise DatabaseError("Invalid Scheme")
netloc = split_url.netloc
if not netloc:
raise DatabaseError("Invalid Netloc")
try:
self._host = netloc.split("@", 1)[-1]
except IndexError:
raise DatabaseError("Invalid Host")
try:
user_pass = "@".join(netloc.split("@")[:-1])
self._user = user_pass.split(":")[0]
except IndexError:
raise DatabaseError("Invalid User")
try:
self._password = user_pass.split(":", 1)[1]
except IndexError:
raise DatabaseError("Invalid Password")
db_port = split_url.path.lstrip("/")
if not db_port:
raise DatabaseError("Invalid Database")
try:
if ":" in db_port:
db = ":".join(db_port.split(":")[:-1])
else:
db = db_port
except IndexError:
raise DatabaseError("Invalid Database Name")
if db == ":memory:":
raise DatabaseError(
"Memory Database not supported (I use BLOBs)")
try:
if ":" in db_port:
port = db_port.split(":")[-1].strip()
if port:
self._port = int(port)
else:
raise ValueError()
else:
raise IndexError()
except IndexError:
self._port = EntropyMySQLRepository.MySQLProxy.PORT
except ValueError:
raise DatabaseError("Invalid Port")
EntropySQLRepository.__init__(
self, db, readOnly, skipChecks, indexing,
xcache, False, name)
self.__structure_update = False
if not self._skip_checks:
try:
if self._doesTableExist('baseinfo') and \
self._doesTableExist('extrainfo'):
self.__structure_update = True
except Error:
self._cleanup_all(
_cleanup_main_thread=False)
raise
if self.__structure_update:
self._databaseStructureUpdates()
def _concatOperator(self, fields):
"""
Reimplemented from EntropySQLRepository.
"""
return "CONCAT(" + ", ".join(fields) + ")"
def _cursor(self):
"""
Reimplemented from EntropySQLRepository.
"""
current_thread = threading.current_thread()
c_key = self._cursor_connection_pool_key()
cursor = None
with self._cursor_pool_mutex():
threads = set()
cursor_pool = self._cursor_pool()
cursor_data = cursor_pool.get(c_key)
if cursor_data is not None:
cursor, threads = cursor_data
# handle possible thread ident clashing
# in the cleanup thread function, because
# thread idents are recycled
# on thread termination
threads.add(current_thread)
if cursor is None:
conn = self._connection_impl(_from_cursor=True)
cursor = conn.cursor()
cursor.execute("SET storage_engine=InnoDB;")
cursor.execute("SET autocommit=OFF;")
cursor = MySQLCursorWrapper(
cursor, self.ModuleProxy.exceptions(),
self.ModuleProxy.errno())
cursor_pool[c_key] = cursor, threads
self._start_cleanup_monitor(current_thread, c_key)
return cursor
def _connection_impl(self, _from_cursor=False):
"""
Connection getter method implementation, adds
_from_cursor argument to avoid calling the
cleanup routine if True.
"""
current_thread = threading.current_thread()
c_key = self._cursor_connection_pool_key()
conn = None
with self._connection_pool_mutex():
threads = set()
connection_pool = self._connection_pool()
conn_data = connection_pool.get(c_key)
if conn_data is not None:
conn, threads = conn_data
# handle possible thread ident clashing
# in the cleanup thread function
# because thread idents are recycled on
# thread termination
threads.add(current_thread)
if conn is None:
conn = MySQLConnectionWrapper.connect(
self.ModuleProxy, self._mysql,
MySQLConnectionWrapper,
host = self._host, user = self._user,
passwd = self._password, db = self._db,
port = self._port, autoreconnect = True)
connection_pool[c_key] = conn, threads
if not _from_cursor:
self._start_cleanup_monitor(current_thread, c_key)
else:
conn.ping()
return conn
def _connection(self):
"""
Reimplemented from EntropySQLRepository.
"""
return self._connection_impl()
def __show_info(self):
password = hashlib.new("md5")
password.update(self._password)
first_part = "<EntropyRepository instance at " + \
"%s - host: %s, db: %s, port: %s, user: %s, hpass: %s" % (
hex(id(self)), self._host, self._db, self._port, self._user,
password.hexdigest(),)
second_part = ", ro: %s|%s, caching: %s, indexing: %s" % (
self._readonly, self.readonly(), self.caching(),
self._indexing,)
third_part = ", name: %s, skip_upd: %s, st_upd: %s" % (
self.name, self._skip_checks, self.__structure_update,)
fourth_part = ", conn_pool: %s, cursor_cache: %s>" % (
self._connection_pool(), self._cursor_pool(),)
return first_part + second_part + third_part + fourth_part
def __repr__(self):
return self.__show_info()
def __str__(self):
return self.__show_info()
def __unicode__(self):
return self.__show_info()
def close(self, safe=False):
"""
Reimplemented from EntropyRepositoryBase.
Needs to call superclass method.
"""
super(EntropyMySQLRepository, self).close()
self._cleanup_all(_cleanup_main_thread=not safe)
# live cache must be discarded every time the repository is closed
# in order to avoid data mismatches for long-running processes
# that load and unload Entropy Framework often.
# like "client-updates-daemon".
self._discardLiveCache()
def vacuum(self):
"""
Reimplemented from EntropyRepositoryBase.
@todo: should it run OPTIMIZE TABLE for each table?
"""
return
def initializeRepository(self):
"""
Reimplemented from EntropyRepositoryBase.
"""
my = self.Schema()
self.dropAllIndexes()
self._cursor().execute("SET FOREIGN_KEY_CHECKS = 0;")
try:
for table in self._listAllTables():
try:
cur = self._cursor().execute(
"DROP TABLE %s" % (table,))
except OperationalError:
# skip tables that can't be dropped
continue
finally:
self._cursor().execute("SET FOREIGN_KEY_CHECKS = 1;")
self.commit()
self._cursor().executescript(my.get_init())
self._clearLiveCache("_doesTableExist")
self._clearLiveCache("_doesColumnInTableExist")
self._setupInitialSettings()
self._databaseStructureUpdates()
self._clearLiveCache("_doesTableExist")
self._clearLiveCache("_doesColumnInTableExist")
self.commit()
super(EntropyMySQLRepository, self).initializeRepository()
def setSpmUid(self, package_id, spm_package_uid, branch = None):
"""
Reimplemented from EntropySQLRepository.
Specialized version that only handles UNIQUE
constraint violations.
"""
branchstring = ''
insertdata = (spm_package_uid, package_id)
if branch:
branchstring = ', branch = (?)'
# bind values in statement order: counter, branch, idpackage
insertdata = (spm_package_uid, branch, package_id)
try:
self._cursor().execute("""
UPDATE counters SET counter = ? %s
WHERE idpackage = ?""" % (branchstring,), insertdata)
except IntegrityError as err:
errno = self.ModuleProxy.errno()
if err.args[0].errno != errno['ER_DUP_ENTRY']:
raise
# fall back to REPLACE. REPLACE does not accept a WHERE
# clause, so write the full row instead
if branch:
self._cursor().execute("""
REPLACE INTO counters (counter, idpackage, branch)
VALUES (?, ?, ?)""", (spm_package_uid, package_id, branch))
else:
self._cursor().execute("""
REPLACE INTO counters (counter, idpackage)
VALUES (?, ?)""", (spm_package_uid, package_id))
def handlePackage(self, pkg_data, revision = None,
formattedContent = False):
"""
Reimplemented from EntropySQLRepository.
"""
raise NotImplementedError()
def _setupInitialSettings(self):
"""
Setup initial repository settings
"""
query = """
REPLACE INTO settings VALUES ("arch", '%s');
""" % (etpConst['currentarch'],)
self._cursor().executescript(query)
self.commit()
self._settings_cache.clear()
def _databaseStructureUpdates(self):
"""
Do not forget to bump _SCHEMA_REVISION whenever
you add more tables
"""
try:
current_schema_rev = int(self.getSetting("schema_revision"))
except (KeyError, ValueError):
current_schema_rev = -1
if current_schema_rev == EntropyMySQLRepository._SCHEMA_REVISION \
and not os.getenv("ETP_REPO_SCHEMA_UPDATE"):
return
old_readonly = self._readonly
self._readonly = False
# !!! insert schema changes here
self._readonly = old_readonly
self._connection().commit()
if not old_readonly:
# it seems that it's causing locking issues
# so, just execute it when in read/write mode
self._setSetting("schema_revision",
EntropyMySQLRepository._SCHEMA_REVISION)
self._connection().commit()
def integrity_check(self):
"""
Reimplemented from EntropyRepositoryBase.
"""
return
@staticmethod
def importRepository(dumpfile, db, data = None):
"""
Reimplemented from EntropyRepositoryBase.
"""
dumpfile = os.path.realpath(dumpfile)
if not entropy.tools.is_valid_path_string(dumpfile):
raise AttributeError("dumpfile value is invalid")
if data is None:
raise AttributeError(
"connection data required (dict)")
try:
host, port, user, password = data['host'], \
data['port'], data['user'], data['password']
except KeyError as err:
raise AttributeError(err)
try:
with open(dumpfile, "rb") as f_in:
proc = subprocess.Popen(
("/usr/bin/mysql",
"-u", user, "-h", host,
"-P", str(port), "-p" + password,
"-D", db), bufsize = -1, stdin = f_in)
return proc.wait()
except OSError:
return 1
def exportRepository(self, dumpfile):
"""
Reimplemented from EntropyRepositoryBase.
"""
try:
proc = subprocess.Popen(
("/usr/bin/mysqldump",
"-u", self._user, "-h", self._host,
"-P", str(self._port), "-p" + self._password,
"--databases", self._db), bufsize = -1, stdout = dumpfile)
return proc.wait()
except OSError:
return 1
def _listAllTables(self):
"""
List all available tables in this repository database.
@return: available tables
@rtype: list
"""
cur = self._cursor().execute("SHOW TABLES;")
return self._cur2tuple(cur)
def _doesTableExist(self, table, temporary = False):
# NOTE: override cache when temporary is True
if temporary:
# temporary table do not pop-up with the statement below, so
# we need to handle them with "care"
try:
cur = self._cursor().execute("""
SELECT count(*) FROM `%s` LIMIT 1""" % (table,))
cur.fetchone()
except OperationalError:
return False
return True
# speed up a bit if we already reported a table as existing
cached = self._getLiveCache("_doesTableExist")
if cached is None:
cached = {}
elif table in cached:
# avoid memleak with python3.x
obj = cached[table]
del cached
return obj
query = "SHOW TABLES LIKE '%s'" % (table,)
cur = self._cursor().execute(query)
exists = cur.fetchone() is not None
cached[table] = exists
self._setLiveCache("_doesTableExist", cached)
# avoid python3.x memleak
del cached
return exists
def _doesColumnInTableExist(self, table, column):
# speed up a bit if we already reported a column as existing
d_tup = (table, column,)
cached = self._getLiveCache("_doesColumnInTableExist")
if cached is None:
cached = {}
elif d_tup in cached:
# avoid memleak with python3.x
obj = cached[d_tup]
del cached
return obj
try:
cur = self._cursor().execute("""
SHOW COLUMNS FROM `%s` WHERE field = '%s'
""" % (table, column))
exists = cur.fetchone() is not None
except OperationalError:
exists = False
cached[d_tup] = exists
self._setLiveCache("_doesColumnInTableExist", cached)
# avoid python3.x memleak
del cached
return exists
def mtime(self):
"""
Reimplemented from EntropyRepositoryBase.
"""
raise IOError("Not supported by MySQL Repository")
def dropAllIndexes(self):
"""
Reimplemented from EntropyRepositoryBase.
"""
for table in self._listAllTables():
cur = self._cursor().execute("""
SHOW INDEX FROM `%s` WHERE Key_name != 'PRIMARY';
""" % (table,))
for index_tuple in cur:
index = index_tuple[2]
try:
self._cursor().execute(
"DROP INDEX `%s` ON `%s`" % (
index, table,))
except OperationalError:
continue
except IntegrityError as err:
errno = self.ModuleProxy.errno()
if err.args[0].errno != errno['ER_DROP_INDEX_FK']:
raise
| mudler/entropy | lib/entropy/db/mysql.py | Python | gpl-2.0 | 36,707 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Johann Prieur <johann.prieur@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import xml.sax.saxutils as xml
def soap_header(from_member_name, friendly_name, proxy, msnp_ver, build_ver,
to_member_name, message_number, security_token, app_id,
lock_key):
"""Returns the SOAP xml header"""
# FIXME : escape the parameters
return """<From memberName="%(from_member_name)s" friendlyName="%(friendly_name)s" xml:lang="en-US" proxy="%(proxy)s" xmlns="http://messenger.msn.com/ws/2004/09/oim/" msnpVer="%(msnp_ver)s" buildVer="%(build_ver)s"/>
<To memberName="%(to_member_name)s" xmlns="http://messenger.msn.com/ws/2004/09/oim/"/>
<Ticket passport="%(passport)s" appid="%(app_id)s" lockkey="%(lock_key)s" xmlns="http://messenger.msn.com/ws/2004/09/oim/"/>
<Sequence xmlns="http://schemas.xmlsoap.org/ws/2003/03/rm">
<Identifier xmlns="http://schemas.xmlsoap.org/ws/2002/07/utility">
http://messenger.msn.com
</Identifier>
<MessageNumber>%(message_number)s</MessageNumber>
</Sequence>""" % { 'from_member_name' : from_member_name,
'friendly_name' : friendly_name,
'proxy' : proxy,
'msnp_ver' : msnp_ver,
'build_ver' : build_ver,
'to_member_name' : to_member_name,
'passport' : xml.escape(security_token),
'app_id' : app_id,
'lock_key' : lock_key,
'message_number' : message_number }
def transport_headers():
"""Returns a dictionary, containing transport (http) headers
to use for the request"""
return {}
def soap_action():
"""Returns the SOAPAction value to pass to the transport
or None if no SOAPAction needs to be specified"""
return "http://messenger.live.com/ws/2006/09/oim/Store2"
def soap_body(message_type, message_content):
"""Returns the SOAP xml body"""
return """<MessageType xmlns="http://messenger.msn.com/ws/2004/09/oim/">
%s
</MessageType>
<Content xmlns="http://messenger.msn.com/ws/2004/09/oim/">
%s
</Content>""" % (message_type, message_content)
def process_response(soap_response):
return True
| emesene/papyon | papyon/service/description/OIM/Store2.py | Python | gpl-2.0 | 3,213 |
#
# Copyright 2013 IBM Corp.
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware to replace the plain text message body of an error
response with one formatted so the client can parse it.
Based on pecan.middleware.errordocument
"""
import json
from lxml import etree
import webob
from ceilometer.api import hooks
from ceilometer.openstack.common import gettextutils
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
LOG = log.getLogger(__name__)
class ParsableErrorMiddleware(object):
"""Replace error body with something the client can parse."""
@staticmethod
def best_match_language(accept_language):
"""Determines best available locale from the Accept-Language header.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not accept_language:
return None
all_languages = gettextutils.get_available_languages('ceilometer')
return accept_language.best_match(all_languages)
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
# Request for this state, modified by replace_start_response()
# and used when an error is being reported.
state = {}
def replacement_start_response(status, headers, exc_info=None):
"""Overrides the default response to make errors parsable."""
try:
status_code = int(status.split(' ')[0])
state['status_code'] = status_code
except (ValueError, TypeError): # pragma: nocover
raise Exception((
'ErrorDocumentMiddleware received an invalid '
'status %s' % status
))
else:
if (state['status_code'] // 100) not in (2, 3):
# Remove some headers so we can replace them later
# when we have the full error message and can
# compute the length.
headers = [(h, v)
for (h, v) in headers
if h not in ('Content-Length', 'Content-Type')
]
# Save the headers in case we need to modify them.
state['headers'] = headers
return start_response(status, headers, exc_info)
app_iter = self.app(environ, replacement_start_response)
if (state['status_code'] // 100) not in (2, 3):
req = webob.Request(environ)
# Find the first TranslationHook in the array of hooks and use the
# translatable_error object from it
error = None
for hook in self.app.hooks:
if isinstance(hook, hooks.TranslationHook):
error = hook.local_error.translatable_error
break
user_locale = self.best_match_language(req.accept_language)
if (req.accept.best_match(['application/json', 'application/xml'])
== 'application/xml'):
try:
# simple check xml is valid
fault = etree.fromstring('\n'.join(app_iter))
# Add the translated error to the xml data
if error is not None:
for fault_string in fault.findall('faultstring'):
fault_string.text = (
gettextutils.translate(
error, user_locale))
body = ['<error_message>' + etree.tostring(fault)
+ '</error_message>']
except etree.XMLSyntaxError as err:
LOG.error(_('Error parsing HTTP response: %s') % err)
body = ['<error_message>%s' % state['status_code']
+ '</error_message>']
state['headers'].append(('Content-Type', 'application/xml'))
else:
try:
fault = json.loads('\n'.join(app_iter))
if error is not None and 'faultstring' in fault:
fault['faultstring'] = (
gettextutils.translate(
error, user_locale))
body = [json.dumps({'error_message': fault})]
except ValueError as err:
body = [json.dumps({'error_message': '\n'.join(app_iter)})]
state['headers'].append(('Content-Type', 'application/json'))
state['headers'].append(('Content-Length', str(len(body[0]))))
else:
body = app_iter
return body
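def _example_wrap_app(app):
    """Minimal usage sketch (hypothetical helper, not part of the actual
    Ceilometer API bootstrap): wrap any WSGI application so that error
    responses are rewritten into parsable JSON or XML bodies."""
    return ParsableErrorMiddleware(app)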
| ChinaMassClouds/copenstack-server | openstack/src/ceilometer-2014.2.2/ceilometer/api/middleware.py | Python | gpl-2.0 | 5,417 |
#
# Copyright 2013, 2018 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
''' A parser for blocks written in C++ '''
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import re
import sys
import logging
logger = logging.getLogger(__name__)
def dummy_translator(the_type, default_v=None):
""" Doesn't really translate. """
return the_type
class ParserCCBlock(object):
""" Class to read blocks written in C++ """
def __init__(self, filename_cc, filename_h, blockname, version, type_trans=dummy_translator):
with open(filename_cc) as f:
self.code_cc = f.read()
with open(filename_h) as f:
self.code_h = f.read()
self.blockname = blockname
self.type_trans = type_trans
self.version = version
def read_io_signature(self):
""" Scans a .cc file for an IO signature. """
def _figure_out_iotype_and_vlen(iosigcall, typestr):
""" From a type identifier, returns the data type.
E.g., for sizeof(int), it will return 'int'.
Returns a list! """
if 'gr::io_signature::makev' in iosigcall:
                logger.error('gr::io_signature::makev is not yet supported')
raise ValueError
return {'type': [_typestr_to_iotype(x) for x in typestr.split(',')],
'vlen': [_typestr_to_vlen(x) for x in typestr.split(',')]
}
def _typestr_to_iotype(typestr):
""" Convert a type string (e.g. sizeof(int) * vlen) to the type (e.g. 'int'). """
type_match = re.search(r'sizeof\s*\(([^)]*)\)', typestr)
if type_match is None:
return self.type_trans('char')
return self.type_trans(type_match.group(1))
def _typestr_to_vlen(typestr):
""" From a type identifier, returns the vector length of the block's
input/out. E.g., for 'sizeof(int) * 10', it returns 10. For
'sizeof(int)', it returns '1'. For 'sizeof(int) * vlen', it returns
the string vlen. """
# Catch fringe case where no sizeof() is given
if typestr.find('sizeof') == -1:
return typestr
if typestr.find('*') == -1:
return '1'
vlen_parts = typestr.split('*')
for fac in vlen_parts:
if fac.find('sizeof') != -1:
vlen_parts.remove(fac)
if len(vlen_parts) == 1:
return vlen_parts[0].strip()
elif len(vlen_parts) > 1:
return '*'.join(vlen_parts).strip()
iosig = {}
iosig_regex = r'(?P<incall>gr::io_signature::make[23v]?)\s*\(\s*(?P<inmin>[^,]+),\s*(?P<inmax>[^,]+),' + \
r'\s*(?P<intype>(\([^\)]*\)|[^)])+)\),\s*' + \
r'(?P<outcall>gr::io_signature::make[23v]?)\s*\(\s*(?P<outmin>[^,]+),\s*(?P<outmax>[^,]+),' + \
r'\s*(?P<outtype>(\([^\)]*\)|[^)])+)\)'
iosig_match = re.compile(iosig_regex, re.MULTILINE).search(self.code_cc)
try:
iosig['in'] = _figure_out_iotype_and_vlen(iosig_match.group('incall'),
iosig_match.group('intype'))
iosig['in']['min_ports'] = iosig_match.group('inmin')
iosig['in']['max_ports'] = iosig_match.group('inmax')
except Exception:
logger.error("Error: Can't parse input signature.")
try:
iosig['out'] = _figure_out_iotype_and_vlen(iosig_match.group('outcall'),
iosig_match.group('outtype'))
iosig['out']['min_ports'] = iosig_match.group('outmin')
iosig['out']['max_ports'] = iosig_match.group('outmax')
except Exception:
logger.error("Error: Can't parse output signature.")
return iosig
def read_params(self):
""" Read the parameters required to initialize the block """
def _scan_param_list(start_idx):
""" Go through a parameter list and return a tuple each:
(type, name, default_value). Python's re just doesn't cut
it for C++ code :( """
i = start_idx
c = self.code_h
if c[i] != '(':
raise ValueError
i += 1
param_list = []
read_state = 'type'
in_string = False
parens_count = 0 # Counts ()
brackets_count = 0 # Counts <>
end_of_list = False
this_type = ''
this_name = ''
this_defv = ''
WHITESPACE = ' \t\n\r\f\v'
while not end_of_list:
# Keep track of (), stop when reaching final closing parens
if not in_string:
if c[i] == ')':
if parens_count == 0:
if read_state == 'type' and len(this_type):
raise ValueError(
'Found closing parentheses before finishing '
'last argument (this is how far I got: {})'.format \
(str(param_list))
)
if len(this_type):
param_list.append((this_type, this_name, this_defv))
end_of_list = True
break
else:
parens_count -= 1
elif c[i] == '(':
parens_count += 1
# Parameter type (int, const std::string, std::vector<gr_complex>, unsigned long ...)
if read_state == 'type':
if c[i] == '<':
brackets_count += 1
if c[i] == '>':
brackets_count -= 1
if c[i] == '&':
i += 1
continue
if c[i] in WHITESPACE and brackets_count == 0:
while c[i] in WHITESPACE:
i += 1
continue
if this_type == 'const' or this_type == '': # Ignore this
this_type = ''
elif this_type == 'unsigned': # Continue
this_type += ' '
continue
else:
read_state = 'name'
continue
this_type += c[i]
i += 1
continue
# Parameter name
if read_state == 'name':
if c[i] == '&' or c[i] in WHITESPACE:
i += 1
elif c[i] == '=':
if parens_count != 0:
raise ValueError(
'While parsing argument {} ({}): name finished but no closing parentheses.'.format \
(len(param_list)+1, this_type + ' ' + this_name)
)
read_state = 'defv'
i += 1
elif c[i] == ',':
if parens_count:
raise ValueError(
'While parsing argument {} ({}): name finished but no closing parentheses.'.format \
(len(param_list)+1, this_type + ' ' + this_name)
)
read_state = 'defv'
else:
this_name += c[i]
i += 1
continue
# Default value
if read_state == 'defv':
if in_string:
if c[i] == '"' and c[i-1] != '\\':
in_string = False
else:
this_defv += c[i]
elif c[i] == ',':
if parens_count:
raise ValueError(
'While parsing argument {} ({}): default value finished but no closing parentheses.'.format \
(len(param_list)+1, this_type + ' ' + this_name)
)
read_state = 'type'
param_list.append((this_type, this_name, this_defv))
this_type = ''
this_name = ''
this_defv = ''
else:
this_defv += c[i]
i += 1
continue
return param_list
# Go, go, go!
if self.version in ('37', '38'):
make_regex = r'static\s+sptr\s+make\s*'
else:
make_regex = r'(?<=_API)\s+\w+_sptr\s+\w+_make_\w+\s*'
make_match = re.compile(make_regex, re.MULTILINE).search(self.code_h)
try:
params_list = _scan_param_list(make_match.end(0))
except ValueError as ve:
logger.error("Can't parse the argument list: ", ve.args[0])
sys.exit(0)
params = []
for plist in params_list:
params.append({'type': self.type_trans(plist[0], plist[2]),
'key': plist[1],
'default': plist[2],
'in_constructor': True})
return params
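# Example usage (illustrative sketch; assumes 'my_block_impl.cc' and
# 'my_block.h' exist and follow GNU Radio 3.7 naming conventions):
#
#   parser = ParserCCBlock('my_block_impl.cc', 'my_block.h',
#                          blockname='my_block', version='37')
#   iosig = parser.read_io_signature()   # {'in': {...}, 'out': {...}}
#   params = parser.read_params()        # [{'type': ..., 'key': ..., ...}]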
| bastibl/gnuradio | gr-utils/python/modtool/tools/parser_cc_block.py | Python | gpl-3.0 | 10,443 |
"""Weighted maximum matching in general graphs.
The algorithm is taken from "Efficient Algorithms for Finding Maximum
Matching in Graphs" by Zvi Galil, ACM Computing Surveys, 1986.
It is based on the "blossom" method for finding augmenting paths and
the "primal-dual" method for finding a matching of maximum weight, both
due to Jack Edmonds.
Some ideas came from "Implementation of algorithms for maximum matching
on non-bipartite graphs" by H.J. Gabow, Standford Ph.D. thesis, 1973.
A C program for maximum weight matching by Ed Rothberg was used extensively
to validate this new code.
Copyright 2009 Joris van Rantwijk, license GPL2 or later
"""
# If assigned, DEBUG(str) is called with lots of debug messages.
from sys import stderr
def debug(s):
print >>stderr, 'DEBUG:', s
DEBUG = debug
DEBUG = None   # comment this line out to enable the debug trace above
# Check delta2/delta3 computation after every substage;
# only works on integer weights, slows down the algorithm to O(n^4).
CHECK_DELTA = False
# Check optimality of solution before returning; only works on integer weights.
CHECK_OPTIMUM = True
# read input
input = file('testdata/bench_mwmatching_2039_250_a.gr')
s = input.next().split()
#s = raw_input().split()
assert s[0] == 'p' and s[1] == 'edge'
edges = []
for i in xrange(int(s[3])):
#s = raw_input().split()
s = input.next().split()
assert len(s) == 4 and s[0] == 'e'
edges.append((int(s[1]), int(s[2]), int(s[3])))
maxcardinality = True
"""Compute a maximum-weighted matching in the general undirected
weighted graph given by "edges". If "maxcardinality" is true,
only maximum-cardinality matchings are considered as solutions.
Edges is a sequence of tuples (i, j, wt) describing an undirected
edge between vertex i and vertex j with weight wt. There is at most
one edge between any two vertices; no vertex has an edge to itself.
Vertices are identified by consecutive, non-negative integers.
Return a list "mate", such that mate[i] == j if vertex i is
matched to vertex j, and mate[i] == -1 if vertex i is not matched.
This function takes time O(n ** 3)."""
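# Worked example (illustrative; unrelated to the benchmark input below):
# for the triangle edges = [(0, 1, 2), (1, 2, 3), (2, 0, 4)], any matching
# contains at most one edge, so the single heaviest edge wins and the
# computed mate list is [2, -1, 0]: vertices 0 and 2 are matched through
# the weight-4 edge while vertex 1 stays single.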
#
# Vertices are numbered 0 .. (nvertex-1).
# Non-trivial blossoms are numbered nvertex .. (2*nvertex-1)
#
# Edges are numbered 0 .. (nedge-1).
# Edge endpoints are numbered 0 .. (2*nedge-1), such that endpoints
# (2*k) and (2*k+1) both belong to edge k.
#
# Many terms used in the comments (sub-blossom, T-vertex) come from
# the paper by Galil; read the paper before reading this code.
#
# Count vertices.
nedge = len(edges)
nvertex = 0
for (i, j, w) in edges:
assert i >= 0 and j >= 0 and i != j
if i >= nvertex:
nvertex = i + 1
if j >= nvertex:
nvertex = j + 1
# Find the maximum edge weight.
maxweight = max(0, max([ wt for (i, j, wt) in edges ]))
# If p is an edge endpoint,
# endpoint[p] is the vertex to which endpoint p is attached.
# Not modified by the algorithm.
endpoint = [ edges[p//2][p%2] for p in xrange(2*nedge) ]
# If v is a vertex,
# neighbend[v] is the list of remote endpoints of the edges attached to v.
# Not modified by the algorithm.
neighbend = [ [ ] for i in xrange(nvertex) ]
for k in xrange(len(edges)):
(i, j, w) = edges[k]
neighbend[i].append(2*k+1)
neighbend[j].append(2*k)
# If v is a vertex,
# mate[v] is the remote endpoint of its matched edge, or -1 if it is single
# (i.e. endpoint[mate[v]] is v's partner vertex).
# Initially all vertices are single; updated during augmentation.
mate = nvertex * [ -1 ]
# If b is a top-level blossom,
# label[b] is 0 if b is unlabeled (free);
# 1 if b is an S-vertex/blossom;
# 2 if b is a T-vertex/blossom.
# The label of a vertex is found by looking at the label of its
# top-level containing blossom.
# If v is a vertex inside a T-blossom,
# label[v] is 2 iff v is reachable from an S-vertex outside the blossom.
# Labels are assigned during a stage and reset after each augmentation.
label = (2 * nvertex) * [ 0 ]
# If b is a labeled top-level blossom,
# labelend[b] is the remote endpoint of the edge through which b obtained
# its label, or -1 if b's base vertex is single.
# If v is a vertex inside a T-blossom and label[v] == 2,
# labelend[v] is the remote endpoint of the edge through which v is
# reachable from outside the blossom.
labelend = (2 * nvertex) * [ -1 ]
# If v is a vertex,
# inblossom[v] is the top-level blossom to which v belongs.
# If v is a top-level vertex, v is itself a blossom (a trivial blossom)
# and inblossom[v] == v.
# Initially all vertices are top-level trivial blossoms.
inblossom = range(nvertex)
# If b is a sub-blossom,
# blossomparent[b] is its immediate parent (sub-)blossom.
# If b is a top-level blossom, blossomparent[b] is -1.
blossomparent = (2 * nvertex) * [ -1 ]
# If b is a non-trivial (sub-)blossom,
# blossomchilds[b] is an ordered list of its sub-blossoms, starting with
# the base and going round the blossom.
blossomchilds = (2 * nvertex) * [ None ]
# If b is a (sub-)blossom,
# blossombase[b] is its base VERTEX (i.e. recursive sub-blossom).
blossombase = range(nvertex) + nvertex * [ -1 ]
# If b is a non-trivial (sub-)blossom,
# blossomendps[b] is a list of endpoints on its connecting edges,
# such that blossomendps[b][i] is the local endpoint of blossomchilds[b][i]
# on the edge that connects it to blossomchilds[b][wrap(i+1)].
blossomendps = (2 * nvertex) * [ None ]
# If v is a free vertex (or an unreached vertex inside a T-blossom),
# bestedge[v] is the edge to an S-vertex with least slack,
# or -1 if there is no such edge.
# If b is a (possibly trivial) top-level S-blossom,
# bestedge[b] is the least-slack edge to a different S-blossom,
# or -1 if there is no such edge.
# This is used for efficient computation of delta2 and delta3.
bestedge = (2 * nvertex) * [ -1 ]
# If b is a non-trivial top-level S-blossom,
# blossombestedges[b] is a list of least-slack edges to neighbouring
# S-blossoms, or None if no such list has been computed yet.
# This is used for efficient computation of delta3.
blossombestedges = (2 * nvertex) * [ None ]
# List of currently unused blossom numbers.
unusedblossoms = range(nvertex, 2*nvertex)
# If v is a vertex,
# dualvar[v] = 2 * u(v) where u(v) is v's variable in the dual
# optimization problem (multiplication by two ensures integer values
# throughout the algorithm if all edge weights are integers).
# If b is a non-trivial blossom,
# dualvar[b] = z(b) where z(b) is b's variable in the dual optimization
# problem.
dualvar = nvertex * [ maxweight ] + nvertex * [ 0 ]
# If allowedge[k] is true, edge k has zero slack in the optimization
# problem; if allowedge[k] is false, the edge's slack may or may not
# be zero.
allowedge = nedge * [ False ]
# Queue of newly discovered S-vertices.
queue = [ ]
# Return 2 * slack of edge k (does not work inside blossoms).
def slack(k):
(i, j, wt) = edges[k]
return dualvar[i] + dualvar[j] - 2 * wt
# Generate the leaf vertices of a blossom.
def blossomLeaves(b):
if b < nvertex:
yield b
else:
for t in blossomchilds[b]:
if t < nvertex:
yield t
else:
for v in blossomLeaves(t):
yield v
# Assign label t to the top-level blossom containing vertex w
# and record the fact that w was reached through the edge with
# remote endpoint p.
def assignLabel(w, t, p):
if DEBUG: DEBUG('assignLabel(%d,%d,%d)' % (w, t, p))
b = inblossom[w]
assert label[w] == 0 and label[b] == 0
label[w] = label[b] = t
labelend[w] = labelend[b] = p
bestedge[w] = bestedge[b] = -1
if t == 1:
# b became an S-vertex/blossom; add it(s vertices) to the queue.
queue.extend(blossomLeaves(b))
if DEBUG: DEBUG('PUSH ' + str(list(blossomLeaves(b))))
elif t == 2:
# b became a T-vertex/blossom; assign label S to its mate.
# (If b is a non-trivial blossom, its base is the only vertex
# with an external mate.)
base = blossombase[b]
assert mate[base] >= 0
assignLabel(endpoint[mate[base]], 1, mate[base] ^ 1)
# Trace back from vertices v and w to discover either a new blossom
# or an augmenting path. Return the base vertex of the new blossom or -1.
def scanBlossom(v, w):
if DEBUG: DEBUG('scanBlossom(%d,%d)' % (v, w))
# Trace back from v and w, placing breadcrumbs as we go.
path = [ ]
base = -1
while v != -1 or w != -1:
# Look for a breadcrumb in v's blossom or put a new breadcrumb.
b = inblossom[v]
if label[b] & 4:
base = blossombase[b]
break
assert label[b] == 1
path.append(b)
label[b] = 5
# Trace one step back.
assert labelend[b] == mate[blossombase[b]]
if labelend[b] == -1:
# The base of blossom b is single; stop tracing this path.
v = -1
else:
v = endpoint[labelend[b]]
b = inblossom[v]
assert label[b] == 2
# b is a T-blossom; trace one more step back.
assert labelend[b] >= 0
v = endpoint[labelend[b]]
# Swap v and w so that we alternate between both paths.
if w != -1:
v, w = w, v
# Remove breadcrumbs.
for b in path:
label[b] = 1
# Return base vertex, if we found one.
return base
# Construct a new blossom with given base, containing edge k which
# connects a pair of S vertices. Label the new blossom as S; set its dual
# variable to zero; relabel its T-vertices to S and add them to the queue.
def addBlossom(base, k):
(v, w, wt) = edges[k]
bb = inblossom[base]
bv = inblossom[v]
bw = inblossom[w]
# Create blossom.
b = unusedblossoms.pop()
if DEBUG: DEBUG('addBlossom(%d,%d) (v=%d w=%d) -> %d' % (base, k, v, w, b))
blossombase[b] = base
blossomparent[b] = -1
blossomparent[bb] = b
# Make list of sub-blossoms and their interconnecting edge endpoints.
blossomchilds[b] = path = [ ]
blossomendps[b] = endps = [ ]
# Trace back from v to base.
while bv != bb:
# Add bv to the new blossom.
blossomparent[bv] = b
path.append(bv)
endps.append(labelend[bv])
assert (label[bv] == 2 or
(label[bv] == 1 and labelend[bv] == mate[blossombase[bv]]))
# Trace one step back.
assert labelend[bv] >= 0
v = endpoint[labelend[bv]]
bv = inblossom[v]
# Reverse lists, add endpoint that connects the pair of S vertices.
path.append(bb)
path.reverse()
endps.reverse()
endps.append(2*k)
# Trace back from w to base.
while bw != bb:
# Add bw to the new blossom.
blossomparent[bw] = b
path.append(bw)
endps.append(labelend[bw] ^ 1)
assert (label[bw] == 2 or
(label[bw] == 1 and labelend[bw] == mate[blossombase[bw]]))
# Trace one step back.
assert labelend[bw] >= 0
w = endpoint[labelend[bw]]
bw = inblossom[w]
# Set label to S.
assert label[bb] == 1
label[b] = 1
labelend[b] = labelend[bb]
# Set dual variable to zero.
dualvar[b] = 0
# Relabel vertices.
for v in blossomLeaves(b):
if label[inblossom[v]] == 2:
# This T-vertex now turns into an S-vertex because it becomes
# part of an S-blossom; add it to the queue.
queue.append(v)
inblossom[v] = b
# Compute blossombestedges[b].
bestedgeto = (2 * nvertex) * [ -1 ]
for bv in path:
if blossombestedges[bv] is None:
# This subblossom does not have a list of least-slack edges;
# get the information from the vertices.
nblists = [ [ p // 2 for p in neighbend[v] ]
for v in blossomLeaves(bv) ]
else:
# Walk this subblossom's least-slack edges.
nblists = [ blossombestedges[bv] ]
for nblist in nblists:
for k in nblist:
(i, j, wt) = edges[k]
if inblossom[j] == b:
i, j = j, i
bj = inblossom[j]
if (bj != b and label[bj] == 1 and
(bestedgeto[bj] == -1 or
slack(k) < slack(bestedgeto[bj]))):
bestedgeto[bj] = k
# Forget about least-slack edges of the subblossom.
blossombestedges[bv] = None
bestedge[bv] = -1
blossombestedges[b] = [ k for k in bestedgeto if k != -1 ]
# Select bestedge[b].
bestedge[b] = -1
for k in blossombestedges[b]:
if bestedge[b] == -1 or slack(k) < slack(bestedge[b]):
bestedge[b] = k
if DEBUG: DEBUG('blossomchilds[%d]=' % b + repr(blossomchilds[b]))
# Expand the given top-level blossom.
def expandBlossom(b, endstage):
if DEBUG: DEBUG('expandBlossom(%d,%d) %s' % (b, endstage, repr(blossomchilds[b])))
# Convert sub-blossoms into top-level blossoms.
for s in blossomchilds[b]:
blossomparent[s] = -1
if s < nvertex:
inblossom[s] = s
elif endstage and dualvar[s] == 0:
# Recursively expand this sub-blossom.
expandBlossom(s, endstage)
else:
for v in blossomLeaves(s):
inblossom[v] = s
# If we expand a T-blossom during a stage, its sub-blossoms must be
# relabeled.
if (not endstage) and label[b] == 2:
# Start at the sub-blossom through which the expanding
        # blossom obtained its label, and relabel sub-blossoms until
# we reach the base.
# Figure out through which sub-blossom the expanding blossom
# obtained its label initially.
assert labelend[b] >= 0
entrychild = inblossom[endpoint[labelend[b] ^ 1]]
# Decide in which direction we will go round the blossom.
j = blossomchilds[b].index(entrychild)
if j & 1:
# Start index is odd; go forward and wrap.
j -= len(blossomchilds[b])
jstep = 1
endptrick = 0
else:
# Start index is even; go backward.
jstep = -1
endptrick = 1
# Move along the blossom until we get to the base.
p = labelend[b]
while j != 0:
# Relabel the T-sub-blossom.
label[endpoint[p ^ 1]] = 0
label[endpoint[blossomendps[b][j-endptrick]^endptrick^1]] = 0
assignLabel(endpoint[p ^ 1], 2, p)
# Step to the next S-sub-blossom and note its forward endpoint.
allowedge[blossomendps[b][j-endptrick]//2] = True
j += jstep
p = blossomendps[b][j-endptrick] ^ endptrick
# Step to the next T-sub-blossom.
allowedge[p//2] = True
j += jstep
# Relabel the base T-sub-blossom WITHOUT stepping through to
# its mate (so don't call assignLabel).
bv = blossomchilds[b][j]
label[endpoint[p ^ 1]] = label[bv] = 2
labelend[endpoint[p ^ 1]] = labelend[bv] = p
bestedge[bv] = -1
# Continue along the blossom until we get back to entrychild.
j += jstep
while blossomchilds[b][j] != entrychild:
# Examine the vertices of the sub-blossom to see whether
# it is reachable from a neighbouring S-vertex outside the
# expanding blossom.
bv = blossomchilds[b][j]
if label[bv] == 1:
# This sub-blossom just got label S through one of its
# neighbours; leave it.
j += jstep
continue
for v in blossomLeaves(bv):
if label[v] != 0:
break
# If the sub-blossom contains a reachable vertex, assign
# label T to the sub-blossom.
if label[v] != 0:
assert label[v] == 2
assert inblossom[v] == bv
label[v] = 0
label[endpoint[mate[blossombase[bv]]]] = 0
assignLabel(v, 2, labelend[v])
j += jstep
# Recycle the blossom number.
label[b] = labelend[b] = -1
blossomchilds[b] = blossomendps[b] = None
blossombase[b] = -1
blossombestedges[b] = None
bestedge[b] = -1
unusedblossoms.append(b)
# Swap matched/unmatched edges over an alternating path through blossom b
# between vertex v and the base vertex. Keep blossom bookkeeping consistent.
def augmentBlossom(b, v):
if DEBUG: DEBUG('augmentBlossom(%d,%d)' % (b, v))
# Bubble up through the blossom tree from vertex v to an immediate
# sub-blossom of b.
t = v
while blossomparent[t] != b:
t = blossomparent[t]
# Recursively deal with the first sub-blossom.
if t >= nvertex:
augmentBlossom(t, v)
# Decide in which direction we will go round the blossom.
i = j = blossomchilds[b].index(t)
if i & 1:
# Start index is odd; go forward and wrap.
j -= len(blossomchilds[b])
jstep = 1
endptrick = 0
else:
# Start index is even; go backward.
jstep = -1
endptrick = 1
# Move along the blossom until we get to the base.
while j != 0:
# Step to the next sub-blossom and augment it recursively.
j += jstep
t = blossomchilds[b][j]
p = blossomendps[b][j-endptrick] ^ endptrick
if t >= nvertex:
augmentBlossom(t, endpoint[p])
# Step to the next sub-blossom and augment it recursively.
j += jstep
t = blossomchilds[b][j]
if t >= nvertex:
augmentBlossom(t, endpoint[p ^ 1])
# Match the edge connecting those sub-blossoms.
mate[endpoint[p]] = p ^ 1
mate[endpoint[p ^ 1]] = p
if DEBUG: DEBUG('PAIR %d %d (k=%d)' % (endpoint[p], endpoint[p^1], p//2))
# Rotate the list of sub-blossoms to put the new base at the front.
blossomchilds[b] = blossomchilds[b][i:] + blossomchilds[b][:i]
blossomendps[b] = blossomendps[b][i:] + blossomendps[b][:i]
blossombase[b] = blossombase[blossomchilds[b][0]]
assert blossombase[b] == v
# Swap matched/unmatched edges over an alternating path between two
# single vertices. The augmenting path runs through edge k, which
# connects a pair of S vertices.
def augmentMatching(k):
(v, w, wt) = edges[k]
if DEBUG: DEBUG('augmentMatching(%d) (v=%d w=%d)' % (k, v, w))
if DEBUG: DEBUG('PAIR %d %d (k=%d)' % (v, w, k))
for (s, p) in ((v, 2*k+1), (w, 2*k)):
# Match vertex s to remote endpoint p. Then trace back from s
# until we find a single vertex, swapping matched and unmatched
# edges as we go.
while 1:
bs = inblossom[s]
assert label[bs] == 1
assert labelend[bs] == mate[blossombase[bs]]
# Augment through the S-blossom from s to base.
if bs >= nvertex:
augmentBlossom(bs, s)
# Update mate[s]
mate[s] = p
# Trace one step back.
if labelend[bs] == -1:
# Reached single vertex; stop.
break
t = endpoint[labelend[bs]]
bt = inblossom[t]
assert label[bt] == 2
# Trace one step back.
assert labelend[bt] >= 0
s = endpoint[labelend[bt]]
j = endpoint[labelend[bt] ^ 1]
# Augment through the T-blossom from j to base.
assert blossombase[bt] == t
if bt >= nvertex:
augmentBlossom(bt, j)
# Update mate[j]
mate[j] = labelend[bt]
# Keep the opposite endpoint;
# it will be assigned to mate[s] in the next step.
p = labelend[bt] ^ 1
if DEBUG: DEBUG('PAIR %d %d (k=%d)' % (s, t, p//2))
# Verify that the optimum solution has been reached.
def verifyOptimum():
if maxcardinality:
# Vertices may have negative dual;
# find a constant non-negative number to add to all vertex duals.
vdualoffset = max(0, -min(dualvar[:nvertex]))
else:
vdualoffset = 0
# 0. all dual variables are non-negative
assert min(dualvar[:nvertex]) + vdualoffset >= 0
assert min(dualvar[nvertex:]) >= 0
# 0. all edges have non-negative slack and
# 1. all matched edges have zero slack;
for k in xrange(nedge):
(i, j, wt) = edges[k]
s = dualvar[i] + dualvar[j] - 2 * wt
iblossoms = [ i ]
jblossoms = [ j ]
while blossomparent[iblossoms[-1]] != -1:
iblossoms.append(blossomparent[iblossoms[-1]])
while blossomparent[jblossoms[-1]] != -1:
jblossoms.append(blossomparent[jblossoms[-1]])
iblossoms.reverse()
jblossoms.reverse()
for (bi, bj) in zip(iblossoms, jblossoms):
if bi != bj:
break
s += 2 * dualvar[bi]
assert s >= 0
if mate[i] // 2 == k or mate[j] // 2 == k:
assert mate[i] // 2 == k and mate[j] // 2 == k
assert s == 0
# 2. all single vertices have zero dual value;
for v in xrange(nvertex):
assert mate[v] >= 0 or dualvar[v] + vdualoffset == 0
# 3. all blossoms with positive dual value are full.
for b in xrange(nvertex, 2*nvertex):
if blossombase[b] >= 0 and dualvar[b] > 0:
assert len(blossomendps[b]) % 2 == 1
for p in blossomendps[b][1::2]:
assert mate[endpoint[p]] == p ^ 1
assert mate[endpoint[p ^ 1]] == p
# Ok.
# Check optimized delta2 against a trivial computation.
def checkDelta2():
for v in xrange(nvertex):
if label[inblossom[v]] == 0:
#bd = None
bk = -1
for p in neighbend[v]:
k = p // 2
w = endpoint[p]
if label[inblossom[w]] == 1:
d = slack(k)
if bk == -1 or d < bd:
bk = k
bd = d
if DEBUG and (bestedge[v] != -1 or bk != -1) and (bestedge[v] == -1 or bd != slack(bestedge[v])):
DEBUG('v=' + str(v) + ' bk=' + str(bk) + ' bd=' + str(bd) + ' bestedge=' + str(bestedge[v]) + ' slack=' + str(slack(bestedge[v])))
assert (bk == -1 and bestedge[v] == -1) or (bestedge[v] != -1 and bd == slack(bestedge[v]))
# Check optimized delta3 against a trivial computation.
def checkDelta3():
bk = -1
# bd = None
tbk = -1
# tbd = None
for b in xrange(2 * nvertex):
if blossomparent[b] == -1 and label[b] == 1:
for v in blossomLeaves(b):
for p in neighbend[v]:
k = p // 2
w = endpoint[p]
if inblossom[w] != b and label[inblossom[w]] == 1:
d = slack(k)
if bk == -1 or d < bd:
bk = k
bd = d
if bestedge[b] != -1:
(i, j, wt) = edges[bestedge[b]]
assert inblossom[i] == b or inblossom[j] == b
assert inblossom[i] != b or inblossom[j] != b
assert label[inblossom[i]] == 1 and label[inblossom[j]] == 1
if tbk == -1 or slack(bestedge[b]) < tbd:
tbk = bestedge[b]
tbd = slack(bestedge[b])
if DEBUG and bd != tbd:
DEBUG('bk=%d tbk=%d bd=%s tbd=%s' % (bk, tbk, repr(bd), repr(tbd)))
assert bd == tbd
# Main loop: continue until no further improvement is possible.
for t in xrange(nvertex):
# Each iteration of this loop is a "stage".
# A stage finds an augmenting path and uses that to improve
# the matching.
if DEBUG: DEBUG('STAGE %d' % t)
# Remove labels from top-level blossoms/vertices.
label[:] = (2 * nvertex) * [ 0 ]
# Forget all about least-slack edges.
bestedge[:] = (2 * nvertex) * [ -1 ]
blossombestedges[nvertex:] = nvertex * [ None ]
    # Loss of labeling means that we cannot be sure that currently
    # allowable edges remain allowable throughout this stage.
allowedge[:] = nedge * [ False ]
# Make queue empty.
queue[:] = [ ]
# Label single blossoms/vertices with S and put them in the queue.
for v in xrange(nvertex):
if mate[v] == -1 and label[inblossom[v]] == 0:
assignLabel(v, 1, -1)
# Loop until we succeed in augmenting the matching.
augmented = 0
while 1:
# Each iteration of this loop is a "substage".
# A substage tries to find an augmenting path;
# if found, the path is used to improve the matching and
# the stage ends. If there is no augmenting path, the
# primal-dual method is used to pump some slack out of
# the dual variables.
if DEBUG: DEBUG('SUBSTAGE')
# Continue labeling until all vertices which are reachable
# through an alternating path have got a label.
while queue and not augmented:
# Take an S vertex from the queue.
v = queue.pop()
if DEBUG: DEBUG('POP v=%d' % v)
assert label[inblossom[v]] == 1
# Scan its neighbours:
for p in neighbend[v]:
k = p // 2
w = endpoint[p]
# w is a neighbour to v
if inblossom[v] == inblossom[w]:
# this edge is internal to a blossom; ignore it
continue
if not allowedge[k]:
kslack = slack(k)
if kslack <= 0:
# edge k has zero slack => it is allowable
allowedge[k] = True
if allowedge[k]:
if label[inblossom[w]] == 0:
# (C1) w is a free vertex;
# label w with T and label its mate with S (R12).
assignLabel(w, 2, p ^ 1)
elif label[inblossom[w]] == 1:
# (C2) w is an S-vertex (not in the same blossom);
# follow back-links to discover either an
# augmenting path or a new blossom.
base = scanBlossom(v, w)
if base >= 0:
# Found a new blossom; add it to the blossom
# bookkeeping and turn it into an S-blossom.
addBlossom(base, k)
else:
# Found an augmenting path; augment the
# matching and end this stage.
augmentMatching(k)
augmented = 1
break
elif label[w] == 0:
# w is inside a T-blossom, but w itself has not
# yet been reached from outside the blossom;
# mark it as reached (we need this to relabel
# during T-blossom expansion).
assert label[inblossom[w]] == 2
label[w] = 2
labelend[w] = p ^ 1
elif label[inblossom[w]] == 1:
# keep track of the least-slack non-allowable edge to
# a different S-blossom.
b = inblossom[v]
if bestedge[b] == -1 or kslack < slack(bestedge[b]):
bestedge[b] = k
elif label[w] == 0:
# w is a free vertex (or an unreached vertex inside
                    # a T-blossom) but we cannot reach it yet;
# keep track of the least-slack edge that reaches w.
if bestedge[w] == -1 or kslack < slack(bestedge[w]):
bestedge[w] = k
if augmented:
break
# There is no augmenting path under these constraints;
# compute delta and reduce slack in the optimization problem.
# (Note that our vertex dual variables, edge slacks and delta's
# are pre-multiplied by two.)
deltatype = -1
# delta = deltaedge = deltablossom = None # XXX shedskin: int/None mixing
# Verify data structures for delta2/delta3 computation.
if CHECK_DELTA:
checkDelta2()
checkDelta3()
        # Compute delta1: the minimum value of any vertex dual.
if not maxcardinality:
deltatype = 1
delta = min(dualvar[:nvertex])
# Compute delta2: the minimum slack on any edge between
# an S-vertex and a free vertex.
for v in xrange(nvertex):
if label[inblossom[v]] == 0 and bestedge[v] != -1:
d = slack(bestedge[v])
if deltatype == -1 or d < delta:
delta = d
deltatype = 2
deltaedge = bestedge[v]
# Compute delta3: half the minimum slack on any edge between
# a pair of S-blossoms.
for b in xrange(2 * nvertex):
if ( blossomparent[b] == -1 and label[b] == 1 and
bestedge[b] != -1 ):
kslack = slack(bestedge[b])
#if type(kslack) in (int, long):
assert (kslack % 2) == 0 # XXX shedskin
d = kslack // 2
#else:
# d = kslack / 2
if deltatype == -1 or d < delta:
delta = d
deltatype = 3
deltaedge = bestedge[b]
# Compute delta4: minimum z variable of any T-blossom.
for b in xrange(nvertex, 2*nvertex):
if ( blossombase[b] >= 0 and blossomparent[b] == -1 and
label[b] == 2 and
(deltatype == -1 or dualvar[b] < delta) ):
delta = dualvar[b]
deltatype = 4
deltablossom = b
if deltatype == -1:
# No further improvement possible; max-cardinality optimum
# reached. Do a final delta update to make the optimum
            # verifiable.
assert maxcardinality
deltatype = 1
delta = max(0, min(dualvar[:nvertex]))
# Update dual variables according to delta.
for v in xrange(nvertex):
if label[inblossom[v]] == 1:
# S-vertex: 2*u = 2*u - 2*delta
dualvar[v] -= delta
elif label[inblossom[v]] == 2:
# T-vertex: 2*u = 2*u + 2*delta
dualvar[v] += delta
for b in xrange(nvertex, 2*nvertex):
if blossombase[b] >= 0 and blossomparent[b] == -1:
if label[b] == 1:
# top-level S-blossom: z = z + 2*delta
dualvar[b] += delta
elif label[b] == 2:
# top-level T-blossom: z = z - 2*delta
dualvar[b] -= delta
# Take action at the point where minimum delta occurred.
if DEBUG: DEBUG('delta%d=%f' % (deltatype, delta))
if deltatype == 1:
# No further improvement possible; optimum reached.
break
elif deltatype == 2:
# Use the least-slack edge to continue the search.
allowedge[deltaedge] = True
(i, j, wt) = edges[deltaedge]
if label[inblossom[i]] == 0:
i, j = j, i
assert label[inblossom[i]] == 1
queue.append(i)
elif deltatype == 3:
# Use the least-slack edge to continue the search.
allowedge[deltaedge] = True
(i, j, wt) = edges[deltaedge]
assert label[inblossom[i]] == 1
queue.append(i)
elif deltatype == 4:
# Expand the least-z blossom.
expandBlossom(deltablossom, False)
        # End of this substage.
# Stop when no more augmenting path can be found.
if not augmented:
break
# End of a stage; expand all S-blossoms which have dualvar = 0.
for b in xrange(nvertex, 2*nvertex):
if ( blossomparent[b] == -1 and blossombase[b] >= 0 and
label[b] == 1 and dualvar[b] == 0 ):
expandBlossom(b, True)
# Verify that we reached the optimum solution.
if CHECK_OPTIMUM:
verifyOptimum()
# Transform mate[] such that mate[v] is the vertex to which v is paired.
for v in xrange(nvertex):
if mate[v] >= 0:
mate[v] = endpoint[mate[v]]
for v in xrange(nvertex):
assert mate[v] == -1 or mate[mate[v]] == v
print mate
| tomspur/shedskin | examples/mwmatching.py | Python | gpl-3.0 | 32,668 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Jason Lowe-Power
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jason Lowe-Power
from m5.params import *
from m5.SimObject import SimObject
class SimpleObject(SimObject):
type = 'SimpleObject'
cxx_header = "learning_gem5/part2/simple_object.hh"
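# Illustrative sketch (not part of the original file): instantiating this
# SimObject from a gem5 configuration script, as in the learning_gem5 part 2
# tutorial. Assumes gem5 was compiled with SimpleObject included.
#
#   import m5
#   from m5.objects import Root, SimpleObject
#
#   root = Root(full_system=False)
#   root.simple = SimpleObject()
#   m5.instantiate()
#   exit_event = m5.simulate()
#   print('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause()))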
| vineodd/PIMSim | GEM5Simulation/gem5/src/learning_gem5/part2/SimpleObject.py | Python | gpl-3.0 | 1,750 |
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Timothy Legge <timlegge@gmail.com> and Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from contextlib import closing
class Bookmark(): # {{{
'''
A simple class fetching bookmark data
kobo-specific
'''
def __init__(self, db_path, contentid, path, id, book_format, bookmark_extension):
self.book_format = book_format
self.bookmark_extension = bookmark_extension
self.book_length = 0 # Not Used
self.id = id
self.last_read = 0
self.last_read_location = 0 # Not Used
self.path = path
self.timestamp = 0
self.user_notes = None
self.db_path = db_path
self.contentid = contentid
self.percent_read = 0
self.get_bookmark_data()
self.get_book_length() # Not Used
def get_bookmark_data(self):
''' Return the timestamp and last_read_location '''
import sqlite3 as sqlite
user_notes = {}
self.timestamp = os.path.getmtime(self.path)
with closing(sqlite.connect(self.db_path)) as connection:
            # return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
t = (self.contentid,)
cursor.execute('select bm.bookmarkid, bm.contentid, bm.volumeid, '
'bm.text, bm.annotation, bm.ChapterProgress, '
'bm.StartContainerChildIndex, bm.StartOffset, c.BookTitle, '
'c.TITLE, c.volumeIndex, c.___NumPages '
'from Bookmark bm inner join Content c on '
'bm.contentid = c.contentid and '
'bm.volumeid = ? order by bm.volumeid, bm.chapterprogress', t)
previous_chapter = 0
bm_count = 0
for row in cursor:
current_chapter = row[10]
if previous_chapter == current_chapter:
bm_count = bm_count + 1
else:
bm_count = 0
text = row[3]
annotation = row[4]
# A dog ear (bent upper right corner) is a bookmark
if row[6] == row[7] == 0: # StartContainerChildIndex = StartOffset = 0
e_type = 'Bookmark'
text = row[9]
# highlight is text with no annotation
elif text is not None and (annotation is None or annotation == ""):
e_type = 'Highlight'
elif text and annotation:
e_type = 'Annotation'
else:
e_type = 'Unknown annotation type'
note_id = row[10] + bm_count
chapter_title = row[9]
# book_title = row[8]
chapter_progress = min(round(float(100*row[5]),2),100)
user_notes[note_id] = dict(id=self.id,
displayed_location=note_id,
type=e_type,
text=text,
annotation=annotation,
chapter=row[10],
chapter_title=chapter_title,
chapter_progress=chapter_progress)
previous_chapter = row[10]
# debug_print("e_type:" , e_type, '\t', 'loc: ', note_id, 'text: ', text,
# 'annotation: ', annotation, 'chapter_title: ', chapter_title,
# 'chapter_progress: ', chapter_progress, 'date: ')
cursor.execute('select datelastread, ___PercentRead from content '
'where bookid is Null and '
'contentid = ?', t)
for row in cursor:
self.last_read = row[0]
self.percent_read = row[1]
# print row[1]
cursor.close()
# self.last_read_location = self.last_read - self.pdf_page_offset
self.user_notes = user_notes
def get_book_length(self):
#TL self.book_length = 0
#TL self.book_length = int(unpack('>I', record0[0x04:0x08])[0])
pass
# }}}
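# Illustrative usage sketch (hypothetical paths and ids; not part of the
# original module):
#
#   bm = Bookmark('/media/KOBO/.kobo/KoboReader.sqlite',
#                 '/mnt/onboard/book.epub',  # ContentID as stored on device
#                 '/media/KOBO/book.epub',
#                 id=42, book_format='EPUB', bookmark_extension='annot')
#   print bm.percent_read, bm.last_read
#   for loc in sorted(bm.user_notes):
#       note = bm.user_notes[loc]
#       print loc, note['type'], note['chapter_title']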
| insomnia-lab/calibre | src/calibre/devices/kobo/bookmark.py | Python | gpl-3.0 | 4,557 |
from net.grinder.script.Grinder import grinder
from net.grinder.script import Test
from net.grinder.plugin.http import HTTPRequest
from HTTPClient import NVPair
from java.util import Random
newCellTest = Test(1, "Make a new Cell")
evaluationTest = Test(2, "Evaluate")
updateTest = Test(3, "Poll until evaluated")
deleteCellTest = Test(4, "Delete Cell")
class TestRunner:
def __call__(self):
random = Random()
worksheet = random.nextInt(10)
base_url = 'http://localhost:8080/home/admin/%s' % worksheet
request = newCellTest.wrap(HTTPRequest(url=base_url + "/new_cell_after"))
result = request.POST((NVPair("id","0"),))
        # rstrip() strips a *set of characters* ('_', 'S', 'A', 'G', 'E'),
        # not the literal suffix; safe here only because cell ids are numeric.
        new_cell = result.text.split()[0].rstrip('___S_A_G_E___')
request = evaluationTest.wrap(HTTPRequest(url=base_url + "/eval"))
a, b = random.nextInt(10**1), random.nextInt(10**1)
evalData = ( NVPair("id", new_cell),
NVPair("input", "%s * %s"% (a,b)),
NVPair("newcell", "0"),)
result = request.POST(evalData)
count = 0
while (True):
request = updateTest.wrap(HTTPRequest(url=base_url + "/cell_update"))
getData = ( NVPair("id", new_cell),)
result = request.POST(getData)
count += 1
if result.text.find('pre') != -1:
print 'wait',count,'test',a,'*',b,'=', strip_answer(result.text)
break
request = deleteCellTest.wrap(HTTPRequest(url=base_url + "/delete_cell"))
getData = ( NVPair("id", new_cell),)
result = request.POST(getData)
def strip_answer(text):
#<pre class="shrunk">532962756677</pre>
st = text.find('<pre')
end = text.find('</pre>')
return text[st + 20 : end]
| topisani/sagenb | sagenb/testing/grinder/simpleeval_multi.py | Python | gpl-3.0 | 1,778 |
from base64 import standard_b64encode
from datetime import timedelta
import re
import shutil
import socket
import traceback
import xmlrpclib
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import ss, sp
from couchpotato.core.helpers.variable import tryInt, md5, cleanHost
from couchpotato.core.logger import CPLog
log = CPLog(__name__)
autoload = 'NZBGet'
class NZBGet(DownloaderBase):
protocol = ['nzb']
rpc = 'xmlrpc'
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
            The file gets downloaded in the searcher and sent to this function
This is done to have failed checking before using the downloader, so the downloader
doesn't need to worry about that
:return: boolean
One faile returns false, but the downloaded should log his own errors
"""
if not media: media = {}
if not data: data = {}
if not filedata:
log.error('Unable to get NZB file: %s', traceback.format_exc())
return False
log.info('Sending "%s" to NZBGet.', data.get('name'))
nzb_name = ss('%s.nzb' % self.createNzbName(data, media))
rpc = self.getRPC()
try:
if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name):
log.debug('Successfully connected to NZBGet')
else:
log.info('Successfully connected to NZBGet, but unable to send a message')
except socket.error:
log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
return False
except xmlrpclib.ProtocolError as e:
if e.errcode == 401:
log.error('Password is incorrect.')
else:
log.error('Protocol Error: %s', e)
return False
if re.search(r"^0", rpc.version()):
xml_response = rpc.append(nzb_name, self.conf('category'), False, standard_b64encode(filedata.strip()))
else:
xml_response = rpc.append(nzb_name, self.conf('category'), tryInt(self.conf('priority')), False, standard_b64encode(filedata.strip()))
if xml_response:
log.info('NZB sent successfully to NZBGet')
nzb_id = md5(data['url']) # about as unique as they come ;)
couchpotato_id = "couchpotato=" + nzb_id
groups = rpc.listgroups()
file_id = [item['LastID'] for item in groups if item['NZBFilename'] == nzb_name]
confirmed = rpc.editqueue("GroupSetParameter", 0, couchpotato_id, file_id)
if confirmed:
log.debug('couchpotato parameter set in nzbget download')
return self.downloadReturnId(nzb_id)
else:
log.error('NZBGet could not add %s to the queue.', nzb_name)
return False
def test(self):
""" Check if connection works
:return: bool
"""
rpc = self.getRPC()
try:
if rpc.writelog('INFO', 'CouchPotato connected to test connection'):
log.debug('Successfully connected to NZBGet')
else:
log.info('Successfully connected to NZBGet, but unable to send a message')
except socket.error:
log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
return False
except xmlrpclib.ProtocolError as e:
if e.errcode == 401:
log.error('Password is incorrect.')
else:
log.error('Protocol Error: %s', e)
return False
return True
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking NZBGet download status.')
rpc = self.getRPC()
try:
if rpc.writelog('INFO', 'CouchPotato connected to check status'):
log.debug('Successfully connected to NZBGet')
else:
log.info('Successfully connected to NZBGet, but unable to send a message')
except socket.error:
log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
return []
except xmlrpclib.ProtocolError as e:
if e.errcode == 401:
log.error('Password is incorrect.')
else:
log.error('Protocol Error: %s', e)
return []
# Get NZBGet data
try:
status = rpc.status()
groups = rpc.listgroups()
queue = rpc.postqueue(0)
history = rpc.history()
except:
log.error('Failed getting data: %s', traceback.format_exc(1))
return []
release_downloads = ReleaseDownloadList(self)
for nzb in groups:
try:
nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
except:
nzb_id = nzb['NZBID']
if nzb_id in ids:
log.debug('Found %s in NZBGet download queue', nzb['NZBFilename'])
timeleft = -1
try:
                    if nzb['ActiveDownloads'] > 0 and nzb['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']):
                        # RemainingSizeMB is in megabytes; 2 ** 20 converts it to bytes
                        timeleft = str(timedelta(seconds = nzb['RemainingSizeMB'] * 2 ** 20 / status['DownloadRate']))
except:
pass
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'original_status': 'DOWNLOADING' if nzb['ActiveDownloads'] > 0 else 'QUEUED',
# Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item
'timeleft': timeleft,
})
for nzb in queue: # 'Parameters' is not passed in rpc.postqueue
if nzb['NZBID'] in ids:
log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename'])
release_downloads.append({
'id': nzb['NZBID'],
'name': nzb['NZBFilename'],
'original_status': nzb['Stage'],
'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1,
})
for nzb in history:
try:
nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
except:
nzb_id = nzb['NZBID']
if nzb_id in ids:
log.debug('Found %s in NZBGet history. TotalStatus: %s, ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['Status'], nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'status': 'completed' if 'SUCCESS' in nzb['Status'] else 'failed',
'original_status': nzb['Status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(nzb['DestDir'])
})
return release_downloads
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
rpc = self.getRPC()
try:
if rpc.writelog('INFO', 'CouchPotato connected to delete some history'):
log.debug('Successfully connected to NZBGet')
else:
log.info('Successfully connected to NZBGet, but unable to send a message')
except socket.error:
log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
return False
except xmlrpclib.ProtocolError as e:
if e.errcode == 401:
log.error('Password is incorrect.')
else:
log.error('Protocol Error: %s', e)
return False
try:
history = rpc.history()
nzb_id = None
path = None
for hist in history:
for param in hist['Parameters']:
if param['Name'] == 'couchpotato' and param['Value'] == release_download['id']:
nzb_id = hist['ID']
path = hist['DestDir']
if nzb_id and path and rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]):
shutil.rmtree(path, True)
except:
log.error('Failed deleting: %s', traceback.format_exc(0))
return False
return True
def getRPC(self):
url = cleanHost(host = self.conf('host'), ssl = self.conf('ssl'), username = self.conf('username'), password = self.conf('password')) + self.rpc
return xmlrpclib.ServerProxy(url)
config = [{
'name': 'nzbget',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'nzbget',
'label': 'NZBGet',
'description': 'Use <a href="http://nzbget.net" target="_blank">NZBGet</a> to download NZBs.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'nzb',
},
{
'name': 'host',
'default': 'localhost:6789',
'description': 'Hostname with port. Usually <strong>localhost:6789</strong>',
},
{
'name': 'ssl',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>',
},
{
'name': 'username',
'default': 'nzbget',
'advanced': True,
'description': 'Set a different username to connect. Default: nzbget',
},
{
'name': 'password',
'type': 'password',
'description': 'Default NZBGet password is <i>tegbzn6789</i>',
},
{
'name': 'category',
'default': 'Movies',
'description': 'The category CP places the nzb in. Like <strong>movies</strong> or <strong>couchpotato</strong>',
},
{
'name': 'priority',
'advanced': True,
'default': '0',
'type': 'dropdown',
'values': [('Very Low', -100), ('Low', -50), ('Normal', 0), ('High', 50), ('Very High', 100), ('Forced', 900)],
'description': 'Only change this if you are using NZBget 13.0 or higher',
},
{
'name': 'manual',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},
],
}
],
}]
| xombiemp/CouchPotatoServer | couchpotato/core/downloaders/nzbget.py | Python | gpl-3.0 | 12,455 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Unit tests for the `corpora.Dictionary` class.
"""
from collections import Mapping
import logging
import tempfile
import unittest
import os
import os.path
import scipy
import gensim
from gensim.corpora import Dictionary
from six import PY3
from six.moves import zip
# sample data files are located in the same folder
module_path = os.path.dirname(__file__)
def get_tmpfile(suffix):
return os.path.join(tempfile.gettempdir(), suffix)
class TestDictionary(unittest.TestCase):
def setUp(self):
self.texts = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']]
def testDocFreqOneDoc(self):
texts = [['human', 'interface', 'computer']]
d = Dictionary(texts)
expected = {0: 1, 1: 1, 2: 1}
self.assertEqual(d.dfs, expected)
def testDocFreqAndToken2IdForSeveralDocsWithOneWord(self):
# two docs
texts = [['human'], ['human']]
d = Dictionary(texts)
expected = {0: 2}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 0}
self.assertEqual(d.token2id, expected)
# three docs
texts = [['human'], ['human'], ['human']]
d = Dictionary(texts)
expected = {0: 3}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 0}
self.assertEqual(d.token2id, expected)
# four docs
texts = [['human'], ['human'], ['human'], ['human']]
d = Dictionary(texts)
expected = {0: 4}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 0}
self.assertEqual(d.token2id, expected)
def testDocFreqForOneDocWithSeveralWord(self):
# two words
texts = [['human', 'cat']]
d = Dictionary(texts)
expected = {0: 1, 1: 1}
self.assertEqual(d.dfs, expected)
# three words
texts = [['human', 'cat', 'minors']]
d = Dictionary(texts)
expected = {0: 1, 1: 1, 2: 1}
self.assertEqual(d.dfs, expected)
def testBuild(self):
d = Dictionary(self.texts)
expected = {0: 2, 1: 2, 2: 2, 3: 2, 4: 2, 5: 3, 6: 2, 7: 3, 8: 2,
9: 3, 10: 3, 11: 2}
self.assertEqual(d.dfs, expected)
expected = {'computer': 0, 'eps': 8, 'graph': 10, 'human': 1,
'interface': 2, 'minors': 11, 'response': 3, 'survey': 4,
'system': 5, 'time': 6, 'trees': 9, 'user': 7}
self.assertEqual(d.token2id, expected)
def testFilter(self):
d = Dictionary(self.texts)
d.filter_extremes(no_below=2, no_above=1.0, keep_n=4)
expected = {0: 3, 1: 3, 2: 3, 3: 3}
self.assertEqual(d.dfs, expected)
def test_doc2bow(self):
d = Dictionary([["žluťoučký"], ["žluťoučký"]])
# pass a utf8 string
self.assertEqual(d.doc2bow(["žluťoučký"]), [(0, 1)])
# doc2bow must raise a TypeError if passed a string instead of array of strings by accident
self.assertRaises(TypeError, d.doc2bow, "žluťoučký")
# unicode must be converted to utf8
self.assertEqual(d.doc2bow([u'\u017elu\u0165ou\u010dk\xfd']), [(0, 1)])
def test_saveAsText_and_loadFromText(self):
"""`Dictionary` can be saved as textfile and loaded again from textfile. """
tmpf = get_tmpfile('dict_test.txt')
for sort_by_word in [True, False]:
d = Dictionary(self.texts)
d.save_as_text(tmpf, sort_by_word=sort_by_word)
self.assertTrue(os.path.exists(tmpf))
d_loaded = Dictionary.load_from_text(tmpf)
self.assertNotEqual(d_loaded, None)
self.assertEqual(d_loaded.token2id, d.token2id)
def test_from_corpus(self):
"""build `Dictionary` from an existing corpus"""
documents = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
stoplist = set('for a of the and to in'.split())
texts = [[word for word in document.lower().split() if word not in stoplist]
for document in documents]
# remove words that appear only once
all_tokens = sum(texts, [])
tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)
texts = [[word for word in text if word not in tokens_once]
for text in texts]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
# Create dictionary from corpus without a token map
dictionary_from_corpus = Dictionary.from_corpus(corpus)
dict_token2id_vals = sorted(dictionary.token2id.values())
dict_from_corpus_vals = sorted(dictionary_from_corpus.token2id.values())
self.assertEqual(dict_token2id_vals, dict_from_corpus_vals)
self.assertEqual(dictionary.dfs, dictionary_from_corpus.dfs)
self.assertEqual(dictionary.num_docs, dictionary_from_corpus.num_docs)
self.assertEqual(dictionary.num_pos, dictionary_from_corpus.num_pos)
self.assertEqual(dictionary.num_nnz, dictionary_from_corpus.num_nnz)
# Create dictionary from corpus with an id=>token map
dictionary_from_corpus_2 = Dictionary.from_corpus(corpus, id2word=dictionary)
self.assertEqual(dictionary.token2id, dictionary_from_corpus_2.token2id)
self.assertEqual(dictionary.dfs, dictionary_from_corpus_2.dfs)
self.assertEqual(dictionary.num_docs, dictionary_from_corpus_2.num_docs)
self.assertEqual(dictionary.num_pos, dictionary_from_corpus_2.num_pos)
self.assertEqual(dictionary.num_nnz, dictionary_from_corpus_2.num_nnz)
# Ensure Sparse2Corpus is compatible with from_corpus
bow = gensim.matutils.Sparse2Corpus(scipy.sparse.rand(10, 100))
dictionary = Dictionary.from_corpus(bow)
self.assertEqual(dictionary.num_docs, 100)
def test_dict_interface(self):
"""Test Python 2 dict-like interface in both Python 2 and 3."""
d = Dictionary(self.texts)
self.assertTrue(isinstance(d, Mapping))
self.assertEqual(list(zip(d.keys(), d.values())), list(d.items()))
# Even in Py3, we want the iter* members.
self.assertEqual(list(d.items()), list(d.iteritems()))
self.assertEqual(list(d.keys()), list(d.iterkeys()))
self.assertEqual(list(d.values()), list(d.itervalues()))
# XXX Do we want list results from the dict members in Py3 too?
if not PY3:
self.assertTrue(isinstance(d.items(), list))
self.assertTrue(isinstance(d.keys(), list))
self.assertTrue(isinstance(d.values(), list))
#endclass TestDictionary
if __name__ == '__main__':
logging.basicConfig(level=logging.WARNING)
unittest.main()
| ccri/gensim | gensim/test/test_corpora_dictionary.py | Python | gpl-3.0 | 7,902 |
#!/usr/bin/env python
import mozhttpd
import mozfile
import os
import tempfile
import unittest
class TestBasic(unittest.TestCase):
""" Test basic Mozhttpd capabilites """
def test_basic(self):
""" Test mozhttpd can serve files """
tempdir = tempfile.mkdtemp()
# sizes is a dict of the form: name -> [size, binary_string, filepath]
sizes = {'small': [128], 'large': [16384]}
for k in sizes.keys():
# Generate random binary string
sizes[k].append(os.urandom(sizes[k][0]))
# Add path of file with binary string to list
fpath = os.path.join(tempdir, k)
sizes[k].append(fpath)
# Write binary string to file
with open(fpath, 'wb') as f:
f.write(sizes[k][1])
server = mozhttpd.MozHttpd(docroot=tempdir)
server.start()
server_url = server.get_url()
        # Retrieve file and check contents match up
for k in sizes.keys():
retrieved_content = mozfile.load(server_url + k).read()
self.assertEqual(retrieved_content, sizes[k][1])
# Cleanup tempdir and related files
mozfile.rmtree(tempdir)
if __name__ == '__main__':
unittest.main()
| vladikoff/fxa-mochitest | tests/mozbase/mozhttpd/tests/basic.py | Python | mpl-2.0 | 1,260 |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import socket
def get_free_port():
    """Return a TCP port that was free at probe time.

    Binding to port 0 lets the OS pick an unused ephemeral port; there is a
    small race window between closing the probe socket and reusing the port.
    """
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
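# Illustrative sketch (not part of the original module): grab a port for a
# test server and bind to it promptly -- the port is only guaranteed free at
# the moment it was probed.
if __name__ == '__main__':
    print('free port: %d' % get_free_port())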
| safchain/contrail-sandesh | library/python/pysandesh/test/test_utils.py | Python | apache-2.0 | 258 |
# -*- coding: utf-8 -
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# Copyright 2011 Cloudant, Inc.
from bucky.metrics.metric import Metric, MetricValue as MV
class Gauge(Metric):
def __init__(self, name):
self.name = name
self.value = 0.0
def update(self, value):
self.value = value
def clear(self):
pass
def metrics(self):
return [MV(self.name, self.value)]
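# Hedged usage sketch (illustrative, not part of bucky): a Gauge is
# last-value-wins, so metrics() reports only the most recent update().
# The metric name "queue.depth" is an assumed example.
if __name__ == '__main__':
    g = Gauge("queue.depth")
    g.update(17.0)
    g.update(23.0)  # overwrites 17.0; gauges do not accumulate
    print(g.metrics())  # a single MetricValue carrying 23.0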
| JoseKilo/bucky | bucky/metrics/gauge.py | Python | apache-2.0 | 922 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration test for sequence feature columns with SequenceExamples."""
import string
import tempfile
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column import sequence_feature_column as sfc
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class SequenceExampleParsingTest(test.TestCase):
def test_seq_ex_in_sequence_categorical_column_with_identity(self):
self._test_parsed_sequence_example(
'int_list', sfc.sequence_categorical_column_with_identity,
10, [3, 6], [2, 4, 6])
def test_seq_ex_in_sequence_categorical_column_with_hash_bucket(self):
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_hash_bucket,
10, [3, 4], [compat.as_bytes(x) for x in 'acg'])
def test_seq_ex_in_sequence_categorical_column_with_vocabulary_list(self):
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_vocabulary_list,
list(string.ascii_lowercase), [3, 4],
[compat.as_bytes(x) for x in 'acg'])
def test_seq_ex_in_sequence_categorical_column_with_vocabulary_file(self):
_, fname = tempfile.mkstemp()
with open(fname, 'w') as f:
f.write(string.ascii_lowercase)
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_vocabulary_file,
fname, [3, 4], [compat.as_bytes(x) for x in 'acg'])
def _test_parsed_sequence_example(
self, col_name, col_fn, col_arg, shape, values):
"""Helper function to check that each FeatureColumn parses correctly.
Args:
col_name: string, name to give to the feature column. Should match
the name that the column will parse out of the features dict.
col_fn: function used to create the feature column. For example,
sequence_numeric_column.
col_arg: second arg that the target feature column is expecting.
shape: the expected dense_shape of the feature after parsing into
a SparseTensor.
values: the expected values at index [0, 2, 6] of the feature
after parsing into a SparseTensor.
"""
example = _make_sequence_example()
columns = [
fc.categorical_column_with_identity('int_ctx', num_buckets=100),
fc.numeric_column('float_ctx'),
col_fn(col_name, col_arg)
]
context, seq_features = parsing_ops.parse_single_sequence_example(
example.SerializeToString(),
context_features=fc.make_parse_example_spec_v2(columns[:2]),
sequence_features=fc.make_parse_example_spec_v2(columns[2:]))
with self.cached_session() as sess:
ctx_result, seq_result = sess.run([context, seq_features])
self.assertEqual(list(seq_result[col_name].dense_shape), shape)
self.assertEqual(
list(seq_result[col_name].values[[0, 2, 6]]), values)
self.assertEqual(list(ctx_result['int_ctx'].dense_shape), [1])
self.assertEqual(ctx_result['int_ctx'].values[0], 5)
self.assertEqual(list(ctx_result['float_ctx'].shape), [1])
self.assertAlmostEqual(ctx_result['float_ctx'][0], 123.6, places=1)
_SEQ_EX_PROTO = """
context {
feature {
key: "float_ctx"
value {
float_list {
value: 123.6
}
}
}
feature {
key: "int_ctx"
value {
int64_list {
value: 5
}
}
}
}
feature_lists {
feature_list {
key: "bytes_list"
value {
feature {
bytes_list {
value: "a"
}
}
feature {
bytes_list {
value: "b"
value: "c"
}
}
feature {
bytes_list {
value: "d"
value: "e"
value: "f"
value: "g"
}
}
}
}
feature_list {
key: "float_list"
value {
feature {
float_list {
value: 1.0
}
}
feature {
float_list {
value: 3.0
value: 3.0
value: 3.0
}
}
feature {
float_list {
value: 5.0
value: 5.0
value: 5.0
value: 5.0
value: 5.0
}
}
}
}
feature_list {
key: "int_list"
value {
feature {
int64_list {
value: 2
value: 2
}
}
feature {
int64_list {
value: 4
value: 4
value: 4
value: 4
}
}
feature {
int64_list {
value: 6
value: 6
value: 6
value: 6
value: 6
value: 6
}
}
}
}
}
"""
def _make_sequence_example():
example = example_pb2.SequenceExample()
return text_format.Parse(_SEQ_EX_PROTO, example)
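# Worked example, derived from _SEQ_EX_PROTO above: 'int_list' holds three
# timesteps with 2, 4 and 6 values, so parsing yields a SparseTensor with
# dense_shape [3, 6], and values[[0, 2, 6]] == [2, 4, 6] -- exactly the
# `shape` and `values` expectations passed in the identity-column test.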
if __name__ == '__main__':
test.main()
| tensorflow/tensorflow | tensorflow/python/feature_column/sequence_feature_column_integration_test.py | Python | apache-2.0 | 5,669 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""This module includes various useful functions for setting a webserver up."""
import sys, logging, os
def processPyPath(ServerConfig):
"""Use ServerConfig to add to the python path."""
if ServerConfig.get('pypath_append'):
path_append = ServerConfig['pypath_append'].split(':')
#expand all ~'s in the list
path_append = [os.path.expanduser(path) for path in path_append]
sys.path.extend(path_append)
if ServerConfig.get('pypath_prepend'):
path_prepend = ServerConfig['pypath_prepend'].split(':')
path_prepend.reverse()
for path in path_prepend:
path = os.path.expanduser(path)
sys.path.insert(0, path)
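# Worked example (hypothetical config): with
#     ServerConfig = {'pypath_prepend': '~/first:~/second'}
# the reverse() plus insert(0, ...) loop above leaves
#     sys.path == [expanduser('~/first'), expanduser('~/second'), <old entries>]
# i.e. prepended entries keep their configured order at the front of the path.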
def normalizeUrlList(url_list):
"""Add necessary default entries that the user did not enter."""
    for entry in url_list:  # 'entry' avoids shadowing the builtin 'dict'
        if not entry.get('kp.app_object'):
            entry['kp.app_object'] = 'application'
def normalizeWsgiVars(WsgiConfig):
"""Put WSGI config data in a state that the server expects."""
WsgiConfig['wsgi_ver'] = tuple(WsgiConfig['wsgi_ver'].split('.'))
def initializeLogger(consolename='kamaelia'):
"""This sets up the logging system."""
formatter = logging.Formatter('%(levelname)s/%(name)s: %(message)s')
console = logging.StreamHandler(sys.stdout)
console.setFormatter(formatter)
consolelogger = logging.getLogger(consolename)
consolelogger.setLevel(logging.DEBUG)
consolelogger.addHandler(console)
from Kamaelia.Apps.JMB.Common.Console import setConsoleName
setConsoleName(consolename)
from atexit import register
register(killLoggers)
def killLoggers():
"""Shuts down the logging system and flushes input."""
logging.shutdown()
| sparkslabs/kamaelia_ | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/JMB/Common/ServerSetup.py | Python | apache-2.0 | 2,672 |
"""Plugwise Climate component for Home Assistant."""
import logging
from Plugwise_Smile.Smile import Smile
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.core import callback
from . import SmileGateway
from .const import DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP, DOMAIN, SCHEDULE_OFF, SCHEDULE_ON
HVAC_MODES_HEAT_ONLY = [HVAC_MODE_HEAT, HVAC_MODE_AUTO]
HVAC_MODES_HEAT_COOL = [HVAC_MODE_HEAT_COOL, HVAC_MODE_AUTO]
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Smile Thermostats from a config entry."""
api = hass.data[DOMAIN][config_entry.entry_id]["api"]
coordinator = hass.data[DOMAIN][config_entry.entry_id]["coordinator"]
entities = []
thermostat_classes = [
"thermostat",
"zone_thermostat",
"thermostatic_radiator_valve",
]
all_devices = api.get_all_devices()
for dev_id, device_properties in all_devices.items():
if device_properties["class"] not in thermostat_classes:
continue
thermostat = PwThermostat(
api,
coordinator,
device_properties["name"],
dev_id,
device_properties["location"],
device_properties["class"],
DEFAULT_MIN_TEMP,
DEFAULT_MAX_TEMP,
)
entities.append(thermostat)
async_add_entities(entities, True)
class PwThermostat(SmileGateway, ClimateEntity):
"""Representation of an Plugwise thermostat."""
def __init__(
self, api, coordinator, name, dev_id, loc_id, model, min_temp, max_temp
):
"""Set up the Plugwise API."""
super().__init__(api, coordinator, name, dev_id)
self._api = api
self._loc_id = loc_id
self._model = model
self._min_temp = min_temp
self._max_temp = max_temp
self._selected_schema = None
self._last_active_schema = None
self._preset_mode = None
self._presets = None
self._presets_list = None
self._heating_state = None
self._cooling_state = None
self._compressor_state = None
self._dhw_state = None
self._hvac_mode = None
self._schema_names = None
self._schema_status = None
self._temperature = None
self._setpoint = None
self._water_pressure = None
self._schedule_temp = None
self._single_thermostat = self._api.single_master_thermostat()
self._unique_id = f"{dev_id}-climate"
@property
def hvac_action(self):
"""Return the current action."""
if self._single_thermostat:
if self._heating_state:
return CURRENT_HVAC_HEAT
if self._cooling_state:
return CURRENT_HVAC_COOL
return CURRENT_HVAC_IDLE
if self._heating_state is not None:
if self._setpoint > self._temperature:
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attributes = {}
if self._schema_names:
attributes["available_schemas"] = self._schema_names
if self._selected_schema:
attributes["selected_schema"] = self._selected_schema
return attributes
@property
def preset_modes(self):
"""Return the available preset modes list."""
return self._presets_list
@property
def hvac_modes(self):
"""Return the available hvac modes list."""
if self._heating_state is not None:
if self._compressor_state is not None:
return HVAC_MODES_HEAT_COOL
return HVAC_MODES_HEAT_ONLY
@property
def hvac_mode(self):
"""Return current active hvac state."""
return self._hvac_mode
@property
def target_temperature(self):
"""Return the target_temperature."""
return self._setpoint
@property
def preset_mode(self):
"""Return the active preset."""
if self._presets:
return self._preset_mode
return None
@property
def current_temperature(self):
"""Return the current room temperature."""
return self._temperature
@property
def min_temp(self):
"""Return the minimal temperature possible to set."""
return self._min_temp
@property
def max_temp(self):
"""Return the maximum temperature possible to set."""
return self._max_temp
@property
def temperature_unit(self):
"""Return the unit of measured temperature."""
return TEMP_CELSIUS
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if (temperature is not None) and (
self._min_temp < temperature < self._max_temp
):
try:
await self._api.set_temperature(self._loc_id, temperature)
self._setpoint = temperature
self.async_write_ha_state()
except Smile.PlugwiseError:
_LOGGER.error("Error while communicating to device")
else:
_LOGGER.error("Invalid temperature requested")
async def async_set_hvac_mode(self, hvac_mode):
"""Set the hvac mode."""
state = SCHEDULE_OFF
if hvac_mode == HVAC_MODE_AUTO:
state = SCHEDULE_ON
try:
await self._api.set_temperature(self._loc_id, self._schedule_temp)
self._setpoint = self._schedule_temp
except Smile.PlugwiseError:
_LOGGER.error("Error while communicating to device")
try:
await self._api.set_schedule_state(
self._loc_id, self._last_active_schema, state
)
self._hvac_mode = hvac_mode
self.async_write_ha_state()
except Smile.PlugwiseError:
_LOGGER.error("Error while communicating to device")
async def async_set_preset_mode(self, preset_mode):
"""Set the preset mode."""
try:
await self._api.set_preset(self._loc_id, preset_mode)
self._preset_mode = preset_mode
self._setpoint = self._presets.get(self._preset_mode, "none")[0]
self.async_write_ha_state()
except Smile.PlugwiseError:
_LOGGER.error("Error while communicating to device")
@callback
def _async_process_data(self):
"""Update the data for this climate device."""
climate_data = self._api.get_device_data(self._dev_id)
heater_central_data = self._api.get_device_data(self._api.heater_id)
if "setpoint" in climate_data:
self._setpoint = climate_data["setpoint"]
if "temperature" in climate_data:
self._temperature = climate_data["temperature"]
if "schedule_temperature" in climate_data:
self._schedule_temp = climate_data["schedule_temperature"]
if "available_schedules" in climate_data:
self._schema_names = climate_data["available_schedules"]
if "selected_schedule" in climate_data:
self._selected_schema = climate_data["selected_schedule"]
self._schema_status = False
if self._selected_schema is not None:
self._schema_status = True
if "last_used" in climate_data:
self._last_active_schema = climate_data["last_used"]
if "presets" in climate_data:
self._presets = climate_data["presets"]
if self._presets:
self._presets_list = list(self._presets)
if "active_preset" in climate_data:
self._preset_mode = climate_data["active_preset"]
if heater_central_data.get("heating_state") is not None:
self._heating_state = heater_central_data["heating_state"]
if heater_central_data.get("cooling_state") is not None:
self._cooling_state = heater_central_data["cooling_state"]
if heater_central_data.get("compressor_state") is not None:
self._compressor_state = heater_central_data["compressor_state"]
if self._schema_status:
self._hvac_mode = HVAC_MODE_AUTO
elif self._heating_state is not None:
self._hvac_mode = HVAC_MODE_HEAT
if self._compressor_state is not None:
self._hvac_mode = HVAC_MODE_HEAT_COOL
self.async_write_ha_state()
| robbiet480/home-assistant | homeassistant/components/plugwise/climate.py | Python | apache-2.0 | 9,147 |
"""Check for nonlocal and used-before-assignment"""
# pylint: disable=missing-docstring, unused-variable, no-init, too-few-public-methods
__revision__ = 0
def test_ok():
""" uses nonlocal """
cnt = 1
def wrap():
nonlocal cnt
cnt = cnt + 1
wrap()
def test_fail():
""" doesn't use nonlocal """
cnt = 1
def wrap():
cnt = cnt + 1 # [used-before-assignment]
wrap()
def test_fail2():
""" use nonlocal, but for other variable """
cnt = 1
count = 1
def wrap():
nonlocal count
cnt = cnt + 1 # [used-before-assignment]
wrap()
def test_fail3(arg: test_fail4): # [used-before-assignment]
""" Depends on `test_fail4`, in argument annotation. """
return arg
# +1: [used-before-assignment, used-before-assignment]
def test_fail4(*args: test_fail5, **kwargs: undefined):
""" Depends on `test_fail5` and `undefined` in
variable and named arguments annotations.
"""
return args, kwargs
def test_fail5()->undefined1: # [used-before-assignment]
""" Depends on `undefined1` in function return annotation. """
def undefined():
""" no op """
def undefined1():
""" no op """
| arju88nair/projectCulminate | venv/lib/python3.5/site-packages/pylint/test/functional/used_before_assignment_nonlocal.py | Python | apache-2.0 | 1,234 |
"""Support for Geofency."""
import logging
from aiohttp import web
import voluptuous as vol
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.const import (
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_NAME,
CONF_WEBHOOK_ID,
HTTP_OK,
HTTP_UNPROCESSABLE_ENTITY,
STATE_NOT_HOME,
)
from homeassistant.helpers import config_entry_flow
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.util import slugify
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
CONF_MOBILE_BEACONS = "mobile_beacons"
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN): vol.Schema(
{
vol.Optional(CONF_MOBILE_BEACONS, default=[]): vol.All(
cv.ensure_list, [cv.string]
)
}
)
},
extra=vol.ALLOW_EXTRA,
)
ATTR_ADDRESS = "address"
ATTR_BEACON_ID = "beaconUUID"
ATTR_CURRENT_LATITUDE = "currentLatitude"
ATTR_CURRENT_LONGITUDE = "currentLongitude"
ATTR_DEVICE = "device"
ATTR_ENTRY = "entry"
BEACON_DEV_PREFIX = "beacon"
LOCATION_ENTRY = "1"
LOCATION_EXIT = "0"
TRACKER_UPDATE = f"{DOMAIN}_tracker_update"
def _address(value: str) -> str:
r"""Coerce address by replacing '\n' with ' '."""
return value.replace("\n", " ")
WEBHOOK_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ADDRESS): vol.All(cv.string, _address),
vol.Required(ATTR_DEVICE): vol.All(cv.string, slugify),
vol.Required(ATTR_ENTRY): vol.Any(LOCATION_ENTRY, LOCATION_EXIT),
vol.Required(ATTR_LATITUDE): cv.latitude,
vol.Required(ATTR_LONGITUDE): cv.longitude,
vol.Required(ATTR_NAME): vol.All(cv.string, slugify),
vol.Optional(ATTR_CURRENT_LATITUDE): cv.latitude,
vol.Optional(ATTR_CURRENT_LONGITUDE): cv.longitude,
vol.Optional(ATTR_BEACON_ID): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
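# Hedged example payload (values are illustrative only) that satisfies
# WEBHOOK_SCHEMA above; Geofency POSTs form-encoded data of this shape:
#
#     {
#         "address": "1 Example St\nExample City",  # _address() turns '\n' into ' '
#         "device": "My Phone",                      # slugified to "my_phone"
#         "entry": "1",                              # LOCATION_ENTRY
#         "latitude": 51.5,
#         "longitude": -0.12,
#         "name": "Home",                            # slugified to "home"
#     }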
async def async_setup(hass, hass_config):
"""Set up the Geofency component."""
config = hass_config.get(DOMAIN, {})
mobile_beacons = config.get(CONF_MOBILE_BEACONS, [])
hass.data[DOMAIN] = {
"beacons": [slugify(beacon) for beacon in mobile_beacons],
"devices": set(),
"unsub_device_tracker": {},
}
return True
async def handle_webhook(hass, webhook_id, request):
"""Handle incoming webhook from Geofency."""
try:
data = WEBHOOK_SCHEMA(dict(await request.post()))
except vol.MultipleInvalid as error:
return web.Response(text=error.error_message, status=HTTP_UNPROCESSABLE_ENTITY)
if _is_mobile_beacon(data, hass.data[DOMAIN]["beacons"]):
return _set_location(hass, data, None)
if data["entry"] == LOCATION_ENTRY:
location_name = data["name"]
else:
location_name = STATE_NOT_HOME
if ATTR_CURRENT_LATITUDE in data:
data[ATTR_LATITUDE] = data[ATTR_CURRENT_LATITUDE]
data[ATTR_LONGITUDE] = data[ATTR_CURRENT_LONGITUDE]
return _set_location(hass, data, location_name)
def _is_mobile_beacon(data, mobile_beacons):
"""Check if we have a mobile beacon."""
return ATTR_BEACON_ID in data and data["name"] in mobile_beacons
def _device_name(data):
"""Return name of device tracker."""
if ATTR_BEACON_ID in data:
return f"{BEACON_DEV_PREFIX}_{data['name']}"
return data["device"]
def _set_location(hass, data, location_name):
"""Fire HA event to set location."""
device = _device_name(data)
async_dispatcher_send(
hass,
TRACKER_UPDATE,
device,
(data[ATTR_LATITUDE], data[ATTR_LONGITUDE]),
location_name,
data,
)
return web.Response(text=f"Setting location for {device}", status=HTTP_OK)
async def async_setup_entry(hass, entry):
"""Configure based on config entry."""
hass.components.webhook.async_register(
DOMAIN, "Geofency", entry.data[CONF_WEBHOOK_ID], handle_webhook
)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, DEVICE_TRACKER)
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
hass.data[DOMAIN]["unsub_device_tracker"].pop(entry.entry_id)()
await hass.config_entries.async_forward_entry_unload(entry, DEVICE_TRACKER)
return True
# pylint: disable=invalid-name
async_remove_entry = config_entry_flow.webhook_async_remove_entry
| nkgilley/home-assistant | homeassistant/components/geofency/__init__.py | Python | apache-2.0 | 4,573 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestSecuritygroup(helpers.TestCase):
SEC_GROUP_NAME = helpers.gen_random_resource_name("securitygroup")
RULE_PORT = str(random.randint(9000, 9999))
@property
def securitygroup_page(self):
return self.home_pg.\
go_to_compute_accessandsecurity_securitygroupspage()
def _create_securitygroup(self):
page = self.securitygroup_page
page.create_securitygroup(self.SEC_GROUP_NAME)
self.assertTrue(page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(page.is_securitygroup_present(self.SEC_GROUP_NAME))
def _delete_securitygroup(self):
page = self.securitygroup_page
page.delete_securitygroup(self.SEC_GROUP_NAME)
self.assertTrue(page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(page.is_securitygroup_present(self.SEC_GROUP_NAME))
def _add_rule(self):
page = self.securitygroup_page
page = page.go_to_manage_rules(self.SEC_GROUP_NAME)
page.create_rule(self.RULE_PORT)
self.assertTrue(page.find_message_and_dismiss(messages.SUCCESS))
self.assertTrue(page.is_port_present(self.RULE_PORT))
def _delete_rule_by_table_action(self):
page = self.securitygroup_page
page = page.go_to_manage_rules(self.SEC_GROUP_NAME)
page.delete_rules(self.RULE_PORT)
self.assertTrue(page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(page.is_port_present(self.RULE_PORT))
def _delete_rule_by_row_action(self):
page = self.securitygroup_page
page = page.go_to_manage_rules(self.SEC_GROUP_NAME)
page.delete_rule(self.RULE_PORT)
self.assertTrue(page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(page.is_port_present(self.RULE_PORT))
def test_securitygroup_create_delete(self):
"""tests the security group creation and deletion functionalities:
* creates a new security group
* verifies the security group appears in the security groups table
* deletes the newly created security group
* verifies the security group does not appear in the table after
deletion
"""
self._create_securitygroup()
self._delete_securitygroup()
def test_managerules_create_delete_by_row(self):
"""tests the manage rules creation and deletion functionalities:
* create a new security group
* verifies the security group appears in the security groups table
* creates a new rule
* verifies the rule appears in the rules table
* delete the newly created rule
* verifies the rule does not appear in the table after deletion
* deletes the newly created security group
* verifies the security group does not appear in the table after
deletion
"""
self._create_securitygroup()
self._add_rule()
self._delete_rule_by_row_action()
self._delete_securitygroup()
def test_managerules_create_delete_by_table(self):
"""tests the manage rules creation and deletion functionalities:
* create a new security group
* verifies the security group appears in the security groups table
* creates a new rule
* verifies the rule appears in the rules table
* delete the newly created rule
* verifies the rule does not appear in the table after deletion
* deletes the newly created security group
* verifies the security group does not appear in the table after
deletion
"""
self._create_securitygroup()
self._add_rule()
self._delete_rule_by_table_action()
self._delete_securitygroup()
| coreycb/horizon | openstack_dashboard/test/integration_tests/tests/test_security_groups.py | Python | apache-2.0 | 4,753 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
import datetime
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class ComplexOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def pser(self):
return pd.Series([[1, 2, 3]])
@property
def psser(self):
return ps.from_pandas(self.pser)
@property
def numeric_array_pdf(self):
psers = {
"int": pd.Series([[1, 2, 3]]),
"float": pd.Series([[0.1, 0.2, 0.3]]),
"decimal": pd.Series([[decimal.Decimal(1), decimal.Decimal(2), decimal.Decimal(3)]]),
}
return pd.concat(psers, axis=1)
@property
def numeric_array_psdf(self):
return ps.from_pandas(self.numeric_array_pdf)
@property
def numeric_array_df_cols(self):
return self.numeric_array_pdf.columns
@property
def non_numeric_array_pdf(self):
psers = {
"string": pd.Series([["x", "y", "z"]]),
"date": pd.Series(
[[datetime.date(1994, 1, 1), datetime.date(1994, 1, 2), datetime.date(1994, 1, 3)]]
),
"bool": pd.Series([[True, True, False]]),
}
return pd.concat(psers, axis=1)
@property
def non_numeric_array_psdf(self):
return ps.from_pandas(self.non_numeric_array_pdf)
@property
def non_numeric_array_df_cols(self):
return self.non_numeric_array_pdf.columns
@property
def array_pdf(self):
return pd.concat([self.numeric_array_pdf, self.non_numeric_array_pdf], axis=1)
@property
def array_psdf(self):
return ps.from_pandas(self.array_pdf)
@property
def array_df_cols(self):
return self.array_pdf.columns
@property
def complex_pdf(self):
psers = {
"this_array": self.pser,
"that_array": pd.Series([[2, 3, 4]]),
"this_struct": pd.Series([("x", 1)]),
"that_struct": pd.Series([("a", 2)]),
}
return pd.concat(psers, axis=1)
@property
def complex_psdf(self):
pssers = {
"this_array": self.psser,
"that_array": ps.Series([[2, 3, 4]]),
"this_struct": ps.Index([("x", 1)]).to_series().reset_index(drop=True),
"that_struct": ps.Index([("a", 2)]).to_series().reset_index(drop=True),
}
return ps.concat(pssers, axis=1)
def test_add(self):
pdf, psdf = self.array_pdf, self.array_psdf
for col in self.array_df_cols:
self.assert_eq(pdf[col] + pdf[col], psdf[col] + psdf[col])
# Numeric array + Numeric array
for col in self.numeric_array_df_cols:
pser1, psser1 = pdf[col], psdf[col]
for other_col in self.numeric_array_df_cols:
pser2, psser2 = pdf[other_col], psdf[other_col]
self.assert_eq((pser1 + pser2).sort_values(), (psser1 + psser2).sort_values())
# Non-numeric array + Non-numeric array
self.assertRaises(
TypeError,
lambda: psdf["string"] + psdf["bool"],
)
self.assertRaises(
TypeError,
lambda: psdf["string"] + psdf["date"],
)
self.assertRaises(
TypeError,
lambda: psdf["bool"] + psdf["date"],
)
for col in self.non_numeric_array_df_cols:
pser, psser = pdf[col], psdf[col]
self.assert_eq(pser + pser, psser + psser)
# Numeric array + Non-numeric array
for numeric_col in self.numeric_array_df_cols:
for non_numeric_col in self.non_numeric_array_df_cols:
self.assertRaises(TypeError, lambda: psdf[numeric_col] + psdf[non_numeric_col])
def test_sub(self):
self.assertRaises(TypeError, lambda: self.psser - "x")
self.assertRaises(TypeError, lambda: self.psser - 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] - psdf[other_col])
def test_mul(self):
self.assertRaises(TypeError, lambda: self.psser * "x")
self.assertRaises(TypeError, lambda: self.psser * 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] * psdf[other_col])
def test_truediv(self):
self.assertRaises(TypeError, lambda: self.psser / "x")
self.assertRaises(TypeError, lambda: self.psser / 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] / psdf[other_col])
def test_floordiv(self):
self.assertRaises(TypeError, lambda: self.psser // "x")
self.assertRaises(TypeError, lambda: self.psser // 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] // psdf[other_col])
def test_mod(self):
self.assertRaises(TypeError, lambda: self.psser % "x")
self.assertRaises(TypeError, lambda: self.psser % 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] % psdf[other_col])
def test_pow(self):
self.assertRaises(TypeError, lambda: self.psser ** "x")
self.assertRaises(TypeError, lambda: self.psser ** 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] ** psdf[other_col])
def test_radd(self):
self.assertRaises(TypeError, lambda: "x" + self.psser)
self.assertRaises(TypeError, lambda: 1 + self.psser)
def test_rsub(self):
self.assertRaises(TypeError, lambda: "x" - self.psser)
self.assertRaises(TypeError, lambda: 1 - self.psser)
def test_rmul(self):
self.assertRaises(TypeError, lambda: "x" * self.psser)
self.assertRaises(TypeError, lambda: 2 * self.psser)
def test_rtruediv(self):
self.assertRaises(TypeError, lambda: "x" / self.psser)
self.assertRaises(TypeError, lambda: 1 / self.psser)
def test_rfloordiv(self):
self.assertRaises(TypeError, lambda: "x" // self.psser)
self.assertRaises(TypeError, lambda: 1 // self.psser)
def test_rmod(self):
self.assertRaises(TypeError, lambda: 1 % self.psser)
def test_rpow(self):
self.assertRaises(TypeError, lambda: "x" ** self.psser)
self.assertRaises(TypeError, lambda: 1 ** self.psser)
def test_and(self):
self.assertRaises(TypeError, lambda: self.psser & True)
self.assertRaises(TypeError, lambda: self.psser & False)
self.assertRaises(TypeError, lambda: self.psser & self.psser)
def test_rand(self):
self.assertRaises(TypeError, lambda: True & self.psser)
self.assertRaises(TypeError, lambda: False & self.psser)
def test_or(self):
self.assertRaises(TypeError, lambda: self.psser | True)
self.assertRaises(TypeError, lambda: self.psser | False)
self.assertRaises(TypeError, lambda: self.psser | self.psser)
def test_ror(self):
self.assertRaises(TypeError, lambda: True | self.psser)
self.assertRaises(TypeError, lambda: False | self.psser)
def test_from_to_pandas(self):
pdf, psdf = self.array_pdf, self.array_psdf
for col in self.array_df_cols:
pser, psser = pdf[col], psdf[col]
self.assert_eq(pser, psser.to_pandas())
self.assert_eq(ps.from_pandas(pser), psser)
def test_isnull(self):
pdf, psdf = self.array_pdf, self.array_psdf
for col in self.array_df_cols:
pser, psser = pdf[col], psdf[col]
self.assert_eq(pser.isnull(), psser.isnull())
def test_astype(self):
self.assert_eq(self.pser.astype(str), self.psser.astype(str))
def test_neg(self):
self.assertRaises(TypeError, lambda: -self.psser)
def test_abs(self):
self.assertRaises(TypeError, lambda: abs(self.psser))
def test_invert(self):
self.assertRaises(TypeError, lambda: ~self.psser)
def test_eq(self):
        pdf, psdf = self.complex_pdf, self.complex_psdf
self.assert_eq(
pdf["this_array"] == pdf["that_array"], psdf["this_array"] == psdf["that_array"]
)
self.assert_eq(
pdf["this_struct"] == pdf["that_struct"], psdf["this_struct"] == psdf["that_struct"]
)
self.assert_eq(
pdf["this_array"] == pdf["this_array"], psdf["this_array"] == psdf["this_array"]
)
self.assert_eq(
pdf["this_struct"] == pdf["this_struct"], psdf["this_struct"] == psdf["this_struct"]
)
def test_ne(self):
        pdf, psdf = self.complex_pdf, self.complex_psdf
self.assert_eq(
pdf["this_array"] != pdf["that_array"], psdf["this_array"] != psdf["that_array"]
)
self.assert_eq(
pdf["this_struct"] != pdf["that_struct"], psdf["this_struct"] != psdf["that_struct"]
)
self.assert_eq(
pdf["this_array"] != pdf["this_array"], psdf["this_array"] != psdf["this_array"]
)
self.assert_eq(
pdf["this_struct"] != pdf["this_struct"], psdf["this_struct"] != psdf["this_struct"]
)
def test_lt(self):
        pdf, psdf = self.complex_pdf, self.complex_psdf
self.assert_eq(
pdf["this_array"] < pdf["that_array"], psdf["this_array"] < psdf["that_array"]
)
self.assert_eq(
pdf["this_struct"] < pdf["that_struct"], psdf["this_struct"] < psdf["that_struct"]
)
self.assert_eq(
pdf["this_array"] < pdf["this_array"], psdf["this_array"] < psdf["this_array"]
)
self.assert_eq(
pdf["this_struct"] < pdf["this_struct"], psdf["this_struct"] < psdf["this_struct"]
)
def test_le(self):
        pdf, psdf = self.complex_pdf, self.complex_psdf
self.assert_eq(
pdf["this_array"] <= pdf["that_array"], psdf["this_array"] <= psdf["that_array"]
)
self.assert_eq(
pdf["this_struct"] <= pdf["that_struct"], psdf["this_struct"] <= psdf["that_struct"]
)
self.assert_eq(
pdf["this_array"] <= pdf["this_array"], psdf["this_array"] <= psdf["this_array"]
)
self.assert_eq(
pdf["this_struct"] <= pdf["this_struct"], psdf["this_struct"] <= psdf["this_struct"]
)
def test_gt(self):
        pdf, psdf = self.complex_pdf, self.complex_psdf
self.assert_eq(
pdf["this_array"] > pdf["that_array"], psdf["this_array"] > psdf["that_array"]
)
self.assert_eq(
pdf["this_struct"] > pdf["that_struct"], psdf["this_struct"] > psdf["that_struct"]
)
self.assert_eq(
pdf["this_array"] > pdf["this_array"], psdf["this_array"] > psdf["this_array"]
)
self.assert_eq(
pdf["this_struct"] > pdf["this_struct"], psdf["this_struct"] > psdf["this_struct"]
)
def test_ge(self):
        pdf, psdf = self.complex_pdf, self.complex_psdf
self.assert_eq(
pdf["this_array"] >= pdf["that_array"], psdf["this_array"] >= psdf["that_array"]
)
self.assert_eq(
pdf["this_struct"] >= pdf["that_struct"], psdf["this_struct"] >= psdf["that_struct"]
)
self.assert_eq(
pdf["this_array"] >= pdf["this_array"], psdf["this_array"] >= psdf["this_array"]
)
self.assert_eq(
pdf["this_struct"] >= pdf["this_struct"], psdf["this_struct"] >= psdf["this_struct"]
)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.data_type_ops.test_complex_ops import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| chuckchen/spark | python/pyspark/pandas/tests/data_type_ops/test_complex_ops.py | Python | apache-2.0 | 13,290 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the 'License'); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bson
import mock
from st2common.models.db.rule_enforcement import RuleEnforcementDB
from st2common.persistence.rule_enforcement import RuleEnforcement
from st2common.transport.publishers import PoolPublisher
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2tests import DbTestCase
SKIP_DELETE = False
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class RuleEnforcementModelTest(DbTestCase):
def test_ruleenforcment_crud(self):
saved = RuleEnforcementModelTest._create_save_rule_enforcement()
retrieved = RuleEnforcement.get_by_id(saved.id)
self.assertEqual(saved.rule.ref, retrieved.rule.ref,
'Same rule enforcement was not returned.')
self.assertTrue(retrieved.enforced_at is not None)
# test update
RULE_ID = str(bson.ObjectId())
self.assertEqual(retrieved.rule.id, None)
retrieved.rule.id = RULE_ID
saved = RuleEnforcement.add_or_update(retrieved)
retrieved = RuleEnforcement.get_by_id(saved.id)
self.assertEqual(retrieved.rule.id, RULE_ID,
'Update to rule enforcement failed.')
# cleanup
RuleEnforcementModelTest._delete([retrieved])
try:
retrieved = RuleEnforcement.get_by_id(saved.id)
except StackStormDBObjectNotFoundError:
retrieved = None
self.assertIsNone(retrieved, 'managed to retrieve after delete.')
@staticmethod
def _create_save_rule_enforcement():
created = RuleEnforcementDB(trigger_instance_id=str(bson.ObjectId()),
rule={'ref': 'foo_pack.foo_rule',
'uid': 'rule:foo_pack:foo_rule'},
execution_id=str(bson.ObjectId()))
return RuleEnforcement.add_or_update(created)
@staticmethod
def _delete(model_objects):
global SKIP_DELETE
if SKIP_DELETE:
return
for model_object in model_objects:
model_object.delete()
| punalpatel/st2 | st2common/tests/unit/test_db_rule_enforcement.py | Python | apache-2.0 | 2,863 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class InverseOpTest(test.TestCase):
def _verifyInverse(self, x, np_type):
for adjoint in False, True:
y = x.astype(np_type)
with self.cached_session(use_gpu=True):
# Verify that x^{-1} * x == Identity matrix.
inv = linalg_ops.matrix_inverse(y, adjoint=adjoint)
tf_ans = test_util.matmul_without_tf32(inv, y, adjoint_b=adjoint)
np_ans = np.identity(y.shape[-1])
if x.ndim > 2:
tiling = list(y.shape)
tiling[-2:] = [1, 1]
np_ans = np.tile(np_ans, tiling)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-3)
self.assertShapeEqual(y, tf_ans)
def _verifyInverseReal(self, x):
for np_type in [np.float32, np.float64]:
self._verifyInverse(x, np_type)
def _verifyInverseComplex(self, x):
for np_type in [np.complex64, np.complex128]:
self._verifyInverse(x, np_type)
def _makeBatch(self, matrix1, matrix2):
matrix_batch = np.concatenate(
[np.expand_dims(matrix1, 0),
np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
return matrix_batch
def testNonsymmetric(self):
# 2x2 matrices
matrix1 = np.array([[1., 2.], [3., 4.]])
matrix2 = np.array([[1., 3.], [3., 5.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyInverseComplex(matrix1)
self._verifyInverseComplex(matrix2)
# Complex batch
self._verifyInverseComplex(self._makeBatch(matrix1, matrix2))
def testSymmetricPositiveDefinite(self):
# 2x2 matrices
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyInverseComplex(matrix1)
self._verifyInverseComplex(matrix2)
# Complex batch
self._verifyInverseComplex(self._makeBatch(matrix1, matrix2))
@test_util.deprecated_graph_mode_only
def testNonSquareMatrix(self):
    # Attempting to invert a non-square matrix should raise an error.
with self.assertRaises(ValueError):
linalg_ops.matrix_inverse(np.array([[1., 2., 3.], [3., 4., 5.]]))
@test_util.deprecated_graph_mode_only
def testWrongDimensions(self):
# The input to the inverse should be at least a 2-dimensional tensor.
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.matrix_inverse(tensor3)
def testNotInvertible(self):
# The input should be invertible.
with self.cached_session():
with self.assertRaisesOpError("Input is not invertible."):
# All rows of the matrix below add to zero.
tensor3 = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
[0., -1., 1.]])
linalg_ops.matrix_inverse(tensor3).eval()
def testEmpty(self):
self._verifyInverseReal(np.empty([0, 2, 2]))
self._verifyInverseReal(np.empty([2, 0, 0]))
def testRandomSmallAndLarge(self):
np.random.seed(42)
for dtype in np.float32, np.float64, np.complex64, np.complex128:
for batch_dims in [(), (1,), (3,), (2, 2)]:
for size in 8, 31, 32:
shape = batch_dims + (size, size)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape)).reshape(shape).astype(dtype)
self._verifyInverseReal(matrix)
@test_util.deprecated_graph_mode_only
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
all_ops = []
for adjoint_ in True, False:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
inv1 = linalg_ops.matrix_inverse(matrix1, adjoint=adjoint_)
inv2 = linalg_ops.matrix_inverse(matrix2, adjoint=adjoint_)
all_ops += [inv1, inv2]
inv = self.evaluate(all_ops)
self.assertAllEqual(inv[0], inv[1])
self.assertAllEqual(inv[2], inv[3])
class MatrixInverseBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (
2.0 * n) + np.diag(np.ones(n).astype(np.float32))
return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))
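  # Note: ones(shape) / (2n) + I is strictly diagonally dominant -- each
  # diagonal entry is 1 + 1/(2n) while the off-diagonal row sum is only
  # (n-1)/(2n) < 1 -- so every benchmark matrix is guaranteed invertible.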
def benchmarkMatrixInverseOp(self):
for adjoint in False, True:
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(inv),
min_iters=25,
name="matrix_inverse_cpu_{shape}_adjoint_{adjoint}".format(
shape=shape, adjoint=adjoint))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix = self._GenerateMatrix(shape)
inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(inv),
min_iters=25,
name="matrix_inverse_gpu_{shape}_adjoint_{adjoint}".format(
shape=shape, adjoint=adjoint))
if __name__ == "__main__":
test.main()
| karllessard/tensorflow | tensorflow/python/kernel_tests/matrix_inverse_op_test.py | Python | apache-2.0 | 7,837 |
"""
"""
import traceback
from AnyQt.QtWidgets import QWidget, QPlainTextEdit, QVBoxLayout, QSizePolicy
from AnyQt.QtGui import QTextCursor, QTextCharFormat, QFont
from AnyQt.QtCore import Qt, QObject, QCoreApplication, QThread, QSize
from AnyQt.QtCore import pyqtSignal as Signal
class TerminalView(QPlainTextEdit):
def __init__(self, *args, **kwargs):
QPlainTextEdit.__init__(self, *args, **kwargs)
self.setFrameStyle(QPlainTextEdit.NoFrame)
self.setTextInteractionFlags(Qt.TextBrowserInteraction)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
font = self.font()
font.setStyleHint(QFont.Monospace)
font.setFamily("Monospace")
self.setFont(font)
def sizeHint(self):
metrics = self.fontMetrics()
width = metrics.boundingRect("_" * 81).width()
height = metrics.lineSpacing()
scroll_width = self.verticalScrollBar().width()
size = QSize(width + scroll_width, height * 25)
return size
class OutputView(QWidget):
def __init__(self, parent=None, **kwargs):
QWidget.__init__(self, parent, **kwargs)
self.__lines = 5000
self.setLayout(QVBoxLayout())
self.layout().setContentsMargins(0, 0, 0, 0)
        self.__text = TerminalView()
        # Apply the default limit up front; setMaximumLines() only updates
        # the block count when the value actually changes.
        self.__text.setMaximumBlockCount(self.__lines)
        self.__currentCharFormat = self.__text.currentCharFormat()
        self.layout().addWidget(self.__text)
def setMaximumLines(self, lines):
"""
Set the maximum number of lines to keep displayed.
"""
if self.__lines != lines:
self.__lines = lines
self.__text.setMaximumBlockCount(lines)
def maximumLines(self):
"""
Return the maximum number of lines in the display.
"""
return self.__lines
def clear(self):
"""
Clear the displayed text.
"""
self.__text.clear()
def setCurrentCharFormat(self, charformat):
"""Set the QTextCharFormat to be used when writing.
"""
if self.__currentCharFormat != charformat:
self.__currentCharFormat = charformat
def currentCharFormat(self):
return self.__currentCharFormat
def toPlainText(self):
"""
Return the full contents of the output view.
"""
return self.__text.toPlainText()
    # A file-like interface.
def write(self, string):
self.__text.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
self.__text.setCurrentCharFormat(self.__currentCharFormat)
self.__text.insertPlainText(string)
def writelines(self, lines):
self.write("".join(lines))
def flush(self):
pass
def writeWithFormat(self, string, charformat):
self.__text.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
self.__text.setCurrentCharFormat(charformat)
self.__text.insertPlainText(string)
def writelinesWithFormat(self, lines, charformat):
self.writeWithFormat("".join(lines), charformat)
def formated(self, color=None, background=None, weight=None,
italic=None, underline=None, font=None):
"""
        Return a formatted file-like object proxy.
"""
charformat = update_char_format(
self.currentCharFormat(), color, background, weight,
italic, underline, font
)
return formater(self, charformat)
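# Hedged usage sketch (assumes a running QApplication; values illustrative):
#
#     view = OutputView()
#     err = view.formated(color=Qt.red, weight=QFont.Bold)
#     err.write("Traceback (most recent call last):\n")  # rendered bold red
#     view.write("plain text keeps the view's default format\n")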
def update_char_format(baseformat, color=None, background=None, weight=None,
italic=None, underline=None, font=None):
"""
Return a copy of `baseformat` :class:`QTextCharFormat` with
updated color, weight, background and font properties.
"""
charformat = QTextCharFormat(baseformat)
if color is not None:
charformat.setForeground(color)
if background is not None:
charformat.setBackground(background)
if font is not None:
charformat.setFont(font)
else:
font = update_font(baseformat.font(), weight, italic, underline)
charformat.setFont(font)
return charformat
def update_font(basefont, weight=None, italic=None, underline=None,
pixelSize=None, pointSize=None):
"""
Return a copy of `basefont` :class:`QFont` with updated properties.
"""
font = QFont(basefont)
if weight is not None:
font.setWeight(weight)
if italic is not None:
font.setItalic(italic)
if underline is not None:
font.setUnderline(underline)
if pixelSize is not None:
font.setPixelSize(pixelSize)
if pointSize is not None:
font.setPointSize(pointSize)
return font
class formater(object):
def __init__(self, outputview, charformat):
self.outputview = outputview
self.charformat = charformat
def write(self, string):
self.outputview.writeWithFormat(string, self.charformat)
    def writelines(self, lines):
        self.outputview.writelinesWithFormat(lines, self.charformat)
def flush(self):
self.outputview.flush()
def formated(self, color=None, background=None, weight=None,
italic=None, underline=None, font=None):
charformat = update_char_format(self.charformat, color, background,
weight, italic, underline, font)
return formater(self.outputview, charformat)
def __enter__(self):
return self
def __exit__(self, *args):
self.outputview = None
self.charformat = None
class TextStream(QObject):
stream = Signal(str)
flushed = Signal()
def __init__(self, parent=None):
QObject.__init__(self, parent)
def write(self, string):
self.stream.emit(string)
def writelines(self, lines):
self.stream.emit("".join(lines))
def flush(self):
self.flushed.emit()
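# Hedged wiring sketch (illustrative): TextStream routes writes -- even from
# non-GUI threads, via the queued 'stream' signal -- into an OutputView:
#
#     stream = TextStream()
#     stream.stream.connect(view.write)
#     sys.stdout = stream   # print() output now appends to the OutputView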
class ExceptHook(QObject):
handledException = Signal(object)
def __init__(self, parent=None, stream=None, canvas=None, **kwargs):
QObject.__init__(self, parent, **kwargs)
self._stream = stream
self._canvas = canvas
def __call__(self, exc_type, exc_value, tb):
if self._stream:
header = exc_type.__name__ + ' Exception'
if QThread.currentThread() != QCoreApplication.instance().thread():
header += " (in non-GUI thread)"
text = traceback.format_exception(exc_type, exc_value, tb)
text.insert(0, '{:-^79}\n'.format(' ' + header + ' '))
text.append('-' * 79 + '\n')
self._stream.writelines(text)
self.handledException.emit(((exc_type, exc_value, tb), self._canvas))
| cheral/orange3 | Orange/canvas/application/outputview.py | Python | bsd-2-clause | 6,738 |
import unittest
from django.dispatch.saferef import safeRef
from django.utils.six.moves import xrange
class Test1(object):
def x(self):
pass
def test2(obj):
pass
class Test2(object):
def __call__(self, obj):
pass
class SaferefTests(unittest.TestCase):
def setUp(self):
ts = []
ss = []
for x in xrange(5000):
t = Test1()
ts.append(t)
s = safeRef(t.x, self._closure)
ss.append(s)
ts.append(test2)
ss.append(safeRef(test2, self._closure))
for x in xrange(30):
t = Test2()
ts.append(t)
s = safeRef(t, self._closure)
ss.append(s)
self.ts = ts
self.ss = ss
self.closureCount = 0
def tearDown(self):
del self.ts
del self.ss
def testIn(self):
"""Test the "in" operator for safe references (cmp)"""
for t in self.ts[:50]:
self.assertTrue(safeRef(t.x) in self.ss)
def testValid(self):
"""Test that the references are valid (return instance methods)"""
for s in self.ss:
self.assertTrue(s())
def testShortCircuit(self):
"""Test that creation short-circuits to reuse existing references"""
sd = {}
for s in self.ss:
sd[s] = 1
for t in self.ts:
if hasattr(t, 'x'):
self.assertTrue(safeRef(t.x) in sd)
else:
self.assertTrue(safeRef(t) in sd)
def testRepresentation(self):
"""Test that the reference object's representation works
XXX Doesn't currently check the results, just that no error
is raised
"""
repr(self.ss[-1])
def _closure(self, ref):
"""Dumb utility mechanism to increment deletion counter"""
self.closureCount += 1
| Beeblio/django | tests/dispatch/tests/test_saferef.py | Python | bsd-3-clause | 1,886 |
#!/usr/bin/env python
import time
import unittest
import dpkt
class TestPerf(unittest.TestCase):
rounds = 10000
def setUp(self):
self.start = time.time()
def tearDown(self):
print self.rounds / (time.time() - self.start), 'rounds/s'
def test_pack(self):
for i in xrange(self.rounds):
str(dpkt.ip.IP())
print 'pack:',
def test_unpack(self):
buf = str(dpkt.ip.IP())
for i in xrange(self.rounds):
dpkt.ip.IP(buf)
print 'unpack:',
if __name__ == '__main__':
unittest.main()
| lkash/test | tests/test-perf2.py | Python | bsd-3-clause | 590 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
from django.template import Template, Context
from django.core.cache import cache
from djconfig import config
from ...core.tests import utils
from .models import CommentBookmark
from .forms import BookmarkForm
class CommentBookmarkViewTest(TestCase):
def setUp(self):
cache.clear()
self.user = utils.create_user()
self.category = utils.create_category()
self.topic = utils.create_topic(category=self.category, user=self.user)
self.comment = utils.create_comment(topic=self.topic)
def test_bookmark_create(self):
"""
create comment
"""
utils.login(self)
form_data = {'comment_number': 999, }
response = self.client.post(reverse('spirit:comment:bookmark:create', kwargs={'topic_id': self.topic.pk, }),
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
data=form_data)
self.assertEqual(response.status_code, 200)
class CommentBookmarkModelsTest(TestCase):
def setUp(self):
cache.clear()
self.user = utils.create_user()
self.category = utils.create_category()
self.topic = utils.create_topic(category=self.category, user=self.user)
for _ in range(config.comments_per_page * 4): # 4 pages
utils.create_comment(user=self.user, topic=self.topic)
def test_comment_bookmark_update_or_create(self):
"""
Should update or create the comment number
"""
page = 2
CommentBookmark.update_or_create(
user=self.user,
topic=self.topic,
comment_number=CommentBookmark.page_to_comment_number(page)
)
comment_bookmark = CommentBookmark.objects.get(user=self.user, topic=self.topic)
self.assertEqual(comment_bookmark.comment_number, config.comments_per_page * (page - 1) + 1)
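        # Worked example: with config.comments_per_page == 25 (hypothetical),
        # page 2 maps to 25 * (2 - 1) + 1 == 26, i.e. the first comment
        # number on that page.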
def test_comment_bookmark_update_or_create_invalid_page(self):
"""
Should do nothing when receiving an invalid page
"""
page = 'im_a_string'
CommentBookmark.update_or_create(
user=self.user,
topic=self.topic,
comment_number=CommentBookmark.page_to_comment_number(page)
)
self.assertEqual(len(CommentBookmark.objects.all()), 0)
class CommentBookmarkFormTest(TestCase):
def test_form(self):
form_data = {'comment_number': 999, }
form = BookmarkForm(data=form_data)
self.assertEqual(form.is_valid(), True)
class CommentBookmarkTemplateTagsTest(TestCase):
def setUp(self):
cache.clear()
self.user = utils.create_user()
self.category = utils.create_category()
self.topic = utils.create_topic(self.category)
self.comment = utils.create_comment(topic=self.topic)
def populate_bookmarks(self):
"""
should populate the topic's bookmark
"""
bookmark = CommentBookmark.objects.create(user=self.user, topic=self.topic, comment_number=10)
out = Template(
"{% load comment_bookmark %}"
"{% populate_bookmarks topics=topics user=user %}"
"{{ topics.0.bookmark.get_absolute_url }}"
).render(Context({'topics': [self.topic, ], 'user': self.user}))
self.assertEqual(out, bookmark.get_absolute_url())
| gogobook/Spirit | spirit/comment/bookmark/tests.py | Python | mit | 3,505 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner(object):
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
  def __init__(self, dictionary=None):
    # A None default avoids sharing one mutable dict across instances.
    self.subst_dictionary(dictionary if dictionary is not None else {})
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
elif type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
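# Illustrative usage sketch (comments only, not part of gyp itself): a
# CommandRunner accepts either a shell string (substituted via the format
# operator) or a (function, args...) tuple dispatched as a Python call.
#
#   cr = CommandRunner({'exe': sys.executable})
#   cr.run('%(exe)s --version')       # string command: substituted, executed
#   cr.run((os.chdir, '/tmp'))        # tuple command: func(*args)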
class Unbuffered(object):
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
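# Wrap stdout/stderr so every write() is flushed immediately; test output
# then interleaves correctly with output from child processes.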
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
result.sort()
return result
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print >>sys.stderr, arg, 'is not a valid gyp test name.'
sys.exit(1)
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
| hoho/dosido | nodejs/tools/gyp/gyptest.py | Python | mit | 8,032 |
# engine/__init__.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL connections, SQL execution and high-level DB-API interface.
The engine package defines the basic components used to interface
DB-API modules with higher-level statement construction,
connection-management, execution and result contexts. The primary
"entry point" class into this package is the Engine and its public
constructor ``create_engine()``.
This package includes:
base.py
Defines interface classes and some implementation classes which
comprise the basic components used to interface between a DB-API,
constructed and plain-text statements, connections, transactions,
and results.
default.py
Contains default implementations of some of the components defined
in base.py. All current database dialects use the classes in
default.py as base classes for their own database-specific
implementations.
strategies.py
The mechanics of constructing ``Engine`` objects are represented
here. Defines the ``EngineStrategy`` class which represents how
to go from arguments specified to the ``create_engine()``
function, to a fully constructed ``Engine``, including
initialization of connection pooling, dialects, and specific
subclasses of ``Engine``.
threadlocal.py
The ``TLEngine`` class is defined here, which is a subclass of
the generic ``Engine`` and tracks ``Connection`` and
``Transaction`` objects against the identity of the current
thread. This allows certain programming patterns based around
the concept of a "thread-local connection" to be possible.
The ``TLEngine`` is created by using the "threadlocal" engine
strategy in conjunction with the ``create_engine()`` function.
url.py
Defines the ``URL`` class which represents the individual
components of a string URL passed to ``create_engine()``. Also
defines a basic module-loading strategy for the dialect specifier
within a URL.
"""
from .interfaces import (
Connectable,
CreateEnginePlugin,
Dialect,
ExecutionContext,
ExceptionContext,
# backwards compat
Compiled,
TypeCompiler
)
from .base import (
Connection,
Engine,
NestedTransaction,
RootTransaction,
Transaction,
TwoPhaseTransaction,
)
from .result import (
BaseRowProxy,
BufferedColumnResultProxy,
BufferedColumnRow,
BufferedRowResultProxy,
FullyBufferedResultProxy,
ResultProxy,
RowProxy,
)
from .util import (
connection_memoize
)
from . import util, strategies
# backwards compat
from ..sql import ddl
default_strategy = 'plain'
def create_engine(*args, **kwargs):
"""Create a new :class:`.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments::
engine = create_engine("postgresql://scott:tiger@localhost/test")
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`.Engine`
and its underlying :class:`.Dialect` and :class:`.Pool`
constructs::
engine = create_engine("mysql://scott:tiger@hostname/dbname",
encoding='latin1', echo=True)
The string form of the URL is
``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be specific to
the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
:class:`.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`.create_engine()` usage.
Once established, the newly resulting :class:`.Engine` will
request a connection from the underlying :class:`.Pool` once
:meth:`.Engine.connect` is called, or a method which depends on it
such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
.. seealso::
:doc:`/core/engines`
:doc:`/dialects/index`
:ref:`connections_toplevel`
:param case_sensitive=True: if False, result column names
will match in a case-insensitive fashion, that is,
``row['SomeColumn']``.
.. versionchanged:: 0.8
By default, result row names match case-sensitively.
In version 0.7 and prior, all matches were case-insensitive.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
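For example, a minimal sketch (``connect_timeout`` here is a psycopg2
argument; available names vary per DBAPI)::
    engine = create_engine(
        "postgresql://scott:tiger@localhost/test",
        connect_args={'connect_timeout': 10})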
:param convert_unicode=False: if set to True, sets
the default behavior of ``convert_unicode`` on the
:class:`.String` type to ``True``, regardless
of a setting of ``False`` on an individual
:class:`.String` type, thus causing all :class:`.String`
-based columns
to accommodate Python ``unicode`` objects. This flag
is useful as an engine-wide setting when using a
DBAPI that does not natively support Python
``unicode`` objects and raises an error when
one is received (such as pyodbc with FreeTDS).
See :class:`.String` for further details on
what this flag indicates.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
:param echo=False: if True, the Engine will log all statements
as well as a repr() of their parameter lists to the engines
logger, which defaults to sys.stdout. The ``echo`` attribute of
``Engine`` can be modified at any time to turn logging on and
off. If set to the string ``"debug"``, result rows will be
printed to the standard output as well. This flag ultimately
controls a Python logger; see :ref:`dbengine_logging` for
information on how to configure logging directly.
:param echo_pool=False: if True, the connection pool will log
all checkouts/checkins to the logging stream, which defaults to
sys.stdout. This flag ultimately controls a Python logger; see
:ref:`dbengine_logging` for information on how to configure logging
directly.
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPI.** Most modern DBAPIs feature some degree of
direct support for Python ``unicode`` objects,
what you see in Python 2 as a string of the form
``u'some string'``. For those scenarios where the
DBAPI is detected as not supporting a Python ``unicode``
object, this encoding is used to determine the
source/destination encoding. It is **not used**
for those cases where the DBAPI handles unicode
directly.
To properly configure a system to accommodate Python
``unicode`` objects, the DBAPI should be
configured to handle unicode to the greatest
degree as is appropriate - see
the notes on unicode pertaining to the specific
target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support
*all* of the above values as Python ``unicode`` objects,
which in Python 3 are just known as ``str``. In Python 2,
the DBAPI does not specify unicode behavior at all,
so SQLAlchemy must make decisions for each of the above
values on a per-DBAPI basis - implementations are
completely inconsistent in their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.Connection.execution_options`
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including Postgresql, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param isolation_level: this string parameter is interpreted by various
dialects in order to affect the transaction isolation level of the
database connection. The parameter essentially accepts some subset of
these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``,
``"READ_COMMITTED"``, ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``.
Behavior here varies per backend, and
individual dialects should be consulted directly.
Note that the isolation level can also be set on a per-:class:`.Connection`
basis as well, using the
:paramref:`.Connection.execution_options.isolation_level`
feature.
.. seealso::
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`.Connection` isolation level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`Postgresql Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
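For example, a sketch setting the engine-wide level::
    engine = create_engine(
        "postgresql://scott:tiger@localhost/test",
        isolation_level="READ_COMMITTED")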
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length`` is used instead.
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
to five. This is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module
itself, not its string name). Specifies an alternate DBAPI module to
be used by the engine's dialect. Each sub-dialect references a
specific DBAPI which will be imported before first connect. This
parameter causes the import to be bypassed, and the given module to
be used instead. Can be used for testing of DBAPIs as well as to
inject "mock" DBAPI implementations into the :class:`.Engine`.
:param paramstyle=None: The `paramstyle <http://legacy.python.org/dev/peps/pep-0249/#paramstyle>`_
to use when rendering bound parameters. This style defaults to the
one recommended by the DBAPI itself, which is retrieved from the
``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept
more than one paramstyle, and in particular it may be desirable
to change a "named" paramstyle into a "positional" one, or vice versa.
When this attribute is passed, it should be one of the values
``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or
``"pyformat"``, and should correspond to a parameter style known
to be supported by the DBAPI in use.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_size=5: the number of connections to keep open
inside the connection pool. This used with
:class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
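For example, a sketch recycling connections hourly (a common setting for
MySQL's default eight-hour idle timeout)::
    engine = create_engine("mysql://scott:tiger@localhost/test",
                           pool_recycle=3600)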
:param pool_reset_on_return='rollback': set the "reset on return"
behavior of the pool, which is whether ``rollback()``,
``commit()``, or nothing is called upon connections
being returned to the pool. See the docstring for
``reset_on_return`` at :class:`.Pool`.
.. versionadded:: 0.7.6
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param strategy='plain': selects alternate engine implementations.
Currently available are:
* the ``threadlocal`` strategy, which is described in
:ref:`threadlocal_strategy`;
* the ``mock`` strategy, which dispatches all statement
execution to a function passed as the argument ``executor``.
See `example in the FAQ
<http://docs.sqlalchemy.org/en/latest/faq/metadata_schema.html#how-can-i-get-the-create-table-drop-table-output-as-a-string>`_.
:param executor=None: a function taking arguments
``(sql, *multiparams, **params)``, to which the ``mock`` strategy will
dispatch all statement execution. Used only by ``strategy='mock'``.
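For example, a minimal sketch of the ``mock`` strategy that echoes each
statement instead of executing it::
    def dump(sql, *multiparams, **params):
        print(sql.compile(dialect=engine.dialect))
    engine = create_engine('postgresql://', strategy='mock', executor=dump)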
"""
strategy = kwargs.pop('strategy', default_strategy)
strategy = strategies.strategies[strategy]
return strategy.create(*args, **kwargs)
def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
"""Create a new Engine instance using a configuration dictionary.
The dictionary is typically produced from a config file.
The keys of interest to ``engine_from_config()`` should be prefixed, e.g.
``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument
indicates the prefix to be searched for. Each matching key (after the
prefix is stripped) is treated as though it were the corresponding keyword
argument to a :func:`.create_engine` call.
The only required key is (assuming the default prefix) ``sqlalchemy.url``,
which provides the :ref:`database URL <database_urls>`.
A select set of keyword arguments will be "coerced" to their
expected type based on string values. The set of arguments
is extensible per-dialect using the ``engine_config_types`` accessor.
:param configuration: A dictionary (typically produced from a config file,
but this is not a requirement). Items whose keys start with the value
of 'prefix' will have that prefix stripped, and will then be passed to
:func:`.create_engine`.
:param prefix: Prefix to match and then strip from keys
in 'configuration'.
:param kwargs: Each keyword argument to ``engine_from_config()`` itself
overrides the corresponding item taken from the 'configuration'
dictionary. Keyword arguments should *not* be prefixed.
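For example, a minimal sketch (the dictionary values are illustrative)::
    config = {
        'sqlalchemy.url': 'postgresql://scott:tiger@localhost/test',
        'sqlalchemy.echo': 'true',
    }
    engine = engine_from_config(config)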
"""
options = dict((key[len(prefix):], configuration[key])
for key in configuration
if key.startswith(prefix))
options['_coerce_config'] = True
options.update(kwargs)
url = options.pop('url')
return create_engine(url, **options)
__all__ = (
'create_engine',
'engine_from_config',
)
| MarkWh1te/xueqiu_predict | python3_env/lib/python3.4/site-packages/sqlalchemy/engine/__init__.py | Python | mit | 18,857 |
import re
from fir_artifacts.artifacts import AbstractArtifact
class URL(AbstractArtifact):
key = 'url'
display_name = 'URLs'
regex = r"""
(?P<search>
((?P<scheme>[\w]{2,9}):\/\/)?
([\S]*\:[\S]*\@)?
(?P<hostname>(
((([\w\-]+\.)+)
([a-zA-Z]{2,6}))
|([\d]{1,3}\.[\d]{1,3}\.[\d]{1,3}\.[\d]{1,3})
)
)
(\:[\d]{1,5})?
(?P<path>(\/[\/\~\w\-_%\.\*\#\$&%]*)?
(\?[\~\w\-_%\.&=\*\#\$%]*)?
(\#[\S]*)?)
)
"""
@classmethod
def find(cls, data):
urls = []
_re = re.compile(cls.regex, re.VERBOSE)
for i in re.finditer(_re, data):
url = i.group('search')
if url.find('/') != -1:
urls.append(url)
return urls
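# Illustrative usage sketch (not part of FIR itself; the sample text is an
# assumption for demonstration). Note that URL.find() keeps only matches
# containing a slash.
if __name__ == '__main__':
    sample = "Report: kit hosted at http://evil.example.com/login?id=1"
    for url in URL.find(sample):
        print(url)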
| gcrahay/FIR | fir_artifacts/url.py | Python | gpl-3.0 | 884 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Agile Business Group
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields
class AccountTaxRegistry(models.Model):
_name = 'account.tax.registry'
name = fields.Char('Name', required=True)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'account.tax.registry'))
journal_ids = fields.One2many(
'account.journal', 'tax_registry_id', 'Journals', readonly=True)
type = fields.Selection([
('customer', 'Customer Invoices'),
('supplier', 'Supplier Invoices'),
('corrispettivi', 'Corrispettivi'),
], 'Layout', required=True)
| abstract-open-solutions/l10n-italy | l10n_it_vat_registries/account_tax_registry.py | Python | agpl-3.0 | 1,452 |
# -*- coding: utf-8 -*-
"""
Unit tests for LMS instructor-initiated background tasks helper functions.
Tests that CSV grade report generation works with unicode emails.
"""
import ddt
from mock import Mock, patch
import tempfile
from openedx.core.djangoapps.course_groups import cohorts
import unicodecsv
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from certificates.models import CertificateStatuses
from certificates.tests.factories import GeneratedCertificateFactory, CertificateWhitelistFactory
from course_modes.models import CourseMode
from courseware.tests.factories import InstructorFactory
from instructor_task.tests.test_base import InstructorTaskCourseTestCase, TestReportMixin, InstructorTaskModuleTestCase
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
import openedx.core.djangoapps.user_api.course_tag.api as course_tag_api
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from shoppingcart.models import Order, PaidCourseRegistration, CourseRegistrationCode, Invoice, \
CourseRegistrationCodeInvoiceItem, InvoiceTransaction, Coupon
from student.tests.factories import UserFactory, CourseModeFactory
from student.models import CourseEnrollment, CourseEnrollmentAllowed, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED
from verify_student.tests.factories import SoftwareSecurePhotoVerificationFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
from instructor_task.models import ReportStore
from instructor_task.tasks_helper import (
cohort_students_and_upload,
upload_problem_responses_csv,
upload_grades_csv,
upload_problem_grade_report,
upload_students_csv,
upload_may_enroll_csv,
upload_enrollment_report,
upload_exec_summary_report,
generate_students_certificates,
)
from openedx.core.djangoapps.util.testing import ContentGroupTestCase, TestConditionalContent
@ddt.ddt
class TestInstructorGradeReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that CSV grade report generation works.
"""
def setUp(self):
super(TestInstructorGradeReport, self).setUp()
self.course = CourseFactory.create()
@ddt.data([u'student@example.com', u'ni\xf1o@example.com'])
def test_unicode_emails(self, emails):
"""
Test that students with unicode characters in emails are handled.
"""
for i, email in enumerate(emails):
self.create_student('student{0}'.format(i), email)
self.current_task = Mock()
self.current_task.update_state = Mock()
with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task:
mock_current_task.return_value = self.current_task
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
num_students = len(emails)
self.assertDictContainsSubset({'attempted': num_students, 'succeeded': num_students, 'failed': 0}, result)
@patch('instructor_task.tasks_helper._get_current_task')
@patch('instructor_task.tasks_helper.iterate_grades_for')
def test_grading_failure(self, mock_iterate_grades_for, _mock_current_task):
"""
Test that any grading errors are properly reported in the
progress dict and uploaded to the report store.
"""
# mock an error response from `iterate_grades_for`
mock_iterate_grades_for.return_value = [
(self.create_student('username', 'student@example.com'), {}, 'Cannot grade student')
]
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result)
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
self.assertTrue(any('grade_report_err' in item[0] for item in report_store.links_for(self.course.id)))
def _verify_cell_data_for_user(self, username, course_id, column_header, expected_cell_content):
"""
Verify cell data in the grades CSV for a particular user.
"""
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_grades_csv(None, None, course_id, None, 'graded')
self.assertDictContainsSubset({'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
report_csv_filename = report_store.links_for(course_id)[0][0]
with open(report_store.path_to(course_id, report_csv_filename)) as csv_file:
for row in unicodecsv.DictReader(csv_file):
if row.get('username') == username:
self.assertEqual(row[column_header], expected_cell_content)
def test_cohort_data_in_grading(self):
"""
Test that cohort data is included in the grades csv if cohort configuration is enabled for the course.
"""
cohort_groups = ['cohort 1', 'cohort 2']
course = CourseFactory.create(cohort_config={'cohorted': True, 'auto_cohort': True,
'auto_cohort_groups': cohort_groups})
user_1 = 'user_1'
user_2 = 'user_2'
CourseEnrollment.enroll(UserFactory.create(username=user_1), course.id)
CourseEnrollment.enroll(UserFactory.create(username=user_2), course.id)
# In auto cohorting, a group is assigned to a user only when the user visits a problem.
# In the grading calculation we only add a group to the csv if the group is
# already assigned to the user, rather than creating one automatically at runtime.
self._verify_cell_data_for_user(user_1, course.id, 'Cohort Name', '')
self._verify_cell_data_for_user(user_2, course.id, 'Cohort Name', '')
def test_unicode_cohort_data_in_grading(self):
"""
Test that cohorts can contain unicode characters.
"""
course = CourseFactory.create(cohort_config={'cohorted': True})
# Create users and manually assign cohorts
user1 = UserFactory.create(username='user1')
user2 = UserFactory.create(username='user2')
CourseEnrollment.enroll(user1, course.id)
CourseEnrollment.enroll(user2, course.id)
professor_x = u'ÞrÖfessÖr X'
magneto = u'MàgnëtÖ'
cohort1 = CohortFactory(course_id=course.id, name=professor_x)
cohort2 = CohortFactory(course_id=course.id, name=magneto)
cohort1.users.add(user1)
cohort2.users.add(user2)
self._verify_cell_data_for_user(user1.username, course.id, 'Cohort Name', professor_x)
self._verify_cell_data_for_user(user2.username, course.id, 'Cohort Name', magneto)
def test_unicode_user_partitions(self):
"""
Test that user partition groups can contain unicode characters.
"""
user_groups = [u'ÞrÖfessÖr X', u'MàgnëtÖ']
user_partition = UserPartition(
0,
'x_man',
'X Man',
[
Group(0, user_groups[0]),
Group(1, user_groups[1])
]
)
# Create course with group configurations
self.initialize_course(
course_factory_kwargs={
'user_partitions': [user_partition]
}
)
_groups = [group.name for group in self.course.user_partitions[0].groups]
self.assertEqual(_groups, user_groups)
def test_cohort_scheme_partition(self):
"""
Test that cohort-schemed user partitions are ignored in the
grades export.
"""
# Set up a course with 'cohort' and 'random' user partitions.
cohort_scheme_partition = UserPartition(
0,
'Cohort-schemed Group Configuration',
'Group Configuration based on Cohorts',
[Group(0, 'Group A'), Group(1, 'Group B')],
scheme_id='cohort'
)
experiment_group_a = Group(2, u'Expériment Group A')
experiment_group_b = Group(3, u'Expériment Group B')
experiment_partition = UserPartition(
1,
u'Content Expériment Configuration',
u'Group Configuration for Content Expériments',
[experiment_group_a, experiment_group_b],
scheme_id='random'
)
course = CourseFactory.create(
cohort_config={'cohorted': True},
user_partitions=[cohort_scheme_partition, experiment_partition]
)
# Create user_a and user_b which are enrolled in the course
# and assigned to experiment_group_a and experiment_group_b,
# respectively.
user_a = UserFactory.create(username='user_a')
user_b = UserFactory.create(username='user_b')
CourseEnrollment.enroll(user_a, course.id)
CourseEnrollment.enroll(user_b, course.id)
course_tag_api.set_course_tag(
user_a,
course.id,
RandomUserPartitionScheme.key_for_partition(experiment_partition),
experiment_group_a.id
)
course_tag_api.set_course_tag(
user_b,
course.id,
RandomUserPartitionScheme.key_for_partition(experiment_partition),
experiment_group_b.id
)
# Assign user_a to a group in the 'cohort'-schemed user
# partition (by way of a cohort) to verify that the user
# partition group does not show up in the "Experiment Group"
# cell.
cohort_a = CohortFactory.create(course_id=course.id, name=u'Cohørt A', users=[user_a])
CourseUserGroupPartitionGroup(
course_user_group=cohort_a,
partition_id=cohort_scheme_partition.id,
group_id=cohort_scheme_partition.groups[0].id
).save()
# Verify that we see user_a and user_b in their respective
# content experiment groups, and that we do not see any
# content groups.
experiment_group_message = u'Experiment Group ({content_experiment})'
self._verify_cell_data_for_user(
user_a.username,
course.id,
experiment_group_message.format(
content_experiment=experiment_partition.name
),
experiment_group_a.name
)
self._verify_cell_data_for_user(
user_b.username,
course.id,
experiment_group_message.format(
content_experiment=experiment_partition.name
),
experiment_group_b.name
)
# Make sure cohort info is correct.
cohort_name_header = 'Cohort Name'
self._verify_cell_data_for_user(
user_a.username,
course.id,
cohort_name_header,
cohort_a.name
)
self._verify_cell_data_for_user(
user_b.username,
course.id,
cohort_name_header,
''
)
@patch('instructor_task.tasks_helper._get_current_task')
@patch('instructor_task.tasks_helper.iterate_grades_for')
def test_unicode_in_csv_header(self, mock_iterate_grades_for, _mock_current_task):
"""
Tests that CSV grade report generation works with unicode in headers.
"""
# mock a response from `iterate_grades_for`
mock_iterate_grades_for.return_value = [
(
self.create_student('username', 'student@example.com'),
{'section_breakdown': [{'label': u'\u8282\u540e\u9898 01'}], 'percent': 0, 'grade': None},
'Cannot grade student'
)
]
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
class TestProblemResponsesReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that generation of CSV files listing student answers to a
given problem works.
"""
def setUp(self):
super(TestProblemResponsesReport, self).setUp()
self.course = CourseFactory.create()
def test_success(self):
task_input = {'problem_location': ''}
with patch('instructor_task.tasks_helper._get_current_task'):
with patch('instructor_task.tasks_helper.list_problem_responses') as patched_data_source:
patched_data_source.return_value = [
{'username': 'user0', 'state': u'state0'},
{'username': 'user1', 'state': u'state1'},
{'username': 'user2', 'state': u'state2'},
]
result = upload_problem_responses_csv(None, None, self.course.id, task_input, 'calculated')
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
links = report_store.links_for(self.course.id)
self.assertEquals(len(links), 1)
self.assertDictContainsSubset({'attempted': 3, 'succeeded': 3, 'failed': 0}, result)
@ddt.ddt
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class TestInstructorDetailedEnrollmentReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that CSV detailed enrollment generation works.
"""
def setUp(self):
super(TestInstructorDetailedEnrollmentReport, self).setUp()
self.course = CourseFactory.create()
# create testing invoice 1
self.instructor = InstructorFactory(course_key=self.course.id)
self.sale_invoice_1 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName',
company_contact_email='Test@company.com',
recipient_name='Testw', recipient_email='test1@test.com', customer_reference_number='2Fwe23S',
internal_reference="A", course_id=self.course.id, is_valid=True
)
self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=self.sale_invoice_1,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
def test_success(self):
self.create_student('student', 'student@example.com')
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
def test_student_paid_course_enrollment_report(self):
"""
test to check the paid user enrollment csv report status
and enrollment source.
"""
student = UserFactory()
student_cart = Order.get_cart_for_user(student)
PaidCourseRegistration.add_to_order(student_cart, self.course.id)
student_cart.purchase()
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Credit Card - Individual')
self._verify_cell_data_in_csv(student.username, 'Payment Status', 'purchased')
def test_student_manually_enrolled_in_detailed_enrollment_source(self):
"""
test to check the manually enrolled user enrollment report status
and enrollment source.
"""
student = UserFactory()
enrollment = CourseEnrollment.enroll(student, self.course.id)
ManualEnrollmentAudit.create_manual_enrollment_audit(
self.instructor, student.email, ALLOWEDTOENROLL_TO_ENROLLED,
'manually enrolling unenrolled user', enrollment
)
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
enrollment_source = u'manually enrolled by user_id {user_id}, enrollment state transition: {transition}'.format(
user_id=self.instructor.id, transition=ALLOWEDTOENROLL_TO_ENROLLED) # pylint: disable=no-member
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
self._verify_cell_data_in_csv(student.username, 'Enrollment Source', enrollment_source)
self._verify_cell_data_in_csv(student.username, 'Payment Status', 'TBD')
def test_student_used_enrollment_code_for_course_enrollment(self):
"""
test to check enrollment source and payment status in the detailed
enrollment report when a purchased registration code is redeemed
"""
student = UserFactory()
self.client.login(username=student.username, password='test')
student_cart = Order.get_cart_for_user(student)
paid_course_reg_item = PaidCourseRegistration.add_to_order(student_cart, self.course.id)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'),
{'ItemId': paid_course_reg_item.id, 'qty': '4'})
self.assertEqual(resp.status_code, 200)
student_cart.purchase()
course_reg_codes = CourseRegistrationCode.objects.filter(order=student_cart)
redeem_url = reverse('register_code_redemption', args=[course_reg_codes[0].code])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Used Registration Code')
self._verify_cell_data_in_csv(student.username, 'Payment Status', 'purchased')
def test_student_used_invoice_unpaid_enrollment_code_for_course_enrollment(self):
"""
test to check enrollment source and payment status in the detailed
enrollment report when an unpaid-invoice registration code is redeemed
"""
student = UserFactory()
self.client.login(username=student.username, password='test')
course_registration_code = CourseRegistrationCode(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
redeem_url = reverse('register_code_redemption', args=['abcde'])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Used Registration Code')
self._verify_cell_data_in_csv(student.username, 'Payment Status', 'Invoice Outstanding')
def test_student_used_invoice_paid_enrollment_code_for_course_enrollment(self):
"""
test to check enrollment source and payment status in the detailed
enrollment report when a paid-invoice registration code is redeemed
"""
student = UserFactory()
self.client.login(username=student.username, password='test')
invoice_transaction = InvoiceTransaction(
invoice=self.sale_invoice_1,
amount=self.sale_invoice_1.total_amount,
status='completed',
created_by=self.instructor,
last_modified_by=self.instructor
)
invoice_transaction.save()
course_registration_code = CourseRegistrationCode(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
redeem_url = reverse('register_code_redemption', args=['abcde'])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_enrollment_report(None, None, self.course.id, task_input, 'generating_enrollment_report')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
self._verify_cell_data_in_csv(student.username, 'Enrollment Source', 'Used Registration Code')
self._verify_cell_data_in_csv(student.username, 'Payment Status', 'Invoice Paid')
def _verify_cell_data_in_csv(self, username, column_header, expected_cell_content):
"""
Verify that the last ReportStore CSV contains the expected content.
"""
report_store = ReportStore.from_config(config_name='FINANCIAL_REPORTS')
report_csv_filename = report_store.links_for(self.course.id)[0][0]
with open(report_store.path_to(self.course.id, report_csv_filename)) as csv_file:
# Expand the dict reader generator so we don't lose its content
for row in unicodecsv.DictReader(csv_file):
if row.get('Username') == username:
self.assertEqual(row[column_header], expected_cell_content)
@ddt.ddt
class TestProblemGradeReport(TestReportMixin, InstructorTaskModuleTestCase):
"""
Test that the problem CSV generation works.
"""
def setUp(self):
super(TestProblemGradeReport, self).setUp()
self.initialize_course()
# Add unicode data to CSV even though unicode usernames aren't
# technically possible in openedx.
self.student_1 = self.create_student(u'üser_1')
self.student_2 = self.create_student(u'üser_2')
self.csv_header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
@patch('instructor_task.tasks_helper._get_current_task')
def test_no_problems(self, _get_current_task):
"""
Verify that we see no grade information for a course with no graded
problems.
"""
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv([
dict(zip(
self.csv_header_row,
[unicode(self.student_1.id), self.student_1.email, self.student_1.username, '0.0']
)),
dict(zip(
self.csv_header_row,
[unicode(self.student_2.id), self.student_2.email, self.student_2.username, '0.0']
))
])
@patch('instructor_task.tasks_helper._get_current_task')
def test_single_problem(self, _get_current_task):
vertical = ItemFactory.create(
parent_location=self.problem_section.location,
category='vertical',
metadata={'graded': True},
display_name='Problem Vertical'
)
self.define_option_problem(u'Pröblem1', parent=vertical)
self.submit_student_answer(self.student_1.username, u'Pröblem1', ['Option 1'])
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
problem_name = u'Homework 1: Problem - Pröblem1'
header_row = self.csv_header_row + [problem_name + ' (Earned)', problem_name + ' (Possible)']
self.verify_rows_in_csv([
dict(zip(
header_row,
[
unicode(self.student_1.id),
self.student_1.email,
self.student_1.username,
'0.01', '1.0', '2.0']
)),
dict(zip(
header_row,
[
unicode(self.student_2.id),
self.student_2.email,
self.student_2.username,
'0.0', 'N/A', 'N/A'
]
))
])
@patch('instructor_task.tasks_helper._get_current_task')
@patch('instructor_task.tasks_helper.iterate_grades_for')
@ddt.data(u'Cannöt grade student', '')
def test_grading_failure(self, error_message, mock_iterate_grades_for, _mock_current_task):
"""
Test that any grading errors are properly reported in the progress
dict and uploaded to the report store.
"""
# mock an error response from `iterate_grades_for`
student = self.create_student(u'username', u'student@example.com')
mock_iterate_grades_for.return_value = [
(student, {}, error_message)
]
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result)
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
self.assertTrue(any('grade_report_err' in item[0] for item in report_store.links_for(self.course.id)))
self.verify_rows_in_csv([
{
u'Student ID': unicode(student.id),
u'Email': student.email,
u'Username': student.username,
u'error_msg': error_message if error_message else "Unknown error"
}
])
class TestProblemReportSplitTestContent(TestReportMixin, TestConditionalContent, InstructorTaskModuleTestCase):
"""
Test the problem report on a course that has split tests.
"""
OPTION_1 = 'Option 1'
OPTION_2 = 'Option 2'
def setUp(self):
super(TestProblemReportSplitTestContent, self).setUp()
self.problem_a_url = u'pröblem_a_url'
self.problem_b_url = u'pröblem_b_url'
self.define_option_problem(self.problem_a_url, parent=self.vertical_a)
self.define_option_problem(self.problem_b_url, parent=self.vertical_b)
def test_problem_grade_report(self):
"""
Test that we generate the correct grade report when dealing with A/B tests.
In order to verify that the behavior of the grade report is correct, we submit answers for problems
that the student won't have access to. A/B tests won't restrict access to the problems, but they should
not show up in that student's course tree when generating the grade report, hence the N/A's in the report.
"""
# student A will get 100%, student B will get 50% because
# OPTION_1 is the correct option, and OPTION_2 is the
# incorrect option
self.submit_student_answer(self.student_a.username, self.problem_a_url, [self.OPTION_1, self.OPTION_1])
self.submit_student_answer(self.student_a.username, self.problem_b_url, [self.OPTION_1, self.OPTION_1])
self.submit_student_answer(self.student_b.username, self.problem_a_url, [self.OPTION_1, self.OPTION_2])
self.submit_student_answer(self.student_b.username, self.problem_b_url, [self.OPTION_1, self.OPTION_2])
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 2, 'succeeded': 2, 'failed': 0}, result
)
problem_names = [u'Homework 1: Problem - pröblem_a_url', u'Homework 1: Problem - pröblem_b_url']
header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']
self.verify_rows_in_csv([
dict(zip(
header_row,
[
unicode(self.student_a.id),
self.student_a.email,
self.student_a.username,
u'1.0', u'2.0', u'2.0', u'N/A', u'N/A'
]
)),
dict(zip(
header_row,
[
unicode(self.student_b.id),
self.student_b.email,
self.student_b.username, u'0.5', u'N/A', u'N/A', u'1.0', u'2.0'
]
))
])
class TestProblemReportCohortedContent(TestReportMixin, ContentGroupTestCase, InstructorTaskModuleTestCase):
"""
Test the problem report on a course that has cohorted content.
"""
def setUp(self):
super(TestProblemReportCohortedContent, self).setUp()
# construct cohorted problems to work on.
self.add_course_content()
vertical = ItemFactory.create(
parent_location=self.problem_section.location,
category='vertical',
metadata={'graded': True},
display_name='Problem Vertical'
)
self.define_option_problem(
u"Pröblem0",
parent=vertical,
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]}
)
self.define_option_problem(
u"Pröblem1",
parent=vertical,
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]}
)
def _format_user_grade(self, header_row, user, grade):
"""
Helper method that formats a user's grade row
Args:
header_row(list): header row of csv containing Student ID, Email, Username etc
user(object): Django user object
grade(list): Users' grade list
"""
return dict(zip(
header_row,
[
unicode(user.id),
user.email,
user.username,
] + grade
))
def test_cohort_content(self):
self.submit_student_answer(self.alpha_user.username, u'Pröblem0', ['Option 1', 'Option 1'])
resp = self.submit_student_answer(self.alpha_user.username, u'Pröblem1', ['Option 1', 'Option 1'])
self.assertEqual(resp.status_code, 404)
resp = self.submit_student_answer(self.beta_user.username, u'Pröblem0', ['Option 1', 'Option 2'])
self.assertEqual(resp.status_code, 404)
self.submit_student_answer(self.beta_user.username, u'Pröblem1', ['Option 1', 'Option 2'])
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result
)
problem_names = [u'Homework 1: Problem - Pröblem0', u'Homework 1: Problem - Pröblem1']
header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']
user_grades = [
{'user': self.staff_user, 'grade': [u'0.0', u'N/A', u'N/A', u'N/A', u'N/A']},
{'user': self.alpha_user, 'grade': [u'1.0', u'2.0', u'2.0', u'N/A', u'N/A']},
{'user': self.beta_user, 'grade': [u'0.5', u'N/A', u'N/A', u'1.0', u'2.0']},
{'user': self.non_cohorted_user, 'grade': [u'0.0', u'N/A', u'N/A', u'N/A', u'N/A']},
]
# Verify generated grades and expected grades match
expected_grades = [self._format_user_grade(header_row, **user_grade) for user_grade in user_grades]
self.verify_rows_in_csv(expected_grades)
@patch('courseware.grades.MaxScoresCache.get', Mock(return_value=1))
def test_cohort_content_with_maxcache(self):
"""
Tests cohorted course grading in the scenario where `max_scores_cache` is set for the course
problems.
"""
# Course is cohorted
self.assertTrue(cohorts.is_course_cohorted(self.course.id))
# Verify user groups
self.assertEquals(
cohorts.get_cohort(self.alpha_user, self.course.id).id,
self.course.user_partitions[0].groups[0].id,
"alpha_user should be assigned to the correct cohort"
)
self.assertEquals(
cohorts.get_cohort(self.beta_user, self.course.id).id,
self.course.user_partitions[0].groups[1].id,
"beta_user should be assigned to the correct cohort"
)
# Verify user enrollment
for user in [self.alpha_user, self.beta_user, self.non_cohorted_user]:
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
self.submit_student_answer(self.alpha_user.username, u'Pröblem0', ['Option 1', 'Option 1'])
resp = self.submit_student_answer(self.alpha_user.username, u'Pröblem1', ['Option 1', 'Option 1'])
self.assertEqual(resp.status_code, 404)
resp = self.submit_student_answer(self.beta_user.username, u'Pröblem0', ['Option 1', 'Option 2'])
self.assertEqual(resp.status_code, 404)
self.submit_student_answer(self.beta_user.username, u'Pröblem1', ['Option 1', 'Option 2'])
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_problem_grade_report(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset(
{'action_name': 'graded', 'attempted': 4, 'succeeded': 4, 'failed': 0}, result
)
problem_names = [u'Homework 1: Problem - Pröblem0', u'Homework 1: Problem - Pröblem1']
header_row = [u'Student ID', u'Email', u'Username', u'Final Grade']
for problem in problem_names:
header_row += [problem + ' (Earned)', problem + ' (Possible)']
user_grades = [
{'user': self.staff_user, 'grade': [u'0.0', u'N/A', u'N/A', u'N/A', u'N/A']},
{'user': self.alpha_user, 'grade': [u'1.0', u'2.0', u'2.0', u'N/A', u'N/A']},
{'user': self.beta_user, 'grade': [u'0.5', u'N/A', u'N/A', u'1.0', u'2.0']},
{'user': self.non_cohorted_user, 'grade': [u'0.0', u'N/A', u'N/A', u'N/A', u'N/A']},
]
# Verify generated grades and expected grades match
expected_grades = [self._format_user_grade(header_row, **grade) for grade in user_grades]
self.verify_rows_in_csv(expected_grades)
@ddt.ddt
class TestExecutiveSummaryReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that Executive Summary report generation works.
"""
def setUp(self):
super(TestExecutiveSummaryReport, self).setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, min_price=50)
self.instructor = InstructorFactory(course_key=self.course.id)
self.student1 = UserFactory()
self.student2 = UserFactory()
self.student1_cart = Order.get_cart_for_user(self.student1)
self.student2_cart = Order.get_cart_for_user(self.student2)
self.sale_invoice_1 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName',
company_contact_email='Test@company.com',
recipient_name='Testw', recipient_email='test1@test.com', customer_reference_number='2Fwe23S',
internal_reference="A", course_id=self.course.id, is_valid=True
)
InvoiceTransaction.objects.create(
invoice=self.sale_invoice_1,
amount=self.sale_invoice_1.total_amount,
status='completed',
created_by=self.instructor,
last_modified_by=self.instructor
)
self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=self.sale_invoice_1,
qty=10,
unit_price=1234.32,
course_id=self.course.id
)
for i in range(5):
coupon = Coupon(
code='coupon{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True,
)
coupon.save()
def test_successfully_generate_executive_summary_report(self):
"""
Test that successfully generates the executive summary report.
"""
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_exec_summary_report(
None, None, self.course.id,
task_input, 'generating executive summary report'
)
ReportStore.from_config(config_name='FINANCIAL_REPORTS')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
def students_purchases(self):
"""
Students purchase the courses using enrollment
and coupon codes.
"""
self.client.login(username=self.student1.username, password='test')
paid_course_reg_item = PaidCourseRegistration.add_to_order(self.student1_cart, self.course.id)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {
'ItemId': paid_course_reg_item.id, 'qty': '4'
})
self.assertEqual(resp.status_code, 200)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'coupon1'})
self.assertEqual(resp.status_code, 200)
self.student1_cart.purchase()
course_reg_codes = CourseRegistrationCode.objects.filter(order=self.student1_cart)
redeem_url = reverse('register_code_redemption', args=[course_reg_codes[0].code])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
self.client.login(username=self.student2.username, password='test')
PaidCourseRegistration.add_to_order(self.student2_cart, self.course.id)
# apply the coupon code to the item in the cart
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'coupon1'})
self.assertEqual(resp.status_code, 200)
self.student2_cart.purchase()
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
def test_generate_executive_summary_report(self):
"""
        Test generation of the executive summary report,
        then verify the report contents.
"""
self.students_purchases()
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_exec_summary_report(
None, None, self.course.id,
task_input, 'generating executive summary report'
)
report_store = ReportStore.from_config(config_name='FINANCIAL_REPORTS')
expected_data = [
'Gross Revenue Collected', '$1481.82',
'Gross Revenue Pending', '$0.00',
'Average Price per Seat', '$296.36',
'Number of seats purchased using coupon codes', '<td>2</td>'
]
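        # Expected figures, assuming the $50 course-mode price from setUp:
        # the invoice contributes $1234.32; student1 buys 4 seats and
        # student2 buys 1 seat at $50 each with the 1% "coupon1" discount
        # ($198.00 + $49.50), giving $1481.82 gross over 5 seats, i.e.
        # $296.36 per seat. The coupon-seat count of 2 appears to reflect
        # the two cart items the coupon was applied to.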
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
self._verify_html_file_report(report_store, expected_data)
def _verify_html_file_report(self, report_store, expected_data):
"""
        Verify the executive summary HTML report data.
"""
report_html_filename = report_store.links_for(self.course.id)[0][0]
with open(report_store.path_to(self.course.id, report_html_filename)) as html_file:
html_file_data = html_file.read()
for data in expected_data:
self.assertTrue(data in html_file_data)
@ddt.ddt
class TestStudentReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that CSV student profile report generation works.
"""
def setUp(self):
super(TestStudentReport, self).setUp()
self.course = CourseFactory.create()
def test_success(self):
self.create_student('student', 'student@example.com')
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_students_csv(None, None, self.course.id, task_input, 'calculated')
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
links = report_store.links_for(self.course.id)
self.assertEquals(len(links), 1)
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
@ddt.data([u'student', u'student\xec'])
def test_unicode_usernames(self, students):
"""
Test that students with unicode characters in their usernames
are handled.
"""
for i, student in enumerate(students):
self.create_student(username=student, email='student{0}@example.com'.format(i))
self.current_task = Mock()
self.current_task.update_state = Mock()
task_input = {
'features': [
'id', 'username', 'name', 'email', 'language', 'location',
'year_of_birth', 'gender', 'level_of_education', 'mailing_address',
'goals'
]
}
with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task:
mock_current_task.return_value = self.current_task
result = upload_students_csv(None, None, self.course.id, task_input, 'calculated')
# This assertion simply confirms that the generation completed with no errors
num_students = len(students)
self.assertDictContainsSubset({'attempted': num_students, 'succeeded': num_students, 'failed': 0}, result)
@ddt.ddt
class TestListMayEnroll(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that generation of CSV files containing information about
students who may enroll in a given course (but have not signed up
for it yet) works.
"""
def _create_enrollment(self, email):
"Factory method for creating CourseEnrollmentAllowed objects."
return CourseEnrollmentAllowed.objects.create(
email=email, course_id=self.course.id
)
def setUp(self):
super(TestListMayEnroll, self).setUp()
self.course = CourseFactory.create()
def test_success(self):
self._create_enrollment('user@example.com')
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_may_enroll_csv(None, None, self.course.id, task_input, 'calculated')
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
links = report_store.links_for(self.course.id)
self.assertEquals(len(links), 1)
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
def test_unicode_email_addresses(self):
"""
Test handling of unicode characters in email addresses of students
who may enroll in a course.
"""
enrollments = [u'student@example.com', u'ni\xf1o@example.com']
for email in enrollments:
self._create_enrollment(email)
task_input = {'features': ['email']}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_may_enroll_csv(None, None, self.course.id, task_input, 'calculated')
# This assertion simply confirms that the generation completed with no errors
num_enrollments = len(enrollments)
self.assertDictContainsSubset({'attempted': num_enrollments, 'succeeded': num_enrollments, 'failed': 0}, result)
class MockDefaultStorage(object):
"""Mock django's DefaultStorage"""
def __init__(self):
pass
def open(self, file_name):
"""Mock out DefaultStorage.open with standard python open"""
return open(file_name)
@patch('instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
class TestCohortStudents(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that bulk student cohorting works.
"""
def setUp(self):
super(TestCohortStudents, self).setUp()
self.course = CourseFactory.create()
self.cohort_1 = CohortFactory(course_id=self.course.id, name='Cohort 1')
self.cohort_2 = CohortFactory(course_id=self.course.id, name='Cohort 2')
self.student_1 = self.create_student(username=u'student_1\xec', email='student_1@example.com')
self.student_2 = self.create_student(username='student_2', email='student_2@example.com')
self.csv_header_row = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
def _cohort_students_and_upload(self, csv_data):
"""
Call `cohort_students_and_upload` with a file generated from `csv_data`.
"""
with tempfile.NamedTemporaryFile() as temp_file:
temp_file.write(csv_data.encode('utf-8'))
temp_file.flush()
with patch('instructor_task.tasks_helper._get_current_task'):
return cohort_students_and_upload(None, None, self.course.id, {'file_name': temp_file.name}, 'cohorted')
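    # The uploaded CSV is expected to begin with a "username,email,cohort"
    # header row; each data row may identify a learner by username, email,
    # or both (email takes precedence when both are present -- see
    # test_prefer_email below).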
def test_username(self):
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,,Cohort 1\n'
u'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_email(self):
result = self._cohort_students_and_upload(
'username,email,cohort\n'
',student_1@example.com,Cohort 1\n'
',student_2@example.com,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_username_and_email(self):
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,student_1@example.com,Cohort 1\n'
u'student_2,student_2@example.com,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_prefer_email(self):
"""
Test that `cohort_students_and_upload` greedily prefers 'email' over
'username' when identifying the user. This means that if a correct
email is present, an incorrect or non-matching username will simply be
ignored.
"""
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,student_1@example.com,Cohort 1\n' # valid username and email
u'Invalid,student_2@example.com,Cohort 2' # invalid username, valid email
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_non_existent_user(self):
result = self._cohort_students_and_upload(
'username,email,cohort\n'
'Invalid,,Cohort 1\n'
'student_2,also_fake@bad.com,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 0, 'failed': 2}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '0', 'Invalid'])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '0', 'also_fake@bad.com'])),
],
verify_order=False
)
def test_non_existent_cohort(self):
result = self._cohort_students_and_upload(
'username,email,cohort\n'
',student_1@example.com,Does Not Exist\n'
'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 1, 'failed': 1}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Does Not Exist', 'False', '0', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_too_few_commas(self):
"""
        A CSV file may be malformed and lack trailing commas at the end of a row.
        In this case, the CSV parser assigns those missing cells the value None.
Make sure we handle None values appropriately.
i.e.:
header_1,header_2,header_3
val_1,val_2,val_3 <- good row
val_1,, <- good row
            val_1 <- bad row; no trailing commas to indicate empty cells
"""
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,\n'
u'student_2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 0, 'failed': 2}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['', 'False', '0', ''])),
],
verify_order=False
)
def test_only_header_row(self):
result = self._cohort_students_and_upload(
u'username,email,cohort'
)
self.assertDictContainsSubset({'total': 0, 'attempted': 0, 'succeeded': 0, 'failed': 0}, result)
self.verify_rows_in_csv([])
def test_carriage_return(self):
"""
Test that we can handle carriage returns in our file.
"""
result = self._cohort_students_and_upload(
u'username,email,cohort\r'
u'student_1\xec,,Cohort 1\r'
u'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_carriage_return_line_feed(self):
"""
Test that we can handle carriage returns and line feeds in our file.
"""
result = self._cohort_students_and_upload(
u'username,email,cohort\r\n'
u'student_1\xec,,Cohort 1\r\n'
u'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_move_users_to_new_cohort(self):
self.cohort_1.users.add(self.student_1)
self.cohort_2.users.add(self.student_2)
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,,Cohort 2\n'
u'student_2,,Cohort 1'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'succeeded': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '1', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '1', ''])),
],
verify_order=False
)
def test_move_users_to_same_cohort(self):
self.cohort_1.users.add(self.student_1)
self.cohort_2.users.add(self.student_2)
result = self._cohort_students_and_upload(
u'username,email,cohort\n'
u'student_1\xec,,Cohort 1\n'
u'student_2,,Cohort 2'
)
self.assertDictContainsSubset({'total': 2, 'attempted': 2, 'skipped': 2, 'failed': 0}, result)
self.verify_rows_in_csv(
[
dict(zip(self.csv_header_row, ['Cohort 1', 'True', '0', ''])),
dict(zip(self.csv_header_row, ['Cohort 2', 'True', '0', ''])),
],
verify_order=False
)
@ddt.ddt
@patch('instructor_task.tasks_helper.DefaultStorage', new=MockDefaultStorage)
class TestGradeReportEnrollmentAndCertificateInfo(TestReportMixin, InstructorTaskModuleTestCase):
"""
    Test that the grade report has correct user enrollment, verification, and certificate information.
"""
def setUp(self):
super(TestGradeReportEnrollmentAndCertificateInfo, self).setUp()
self.initialize_course()
self.create_problem()
self.columns_to_check = [
'Enrollment Track',
'Verification Status',
'Certificate Eligible',
'Certificate Delivered',
'Certificate Type'
]
def create_problem(self, problem_display_name='test_problem', parent=None):
"""
Create a multiple choice response problem.
"""
if parent is None:
parent = self.problem_section
factory = MultipleChoiceResponseXMLFactory()
args = {'choices': [False, True, False]}
problem_xml = factory.build_xml(**args)
ItemFactory.create(
parent_location=parent.location,
parent=parent,
category="problem",
display_name=problem_display_name,
data=problem_xml
)
def user_is_embargoed(self, user, is_embargoed):
"""
        Set a user's embargo state.
"""
user_profile = UserFactory(username=user.username, email=user.email).profile
user_profile.allow_certificate = not is_embargoed
user_profile.save()
def _verify_csv_data(self, username, expected_data):
"""
Verify grade report data.
"""
with patch('instructor_task.tasks_helper._get_current_task'):
upload_grades_csv(None, None, self.course.id, None, 'graded')
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
report_csv_filename = report_store.links_for(self.course.id)[0][0]
with open(report_store.path_to(self.course.id, report_csv_filename)) as csv_file:
for row in unicodecsv.DictReader(csv_file):
if row.get('username') == username:
csv_row_data = [row[column] for column in self.columns_to_check]
self.assertEqual(csv_row_data, expected_data)
def _create_user_data(self,
user_enroll_mode,
has_passed,
whitelisted,
is_embargoed,
verification_status,
certificate_status,
certificate_mode):
"""
Create user data to be used during grade report generation.
"""
user = self.create_student('u1', mode=user_enroll_mode)
if has_passed:
self.submit_student_answer('u1', 'test_problem', ['choice_1'])
CertificateWhitelistFactory.create(user=user, course_id=self.course.id, whitelist=whitelisted)
self.user_is_embargoed(user, is_embargoed)
if user_enroll_mode in CourseMode.VERIFIED_MODES:
SoftwareSecurePhotoVerificationFactory.create(user=user, status=verification_status)
GeneratedCertificateFactory.create(
user=user,
course_id=self.course.id,
status=certificate_status,
mode=certificate_mode
)
return user
@ddt.data(
(
'verified', False, False, False, 'approved', 'notpassing', 'honor',
['verified', 'ID Verified', 'N', 'N', 'N/A']
),
(
'verified', False, True, False, 'approved', 'downloadable', 'verified',
['verified', 'ID Verified', 'Y', 'Y', 'verified']
),
(
'honor', True, True, True, 'approved', 'restricted', 'honor',
['honor', 'N/A', 'N', 'N', 'N/A']
),
(
'verified', True, True, False, 'must_retry', 'downloadable', 'honor',
['verified', 'Not ID Verified', 'Y', 'Y', 'honor']
),
)
@ddt.unpack
def test_grade_report_enrollment_and_certificate_info(
self,
user_enroll_mode,
has_passed,
whitelisted,
is_embargoed,
verification_status,
certificate_status,
certificate_mode,
expected_output
):
user = self._create_user_data(
user_enroll_mode,
has_passed,
whitelisted,
is_embargoed,
verification_status,
certificate_status,
certificate_mode
)
self._verify_csv_data(user.username, expected_output)
@override_settings(CERT_QUEUE='test-queue')
class TestCertificateGeneration(InstructorTaskModuleTestCase):
"""
Test certificate generation task works.
"""
def setUp(self):
super(TestCertificateGeneration, self).setUp()
self.initialize_course()
def test_certificate_generation_for_students(self):
"""
        Verify that certificates are generated for all eligible students enrolled in a course.
"""
# create 10 students
students = [self.create_student(username='student_{}'.format(i), email='student_{}@example.com'.format(i))
for i in xrange(1, 11)]
# mark 2 students to have certificates generated already
for student in students[:2]:
GeneratedCertificateFactory.create(
user=student,
course_id=self.course.id,
status=CertificateStatuses.downloadable,
mode='honor'
)
# white-list 5 students
for student in students[2:7]:
CertificateWhitelistFactory.create(user=student, course_id=self.course.id, whitelist=True)
current_task = Mock()
current_task.update_state = Mock()
with self.assertNumQueries(125):
with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task:
mock_current_task.return_value = current_task
with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_queue:
mock_queue.return_value = (0, "Successfully queued")
result = generate_students_certificates(None, None, self.course.id, None, 'certificates generated')
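        # Expected counts follow from the setup above: 10 students total;
        # the 2 with existing downloadable certificates are skipped; of the
        # 8 attempted, the 5 white-listed students succeed and the other 3
        # (presumably neither passing nor white-listed) fail.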
self.assertDictContainsSubset(
{
'action_name': 'certificates generated',
'total': 10,
'attempted': 8,
'succeeded': 5,
'failed': 3,
'skipped': 2
},
result
)
| jamiefolsom/edx-platform | lms/djangoapps/instructor_task/tests/test_tasks_helper.py | Python | agpl-3.0 | 61,734 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp.exceptions import UserError
class account_bank_statement(osv.osv):
_inherit = 'account.bank.statement'
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
# bypass obsolete statement line resequencing
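        # Note: super(osv.osv, self) appears deliberate here -- it skips the
        # account.bank.statement write() override (and its statement line
        # resequencing) and dispatches further up the MRO instead.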
if vals.get('line_ids', False) or context.get('ebanking_import', False):
res = super(osv.osv, self).write(cr, uid, ids, vals, context=context)
else:
res = super(account_bank_statement, self).write(cr, uid, ids, vals, context=context)
return res
def button_confirm_bank(self, cr, uid, ids, context=None):
bank_statement_line_obj = self.pool.get('account.bank.statement.line')
super(account_bank_statement, self).button_confirm_bank(cr, uid, ids, context=context)
for st in self.browse(cr, uid, ids, context=context):
if st.line_ids:
line_ids = [l.id for l in st.line_ids]
cr.execute("UPDATE account_bank_statement_line \
SET state='confirm' WHERE id in %s ",
(tuple(line_ids),))
bank_statement_line_obj.invalidate_cache(cr, uid, ['state'], line_ids, context=context)
return True
def button_cancel(self, cr, uid, ids, context=None):
bank_statement_line_obj = self.pool.get('account.bank.statement.line')
super(account_bank_statement, self).button_cancel(cr, uid, ids, context=context)
for st in self.browse(cr, uid, ids, context=context):
if st.line_ids:
line_ids = [l.id for l in st.line_ids]
cr.execute("UPDATE account_bank_statement_line \
SET state='draft' WHERE id in %s ",
(tuple(line_ids),))
bank_statement_line_obj.invalidate_cache(cr, uid, ['state'], line_ids, context=context)
return True
class account_bank_statement_line_global(osv.osv):
_name = 'account.bank.statement.line.global'
_description = 'Batch Payment Info'
_columns = {
'name': fields.char('OBI', required=True, help="Originator to Beneficiary Information"),
'code': fields.char('Code', size=64, required=True),
'parent_id': fields.many2one('account.bank.statement.line.global', 'Parent Code', ondelete='cascade'),
'child_ids': fields.one2many('account.bank.statement.line.global', 'parent_id', 'Child Codes', copy=True),
'type': fields.selection([
('iso20022', 'ISO 20022'),
('coda', 'CODA'),
('manual', 'Manual'),
], 'Type', required=True),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
'bank_statement_line_ids': fields.one2many('account.bank.statement.line', 'globalisation_id', 'Bank Statement Lines'),
}
_rec_name = 'code'
_defaults = {
'code': lambda s,c,u,ctx={}: s.pool.get('ir.sequence').next_by_code(c, u, 'account.bank.statement.line.global'),
'name': '/',
}
_sql_constraints = [
('code_uniq', 'unique (code)', 'The code must be unique !'),
]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
ids = []
if name:
ids = self.search(cr, user, [('code', 'ilike', name)] + args, limit=limit)
if not ids:
ids = self.search(cr, user, [('name', operator, name)] + args, limit=limit)
if not ids and len(name.split()) >= 2:
#Separating code and name for searching
operand1, operand2 = name.split(' ', 1) #name can contain spaces
ids = self.search(cr, user, [('code', 'like', operand1), ('name', operator, operand2)] + args, limit=limit)
else:
ids = self.search(cr, user, args, context=context, limit=limit)
return self.name_get(cr, user, ids, context=context)
class account_bank_statement_line(osv.osv):
_inherit = 'account.bank.statement.line'
_columns = {
'val_date': fields.date('Value Date', states={'confirm': [('readonly', True)]}),
'globalisation_id': fields.many2one('account.bank.statement.line.global', 'Globalisation ID',
states={'confirm': [('readonly', True)]},
help="Code to identify transactions belonging to the same globalisation level within a batch payment"),
'globalisation_amount': fields.related('globalisation_id', 'amount', type='float',
relation='account.bank.statement.line.global', string='Glob. Amount', readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirm', 'Confirmed')],
'Status', required=True, readonly=True, copy=False),
'counterparty_name': fields.char('Counterparty Name', size=35),
'counterparty_bic': fields.char('Counterparty BIC', size=11),
'counterparty_number': fields.char('Counterparty Number', size=34),
'counterparty_currency': fields.char('Counterparty Currency', size=3),
}
_defaults = {
'state': 'draft',
}
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
if context.get('block_statement_line_delete', False):
raise UserError(_('Delete operation not allowed. Please go to the associated bank statement in order to delete and/or modify bank statement line.'))
return super(account_bank_statement_line, self).unlink(cr, uid, ids, context=context)
| addition-it-solutions/project-all | addons/account_bank_statement_extensions/account_bank_statement.py | Python | agpl-3.0 | 6,685 |
#!/usr/bin/env python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import sys, os, re
import scsiutil, util
import xml.dom.minidom
import xs_errors, time
import glob
DEVPATH='/dev/disk/by-id'
DMDEVPATH='/dev/mapper'
SYSFS_PATH1='/sys/class/scsi_host'
SYSFS_PATH2='/sys/class/scsi_disk'
SYSFS_PATH3='/sys/class/fc_transport'
DRIVER_BLACKLIST = ['^(s|p|)ata_.*', '^ahci$', '^pdc_adma$', '^iscsi_tcp$']
INVALID_DEVICE_NAME = ''
def getManufacturer(s):
(rc,stdout,stderr) = util.doexec(['/sbin/modinfo', '-d', s])
if stdout:
return stdout.strip()
else:
return "Unknown"
def update_devs_dict(devs, dev, entry):
if dev != INVALID_DEVICE_NAME:
devs[dev] = entry
def adapters(filterstr="any"):
dict = {}
devs = {}
adt = {}
for a in os.listdir(SYSFS_PATH1):
proc = match_hbadevs(a, filterstr)
if not proc:
continue
adt[a] = proc
id = a.replace("host","")
scsiutil.rescan([id])
emulex = False
paths = []
if proc == "lpfc":
emulex = True
paths.append(SYSFS_PATH3)
else:
for p in [os.path.join(SYSFS_PATH1,a,"device","session*"),os.path.join(SYSFS_PATH1,a,"device"),\
os.path.join(SYSFS_PATH2,"%s:*"%id)]:
paths += glob.glob(p)
if not len(paths):
continue
for path in paths:
for i in filter(match_targets,os.listdir(path)):
tgt = i.replace('target','')
if emulex:
sysfs = os.path.join(SYSFS_PATH3,i,"device")
else:
sysfs = SYSFS_PATH2
for lun in os.listdir(sysfs):
if not match_LUNs(lun,tgt):
continue
if emulex:
dir = os.path.join(sysfs,lun)
else:
dir = os.path.join(sysfs,lun,"device")
(dev, entry) = _extract_dev(dir, proc, id, lun)
update_devs_dict(devs, dev, entry)
# for new qlogic sysfs layout (rport under device, then target)
for i in filter(match_rport,os.listdir(path)):
newpath = os.path.join(path, i)
for j in filter(match_targets,os.listdir(newpath)):
tgt = j.replace('target','')
sysfs = SYSFS_PATH2
for lun in os.listdir(sysfs):
if not match_LUNs(lun,tgt):
continue
dir = os.path.join(sysfs,lun,"device")
(dev, entry) = _extract_dev(dir, proc, id, lun)
update_devs_dict(devs, dev, entry)
# for new mptsas sysfs entries, check for phy* node
for i in filter(match_phy,os.listdir(path)):
(target,lunid) = i.replace('phy-','').split(':')
tgt = "%s:0:0:%s" % (target,lunid)
sysfs = SYSFS_PATH2
for lun in os.listdir(sysfs):
if not match_LUNs(lun,tgt):
continue
dir = os.path.join(sysfs,lun,"device")
(dev, entry) = _extract_dev(dir, proc, id, lun)
update_devs_dict(devs, dev, entry)
            if path.startswith(SYSFS_PATH2):
                dev = _extract_dev_name(os.path.join(path, 'device'))
if devs.has_key(dev):
continue
hbtl = os.path.basename(path)
(h,b,t,l) = hbtl.split(':')
entry = {'procname':proc, 'host':id, 'target':l}
update_devs_dict(devs, dev, entry)
dict['devs'] = devs
dict['adt'] = adt
return dict
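# _get_driver_name resolves the HBA driver for a scsi_host entry by probing
# sysfs in stages: driver-specific attribute files first (fnic, lpfc,
# qla2xxx), then the generic driver_name/proc_name files, and finally the
# PHYSDEVDRIVER= line of the uevent file.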
def _get_driver_name(scsihost):
driver_name = 'Unknown'
if os.path.exists(os.path.join(SYSFS_PATH1, scsihost, 'fnic_state')):
driver_name = 'fnic'
if os.path.exists(os.path.join(SYSFS_PATH1, scsihost, 'lpfc_fcp_class')):
driver_name = 'lpfc'
if os.path.exists(os.path.join(SYSFS_PATH1, scsihost, '84xx_fw_version')):
driver_name = 'qla2xxx'
if 'Unknown' == driver_name:
namepath = os.path.join(SYSFS_PATH1, scsihost, 'driver_name')
if not os.path.exists(namepath):
namepath = os.path.join(SYSFS_PATH1, scsihost, 'proc_name')
if os.path.exists(namepath):
try:
f = open(namepath, 'r')
line = f.readline()[:-1]
f.close()
if not line in ['<NULL>', '(NULL)', '']:
driver_name = line
except IOError:
pass
if 'Unknown' == driver_name:
ueventpath = os.path.join(SYSFS_PATH1, scsihost, 'uevent')
if os.path.exists(ueventpath):
try:
f = open(ueventpath, 'r')
for line in f:
if line.startswith('PHYSDEVDRIVER='):
driver_name = line.replace('PHYSDEVDRIVER=','').strip()
f.close()
except IOError:
pass
return driver_name
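# _parseHostId assumes an mppUtil "hostId" line of the whitespace-separated
# form "hostId <h>, channelId <c>, targetId <t>"; tokens 1, 3 and 5 are
# joined and the commas stripped to yield "h:c:t".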
def _parseHostId(str):
id = str.split()
val = "%s:%s:%s" % (id[1],id[3],id[5])
return val.replace(',','')
def _genMPPHBA(id):
devs = scsiutil.cacheSCSIidentifiers()
mppdict = {}
for dev in devs:
item = devs[dev]
if item[1] == id:
arr = scsiutil._genArrayIdentifier(dev)
if not len(arr):
continue
try:
cmd = ['/usr/sbin/mppUtil', '-a']
for line in util.doexec(cmd)[1].split('\n'):
if line.find(arr) != -1:
rec = line.split()[0]
cmd2 = ['/usr/sbin/mppUtil', '-g',rec]
li = []
for newline in util.doexec(cmd2)[1].split('\n'):
if newline.find('hostId') != -1:
li.append(_parseHostId(newline))
mppdict[dev.split('/')[-1]] = li
except:
continue
return mppdict
def match_hbadevs(s, filterstr):
driver_name = _get_driver_name(s)
if match_host(s) and not match_blacklist(driver_name) \
and ( filterstr == "any" or match_filterstr(filterstr, driver_name) ):
return driver_name
else:
return ""
def match_blacklist(driver_name):
return re.search("(" + ")|(".join(DRIVER_BLACKLIST) + ")", driver_name)
def match_filterstr(filterstr, driver_name):
return re.search("^%s" % filterstr, driver_name)
def match_host(s):
return re.search("^host[0-9]", s)
def match_rport(s):
regex = re.compile("^rport-*")
return regex.search(s, 0)
def match_targets(s):
regex = re.compile("^target[0-9]")
return regex.search(s, 0)
def match_phy(s):
regex = re.compile("^phy-*")
return regex.search(s, 0)
def match_LUNs(s, prefix):
regex = re.compile("^%s" % prefix)
return regex.search(s, 0)
def match_dev(s):
regex = re.compile("^block:")
return regex.search(s, 0)
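# A minimal, self-contained smoke test for the match_* predicates above.
# Illustrative only: the entry names are typical sysfs examples, not output
# captured from a real host.
def _selftest_match_helpers():
    assert match_host("host0")
    assert not match_host("hostbus")    # must be host<digit>
    assert match_targets("target0:0:0")
    assert match_rport("rport-0:0-1")
    assert match_phy("phy-0:1")
    assert match_LUNs("0:0:0:1", "0:0:0")
    assert match_dev("block:sda")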
def _extract_dev_name(device_dir):
"""Returns the name of the block device from sysfs e.g. 'sda'"""
kernel_version = os.uname()[2]
if kernel_version.startswith('2.6'):
# sub-directory of form block:sdx/
dev = filter(match_dev, os.listdir(device_dir))[0]
        # remove the 'block:' prefix and return; note str.lstrip strips a
        # character *set*, not a prefix, so slice the prefix off instead
        return dev[len('block:'):]
elif kernel_version.startswith('3.'):
# directory for device name lives inside block directory e.g. block/sdx
return _get_block_device_name_with_kernel_3x(device_dir)
else:
msg = 'Kernel version detected: %s' % kernel_version
raise xs_errors.XenError('UnsupportedKernel', msg)
def _get_block_device_name_with_kernel_3x(device_dir):
devs = glob.glob(os.path.join(device_dir, 'block/*'))
if len(devs):
# prune path to extract the device name
return os.path.basename(devs[0])
else:
return INVALID_DEVICE_NAME
def _extract_dev(device_dir, procname, host, target):
"""Returns device name and creates dictionary entry for it"""
dev = _extract_dev_name(device_dir)
entry = {}
entry['procname'] = procname
entry['host'] = host
entry['target'] = target
return (dev, entry)
def _add_host_parameters_to_adapter(dom, adapter, host_class, host_id,
parameters):
"""Adds additional information about the adapter to the the adapter node"""
host_path = os.path.join('/sys/class/', host_class, 'host%s' % (host_id))
if os.path.exists(host_path):
host_entry = dom.createElement(host_class)
adapter.appendChild(host_entry)
for parameter in parameters:
try:
filehandle = open(os.path.join(host_path, parameter))
parameter_value = filehandle.read(512).strip()
filehandle.close()
if parameter_value:
entry = dom.createElement(parameter)
host_entry.appendChild(entry)
text_node = dom.createTextNode(parameter_value)
entry.appendChild(text_node)
except IOError:
pass
def scan(srobj):
systemrootID = util.getrootdevID()
hbadict = srobj.hbadict
hbas = srobj.hbas
dom = xml.dom.minidom.Document()
e = dom.createElement("Devlist")
dom.appendChild(e)
if not os.path.exists(DEVPATH):
return dom.toprettyxml()
devs = srobj.devs
vdis = {}
for key in hbadict:
hba = hbadict[key]
path = os.path.join("/dev",key)
realpath = path
obj = srobj.vdi("")
try:
obj._query(realpath, devs[realpath][4])
except:
continue
# Test for root dev or existing PBD
if len(obj.SCSIid) and len(systemrootID) and util.match_scsiID(obj.SCSIid, systemrootID):
util.SMlog("Ignoring root device %s" % realpath)
continue
elif util.test_SCSIid(srobj.session, None, obj.SCSIid):
util.SMlog("SCSIid in use, ignoring (%s)" % obj.SCSIid)
continue
elif not devs.has_key(realpath):
continue
ids = devs[realpath]
obj.adapter = ids[1]
obj.channel = ids[2]
obj.id = ids[3]
obj.lun = ids[4]
obj.hba = hba['procname']
obj.numpaths = 1
if vdis.has_key(obj.SCSIid):
vdis[obj.SCSIid].numpaths += 1
vdis[obj.SCSIid].path += " [%s]" % key
elif obj.hba == 'mpp':
mppdict = _genMPPHBA(obj.adapter)
if mppdict.has_key(key):
item = mppdict[key]
adapters = ''
for i in item:
if len(adapters):
adapters += ', '
obj.numpaths += 1
adapters += i
if len(adapters):
obj.mpp = adapters
vdis[obj.SCSIid] = obj
else:
vdis[obj.SCSIid] = obj
for key in vdis:
obj = vdis[key]
d = dom.createElement("BlockDevice")
e.appendChild(d)
for attr in ['path','numpaths','SCSIid','vendor','serial','size','adapter','channel','id','lun','hba','mpp']:
try:
aval = getattr(obj, attr)
except AttributeError:
if attr in ['mpp']:
continue
raise xs_errors.XenError('InvalidArg', \
opterr='Missing required field [%s]' % attr)
entry = dom.createElement(attr)
d.appendChild(entry)
textnode = dom.createTextNode(str(aval))
entry.appendChild(textnode)
for key in hbas.iterkeys():
a = dom.createElement("Adapter")
e.appendChild(a)
entry = dom.createElement('host')
a.appendChild(entry)
textnode = dom.createTextNode(key)
entry.appendChild(textnode)
entry = dom.createElement('name')
a.appendChild(entry)
textnode = dom.createTextNode(hbas[key])
entry.appendChild(textnode)
entry = dom.createElement('manufacturer')
a.appendChild(entry)
textnode = dom.createTextNode(getManufacturer(hbas[key]))
entry.appendChild(textnode)
id = key.replace("host","")
entry = dom.createElement('id')
a.appendChild(entry)
textnode = dom.createTextNode(id)
entry.appendChild(textnode)
_add_host_parameters_to_adapter(dom, a, 'fc_host', id,
['node_name', 'port_name',
'port_state', 'speed',
'supported_speeds'])
_add_host_parameters_to_adapter(dom, a, 'iscsi_host', id,
['hwaddress', 'initiatorname',
'ipaddress', 'port_speed',
'port_state'])
return dom.toprettyxml()
def check_iscsi(adapter):
ret = False
str = "host%s" % adapter
try:
filename = os.path.join('/sys/class/scsi_host',str,'proc_name')
f = open(filename, 'r')
if f.readline().find("iscsi_tcp") != -1:
ret = True
except:
pass
return ret
def match_nonpartitions(s):
regex = re.compile("-part[0-9]")
if not regex.search(s, 0):
return True
| robertbreker/sm | drivers/devscan.py | Python | lgpl-2.1 | 14,406 |
'''OpenGL extension ARB.robustness_isolation
This module customises the behaviour of the
OpenGL.raw.GL.ARB.robustness_isolation module to provide a more
Python-friendly API
Overview (from the spec)
GL_ARB_robustness and supporting window system extensions allow
creating an OpenGL context supporting graphics reset notification
behavior. GL_ARB_robustness_isolation provides stronger
guarantees about the possible side-effects of a graphics reset.
It is expected that there may be a performance cost associated
with isolating an application or share group from other contexts
on the GPU. For this reason, GL_ARB_robustness_isolation is
phrased as an opt-in mechanism, with a new context creation bit
defined in the window system bindings. It is expected that
implementations might only advertise the strings in this extension
if both the implementation supports the desired isolation
properties, and the context was created with the appropriate reset
isolation bit.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/robustness_isolation.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.robustness_isolation import *
from OpenGL.raw.GL.ARB.robustness_isolation import _EXTENSION_NAME
def glInitRobustnessIsolationARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
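# A minimal usage sketch (hypothetical; creating and binding a GL context
# through a windowing toolkit is omitted -- the check must run with a
# context current):
#
#     if glInitRobustnessIsolationARB():
#         pass  # entry points defined by this extension may be used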
### END AUTOGENERATED SECTION
| stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/GL/ARB/robustness_isolation.py | Python | lgpl-3.0 | 1604 |
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import subprocess
import socket
from time import sleep
import tinctest
from tinctest.lib import local_path
from gppylib.commands.base import Command
from mpp.lib.config import GPDBConfig
from mpp.lib.PSQL import PSQL
from mpp.gpdb.tests.storage.walrepl.lib.pg_util import GpUtility
from mpp.gpdb.tests.storage.walrepl.gpinitstandby import GpinitStandby
from mpp.gpdb.tests.storage.walrepl.lib.verify import StandbyVerify
from mpp.gpdb.tests.storage.walrepl.lib.standby import Standby
config = GPDBConfig()
class OOMClass(object):
    '''Helper methods for the OOM test case.'''
standby_port = '5433'
standby_dirname = 'newstandby'
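    # Overall flow, as inferred from the methods below: build oom_malloc.so
    # and wrapper.sh, distribute them to every segment host, start the
    # cluster through the wrapper, touch the oom_malloc trigger file in the
    # standby directory so allocations there fail, verify the standby
    # processes die, then remove the trigger and confirm the standby
    # recovers.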
def __init__(self):
self.gpinit = GpinitStandby()
self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
self.config = GPDBConfig()
self.pgutil = GpUtility()
self.verify = StandbyVerify()
self.host = socket.gethostname()
self.standby_loc = os.path.join(os.path.split(self.mdd)[0],
self.standby_dirname)
self.standby = Standby(self.standby_loc, self.standby_port)
def create_standby(self):
self.pgutil.clean_dir(self.host,self.standby_loc)
self.gpinit.run(option = '-P %s -s %s -F pg_system:%s' % (self.standby_port, self.host, self.standby_loc))
def setup_oom(self):
# Build it before testing.
thisdir = os.path.dirname(__file__)
builddir = os.path.join(thisdir, 'lib')
subprocess.check_call(['make', '-C', builddir, 'install'])
#Copy oom_malloc.so and wrapper.sh to all the segment nodes
for host in config.get_hosts() :
if host.strip() == self.host :
continue
cmd = "gpssh -h %s -e 'mkdir -p %s'; scp %s/oom_malloc.so %s:%s/; scp %s/wrapper.sh %s:%s/" % (host.strip(), builddir, builddir, host.strip(), builddir, builddir, host.strip(), builddir)
self.pgutil.run(cmd)
def touch_malloc(self):
# Touch file oom_malloc in standby directory
cmd = 'touch %s/oom_malloc' % self.standby_loc
self.pgutil.run(cmd)
def startdb(self):
(rc, result) = self.pgutil.run('gpstart -a --wrapper %s' % (local_path('lib/wrapper.sh')))
if rc != 0 and 'Could not start standby master' in result :
return False
return True
def restartdb(self):
# Remove file oom_malloc from standby
cmd = 'rm %s/oom_malloc' % self.standby_loc
self.pgutil.run(cmd)
(rc, result) = self.pgutil.run('gpstop -ar')
if rc == 0 and (self.verify.check_pg_stat_replication()):
return True
return False
def psql_and_oom(self):
#Touch oom_malloc in standby_dir and issue PSQL : Check if processes are gone
self.touch_malloc()
PSQL.run_sql_command('Drop table if exists wal_oomt1;Create table wal_oomt1(a1 int, a2 text) with(appendonly=true);')
sleep(2)
if not (self.verify.check_standby_processes()):
return True
return False
def start_standby(self):
# Remove oom_malloc and start standby : Check if all processes are back
cmd = 'rm %s/oom_malloc' % self.standby_loc
self.pgutil.run(cmd)
res = self.standby.start()
sleep(2)
if (self.verify.check_standby_processes()) :
return True
return False
| edespino/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/oom/__init__.py | Python | apache-2.0 | 4,065 |
# Copyright 2013 Kylin OS, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
class LiveMigrateForm(forms.SelfHandlingForm):
current_host = forms.CharField(label=_("Current Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.ChoiceField(label=_("New Host"),
help_text=_("Choose a Host to migrate to."))
disk_over_commit = forms.BooleanField(label=_("Disk Over Commit"),
initial=False, required=False)
block_migration = forms.BooleanField(label=_("Block Migration"),
initial=False, required=False)
def __init__(self, request, *args, **kwargs):
super(LiveMigrateForm, self).__init__(request, *args, **kwargs)
initial = kwargs.get('initial', {})
instance_id = initial.get('instance_id')
self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
initial=instance_id)
self.fields['host'].choices = self.populate_host_choices(request,
initial)
def populate_host_choices(self, request, initial):
hosts = initial.get('hosts')
current_host = initial.get('current_host')
host_list = [(host.host_name,
host.host_name)
for host in hosts
if host.service.startswith('compute') and
host.host_name != current_host]
if host_list:
host_list.insert(0, ("", _("Select a new host")))
else:
host_list.insert(0, ("", _("No other hosts available.")))
return sorted(host_list)
def handle(self, request, data):
try:
block_migration = data['block_migration']
disk_over_commit = data['disk_over_commit']
api.nova.server_live_migrate(request,
data['instance_id'],
data['host'],
block_migration=block_migration,
disk_over_commit=disk_over_commit)
msg = _('The instance is preparing the live migration '
'to host "%s".') % data['host']
messages.success(request, msg)
return True
except Exception:
msg = _('Failed to live migrate instance to '
'host "%s".') % data['host']
redirect = reverse('horizon:admin:instances:index')
exceptions.handle(request, msg, redirect=redirect)
| zouyapeng/horizon-newtouch | openstack_dashboard/dashboards/admin/instances/forms.py | Python | apache-2.0 | 3,554 |
# Copyright 2011 Grid Dynamics
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from webob import exc
from nova.api.openstack.api_version_request \
import MAX_PROXY_API_SUPPORT_VERSION
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import networks as schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
from nova import network
from nova.objects import base as base_obj
from nova.objects import fields as obj_fields
from nova.policies import networks as net_policies
ALIAS = 'os-networks'
def network_dict(context, network):
fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2',
'cidr_v6', 'gateway_v6', 'label', 'netmask_v6')
admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted',
'injected', 'bridge', 'vlan', 'vpn_public_address',
'vpn_public_port', 'vpn_private_address', 'dhcp_start',
'project_id', 'host', 'bridge_interface', 'multi_host',
'priority', 'rxtx_base', 'mtu', 'dhcp_server',
'enable_dhcp', 'share_address')
if network:
# NOTE(mnaser): We display a limited set of fields so users can know
# what networks are available, extra system-only fields
# are only visible if they are an admin.
if context.is_admin:
fields += admin_fields
# TODO(mriedem): Remove the NovaObject type check once the
# network.create API is returning objects.
is_obj = isinstance(network, base_obj.NovaObject)
result = {}
for field in fields:
# NOTE(mriedem): If network is an object, IPAddress fields need to
# be cast to a string so they look the same in the response as
# before the objects conversion.
if is_obj and isinstance(network.fields[field].AUTO_TYPE,
obj_fields.IPAddress):
# NOTE(danms): Here, network should be an object, which could
# have come from neutron and thus be missing most of the
# attributes. Providing a default to get() avoids trying to
# lazy-load missing attributes.
val = network.get(field, None)
if val is not None:
result[field] = str(val)
else:
result[field] = val
else:
# It's either not an object or it's not an IPAddress field.
result[field] = network.get(field, None)
uuid = network.get('uuid')
if uuid:
result['id'] = uuid
return result
else:
return {}
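# For reference, a non-admin caller receives only the first tuple of fields
# above; a hypothetical payload might look like:
#
#     {'id': '20c8acc0-f747-4d71-a389-46d078ebf047',
#      'cidr': '10.0.0.0/24', 'netmask': '255.255.255.0',
#      'gateway': '10.0.0.1', 'broadcast': '10.0.0.255',
#      'dns1': None, 'dns2': None, 'cidr_v6': None, 'gateway_v6': None,
#      'label': 'mynet_0', 'netmask_v6': None}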
class NetworkController(wsgi.Controller):
def __init__(self, network_api=None):
self.network_api = network_api or network.API()
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors(())
def index(self, req):
context = req.environ['nova.context']
context.can(net_policies.POLICY_ROOT % 'view')
networks = self.network_api.get_all(context)
result = [network_dict(context, net_ref) for net_ref in networks]
return {'networks': result}
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@wsgi.response(202)
@extensions.expected_errors((404, 501))
@wsgi.action("disassociate")
def _disassociate_host_and_project(self, req, id, body):
context = req.environ['nova.context']
context.can(net_policies.BASE_POLICY_NAME)
try:
self.network_api.associate(context, id, host=None, project=None)
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
except NotImplementedError:
common.raise_feature_not_supported()
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
context.can(net_policies.POLICY_ROOT % 'view')
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
return {'network': network_dict(context, network)}
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@wsgi.response(202)
@extensions.expected_errors((404, 409))
def delete(self, req, id):
context = req.environ['nova.context']
context.can(net_policies.BASE_POLICY_NAME)
try:
self.network_api.delete(context, id)
except exception.NetworkInUse as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors((400, 409, 501))
@validation.schema(schema.create)
def create(self, req, body):
context = req.environ['nova.context']
context.can(net_policies.BASE_POLICY_NAME)
params = body["network"]
cidr = params.get("cidr") or params.get("cidr_v6")
params["num_networks"] = 1
params["network_size"] = netaddr.IPNetwork(cidr).size
try:
network = self.network_api.create(context, **params)[0]
except (exception.InvalidCidr,
exception.InvalidIntValue,
exception.InvalidAddress,
exception.NetworkNotCreated) as ex:
            raise exc.HTTPBadRequest(explanation=ex.format_message())
except (exception.CidrConflict,
exception.DuplicateVlan) as ex:
raise exc.HTTPConflict(explanation=ex.format_message())
return {"network": network_dict(context, network)}
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@wsgi.response(202)
@extensions.expected_errors((400, 501))
@validation.schema(schema.add_network_to_project)
def add(self, req, body):
context = req.environ['nova.context']
context.can(net_policies.BASE_POLICY_NAME)
network_id = body['id']
project_id = context.project_id
try:
self.network_api.add_network_to_project(
context, project_id, network_id)
except NotImplementedError:
common.raise_feature_not_supported()
except (exception.NoMoreNetworks,
exception.NetworkNotFoundForUUID) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
class Networks(extensions.V21APIExtensionBase):
"""Admin-only Network Management Extension."""
name = "Networks"
alias = ALIAS
version = 1
def get_resources(self):
member_actions = {'action': 'POST'}
collection_actions = {'add': 'POST'}
res = extensions.ResourceExtension(
ALIAS, NetworkController(),
member_actions=member_actions,
collection_actions=collection_actions)
return [res]
def get_controller_extensions(self):
return []
| hanlind/nova | nova/api/openstack/compute/networks.py | Python | apache-2.0 | 8,024 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
class _CustomMapping(collections_abc.Mapping):
def __init__(self, *args, **kwargs):
self._wrapped = dict(*args, **kwargs)
def __getitem__(self, key):
return self._wrapped[key]
def __iter__(self):
return iter(self._wrapped)
def __len__(self):
return len(self._wrapped)
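# _CustomMapping is a deliberately minimal Mapping implementation; the
# parameterized tests below use it to check that nest treats any
# collections_abc.Mapping subclass the same way it treats plain dicts.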
class _CustomSequenceThatRaisesException(collections.Sequence):
def __len__(self):
return 1
def __getitem__(self, item):
raise ValueError("Cannot get item: %s" % item)
class NestTest(parameterized.TestCase, test.TestCase):
PointXY = collections.namedtuple("Point", ["x", "y"]) # pylint: disable=invalid-name
unsafe_map_pattern = ("nest cannot guarantee that it is safe to map one to "
"the other.")
bad_pack_pattern = ("Attempted to pack value:\n .+\ninto a sequence, but "
"found incompatible type `<(type|class) 'str'>` instead.")
if attr:
class BadAttr(object):
"""Class that has a non-iterable __attrs_attrs__."""
__attrs_attrs__ = None
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
@attr.s
class UnsortedSampleAttr(object):
field3 = attr.ib()
field1 = attr.ib()
field2 = attr.ib()
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsFlattenAndPack(self):
if attr is None:
self.skipTest("attr module is unavailable.")
field_values = [1, 2]
sample_attr = NestTest.SampleAttr(*field_values)
self.assertFalse(nest._is_attrs(field_values))
self.assertTrue(nest._is_attrs(sample_attr))
flat = nest.flatten(sample_attr)
self.assertEqual(field_values, flat)
restructured_from_flat = nest.pack_sequence_as(sample_attr, flat)
self.assertIsInstance(restructured_from_flat, NestTest.SampleAttr)
self.assertEqual(restructured_from_flat, sample_attr)
# Check that flatten fails if attributes are not iterable
with self.assertRaisesRegexp(TypeError, "object is not iterable"):
flat = nest.flatten(NestTest.BadAttr())
@parameterized.parameters(
{"values": [1, 2, 3]},
{"values": [{"B": 10, "A": 20}, [1, 2], 3]},
{"values": [(1, 2), [3, 4], 5]},
{"values": [PointXY(1, 2), 3, 4]},
)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsMapStructure(self, values):
if attr is None:
self.skipTest("attr module is unavailable.")
structure = NestTest.UnsortedSampleAttr(*values)
new_structure = nest.map_structure(lambda x: x, structure)
self.assertEqual(structure, new_structure)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
structure = (NestTest.PointXY(x=4, y=2),
((NestTest.PointXY(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(
ValueError, self.unsafe_map_pattern):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, self.bad_pack_pattern):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenDictOrder(self, mapping_type):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
def testPackDictOrder(self, mapping_type):
"""Packing orders dicts by key, including OrderedDicts."""
custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
custom_reconstruction = nest.pack_sequence_as(custom, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertIsInstance(custom_reconstruction, mapping_type)
self.assertIsInstance(plain_reconstruction, dict)
self.assertEqual(
mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
custom_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPackMappingViews(self):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
# test flattening
ordered_keys_flat = nest.flatten(ordered.keys())
ordered_values_flat = nest.flatten(ordered.values())
ordered_items_flat = nest.flatten(ordered.items())
self.assertEqual([3, 1, 0, 2], ordered_values_flat)
self.assertEqual(["d", "b", "a", "c"], ordered_keys_flat)
self.assertEqual(["d", 3, "b", 1, "a", 0, "c", 2], ordered_items_flat)
# test packing
self.assertEqual([("d", 3), ("b", 1), ("a", 0), ("c", 2)],
nest.pack_sequence_as(ordered.items(), ordered_items_flat))
Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack_withDicts(self):
# A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
mess = [
"z",
NestTest.Abc(3, 4), {
"d": _CustomMapping({
41: 4
}),
"c": [
1,
collections.OrderedDict([
("b", 3),
("a", 2),
]),
],
"b": 5
}, 17
]
flattened = nest.flatten(mess)
self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 4, 17])
structure_of_mess = [
14,
NestTest.Abc("a", True),
{
"d": _CustomMapping({
41: 42
}),
"c": [
0,
collections.OrderedDict([
("b", 9),
("a", 8),
]),
],
"b": 3
},
"hi everybody",
]
unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
self.assertEqual(unflattened, mess)
# Check also that the OrderedDict was created, with the correct key order.
unflattened_ordered_dict = unflattened[2]["c"][1]
self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
unflattened_custom_mapping = unflattened[2]["d"]
self.assertIsInstance(unflattened_custom_mapping, _CustomMapping)
self.assertEqual(list(unflattened_custom_mapping.keys()), [41])
def testFlatten_numpyIsNotFlattened(self):
structure = np.array([1, 2, 3])
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
def testFlatten_stringIsNotFlattened(self):
structure = "lots of letters"
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
unflattened = nest.pack_sequence_as("goodbye", flattened)
self.assertEqual(structure, unflattened)
def testPackSequenceAs_notIterableError(self):
with self.assertRaisesRegexp(
TypeError, self.bad_pack_pattern):
nest.pack_sequence_as("hi", "bye")
def testPackSequenceAs_wrongLengthsError(self):
with self.assertRaisesRegexp(
ValueError,
"Structure had 2 elements, but flat_sequence had 3 elements."):
nest.pack_sequence_as(["hello", "world"],
["and", "goodbye", "again"])
@test_util.assert_no_new_pyobjects_executing_eagerly
def testIsNested(self):
self.assertFalse(nest.is_nested("1234"))
self.assertTrue(nest.is_nested([1, 3, [4, 5]]))
self.assertTrue(nest.is_nested(((7, 8), (5, 6))))
self.assertTrue(nest.is_nested([]))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}.keys()))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}.values()))
self.assertTrue(nest.is_nested({"a": 1, "b": 2}.items()))
self.assertFalse(nest.is_nested(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_nested(ones))
self.assertFalse(nest.is_nested(math_ops.tanh(ones)))
self.assertFalse(nest.is_nested(np.ones((4, 5))))
@parameterized.parameters({"mapping_type": _CustomMapping},
{"mapping_type": dict})
def testFlattenDictItems(self, mapping_type):
dictionary = mapping_type({(4, 5, (6, 8)): ("a", "b", ("c", "d"))})
flat = {4: "a", 5: "b", 6: "c", 8: "d"}
self.assertEqual(nest.flatten_dict_items(dictionary), flat)
with self.assertRaises(TypeError):
nest.flatten_dict_items(4)
bad_dictionary = mapping_type({(4, 5, (4, 8)): ("a", "b", ("c", "d"))})
with self.assertRaisesRegexp(ValueError, "not unique"):
nest.flatten_dict_items(bad_dictionary)
another_bad_dictionary = mapping_type({
(4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))
})
with self.assertRaisesRegexp(
ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"):
nest.flatten_dict_items(another_bad_dictionary)
  # pylint does not correctly recognize these as class names and
  # suggests using variable-style under_score naming.
# pylint: disable=invalid-name
Named0ab = collections.namedtuple("named_0", ("a", "b"))
Named1ab = collections.namedtuple("named_1", ("a", "b"))
SameNameab = collections.namedtuple("same_name", ("a", "b"))
SameNameab2 = collections.namedtuple("same_name", ("a", "b"))
SameNamexy = collections.namedtuple("same_name", ("x", "y"))
SameName1xy = collections.namedtuple("same_name_1", ("x", "y"))
SameName1xy2 = collections.namedtuple("same_name_1", ("x", "y"))
NotSameName = collections.namedtuple("not_same_name", ("a", "b"))
# pylint: enable=invalid-name
class SameNamedType1(SameNameab):
pass
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAssertSameStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
structure_different_num_elements = ("spam", "eggs")
structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
nest.assert_same_structure(structure1, structure2)
nest.assert_same_structure("abc", 1.0)
nest.assert_same_structure("abc", np.array([0, 1]))
nest.assert_same_structure("abc", constant_op.constant([0, 1]))
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
"More specifically: Substructure "
r'"type=tuple str=\(\(1, 2\), 3\)" is a sequence, while '
'substructure "type=str str=spam" is not\n'
"Entire first structure:\n"
r"\(\(\(\., \.\), \.\), \., \(\., \.\)\)\n"
"Entire second structure:\n"
r"\(\., \.\)")):
nest.assert_same_structure(structure1, structure_different_num_elements)
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
r'More specifically: Substructure "type=list str=\[0, 1\]" '
r'is a sequence, while substructure "type=ndarray str=\[0 1\]" '
"is not")):
nest.assert_same_structure([0, 1], np.array([0, 1]))
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
r'More specifically: Substructure "type=list str=\[0, 1\]" '
'is a sequence, while substructure "type=int str=0" '
"is not")):
nest.assert_same_structure(0, [0, 1])
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1])
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure(structure1, structure_different_nesting)
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
NestTest.Named0ab("a", "b"))
nest.assert_same_structure(NestTest.Named0ab(3, 4),
NestTest.Named0ab("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.Named0ab(3, 4), NestTest.Named1ab(3, 4))
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure(NestTest.Named0ab(3, 4),
NestTest.Named0ab([3], 4))
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure([[3], 4], [3, [4]])
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError,
"don't have the same sequence type"):
nest.assert_same_structure(structure1, structure1_list)
nest.assert_same_structure(structure1, structure2, check_types=False)
nest.assert_same_structure(structure1, structure1_list, check_types=False)
with self.assertRaisesRegexp(ValueError,
"don't have the same set of keys"):
nest.assert_same_structure({"a": 1}, {"b": 1})
nest.assert_same_structure(NestTest.SameNameab(0, 1),
NestTest.SameNameab2(2, 3))
# This assertion is expected to pass: two namedtuples with the same
# name and field names are considered to be identical.
nest.assert_same_structure(
NestTest.SameNameab(NestTest.SameName1xy(0, 1), 2),
NestTest.SameNameab2(NestTest.SameName1xy2(2, 3), 4))
expected_message = "The two structures don't have the same.*"
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_same_structure(
NestTest.SameNameab(0, NestTest.SameNameab2(1, 2)),
NestTest.SameNameab2(NestTest.SameNameab(0, 1), 2))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.NotSameName(2, 3))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.SameNamexy(2, 3))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.SameNamedType1(2, 3))
EmptyNT = collections.namedtuple("empty_nt", "") # pylint: disable=invalid-name
def testHeterogeneousComparison(self):
nest.assert_same_structure({"a": 4}, _CustomMapping(a=3))
nest.assert_same_structure(_CustomMapping(b=3), {"b": 4})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = (((7, 8), 9), 10, (11, 12))
structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual(
[2, 3, 4, 5, 6, 7],
nest.flatten(structure1_plus1))
structure1_plus_structure2 = nest.map_structure(
lambda x, y: x + y, structure1, structure2)
self.assertEqual(
(((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
structure1_plus_structure2)
self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
structure3 = collections.defaultdict(list)
structure3["a"] = [1, 2, 3, 4]
structure3["b"] = [2, 3, 4, 5]
expected_structure3 = collections.defaultdict(list)
expected_structure3["a"] = [2, 3, 4, 5]
expected_structure3["b"] = [3, 4, 5, 6]
self.assertEqual(expected_structure3,
nest.map_structure(lambda x: x + 1, structure3))
# Empty structures
self.assertEqual((), nest.map_structure(lambda x: x + 1, ()))
self.assertEqual([], nest.map_structure(lambda x: x + 1, []))
self.assertEqual({}, nest.map_structure(lambda x: x + 1, {}))
self.assertEqual(NestTest.EmptyNT(), nest.map_structure(lambda x: x + 1,
NestTest.EmptyNT()))
# This is checking actual equality of types, empty list != empty tuple
self.assertNotEqual((), nest.map_structure(lambda x: x + 1, []))
with self.assertRaisesRegexp(TypeError, "callable"):
nest.map_structure("bad", structure1_plus1)
with self.assertRaisesRegexp(ValueError, "at least one structure"):
nest.map_structure(lambda x: x)
with self.assertRaisesRegexp(ValueError, "same number of elements"):
nest.map_structure(lambda x, y: None, (3, 4), (3, 4, 5))
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, 3, (3,))
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5])
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, structure1, structure1_list)
nest.map_structure(lambda x, y: None, structure1, structure1_list,
check_types=False)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
check_types=False)
with self.assertRaisesRegexp(ValueError,
"Only valid keyword argument.*foo"):
nest.map_structure(lambda x: None, structure1, foo="a")
with self.assertRaisesRegexp(ValueError,
"Only valid keyword argument.*foo"):
nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
ABTuple = collections.namedtuple("ab_tuple", "a, b") # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructureWithStrings(self):
inp_a = NestTest.ABTuple(a="foo", b=("bar", "baz"))
inp_b = NestTest.ABTuple(a=2, b=(1, 3))
out = nest.map_structure(lambda string, repeats: string * repeats,
inp_a,
inp_b)
self.assertEqual("foofoo", out.a)
self.assertEqual("bar", out.b[0])
self.assertEqual("bazbazbaz", out.b[1])
nt = NestTest.ABTuple(a=("something", "something_else"),
b="yet another thing")
rev_nt = nest.map_structure(lambda x: x[::-1], nt)
# Check the output is the correct structure, and all strings are reversed.
nest.assert_same_structure(nt, rev_nt)
self.assertEqual(nt.a[0][::-1], rev_nt.a[0])
self.assertEqual(nt.a[1][::-1], rev_nt.a[1])
self.assertEqual(nt.b[::-1], rev_nt.b)
@test_util.run_deprecated_v1
def testMapStructureOverPlaceholders(self):
inp_a = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
array_ops.placeholder(dtypes.float32, shape=[3, 7]))
inp_b = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
array_ops.placeholder(dtypes.float32, shape=[3, 7]))
output = nest.map_structure(lambda x1, x2: x1 + x2, inp_a, inp_b)
nest.assert_same_structure(output, inp_a)
self.assertShapeEqual(np.zeros((3, 4)), output[0])
self.assertShapeEqual(np.zeros((3, 7)), output[1])
feed_dict = {
inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)),
inp_b: (np.random.randn(3, 4), np.random.randn(3, 7))
}
with self.cached_session() as sess:
output_np = sess.run(output, feed_dict=feed_dict)
self.assertAllClose(output_np[0],
feed_dict[inp_a][0] + feed_dict[inp_b][0])
self.assertAllClose(output_np[1],
feed_dict[inp_a][1] + feed_dict[inp_b][1])
def testAssertShallowStructure(self):
inp_ab = ["a", "b"]
inp_abc = ["a", "b", "c"]
with self.assertRaisesWithLiteralMatch( # pylint: disable=g-error-prone-assert-raises
ValueError,
nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(inp_ab),
shallow_length=len(inp_abc))):
nest.assert_shallow_structure(inp_abc, inp_ab)
inp_ab1 = [(1, 1), (2, 2)]
inp_ab2 = [[1, 1], [2, 2]]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._STRUCTURES_HAVE_MISMATCHING_TYPES.format(
shallow_type=type(inp_ab2[0]),
input_type=type(inp_ab1[0]))):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
with self.assertRaisesWithLiteralMatch(
ValueError,
nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["d"])):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
nest.assert_shallow_structure(inp_ab, inp_ba)
# This assertion is expected to pass: two namedtuples with the same
# name and field names are considered to be identical.
inp_shallow = NestTest.SameNameab(1, 2)
inp_deep = NestTest.SameNameab2(1, [1, 2, 3])
nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False)
nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True)
def testFlattenUpTo(self):
# Shallow tree ends at scalar.
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
# Shallow tree ends at string.
input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
# Make sure dicts are correctly flattened, yielding values, not keys.
input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[1, {"c": 2}, 3, (4, 5)])
# Namedtuples.
ab_tuple = NestTest.ABTuple
input_tree = ab_tuple(a=[0, 1], b=2)
shallow_tree = ab_tuple(a=0, b=1)
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[[0, 1], 2])
# Nested dicts, OrderedDicts and namedtuples.
input_tree = collections.OrderedDict(
[("a", ab_tuple(a=[0, {"b": 1}], b=2)),
("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
shallow_tree = input_tree
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
3,
collections.OrderedDict([("f", 4)])])
shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
{"d": 3, "e": collections.OrderedDict([("f", 4)])}])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ["input_tree_0", "input_tree_1"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = [0]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = [0, 1]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ["shallow_tree"]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'str'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = "input_tree"
shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
# Using non-iterable elements.
input_tree = 0
shallow_tree = [9]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'int'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = 0
shallow_tree = [9, 8]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = [(1,), (2,), 3]
shallow_tree = [(1,), (2,)]
expected_message = nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree), shallow_length=len(shallow_tree))
with self.assertRaisesRegexp(ValueError, expected_message): # pylint: disable=g-error-prone-assert-raises
nest.assert_shallow_structure(shallow_tree, input_tree)
def testFlattenWithTuplePathsUpTo(self):
def get_paths_and_values(shallow_tree, input_tree):
path_value_pairs = nest.flatten_with_tuple_paths_up_to(
shallow_tree, input_tree)
paths = [p for p, _ in path_value_pairs]
values = [v for _, v in path_value_pairs]
return paths, values
# Shallow tree ends at scalar.
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths,
[(0, 0), (0, 1), (1, 0), (1, 1)])
self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
self.assertEqual(flattened_shallow_tree_paths,
[(0, 0), (0, 1), (1, 0), (1, 1)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
# Shallow tree ends at string.
input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
input_tree_flattened_paths = [p for p, _ in
nest.flatten_with_tuple_paths(input_tree)]
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[(0, 0), (0, 1, 0), (0, 1, 1, 0), (0, 1, 1, 1, 0)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened_paths,
[(0, 0, 0), (0, 0, 1),
(0, 1, 0, 0), (0, 1, 0, 1),
(0, 1, 1, 0, 0), (0, 1, 1, 0, 1),
(0, 1, 1, 1, 0, 0), (0, 1, 1, 1, 0, 1)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
# Make sure dicts are correctly flattened, yielding values, not keys.
input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",), ("b",), ("d", 0), ("d", 1)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[1, {"c": 2}, 3, (4, 5)])
# Namedtuples.
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
input_tree = ab_tuple(a=[0, 1], b=2)
shallow_tree = ab_tuple(a=0, b=1)
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",), ("b",)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[[0, 1], 2])
# Nested dicts, OrderedDicts and namedtuples.
input_tree = collections.OrderedDict(
[("a", ab_tuple(a=[0, {"b": 1}], b=2)),
("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
shallow_tree = input_tree
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a", "a", 0),
("a", "a", 1, "b"),
("a", "b"),
("c", "d"),
("c", "e", "f")])
self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",),
("c", "d"),
("c", "e")])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
3,
collections.OrderedDict([("f", 4)])])
shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
(input_tree_flattened_as_shallow_tree_paths,
input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
[("a",), ("c",)])
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
{"d": 3, "e": collections.OrderedDict([("f", 4)])}])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ["input_tree_0", "input_tree_1"]
shallow_tree = "shallow_tree"
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Test case where len(shallow_tree) < len(input_tree)
input_tree = {"a": "A", "b": "B", "c": "C"}
shallow_tree = {"a": 1, "c": 2}
with self.assertRaisesWithLiteralMatch( # pylint: disable=g-error-prone-assert-raises
ValueError,
nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree),
shallow_length=len(shallow_tree))):
get_paths_and_values(shallow_tree, input_tree)
# Using non-iterable elements.
input_tree = [0]
shallow_tree = 9
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = [0, 1]
shallow_tree = 9
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree_paths, [()])
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree_paths, [()])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ["shallow_tree"]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = "input_tree"
shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
# Using non-iterable elements.
input_tree = 0
shallow_tree = [9]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = 0
shallow_tree = [9, 8]
with self.assertRaisesWithLiteralMatch(
TypeError,
nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
(flattened_input_tree_paths,
flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
(flattened_shallow_tree_paths,
flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
self.assertEqual(flattened_shallow_tree, shallow_tree)
def testMapStructureUpTo(self):
# Named tuples.
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
self.assertEqual(out.a, 6)
self.assertEqual(out.b, 15)
# Lists.
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ["evens", ["odds", "primes"]]
out = nest.map_structure_up_to(
name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
self.assertEqual(out, ["first_4_evens", ["first_5_odds", "first_3_primes"]])
# Dicts.
inp_val = dict(a=2, b=3)
inp_ops = dict(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
self.assertEqual(out["a"], 6)
self.assertEqual(out["b"], 15)
# Non-equal dicts.
inp_val = dict(a=2, b=3)
inp_ops = dict(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
with self.assertRaisesWithLiteralMatch(
ValueError,
nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])):
nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
# Dict+custom mapping.
inp_val = dict(a=2, b=3)
inp_ops = _CustomMapping(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
self.assertEqual(out["a"], 6)
self.assertEqual(out["b"], 15)
# Non-equal dict/mapping.
inp_val = dict(a=2, b=3)
inp_ops = _CustomMapping(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
with self.assertRaisesWithLiteralMatch(
ValueError,
nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])):
nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
def testGetTraverseShallowStructure(self):
scalar_traverse_input = [3, 4, (1, 2, [0]), [5, 6], {"a": (7,)}, []]
scalar_traverse_r = nest.get_traverse_shallow_structure(
lambda s: not isinstance(s, tuple),
scalar_traverse_input)
self.assertEqual(scalar_traverse_r,
[True, True, False, [True, True], {"a": False}, []])
nest.assert_shallow_structure(scalar_traverse_r,
scalar_traverse_input)
structure_traverse_input = [(1, [2]), ([1], 2)]
structure_traverse_r = nest.get_traverse_shallow_structure(
lambda s: (True, False) if isinstance(s, tuple) else True,
structure_traverse_input)
self.assertEqual(structure_traverse_r,
[(True, False), ([True], False)])
nest.assert_shallow_structure(structure_traverse_r,
structure_traverse_input)
with self.assertRaisesRegexp(TypeError, "returned structure"):
nest.get_traverse_shallow_structure(lambda _: [True], 0)
with self.assertRaisesRegexp(TypeError, "returned a non-bool scalar"):
nest.get_traverse_shallow_structure(lambda _: 1, [1])
with self.assertRaisesRegexp(
TypeError, "didn't return a depth=1 structure of bools"):
nest.get_traverse_shallow_structure(lambda _: [1], [1])
def testYieldFlatStringPaths(self):
for inputs_expected in ({"inputs": [], "expected": []},
{"inputs": 3, "expected": [()]},
{"inputs": [3], "expected": [(0,)]},
{"inputs": {"a": 3}, "expected": [("a",)]},
{"inputs": {"a": {"b": 4}},
"expected": [("a", "b")]},
{"inputs": [{"a": 2}], "expected": [(0, "a")]},
{"inputs": [{"a": [2]}], "expected": [(0, "a", 0)]},
{"inputs": [{"a": [(23, 42)]}],
"expected": [(0, "a", 0, 0), (0, "a", 0, 1)]},
{"inputs": [{"a": ([23], 42)}],
"expected": [(0, "a", 0, 0), (0, "a", 1)]},
{"inputs": {"a": {"a": 2}, "c": [[[4]]]},
"expected": [("a", "a"), ("c", 0, 0, 0)]},
{"inputs": {"0": [{"1": 23}]},
"expected": [("0", 0, "1")]}):
inputs = inputs_expected["inputs"]
expected = inputs_expected["expected"]
self.assertEqual(list(nest.yield_flat_paths(inputs)), expected)
# We cannot define namedtuples within @parameterized argument lists.
# pylint: disable=invalid-name
Foo = collections.namedtuple("Foo", ["a", "b"])
Bar = collections.namedtuple("Bar", ["c", "d"])
# pylint: enable=invalid-name
@parameterized.parameters([
dict(inputs=[], expected=[]),
dict(inputs=[23, "42"], expected=[("0", 23), ("1", "42")]),
dict(inputs=[[[[108]]]], expected=[("0/0/0/0", 108)]),
dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
expected=[("a", 3), ("b/c", 23), ("b/d", 42)]),
dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
expected=[("a/c", 23), ("a/d", 42), ("b/c", 0), ("b/d", "thing")]),
dict(inputs=Bar(c=42, d=43),
expected=[("c", 42), ("d", 43)]),
dict(inputs=Bar(c=[42], d=43),
expected=[("c/0", 42), ("d", 43)]),
])
def testFlattenWithStringPaths(self, inputs, expected):
self.assertEqual(
nest.flatten_with_joined_string_paths(inputs, separator="/"),
expected)
@parameterized.parameters([
dict(inputs=[], expected=[]),
dict(inputs=[23, "42"], expected=[((0,), 23), ((1,), "42")]),
dict(inputs=[[[[108]]]], expected=[((0, 0, 0, 0), 108)]),
dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
expected=[(("a",), 3), (("b", "c"), 23), (("b", "d"), 42)]),
dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
expected=[(("a", "c"), 23), (("a", "d"), 42), (("b", "c"), 0),
(("b", "d"), "thing")]),
dict(inputs=Bar(c=42, d=43),
expected=[(("c",), 42), (("d",), 43)]),
dict(inputs=Bar(c=[42], d=43),
expected=[(("c", 0), 42), (("d",), 43)]),
])
def testFlattenWithTuplePaths(self, inputs, expected):
self.assertEqual(nest.flatten_with_tuple_paths(inputs), expected)
@parameterized.named_parameters(
("tuples", (1, 2), (3, 4), True, (("0", 4), ("1", 6))),
("dicts", {"a": 1, "b": 2}, {"b": 4, "a": 3}, True,
{"a": ("a", 4), "b": ("b", 6)}),
("mixed", (1, 2), [3, 4], False, (("0", 4), ("1", 6))),
("nested",
{"a": [2, 3], "b": [1, 2, 3]}, {"b": [5, 6, 7], "a": [8, 9]}, True,
{"a": [("a/0", 10), ("a/1", 12)],
"b": [("b/0", 6), ("b/1", 8), ("b/2", 10)]}))
def testMapWithPathsCompatibleStructures(self, s1, s2, check_types, expected):
def format_sum(path, *values):
return (path, sum(values))
result = nest.map_structure_with_paths(format_sum, s1, s2,
check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters(
("tuples", (1, 2, 3), (4, 5), ValueError),
("dicts", {"a": 1}, {"b": 2}, ValueError),
("mixed", (1, 2), [3, 4], TypeError),
("nested",
{"a": [2, 3, 4], "b": [1, 3]},
{"b": [5, 6], "a": [8, 9]},
ValueError
))
def testMapWithPathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_paths(lambda path, *s: 0, s1, s2)
@parameterized.named_parameters([
dict(testcase_name="Tuples", s1=(1, 2), s2=(3, 4),
check_types=True, expected=(((0,), 4), ((1,), 6))),
dict(testcase_name="Dicts", s1={"a": 1, "b": 2}, s2={"b": 4, "a": 3},
check_types=True, expected={"a": (("a",), 4), "b": (("b",), 6)}),
dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4],
check_types=False, expected=(((0,), 4), ((1,), 6))),
dict(testcase_name="Nested",
s1={"a": [2, 3], "b": [1, 2, 3]},
s2={"b": [5, 6, 7], "a": [8, 9]},
check_types=True,
expected={"a": [(("a", 0), 10), (("a", 1), 12)],
"b": [(("b", 0), 6), (("b", 1), 8), (("b", 2), 10)]}),
])
def testMapWithTuplePathsCompatibleStructures(
self, s1, s2, check_types, expected):
def path_and_sum(path, *values):
return path, sum(values)
result = nest.map_structure_with_tuple_paths(
path_and_sum, s1, s2, check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters([
dict(testcase_name="Tuples", s1=(1, 2, 3), s2=(4, 5),
error_type=ValueError),
dict(testcase_name="Dicts", s1={"a": 1}, s2={"b": 2},
error_type=ValueError),
dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4], error_type=TypeError),
dict(testcase_name="Nested",
s1={"a": [2, 3, 4], "b": [1, 3]},
s2={"b": [5, 6], "a": [8, 9]},
error_type=ValueError)
])
def testMapWithTuplePathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_tuple_paths(lambda path, *s: 0, s1, s2)
def testFlattenCustomSequenceThatRaisesException(self): # b/140746865
seq = _CustomSequenceThatRaisesException()
with self.assertRaisesRegexp(ValueError, "Cannot get item"):
nest.flatten(seq)
def testListToTuple(self):
input_sequence = [1, (2, {3: [4, 5, (6,)]}, None, 7, [[[8]]])]
expected = (1, (2, {3: (4, 5, (6,))}, None, 7, (((8,),),)))
nest.assert_same_structure(
nest.list_to_tuple(input_sequence),
expected,
)
class NestBenchmark(test.Benchmark):
def run_and_report(self, s1, s2, name):
burn_iter, test_iter = 100, 30000
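    # Burn-in first so one-time setup costs do not skew the timed loop.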
for _ in xrange(burn_iter):
nest.assert_same_structure(s1, s2)
t0 = time.time()
for _ in xrange(test_iter):
nest.assert_same_structure(s1, s2)
t1 = time.time()
self.report_benchmark(iters=test_iter, wall_time=(t1 - t0) / test_iter,
name=name)
def benchmark_assert_structure(self):
s1 = (((1, 2), 3), 4, (5, 6))
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
self.run_and_report(s1, s2, "assert_same_structure_6_elem")
s1 = (((1, 2), 3), 4, (5, 6)) * 10
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) * 10
self.run_and_report(s1, s2, "assert_same_structure_60_elem")
if __name__ == "__main__":
test.main()
| gunan/tensorflow | tensorflow/python/util/nest_test.py | Python | apache-2.0 | 54,367 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-09 17:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0048_merge_20170804_0910'),
]
operations = [
migrations.AddField(
model_name='preprintprovider',
name='preprint_word',
field=models.CharField(choices=[('preprint', 'Preprint'), ('paper', 'Paper'), ('thesis', 'Thesis'), ('none', 'None')], default='preprint', max_length=10),
),
]
| mfraezz/osf.io | osf/migrations/0049_preprintprovider_preprint_word.py | Python | apache-2.0 | 579 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module is deprecated.
Please use :mod:`airflow.providers.amazon.aws.transfers.imap_attachment_to_s3`.
"""
import warnings
from airflow.providers.amazon.aws.transfers.imap_attachment_to_s3 import ImapAttachmentToS3Operator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.imap_attachment_to_s3`.",
DeprecationWarning,
stacklevel=2,
)
| apache/incubator-airflow | airflow/contrib/operators/imap_attachment_to_s3_operator.py | Python | apache-2.0 | 1,197 |
from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from wagtail.core.models import Page
from wagtail.tests.utils import WagtailTestUtils
class TestWorkflowHistoryDetail(TestCase, WagtailTestUtils):
fixtures = ['test.json']
def setUp(self):
self.user = self.create_test_user()
self.login(self.user)
self.christmas_event = Page.objects.get(url_path='/home/events/christmas/')
self.christmas_event.save_revision()
workflow = self.christmas_event.get_workflow()
self.workflow_state = workflow.start(self.christmas_event, self.user)
def test_get_index(self):
response = self.client.get(
reverse('wagtailadmin_pages:workflow_history', args=[self.christmas_event.id])
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, reverse('wagtailadmin_pages:edit', args=[self.christmas_event.id]))
self.assertContains(response, reverse('wagtailadmin_pages:workflow_history_detail', args=[self.christmas_event.id, self.workflow_state.id]))
def test_get_index_with_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
response = self.client.get(
reverse('wagtailadmin_pages:workflow_history', args=[self.christmas_event.id])
)
self.assertEqual(response.status_code, 302)
def test_get_detail(self):
response = self.client.get(
reverse('wagtailadmin_pages:workflow_history_detail', args=[self.christmas_event.id, self.workflow_state.id])
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, reverse('wagtailadmin_pages:edit', args=[self.christmas_event.id]))
self.assertContains(response, reverse('wagtailadmin_pages:workflow_history', args=[self.christmas_event.id]))
def test_get_detail_with_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
response = self.client.get(
reverse('wagtailadmin_pages:workflow_history_detail', args=[self.christmas_event.id, self.workflow_state.id])
)
self.assertEqual(response.status_code, 302)
| kaedroho/wagtail | wagtail/admin/tests/pages/test_workflow_history.py | Python | bsd-3-clause | 2,629 |
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot, row_norms
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
    smallest value of the scores dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
    See ``scipy.stats.f_oneway``, which should give the same results while
    being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
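    Examples
    --------
    A minimal sketch with three small groups (illustrative only; assumes
    this module-level ``f_oneway`` is in scope, and omits the exact F and
    p values):

    >>> import numpy as np
    >>> g1 = np.array([[1.0], [2.0], [3.0]])
    >>> g2 = np.array([[2.0], [3.0], [4.0]])
    >>> g3 = np.array([[5.0], [6.0], [7.0]])
    >>> f_val, p_val = f_oneway(g1, g2, g3)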
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
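    # One-way ANOVA decomposition: the total sum of squares (sstot) is
    # split into between-class (ssbn) and within-class (sswn) parts.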
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
The set of regressors that will be tested sequentially.
    y : array of shape(n_samples)
        The target vector (class labels).
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
See also
--------
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
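    Examples
    --------
    A minimal usage sketch (illustrative only; assumes the bundled iris
    dataset):

    >>> from sklearn.datasets import load_iris
    >>> from sklearn.feature_selection import f_classif
    >>> iris = load_iris()
    >>> F, pval = f_classif(iris.data, iris.target)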
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
f_regression: F-value between label/feature for regression tasks.
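    Examples
    --------
    A minimal sketch on non-negative count-like data (illustrative only;
    assumes the bundled digits dataset):

    >>> from sklearn.datasets import load_digits
    >>> from sklearn.feature_selection import chi2
    >>> digits = load_digits()
    >>> chi2_stats, p_values = chi2(digits.data, digits.target)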
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
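    # LabelBinarizer yields a single column for binary targets; append the
    # complementary column so every class has an explicit indicator.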
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
def f_regression(X, y, center=True):
"""Univariate linear regression tests.
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 2 steps:
1. The cross correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
    y : array of shape(n_samples)
        The target vector.
    center : bool, optional, default=True
        If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
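    Examples
    --------
    A minimal sketch on a toy problem where only the first feature is
    informative (illustrative only):

    >>> import numpy as np
    >>> from sklearn.feature_selection import f_regression
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(50, 3)
    >>> y = X[:, 0] + 0.1 * rng.randn(50)
    >>> F, pval = f_regression(X, y)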
"""
if issparse(X) and center:
raise ValueError("center=True only allowed for dense data")
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
if center:
y = y - np.mean(y)
X = X.copy('F') # faster in fortran
X -= X.mean(axis=0)
# compute the correlation
corr = safe_sparse_dot(y, X)
corr /= row_norms(X.T)
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'])
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
score_func_ret = self.score_func(X, y)
if isinstance(score_func_ret, (list, tuple)):
self.scores_, self.pvalues_ = score_func_ret
self.pvalues_ = np.asarray(self.pvalues_)
else:
self.scores_ = score_func_ret
self.pvalues_ = None
self.scores_ = np.asarray(self.scores_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
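    Examples
    --------
    A minimal sketch keeping the top 25% of features by chi-squared score
    (illustrative only; assumes the bundled digits dataset):

    >>> from sklearn.datasets import load_digits
    >>> from sklearn.feature_selection import SelectPercentile, chi2
    >>> digits = load_digits()
    >>> selector = SelectPercentile(chi2, percentile=25)
    >>> X_reduced = selector.fit_transform(digits.data, digits.target)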
"""
def __init__(self, score_func=f_classif, percentile=10):
super(SelectPercentile, self).__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
        # Shortcut the trivial percentiles.
        if self.percentile == 100:
            return np.ones(len(self.scores_), dtype=bool)
        elif self.percentile == 0:
            return np.zeros(len(self.scores_), dtype=bool)
        # Cater for NaNs before computing the percentile threshold.
        scores = _clean_nans(self.scores_)
        threshold = stats.scoreatpercentile(scores,
                                            100 - self.percentile)
        mask = scores > threshold
        ties = np.where(scores == threshold)[0]
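        # Break ties at the threshold so that no more than the requested
        # fraction of features is selected.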
if len(ties):
max_feats = int(len(scores) * self.percentile / 100)
kept_ties = ties[:max_feats - mask.sum()]
mask[kept_ties] = True
return mask
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
    mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
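    Examples
    --------
    A minimal sketch keeping the 20 highest-scoring features (illustrative
    only; assumes the bundled digits dataset, which has 1797 samples and
    64 features):

    >>> from sklearn.datasets import load_digits
    >>> from sklearn.feature_selection import SelectKBest, chi2
    >>> digits = load_digits()
    >>> X_new = SelectKBest(chi2, k=20).fit_transform(digits.data, digits.target)
    >>> X_new.shape
    (1797, 20)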
"""
def __init__(self, score_func=f_classif, k=10):
super(SelectKBest, self).__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features; got %r."
"Use k='all' to return all features."
% self.k)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
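# Illustrative usage sketch (not part of the library): selecting a fixed
# number of features; k='all' is handy inside a parameter grid search.
#
#     from sklearn.datasets import load_iris
#     from sklearn.feature_selection import SelectKBest, chi2
#     X, y = load_iris(return_X_y=True)
#     X_new = SelectKBest(chi2, k=2).fit_transform(X, y)
#     X_new.shape  # (150, 2)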
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFpr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
on the expected false discovery rate.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
References
----------
https://en.wikipedia.org/wiki/False_discovery_rate
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFdr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
n_features = len(self.pvalues_)
sv = np.sort(self.pvalues_)
selected = sv[sv <= float(self.alpha) / n_features
* np.arange(1, n_features + 1)]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
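# Worked example of the Benjamini-Hochberg cut above (illustrative numbers):
# with pvalues_ = [0.001, 0.02, 0.04, 0.5] and alpha = 0.05, the sorted
# p-values are compared against alpha * i / n for ranks i = 1..4, i.e.
# [0.0125, 0.025, 0.0375, 0.05]. The first two comparisons pass, so
# selected.max() == 0.02 and exactly the features with p <= 0.02 are kept.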
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFwe, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
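# Note: this is a Bonferroni-style cut -- e.g. with alpha = 0.05 and 100
# features, only p-values below 0.05 / 100 = 0.0005 survive, which controls
# the probability of even one false detection across the whole family.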
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues). For modes 'percentile' or 'kbest' it can return
a single array scores.
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned scores only.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super(GenericUnivariateSelect, self).__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
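# Illustrative usage sketch (not part of the library): the mode picks the
# selector class and `param` is forwarded to its single tuning parameter.
#
#     from sklearn.datasets import load_iris
#     from sklearn.feature_selection import GenericUnivariateSelect, chi2
#     X, y = load_iris(return_X_y=True)
#     transformer = GenericUnivariateSelect(chi2, mode='k_best', param=2)
#     X_new = transformer.fit_transform(X, y)  # same result as SelectKBest(k=2)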
| toastedcornflakes/scikit-learn | sklearn/feature_selection/univariate_selection.py | Python | bsd-3-clause | 25,381 |
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import random
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# List of configuration experiments for correctness fuzzing.
# List of <probability>, <1st config name>, <2nd config name>, <2nd d8>.
# Probabilities must add up to 100.
with open(os.path.join(THIS_DIR, 'v8_fuzz_experiments.json')) as f:
FOOZZIE_EXPERIMENTS = json.load(f)
# Additional flag experiments. List of tuples like
# (<likelihood to use flags in [0,1)>, <flag>).
with open(os.path.join(THIS_DIR, 'v8_fuzz_flags.json')) as f:
ADDITIONAL_FLAGS = json.load(f)
class Config(object):
def __init__(self, name, rng=None):
"""
Args:
name: Name of the used fuzzer.
rng: Random number generator for generating experiments.
"""
self.name = name
self.rng = rng or random.Random()
def choose_foozzie_flags(self, foozzie_experiments=None, additional_flags=None):
"""Randomly chooses a configuration from FOOZZIE_EXPERIMENTS.
Args:
foozzie_experiments: Override experiment config for testing.
additional_flags: Override additional flags for testing.
Returns: List of flags to pass to v8_foozzie.py fuzz harness.
"""
foozzie_experiments = foozzie_experiments or FOOZZIE_EXPERIMENTS
additional_flags = additional_flags or ADDITIONAL_FLAGS
# Add additional flags to second config based on experiment percentages.
extra_flags = []
for p, flags in additional_flags:
if self.rng.random() < p:
for flag in flags.split():
extra_flags.append('--second-config-extra-flags=%s' % flag)
# Calculate flags determining the experiment.
acc = 0
threshold = self.rng.random() * 100
for prob, first_config, second_config, second_d8 in foozzie_experiments:
acc += prob
if acc > threshold:
return [
'--first-config=' + first_config,
'--second-config=' + second_config,
'--second-d8=' + second_d8,
] + extra_flags
assert False
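# Illustrative usage sketch (not part of the tool): the override arguments
# exist for testing, so a deterministic run can be forced like this. The
# experiment tuple values below are hypothetical.
#
#     rng = random.Random(42)
#     config = Config('demo_fuzzer', rng)
#     flags = config.choose_foozzie_flags(
#         foozzie_experiments=[[100, 'ignition', 'ignition_turbo', 'd8']],
#         additional_flags=[(0.1, '--future')])
#     # -> ['--first-config=ignition', '--second-config=ignition_turbo',
#     #     '--second-d8=d8'] plus, with 10% probability,
#     #     '--second-config-extra-flags=--future'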
| endlessm/chromium-browser | v8/tools/clusterfuzz/v8_fuzz_config.py | Python | bsd-3-clause | 2,226 |
__author__ = 'elijahethun'
| yarhajile/sven-daemon | Sven/Module/__init__.py | Python | gpl-2.0 | 27 |
""" A universal module with functions / classes without dependencies. """
import sys
import contextlib
import functools
import re
import os
from jedi._compatibility import reraise
_sep = os.path.sep
if os.path.altsep is not None:
_sep += os.path.altsep
_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
del _sep
def to_list(func):
def wrapper(*args, **kwargs):
return list(func(*args, **kwargs))
return wrapper
def unite(iterable):
"""Turns a two dimensional array into a one dimensional."""
return set(typ for types in iterable for typ in types)
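# e.g. unite([[1, 2], [2, 3]]) == {1, 2, 3} -- note the result is a set,
# so ordering and duplicates are dropped.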
class UncaughtAttributeError(Exception):
"""
Important, because `__getattr__` and `hasattr` catch AttributeErrors
implicitly. This is really evil (mainly because of `__getattr__`).
`hasattr` in Python 2 is even more evil, because it catches ALL exceptions.
Therefore this class originally had to be derived from `BaseException`
instead of `Exception`. But because I removed relevant `hasattr` from
the code base, we can now switch back to `Exception`.
:param base: return values of sys.exc_info().
"""
def safe_property(func):
return property(reraise_uncaught(func))
def reraise_uncaught(func):
"""
Re-throw uncaught `AttributeError`.
Usage: Put ``@rethrow_uncaught`` in front of the function
which does **not** suppose to raise `AttributeError`.
AttributeError is easily get caught by `hasattr` and another
``except AttributeError`` clause. This becomes problem when you use
a lot of "dynamic" attributes (e.g., using ``@property``) because you
can't distinguish if the property does not exist for real or some code
inside of the "dynamic" attribute through that error. In a well
written code, such error should not exist but getting there is very
difficult. This decorator is to help us getting there by changing
`AttributeError` to `UncaughtAttributeError` to avoid unexpected catch.
This helps us noticing bugs earlier and facilitates debugging.
.. note:: Treating StopIteration here is easy.
Add that feature when needed.
"""
@functools.wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except AttributeError:
exc_info = sys.exc_info()
reraise(UncaughtAttributeError(exc_info[1]), exc_info[2])
return wrapper
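# Illustrative sketch (hypothetical class): with a plain @property, an
# AttributeError raised inside bar() could be swallowed by getattr()
# fallbacks; @safe_property re-raises it as UncaughtAttributeError instead.
#
#     class Foo(object):
#         @safe_property
#         def bar(self):
#             return self.missing_attribute
#
#     Foo().bar  # raises UncaughtAttributeError, not AttributeError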
class PushBackIterator(object):
def __init__(self, iterator):
self.pushes = []
self.iterator = iterator
self.current = None
def push_back(self, value):
self.pushes.append(value)
def __iter__(self):
return self
def next(self):
""" Python 2 Compatibility """
return self.__next__()
def __next__(self):
if self.pushes:
self.current = self.pushes.pop()
else:
self.current = next(self.iterator)
return self.current
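# Usage sketch: pushed-back values are yielded before the wrapped iterator
# resumes (most recently pushed first).
#
#     it = PushBackIterator(iter([1, 2, 3]))
#     next(it)         # -> 1
#     it.push_back(1)
#     next(it)         # -> 1 again
#     next(it)         # -> 2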
@contextlib.contextmanager
def ignored(*exceptions):
"""
Context manager that ignores all of the specified exceptions. This will
be in the standard library starting with Python 3.4.
"""
try:
yield
except exceptions:
pass
def indent_block(text, indention=' '):
"""This function indents a text block with a default of four spaces."""
temp = ''
while text and text[-1] == '\n':
temp += text[-1]
text = text[:-1]
lines = text.split('\n')
return '\n'.join(map(lambda s: indention + s, lines)) + temp
def dotted_from_fs_path(fs_path, sys_path):
"""
Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`. I.e.
compares the path with sys.path and then returns the dotted_path. If no
sys.path entry is a prefix of the path, the dotted path is built from the
whole filesystem path.
"""
if os.path.basename(fs_path).startswith('__init__.'):
# We are calculating the path. __init__ files are not interesting.
fs_path = os.path.dirname(fs_path)
# prefer
# - UNIX
# /path/to/pythonX.Y/lib-dynload
# /path/to/pythonX.Y/site-packages
# - Windows
# C:\path\to\DLLs
# C:\path\to\Lib\site-packages
# over
# - UNIX
# /path/to/pythonX.Y
# - Windows
# C:\path\to\Lib
path = ''
for s in sys_path:
if (fs_path.startswith(s) and len(path) < len(s)):
path = s
# - Window
# X:\path\to\lib-dynload/datetime.pyd => datetime
module_path = fs_path[len(path):].lstrip(os.path.sep).lstrip('/')
# - Window
# Replace like X:\path\to\something/foo/bar.py
return _path_re.sub('', module_path).replace(os.path.sep, '.').replace('/', '.')
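# e.g. dotted_from_fs_path('/usr/lib/python3.4/email/utils.py',
# sys_path=['/usr/lib/python3.4']) -> 'email.utils' (illustrative paths).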
| technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/site-packages/jedi/evaluate/utils.py | Python | gpl-3.0 | 4,702 |
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.lin_ops.lin_utils import *
from cvxpy.lin_ops.lin_op import *
from cvxpy.expressions.constants import Parameter
import cvxpy.interface as intf
import numpy as np
import scipy.sparse as sp
import unittest
from cvxpy.tests.base_test import BaseTest
import sys
PY2 = sys.version_info < (3, 0)
class test_lin_ops(BaseTest):
""" Unit tests for the lin_ops module. """
def test_variables(self):
"""Test creating a variable.
"""
var = create_var((5, 4), var_id=1)
self.assertEqual(var.size, (5, 4))
self.assertEqual(var.data, 1)
self.assertEqual(len(var.args), 0)
self.assertEqual(var.type, VARIABLE)
def test_param(self):
"""Test creating a parameter.
"""
A = Parameter(5, 4)
var = create_param(A, (5, 4))
self.assertEqual(var.size, (5, 4))
self.assertEqual(len(var.args), 0)
self.assertEqual(var.type, PARAM)
def test_constant(self):
"""Test creating a constant.
"""
# Scalar constant.
size = (1, 1)
mat = create_const(1.0, size)
self.assertEqual(mat.size, size)
self.assertEqual(len(mat.args), 0)
self.assertEqual(mat.type, SCALAR_CONST)
assert mat.data == 1.0
# Dense matrix constant.
size = (5, 4)
mat = create_const(np.ones(size), size)
self.assertEqual(mat.size, size)
self.assertEqual(len(mat.args), 0)
self.assertEqual(mat.type, DENSE_CONST)
assert (mat.data == np.ones(size)).all()
# Sparse matrix constant.
size = (5, 5)
mat = create_const(sp.eye(5), size, sparse=True)
self.assertEqual(mat.size, size)
self.assertEqual(len(mat.args), 0)
self.assertEqual(mat.type, SPARSE_CONST)
assert (mat.data.todense() == sp.eye(5).todense()).all()
def test_add_expr(self):
"""Test adding lin expr.
"""
size = (5, 4)
x = create_var(size)
y = create_var(size)
# Expanding dict.
add_expr = sum_expr([x, y])
self.assertEqual(add_expr.size, size)
assert len(add_expr.args) == 2
def test_get_vars(self):
"""Test getting vars from an expression.
"""
size = (5, 4)
x = create_var(size)
y = create_var(size)
A = create_const(np.ones(size), size)
# Expanding dict.
add_expr = sum_expr([x, y, A])
vars_ = get_expr_vars(add_expr)
ref = [(x.data, size), (y.data, size)]
if PY2:
self.assertItemsEqual(vars_, ref)
else:
self.assertCountEqual(vars_, ref)
def test_neg_expr(self):
"""Test negating an expression.
"""
size = (5, 4)
var = create_var(size)
expr = neg_expr(var)
assert len(expr.args) == 1
self.assertEqual(expr.size, size)
self.assertEqual(expr.type, NEG)
def test_eq_constr(self):
"""Test creating an equality constraint.
"""
size = (5, 5)
x = create_var(size)
y = create_var(size)
lh_expr = sum_expr([x, y])
value = np.ones(size)
rh_expr = create_const(value, size)
constr = create_eq(lh_expr, rh_expr)
self.assertEqual(constr.size, size)
vars_ = get_expr_vars(constr.expr)
ref = [(x.data, size), (y.data, size)]
if PY2:
self.assertItemsEqual(vars_, ref)
else:
self.assertCountEqual(vars_, ref)
def test_leq_constr(self):
"""Test creating a less than or equal constraint.
"""
size = (5, 5)
x = create_var(size)
y = create_var(size)
lh_expr = sum_expr([x, y])
value = np.ones(size)
rh_expr = create_const(value, size)
constr = create_leq(lh_expr, rh_expr)
self.assertEqual(constr.size, size)
vars_ = get_expr_vars(constr.expr)
ref = [(x.data, size), (y.data, size)]
if PY2:
self.assertItemsEqual(vars_, ref)
else:
self.assertCountEqual(vars_, ref)
def test_sum_entries(self):
"""Test sum entries op.
"""
size = (5, 5)
x = create_var(size)
expr = sum_entries(x)
self.assertEqual(expr.size, (1, 1))
self.assertEqual(len(expr.args), 1)
self.assertEqual(expr.type, lo.SUM_ENTRIES)
| sdpython/cvxpy | cvxpy/tests/test_lin_ops.py | Python | gpl-3.0 | 5,078 |
#!/usr/bin/env python
"""
AMQP Clock
Fires off simple messages at one-minute intervals to a topic
exchange named 'clock', with the topic of the message being
the local time as 'year.month.date.dow.hour.minute',
for example: '2007.11.26.1.12.33', where the dow (day of week)
is 0 for Sunday, 1 for Monday, and so on (similar to Unix crontab).
A consumer could then bind a queue to the routing key '#.0'
for example to get a message at the beginning of each hour.
2007-11-26 Barry Pederson <bp@barryp.org>
"""
from datetime import datetime
from optparse import OptionParser
from time import sleep
import amqplib.client_0_8 as amqp
Message = amqp.Message
EXCHANGE_NAME = 'clock'
TOPIC_PATTERN = '%Y.%m.%d.%w.%H.%M' # Python datetime.strftime() pattern
def main():
parser = OptionParser()
parser.add_option('--host', dest='host',
help='AMQP server to connect to (default: %default)',
default='localhost')
parser.add_option('-u', '--userid', dest='userid',
help='AMQP userid to authenticate as (default: %default)',
default='guest')
parser.add_option('-p', '--password', dest='password',
help='AMQP password to authenticate with (default: %default)',
default='guest')
parser.add_option('--ssl', dest='ssl', action='store_true',
help='Enable SSL with AMQP server (default: not enabled)',
default=False)
options, args = parser.parse_args()
conn = amqp.Connection(options.host, options.userid, options.password)
ch = conn.channel()
ch.access_request('/data', write=True, active=True)
ch.exchange_declare(EXCHANGE_NAME, type='topic')
# Make sure our first message is close to the beginning
# of a minute
now = datetime.now()
if now.second > 0:
sleep(60 - now.second)
while True:
now = datetime.now()
msg = Message(timestamp=now)
msg_topic = now.strftime(TOPIC_PATTERN)
ch.basic_publish(msg, EXCHANGE_NAME, routing_key=msg_topic)
# Don't know how long the basic_publish took, so
# grab the time again.
now = datetime.now()
sleep(60 - now.second)
ch.close()
conn.close()
if __name__ == '__main__':
main()
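# Hedged companion sketch (not part of this demo): a consumer bound to the
# routing key '#.0' receives one message at the start of every hour. This
# assumes the same amqplib 0.8 API used above.
#
#     conn = amqp.Connection('localhost', 'guest', 'guest')
#     ch = conn.channel()
#     ch.access_request('/data', active=True, read=True)
#     qname, _, _ = ch.queue_declare()
#     ch.queue_bind(qname, EXCHANGE_NAME, routing_key='#.0')
#     def on_clock(msg):
#         print('tick: %s' % msg.timestamp)
#     ch.basic_consume(qname, callback=on_clock, no_ack=True)
#     while ch.callbacks:
#         ch.wait()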
| mzdaniel/oh-mainline | vendor/packages/amqplib/demo/amqp_clock.py | Python | agpl-3.0 | 2,344 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
meta = sql.MetaData()
meta.bind = migrate_engine
# catalog
service_table = sql.Table(
'service',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('type', sql.String(255)),
sql.Column('extra', sql.Text()))
service_table.create(migrate_engine, checkfirst=True)
endpoint_table = sql.Table(
'endpoint',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('region', sql.String(255)),
sql.Column('service_id',
sql.String(64),
sql.ForeignKey('service.id'),
nullable=False),
sql.Column('extra', sql.Text()))
endpoint_table.create(migrate_engine, checkfirst=True)
# identity
role_table = sql.Table(
'role',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(255), unique=True, nullable=False))
role_table.create(migrate_engine, checkfirst=True)
if migrate_engine.name == 'ibm_db_sa':
# NOTE(blk-u): SQLAlchemy for PostgreSQL picks the name tenant_name_key
# for the unique constraint, but for DB2 doesn't give the UC a name
# unless we tell it to and there is no DDL to alter a column to drop
# an unnamed unique constraint, so this code creates a named unique
# constraint on the name column rather than an unnamed one.
# (This is used in migration 16.)
tenant_table = sql.Table(
'tenant',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.UniqueConstraint('name', name='tenant_name_key'))
else:
tenant_table = sql.Table(
'tenant',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()))
tenant_table.create(migrate_engine, checkfirst=True)
metadata_table = sql.Table(
'metadata',
meta,
sql.Column('user_id', sql.String(64), primary_key=True),
sql.Column('tenant_id', sql.String(64), primary_key=True),
sql.Column('data', sql.Text()))
metadata_table.create(migrate_engine, checkfirst=True)
ec2_credential_table = sql.Table(
'ec2_credential',
meta,
sql.Column('access', sql.String(64), primary_key=True),
sql.Column('secret', sql.String(64)),
sql.Column('user_id', sql.String(64)),
sql.Column('tenant_id', sql.String(64)))
ec2_credential_table.create(migrate_engine, checkfirst=True)
if migrate_engine.name == 'ibm_db_sa':
# NOTE(blk-u): SQLAlchemy for PostgreSQL picks the name user_name_key
# for the unique constraint, but for DB2 doesn't give the UC a name
# unless we tell it to and there is no DDL to alter a column to drop
# an unnamed unique constraint, so this code creates a named unique
# constraint on the name column rather than an unnamed one.
# (This is used in migration 16.)
user_table = sql.Table(
'user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.UniqueConstraint('name', name='user_name_key'))
else:
user_table = sql.Table(
'user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()))
user_table.create(migrate_engine, checkfirst=True)
user_tenant_membership_table = sql.Table(
'user_tenant_membership',
meta,
sql.Column(
'user_id',
sql.String(64),
sql.ForeignKey('user.id'),
primary_key=True),
sql.Column(
'tenant_id',
sql.String(64),
sql.ForeignKey('tenant.id'),
primary_key=True))
user_tenant_membership_table.create(migrate_engine, checkfirst=True)
# token
token_table = sql.Table(
'token',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('expires', sql.DateTime()),
sql.Column('extra', sql.Text()))
token_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = sql.MetaData()
meta.bind = migrate_engine
tables = ['user_tenant_membership', 'token', 'user', 'tenant', 'role',
'metadata', 'ec2_credential', 'endpoint', 'service']
for t in tables:
table = sql.Table(t, meta, autoload=True)
table.drop(migrate_engine, checkfirst=True)
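# Illustrative sketch (assumes the sqlalchemy-migrate repository API): the
# migration runner binds an engine and calls these entry points directly.
#
#     import sqlalchemy as sql
#     engine = sql.create_engine('sqlite://')  # hypothetical database URL
#     upgrade(engine)    # creates the catalog/identity/token tables above
#     downgrade(engine)  # drops them again, in dependency-safe order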
| dsiddharth/access-keys | keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py | Python | apache-2.0 | 5,811 |
import mock
import pytest
from rest_framework import exceptions
from addons.wiki.models import WikiPage
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from api.base.settings.defaults import API_BASE
from api_tests.wikis.views.test_wiki_detail import WikiCRUDTestCase
from framework.auth.core import Auth
from osf_tests.factories import (
AuthUserFactory,
ProjectFactory,
OSFGroupFactory,
RegistrationFactory,
)
from osf.utils.permissions import WRITE, READ
from tests.base import fake
@pytest.fixture()
def user():
return AuthUserFactory()
def create_wiki_payload(name):
return {
'data': {
'type': 'wikis',
'attributes': {
'name': name
}
}
}
@pytest.mark.django_db
class TestNodeWikiList:
@pytest.fixture()
def add_project_wiki_page(self):
def add_page(node, user):
with mock.patch('osf.models.AbstractNode.update_search'):
wiki_page = WikiFactory(node=node, user=user)
WikiVersionFactory(wiki_page=wiki_page)
return wiki_page
return add_page
@pytest.fixture()
def non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def public_project(self, user):
return ProjectFactory(is_public=True, creator=user)
@pytest.fixture()
def public_wiki(self, add_project_wiki_page, user, public_project):
return add_project_wiki_page(public_project, user)
@pytest.fixture()
def public_url(self, public_project, public_wiki):
return '/{}nodes/{}/wikis/'.format(API_BASE, public_project._id)
@pytest.fixture()
def private_project(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def private_wiki(self, add_project_wiki_page, user, private_project):
return add_project_wiki_page(private_project, user)
@pytest.fixture()
def private_url(self, private_project, private_wiki):
return '/{}nodes/{}/wikis/'.format(API_BASE, private_project._id)
@pytest.fixture()
def public_registration(self, user, public_project, public_wiki):
public_registration = RegistrationFactory(
project=public_project, user=user, is_public=True)
return public_registration
@pytest.fixture()
def public_registration_url(self, public_registration):
return '/{}registrations/{}/wikis/'.format(
API_BASE, public_registration._id)
@pytest.fixture()
def private_registration(self, user, private_project, private_wiki):
private_registration = RegistrationFactory(
project=private_project, user=user)
return private_registration
@pytest.fixture()
def private_registration_url(self, private_registration):
return '/{}registrations/{}/wikis/'.format(
API_BASE, private_registration._id)
def test_return_wikis(
self, app, user, non_contrib, private_registration, private_project,
public_wiki, private_wiki, public_url, private_url,
private_registration_url):
# test_return_public_node_wikis_logged_out_user
res = app.get(public_url)
assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']]
assert public_wiki._id in wiki_ids
# test_return_public_node_wikis_logged_in_non_contributor
res = app.get(public_url, auth=non_contrib.auth)
assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']]
assert public_wiki._id in wiki_ids
# test_return_public_node_wikis_logged_in_contributor
res = app.get(public_url, auth=user.auth)
assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']]
assert public_wiki._id in wiki_ids
# test_return_private_node_wikis_logged_out_user
res = app.get(private_url, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
# test_return_private_node_wikis_logged_in_osf_group_member
group_mem = AuthUserFactory()
group = OSFGroupFactory(creator=group_mem)
private_project.add_osf_group(group, READ)
res = app.get(private_url, auth=group_mem.auth)
assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']]
assert private_wiki._id in wiki_ids
# test_return_private_node_wikis_logged_in_non_contributor
res = app.get(private_url, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_return_private_node_wikis_logged_in_contributor
res = app.get(private_url, auth=user.auth)
assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']]
assert private_wiki._id in wiki_ids
# test_return_registration_wikis_logged_out_user
res = app.get(private_registration_url, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
# test_return_registration_wikis_logged_in_non_contributor
res = app.get(
private_registration_url,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_return_registration_wikis_logged_in_contributor
res = app.get(private_registration_url, auth=user.auth)
assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']]
assert WikiPage.objects.get_for_node(private_registration, 'home')._id in wiki_ids
def test_wikis_not_returned_for_withdrawn_registration(
self, app, user, private_registration, private_registration_url):
private_registration.is_public = True
withdrawal = private_registration.retract_registration(
user=user, save=True)
token = list(withdrawal.approval_state.values())[0]['approval_token']
# TODO: Remove mocking when StoredFileNode is implemented
with mock.patch('osf.models.AbstractNode.update_search'):
withdrawal.approve_retraction(user, token)
withdrawal.save()
res = app.get(
private_registration_url,
auth=user.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
def test_do_not_return_disabled_wiki(self, app, user, public_url, public_project):
public_project.delete_addon('wiki', auth=Auth(user))
res = app.get(public_url, expect_errors=True)
assert res.status_code == 404
def test_relationship_links(
self, app, user, public_project, private_project,
public_registration, private_registration,
public_url, private_url, public_registration_url,
private_registration_url):
# test_public_node_wikis_relationship_links
res = app.get(public_url)
expected_nodes_relationship_url = '{}nodes/{}/'.format(
API_BASE, public_project._id)
expected_comments_relationship_url = '{}nodes/{}/comments/'.format(
API_BASE, public_project._id)
assert expected_nodes_relationship_url in res.json['data'][
0]['relationships']['node']['links']['related']['href']
assert expected_comments_relationship_url in res.json['data'][
0]['relationships']['comments']['links']['related']['href']
# test_private_node_wikis_relationship_links
res = app.get(private_url, auth=user.auth)
expected_nodes_relationship_url = '{}nodes/{}/'.format(
API_BASE, private_project._id)
expected_comments_relationship_url = '{}nodes/{}/comments/'.format(
API_BASE, private_project._id)
assert expected_nodes_relationship_url in res.json['data'][
0]['relationships']['node']['links']['related']['href']
assert expected_comments_relationship_url in res.json['data'][
0]['relationships']['comments']['links']['related']['href']
# test_public_registration_wikis_relationship_links
res = app.get(public_registration_url)
expected_nodes_relationship_url = '{}registrations/{}/'.format(
API_BASE, public_registration._id)
expected_comments_relationship_url = '{}registrations/{}/comments/'.format(
API_BASE, public_registration._id)
assert expected_nodes_relationship_url in res.json['data'][
0]['relationships']['node']['links']['related']['href']
assert expected_comments_relationship_url in res.json['data'][
0]['relationships']['comments']['links']['related']['href']
# test_private_registration_wikis_relationship_links
res = app.get(private_registration_url, auth=user.auth)
expected_nodes_relationship_url = '{}registrations/{}/'.format(
API_BASE, private_registration._id)
expected_comments_relationship_url = '{}registrations/{}/comments/'.format(
API_BASE, private_registration._id)
assert expected_nodes_relationship_url in res.json['data'][
0]['relationships']['node']['links']['related']['href']
assert expected_comments_relationship_url in res.json['data'][
0]['relationships']['comments']['links']['related']['href']
def test_not_returned(
self, app, public_project, public_registration,
public_url, public_registration_url):
# test_registration_wikis_not_returned_from_nodes_endpoint
res = app.get(public_url)
node_relationships = [
node_wiki['relationships']['node']['links']['related']['href']
for node_wiki in res.json['data']
]
assert res.status_code == 200
assert len(node_relationships) == 1
assert public_project._id in node_relationships[0]
# test_node_wikis_not_returned_from_registrations_endpoint
res = app.get(public_registration_url)
node_relationships = [
node_wiki['relationships']['node']['links']['related']['href']
for node_wiki in res.json['data']
]
assert res.status_code == 200
assert len(node_relationships) == 1
assert public_registration._id in node_relationships[0]
@pytest.mark.django_db
class TestFilterNodeWikiList:
@pytest.fixture()
def private_project(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def base_url(self, private_project):
return '/{}nodes/{}/wikis/'.format(API_BASE, private_project._id)
@pytest.fixture()
def wiki(self, user, private_project):
with mock.patch('osf.models.AbstractNode.update_search'):
wiki_page = WikiFactory(node=private_project, user=user)
WikiVersionFactory(wiki_page=wiki_page)
return wiki_page
@pytest.fixture()
def date(self, wiki):
return wiki.modified.strftime('%Y-%m-%dT%H:%M:%S.%f')
def test_filter_node_wiki_list(self, app, user, wiki, date, base_url):
# test_node_wikis_with_no_filter_returns_all
res = app.get(base_url, auth=user.auth)
wiki_ids = [item['id'] for item in res.json['data']]
assert wiki._id in wiki_ids
# test_filter_wikis_by_page_name
url = base_url + '?filter[name]=home'
res = app.get(url, auth=user.auth)
assert len(res.json['data']) == 1
assert res.json['data'][0]['attributes']['name'] == 'home'
# test_filter_wikis_modified_on_date
url = base_url + '?filter[date_modified][eq]={}'.format(date)
res = app.get(url, auth=user.auth)
assert len(res.json['data']) == 1
# test_filter_wikis_modified_before_date
url = base_url + '?filter[date_modified][lt]={}'.format(date)
res = app.get(url, auth=user.auth)
assert len(res.json['data']) == 0
# test_filter_wikis_modified_after_date
url = base_url + '?filter[date_modified][gt]={}'.format(date)
res = app.get(url, auth=user.auth)
assert len(res.json['data']) == 0
@pytest.mark.django_db
class TestNodeWikiCreate(WikiCRUDTestCase):
@pytest.fixture
def url_node_public(self, project_public):
return '/{}nodes/{}/wikis/'.format(API_BASE, project_public._id)
@pytest.fixture
def url_node_private(self, project_private):
return '/{}nodes/{}/wikis/'.format(API_BASE, project_private._id)
@pytest.fixture
def url_registration_public(self, wiki_registration_public):
return '/{}registrations/{}/wikis/'.format(API_BASE, wiki_registration_public.node._id)
@pytest.fixture
def url_registration_private(self, wiki_registration_private):
return '/{}registrations/{}/wikis/'.format(API_BASE, wiki_registration_private.node._id)
def test_create_public_wiki_page(self, app, user_write_contributor, url_node_public):
page_name = fake.word()
res = app.post_json_api(url_node_public, create_wiki_payload(page_name), auth=user_write_contributor.auth)
assert res.status_code == 201
assert res.json['data']['attributes']['name'] == page_name
def test_create_public_wiki_page_with_content(self, app, user_write_contributor, url_node_public, project_public):
page_name = 'using random variables in tests can sometimes expose Testmon problems!'
payload = create_wiki_payload(page_name)
payload['data']['attributes']['content'] = 'my first wiki page'
res = app.post_json_api(url_node_public, payload, auth=user_write_contributor.auth)
assert res.status_code == 201
assert res.json['data']['attributes']['name'] == page_name
wiki_page = WikiPage.objects.get_for_node(project_public, page_name)
assert wiki_page.get_version().content == 'my first wiki page'
# test_osf_group_member_write
group_mem = AuthUserFactory()
group = OSFGroupFactory(creator=group_mem)
project_public.add_osf_group(group, WRITE)
res = app.post_json_api(url_node_public, create_wiki_payload(fake.word()), auth=group_mem.auth, expect_errors=True)
assert res.status_code == 201
def test_create_public_wiki_page_with_empty_content(self, app, user_write_contributor, url_node_public, project_public):
page_name = fake.word()
payload = create_wiki_payload(page_name)
payload['data']['attributes']['content'] = ''
res = app.post_json_api(url_node_public, payload, auth=user_write_contributor.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be blank.'
def test_do_not_create_public_wiki_page(
self, app, user_creator, user_read_contributor, user_non_contributor,
url_node_public, wiki_public, project_public
):
# test_do_not_create_home_wiki_page
res = app.post_json_api(url_node_public, create_wiki_payload('home'), auth=user_creator.auth, expect_errors=True)
assert res.status_code == 409
assert res.json['errors'][0]['detail'] == "A wiki page with the name 'home' already exists."
# test_do_not_create_wiki_page_name_exists
res = app.post_json_api(url_node_public, create_wiki_payload(wiki_public.page_name), auth=user_creator.auth, expect_errors=True)
assert res.status_code == 409
assert res.json['errors'][0]['detail'] == "A wiki page with the name '{}' already exists.".format(wiki_public.page_name)
# test_do_not_create_public_wiki_page_as_read_contributor
res = app.post_json_api(url_node_public, create_wiki_payload(fake.word()), auth=user_read_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_create_public_wiki_page_as_non_contributor
res = app.post_json_api(url_node_public, create_wiki_payload(fake.word()), auth=user_non_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_create_public_wiki_page_as_read_osf_group_member
group_mem = AuthUserFactory()
group = OSFGroupFactory(creator=group_mem)
project_public.add_osf_group(group, READ)
res = app.post_json_api(url_node_public, create_wiki_payload(fake.word()), auth=group_mem.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_create_public_wiki_page_as_unauthenticated
res = app.post_json_api(url_node_public, create_wiki_payload(fake.word()), expect_errors=True)
assert res.status_code == 401
def test_create_private_wiki_page(self, app, user_write_contributor, url_node_private):
page_name = fake.word()
res = app.post_json_api(url_node_private, create_wiki_payload(page_name), auth=user_write_contributor.auth)
assert res.status_code == 201
assert res.json['data']['attributes']['name'] == page_name
def test_do_not_create_private_wiki_page(
self, app, wiki_private, url_node_private,
user_read_contributor, user_non_contributor
):
# test_do_not_create_private_wiki_page_as_read_contributor
res = app.post_json_api(url_node_private, create_wiki_payload(fake.word()), auth=user_read_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_create_private_wiki_page_as_non_contributor
res = app.post_json_api(url_node_private, create_wiki_payload(fake.word()), auth=user_non_contributor.auth, expect_errors=True)
assert res.status_code == 403
# test_do_not_create_private_wiki_page_as_unauthenticated
res = app.post_json_api(url_node_private, create_wiki_payload(fake.word()), expect_errors=True)
assert res.status_code == 401
def test_do_not_create_registration_wiki_page(
self, app, user_creator,
url_registration_public, url_registration_private
):
# test_do_not_create_wiki_on_public_registration
res = app.post_json_api(url_registration_public, create_wiki_payload(fake.word()), auth=user_creator.auth, expect_errors=True)
assert res.status_code == 405
# test_do_not_create_wiki_on_embargoed_registration
res = app.post_json_api(url_registration_private, create_wiki_payload(fake.word()), auth=user_creator.auth, expect_errors=True)
assert res.status_code == 405
def test_do_not_create_wiki_page_if_disabled(
self, app, user_creator,
project_public, url_node_public, wiki_public
):
project_public.delete_addon('wiki', auth=Auth(user_creator))
page_name = fake.word()
res = app.post_json_api(url_node_public, create_wiki_payload(page_name), auth=user_creator.auth, expect_errors=True)
assert res.status_code == 404
def test_do_not_create_wiki_page_if_publicly_editable_non_contrib(
self, app, user_creator, user_non_contributor,
project_public, url_node_public, wiki_public
):
project_public.addons_wiki_node_settings.set_editing(True, auth=Auth(user_creator))
page_name = fake.word()
res = app.post_json_api(url_node_public, create_wiki_payload(page_name), auth=user_non_contributor.auth, expect_errors=True)
assert res.status_code == 403
| mfraezz/osf.io | api_tests/nodes/views/test_node_wiki_list.py | Python | apache-2.0 | 19,731 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.volume` -- Driver to store volumes on Nexenta Appliance
=====================================================================
.. automodule:: nexenta.volume
.. moduleauthor:: Yuriy Taraday <yorik.sar@gmail.com>
"""
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.volume import driver
from nova.volume import nexenta
from nova.volume.nexenta import jsonrpc
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
nexenta_opts = [
cfg.StrOpt('nexenta_host',
default='',
help='IP address of Nexenta SA'),
cfg.IntOpt('nexenta_rest_port',
default=2000,
help='HTTP port to connect to Nexenta REST API server'),
cfg.StrOpt('nexenta_rest_protocol',
default='auto',
help='Use http or https for REST connection (default auto)'),
cfg.StrOpt('nexenta_user',
default='admin',
help='User name to connect to Nexenta SA'),
cfg.StrOpt('nexenta_password',
default='nexenta',
help='Password to connect to Nexenta SA'),
cfg.IntOpt('nexenta_iscsi_target_portal_port',
default=3260,
help='Nexenta target portal port'),
cfg.StrOpt('nexenta_volume',
default='nova',
help='pool on SA that will hold all volumes'),
cfg.StrOpt('nexenta_target_prefix',
default='iqn.1986-03.com.sun:02:nova-',
help='IQN prefix for iSCSI targets'),
cfg.StrOpt('nexenta_target_group_prefix',
default='nova/',
help='prefix for iSCSI target groups on SA'),
cfg.StrOpt('nexenta_blocksize',
default='',
help='block size for volumes (blank=default,8KB)'),
cfg.BoolOpt('nexenta_sparse',
default=False,
help='flag to create sparse volumes'),
]
FLAGS.register_opts(nexenta_opts)
class NexentaDriver(driver.ISCSIDriver): # pylint: disable=R0921
"""Executes volume driver commands on Nexenta Appliance."""
def __init__(self):
super(NexentaDriver, self).__init__()
def do_setup(self, context):
protocol = FLAGS.nexenta_rest_protocol
auto = protocol == 'auto'
if auto:
protocol = 'http'
self.nms = jsonrpc.NexentaJSONProxy(
'%s://%s:%s/rest/nms/' % (protocol, FLAGS.nexenta_host,
FLAGS.nexenta_rest_port),
FLAGS.nexenta_user, FLAGS.nexenta_password, auto=auto)
def check_for_setup_error(self):
"""Verify that the volume for our zvols exists.
:raise: :py:exc:`LookupError`
"""
if not self.nms.volume.object_exists(FLAGS.nexenta_volume):
raise LookupError(_("Volume %s does not exist in Nexenta SA"),
FLAGS.nexenta_volume)
@staticmethod
def _get_zvol_name(volume_name):
"""Return zvol name that corresponds given volume name."""
return '%s/%s' % (FLAGS.nexenta_volume, volume_name)
@staticmethod
def _get_target_name(volume_name):
"""Return iSCSI target name to access volume."""
return '%s%s' % (FLAGS.nexenta_target_prefix, volume_name)
@staticmethod
def _get_target_group_name(volume_name):
"""Return Nexenta iSCSI target group name for volume."""
return '%s%s' % (FLAGS.nexenta_target_group_prefix, volume_name)
def create_volume(self, volume):
"""Create a zvol on appliance.
:param volume: volume reference
"""
self.nms.zvol.create(
self._get_zvol_name(volume['name']),
'%sG' % (volume['size'],),
FLAGS.nexenta_blocksize, FLAGS.nexenta_sparse)
def delete_volume(self, volume):
"""Destroy a zvol on appliance.
:param volume: volume reference
"""
try:
self.nms.zvol.destroy(self._get_zvol_name(volume['name']), '')
except nexenta.NexentaException as exc:
if "zvol has children" in exc.args[1]:
raise exception.VolumeIsBusy(volume_name=volume['name'])
else:
raise
def create_snapshot(self, snapshot):
"""Create snapshot of existing zvol on appliance.
:param snapshot: snapshot reference
"""
self.nms.zvol.create_snapshot(
self._get_zvol_name(snapshot['volume_name']),
snapshot['name'], '')
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other's snapshot on appliance.
:param volume: reference of volume to be created
:param snapshot: reference of source snapshot
"""
self.nms.zvol.clone(
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
snapshot['name']),
self._get_zvol_name(volume['name']))
def delete_snapshot(self, snapshot):
"""Delete volume's snapshot on appliance.
:param snapshot: snapshot reference
"""
try:
self.nms.snapshot.destroy(
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
snapshot['name']),
'')
except nexenta.NexentaException as exc:
if "snapshot has dependent clones" in exc.args[1]:
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
else:
raise
def local_path(self, volume):
"""Return local path to existing local volume.
We never have local volumes, so it raises NotImplementedError.
:raise: :py:exc:`NotImplementedError`
"""
LOG.error(_("Call to local_path should not happen."
" Verify that use_local_volumes flag is turned off."))
raise NotImplementedError
def _do_export(self, _ctx, volume, ensure=False):
"""Do all steps to get zvol exported as LUN 0 at separate target.
:param volume: reference of volume to be exported
:param ensure: if True, ignore errors caused by already existing
resources
:return: iscsiadm-formatted provider location string
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume['name'])
target_group_name = self._get_target_group_name(volume['name'])
try:
self.nms.iscsitarget.create_target({'target_name': target_name})
except nexenta.NexentaException as exc:
if not ensure or 'already configured' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored target creation error "%s"'
' while ensuring export'), exc)
try:
self.nms.stmf.create_targetgroup(target_group_name)
except nexenta.NexentaException as exc:
if not ensure or 'already exists' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored target group creation error "%s"'
' while ensuring export'), exc)
try:
self.nms.stmf.add_targetgroup_member(target_group_name,
target_name)
except nexenta.NexentaException as exc:
if not ensure or 'already exists' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored target group member addition error "%s"'
' while ensuring export'), exc)
try:
self.nms.scsidisk.create_lu(zvol_name, {})
except nexenta.NexentaException as exc:
if not ensure or 'in use' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored LU creation error "%s"'
' while ensuring export'), exc)
try:
self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
'target_group': target_group_name,
'lun': '0'})
except nexenta.NexentaException as exc:
if not ensure or 'view entry exists' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored LUN mapping entry addition error "%s"'
' while ensuring export'), exc)
return '%s:%s,1 %s' % (FLAGS.nexenta_host,
FLAGS.nexenta_iscsi_target_portal_port,
target_name)
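# Illustrative result (hypothetical addresses): with nexenta_host 10.0.0.5
# and the default portal port, a volume named 'volume-0001' yields
# '10.0.0.5:3260,1 iqn.1986-03.com.sun:02:nova-volume-0001', i.e. the
# iscsiadm-style '<portal>,<tpgt> <target IQN>' string stored as the
# provider location.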
def create_export(self, _ctx, volume):
"""Create new export for zvol.
:param volume: reference of volume to be exported
:return: iscsiadm-formatted provider location string
"""
loc = self._do_export(_ctx, volume, ensure=False)
return {'provider_location': loc}
def ensure_export(self, _ctx, volume):
"""Recreate parts of export if necessary.
:param volume: reference of volume to be exported
"""
self._do_export(_ctx, volume, ensure=True)
def remove_export(self, _ctx, volume):
"""Destroy all resources created to export zvol.
:param volume: reference of volume to be unexported
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume['name'])
target_group_name = self._get_target_group_name(volume['name'])
self.nms.scsidisk.delete_lu(zvol_name)
try:
self.nms.stmf.destroy_targetgroup(target_group_name)
except nexenta.NexentaException as exc:
# We assume that target group is already gone
LOG.warn(_('Got error trying to destroy target group'
' %(target_group)s, assuming it is already gone: %(exc)s'),
{'target_group': target_group_name, 'exc': exc})
try:
self.nms.iscsitarget.delete_target(target_name)
except nexenta.NexentaException as exc:
# We assume that target is gone as well
LOG.warn(_('Got error trying to delete target %(target)s,'
' assuming it is already gone: %(exc)s'),
{'target': target_name, 'exc': exc})
| tylertian/Openstack | openstack F/nova/nova/volume/nexenta/volume.py | Python | apache-2.0 | 11,026 |
import json
import pytest
import jsonschema
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from website.settings import DOI_FORMAT, DATACITE_PREFIX
from website.project.licenses import set_license
from osf.models import FileMetadataSchema, NodeLicense, NodeLog
from osf_tests.factories import ProjectFactory, SubjectFactory, AuthUserFactory
from osf.utils.permissions import READ
from api_tests.utils import create_test_file
@pytest.fixture()
def node():
return ProjectFactory()
@pytest.fixture()
def osf_file(node):
return create_test_file(target=node, user=node.creator)
def inject_placeholder_doi(json_data):
# the OSF cannot currently issue DOIs for a file, which is required for datacite schema validation.
# Manually add a placeholder in tests for validation until we handle this better.
placeholder = DOI_FORMAT.format(prefix=DATACITE_PREFIX, guid='placeholder')
json_data['identifier'] = {'identifierType': 'DOI', 'identifier': placeholder}
return json_data
@pytest.mark.django_db
class TestFileMetadataRecordSerializer:
def test_record_created_post_save(self, node, osf_file):
# check there's a record for every FileMetadataSchema
assert FileMetadataSchema.objects.count() > 0
assert osf_file.records.count() == FileMetadataSchema.objects.count()
for record in osf_file.records.all().select_related('file'):
assert record.file == osf_file
def test_serialize_record_datacite(self, node, osf_file):
# Test all of the parts of serialize_json that are auto-generated
# from relationships and properties on the node and file
# add a contributor with an ORCID
contributor = AuthUserFactory()
contributor.external_identity = {
'ORCID': {
'0000-0001-9143-4653': 'VERIFIED'
}
}
contributor.save()
node.add_contributor(contributor, save=False)
# add subjects, tags, license, and guid
tags = ['fish', 'scale']
[osf_file.add_tag(tag, auth=Auth(node.creator), save=False) for tag in tags]
bepress_subject = SubjectFactory(text='BePress Text')
new_subject = SubjectFactory(bepress_subject=bepress_subject)
node.subjects.add(new_subject)
no_license = NodeLicense.objects.get(name='CC0 1.0 Universal')
license_detail = {
'id': no_license.license_id,
'year': '2018',
'copyrightHolders': ['Woop', 'Yeah']
}
set_license(node, license_detail, Auth(node.creator))
osf_file.save()
node.save()
osf_file.target.reload()
record = osf_file.records.get(schema___id='datacite')
serialized_record = json.loads(record.serialize())
# test titles
titles = [title['title'] for title in serialized_record['titles']]
assert osf_file.name in titles
assert node.title in titles
# test dates
dates = [date['date'] for date in serialized_record['dates']]
assert str(osf_file.created) in dates
assert str(osf_file.modified) in dates
assert str(osf_file.created.year) == serialized_record['publicationYear']
# no resource type provided
assert serialized_record['resourceType']['resourceType'] == '(:unas)'
assert serialized_record['resourceType']['resourceTypeGeneral'] == 'Other'
# guid in alternate identifiers
file_guid = osf_file.guids.first()._id
alternate_identifier = serialized_record['alternateIdentifiers'][0]
assert file_guid in alternate_identifier['alternateIdentifier']
# check for tags and subjects
subjects_in_record = [sub['subject'] for sub in serialized_record['subjects']]
assert bepress_subject.text in subjects_in_record
for tag in tags:
assert tag in subjects_in_record
# node license
rights = serialized_record['rightsList'][0]
assert rights['rights'] == no_license.name
assert rights['rightsURI'] == no_license.url
# test most recent version
assert serialized_record['version'] == osf_file.versions.first().identifier
def test_validate(self, node, osf_file):
record = osf_file.records.get(schema___id='datacite')
json_data = json.loads(record.serialize())
assert jsonschema.validate(
inject_placeholder_doi(json_data),
record.schema.schema
) is None
@pytest.mark.django_db
class TestFileMetadataRecord:
@pytest.fixture()
def initial_metadata(self):
return {
'file_description': 'Hello this is a description',
'resource_type': 'Book',
'related_publication_doi': '10.123/fkosf/hello'
}
@pytest.fixture()
def record(self, osf_file):
return osf_file.records.first()
def test_update_record(self, node, record, initial_metadata):
record.metadata = initial_metadata
record.save()
partial_metadata = {
'funders': [
{'funding_agency': 'Hello'},
{'funding_agency': 'Ric Flair', 'grant_number': 'Woooooo'},
]
}
record.update(partial_metadata, user=node.creator)
# Make sure an update creates a node log
assert node.logs.latest().action == NodeLog.FILE_METADATA_UPDATED
# Make sure old fields are cleared
assert all(key not in record.metadata for key in initial_metadata)
full_metadata = {
'funders': [
{'funding_agency': 'Hello'},
{'funding_agency': 'Ric Flair', 'grant_number': 'Woooooo'},
],
'file_description': 'Hey this is a great interesting important file',
'resource_type': 'Funding Submission',
'related_publication_doi': '10.12345/fk2osf.io/hello/'
}
record.update(full_metadata, user=node.creator)
json_data = json.loads(record.serialize())
datacite_user_entered_fields = ['fundingReferences', 'resourceType', 'descriptions', 'relatedIdentifiers']
for field in datacite_user_entered_fields:
assert field in json_data.keys()
# validate record with all user entered metadata
assert jsonschema.validate(
inject_placeholder_doi(json_data),
record.schema.schema
) is None
def test_update_fails_with_incorrect_metadata(self, node, record):
# metadata not in schema fails
wrong_metadata = {
'favorite_schema': 'crossref'
}
with pytest.raises(jsonschema.ValidationError):
record.update(wrong_metadata, user=node.creator)
record.reload()
assert record.metadata == {}
assert node.logs.latest().action != NodeLog.FILE_METADATA_UPDATED
# metadata not matching schema pattern fails
wrong_doi = {
'related_publication_doi': 'whatever'
}
with pytest.raises(jsonschema.ValidationError):
record.update(wrong_doi, user=node.creator)
# resource_type not in specified options fails
wrong_resource_type = {
'resource_type': 'Scrap Book'
}
with pytest.raises(jsonschema.ValidationError):
record.update(wrong_resource_type, user=node.creator)
# funders but no funding agency
no_funding_agency_metadata = {
'funders': [
{'grant_number': 'Woooo'}
]
}
with pytest.raises(jsonschema.ValidationError):
record.update(no_funding_agency_metadata, user=node.creator)
# additional properties for funders fails
more_funders_metadata = {
'funders': [
{'funding_agency': 'Woop', 'there_it': 'is'}
]
}
with pytest.raises(jsonschema.ValidationError):
record.update(more_funders_metadata, user=node.creator)
def test_update_permissions(self, node, record, initial_metadata):
# Can't update with non-contributor auth
rando = AuthUserFactory()
with pytest.raises(PermissionsError):
record.update(initial_metadata, user=rando)
# Can't update with read-only auth
read_contrib = AuthUserFactory()
node.add_contributor(read_contrib, permissions=READ)
node.save()
with pytest.raises(PermissionsError):
record.update(initial_metadata, user=read_contrib)
# Can't update with no auth
with pytest.raises(PermissionsError):
record.update(initial_metadata, user=None)
def test_forked_file_has_metadata_copied(self, node, record, initial_metadata):
record.metadata = initial_metadata
record.save()
fork = node.fork_node(auth=Auth(node.creator))
forked_record = fork.files.first().records.first()
assert forked_record.metadata == record.metadata
| Johnetordoff/osf.io | osf_tests/test_file_metadata.py | Python | apache-2.0 | 9,031 |
import datetime
import logging
from django.core.management.base import BaseCommand
from django.db import connection, transaction
from framework.celery_tasks import app as celery_app
logger = logging.getLogger(__name__)
TABLES_TO_POPULATE_WITH_MODIFIED = [
'addons_zotero_usersettings',
'addons_dropbox_usersettings',
'addons_dropbox_nodesettings',
'addons_figshare_nodesettings',
'addons_figshare_usersettings',
'addons_forward_nodesettings',
'addons_github_nodesettings',
'addons_github_usersettings',
'addons_gitlab_nodesettings',
'addons_gitlab_usersettings',
'addons_googledrive_nodesettings',
'addons_googledrive_usersettings',
'addons_mendeley_nodesettings',
'addons_mendeley_usersettings',
'addons_onedrive_nodesettings',
'addons_onedrive_usersettings',
'addons_osfstorage_nodesettings',
'addons_osfstorage_usersettings',
'addons_bitbucket_nodesettings',
'addons_bitbucket_usersettings',
'addons_owncloud_nodesettings',
'addons_box_nodesettings',
'addons_owncloud_usersettings',
'addons_box_usersettings',
'addons_dataverse_nodesettings',
'addons_dataverse_usersettings',
'addons_s3_nodesettings',
'addons_s3_usersettings',
'addons_twofactor_usersettings',
'addons_wiki_nodesettings',
'addons_zotero_nodesettings'
]
UPDATE_DELETED_WITH_MODIFIED = """UPDATE {} SET deleted=modified
WHERE id IN (SELECT id FROM {} WHERE is_deleted AND deleted IS NULL LIMIT {}) RETURNING id;"""
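# Rendered for one table, the statement looks roughly like this (table name
# and page size are illustrative):
#   UPDATE addons_box_nodesettings SET deleted=modified
#   WHERE id IN (SELECT id FROM addons_box_nodesettings
#                WHERE is_deleted AND deleted IS NULL LIMIT 1000) RETURNING id;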
@celery_app.task(name='management.commands.addon_deleted_date')
def populate_deleted(dry_run=False, page_size=1000):
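    """Backfill the new ``deleted`` column from ``modified`` for soft-deleted
    addon settings rows, one table at a time, inside a single transaction.
    A dry run raises after the updates so the transaction is rolled back."""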
with transaction.atomic():
for table in TABLES_TO_POPULATE_WITH_MODIFIED:
run_statements(UPDATE_DELETED_WITH_MODIFIED, page_size, table)
if dry_run:
raise RuntimeError('Dry Run -- Transaction rolled back')
def run_statements(statement, page_size, table):
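    """Run the templated statement against a single table and log whether any
    rows came back, which means another batch is still pending."""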
logger.info('Populating deleted column in table {}'.format(table))
with connection.cursor() as cursor:
cursor.execute(statement.format(table, table, page_size))
rows = cursor.fetchall()
if rows:
logger.info('Table {} still has rows to populate'.format(table))
class Command(BaseCommand):
help = '''Populates new deleted field for various models. Ensure you have run migrations
before running this script.'''
def add_arguments(self, parser):
        parser.add_argument(
            '--dry_run',
            action='store_true',
            help='Run queries inside a transaction that is rolled back, so nothing is committed',
        )
parser.add_argument(
'--page_size',
type=int,
default=1000,
help='How many rows to process at a time',
)
def handle(self, *args, **options):
script_start_time = datetime.datetime.now()
logger.info('Script started time: {}'.format(script_start_time))
logger.debug(options)
dry_run = options['dry_run']
page_size = options['page_size']
if dry_run:
logger.info('DRY RUN')
populate_deleted(dry_run, page_size)
script_finish_time = datetime.datetime.now()
logger.info('Script finished time: {}'.format(script_finish_time))
logger.info('Run time {}'.format(script_finish_time - script_start_time))
| mfraezz/osf.io | osf/management/commands/addon_deleted_date.py | Python | apache-2.0 | 3,358 |
"""Tests for the Bond fan device."""
from datetime import timedelta
from typing import Optional
from bond_api import Action, DeviceType, Direction
from homeassistant import core
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_DIRECTION,
ATTR_SPEED,
ATTR_SPEED_LIST,
DIRECTION_FORWARD,
DIRECTION_REVERSE,
DOMAIN as FAN_DOMAIN,
SERVICE_SET_DIRECTION,
SERVICE_SET_SPEED,
SPEED_OFF,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.util import utcnow
from .common import (
help_test_entity_available,
patch_bond_action,
patch_bond_device_state,
setup_platform,
)
from tests.common import async_fire_time_changed
def ceiling_fan(name: str):
"""Create a ceiling fan with given name."""
return {
"name": name,
"type": DeviceType.CEILING_FAN,
"actions": ["SetSpeed", "SetDirection"],
}
async def turn_fan_on(
hass: core.HomeAssistant, fan_id: str, speed: Optional[str] = None
) -> None:
"""Turn the fan on at the specified speed."""
service_data = {ATTR_ENTITY_ID: fan_id}
if speed:
service_data[fan.ATTR_SPEED] = speed
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_TURN_ON,
service_data=service_data,
blocking=True,
)
await hass.async_block_till_done()
async def test_entity_registry(hass: core.HomeAssistant):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_version={"bondid": "test-hub-id"},
bond_device_id="test-device-id",
)
registry: EntityRegistry = await hass.helpers.entity_registry.async_get_registry()
entity = registry.entities["fan.name_1"]
assert entity.unique_id == "test-hub-id_test-device-id"
async def test_non_standard_speed_list(hass: core.HomeAssistant):
"""Tests that the device is registered with custom speed list if number of supported speeds differs form 3."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
props={"max_speed": 6},
)
actual_speeds = hass.states.get("fan.name_1").attributes[ATTR_SPEED_LIST]
assert actual_speeds == [
fan.SPEED_OFF,
fan.SPEED_LOW,
fan.SPEED_MEDIUM,
fan.SPEED_HIGH,
]
with patch_bond_device_state():
with patch_bond_action() as mock_set_speed_low:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_LOW)
mock_set_speed_low.assert_called_once_with(
"test-device-id", Action.set_speed(1)
)
with patch_bond_action() as mock_set_speed_medium:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_MEDIUM)
mock_set_speed_medium.assert_called_once_with(
"test-device-id", Action.set_speed(3)
)
with patch_bond_action() as mock_set_speed_high:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_HIGH)
mock_set_speed_high.assert_called_once_with(
"test-device-id", Action.set_speed(6)
)
async def test_fan_speed_with_no_max_speed(hass: core.HomeAssistant):
"""Tests that fans without max speed (increase/decrease controls) map speed to HA standard."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
props={"no": "max_speed"},
state={"power": 1, "speed": 14},
)
assert hass.states.get("fan.name_1").attributes["speed"] == fan.SPEED_HIGH
async def test_turn_on_fan_with_speed(hass: core.HomeAssistant):
"""Tests that turn on command delegates to set speed API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_speed, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", fan.SPEED_LOW)
mock_set_speed.assert_called_with("test-device-id", Action.set_speed(1))
async def test_turn_on_fan_without_speed(hass: core.HomeAssistant):
"""Tests that turn on command delegates to turn on API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_on, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1")
mock_turn_on.assert_called_with("test-device-id", Action.turn_on())
async def test_turn_on_fan_with_off_speed(hass: core.HomeAssistant):
"""Tests that turn on command delegates to turn off API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", fan.SPEED_OFF)
mock_turn_off.assert_called_with("test-device-id", Action.turn_off())
async def test_set_speed_off(hass: core.HomeAssistant):
"""Tests that set_speed(off) command delegates to turn off API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_SPEED,
service_data={ATTR_ENTITY_ID: "fan.name_1", ATTR_SPEED: SPEED_OFF},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called_with("test-device-id", Action.turn_off())
async def test_turn_off_fan(hass: core.HomeAssistant):
"""Tests that turn off command delegates to API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "fan.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called_once_with("test-device-id", Action.turn_off())
async def test_update_reports_fan_on(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports fan power is on."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"power": 1, "speed": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").state == "on"
async def test_update_reports_fan_off(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports fan power is off."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"power": 0, "speed": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").state == "off"
async def test_update_reports_direction_forward(hass: core.HomeAssistant):
"""Tests that update command sets correct direction when Bond API reports fan direction is forward."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"direction": Direction.FORWARD}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").attributes[ATTR_DIRECTION] == DIRECTION_FORWARD
async def test_update_reports_direction_reverse(hass: core.HomeAssistant):
"""Tests that update command sets correct direction when Bond API reports fan direction is reverse."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"direction": Direction.REVERSE}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").attributes[ATTR_DIRECTION] == DIRECTION_REVERSE
async def test_set_fan_direction(hass: core.HomeAssistant):
"""Tests that set direction command delegates to API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_direction, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_DIRECTION,
{ATTR_ENTITY_ID: "fan.name_1", ATTR_DIRECTION: DIRECTION_FORWARD},
blocking=True,
)
await hass.async_block_till_done()
mock_set_direction.assert_called_once_with(
"test-device-id", Action.set_direction(Direction.FORWARD)
)
async def test_fan_available(hass: core.HomeAssistant):
"""Tests that available state is updated based on API errors."""
await help_test_entity_available(
hass, FAN_DOMAIN, ceiling_fan("name-1"), "fan.name_1"
)
| tchellomello/home-assistant | tests/components/bond/test_fan.py | Python | apache-2.0 | 9,312 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.trainer."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection import trainer
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.protos import train_pb2
NUMBER_OF_CLASSES = 2
def get_input_function():
"""A function to get test inputs. Returns an image with one box."""
image = tf.random_uniform([32, 32, 3], dtype=tf.float32)
class_label = tf.random_uniform(
[1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32)
box_label = tf.random_uniform(
[1, 4], minval=0.4, maxval=0.6, dtype=tf.float32)
return {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_classes: class_label,
fields.InputDataFields.groundtruth_boxes: box_label
}
class FakeDetectionModel(model.DetectionModel):
"""A simple (and poor) DetectionModel for use in test."""
def __init__(self):
super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES)
self._classification_loss = losses.WeightedSigmoidClassificationLoss(
anchorwise_output=True)
self._localization_loss = losses.WeightedSmoothL1LocalizationLoss(
anchorwise_output=True)
def preprocess(self, inputs):
"""Input preprocessing, resizes images to 28x28.
Args:
inputs: a [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
"""
return tf.image.resize_images(inputs, [28, 28])
def predict(self, preprocessed_inputs):
"""Prediction tensors from inputs tensor.
Args:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
Returns:
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.
"""
flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
class_prediction = tf.contrib.layers.fully_connected(
flattened_inputs, self._num_classes)
box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)
return {
'class_predictions_with_background': tf.reshape(
class_prediction, [-1, 1, self._num_classes]),
'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
}
def postprocess(self, prediction_dict, **params):
"""Convert predicted output tensors to final detections. Unused.
Args:
prediction_dict: a dictionary holding prediction tensors.
**params: Additional keyword arguments for specific implementations of
DetectionModel.
Returns:
detections: a dictionary with empty fields.
"""
return {
'detection_boxes': None,
'detection_scores': None,
'detection_classes': None,
'num_detections': None
}
def loss(self, prediction_dict):
"""Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding predicted tensors
Returns:
a dictionary mapping strings (loss names) to scalar tensors representing
loss values.
"""
batch_reg_targets = tf.stack(
self.groundtruth_lists(fields.BoxListFields.boxes))
batch_cls_targets = tf.stack(
self.groundtruth_lists(fields.BoxListFields.classes))
weights = tf.constant(
1.0, dtype=tf.float32,
shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1])
location_losses = self._localization_loss(
prediction_dict['box_encodings'], batch_reg_targets,
weights=weights)
cls_losses = self._classification_loss(
prediction_dict['class_predictions_with_background'], batch_cls_targets,
weights=weights)
loss_dict = {
'localization_loss': tf.reduce_sum(location_losses),
'classification_loss': tf.reduce_sum(cls_losses),
}
return loss_dict
def restore_fn(self, checkpoint_path, from_detection_checkpoint=True):
"""Return callable for loading a checkpoint into the tensorflow graph.
Args:
checkpoint_path: path to checkpoint to restore.
from_detection_checkpoint: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Returns:
a callable which takes a tf.Session and does nothing.
"""
def restore(unused_sess):
return
return restore
class TrainerTest(tf.test.TestCase):
def test_configure_trainer_and_train_two_steps(self):
train_config_text_proto = """
optimizer {
adam_optimizer {
learning_rate {
constant_learning_rate {
learning_rate: 0.01
}
}
}
}
data_augmentation_options {
random_adjust_brightness {
max_delta: 0.2
}
}
data_augmentation_options {
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
}
num_steps: 2
"""
train_config = train_pb2.TrainConfig()
text_format.Merge(train_config_text_proto, train_config)
train_dir = self.get_temp_dir()
trainer.train(create_tensor_dict_fn=get_input_function,
create_model_fn=FakeDetectionModel,
train_config=train_config,
master='',
task=0,
num_clones=1,
worker_replicas=1,
clone_on_cpu=True,
ps_tasks=0,
worker_job_name='worker',
is_chief=True,
train_dir=train_dir)
if __name__ == '__main__':
tf.test.main()
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/object_detection/trainer_test.py | Python | bsd-2-clause | 6,635 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from metrics import power
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class _DromaeoMeasurement(page_measurement.PageMeasurement):
def __init__(self):
super(_DromaeoMeasurement, self).__init__()
self._power_metric = power.PowerMetric()
def CustomizeBrowserOptions(self, options):
power.PowerMetric.CustomizeBrowserOptions(options)
def DidNavigateToPage(self, page, tab):
self._power_metric.Start(page, tab)
def MeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression(
'window.document.cookie.indexOf("__done=1") >= 0', 600)
self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results)
js_get_results = 'JSON.stringify(window.automation.GetResults())'
print js_get_results
score = eval(tab.EvaluateJavaScript(js_get_results))
def Escape(k):
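      """Replace characters that are unsafe in perf result names with underscores."""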
chars = [' ', '-', '/', '(', ')', '*']
for c in chars:
k = k.replace(c, '_')
return k
suffix = page.url[page.url.index('?') + 1 : page.url.index('&')]
for k, v in score.iteritems():
data_type = 'unimportant'
if k == suffix:
data_type = 'default'
results.Add(Escape(k), 'runs/s', float(v), data_type=data_type)
class _DromaeoBenchmark(test.Test):
"""A base class for Dromaeo benchmarks."""
test = _DromaeoMeasurement
def CreatePageSet(self, options):
"""Makes a PageSet for Dromaeo benchmarks."""
# Subclasses are expected to define a class member called query_param.
if not hasattr(self, 'query_param'):
raise NotImplementedError('query_param not in Dromaeo benchmark.')
url = 'file://index.html?%s&automated' % self.query_param
dromaeo_dir = os.path.join(util.GetChromiumSrcDir(),
'chrome', 'test', 'data', 'dromaeo')
ps = page_set.PageSet(file_path=dromaeo_dir)
ps.AddPageWithDefaultRunNavigate(url)
return ps
class DromaeoDomCoreAttr(_DromaeoBenchmark):
"""Dromaeo DOMCore attr JavaScript benchmark."""
tag = 'domcoreattr'
query_param = 'dom-attr'
@test.Disabled('xp') # crbug.com/323782
class DromaeoDomCoreModify(_DromaeoBenchmark):
"""Dromaeo DOMCore modify JavaScript benchmark."""
tag = 'domcoremodify'
query_param = 'dom-modify'
class DromaeoDomCoreQuery(_DromaeoBenchmark):
"""Dromaeo DOMCore query JavaScript benchmark."""
tag = 'domcorequery'
query_param = 'dom-query'
class DromaeoDomCoreTraverse(_DromaeoBenchmark):
"""Dromaeo DOMCore traverse JavaScript benchmark."""
tag = 'domcoretraverse'
query_param = 'dom-traverse'
class DromaeoJslibAttrJquery(_DromaeoBenchmark):
"""Dromaeo JSLib attr jquery JavaScript benchmark"""
tag = 'jslibattrjquery'
query_param = 'jslib-attr-jquery'
class DromaeoJslibAttrPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib attr prototype JavaScript benchmark"""
tag = 'jslibattrprototype'
query_param = 'jslib-attr-prototype'
class DromaeoJslibEventJquery(_DromaeoBenchmark):
"""Dromaeo JSLib event jquery JavaScript benchmark"""
tag = 'jslibeventjquery'
query_param = 'jslib-event-jquery'
class DromaeoJslibEventPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib event prototype JavaScript benchmark"""
tag = 'jslibeventprototype'
query_param = 'jslib-event-prototype'
class DromaeoJslibModifyJquery(_DromaeoBenchmark):
"""Dromaeo JSLib modify jquery JavaScript benchmark"""
tag = 'jslibmodifyjquery'
query_param = 'jslib-modify-jquery'
class DromaeoJslibModifyPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib modify prototype JavaScript benchmark"""
tag = 'jslibmodifyprototype'
query_param = 'jslib-modify-prototype'
class DromaeoJslibStyleJquery(_DromaeoBenchmark):
"""Dromaeo JSLib style jquery JavaScript benchmark"""
tag = 'jslibstylejquery'
query_param = 'jslib-style-jquery'
class DromaeoJslibStylePrototype(_DromaeoBenchmark):
"""Dromaeo JSLib style prototype JavaScript benchmark"""
tag = 'jslibstyleprototype'
query_param = 'jslib-style-prototype'
class DromaeoJslibTraverseJquery(_DromaeoBenchmark):
"""Dromaeo JSLib traverse jquery JavaScript benchmark"""
tag = 'jslibtraversejquery'
query_param = 'jslib-traverse-jquery'
class DromaeoJslibTraversePrototype(_DromaeoBenchmark):
"""Dromaeo JSLib traverse prototype JavaScript benchmark"""
tag = 'jslibtraverseprototype'
query_param = 'jslib-traverse-prototype'
| TeamEOS/external_chromium_org | tools/perf/benchmarks/dromaeo.py | Python | bsd-3-clause | 4,623 |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import errno
import hashlib
import os
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import time
import zipfile
from datetime import datetime
from typing import Dict, NamedTuple
from urllib.parse import urlparse
from urllib.request import urlretrieve
from .copytree import prefetch_dir_if_eden
from .envfuncs import Env
from .errors import TransientFailure
from .platform import is_windows
from .runcmd import run_cmd
def file_name_is_cmake_file(file_name):
file_name = file_name.lower()
base = os.path.basename(file_name)
return (
base.endswith(".cmake")
or base.endswith(".cmake.in")
or base == "cmakelists.txt"
)
class ChangeStatus(object):
"""Indicates the nature of changes that happened while updating
the source directory. There are two broad uses:
* When extracting archives for third party software we want to
know that we did something (eg: we either extracted code or
we didn't do anything)
* For 1st party code where we use shipit to transform the code,
we want to know if we changed anything so that we can perform
a build, but we generally want to be a little more nuanced
and be able to distinguish between just changing a source file
and whether we might need to reconfigure the build system.
"""
def __init__(self, all_changed: bool = False) -> None:
"""Construct a ChangeStatus object. The default is to create
a status that indicates no changes, but passing all_changed=True
will create one that indicates that everything changed"""
if all_changed:
self.source_files = 1
self.make_files = 1
else:
self.source_files = 0
self.make_files = 0
def record_change(self, file_name) -> None:
"""Used by the shipit fetcher to record changes as it updates
files in the destination. If the file name might be one used
in the cmake build system that we use for 1st party code, then
record that as a "make file" change. We could broaden this
to match any file used by various build systems, but it is
only really useful for our internal cmake stuff at this time.
If the file isn't a build file and is under the `fbcode_builder`
dir then we don't class that as an interesting change that we
might need to rebuild, so we ignore it.
Otherwise we record the file as a source file change."""
file_name = file_name.lower()
if file_name_is_cmake_file(file_name):
self.make_files += 1
elif "/fbcode_builder/cmake" in file_name:
self.source_files += 1
elif "/fbcode_builder/" not in file_name:
self.source_files += 1
def sources_changed(self) -> bool:
"""Returns true if any source files were changed during
an update operation. This will typically be used to decide
that the build system to be run on the source dir in an
incremental mode"""
return self.source_files > 0
def build_changed(self) -> bool:
"""Returns true if any build files were changed during
        an update operation. This will typically be used to decide
that the build system should be reconfigured and re-run
as a full build"""
return self.make_files > 0
class Fetcher(object):
"""The Fetcher is responsible for fetching and extracting the
sources for project. The Fetcher instance defines where the
extracted data resides and reports this to the consumer via
its `get_src_dir` method."""
def update(self) -> ChangeStatus:
"""Brings the src dir up to date, ideally minimizing
changes so that a subsequent build doesn't over-build.
Returns a ChangeStatus object that helps the caller to
understand the nature of the changes required during
the update."""
return ChangeStatus()
def clean(self) -> None:
"""Reverts any changes that might have been made to
the src dir"""
pass
def hash(self) -> None:
"""Returns a hash that identifies the version of the code in the
working copy. For a git repo this is commit hash for the working
copy. For other Fetchers this should relate to the version of
the code in the src dir. The intent is that if a manifest
changes the version/rev of a project that the hash be different.
Importantly, this should be computable without actually fetching
the code, as we want this to factor into a hash used to download
a pre-built version of the code, without having to first download
and extract its sources (eg: boost on windows is pretty painful).
"""
pass
def get_src_dir(self) -> None:
"""Returns the source directory that the project was
extracted into"""
pass
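# A minimal sketch of how a Fetcher is typically driven by a caller (the names
# below are illustrative; the real driver lives elsewhere in getdeps):
#
#   fetcher = GitFetcher(build_options, manifest, repo_url, rev=None, depth=None)
#   status = fetcher.update()
#   if status.build_changed():
#       ...  # reconfigure and rebuild from fetcher.get_src_dir()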
class LocalDirFetcher(object):
"""This class exists to override the normal fetching behavior, and
use an explicit user-specified directory for the project sources.
This fetcher cannot update or track changes. It always reports that the
project has changed, forcing it to always be built."""
def __init__(self, path) -> None:
self.path = os.path.realpath(path)
def update(self) -> ChangeStatus:
return ChangeStatus(all_changed=True)
def hash(self) -> str:
return "0" * 40
def get_src_dir(self):
return self.path
class SystemPackageFetcher(object):
def __init__(self, build_options, packages) -> None:
self.manager = build_options.host_type.get_package_manager()
self.packages = packages.get(self.manager)
self.host_type = build_options.host_type
if self.packages:
self.installed = None
else:
self.installed = False
def packages_are_installed(self):
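        """Probe the host package manager for the requested packages, caching
        the raw command output so it can be hashed later."""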
if self.installed is not None:
return self.installed
cmd = None
if self.manager == "rpm":
cmd = ["rpm", "-q"] + sorted(self.packages)
elif self.manager == "deb":
cmd = ["dpkg", "-s"] + sorted(self.packages)
elif self.manager == "homebrew":
cmd = ["brew", "ls", "--versions"] + sorted(self.packages)
if cmd:
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc.returncode == 0:
# captured as binary as we will hash this later
self.installed = proc.stdout
else:
# Need all packages to be present to consider us installed
self.installed = False
else:
self.installed = False
return bool(self.installed)
def update(self) -> ChangeStatus:
assert self.installed
return ChangeStatus(all_changed=False)
def hash(self) -> str:
if self.packages_are_installed():
return hashlib.sha256(self.installed).hexdigest()
else:
return "0" * 40
def get_src_dir(self) -> None:
return None
class PreinstalledNopFetcher(SystemPackageFetcher):
def __init__(self) -> None:
self.installed = True
class GitFetcher(Fetcher):
DEFAULT_DEPTH = 1
def __init__(self, build_options, manifest, repo_url, rev, depth) -> None:
# Extract the host/path portions of the URL and generate a flattened
# directory name. eg:
# github.com/facebook/folly.git -> github.com-facebook-folly.git
url = urlparse(repo_url)
directory = "%s%s" % (url.netloc, url.path)
for s in ["/", "\\", ":"]:
directory = directory.replace(s, "-")
# Place it in a repos dir in the scratch space
repos_dir = os.path.join(build_options.scratch_dir, "repos")
if not os.path.exists(repos_dir):
os.makedirs(repos_dir)
self.repo_dir = os.path.join(repos_dir, directory)
if not rev and build_options.project_hashes:
hash_file = os.path.join(
build_options.project_hashes,
re.sub("\\.git$", "-rev.txt", url.path[1:]),
)
if os.path.exists(hash_file):
with open(hash_file, "r") as f:
data = f.read()
m = re.match("Subproject commit ([a-fA-F0-9]{40})", data)
if not m:
raise Exception("Failed to parse rev from %s" % hash_file)
rev = m.group(1)
print("Using pinned rev %s for %s" % (rev, repo_url))
self.rev = rev or "main"
self.origin_repo = repo_url
self.manifest = manifest
self.depth = depth if depth else GitFetcher.DEFAULT_DEPTH
def _update(self) -> ChangeStatus:
current_hash = (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=self.repo_dir)
.strip()
.decode("utf-8")
)
target_hash = (
subprocess.check_output(["git", "rev-parse", self.rev], cwd=self.repo_dir)
.strip()
.decode("utf-8")
)
if target_hash == current_hash:
# It's up to date, so there are no changes. This doesn't detect eg:
# if origin/main moved and rev='main', but that's ok for our purposes;
# we should be using explicit hashes or eg: a stable branch for the cases
# that we care about, and it isn't unreasonable to require that the user
# explicitly perform a clean build if those have moved. For the most
# part we prefer that folks build using a release tarball from github
# rather than use the git protocol, as it is generally a bit quicker
# to fetch and easier to hash and verify tarball downloads.
return ChangeStatus()
print("Updating %s -> %s" % (self.repo_dir, self.rev))
run_cmd(["git", "fetch", "origin", self.rev], cwd=self.repo_dir)
run_cmd(["git", "checkout", self.rev], cwd=self.repo_dir)
run_cmd(["git", "submodule", "update", "--init"], cwd=self.repo_dir)
return ChangeStatus(True)
def update(self) -> ChangeStatus:
if os.path.exists(self.repo_dir):
return self._update()
self._clone()
return ChangeStatus(True)
def _clone(self) -> None:
print("Cloning %s..." % self.origin_repo)
# The basename/dirname stuff allows us to dance around issues where
# eg: this python process is native win32, but the git.exe is cygwin
# or msys and doesn't like the absolute windows path that we'd otherwise
# pass to it. Careful use of cwd helps avoid headaches with cygpath.
run_cmd(
[
"git",
"clone",
"--depth=" + str(self.depth),
"--",
self.origin_repo,
os.path.basename(self.repo_dir),
],
cwd=os.path.dirname(self.repo_dir),
)
self._update()
def clean(self) -> None:
if os.path.exists(self.repo_dir):
run_cmd(["git", "clean", "-fxd"], cwd=self.repo_dir)
def hash(self):
return self.rev
def get_src_dir(self):
return self.repo_dir
def does_file_need_update(src_name, src_st, dest_name):
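    """Return True if dest_name must be refreshed from src_name: it is
    missing, differs in size or file type, points at a different symlink
    target, or has different byte content."""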
try:
target_st = os.lstat(dest_name)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
return True
if src_st.st_size != target_st.st_size:
return True
if stat.S_IFMT(src_st.st_mode) != stat.S_IFMT(target_st.st_mode):
return True
if stat.S_ISLNK(src_st.st_mode):
return os.readlink(src_name) != os.readlink(dest_name)
if not stat.S_ISREG(src_st.st_mode):
return True
# They might have the same content; compare.
with open(src_name, "rb") as sf, open(dest_name, "rb") as df:
chunk_size = 8192
while True:
src_data = sf.read(chunk_size)
dest_data = df.read(chunk_size)
if src_data != dest_data:
return True
if len(src_data) < chunk_size:
# EOF
break
return False
def copy_if_different(src_name, dest_name) -> bool:
"""Copy src_name -> dest_name, but only touch dest_name
if src_name is different from dest_name, making this a
more build system friendly way to copy."""
src_st = os.lstat(src_name)
if not does_file_need_update(src_name, src_st, dest_name):
return False
dest_parent = os.path.dirname(dest_name)
if not os.path.exists(dest_parent):
os.makedirs(dest_parent)
if stat.S_ISLNK(src_st.st_mode):
try:
os.unlink(dest_name)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
target = os.readlink(src_name)
print("Symlinking %s -> %s" % (dest_name, target))
os.symlink(target, dest_name)
else:
print("Copying %s -> %s" % (src_name, dest_name))
shutil.copy2(src_name, dest_name)
return True
def list_files_under_dir_newer_than_timestamp(dir_to_scan, ts):
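    """Yield the full path of each file under dir_to_scan whose mtime is
    newer than the timestamp ts."""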
for root, _dirs, files in os.walk(dir_to_scan):
for src_file in files:
full_name = os.path.join(root, src_file)
st = os.lstat(full_name)
if st.st_mtime > ts:
yield full_name
class ShipitPathMap(object):
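    """Maps paths from an fbsource checkout into a destination tree.

    A small usage sketch (the paths are illustrative, not from any real
    manifest):

        mapping = ShipitPathMap()
        mapping.add_mapping('fbcode/folly', '.')
        mapping.add_exclusion('.*/facebook/.*')
        status = mapping.mirror('/path/to/fbsource', '/tmp/folly-out')
    """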
def __init__(self) -> None:
self.roots = []
self.mapping = []
self.exclusion = []
def add_mapping(self, fbsource_dir, target_dir) -> None:
"""Add a posix path or pattern. We cannot normpath the input
here because that would change the paths from posix to windows
form and break the logic throughout this class."""
self.roots.append(fbsource_dir)
self.mapping.append((fbsource_dir, target_dir))
def add_exclusion(self, pattern) -> None:
self.exclusion.append(re.compile(pattern))
def _minimize_roots(self) -> None:
"""compute the de-duplicated set of roots within fbsource.
We take the shortest common directory prefix to make this
determination"""
self.roots.sort(key=len)
minimized = []
for r in self.roots:
add_this_entry = True
for existing in minimized:
if r.startswith(existing + "/"):
add_this_entry = False
break
if add_this_entry:
minimized.append(r)
self.roots = minimized
def _sort_mapping(self) -> None:
self.mapping.sort(reverse=True, key=lambda x: len(x[0]))
def _map_name(self, norm_name, dest_root):
if norm_name.endswith(".pyc") or norm_name.endswith(".swp"):
# Ignore some incidental garbage while iterating
return None
for excl in self.exclusion:
if excl.match(norm_name):
return None
for src_name, dest_name in self.mapping:
if norm_name == src_name or norm_name.startswith(src_name + "/"):
rel_name = os.path.relpath(norm_name, src_name)
# We can have "." as a component of some paths, depending
# on the contents of the shipit transformation section.
# normpath doesn't always remove `.` as the final component
                # of the path, which can be problematic when we later mkdir
# the dirname of the path that we return. Take care to avoid
# returning a path with a `.` in it.
rel_name = os.path.normpath(rel_name)
if dest_name == ".":
return os.path.normpath(os.path.join(dest_root, rel_name))
dest_name = os.path.normpath(dest_name)
return os.path.normpath(os.path.join(dest_root, dest_name, rel_name))
raise Exception("%s did not match any rules" % norm_name)
def mirror(self, fbsource_root, dest_root) -> ChangeStatus:
self._minimize_roots()
self._sort_mapping()
change_status = ChangeStatus()
# Record the full set of files that should be in the tree
full_file_list = set()
if sys.platform == "win32":
# Let's not assume st_dev has a consistent value on Windows.
def st_dev(path):
return 1
else:
def st_dev(path):
return os.lstat(path).st_dev
for fbsource_subdir in self.roots:
dir_to_mirror = os.path.join(fbsource_root, fbsource_subdir)
root_dev = st_dev(dir_to_mirror)
prefetch_dir_if_eden(dir_to_mirror)
if not os.path.exists(dir_to_mirror):
raise Exception(
"%s doesn't exist; check your sparse profile!" % dir_to_mirror
)
for root, dirs, files in os.walk(dir_to_mirror):
dirs[:] = [d for d in dirs if root_dev == st_dev(os.path.join(root, d))]
for src_file in files:
full_name = os.path.join(root, src_file)
rel_name = os.path.relpath(full_name, fbsource_root)
norm_name = rel_name.replace("\\", "/")
target_name = self._map_name(norm_name, dest_root)
if target_name:
full_file_list.add(target_name)
if copy_if_different(full_name, target_name):
change_status.record_change(target_name)
# Compare the list of previously shipped files; if a file is
# in the old list but not the new list then it has been
# removed from the source and should be removed from the
# destination.
# Why don't we simply create this list by walking dest_root?
# Some builds currently have to be in-source builds and
# may legitimately need to keep some state in the source tree :-/
installed_name = os.path.join(dest_root, ".shipit_shipped")
if os.path.exists(installed_name):
with open(installed_name, "rb") as f:
for name in f.read().decode("utf-8").splitlines():
name = name.strip()
if name not in full_file_list:
print("Remove %s" % name)
os.unlink(name)
change_status.record_change(name)
with open(installed_name, "wb") as f:
for name in sorted(list(full_file_list)):
f.write(("%s\n" % name).encode("utf-8"))
return change_status
class FbsourceRepoData(NamedTuple):
hash: str
date: str
FBSOURCE_REPO_DATA: Dict[str, FbsourceRepoData] = {}
def get_fbsource_repo_data(build_options) -> FbsourceRepoData:
"""Returns the commit metadata for the fbsource repo.
Since we may have multiple first party projects to
hash, and because we don't mutate the repo, we cache
this hash in a global."""
cached_data = FBSOURCE_REPO_DATA.get(build_options.fbsource_dir)
if cached_data:
return cached_data
cmd = ["hg", "log", "-r.", "-T{node}\n{date|hgdate}"]
env = Env()
env.set("HGPLAIN", "1")
log_data = subprocess.check_output(
cmd, cwd=build_options.fbsource_dir, env=dict(env.items())
).decode("ascii")
(hash, datestr) = log_data.split("\n")
# datestr is like "seconds fractionalseconds"
# We want "20200324.113140"
(unixtime, _fractional) = datestr.split(" ")
date = datetime.fromtimestamp(int(unixtime)).strftime("%Y%m%d.%H%M%S")
cached_data = FbsourceRepoData(hash=hash, date=date)
FBSOURCE_REPO_DATA[build_options.fbsource_dir] = cached_data
return cached_data
class SimpleShipitTransformerFetcher(Fetcher):
def __init__(self, build_options, manifest, ctx) -> None:
self.build_options = build_options
self.manifest = manifest
self.repo_dir = os.path.join(build_options.scratch_dir, "shipit", manifest.name)
self.ctx = ctx
def clean(self) -> None:
if os.path.exists(self.repo_dir):
shutil.rmtree(self.repo_dir)
def update(self) -> ChangeStatus:
mapping = ShipitPathMap()
for src, dest in self.manifest.get_section_as_ordered_pairs(
"shipit.pathmap", self.ctx
):
mapping.add_mapping(src, dest)
if self.manifest.shipit_fbcode_builder:
mapping.add_mapping(
"fbcode/opensource/fbcode_builder", "build/fbcode_builder"
)
for pattern in self.manifest.get_section_as_args("shipit.strip", self.ctx):
mapping.add_exclusion(pattern)
return mapping.mirror(self.build_options.fbsource_dir, self.repo_dir)
# pyre-fixme[15]: `hash` overrides method defined in `Fetcher` inconsistently.
def hash(self) -> str:
# We return a fixed non-hash string for in-fbsource builds.
# We're relying on the `update` logic to correctly invalidate
# the build in the case that files have changed.
return "fbsource"
def get_src_dir(self):
return self.repo_dir
class ShipitTransformerFetcher(Fetcher):
SHIPIT = "/var/www/scripts/opensource/shipit/run_shipit.php"
def __init__(self, build_options, project_name) -> None:
self.build_options = build_options
self.project_name = project_name
self.repo_dir = os.path.join(build_options.scratch_dir, "shipit", project_name)
def update(self) -> ChangeStatus:
if os.path.exists(self.repo_dir):
return ChangeStatus()
self.run_shipit()
return ChangeStatus(True)
def clean(self) -> None:
if os.path.exists(self.repo_dir):
shutil.rmtree(self.repo_dir)
@classmethod
def available(cls):
return os.path.exists(cls.SHIPIT)
def run_shipit(self) -> None:
tmp_path = self.repo_dir + ".new"
try:
if os.path.exists(tmp_path):
shutil.rmtree(tmp_path)
# Run shipit
run_cmd(
[
"php",
ShipitTransformerFetcher.SHIPIT,
"--project=" + self.project_name,
"--create-new-repo",
"--source-repo-dir=" + self.build_options.fbsource_dir,
"--source-branch=.",
"--skip-source-init",
"--skip-source-pull",
"--skip-source-clean",
"--skip-push",
"--skip-reset",
"--destination-use-anonymous-https",
"--create-new-repo-output-path=" + tmp_path,
]
)
# Remove the .git directory from the repository it generated.
# There is no need to commit this.
repo_git_dir = os.path.join(tmp_path, ".git")
shutil.rmtree(repo_git_dir)
os.rename(tmp_path, self.repo_dir)
except Exception:
# Clean up after a failed extraction
if os.path.exists(tmp_path):
shutil.rmtree(tmp_path)
self.clean()
raise
# pyre-fixme[15]: `hash` overrides method defined in `Fetcher` inconsistently.
def hash(self) -> str:
# We return a fixed non-hash string for in-fbsource builds.
return "fbsource"
def get_src_dir(self):
return self.repo_dir
def download_url_to_file_with_progress(url: str, file_name) -> None:
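    """Download url into file_name, printing coarse progress, and raise
    TransientFailure on network errors so that callers may retry."""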
print("Download %s -> %s ..." % (url, file_name))
class Progress(object):
last_report = 0
def progress(self, count, block, total):
if total == -1:
total = "(Unknown)"
amount = count * block
if sys.stdout.isatty():
sys.stdout.write("\r downloading %s of %s " % (amount, total))
else:
# When logging to CI logs, avoid spamming the logs and print
# status every few seconds
now = time.time()
if now - self.last_report > 5:
sys.stdout.write(".. %s of %s " % (amount, total))
self.last_report = now
sys.stdout.flush()
progress = Progress()
start = time.time()
try:
(_filename, headers) = urlretrieve(url, file_name, reporthook=progress.progress)
except (OSError, IOError) as exc: # noqa: B014
raise TransientFailure(
"Failed to download %s to %s: %s" % (url, file_name, str(exc))
)
end = time.time()
sys.stdout.write(" [Complete in %f seconds]\n" % (end - start))
sys.stdout.flush()
print(f"{headers}")
class ArchiveFetcher(Fetcher):
def __init__(self, build_options, manifest, url, sha256) -> None:
self.manifest = manifest
self.url = url
self.sha256 = sha256
self.build_options = build_options
url = urlparse(self.url)
basename = "%s-%s" % (manifest.name, os.path.basename(url.path))
self.file_name = os.path.join(build_options.scratch_dir, "downloads", basename)
self.src_dir = os.path.join(build_options.scratch_dir, "extracted", basename)
self.hash_file = self.src_dir + ".hash"
def _verify_hash(self) -> None:
h = hashlib.sha256()
with open(self.file_name, "rb") as f:
while True:
block = f.read(8192)
if not block:
break
h.update(block)
digest = h.hexdigest()
if digest != self.sha256:
os.unlink(self.file_name)
raise Exception(
"%s: expected sha256 %s but got %s" % (self.url, self.sha256, digest)
)
def _download_dir(self):
"""returns the download dir, creating it if it doesn't already exist"""
download_dir = os.path.dirname(self.file_name)
if not os.path.exists(download_dir):
os.makedirs(download_dir)
return download_dir
def _download(self) -> None:
self._download_dir()
download_url_to_file_with_progress(self.url, self.file_name)
self._verify_hash()
def clean(self) -> None:
if os.path.exists(self.src_dir):
shutil.rmtree(self.src_dir)
def update(self) -> ChangeStatus:
try:
with open(self.hash_file, "r") as f:
saved_hash = f.read().strip()
if saved_hash == self.sha256 and os.path.exists(self.src_dir):
# Everything is up to date
return ChangeStatus()
print(
"saved hash %s doesn't match expected hash %s, re-validating"
% (saved_hash, self.sha256)
)
os.unlink(self.hash_file)
except EnvironmentError:
pass
# If we got here we know the contents of src_dir are either missing
# or wrong, so blow away whatever happened to be there first.
if os.path.exists(self.src_dir):
shutil.rmtree(self.src_dir)
# If we already have a file here, make sure it looks legit before
# proceeding: any errors and we just remove it and re-download
if os.path.exists(self.file_name):
try:
self._verify_hash()
except Exception:
if os.path.exists(self.file_name):
os.unlink(self.file_name)
if not os.path.exists(self.file_name):
self._download()
if tarfile.is_tarfile(self.file_name):
opener = tarfile.open
elif zipfile.is_zipfile(self.file_name):
opener = zipfile.ZipFile
else:
raise Exception("don't know how to extract %s" % self.file_name)
os.makedirs(self.src_dir)
print("Extract %s -> %s" % (self.file_name, self.src_dir))
t = opener(self.file_name)
if is_windows():
# Ensure that we don't fall over when dealing with long paths
# on windows
src = r"\\?\%s" % os.path.normpath(self.src_dir)
else:
src = self.src_dir
# The `str` here is necessary to ensure that we don't pass a unicode
# object down to tarfile.extractall on python2. When extracting
# the boost tarball it makes some assumptions and tries to convert
# a non-ascii path to ascii and throws.
src = str(src)
t.extractall(src)
with open(self.hash_file, "w") as f:
f.write(self.sha256)
return ChangeStatus(True)
def hash(self):
return self.sha256
def get_src_dir(self):
return self.src_dir
def homebrew_package_prefix(package):
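    """Return the installation prefix reported by ``brew --prefix`` for the
    given package, or None when brew is unavailable or the package is not
    installed."""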
cmd = ["brew", "--prefix", package]
try:
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except FileNotFoundError:
return
if proc.returncode == 0:
return proc.stdout.decode("utf-8").rstrip()
| facebook/watchman | build/fbcode_builder/getdeps/fetcher.py | Python | mit | 29,428 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
GridNearestNeighbor.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessing,
QgsProcessingParameterDefinition,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
QgsProcessingParameterField,
QgsProcessingParameterNumber,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class GridNearestNeighbor(GdalAlgorithm):
INPUT = 'INPUT'
Z_FIELD = 'Z_FIELD'
RADIUS_1 = 'RADIUS_1'
RADIUS_2 = 'RADIUS_2'
ANGLE = 'ANGLE'
NODATA = 'NODATA'
OPTIONS = 'OPTIONS'
EXTRA = 'EXTRA'
DATA_TYPE = 'DATA_TYPE'
OUTPUT = 'OUTPUT'
TYPES = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Point layer'),
[QgsProcessing.TypeVectorPoint]))
z_field_param = QgsProcessingParameterField(self.Z_FIELD,
self.tr('Z value from field'),
None,
self.INPUT,
QgsProcessingParameterField.Numeric,
optional=True)
z_field_param.setFlags(z_field_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(z_field_param)
self.addParameter(QgsProcessingParameterNumber(self.RADIUS_1,
self.tr('The first radius of search ellipse'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
defaultValue=0.0))
self.addParameter(QgsProcessingParameterNumber(self.RADIUS_2,
self.tr('The second radius of search ellipse'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
defaultValue=0.0))
self.addParameter(QgsProcessingParameterNumber(self.ANGLE,
self.tr('Angle of search ellipse rotation in degrees (counter clockwise)'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
maxValue=360.0,
defaultValue=0.0))
self.addParameter(QgsProcessingParameterNumber(self.NODATA,
self.tr('NODATA marker to fill empty points'),
type=QgsProcessingParameterNumber.Double,
defaultValue=0.0))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation options'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
options_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
self.addParameter(options_param)
extra_param = QgsProcessingParameterString(self.EXTRA,
self.tr('Additional command-line parameters'),
defaultValue=None,
optional=True)
extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(extra_param)
dataType_param = QgsProcessingParameterEnum(self.DATA_TYPE,
self.tr('Output data type'),
self.TYPES,
allowMultiple=False,
defaultValue=5)
dataType_param.setFlags(dataType_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(dataType_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
self.tr('Interpolated (Nearest neighbor)')))
def name(self):
return 'gridnearestneighbor'
def displayName(self):
return self.tr('Grid (Nearest neighbor)')
def group(self):
return self.tr('Raster analysis')
def groupId(self):
return 'rasteranalysis'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'grid.png'))
def commandName(self):
return 'gdal_grid'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
ogrLayer, layerName = self.getOgrCompatibleSource(self.INPUT, parameters, context, feedback, executing)
arguments = ['-l']
arguments.append(layerName)
fieldName = self.parameterAsString(parameters, self.Z_FIELD, context)
if fieldName:
arguments.append('-zfield')
arguments.append(fieldName)
params = 'nearest'
params += ':radius1={}'.format(self.parameterAsDouble(parameters, self.RADIUS_1, context))
params += ':radius2={}'.format(self.parameterAsDouble(parameters, self.RADIUS_2, context))
params += ':angle={}'.format(self.parameterAsDouble(parameters, self.ANGLE, context))
params += ':nodata={}'.format(self.parameterAsDouble(parameters, self.NODATA, context))
arguments.append('-a')
arguments.append(params)
arguments.append('-ot')
arguments.append(self.TYPES[self.parameterAsEnum(parameters, self.DATA_TYPE, context)])
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
self.setOutputValue(self.OUTPUT, out)
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
options = self.parameterAsString(parameters, self.OPTIONS, context)
if options:
arguments.extend(GdalUtils.parseCreationOptions(options))
if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
extra = self.parameterAsString(parameters, self.EXTRA, context)
arguments.append(extra)
arguments.append(ogrLayer)
arguments.append(out)
return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
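# For reference, the assembled command resembles the following (layer and
# file names are illustrative):
#   gdal_grid -l points -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0
#             -ot Float32 -of GTiff /path/to/points.shp /path/to/out.tif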
| SrNetoChan/QGIS | python/plugins/processing/algs/gdal/GridNearestNeighbor.py | Python | gpl-2.0 | 8,683 |
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Dir.py 2014/09/27 12:51:43 garyo"
import SCons.Node.FS
import SCons.Scanner
def only_dirs(nodes):
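    """Filter a node list down to the entries that disambiguate to directory Nodes."""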
is_Dir = lambda n: isinstance(n.disambiguate(), SCons.Node.FS.Dir)
return list(filter(is_Dir, nodes))
def DirScanner(**kw):
"""Return a prototype Scanner instance for scanning
directories for on-disk files"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = only_dirs
return SCons.Scanner.Base(scan_on_disk, "DirScanner", **kw)
def DirEntryScanner(**kw):
"""Return a prototype Scanner instance for "scanning"
directory Nodes for their in-memory entries"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = None
return SCons.Scanner.Base(scan_in_memory, "DirEntryScanner", **kw)
skip_entry = {}
skip_entry_list = [
'.',
'..',
'.sconsign',
# Used by the native dblite.py module.
'.sconsign.dblite',
# Used by dbm and dumbdbm.
'.sconsign.dir',
# Used by dbm.
'.sconsign.pag',
# Used by dumbdbm.
'.sconsign.dat',
'.sconsign.bak',
# Used by some dbm emulations using Berkeley DB.
'.sconsign.db',
]
for skip in skip_entry_list:
skip_entry[skip] = 1
skip_entry[SCons.Node.FS._my_normcase(skip)] = 1
do_not_scan = lambda k: k not in skip_entry
def scan_on_disk(node, env, path=()):
"""
Scans a directory for on-disk files and directories therein.
Looking up the entries will add these to the in-memory Node tree
representation of the file system, so all we have to do is just
that and then call the in-memory scanning function.
"""
try:
flist = node.fs.listdir(node.abspath)
except (IOError, OSError):
return []
e = node.Entry
for f in filter(do_not_scan, flist):
# Add ./ to the beginning of the file name so if it begins with a
# '#' we don't look it up relative to the top-level directory.
e('./' + f)
return scan_in_memory(node, env, path)
def scan_in_memory(node, env, path=()):
"""
"Scans" a Node.FS.Dir for its in-memory entries.
"""
try:
entries = node.entries
except AttributeError:
# It's not a Node.FS.Dir (or doesn't look enough like one for
# our purposes), which can happen if a target list containing
# mixed Node types (Dirs and Files, for example) has a Dir as
# the first entry.
return []
entry_list = sorted(filter(do_not_scan, list(entries.keys())))
return [entries[n] for n in entry_list]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| stonekyx/binary | vendor/scons-local-2.3.4/SCons/Scanner/Dir.py | Python | gpl-3.0 | 3,751 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_instance_group_facts
description:
- Gather facts for GCP InstanceGroup
short_description: Gather facts for GCP InstanceGroup
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters.)
    - Each additional filter in the list will be added as an AND condition (filter1
      and filter2).
zone:
description:
- A reference to the zone where the instance group resides.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: an instance group facts
gcp_compute_instance_group_facts:
zone: us-central1-a
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
id:
description:
- A unique identifier for this instance group.
returned: success
type: int
name:
description:
- The name of the instance group.
- The name must be 1-63 characters long, and comply with RFC1035.
returned: success
type: str
namedPorts:
description:
- Assigns a name to a port number.
- 'For example: {name: "http", port: 80}.'
- This allows the system to reference ports by the assigned name instead of
a port number. Named ports can also contain multiple ports.
- 'For example: [{name: "http", port: 80},{name: "http", port: 8080}] Named
ports apply to all instances in this instance group.'
returned: success
type: complex
contains:
name:
description:
- The name for this named port.
- The name must be 1-63 characters long, and comply with RFC1035.
returned: success
type: str
port:
description:
- The port number, which can be a value between 1 and 65535.
returned: success
type: int
network:
description:
- The network to which all instances in the instance group belong.
returned: success
type: str
region:
description:
- The region where the instance group is located (for regional resources).
returned: success
type: str
subnetwork:
description:
- The subnetwork to which all instances in the instance group belong.
returned: success
type: str
zone:
description:
- A reference to the zone where the instance group resides.
returned: success
type: str
instances:
description:
- The list of instances associated with this InstanceGroup.
- All instances must be created before being added to an InstanceGroup.
- All instances not in this list will be removed from the InstanceGroup and
will not be deleted.
- Only the full identifier of the instance will be returned.
returned: success
type: list
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
items = fetch_list(module, collection(module), query_options(module.params['filters']))
    if items and items.get('items'):
        items = items.get('items')
    else:
        items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroups".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
response = auth.get(link, params={'filter': query})
return return_if_object(module, response)
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
            # For multiple filters, each one should be wrapped in parentheses.
            if f[0] != '(' and f[-1] != ')':
                queries.append("(%s)" % f)
else:
queries.append(f)
return ' '.join(queries)
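# Illustrative behaviour of query_options (hypothetical filter strings):
#
#     >>> query_options(['name = test'])
#     'name = test'
#     >>> query_options(['name = a', 'zone = b'])
#     '(name = a) (zone = b)'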
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| Jorge-Rodriguez/ansible | lib/ansible/modules/cloud/google/gcp_compute_instance_group_facts.py | Python | gpl-3.0 | 6,993 |
"""simplify transaction log
Revision ID: 8c2406df6f8
Revises: 58732bb5d14b
Create Date: 2014-08-08 01:57:17.144405
"""
# revision identifiers, used by Alembic.
revision = '8c2406df6f8'
down_revision = '58732bb5d14b'
from alembic import op
from sqlalchemy.sql import text
def upgrade():
conn = op.get_bind()
conn.execute(text('''
ALTER TABLE transaction
CHANGE public_snapshot snapshot LONGTEXT,
CHANGE table_name object_type VARCHAR(20),
DROP COLUMN private_snapshot,
DROP COLUMN delta,
ADD INDEX `ix_transaction_object_public_id` (`object_public_id`)
'''))
def downgrade():
    raise Exception('Cannot downgrade: the dropped columns cannot be restored.')
| nylas/sync-engine | migrations/versions/120_simplify_transaction_log.py | Python | agpl-3.0 | 688 |
"""Plot implied timescales vs lagtime
{{header}}
"""
# ? include "plot_header.template"
# ? from "plot_macros.template" import xdg_open with context
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style('ticks')
colors = sns.color_palette()
## Load
timescales = pd.read_pickle('timescales.pandas.pickl')
n_timescales = len([x for x in timescales.columns
if x.startswith('timescale_')])
## Implied timescales vs lagtime
def plot_timescales(ax):
for i in range(n_timescales):
ax.scatter(timescales['lag_time'],
timescales['timescale_{}'.format(i)],
s=50, c=colors[0],
label=None, # pandas be interfering
)
xmin, xmax = ax.get_xlim()
xx = np.linspace(xmin, xmax)
ax.plot(xx, xx, color=colors[2], label='$y=x$')
ax.legend(loc='best', fontsize=14)
ax.set_xlabel('Lag Time / todo:units', fontsize=18)
ax.set_ylabel('Implied Timescales / todo:units', fontsize=18)
ax.set_xscale('log')
ax.set_yscale('log')
## Percent trimmed vs lagtime
def plot_trimmed(ax):
ax.plot(timescales['lag_time'],
timescales['percent_retained'],
'o-',
label=None, # pandas be interfering
)
ax.axhline(100, color='k', ls='--', label='100%')
ax.legend(loc='best', fontsize=14)
ax.set_xlabel('Lag Time / todo:units', fontsize=18)
ax.set_ylabel('Retained / %', fontsize=18)
ax.set_xscale('log')
ax.set_ylim((0, 110))
## Plot timescales
fig, ax = plt.subplots(figsize=(7, 5))
plot_timescales(ax)
fig.tight_layout()
fig.savefig('implied-timescales.pdf')
# {{xdg_open('implied-timescales.pdf')}}
## Plot trimmed
fig, ax = plt.subplots(figsize=(7, 5))
plot_trimmed(ax)
fig.tight_layout()
fig.savefig('percent-trimmed.pdf')
# {{xdg_open('percent-trimmed.pdf')}}
| msultan/msmbuilder | msmbuilder/project_templates/msm/timescales-plot.py | Python | lgpl-2.1 | 1,908 |
# pylint: skip-file
import json
import os
import re

import yaml
class YeditException(Exception):
''' Exception class for Yedit '''
pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([a-zA-Z-./]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([a-zA-Z-./]+)"
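    # Illustrative: re_key tokenizes "a.b[0].c" into
    # [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')] -- a mix of dict keys
    # and list indexes that the methods below walk in order.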
def __init__(self, filename=None, content=None, content_type='yaml'):
self.content = content
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
if self.filename and not self.content:
self.load(content_type=self.content_type)
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def remove_entry(data, key):
''' remove data at location key '''
if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
return None
key_indexes = re.findall(Yedit.re_key, key)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None):
        ''' Add an item to a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            item = c
        '''
if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
return None
curr_data = data
key_indexes = re.findall(Yedit.re_key, key)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
                if isinstance(data, dict) and dict_key in data:
data = data[dict_key]
continue
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for add
# expected list entry
if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
return curr_data
@staticmethod
def get_entry(data, key):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
return None
key_indexes = re.findall(Yedit.re_key, key)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
with open(self.filename, 'w') as yfd:
yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))
    def read(self):
        ''' read from file '''
# check if it exists
if not self.exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents:
return None
# check if it is yaml
        try:
            if content_type == 'yaml':
                self.yaml_dict = yaml.safe_load(contents)
            elif content_type == 'json':
                self.yaml_dict = json.loads(contents)
        except (yaml.YAMLError, ValueError) as _:
# Error loading yaml or json
return None
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key)
except KeyError as _:
entry = None
return entry
def delete(self, key):
''' remove key from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, key)
except KeyError as _:
entry = None
if not entry:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, key)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def put(self, key, value):
''' put key, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, key)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
result = Yedit.add_entry(self.yaml_dict, key, value)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def create(self, key, value):
''' create a yaml file '''
if not self.exists():
self.yaml_dict = {key: value}
return (True, self.yaml_dict)
return (False, self.yaml_dict)
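# Usage sketch (illustrative; content passed directly instead of a file):
#
#     >>> yed = Yedit(content={'a': {'b': {'c': 'd'}}})
#     >>> yed.get('a.b.c')
#     'd'
#     >>> yed.put('a.b.e', 'f')
#     (True, {'a': {'b': {'c': 'd', 'e': 'f'}}})
#     >>> yed.delete('a.b.c')
#     (True, {'a': {'b': {'e': 'f'}}})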
| ivanhorvath/openshift-tools | openshift/installer/vendored/openshift-ansible-git-2016-04-18/roles/lib_yaml_editor/build/src/yedit.py | Python | apache-2.0 | 6,339 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.edit_distance_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
def ConstantOf(x):
x = np.asarray(x)
# Convert to int64 if it's not a string or unicode
if x.dtype.char not in "SU": x = np.asarray(x, dtype=np.int64)
return tf.constant(x)
class EditDistanceTest(tf.test.TestCase):
def _testEditDistance(self, hypothesis, truth, normalize,
expected_output, expected_err_re=None):
# hypothesis and truth are (index, value, shape) tuples
hypothesis_st = tf.SparseTensor(*[ConstantOf(x) for x in hypothesis])
truth_st = tf.SparseTensor(*[ConstantOf(x) for x in truth])
edit_distance = tf.edit_distance(
hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)
with self.test_session():
if expected_err_re is None:
# Shape inference figures out the shape from the shape variables
# Explicit tuple() needed since zip returns an iterator in Python 3.
expected_shape = [
max(h, t) for h, t in tuple(zip(hypothesis[2], truth[2]))[:-1]]
self.assertEqual(edit_distance.get_shape(), expected_shape)
output = edit_distance.eval()
self.assertAllClose(output, expected_output)
else:
with self.assertRaisesOpError(expected_err_re):
edit_distance.eval()
def testEditDistanceNormalized(self):
hypothesis_indices = [[0, 0], [0, 1],
[1, 0], [1, 1]]
hypothesis_values = [0, 1,
1, -1]
hypothesis_shape = [2, 2]
truth_indices = [[0, 0],
[1, 0], [1, 1]]
truth_values = [0,
1, 1]
truth_shape = [2, 2]
expected_output = [1.0, 0.5]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceUnnormalized(self):
hypothesis_indices = [[0, 0],
[1, 0], [1, 1]]
hypothesis_values = [10,
10, 11]
hypothesis_shape = [2, 2]
truth_indices = [[0, 0], [0, 1],
[1, 0], [1, 1]]
truth_values = [1, 2,
1, -1]
truth_shape = [2, 3]
expected_output = [2.0, 2.0]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=False,
expected_output=expected_output)
def testEditDistanceProperDistance(self):
# In this case, the values are individual characters stored in the
# SparseTensor (type DT_STRING)
hypothesis_indices = ([[0, i] for i, _ in enumerate("algorithm")] +
[[1, i] for i, _ in enumerate("altruistic")])
hypothesis_values = [x for x in "algorithm"] + [x for x in "altruistic"]
hypothesis_shape = [2, 11]
truth_indices = ([[0, i] for i, _ in enumerate("altruistic")] +
[[1, i] for i, _ in enumerate("algorithm")])
truth_values = [x for x in "altruistic"] + [x for x in "algorithm"]
truth_shape = [2, 11]
expected_unnormalized = [6.0, 6.0]
expected_normalized = [6.0/len("altruistic"),
6.0/len("algorithm")]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=False,
expected_output=expected_unnormalized)
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_normalized)
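  # Cross-check (illustrative, not from the original test): a plain-Python
  # dynamic-programming Levenshtein distance confirms the expected value 6:
  #
  #     >>> def lev(a, b):
  #     ...   d = list(range(len(b) + 1))
  #     ...   for i, ca in enumerate(a, 1):
  #     ...     d, prev = [i], d
  #     ...     for j, cb in enumerate(b, 1):
  #     ...       d.append(min(d[j - 1] + 1, prev[j] + 1,
  #     ...                    prev[j - 1] + (ca != cb)))
  #     ...   return d[-1]
  #     >>> lev("algorithm", "altruistic")
  #     6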
def testEditDistance3D(self):
hypothesis_indices = [[0, 0, 0],
[1, 0, 0]]
hypothesis_values = [0, 1]
hypothesis_shape = [2, 1, 1]
truth_indices = [[0, 1, 0],
[1, 0, 0],
[1, 1, 0]]
truth_values = [0, 1, 1]
truth_shape = [2, 2, 1]
expected_output = [[np.inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.0, 1.0]] # (1,0): match, (1,1): no hypothesis
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceMissingHypothesis(self):
hypothesis_indices = np.empty((0, 2), dtype=np.int64)
hypothesis_values = []
hypothesis_shape = [1, 0]
truth_indices = [[0, 0]]
truth_values = [0]
truth_shape = [1, 1]
expected_output = [1.0]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceMissingTruth(self):
hypothesis_indices = [[0, 0]]
hypothesis_values = [0]
hypothesis_shape = [1, 1]
truth_indices = np.empty((0, 2), dtype=np.int64)
truth_values = []
truth_shape = [1, 0]
expected_output = [np.inf] # Normalized, divide by zero
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
if __name__ == "__main__":
tf.test.main()
| DeepThoughtTeam/tensorflow | tensorflow/python/kernel_tests/edit_distance_op_test.py | Python | apache-2.0 | 6,429 |
"""
=========================
Filtering regional maxima
=========================
Here, we use morphological reconstruction to create a background image, which
we can subtract from the original image to isolate bright features (regional
maxima).
First we try reconstruction by dilation starting at the edges of the image. We
initialize a seed image to the minimum intensity of the image, and set its
border to be the pixel values in the original image. These maximal pixels will
get dilated in order to reconstruct the background image.
"""
import numpy as np
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
from skimage import data
from skimage import img_as_float
from skimage.morphology import reconstruction
# Convert to float: important for subtraction later, which won't work with uint8
image = img_as_float(data.coins())
image = gaussian_filter(image, 1)
seed = np.copy(image)
seed[1:-1, 1:-1] = image.min()
mask = image
dilated = reconstruction(seed, mask, method='dilation')
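# Tiny 1-D illustration (not part of the original example) of reconstruction
# by dilation: a seed holding only the border values reconstructs the
# background, so subtracting it isolates the peak.
#
#     >>> m = np.array([1., 2., 4., 2., 1.])   # mask with one bright peak
#     >>> s = np.copy(m); s[1:-1] = m.min()    # seed: minimum inside, borders kept
#     >>> reconstruction(s, m, method='dilation')      # -> [1., 1., 1., 1., 1.]
#     >>> m - reconstruction(s, m, method='dilation')  # -> [0., 1., 3., 1., 0.]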
"""
Subtracting the dilated image leaves an image with just the coins and a flat,
black background, as shown below.
"""
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 2.5), sharex=True, sharey=True)
ax1.imshow(image)
ax1.set_title('original image')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax2.imshow(dilated, vmin=image.min(), vmax=image.max())
ax2.set_title('dilated')
ax2.axis('off')
ax2.set_adjustable('box-forced')
ax3.imshow(image - dilated)
ax3.set_title('image - dilated')
ax3.axis('off')
ax3.set_adjustable('box-forced')
fig.tight_layout()
"""
.. image:: PLOT2RST.current_figure
Although the features (i.e. the coins) are clearly isolated, the coins
surrounded by a bright background in the original image are dimmer in the
subtracted image. We can attempt to correct this using a different seed image.
Instead of creating a seed image with maxima along the image border, we can use
the features of the image itself to seed the reconstruction process. Here, the
seed image is the original image minus a fixed value, ``h``.
"""
h = 0.4
seed = image - h
dilated = reconstruction(seed, mask, method='dilation')
hdome = image - dilated
"""
To get a feel for the reconstruction process, we plot the intensity of the
mask, seed, and dilated images along a slice of the image (indicated by the
red line).
"""
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 2.5))
yslice = 197
ax1.plot(mask[yslice], '0.5', label='mask')
ax1.plot(seed[yslice], 'k', label='seed')
ax1.plot(dilated[yslice], 'r', label='dilated')
ax1.set_ylim(-0.2, 2)
ax1.set_title('image slice')
ax1.set_xticks([])
ax1.legend()
ax2.imshow(dilated, vmin=image.min(), vmax=image.max())
ax2.axhline(yslice, color='r', alpha=0.4)
ax2.set_title('dilated')
ax2.axis('off')
ax3.imshow(hdome)
ax3.axhline(yslice, color='r', alpha=0.4)
ax3.set_title('image - dilated')
ax3.axis('off')
fig.tight_layout()
plt.show()
"""
.. image:: PLOT2RST.current_figure
As you can see in the image slice, each coin is given a different baseline
intensity in the reconstructed image; this is because we used the local
intensity (shifted by ``h``) as a seed value. As a result, the coins in the
subtracted image have similar pixel intensities. The final result is known as
the h-dome of an image since this tends to isolate regional maxima of height
``h``. This operation is particularly useful when your images are unevenly
illuminated.
"""
| WarrenWeckesser/scikits-image | doc/examples/plot_regional_maxima.py | Python | bsd-3-clause | 3,435 |
## -*- mode: python; coding: utf-8; -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Usage: bibrankgkb [options]
Examples:
bibrankgkb --input=bibrankgkb.cfg --output=test.kb
bibrankgkb -otest.kb -v9
bibrankgkb -v9
Generate options:
-i, --input=file input file, default from /etc/bibrank/bibrankgkb.cfg
-o, --output=file output file, will be placed in current folder
General options:
-h, --help print this help and exit
-V, --version print version and exit
-v, --verbose=LEVEL verbose level (from 0 to 9, default 1)
"""
__revision__ = "$Id$"
import getopt
import sys
import time
import urllib
import re
import ConfigParser
from invenio.config import CFG_ETCDIR
from invenio.dbquery import run_sql
opts_dict = {}
task_id = -1
def bibrankgkb(config):
"""Generates a .kb file based on input from the configuration file"""
if opts_dict["verbose"] >= 1:
write_message("Running: Generate Knowledgebase.")
journals = {}
journal_src = {}
i = 0
#Reading the configuration file
    while config.has_option("bibrankgkb", "create_%s" % i):
cfg = config.get("bibrankgkb", "create_%s" % i).split(",,")
conv = {}
temp = {}
#Input source 1, either file, www or from db
if cfg[0] == "file":
conv = get_from_source(cfg[0], cfg[1])
del cfg[0:2]
elif cfg[0] == "www":
j = 0
urls = {}
while config.has_option("bibrankgkb", cfg[1] % j):
urls[j] = config.get("bibrankgkb", cfg[1] % j)
j = j + 1
conv = get_from_source(cfg[0], (urls, cfg[2]))
del cfg[0:3]
elif cfg[0] == "db":
conv = get_from_source(cfg[0], (cfg[1], cfg[2]))
del cfg[0:3]
if not conv:
del cfg[0:2]
else:
if opts_dict["verbose"] >= 9:
write_message("Using last resource for converting values.")
#Input source 2, either file, www or from db
if cfg[0] == "file":
temp = get_from_source(cfg[0], cfg[1])
elif cfg[0] == "www":
j = 0
urls = {}
while config.has_option("bibrankgkb", cfg[1] % j):
urls[j] = config.get("bibrankgkb", cfg[1] % j)
j = j + 1
temp = get_from_source(cfg[0], (urls, cfg[2]))
elif cfg[0] == "db":
temp = get_from_source(cfg[0], (cfg[1], cfg[2]))
i = i + 1
#If a conversion file is given, the names will be converted to the correct convention
if len(conv) != 0:
if opts_dict["verbose"] >= 9:
write_message("Converting between naming conventions given.")
temp = convert(conv, temp)
if len(journals) != 0:
for element in temp.keys():
if not journals.has_key(element):
journals[element] = temp[element]
else:
journals = temp
#Writing output file
if opts_dict["output"]:
f = open(opts_dict["output"], 'w')
f.write("#Created by %s\n" % __revision__)
f.write("#Sources:\n")
for key in journals.keys():
f.write("%s---%s\n" % (key, journals[key]))
f.close()
if opts_dict["verbose"] >= 9:
write_message("Output complete: %s" % opts_dict["output"])
write_message("Number of hits: %s" % len(journals))
if opts_dict["verbose"] >= 9:
write_message("Result:")
for key in journals.keys():
write_message("%s---%s" % (key, journals[key]))
write_message("Total nr of lines: %s" % len(journals))
def showtime(timeused):
if opts_dict["verbose"] >= 9:
write_message("Time used: %d second(s)." % timeused)
def get_from_source(type, data):
"""Read a source based on the input to the function"""
datastruct = {}
if type == "db":
jvalue = run_sql(data[0])
jname = dict(run_sql(data[1]))
if opts_dict["verbose"] >= 9:
write_message("Reading data from database using SQL statements:")
write_message(jvalue)
write_message(jname)
for key, value in jvalue:
if jname.has_key(key):
key2 = jname[key].strip()
datastruct[key2] = value
#print "%s---%s" % (key2, value)
elif type == "file":
input = open(data, 'r')
if opts_dict["verbose"] >= 9:
write_message("Reading data from file: %s" % data)
data = input.readlines()
datastruct = {}
for line in data:
#print line
if not line[0:1] == "#":
                key = line.strip().split("---")[0].strip()
value = line.strip().split("---")[1]
datastruct[key] = value
#print "%s---%s" % (key,value)
elif type == "www":
if opts_dict["verbose"] >= 9:
write_message("Reading data from www using regexp: %s" % data[1])
write_message("Reading data from url:")
for link in data[0].keys():
if opts_dict["verbose"] >= 9:
write_message(data[0][link])
page = urllib.urlopen(data[0][link])
input = page.read()
#Using the regexp from config file
reg = re.compile(data[1])
iterator = re.finditer(reg, input)
for match in iterator:
if match.group("value"):
key = match.group("key").strip()
value = match.group("value").replace(",", ".")
datastruct[key] = value
if opts_dict["verbose"] == 9:
print "%s---%s" % (key, value)
return datastruct
def convert(convstruct, journals):
"""Converting between names"""
if len(convstruct) > 0 and len(journals) > 0:
invconvstruct = dict(map(lambda x: (x[1], x[0]), convstruct.items()))
tempjour = {}
for name in journals.keys():
if convstruct.has_key(name):
tempjour[convstruct[name]] = journals[name]
elif invconvstruct.has_key(name):
tempjour[name] = journals[name]
return tempjour
else:
return journals
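#Illustrative sketch of convert() using hypothetical journal names:
#  convert({'PHYSICAL REVIEW D': 'Phys. Rev. D'},
#          {'PHYSICAL REVIEW D': '4.35'})
#  returns {'Phys. Rev. D': '4.35'}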
def write_message(msg, stream = sys.stdout):
"""Write message and flush output stream (may be sys.stdout or sys.stderr). Useful for debugging stuff."""
if stream == sys.stdout or stream == sys.stderr:
stream.write(time.strftime("%Y-%m-%d %H:%M:%S --> ", time.localtime()))
try:
stream.write("%s\n" % msg)
except UnicodeEncodeError:
stream.write("%s\n" % msg.encode('ascii', 'backslashreplace'))
stream.flush()
else:
sys.stderr.write("Unknown stream %s. [must be sys.stdout or sys.stderr]\n" % stream)
return
def usage(code, msg=''):
"Prints usage for this module."
if msg:
sys.stderr.write("Error: %s.\n" % msg)
print >> sys.stderr, \
""" Usage: %s [options]
Examples:
%s --input=bibrankgkb.cfg --output=test.kb
%s -otest.kb -v9
%s -v9
Generate options:
-i, --input=file input file, default from /etc/bibrank/bibrankgkb.cfg
-o, --output=file output file, will be placed in current folder
General options:
-h, --help print this help and exit
-V, --version print version and exit
-v, --verbose=LEVEL verbose level (from 0 to 9, default 1)
""" % ((sys.argv[0],) * 4)
sys.exit(code)
def command_line():
global opts_dict
long_flags = ["input=", "output=", "help", "version", "verbose="]
short_flags = "i:o:hVv:"
format_string = "%Y-%m-%d %H:%M:%S"
sleeptime = ""
try:
opts, args = getopt.getopt(sys.argv[1:], short_flags, long_flags)
except getopt.GetoptError, err:
write_message(err, sys.stderr)
usage(1)
if args:
usage(1)
opts_dict = {"input": "%s/bibrank/bibrankgkb.cfg" % CFG_ETCDIR, "output":"", "verbose":1}
sched_time = time.strftime(format_string)
user = ""
try:
for opt in opts:
if opt == ("-h","") or opt == ("--help",""):
usage(1)
elif opt == ("-V","") or opt == ("--version",""):
print __revision__
sys.exit(1)
elif opt[0] in ["--input", "-i"]:
opts_dict["input"] = opt[1]
elif opt[0] in ["--output", "-o"]:
opts_dict["output"] = opt[1]
elif opt[0] in ["--verbose", "-v"]:
opts_dict["verbose"] = int(opt[1])
else:
usage(1)
startCreate = time.time()
file = opts_dict["input"]
config = ConfigParser.ConfigParser()
config.readfp(open(file))
bibrankgkb(config)
if opts_dict["verbose"] >= 9:
showtime((time.time() - startCreate))
except StandardError, e:
write_message(e, sys.stderr)
sys.exit(1)
return
def main():
command_line()
if __name__ == "__main__":
main()
| kaplun/Invenio-OpenAIRE | modules/bibrank/lib/bibrankgkb.py | Python | gpl-2.0 | 9,931 |
import pickle
import unittest
from test import test_support as support
turtle = support.import_module('turtle')
Vec2D = turtle.Vec2D
test_config = """\
width = 0.75
height = 0.8
canvwidth = 500
canvheight = 200
leftright = 100
topbottom = 100
mode = world
colormode = 255
delay = 100
undobuffersize = 10000
shape = circle
pencolor = red
fillcolor = blue
resizemode = auto
visible = None
language = english
exampleturtle = turtle
examplescreen = screen
title = Python Turtle Graphics
using_IDLE = ''
"""
test_config_two = """\
# Comments!
# Testing comments!
pencolor = red
fillcolor = blue
visible = False
language = english
# Some more
# comments
using_IDLE = False
"""
invalid_test_config = """
pencolor = red
fillcolor: blue
visible = False
"""
class TurtleConfigTest(unittest.TestCase):
def get_cfg_file(self, cfg_str):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, 'w') as f:
f.write(cfg_str)
return support.TESTFN
def test_config_dict(self):
cfg_name = self.get_cfg_file(test_config)
parsed_cfg = turtle.config_dict(cfg_name)
expected = {
'width' : 0.75,
'height' : 0.8,
'canvwidth' : 500,
'canvheight': 200,
'leftright': 100,
'topbottom': 100,
'mode': 'world',
'colormode': 255,
'delay': 100,
'undobuffersize': 10000,
'shape': 'circle',
'pencolor' : 'red',
'fillcolor' : 'blue',
'resizemode' : 'auto',
'visible' : None,
'language': 'english',
'exampleturtle': 'turtle',
'examplescreen': 'screen',
'title': 'Python Turtle Graphics',
'using_IDLE': '',
}
self.assertEqual(parsed_cfg, expected)
    def test_partial_config_dict_with_comments(self):
cfg_name = self.get_cfg_file(test_config_two)
parsed_cfg = turtle.config_dict(cfg_name)
expected = {
'pencolor': 'red',
'fillcolor': 'blue',
'visible': False,
'language': 'english',
'using_IDLE': False,
}
self.assertEqual(parsed_cfg, expected)
def test_config_dict_invalid(self):
cfg_name = self.get_cfg_file(invalid_test_config)
with support.captured_stdout() as stdout:
parsed_cfg = turtle.config_dict(cfg_name)
err_msg = stdout.getvalue()
self.assertIn('Bad line in config-file ', err_msg)
self.assertIn('fillcolor: blue', err_msg)
self.assertEqual(parsed_cfg, {
'pencolor': 'red',
'visible': False,
})
class VectorComparisonMixin:
def assertVectorsAlmostEqual(self, vec1, vec2):
if len(vec1) != len(vec2):
self.fail("Tuples are not of equal size")
for idx, (i, j) in enumerate(zip(vec1, vec2)):
self.assertAlmostEqual(
i, j, msg='values at index {} do not match'.format(idx))
class TestVec2D(VectorComparisonMixin, unittest.TestCase):
def test_constructor(self):
vec = Vec2D(0.5, 2)
self.assertEqual(vec[0], 0.5)
self.assertEqual(vec[1], 2)
self.assertIsInstance(vec, Vec2D)
self.assertRaises(TypeError, Vec2D)
self.assertRaises(TypeError, Vec2D, 0)
self.assertRaises(TypeError, Vec2D, (0, 1))
self.assertRaises(TypeError, Vec2D, vec)
self.assertRaises(TypeError, Vec2D, 0, 1, 2)
def test_repr(self):
vec = Vec2D(0.567, 1.234)
self.assertEqual(repr(vec), '(0.57,1.23)')
def test_equality(self):
vec1 = Vec2D(0, 1)
vec2 = Vec2D(0.0, 1)
vec3 = Vec2D(42, 1)
self.assertEqual(vec1, vec2)
self.assertEqual(vec1, tuple(vec1))
self.assertEqual(tuple(vec1), vec1)
self.assertNotEqual(vec1, vec3)
self.assertNotEqual(vec2, vec3)
def test_pickling(self):
vec = Vec2D(0.5, 2)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(vec, protocol=proto)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled, vec)
self.assertIsInstance(unpickled, Vec2D)
def _assert_arithmetic_cases(self, test_cases, lambda_operator):
for test_case in test_cases:
((first, second), expected) = test_case
op1 = Vec2D(*first)
op2 = Vec2D(*second)
result = lambda_operator(op1, op2)
expected = Vec2D(*expected)
self.assertVectorsAlmostEqual(result, expected)
def test_vector_addition(self):
test_cases = [
(((0, 0), (1, 1)), (1.0, 1.0)),
(((-1, 0), (2, 2)), (1, 2)),
(((1.5, 0), (1, 1)), (2.5, 1)),
]
self._assert_arithmetic_cases(test_cases, lambda x, y: x + y)
def test_vector_subtraction(self):
test_cases = [
(((0, 0), (1, 1)), (-1, -1)),
(((10.625, 0.125), (10, 0)), (0.625, 0.125)),
]
self._assert_arithmetic_cases(test_cases, lambda x, y: x - y)
def test_vector_multiply(self):
vec1 = Vec2D(10, 10)
vec2 = Vec2D(0.5, 3)
answer = vec1 * vec2
expected = 35
self.assertAlmostEqual(answer, expected)
vec = Vec2D(0.5, 3)
answer = vec * 10
expected = Vec2D(5, 30)
self.assertVectorsAlmostEqual(answer, expected)
def test_vector_negative(self):
vec = Vec2D(10, -10)
expected = (-10, 10)
self.assertVectorsAlmostEqual(-vec, expected)
def test_distance(self):
vec = Vec2D(6, 8)
expected = 10
self.assertEqual(abs(vec), expected)
vec = Vec2D(0, 0)
expected = 0
self.assertEqual(abs(vec), expected)
vec = Vec2D(2.5, 6)
expected = 6.5
self.assertEqual(abs(vec), expected)
def test_rotate(self):
cases = [
(((0, 0), 0), (0, 0)),
(((0, 1), 90), (-1, 0)),
(((0, 1), -90), (1, 0)),
(((1, 0), 180), (-1, 0)),
(((1, 0), 360), (1, 0)),
]
for case in cases:
(vec, rot), expected = case
vec = Vec2D(*vec)
got = vec.rotate(rot)
self.assertVectorsAlmostEqual(got, expected)
class TestTNavigator(VectorComparisonMixin, unittest.TestCase):
def setUp(self):
self.nav = turtle.TNavigator()
def test_goto(self):
self.nav.goto(100, -100)
self.assertAlmostEqual(self.nav.xcor(), 100)
self.assertAlmostEqual(self.nav.ycor(), -100)
def test_pos(self):
self.assertEqual(self.nav.pos(), self.nav._position)
self.nav.goto(100, -100)
self.assertEqual(self.nav.pos(), self.nav._position)
def test_left(self):
self.assertEqual(self.nav._orient, (1.0, 0))
self.nav.left(90)
self.assertVectorsAlmostEqual(self.nav._orient, (0.0, 1.0))
def test_right(self):
self.assertEqual(self.nav._orient, (1.0, 0))
self.nav.right(90)
self.assertVectorsAlmostEqual(self.nav._orient, (0, -1.0))
def test_reset(self):
self.nav.goto(100, -100)
self.assertAlmostEqual(self.nav.xcor(), 100)
self.assertAlmostEqual(self.nav.ycor(), -100)
self.nav.reset()
self.assertAlmostEqual(self.nav.xcor(), 0)
self.assertAlmostEqual(self.nav.ycor(), 0)
def test_forward(self):
self.nav.forward(150)
expected = Vec2D(150, 0)
self.assertVectorsAlmostEqual(self.nav.position(), expected)
self.nav.reset()
self.nav.left(90)
self.nav.forward(150)
expected = Vec2D(0, 150)
self.assertVectorsAlmostEqual(self.nav.position(), expected)
self.assertRaises(TypeError, self.nav.forward, 'skldjfldsk')
def test_backwards(self):
self.nav.back(200)
expected = Vec2D(-200, 0)
self.assertVectorsAlmostEqual(self.nav.position(), expected)
self.nav.reset()
self.nav.right(90)
self.nav.back(200)
expected = Vec2D(0, 200)
self.assertVectorsAlmostEqual(self.nav.position(), expected)
def test_distance(self):
self.nav.forward(100)
expected = 100
self.assertAlmostEqual(self.nav.distance(Vec2D(0,0)), expected)
def test_radians_and_degrees(self):
self.nav.left(90)
self.assertAlmostEqual(self.nav.heading(), 90)
self.nav.radians()
self.assertAlmostEqual(self.nav.heading(), 1.57079633)
self.nav.degrees()
self.assertAlmostEqual(self.nav.heading(), 90)
def test_towards(self):
coordinates = [
# coordinates, expected
((100, 0), 0.0),
((100, 100), 45.0),
((0, 100), 90.0),
((-100, 100), 135.0),
((-100, 0), 180.0),
((-100, -100), 225.0),
((0, -100), 270.0),
((100, -100), 315.0),
]
for (x, y), expected in coordinates:
self.assertEqual(self.nav.towards(x, y), expected)
self.assertEqual(self.nav.towards((x, y)), expected)
self.assertEqual(self.nav.towards(Vec2D(x, y)), expected)
def test_heading(self):
self.nav.left(90)
self.assertAlmostEqual(self.nav.heading(), 90)
self.nav.left(45)
self.assertAlmostEqual(self.nav.heading(), 135)
self.nav.right(1.6)
self.assertAlmostEqual(self.nav.heading(), 133.4)
self.assertRaises(TypeError, self.nav.right, 'sdkfjdsf')
self.nav.reset()
rotations = [10, 20, 170, 300]
result = sum(rotations) % 360
for num in rotations:
self.nav.left(num)
self.assertEqual(self.nav.heading(), result)
self.nav.reset()
result = (360-sum(rotations)) % 360
for num in rotations:
self.nav.right(num)
self.assertEqual(self.nav.heading(), result)
self.nav.reset()
rotations = [10, 20, -170, 300, -210, 34.3, -50.2, -10, -29.98, 500]
sum_so_far = 0
for num in rotations:
if num < 0:
self.nav.right(abs(num))
else:
self.nav.left(num)
sum_so_far += num
self.assertAlmostEqual(self.nav.heading(), sum_so_far % 360)
def test_setheading(self):
self.nav.setheading(102.32)
self.assertAlmostEqual(self.nav.heading(), 102.32)
self.nav.setheading(-123.23)
self.assertAlmostEqual(self.nav.heading(), (-123.23) % 360)
self.nav.setheading(-1000.34)
self.assertAlmostEqual(self.nav.heading(), (-1000.34) % 360)
self.nav.setheading(300000)
self.assertAlmostEqual(self.nav.heading(), 300000%360)
def test_positions(self):
self.nav.forward(100)
self.nav.left(90)
self.nav.forward(-200)
self.assertVectorsAlmostEqual(self.nav.pos(), (100.0, -200.0))
def test_setx_and_sety(self):
self.nav.setx(-1023.2334)
self.nav.sety(193323.234)
self.assertVectorsAlmostEqual(self.nav.pos(), (-1023.2334, 193323.234))
def test_home(self):
self.nav.left(30)
self.nav.forward(-100000)
self.nav.home()
self.assertVectorsAlmostEqual(self.nav.pos(), (0,0))
self.assertAlmostEqual(self.nav.heading(), 0)
def test_distance_method(self):
self.assertAlmostEqual(self.nav.distance(30, 40), 50)
vec = Vec2D(0.22, .001)
self.assertAlmostEqual(self.nav.distance(vec), 0.22000227271553355)
another_turtle = turtle.TNavigator()
another_turtle.left(90)
another_turtle.forward(10000)
self.assertAlmostEqual(self.nav.distance(another_turtle), 10000)
class TestTPen(unittest.TestCase):
def test_pendown_and_penup(self):
tpen = turtle.TPen()
self.assertTrue(tpen.isdown())
tpen.penup()
self.assertFalse(tpen.isdown())
tpen.pendown()
self.assertTrue(tpen.isdown())
def test_showturtle_hideturtle_and_isvisible(self):
tpen = turtle.TPen()
self.assertTrue(tpen.isvisible())
tpen.hideturtle()
self.assertFalse(tpen.isvisible())
tpen.showturtle()
self.assertTrue(tpen.isvisible())
def test_main():
support.run_unittest(TurtleConfigTest, TestVec2D, TestTNavigator, TestTPen)
if __name__ == '__main__':
test_main()
| HiSPARC/station-software | user/python/Lib/test/test_turtle.py | Python | gpl-3.0 | 12,577 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# mingus - Music theory Python package, track module.
# Copyright (C) 2008-2009, Bart Spaans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mt_exceptions import InstrumentRangeError, UnexpectedObjectError
from mingus.containers.note_container import NoteContainer
from mingus.containers.bar import Bar
import mingus.core.value as value
class Track(object):
"""A track object.
The Track class can be used to store Bars and to work on them.
The class is also designed to be used with Instruments, but this is
optional.
Tracks can be stored together in Compositions.
"""
bars = []
instrument = None
name = 'Untitled' # Will be looked for when saving a MIDI file.
tuning = None # Used by tablature
def __init__(self, instrument=None):
self.bars = []
self.instrument = instrument
def add_bar(self, bar):
"""Add a Bar to the current track."""
self.bars.append(bar)
return self
def add_notes(self, note, duration=None):
"""Add a Note, note as string or NoteContainer to the last Bar.
If the Bar is full, a new one will automatically be created.
If the Bar is not full but the note can't fit in, this method will
return False. True otherwise.
An InstrumentRangeError exception will be raised if an Instrument is
attached to the Track, but the note turns out not to be within the
range of the Instrument.
"""
if self.instrument != None:
if not self.instrument.can_play_notes(note):
raise InstrumentRangeError, \
"Note '%s' is not in range of the instrument (%s)" % (note,
self.instrument)
if duration == None:
duration = 4
# Check whether the last bar is full, if so create a new bar and add the
# note there
if len(self.bars) == 0:
self.bars.append(Bar())
last_bar = self.bars[-1]
if last_bar.is_full():
self.bars.append(Bar(last_bar.key, last_bar.meter))
# warning should hold note if it doesn't fit
return self.bars[-1].place_notes(note, duration)
def get_notes(self):
"""Return an iterator that iterates through every bar in the this
track."""
for bar in self.bars:
for beat, duration, notes in bar:
yield beat, duration, notes
def from_chords(self, chords, duration=1):
"""Add chords to the Track.
The given chords should be a list of shorthand strings or list of
list of shorthand strings, etc.
Each sublist divides the value by 2.
If a tuning is set, chords will be expanded so they have a proper
fingering.
Example:
>>> t = Track().from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1)
"""
tun = self.get_tuning()
def add_chord(chord, duration):
if type(chord) == list:
for c in chord:
add_chord(c, duration * 2)
else:
chord = NoteContainer().from_chord(chord)
if tun:
chord = tun.find_chord_fingering(chord,
return_best_as_NoteContainer=True)
if not self.add_notes(chord, duration):
# This should be the standard behaviour of add_notes
dur = self.bars[-1].value_left()
self.add_notes(chord, dur)
# warning should hold note
self.add_notes(chord, value.subtract(duration, dur))
for c in chords:
if c is not None:
add_chord(c, duration)
else:
self.add_notes(None, duration)
return self
def get_tuning(self):
"""Return a StringTuning object.
If an instrument is set and has a tuning it will be returned.
Otherwise the track's one will be used.
"""
if self.instrument and self.instrument.tuning:
return self.instrument.tuning
return self.tuning
def set_tuning(self, tuning):
"""Set the tuning attribute on both the Track and its instrument (when
available).
Tuning should be a StringTuning or derivative object.
"""
if self.instrument:
self.instrument.tuning = tuning
self.tuning = tuning
return self
def transpose(self, interval, up=True):
"""Transpose all the notes in the track up or down the interval.
Call transpose() on every Bar.
"""
for bar in self.bars:
bar.transpose(interval, up)
return self
def augment(self):
"""Augment all the bars in the Track."""
for bar in self.bars:
bar.augment()
return self
def diminish(self):
"""Diminish all the bars in the Track."""
for bar in self.bars:
bar.diminish()
return self
def __add__(self, value):
"""Enable the '+' operator for Tracks.
Notes, notes as string, NoteContainers and Bars accepted.
"""
if hasattr(value, 'bar'):
return self.add_bar(value)
elif hasattr(value, 'notes'):
return self.add_notes(value)
elif hasattr(value, 'name') or type(value) == str:
return self.add_notes(value)
def test_integrity(self):
"""Test whether all but the last Bars contained in this track are
full."""
for b in self.bars[:-1]:
if not b.is_full():
return False
return True
def __eq__(self, other):
"""Enable the '==' operator for tracks."""
        if len(self.bars) != len(other.bars):
            return False
        for x in range(0, len(self.bars)):
            if self.bars[x] != other.bars[x]:
                return False
        return True
def __getitem__(self, index):
"""Enable the '[]' notation for Tracks."""
return self.bars[index]
def __setitem__(self, index, value):
"""Enable the '[] =' notation for Tracks.
Throw an UnexpectedObjectError if the value being set is not a
mingus.containers.Bar object.
"""
if not hasattr(value, 'bar'):
            raise UnexpectedObjectError("Unexpected object '%s', "
                    "expecting a mingus.containers.Bar object" % value)
self.bars[index] = value
def __repr__(self):
"""Return a string representing the class."""
return str([self.instrument, self.bars])
def __len__(self):
"""Enable the len() function for Tracks."""
return len(self.bars)
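# Usage sketch (illustrative, not part of the original module):
#
#     >>> t = Track()
#     >>> t.add_notes("C", 4)   # a quarter note; a Bar is created on demand
#     True
#     >>> t.add_notes("E", 4)
#     True
#     >>> len(t)
#     1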
| marianinn/mingus | mingus/containers/track.py | Python | gpl-3.0 | 7,328 |
# -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2017 sliptonic <shopinthewoods@gmail.com> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import Path
from PathTests.PathTestUtils import PathTestBase
class TestPathTool(PathTestBase):
def test00(self):
'''Verify templateAttrs'''
name = 'tool 1'
mat = 'Carbide'
typ = 'EndMill'
dia = 1.7
flat = 7.2
offset = 3.2
corner = 4
height = 45.3
angle = 118
tool = Path.Tool()
tool.Name = name
tool.ToolType = typ
tool.Material = mat
tool.Diameter = dia
tool.LengthOffset = offset
tool.FlatRadius = flat
tool.CornerRadius = corner
tool.CuttingEdgeAngle = angle
tool.CuttingEdgeHeight = height
attrs = tool.templateAttrs()
self.assertEqual(attrs['name'], name)
self.assertEqual(attrs['diameter'], dia)
self.assertEqual(attrs['material'], mat)
self.assertEqual(attrs['tooltype'], typ)
self.assertEqual(attrs['lengthOffset'], offset)
self.assertEqual(attrs['flatRadius'], flat)
self.assertEqual(attrs['cornerRadius'], corner)
self.assertEqual(attrs['cuttingEdgeAngle'], angle)
self.assertEqual(attrs['cuttingEdgeHeight'], height)
return tool
def test01(self):
'''Verify template roundtrip'''
t0 = self.test00()
t1 = Path.Tool()
t1.setFromTemplate(t0.templateAttrs())
self.assertEqual(t0.Name, t1.Name)
self.assertEqual(t0.ToolType, t1.ToolType)
self.assertEqual(t0.Material, t1.Material)
self.assertEqual(t0.Diameter, t1.Diameter)
self.assertEqual(t0.LengthOffset, t1.LengthOffset)
self.assertEqual(t0.FlatRadius, t1.FlatRadius)
self.assertEqual(t0.CornerRadius, t1.CornerRadius)
self.assertEqual(t0.CuttingEdgeAngle, t1.CuttingEdgeAngle)
self.assertEqual(t0.CuttingEdgeHeight, t1.CuttingEdgeHeight)
def test02(self):
'''Verify template dictionary construction'''
t0 = self.test00()
t1 = Path.Tool(t0.templateAttrs())
self.assertEqual(t0.Name, t1.Name)
self.assertEqual(t0.ToolType, t1.ToolType)
self.assertEqual(t0.Material, t1.Material)
self.assertEqual(t0.Diameter, t1.Diameter)
self.assertEqual(t0.LengthOffset, t1.LengthOffset)
self.assertEqual(t0.FlatRadius, t1.FlatRadius)
self.assertEqual(t0.CornerRadius, t1.CornerRadius)
self.assertEqual(t0.CuttingEdgeAngle, t1.CuttingEdgeAngle)
self.assertEqual(t0.CuttingEdgeHeight, t1.CuttingEdgeHeight)
| sanguinariojoe/FreeCAD | src/Mod/Path/PathTests/TestPathTool.py | Python | lgpl-2.1 | 4,116 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from libcloud.compute.drivers.kili import KiliCloudNodeDriver, ENDPOINT_ARGS
from libcloud.test.compute.test_openstack import OpenStack_1_1_Tests
def _ex_connection_class_kwargs(self):
kwargs = self.openstack_connection_kwargs()
kwargs['get_endpoint_args'] = ENDPOINT_ARGS
# Remove keystone from the URL path so that the openstack base tests work
kwargs['ex_force_auth_url'] = 'https://api.kili.io/v2.0/tokens'
kwargs['ex_tenant_name'] = self.tenant_name
return kwargs
KiliCloudNodeDriver._ex_connection_class_kwargs = _ex_connection_class_kwargs
class KiliCloudNodeDriverTests(OpenStack_1_1_Tests, unittest.TestCase):
driver_klass = KiliCloudNodeDriver
driver_type = KiliCloudNodeDriver
| Kami/libcloud | libcloud/test/compute/test_kili.py | Python | apache-2.0 | 1,525 |
import time
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_false
from proboscis import SkipTest
from proboscis import test
from proboscis import before_class
from proboscis import after_class
from json import loads
from modules.logger import Log
from modules.amqp import AMQPWorker
from modules.worker import WorkerThread, WorkerTasks
from config.api1_1_config import *
from config.amqp import *
from on_http_api1_1 import NodesApi as Nodes
from on_http_api1_1 import WorkflowApi as Workflows
from tests.api.v1_1.discovery_tests import DiscoveryTests
from tests.api.v1_1.poller_tests import PollerTests
from tests.api.v1_1.workflows_tests import WorkflowsTests
from benchmark.tests import ansible_ctl
from benchmark.utils import parser
from benchmark.utils.case_recorder import caseRecorder
LOG = Log(__name__)
class BenchmarkTests(object):
def __init__(self, name):
ansible_ctl.render_case_name(name)
self.__data_path = ansible_ctl.get_data_path_per_case()
self.case_recorder = caseRecorder(self.__data_path)
self.client = config.api_client
self.__node_count = 0
self.__finished = 0
self.__graph_name = None
def _prepare_case_env(self):
self.__node_count = self.__check_compute_count()
self.case_recorder.write_interval(ansible_ctl.get_data_interval())
self.case_recorder.write_start()
self.case_recorder.write_node_number(self.__node_count)
assert_equal(True, ansible_ctl.start_daemon(), \
message='Failed to start data collection daemon!')
def _collect_case_data(self):
assert_equal(True, ansible_ctl.collect_data(), message='Failed to collect footprint data!')
self.case_recorder.write_end()
LOG.info('Parse log and generate html reports')
try:
parser.parse(self.__data_path)
except RuntimeError as err:
LOG.warning('Error on parsing log or generating reports: ')
LOG.warning(err)
def _wait_until_graph_finish(self, graph_name, timevalue):
self.__graph_name = graph_name
self.__task = WorkerThread(AMQPWorker(queue=QUEUE_GRAPH_FINISH, \
callbacks=[self.__handle_graph_finish]), \
graph_name)
def start(worker, id):
worker.start()
tasks = WorkerTasks(tasks=[self.__task], func=start)
tasks.run()
tasks.wait_for_completion(timeout_sec=timevalue)
assert_false(self.__task.timeout, \
message='timeout waiting for task {0}'.format(self.__task.id))
def __handle_graph_finish(self, body, message):
routeId = message.delivery_info.get('routing_key').split('graph.finished.')[1]
Workflows().workflows_get()
workflows = loads(self.client.last_response.data)
message.ack()
for w in workflows:
definition = w['definition']
injectableName = definition.get('injectableName')
if injectableName == self.__graph_name:
graphId = w['context'].get('graphId')
if graphId == routeId:
nodeid = w['context'].get('target')
                    if nodeid is None:
nodeid = w['definition']['options']['defaults'].get('nodeId','')
status = body.get('status')
if status == 'succeeded':
self.__finished += 1
self.case_recorder.write_event('finish {0} on node {1} {2}'
.format(self.__graph_name, self.__finished, nodeid))
break
if self.__node_count == self.__finished:
self.__task.worker.stop()
self.__task.running = False
self.__finished = 0
self._collect_case_data()
LOG.info('Fetch {0} log finished'.format(self.__graph_name))
def __check_compute_count(self):
Nodes().nodes_get()
nodes = loads(self.client.last_response.data)
count = 0
for n in nodes:
type = n.get('type')
if type == 'compute':
count += 1
return count
@test(groups=["benchmark.poller"])
class BenchmarkPollerTests(BenchmarkTests):
def __init__(self):
BenchmarkTests.__init__(self, 'poller')
@test(groups=["test-bm-poller"], depends_on_groups=["test-node-poller"])
def test_poller(self):
""" Wait for 15 mins to let RackHD run pollers """
self._prepare_case_env()
time.sleep(900)
self._collect_case_data()
LOG.info('Fetch poller log finished')
@test(groups=["benchmark.discovery"])
class BenchmarkDiscoveryTests(BenchmarkTests):
def __init__(self):
BenchmarkTests.__init__(self, 'discovery')
@test(groups=["test-bm-discovery-prepare"], depends_on_groups=["test-node-poller"])
def test_prepare_discovery(self):
""" Prepare discovery """
self._prepare_case_env()
@test(groups=["test-bm-discovery"],
depends_on_groups=["test-bm-discovery-prepare", "test_discovery_delete_node"])
def test_discovery(self):
""" Wait for discovery finished """
self.case_recorder.write_event('start all discovery')
self._wait_until_graph_finish('Graph.SKU.Discovery', 1200)
@test(groups=["test-bm-discovery-post"],
depends_on_groups=["test_discovery_add_obm"])
def test_discovery_post(self):
pass
@test(groups=["benchmark.bootstrap"])
class BenchmarkBootstrapTests(BenchmarkTests):
def __init__(self):
BenchmarkTests.__init__(self, 'bootstrap')
self.__base = defaults.get('RACKHD_BASE_REPO_URL', \
'http://{0}:{1}'.format(HOST_IP, HOST_PORT))
self.__os_repo = defaults.get('RACKHD_CENTOS_REPO_PATH', \
self.__base + '/repo/centos/7')
@test(groups=["test-bm-bootstrap-prepare"], depends_on_groups=["test-node-poller"])
def test_prepare_bootstrap(self):
""" Prepare bootstrap """
self._prepare_case_env()
@test(groups=['test-bm-bootstrap-post-centos7'],
depends_on_groups=["test-bm-bootstrap-prepare"])
def test_install_centos7(self):
""" Testing CentOS 7 Installer Workflow """
self.case_recorder.write_event('start all bootstrap')
body = {
"options": {
"defaults": {
"version": "7",
"repo": self.__os_repo
}
}
}
WorkflowsTests().post_workflows("Graph.InstallCentOS",
nodes=[],
data=body,
run_now=False)
@test(groups=["test-bm-bootstrap"],
depends_on_groups=["test-bm-bootstrap-prepare", "test-bm-bootstrap-post-centos7"])
def test_bootstrap_centos(self):
""" Wait for bootstrap finished """
self.case_recorder.write_event('start all bootstrap')
self._wait_until_graph_finish('Graph.InstallCentOS', -1)
| johren/RackHD | test/benchmark/tests/api_v1_1_tests.py | Python | apache-2.0 | 7,210 |
import urllib
from urlparse import urlparse
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATICFILES_URL setting, and serves those files.
"""
def __init__(self, application, media_dir=None):
self.application = application
if media_dir:
self.media_dir = media_dir
else:
self.media_dir = self.get_media_dir()
self.media_url = urlparse(self.get_media_url())
if settings.DEBUG:
utils.check_settings()
super(StaticFilesHandler, self).__init__()
def get_media_dir(self):
return settings.STATICFILES_ROOT
def get_media_url(self):
return settings.STATICFILES_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the media_url
* the request's path isn't under the media path (or equal)
* settings.DEBUG isn't True
"""
return (self.media_url[2] != path and
path.startswith(self.media_url[2]) and not self.media_url[1])
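    # Illustrative (hypothetical settings): with STATICFILES_URL = '/static/',
    #
    #     >>> handler._should_handle('/static/css/app.css')
    #     True
    #     >>> handler._should_handle('/static/')   # the prefix itself
    #     False
    #
    # and when STATICFILES_URL includes a host (e.g. a CDN URL), the parsed
    # netloc in self.media_url[1] is non-empty, so nothing is handled locally.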
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
The passed URL is assumed to begin with ``media_url``. If the
resultant file path is outside the media directory, then a ValueError
is raised.
"""
# Remove ``media_url``.
relative_url = url[len(self.media_url[2]):]
return urllib.url2pathname(relative_url)
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404, e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(environ['PATH_INFO']):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)
| writefaruq/lionface-app | django/contrib/staticfiles/handlers.py | Python | bsd-3-clause | 2,733 |