repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
citrix-openstack-build/nova | refs/heads/master | nova/virt/disk/mount/api.py | 9 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Support for mounting virtual image files."""
import os
import time
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
MAX_DEVICE_WAIT = 30
class Mount(object):
    """Standard mounting operations, that can be overridden by subclasses.

    The basic device operations provided are get, map and mount,
    to be called in that order.
    """

    # Mode string naming the mount implementation (e.g. "loop", "nbd");
    # used in error messages.  To be overridden in subclasses.
    mode = None  # to be overridden in subclasses

    @staticmethod
    def instance_for_format(imgfile, mountdir, partition, imgfmt):
        """Return a Mount implementation chosen from the image format.

        Raw images are attached via a loop device; any other format is
        handled through the NBD implementation.
        """
        LOG.debug(_("Instance for format imgfile=%(imgfile)s "
                    "mountdir=%(mountdir)s partition=%(partition)s "
                    "imgfmt=%(imgfmt)s"),
                  {'imgfile': imgfile, 'mountdir': mountdir,
                   'partition': partition, 'imgfmt': imgfmt})
        if imgfmt == "raw":
            LOG.debug(_("Using LoopMount"))
            return importutils.import_object(
                "nova.virt.disk.mount.loop.LoopMount",
                imgfile, mountdir, partition)
        else:
            LOG.debug(_("Using NbdMount"))
            return importutils.import_object(
                "nova.virt.disk.mount.nbd.NbdMount",
                imgfile, mountdir, partition)

    @staticmethod
    def instance_for_device(imgfile, mountdir, partition, device):
        """Return a Mount implementation for an already-attached device.

        The implementation is inferred from the device path: a path
        containing "loop" selects LoopMount, anything else NbdMount.
        """
        LOG.debug(_("Instance for device imgfile=%(imgfile)s "
                    "mountdir=%(mountdir)s partition=%(partition)s "
                    "device=%(device)s"),
                  {'imgfile': imgfile, 'mountdir': mountdir,
                   'partition': partition, 'device': device})
        if "loop" in device:
            LOG.debug(_("Using LoopMount"))
            return importutils.import_object(
                "nova.virt.disk.mount.loop.LoopMount",
                imgfile, mountdir, partition, device)
        else:
            LOG.debug(_("Using NbdMount"))
            return importutils.import_object(
                "nova.virt.disk.mount.nbd.NbdMount",
                imgfile, mountdir, partition, device)

    def __init__(self, image, mount_dir, partition=None, device=None):
        # Input
        self.image = image
        self.partition = partition
        self.mount_dir = mount_dir

        # Output
        self.error = ""

        # Internal state flags tracking how far the get/map/mount
        # sequence has progressed (used by do_teardown to unwind).
        self.linked = self.mapped = self.mounted = self.automapped = False
        self.device = self.mapped_device = device

        # Reset to mounted dir if possible
        self.reset_dev()

    def reset_dev(self):
        """Reset device paths to allow unmounting."""
        if not self.device:
            return

        # A pre-supplied device means the image is already attached,
        # mapped and mounted; mark all stages done so teardown works.
        self.linked = self.mapped = self.mounted = True

        device = self.device
        if os.path.isabs(device) and os.path.exists(device):
            if device.startswith('/dev/mapper/'):
                # Split "/dev/mapper/<base>p<N>" back into the base
                # device path and its partition number.
                device = os.path.basename(device)
                device, self.partition = device.rsplit('p', 1)
                self.device = os.path.join('/dev', device)

    def get_dev(self):
        """Make the image available as a block device in the file system."""
        self.device = None
        self.linked = True
        return True

    def _get_dev_retry_helper(self):
        """Some implementations need to retry their get_dev."""
        # NOTE(mikal): This method helps implement retries. The implementation
        # simply calls _get_dev_retry_helper from their get_dev, and implements
        # _inner_get_dev with their device acquisition logic. The NBD
        # implementation has an example.
        start_time = time.time()
        device = self._inner_get_dev()
        while not device:
            LOG.info(_('Device allocation failed. Will retry in 2 seconds.'))
            time.sleep(2)
            if time.time() - start_time > MAX_DEVICE_WAIT:
                LOG.warn(_('Device allocation failed after repeated retries.'))
                return False
            device = self._inner_get_dev()
        return True

    def _inner_get_dev(self):
        # Subclasses using _get_dev_retry_helper implement the actual
        # device acquisition here; must return truthy on success.
        raise NotImplementedError()

    def unget_dev(self):
        """Release the block device from the file system namespace."""
        self.linked = False

    def map_dev(self):
        """Map partitions of the device to the file system namespace."""
        assert(os.path.exists(self.device))
        LOG.debug(_("Map dev %s"), self.device)
        # Path the kernel creates itself when partition auto-mapping is on
        automapped_path = '/dev/%sp%s' % (os.path.basename(self.device),
                                          self.partition)

        if self.partition == -1:
            # -1 means "find the partition automatically", which the base
            # implementation cannot do.
            self.error = _('partition search unsupported with %s') % self.mode
        elif self.partition and not os.path.exists(automapped_path):
            map_path = '/dev/mapper/%sp%s' % (os.path.basename(self.device),
                                              self.partition)
            assert(not os.path.exists(map_path))

            # Note kpartx can output warnings to stderr and succeed
            # Also it can output failures to stderr and "succeed"
            # So we just go on the existence of the mapped device
            _out, err = utils.trycmd('kpartx', '-a', self.device,
                                     run_as_root=True, discard_warnings=True)

            # Note kpartx does nothing when presented with a raw image,
            # so given we only use it when we expect a partitioned image, fail
            if not os.path.exists(map_path):
                if not err:
                    err = _('partition %s not found') % self.partition
                self.error = _('Failed to map partitions: %s') % err
            else:
                self.mapped_device = map_path
                self.mapped = True
        elif self.partition and os.path.exists(automapped_path):
            # Note auto mapping can be enabled with the 'max_part' option
            # to the nbd or loop kernel modules. Beware of possible races
            # in the partition scanning for _loop_ devices though
            # (details in bug 1024586), which are currently uncatered for.
            self.mapped_device = automapped_path
            self.mapped = True
            self.automapped = True
        else:
            # No partition requested: mount the whole device directly.
            self.mapped_device = self.device
            self.mapped = True

        return self.mapped

    def unmap_dev(self):
        """Remove partitions of the device from the file system namespace."""
        if not self.mapped:
            return
        LOG.debug(_("Unmap dev %s"), self.device)
        # Kernel-automapped partitions disappear with the device itself,
        # so kpartx cleanup is only needed for mappings we created.
        if self.partition and not self.automapped:
            utils.execute('kpartx', '-d', self.device, run_as_root=True)
        self.mapped = False
        self.automapped = False

    def mnt_dev(self):
        """Mount the device into the file system."""
        LOG.debug(_("Mount %(dev)s on %(dir)s") %
                  {'dev': self.mapped_device, 'dir': self.mount_dir})
        _out, err = utils.trycmd('mount', self.mapped_device, self.mount_dir,
                                 discard_warnings=True, run_as_root=True)
        if err:
            self.error = _('Failed to mount filesystem: %s') % err
            LOG.debug(self.error)
            return False

        self.mounted = True
        return True

    def unmnt_dev(self):
        """Unmount the device from the file system."""
        if not self.mounted:
            return
        LOG.debug(_("Umount %s") % self.mapped_device)
        utils.execute('umount', self.mapped_device, run_as_root=True)
        self.mounted = False

    def do_mount(self):
        """Call the get, map and mnt operations."""
        status = False
        try:
            status = self.get_dev() and self.map_dev() and self.mnt_dev()
        finally:
            # Unwind any partially-completed stages on failure.
            if not status:
                LOG.debug(_("Fail to mount, tearing back down"))
                self.do_teardown()
        return status

    def do_umount(self):
        """Call the unmnt operation."""
        if self.mounted:
            self.unmnt_dev()

    def do_teardown(self):
        """Call the umnt, unmap, and unget operations."""
        if self.mounted:
            self.unmnt_dev()
        if self.mapped:
            self.unmap_dev()
        if self.linked:
            self.unget_dev()
|
acenario/Payable | refs/heads/master | lib/python2.7/site-packages/django/contrib/flatpages/models.py | 136 | from __future__ import unicode_literals
from django.contrib.sites.models import Site
from django.core.urlresolvers import get_script_prefix
from django.db import models
from django.utils.encoding import iri_to_uri, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class FlatPage(models.Model):
    """A simple CMS-style page stored in the database.

    Looked up by URL path; rendered with ``template_name`` if set,
    otherwise with 'flatpages/default.html'.
    """

    # URL path of the page (e.g. "/about/"); indexed for lookup by the
    # flatpages middleware/view.
    url = models.CharField(_('URL'), max_length=100, db_index=True)
    title = models.CharField(_('title'), max_length=200)
    content = models.TextField(_('content'), blank=True)
    enable_comments = models.BooleanField(_('enable comments'), default=False)
    template_name = models.CharField(_('template name'), max_length=70, blank=True,
        help_text=_(
            "Example: 'flatpages/contact_page.html'. If this isn't provided, "
            "the system will use 'flatpages/default.html'."
        ),
    )
    registration_required = models.BooleanField(_('registration required'),
        help_text=_("If this is checked, only logged-in users will be able to view the page."),
        default=False)
    # Pages can be shared across multiple Django sites.
    sites = models.ManyToManyField(Site)

    class Meta:
        db_table = 'django_flatpage'
        verbose_name = _('flat page')
        verbose_name_plural = _('flat pages')
        ordering = ('url',)

    def __str__(self):
        return "%s -- %s" % (self.url, self.title)

    def get_absolute_url(self):
        # Handle script prefix manually because we bypass reverse()
        return iri_to_uri(get_script_prefix().rstrip('/') + self.url)
|
hanicker/odoo | refs/heads/8.0 | addons/hr_timesheet_invoice/__init__.py | 442 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_invoice
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rohitwaghchaure/frappe_smart | refs/heads/develop | frappe/core/doctype/file_data/__init__.py | 2292 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
|
jasimpson/gnuradio-jasimpson | refs/heads/master | grc/gui/StateCache.py | 34 | """
Copyright 2007 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import Actions
from Constants import STATE_CACHE_SIZE
class StateCache(object):
    """Fixed-size circular buffer of flow graph states for undo/redo.

    States are written into a list of length STATE_CACHE_SIZE in a
    circular fashion.  An index marks the current state, and two
    counters track how many valid states exist before (undo) and after
    (redo) the current index.
    """

    def __init__(self, initial_state):
        """Seed the cache with the initial state (nested data)."""
        self.states = [None] * STATE_CACHE_SIZE
        self.current_state_index = 0
        self.num_prev_states = 0
        self.num_next_states = 0
        self.states[0] = initial_state
        self.update_actions()

    def save_new_state(self, state):
        """Record a new state immediately after the current one.

        Advances the circular index, caps the undo depth at one less
        than the buffer size, and discards any redo history.
        """
        self.current_state_index = (self.current_state_index + 1) % STATE_CACHE_SIZE
        self.states[self.current_state_index] = state
        self.num_prev_states = min(self.num_prev_states + 1, STATE_CACHE_SIZE - 1)
        self.num_next_states = 0
        self.update_actions()

    def get_current_state(self):
        """Return the state at the current index (nested data)."""
        self.update_actions()
        return self.states[self.current_state_index]

    def get_prev_state(self):
        """Step back one state (undo) and return it, or None if unavailable."""
        if not self.num_prev_states:
            return None
        self.current_state_index = (self.current_state_index - 1) % STATE_CACHE_SIZE
        self.num_next_states += 1
        self.num_prev_states -= 1
        return self.get_current_state()

    def get_next_state(self):
        """Step forward one state (redo) and return it, or None if unavailable."""
        if not self.num_next_states:
            return None
        self.current_state_index = (self.current_state_index + 1) % STATE_CACHE_SIZE
        self.num_next_states -= 1
        self.num_prev_states += 1
        return self.get_current_state()

    def update_actions(self):
        """Enable or disable the undo/redo actions to match the counters."""
        Actions.FLOW_GRAPH_REDO.set_sensitive(self.num_next_states > 0)
        Actions.FLOW_GRAPH_UNDO.set_sensitive(self.num_prev_states > 0)
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-2.4.3/Lib/idlelib/TreeWidget.py | 15 | # XXX TO DO:
# - popup menu
# - support partial or total redisplay
# - key bindings (instead of quick-n-dirty bindings on Canvas):
# - up/down arrow keys to move focus around
# - ditto for page up/down, home/end
# - left/right arrows to expand/collapse & move out/in
# - more doc strings
# - add icons for "file", "module", "class", "method"; better "python" icon
# - callback for selection???
# - multiple-item selection
# - tooltips
# - redo geometry without magic numbers
# - keep track of object ids to allow more careful cleaning
# - optimize tree redraw after expand of subnode
import os
import sys
from Tkinter import *
import imp
import ZoomHeight
from configHandler import idleConf
ICONDIR = "Icons"
# Look for Icons subdirectory in the same directory as this module
try:
_icondir = os.path.join(os.path.dirname(__file__), ICONDIR)
except NameError:
_icondir = ICONDIR
if os.path.isdir(_icondir):
ICONDIR = _icondir
elif not os.path.isdir(ICONDIR):
raise RuntimeError, "can't find icon directory (%r)" % (ICONDIR,)
def listicons(icondir=ICONDIR):
    """Utility to display the available icons.

    Opens a new Tk root window showing every *.gif icon found in
    *icondir*, with its base name underneath, ten columns per row.
    """
    root = Tk()
    import glob
    # named 'gifs' rather than 'list' to avoid shadowing the builtin
    gifs = glob.glob(os.path.join(icondir, "*.gif"))
    gifs.sort()
    images = []
    row = column = 0
    for file in gifs:
        name = os.path.splitext(os.path.basename(file))[0]
        image = PhotoImage(file=file, master=root)
        images.append(image)
        label = Label(root, image=image, bd=1, relief="raised")
        label.grid(row=row, column=column)
        label = Label(root, text=name)
        label.grid(row=row+1, column=column)
        column = column + 1
        if column >= 10:
            row = row+2
            column = 0
    # keep a reference on the root so the PhotoImages are not
    # garbage collected while displayed
    root.images = images
class TreeNode:
    """Widget-side node of the tree.

    Draws one TreeItem (and, when expanded, its children) on a Canvas
    and handles selection, expand/collapse and in-place label editing.
    """

    def __init__(self, canvas, parent, item):
        self.canvas = canvas
        self.parent = parent
        self.item = item
        self.state = 'collapsed'
        self.selected = False
        self.children = []
        self.x = self.y = None
        self.iconimages = {} # cache of PhotoImage instances for icons

    def destroy(self):
        # Recursively drop children and break the parent link so the
        # subtree can be garbage collected.
        for c in self.children[:]:
            self.children.remove(c)
            c.destroy()
        self.parent = None

    def geticonimage(self, name):
        """Return the cached PhotoImage for icon *name*, loading it on demand."""
        try:
            return self.iconimages[name]
        except KeyError:
            pass
        file, ext = os.path.splitext(name)
        ext = ext or ".gif"
        fullname = os.path.join(ICONDIR, file + ext)
        image = PhotoImage(master=self.canvas, file=fullname)
        self.iconimages[name] = image
        return image

    def select(self, event=None):
        """Select this node, deselecting every other node in the tree."""
        if self.selected:
            return
        self.deselectall()
        self.selected = True
        # Redraw icon and text so the selection highlight shows.
        self.canvas.delete(self.image_id)
        self.drawicon()
        self.drawtext()

    def deselect(self, event=None):
        if not self.selected:
            return
        self.selected = False
        self.canvas.delete(self.image_id)
        self.drawicon()
        self.drawtext()

    def deselectall(self):
        # Walk up to the root, then deselect the whole tree.
        if self.parent:
            self.parent.deselectall()
        else:
            self.deselecttree()

    def deselecttree(self):
        if self.selected:
            self.deselect()
        for child in self.children:
            child.deselecttree()

    def flip(self, event=None):
        """Toggle expanded/collapsed state (double-click handler)."""
        if self.state == 'expanded':
            self.collapse()
        else:
            self.expand()
        self.item.OnDoubleClick()
        return "break"

    def expand(self, event=None):
        if not self.item._IsExpandable():
            return
        if self.state != 'expanded':
            self.state = 'expanded'
            self.update()
            self.view()

    def collapse(self, event=None):
        if self.state != 'collapsed':
            self.state = 'collapsed'
            self.update()

    def view(self):
        """Scroll the canvas so this node and its subtree are visible."""
        top = self.y - 2
        bottom = self.lastvisiblechild().y + 17
        height = bottom - top
        visible_top = self.canvas.canvasy(0)
        visible_height = self.canvas.winfo_height()
        visible_bottom = self.canvas.canvasy(visible_height)
        if visible_top <= top and bottom <= visible_bottom:
            # already fully visible
            return
        x0, y0, x1, y1 = self.canvas._getints(self.canvas['scrollregion'])
        if top >= visible_top and height <= visible_height:
            # scroll just enough to bring the bottom into view
            fraction = top + height - visible_height
        else:
            fraction = top
        fraction = float(fraction) / y1
        self.canvas.yview_moveto(fraction)

    def lastvisiblechild(self):
        if self.children and self.state == 'expanded':
            return self.children[-1].lastvisiblechild()
        else:
            return self

    def update(self):
        # Redraws propagate to the root, which repaints the whole tree.
        if self.parent:
            self.parent.update()
        else:
            oldcursor = self.canvas['cursor']
            self.canvas['cursor'] = "watch"
            self.canvas.update()
            self.canvas.delete(ALL)     # XXX could be more subtle
            self.draw(7, 2)
            x0, y0, x1, y1 = self.canvas.bbox(ALL)
            self.canvas.configure(scrollregion=(0, 0, x1, y1))
            self.canvas['cursor'] = oldcursor

    def draw(self, x, y):
        """Draw this node at (x, y); return the y just below the subtree."""
        # XXX This hard-codes too many geometry constants!
        self.x, self.y = x, y
        self.drawicon()
        self.drawtext()
        if self.state != 'expanded':
            return y+17
        # draw children
        if not self.children:
            sublist = self.item._GetSubList()
            if not sublist:
                # _IsExpandable() was mistaken; that's allowed
                return y+17
            for item in sublist:
                child = self.__class__(self.canvas, self, item)
                self.children.append(child)
        cx = x+20
        cy = y+17
        cylast = 0
        for child in self.children:
            cylast = cy
            self.canvas.create_line(x+9, cy+7, cx, cy+7, fill="gray50")
            cy = child.draw(cx, cy)
            if child.item._IsExpandable():
                if child.state == 'expanded':
                    iconname = "minusnode"
                    callback = child.collapse
                else:
                    iconname = "plusnode"
                    callback = child.expand
                image = self.geticonimage(iconname)
                id = self.canvas.create_image(x+9, cylast+7, image=image)
                # XXX This leaks bindings until canvas is deleted:
                self.canvas.tag_bind(id, "<1>", callback)
                self.canvas.tag_bind(id, "<Double-1>", lambda x: None)
        id = self.canvas.create_line(x+9, y+10, x+9, cylast+7,
            ##stipple="gray50",     # XXX Seems broken in Tk 8.0.x
            fill="gray50")
        self.canvas.tag_lower(id) # XXX .lower(id) before Python 1.5.2
        return cy

    def drawicon(self):
        if self.selected:
            imagename = (self.item.GetSelectedIconName() or
                         self.item.GetIconName() or
                         "openfolder")
        else:
            imagename = self.item.GetIconName() or "folder"
        image = self.geticonimage(imagename)
        id = self.canvas.create_image(self.x, self.y, anchor="nw", image=image)
        self.image_id = id
        self.canvas.tag_bind(id, "<1>", self.select)
        self.canvas.tag_bind(id, "<Double-1>", self.flip)

    def drawtext(self):
        textx = self.x+20-1
        texty = self.y-1
        labeltext = self.item.GetLabelText()
        if labeltext:
            id = self.canvas.create_text(textx, texty, anchor="nw",
                                         text=labeltext)
            self.canvas.tag_bind(id, "<1>", self.select)
            self.canvas.tag_bind(id, "<Double-1>", self.flip)
            x0, y0, x1, y1 = self.canvas.bbox(id)
            textx = max(x1, 200) + 10
        text = self.item.GetText() or "<no text>"
        # If an edit is in progress, commit it before redrawing.
        try:
            self.entry
        except AttributeError:
            pass
        else:
            self.edit_finish()
        try:
            label = self.label
        except AttributeError:
            # padding carefully selected (on Windows) to match Entry widget:
            self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
        theme = idleConf.GetOption('main','Theme','name')
        if self.selected:
            self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
        else:
            self.label.configure(idleConf.GetHighlight(theme, 'normal'))
        id = self.canvas.create_window(textx, texty,
                                       anchor="nw", window=self.label)
        self.label.bind("<1>", self.select_or_edit)
        self.label.bind("<Double-1>", self.flip)
        self.text_id = id

    def select_or_edit(self, event=None):
        # A click on an already-selected, editable item starts editing.
        if self.selected and self.item.IsEditable():
            self.edit(event)
        else:
            self.select(event)

    def edit(self, event=None):
        """Replace the label with an Entry widget for in-place editing."""
        self.entry = Entry(self.label, bd=0, highlightthickness=1, width=0)
        self.entry.insert(0, self.label['text'])
        self.entry.selection_range(0, END)
        self.entry.pack(ipadx=5)
        self.entry.focus_set()
        self.entry.bind("<Return>", self.edit_finish)
        self.entry.bind("<Escape>", self.edit_cancel)

    def edit_finish(self, event=None):
        """Commit the edit: push the new text to the item and redraw."""
        try:
            entry = self.entry
            del self.entry
        except AttributeError:
            return
        text = entry.get()
        entry.destroy()
        if text and text != self.item.GetText():
            self.item.SetText(text)
        # re-read the text: the item may have rejected/changed it
        text = self.item.GetText()
        self.label['text'] = text
        self.drawtext()
        self.canvas.focus_set()

    def edit_cancel(self, event=None):
        try:
            entry = self.entry
            del self.entry
        except AttributeError:
            return
        entry.destroy()
        self.drawtext()
        self.canvas.focus_set()
class TreeItem:
    """Abstract class representing tree items.

    Subclasses typically override the public methods; otherwise a
    default action is used.  The underscore-prefixed wrappers add
    caching and must not be overridden.
    """

    # cached result of IsExpandable(); None means "not computed yet"
    expandable = None

    def __init__(self):
        """Constructor.  Do whatever you need to do."""

    def GetText(self):
        """Return text string to display."""

    def GetLabelText(self):
        """Return label text string to display in front of text (if any)."""

    def _IsExpandable(self):
        """Do not override!  Called by TreeNode."""
        cached = self.expandable
        if cached is None:
            cached = self.IsExpandable()
            self.expandable = cached
        return cached

    def IsExpandable(self):
        """Return whether there are subitems."""
        return 1

    def _GetSubList(self):
        """Do not override!  Called by TreeNode."""
        if not self.IsExpandable():
            return []
        children = self.GetSubList()
        if not children:
            # remember that this item turned out to be a leaf
            self.expandable = 0
        return children

    def IsEditable(self):
        """Return whether the item's text may be edited."""

    def SetText(self, text):
        """Change the item's text (if it is editable)."""

    def GetIconName(self):
        """Return name of icon to be displayed normally."""

    def GetSelectedIconName(self):
        """Return name of icon to be displayed when selected."""

    def GetSubList(self):
        """Return list of items forming sublist."""

    def OnDoubleClick(self):
        """Called on a double-click on the item."""
# Example application
class FileTreeItem(TreeItem):
    """Example TreeItem subclass -- browse the file system."""

    def __init__(self, path):
        self.path = path

    def GetText(self):
        # fall back to the whole path for filesystem roots ("/" or "C:/")
        return os.path.basename(self.path) or self.path

    def IsEditable(self):
        # only items with a real basename (not roots) may be renamed
        return os.path.basename(self.path) != ""

    def SetText(self, text):
        """Rename the file to *text* within its current directory."""
        parent = os.path.dirname(self.path)
        newpath = os.path.join(parent, text)
        # refuse renames that would escape the current directory
        if os.path.dirname(newpath) != os.path.dirname(self.path):
            return
        try:
            os.rename(self.path, newpath)
            self.path = newpath
        except os.error:
            pass

    def GetIconName(self):
        if not self.IsExpandable():
            return "python" # XXX wish there was a "file" icon

    def IsExpandable(self):
        return os.path.isdir(self.path)

    def GetSubList(self):
        """Return child FileTreeItems, sorted case-insensitively by name."""
        try:
            names = os.listdir(self.path)
        except os.error:
            return []
        names.sort(lambda a, b: cmp(os.path.normcase(a), os.path.normcase(b)))
        return [FileTreeItem(os.path.join(self.path, name)) for name in names]
# A canvas widget with scroll bars and some useful bindings
class ScrolledCanvas:
    """A Canvas inside a Frame with vertical and horizontal scrollbars,
    plus keyboard bindings for paging, unit scrolling and height zoom.
    """

    def __init__(self, master, **opts):
        # Default the vertical scroll unit to one tree row (17 px).
        # ('in' replaces the long-deprecated dict.has_key.)
        if 'yscrollincrement' not in opts:
            opts['yscrollincrement'] = 17
        self.master = master
        self.frame = Frame(master)
        self.frame.rowconfigure(0, weight=1)
        self.frame.columnconfigure(0, weight=1)
        self.canvas = Canvas(self.frame, **opts)
        self.canvas.grid(row=0, column=0, sticky="nsew")
        self.vbar = Scrollbar(self.frame, name="vbar")
        self.vbar.grid(row=0, column=1, sticky="nse")
        self.hbar = Scrollbar(self.frame, name="hbar", orient="horizontal")
        self.hbar.grid(row=1, column=0, sticky="ews")
        # cross-wire scrollbars and canvas
        self.canvas['yscrollcommand'] = self.vbar.set
        self.vbar['command'] = self.canvas.yview
        self.canvas['xscrollcommand'] = self.hbar.set
        self.hbar['command'] = self.canvas.xview
        self.canvas.bind("<Key-Prior>", self.page_up)
        self.canvas.bind("<Key-Next>", self.page_down)
        self.canvas.bind("<Key-Up>", self.unit_up)
        self.canvas.bind("<Key-Down>", self.unit_down)
        #if isinstance(master, Toplevel) or isinstance(master, Tk):
        self.canvas.bind("<Alt-Key-2>", self.zoom_height)
        self.canvas.focus_set()

    def page_up(self, event):
        """Scroll the canvas up one page."""
        self.canvas.yview_scroll(-1, "page")
        return "break"

    def page_down(self, event):
        """Scroll the canvas down one page."""
        self.canvas.yview_scroll(1, "page")
        return "break"

    def unit_up(self, event):
        """Scroll the canvas up one unit (row)."""
        self.canvas.yview_scroll(-1, "unit")
        return "break"

    def unit_down(self, event):
        """Scroll the canvas down one unit (row)."""
        self.canvas.yview_scroll(1, "unit")
        return "break"

    def zoom_height(self, event):
        """Toggle zooming the toplevel window to full screen height."""
        ZoomHeight.zoom_height(self.master)
        return "break"
# Testing functions
def test():
    """Demo: browse a hard-coded directory in a Toplevel over PyShell."""
    import PyShell
    root = Toplevel(PyShell.root)
    root.configure(bd=0, bg="yellow")
    root.focus_set()
    sc = ScrolledCanvas(root, bg="white", highlightthickness=0, takefocus=1)
    sc.frame.pack(expand=1, fill="both")
    # NOTE(review): Windows-only path; adjust on other platforms.
    item = FileTreeItem("C:/windows/desktop")
    node = TreeNode(sc.canvas, None, item)
    node.expand()
def test2():
    # test w/o scrolling canvas
    root = Tk()
    root.configure(bd=0)
    canvas = Canvas(root, bg="white", highlightthickness=0)
    canvas.pack(expand=1, fill="both")
    # browse the current working directory
    item = FileTreeItem(os.curdir)
    node = TreeNode(canvas, None, item)
    node.update()
    canvas.focus_set()
if __name__ == '__main__':
test()
|
miguelgrinberg/python-engineio | refs/heads/main | examples/server/sanic/latency.py | 1 | from sanic import Sanic
from sanic.response import html
import engineio
eio = engineio.AsyncServer(async_mode='sanic')
app = Sanic(name='latency')
eio.attach(app)
@app.route('/')
async def index(request):
with open('latency.html') as f:
return html(f.read())
@eio.on('message')
async def message(sid, data):
await eio.send(sid, 'pong')
app.static('/static', './static')
if __name__ == '__main__':
app.run()
|
chuan9/chromium-crosswalk | refs/heads/master | third_party/mojo/src/mojo/public/third_party/jinja2/parser.py | 637 | # -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.lexer import describe_token, describe_token_expr
from jinja2._compat import next, imap
#: statements that callinto
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
'macro', 'include', 'from', 'import',
'set'])
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
    def __init__(self, environment, source, name=None, filename=None,
                 state=None):
        # Tokenize up front; the token stream drives the whole parse.
        self.environment = environment
        self.stream = environment._tokenize(source, name, filename, state)
        self.name = name
        self.filename = filename
        self.closed = False
        # map of custom tag name -> extension parse callback
        self.extensions = {}
        for extension in environment.iter_extensions():
            for tag in extension.tags:
                self.extensions[tag] = extension.parse
        self._last_identifier = 0
        # stacks used to produce helpful nesting error messages
        self._tag_stack = []
        self._end_token_stack = []
    def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
        """Convenience method that raises `exc` with the message, passed
        line number or last line number as well as the current name and
        filename.
        """
        if lineno is None:
            # default to the line of the token currently being parsed
            lineno = self.stream.current.lineno
        raise exc(msg, lineno, self.name, self.filename)
    def _fail_ut_eof(self, name, end_token_stack, lineno):
        # Shared failure path for unknown tags and premature end of
        # template: build a human-readable message from the end tokens
        # the parser is still waiting for.
        expected = []
        for exprs in end_token_stack:
            expected.extend(imap(describe_token_expr, exprs))
        if end_token_stack:
            currently_looking = ' or '.join(
                "'%s'" % describe_token_expr(expr)
                for expr in end_token_stack[-1])
        else:
            currently_looking = None

        if name is None:
            message = ['Unexpected end of template.']
        else:
            message = ['Encountered unknown tag \'%s\'.' % name]

        if currently_looking:
            if name is not None and name in expected:
                # the tag exists but was used at the wrong nesting level
                message.append('You probably made a nesting mistake. Jinja '
                               'is expecting this tag, but currently looking '
                               'for %s.' % currently_looking)
            else:
                message.append('Jinja was looking for the following tags: '
                               '%s.' % currently_looking)

        if self._tag_stack:
            message.append('The innermost block that needs to be '
                           'closed is \'%s\'.' % self._tag_stack[-1])

        self.fail(' '.join(message), lineno)
    def fail_unknown_tag(self, name, lineno=None):
        """Called if the parser encounters an unknown tag.  Tries to fail
        with a human readable error message that could help to identify
        the problem.
        """
        return self._fail_ut_eof(name, self._end_token_stack, lineno)
    def fail_eof(self, end_tokens=None, lineno=None):
        """Like fail_unknown_tag but for end of template situations."""
        # copy the stack so the caller's extra end tokens do not leak in
        stack = list(self._end_token_stack)
        if end_tokens is not None:
            stack.append(end_tokens)
        return self._fail_ut_eof(None, stack, lineno)
    def is_tuple_end(self, extra_end_rules=None):
        """Are we at the end of a tuple?"""
        if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
            return True
        elif extra_end_rules is not None:
            # callers may supply extra terminators, e.g. 'name:in' for loops
            return self.stream.current.test_any(extra_end_rules)
        return False
    def free_identifier(self, lineno=None):
        """Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
        self._last_identifier += 1
        # bypass InternalName.__new__/__init__ checks for internal names
        rv = object.__new__(nodes.InternalName)
        nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
        return rv
    def parse_statement(self):
        """Parse a single statement."""
        token = self.stream.current
        if token.type != 'name':
            self.fail('tag name expected', token.lineno)
        self._tag_stack.append(token.value)
        pop_tag = True
        try:
            if token.value in _statement_keywords:
                # dispatch to the dedicated parse_<keyword> method
                return getattr(self, 'parse_' + self.stream.current.value)()
            if token.value == 'call':
                return self.parse_call_block()
            if token.value == 'filter':
                return self.parse_filter_block()
            ext = self.extensions.get(token.value)
            if ext is not None:
                return ext(self)

            # did not work out, remove the token we pushed by accident
            # from the stack so that the unknown tag fail function can
            # produce a proper error message.
            self._tag_stack.pop()
            pop_tag = False
            self.fail_unknown_tag(token.value, token.lineno)
        finally:
            if pop_tag:
                self._tag_stack.pop()
    def parse_statements(self, end_tokens, drop_needle=False):
        """Parse multiple statements into a list until one of the end tokens
        is reached.  This is used to parse the body of statements as it also
        parses template data if appropriate.  The parser checks first if the
        current token is a colon and skips it if there is one.  Then it checks
        for the block end and parses until if one of the `end_tokens` is
        reached.  Per default the active token in the stream at the end of
        the call is the matched end token.  If this is not wanted `drop_needle`
        can be set to `True` and the end token is removed.
        """
        # the first token may be a colon for python compatibility
        self.stream.skip_if('colon')

        # in the future it would be possible to add whole code sections
        # by adding some sort of end of statement token and parsing those here.
        self.stream.expect('block_end')
        result = self.subparse(end_tokens)

        # we reached the end of the template too early, the subparser
        # does not check for this, so we do that now
        if self.stream.current.type == 'eof':
            self.fail_eof(end_tokens)

        if drop_needle:
            next(self.stream)
        return result
    def parse_set(self):
        """Parse an assign statement."""
        lineno = next(self.stream).lineno
        target = self.parse_assign_target()
        self.stream.expect('assign')
        expr = self.parse_tuple()
        return nodes.Assign(target, expr, lineno=lineno)
    def parse_for(self):
        """Parse a for loop."""
        lineno = self.stream.expect('name:for').lineno
        target = self.parse_assign_target(extra_end_rules=('name:in',))
        self.stream.expect('name:in')
        # condexpr disabled so a trailing "if ..." is the loop filter,
        # not part of the iterable expression
        iter = self.parse_tuple(with_condexpr=False,
                                extra_end_rules=('name:recursive',))
        test = None
        if self.stream.skip_if('name:if'):
            test = self.parse_expression()
        recursive = self.stream.skip_if('name:recursive')
        body = self.parse_statements(('name:endfor', 'name:else'))
        if next(self.stream).value == 'endfor':
            else_ = []
        else:
            else_ = self.parse_statements(('name:endfor',), drop_needle=True)
        return nodes.For(target, iter, body, else_, test,
                         recursive, lineno=lineno)
    def parse_if(self):
        """Parse an if construct.

        ``elif`` branches are represented as nested If nodes stored in the
        ``else_`` list of the previous node; the outermost node is returned.
        """
        node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
        while 1:
            node.test = self.parse_tuple(with_condexpr=False)
            node.body = self.parse_statements(('name:elif', 'name:else',
                                               'name:endif'))
            token = next(self.stream)
            if token.test('name:elif'):
                # chain another If node and continue parsing into it
                new_node = nodes.If(lineno=self.stream.current.lineno)
                node.else_ = [new_node]
                node = new_node
                continue
            elif token.test('name:else'):
                node.else_ = self.parse_statements(('name:endif',),
                                                   drop_needle=True)
            else:
                node.else_ = []
            break
        return result
    def parse_block(self):
        """Parse a ``{% block %}`` statement into a Block node."""
        node = nodes.Block(lineno=next(self.stream).lineno)
        node.name = self.stream.expect('name').value
        node.scoped = self.stream.skip_if('name:scoped')
        # common problem people encounter when switching from django
        # to jinja. we do not support hyphens in block names, so let's
        # raise a nicer error message in that case.
        if self.stream.current.type == 'sub':
            self.fail('Block names in Jinja have to be valid Python '
                      'identifiers and may not contain hyphens, use an '
                      'underscore instead.')
        node.body = self.parse_statements(('name:endblock',), drop_needle=True)
        # the block name may optionally be repeated after ``endblock``
        self.stream.skip_if('name:' + node.name)
        return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
    def parse_include(self):
        """Parse an ``{% include %}`` statement into an Include node."""
        node = nodes.Include(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        # optional ``ignore missing`` modifier suppresses load failures
        if self.stream.current.test('name:ignore') and \
           self.stream.look().test('name:missing'):
            node.ignore_missing = True
            self.stream.skip(2)
        else:
            node.ignore_missing = False
        # includes default to ``with context``
        return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
    def parse_from(self):
        """Parse a ``{% from ... import ... %}`` statement into a
        FromImport node.  Each imported name is stored either as a plain
        string or as a ``(name, alias)`` tuple.
        """
        node = nodes.FromImport(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        self.stream.expect('name:import')
        node.names = []
        def parse_context():
            # ``with context`` / ``without context`` may terminate the
            # name list; returns True if such a modifier was consumed
            if self.stream.current.value in ('with', 'without') and \
               self.stream.look().test('name:context'):
                node.with_context = next(self.stream).value == 'with'
                self.stream.skip()
                return True
            return False
        while 1:
            if node.names:
                self.stream.expect('comma')
            if self.stream.current.type == 'name':
                if parse_context():
                    break
                target = self.parse_assign_target(name_only=True)
                # leading underscores mark names private to the template
                if target.name.startswith('_'):
                    self.fail('names starting with an underline can not '
                              'be imported', target.lineno,
                              exc=TemplateAssertionError)
                if self.stream.skip_if('name:as'):
                    alias = self.parse_assign_target(name_only=True)
                    node.names.append((target.name, alias.name))
                else:
                    node.names.append(target.name)
                if parse_context() or self.stream.current.type != 'comma':
                    break
            else:
                break
        # no context modifier seen: default to ``without context``
        if not hasattr(node, 'with_context'):
            node.with_context = False
            self.stream.skip_if('comma')
        return node
    def parse_signature(self, node):
        """Parse a parenthesized macro/call-block signature and store the
        argument list and default expressions on *node*.
        """
        node.args = args = []
        node.defaults = defaults = []
        self.stream.expect('lparen')
        while self.stream.current.type != 'rparen':
            if args:
                self.stream.expect('comma')
            arg = self.parse_assign_target(name_only=True)
            arg.set_ctx('param')
            # a trailing ``= expr`` supplies a default value
            if self.stream.skip_if('assign'):
                defaults.append(self.parse_expression())
            args.append(arg)
        self.stream.expect('rparen')
    def parse_call_block(self):
        """Parse a ``{% call %}`` block into a CallBlock node.  An optional
        parenthesized signature declares the caller's arguments.
        """
        node = nodes.CallBlock(lineno=next(self.stream).lineno)
        if self.stream.current.type == 'lparen':
            self.parse_signature(node)
        else:
            node.args = []
            node.defaults = []
        node.call = self.parse_expression()
        # only actual call expressions are allowed after ``{% call %}``
        if not isinstance(node.call, nodes.Call):
            self.fail('expected call', node.lineno)
        node.body = self.parse_statements(('name:endcall',), drop_needle=True)
        return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
    def parse_assign_target(self, with_tuple=True, name_only=False,
                            extra_end_rules=None):
        """Parse an assignment target.  As Jinja2 allows assignments to
        tuples, this function can parse all allowed assignment targets.  Per
        default assignments to tuples are parsed, that can be disabled however
        by setting `with_tuple` to `False`.  If only assignments to names are
        wanted `name_only` can be set to `True`.  The `extra_end_rules`
        parameter is forwarded to the tuple parsing function.
        """
        if name_only:
            token = self.stream.expect('name')
            target = nodes.Name(token.value, 'store', lineno=token.lineno)
        else:
            if with_tuple:
                target = self.parse_tuple(simplified=True,
                                          extra_end_rules=extra_end_rules)
            else:
                target = self.parse_primary()
            target.set_ctx('store')
        # reject targets such as literals that cannot be assigned to
        if not target.can_assign():
            self.fail('can\'t assign to %r' % target.__class__.
                      __name__.lower(), target.lineno)
        return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
    def parse_compare(self):
        """Parse a chain of comparison operators (including ``in`` and
        ``not in``) into a single Compare node; a lone operand is returned
        unchanged.
        """
        lineno = self.stream.current.lineno
        expr = self.parse_add()
        ops = []
        while 1:
            token_type = self.stream.current.type
            if token_type in _compare_operators:
                next(self.stream)
                ops.append(nodes.Operand(token_type, self.parse_add()))
            elif self.stream.skip_if('name:in'):
                ops.append(nodes.Operand('in', self.parse_add()))
            elif self.stream.current.test('name:not') and \
                    self.stream.look().test('name:in'):
                # ``not in`` is two tokens but one operand type
                self.stream.skip(2)
                ops.append(nodes.Operand('notin', self.parse_add()))
            else:
                break
            lineno = self.stream.current.lineno
        if not ops:
            return expr
        return nodes.Compare(expr, ops, lineno=lineno)
def parse_add(self):
lineno = self.stream.current.lineno
left = self.parse_sub()
while self.stream.current.type == 'add':
next(self.stream)
right = self.parse_sub()
left = nodes.Add(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_sub(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type == 'sub':
next(self.stream)
right = self.parse_concat()
left = nodes.Sub(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_mul()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_mul())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_mul(self):
lineno = self.stream.current.lineno
left = self.parse_div()
while self.stream.current.type == 'mul':
next(self.stream)
right = self.parse_div()
left = nodes.Mul(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_div(self):
lineno = self.stream.current.lineno
left = self.parse_floordiv()
while self.stream.current.type == 'div':
next(self.stream)
right = self.parse_floordiv()
left = nodes.Div(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_floordiv(self):
lineno = self.stream.current.lineno
left = self.parse_mod()
while self.stream.current.type == 'floordiv':
next(self.stream)
right = self.parse_mod()
left = nodes.FloorDiv(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_mod(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type == 'mod':
next(self.stream)
right = self.parse_pow()
left = nodes.Mod(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
    def parse_unary(self, with_filter=True):
        """Parse unary ``-``/``+`` prefixes and the primary expression they
        apply to.  Postfix expressions (attributes, subscripts, calls) are
        always parsed; filter expressions only when `with_filter` is true.
        """
        token_type = self.stream.current.type
        lineno = self.stream.current.lineno
        if token_type == 'sub':
            next(self.stream)
            # the recursive call disables filters so that ``-x|f`` binds
            # the filter to the whole negation
            node = nodes.Neg(self.parse_unary(False), lineno=lineno)
        elif token_type == 'add':
            next(self.stream)
            node = nodes.Pos(self.parse_unary(False), lineno=lineno)
        else:
            node = self.parse_primary()
        node = self.parse_postfix(node)
        if with_filter:
            node = self.parse_filter_expr(node)
        return node
    def parse_primary(self):
        """Parse a primary expression: a name, a boolean/none constant, a
        string/number literal, a parenthesized tuple, a list or a dict.
        """
        token = self.stream.current
        if token.type == 'name':
            if token.value in ('true', 'false', 'True', 'False'):
                node = nodes.Const(token.value in ('true', 'True'),
                                   lineno=token.lineno)
            elif token.value in ('none', 'None'):
                node = nodes.Const(None, lineno=token.lineno)
            else:
                node = nodes.Name(token.value, 'load', lineno=token.lineno)
            next(self.stream)
        elif token.type == 'string':
            next(self.stream)
            buf = [token.value]
            lineno = token.lineno
            # adjacent string literals are concatenated as in Python
            while self.stream.current.type == 'string':
                buf.append(self.stream.current.value)
                next(self.stream)
            node = nodes.Const(''.join(buf), lineno=lineno)
        elif token.type in ('integer', 'float'):
            next(self.stream)
            node = nodes.Const(token.value, lineno=token.lineno)
        elif token.type == 'lparen':
            next(self.stream)
            node = self.parse_tuple(explicit_parentheses=True)
            self.stream.expect('rparen')
        elif token.type == 'lbracket':
            node = self.parse_list()
        elif token.type == 'lbrace':
            node = self.parse_dict()
        else:
            self.fail("unexpected '%s'" % describe_token(token), token.lineno)
        return node
    def parse_tuple(self, simplified=False, with_condexpr=True,
                    extra_end_rules=None, explicit_parentheses=False):
        """Works like `parse_expression` but if multiple expressions are
        delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
        This method could also return a regular expression instead of a tuple
        if no commas were found.

        The default parsing mode is a full tuple.  If `simplified` is `True`
        only names and literals are parsed.  The `with_condexpr` parameter is
        forwarded to :meth:`parse_expression`.

        Because tuples do not require delimiters and may end in a bogus comma
        an extra hint is needed that marks the end of a tuple.  For example
        for loops support tuples between `for` and `in`.  In that case the
        `extra_end_rules` is set to ``['name:in']``.

        `explicit_parentheses` is true if the parsing was triggered by an
        expression in parentheses.  This is used to figure out if an empty
        tuple is a valid expression or not.
        """
        lineno = self.stream.current.lineno
        if simplified:
            parse = self.parse_primary
        elif with_condexpr:
            parse = self.parse_expression
        else:
            parse = lambda: self.parse_expression(with_condexpr=False)
        args = []
        is_tuple = False
        while 1:
            if args:
                self.stream.expect('comma')
            if self.is_tuple_end(extra_end_rules):
                break
            args.append(parse())
            # a trailing comma marks the expression as a tuple even if it
            # only has a single element
            if self.stream.current.type == 'comma':
                is_tuple = True
            else:
                break
            lineno = self.stream.current.lineno
        if not is_tuple:
            if args:
                return args[0]
            # if we don't have explicit parentheses, an empty tuple is
            # not a valid expression.  This would mean nothing (literally
            # nothing) in the spot of an expression would be an empty
            # tuple.
            if not explicit_parentheses:
                self.fail('Expected an expression, got \'%s\'' %
                          describe_token(self.stream.current))
        return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'pipe':
node = self.parse_filter(node)
elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
self.fail('expected subscript expression', self.lineno)
    def parse_subscribed(self):
        """Parse a single subscript item: either a plain expression or a
        slice with up to three parts (``start:stop:step``), returned as a
        Slice node.
        """
        lineno = self.stream.current.lineno
        if self.stream.current.type == 'colon':
            # slice with omitted start
            next(self.stream)
            args = [None]
        else:
            node = self.parse_expression()
            if self.stream.current.type != 'colon':
                return node
            next(self.stream)
            args = [node]
        # stop part of the slice
        if self.stream.current.type == 'colon':
            args.append(None)
        elif self.stream.current.type not in ('rbracket', 'comma'):
            args.append(self.parse_expression())
        else:
            args.append(None)
        # optional step part of the slice
        if self.stream.current.type == 'colon':
            next(self.stream)
            if self.stream.current.type not in ('rbracket', 'comma'):
                args.append(self.parse_expression())
            else:
                args.append(None)
        else:
            args.append(None)
        return nodes.Slice(lineno=lineno, *args)
    def parse_call(self, node):
        """Parse a parenthesized argument list.

        If *node* is `None` the raw ``(args, kwargs, dyn_args, dyn_kwargs)``
        tuple is returned (used by filters and tests); otherwise a Call node
        wrapping *node* is returned.
        """
        token = self.stream.expect('lparen')
        args = []
        kwargs = []
        dyn_args = dyn_kwargs = None
        require_comma = False
        def ensure(expr):
            # raise a syntax error when an argument-order invariant breaks
            if not expr:
                self.fail('invalid syntax for function call expression',
                          token.lineno)
        while self.stream.current.type != 'rparen':
            if require_comma:
                self.stream.expect('comma')
            # support for trailing comma
            if self.stream.current.type == 'rparen':
                break
            if self.stream.current.type == 'mul':
                # ``*args`` must come before ``**kwargs``
                ensure(dyn_args is None and dyn_kwargs is None)
                next(self.stream)
                dyn_args = self.parse_expression()
            elif self.stream.current.type == 'pow':
                ensure(dyn_kwargs is None)
                next(self.stream)
                dyn_kwargs = self.parse_expression()
            else:
                # plain and keyword arguments must precede ``*``/``**``
                ensure(dyn_args is None and dyn_kwargs is None)
                if self.stream.current.type == 'name' and \
                        self.stream.look().type == 'assign':
                    key = self.stream.current.value
                    self.stream.skip(2)
                    value = self.parse_expression()
                    kwargs.append(nodes.Keyword(key, value,
                                                lineno=value.lineno))
                else:
                    # positional arguments may not follow keyword arguments
                    ensure(not kwargs)
                    args.append(self.parse_expression())
            require_comma = True
        self.stream.expect('rparen')
        if node is None:
            return args, kwargs, dyn_args, dyn_kwargs
        return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
                          lineno=token.lineno)
    def parse_filter(self, node, start_inline=False):
        """Parse a chain of ``|name(...)`` filters applied to *node*.

        With `start_inline` the first filter name is parsed without a
        leading pipe (used by ``{% filter %}`` blocks).
        """
        while self.stream.current.type == 'pipe' or start_inline:
            if not start_inline:
                next(self.stream)
            token = self.stream.expect('name')
            name = token.value
            # filter names may be dotted (e.g. namespaced filters)
            while self.stream.current.type == 'dot':
                next(self.stream)
                name += '.' + self.stream.expect('name').value
            if self.stream.current.type == 'lparen':
                args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
            else:
                args = []
                kwargs = []
                dyn_args = dyn_kwargs = None
            node = nodes.Filter(node, name, args, kwargs, dyn_args,
                                dyn_kwargs, lineno=token.lineno)
            start_inline = False
        return node
    def parse_test(self, node):
        """Parse an ``is [not] testname [args]`` expression applied to
        *node* and return a Test node (wrapped in Not when negated).
        """
        token = next(self.stream)
        if self.stream.current.test('name:not'):
            next(self.stream)
            negated = True
        else:
            negated = False
        name = self.stream.expect('name').value
        # test names may be dotted like filter names
        while self.stream.current.type == 'dot':
            next(self.stream)
            name += '.' + self.stream.expect('name').value
        dyn_args = dyn_kwargs = None
        kwargs = []
        if self.stream.current.type == 'lparen':
            args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
        elif self.stream.current.type in ('name', 'string', 'integer',
                                          'float', 'lparen', 'lbracket',
                                          'lbrace') and not \
                self.stream.current.test_any('name:else', 'name:or',
                                             'name:and'):
            # a bare expression after the test name is treated as a single
            # positional argument (e.g. ``x is divisibleby 3``)
            if self.stream.current.test('name:is'):
                self.fail('You cannot chain multiple tests with is')
            args = [self.parse_expression()]
        else:
            args = []
        node = nodes.Test(node, name, args, kwargs, dyn_args,
                          dyn_kwargs, lineno=token.lineno)
        if negated:
            node = nodes.Not(node, lineno=token.lineno)
        return node
    def subparse(self, end_tokens=None):
        """Parse template data, variable expressions and block statements
        into a list of nodes until the stream is exhausted or one of the
        `end_tokens` is reached at the start of a block.
        """
        body = []
        data_buffer = []
        add_data = data_buffer.append
        if end_tokens is not None:
            self._end_token_stack.append(end_tokens)
        def flush_data():
            # merge buffered template data / expressions into one Output node
            if data_buffer:
                lineno = data_buffer[0].lineno
                body.append(nodes.Output(data_buffer[:], lineno=lineno))
                del data_buffer[:]
        try:
            while self.stream:
                token = self.stream.current
                if token.type == 'data':
                    if token.value:
                        add_data(nodes.TemplateData(token.value,
                                                    lineno=token.lineno))
                    next(self.stream)
                elif token.type == 'variable_begin':
                    next(self.stream)
                    add_data(self.parse_tuple(with_condexpr=True))
                    self.stream.expect('variable_end')
                elif token.type == 'block_begin':
                    flush_data()
                    next(self.stream)
                    # stop (without consuming) when one of the caller's end
                    # tokens opens this block
                    if end_tokens is not None and \
                       self.stream.current.test_any(*end_tokens):
                        return body
                    rv = self.parse_statement()
                    if isinstance(rv, list):
                        body.extend(rv)
                    else:
                        body.append(rv)
                    self.stream.expect('block_end')
                else:
                    raise AssertionError('internal parsing error')
            flush_data()
        finally:
            # keep the end-token stack balanced even on parse errors
            if end_tokens is not None:
                self._end_token_stack.pop()
        return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
|
jh23453/privacyidea | refs/heads/master | privacyidea/lib/tokens/radiustoken.py | 2 | # -*- coding: utf-8 -*-
#
# 2016-02-22 Cornelius Kölbel <cornelius@privacyidea.org>
# Add the RADIUS identifier, which points to the system wide list
# of RADIUS servers.
# 2015-10-09 Cornelius Kölbel <cornelius@privacyidea.org>
# Add the RADIUS-System-Config, so that not each
# RADIUS-token needs his own secret. -> change the
# secret globally
# 2015-01-29 Adapt for migration to flask
# Cornelius Kölbel <cornelius@privacyidea.org>
#
# May 08, 2014 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: LSE
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """This module defines the RadiusTokenClass. The RADIUS token
forwards the authentication request to another RADIUS server.
The code is tested in tests/test_lib_tokens_radius
"""
import logging
import traceback
import binascii
from privacyidea.lib.tokenclass import TokenClass
from privacyidea.lib.tokens.remotetoken import RemoteTokenClass
from privacyidea.api.lib.utils import getParam, ParameterError
from privacyidea.lib.log import log_with
from privacyidea.lib.config import get_from_config
from privacyidea.lib.decorators import check_token_locked
from privacyidea.lib.radiusserver import get_radius
import pyrad.packet
from pyrad.client import Client
from pyrad.dictionary import Dictionary
from privacyidea.lib import _
# flags passed to getParam(): ``optional`` marks a parameter as optional,
# ``required`` as mandatory
optional = True
required = False

log = logging.getLogger(__name__)
###############################################
class RadiusTokenClass(RemoteTokenClass):
    """Token class that forwards the authentication request to an external
    RADIUS server and maps the RADIUS response to a privacyIDEA result.
    """

    def __init__(self, db_token):
        RemoteTokenClass.__init__(self, db_token)
        self.set_type(u"radius")
        # supported authentication modes of this token type
        self.mode = ['authenticate', 'challenge']

    @staticmethod
    def get_class_type():
        # token type identifier used in the API and the database
        return "radius"

    @staticmethod
    def get_class_prefix():
        # serial number prefix for enrolled RADIUS tokens
        return "PIRA"

    @staticmethod
    @log_with(log)
    def get_class_info(key=None, ret='all'):
        """
        returns a subtree of the token definition

        :param key: subsection identifier
        :type key: string
        :param ret: default return value, if nothing is found
        :type ret: user defined
        :return: subsection if key exists or user defined
        :rtype: dict or string
        """
        res = {'type': 'radius',
               'title': 'RADIUS Token',
               'description': _('RADIUS: Forward authentication request to a '
                                'RADIUS server.'),
               'user': ['enroll'],
               # This tokentype is enrollable in the UI for...
               'ui_enroll': ["admin", "user"],
               'policy': {},
               }
        if key:
            ret = res.get(key, {})
        else:
            if ret == 'all':
                ret = res
        return ret

    def update(self, param):
        """
        Process the enrollment parameters.  Either a RADIUS server
        definition (``radius.identifier``), the system wide settings
        (``radius.system_settings``) or individual server/secret values
        must be supplied.
        """
        # New value
        radius_identifier = getParam(param, "radius.identifier")
        self.add_tokeninfo("radius.identifier", radius_identifier)
        # old values
        if not radius_identifier:
            radiusServer = getParam(param, "radius.server", optional=required)
            self.add_tokeninfo("radius.server", radiusServer)
            radius_secret = getParam(param, "radius.secret", optional=required)
            # the RADIUS secret is stored hex encoded as the OTP key
            self.token.set_otpkey(binascii.hexlify(radius_secret))
            system_settings = getParam(param, "radius.system_settings",
                                       default=False)
            self.add_tokeninfo("radius.system_settings", system_settings)
            if not radius_identifier and not (radiusServer or radius_secret) and \
                    not system_settings:
                raise ParameterError("Missing parameter: radius.identifier", id=905)
        # if another OTP length would be specified in /admin/init this would
        # be overwritten by the parent class, which is ok.
        self.set_otplen(6)
        TokenClass.update(self, param)
        val = getParam(param, "radius.local_checkpin", optional) or 0
        self.add_tokeninfo("radius.local_checkpin", val)
        val = getParam(param, "radius.user", required)
        self.add_tokeninfo("radius.user", val)

    @property
    def check_pin_local(self):
        """
        lookup if pin should be checked locally or on radius host

        :return: bool
        """
        local_check = 1 == int(self.get_tokeninfo("radius.local_checkpin"))
        log.debug("local checking pin? {0!r}".format(local_check))
        return local_check

    @log_with(log)
    def split_pin_pass(self, passw, user=None, options=None):
        """
        Split the PIN and the OTP value.
        Only if it is locally checked and not remotely.
        """
        res = 0
        pin = ""
        otpval = passw
        if self.check_pin_local:
            (res, pin, otpval) = TokenClass.split_pin_pass(self, passw)
        return res, pin, otpval

    @log_with(log)
    @check_token_locked
    def check_otp(self, otpval, counter=None, window=None, options=None):
        """
        run the RADIUS request against the RADIUS server

        :param otpval: the OTP value
        :param counter: The counter for counter based otp values
        :type counter: int
        :param window: a counter window
        :type counter: int
        :param options: additional token specific options
        :type options: dict
        :return: counter of the matching OTP value.
        :rtype: int
        """
        # -1 signals authentication failure to the caller
        otp_count = -1
        options = options or {}
        radius_dictionary = None
        radius_identifier = self.get_tokeninfo("radius.identifier")
        radius_user = self.get_tokeninfo("radius.user")
        system_radius_settings = self.get_tokeninfo("radius.system_settings")
        if radius_identifier:
            # New configuration
            radius_server_object = get_radius(radius_identifier)
            radius_server = radius_server_object.config.server
            radius_port = radius_server_object.config.port
            radius_server = "{0!s}:{1!s}".format(radius_server, radius_port)
            radius_secret = radius_server_object.get_secret()
            radius_dictionary = radius_server_object.config.dictionary
        elif system_radius_settings:
            # system configuration
            radius_server = get_from_config("radius.server")
            radius_secret = get_from_config("radius.secret")
            # Is returned as unicode, so we convert it to utf-8
            radius_secret = radius_secret.encode("utf-8")
        else:
            # individual token settings
            radius_server = self.get_tokeninfo("radius.server")
            # Read the secret
            secret = self.token.get_otpkey()
            radius_secret = binascii.unhexlify(secret.getKey())
        # here we also need to check for radius.user
        log.debug("checking OTP len:{0!s} on radius server: {1!s}, user: {2!s}".format(len(otpval), radius_server, radius_user))
        try:
            # pyrad does not allow to set timeout and retries.
            # it defaults to retries=3, timeout=5
            # TODO: At the moment we support only one radius server.
            # No round robin.
            server = radius_server.split(':')
            r_server = server[0]
            r_authport = 1812
            # an optional ``server:port`` notation overrides the default port
            if len(server) >= 2:
                r_authport = int(server[1])
            nas_identifier = get_from_config("radius.nas_identifier",
                                             "privacyIDEA")
            if not radius_dictionary:
                radius_dictionary = get_from_config("radius.dictfile",
                                                    "/etc/privacyidea/"
                                                    "dictionary")
            log.debug("NAS Identifier: %r, "
                      "Dictionary: %r" % (nas_identifier, radius_dictionary))
            log.debug("constructing client object "
                      "with server: %r, port: %r, secret: %r" %
                      (r_server, r_authport, radius_secret))
            srv = Client(server=r_server,
                         authport=r_authport,
                         secret=radius_secret,
                         dict=Dictionary(radius_dictionary))
            req = srv.CreateAuthPacket(code=pyrad.packet.AccessRequest,
                                       User_Name=radius_user.encode('ascii'),
                                       NAS_Identifier=nas_identifier.encode('ascii'))
            req["User-Password"] = req.PwCrypt(otpval)
            if "transactionid" in options:
                req["State"] = str(options.get("transactionid"))
            response = srv.SendPacket(req)
            # NOTE(review): ``c`` is never used below — looks like leftover
            # from the commented-out challenge handling; confirm and remove.
            c = response.code
            # TODO: handle the RADIUS challenge
            """
            if response.code == pyrad.packet.AccessChallenge:
                opt = {}
                for attr in response.keys():
                    opt[attr] = response[attr]
                res = False
                log.debug("challenge returned %r " % opt)
                # now we map this to a privacyidea challenge
                if "State" in opt:
                    reply["transactionid"] = opt["State"][0]
                if "Reply-Message" in opt:
                    reply["message"] = opt["Reply-Message"][0]
            """
            if response.code == pyrad.packet.AccessAccept:
                log.info("Radiusserver %s granted "
                         "access to user %s." % (r_server, radius_user))
                # 0 signals a successful authentication
                otp_count = 0
            else:
                log.warning("Radiusserver %s"
                            "rejected access to user %s." %
                            (r_server, radius_user))
        except Exception as ex:  # pragma: no cover
            log.error("Error contacting radius Server: {0!r}".format((ex)))
            log.debug("{0!s}".format(traceback.format_exc()))
        return otp_count
|
aduffy70/diversitydb | refs/heads/master | main.py | 1 | from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.api import users
from time import time
from google.appengine.ext.webapp import template
class DiversityRecord(db.Model):
    """
    Record of a single diversity measurement
    """
    # human impact rating of the site (ViewRecordsHandler buckets ratings
    # into 11 slots, so values are presumably 0-10 — TODO confirm)
    impact_rating = db.IntegerProperty()
    # number of species observed in the sampled area
    species_count = db.IntegerProperty()
    # size of the sampled area (units not recorded here)
    sampled_area = db.FloatProperty()
    # when the sample was taken
    month = db.StringProperty()
    year = db.IntegerProperty()
    # free-form description of the site / sample
    description = db.TextProperty()
    # submitting teacher and school (must match Teacher/School entries)
    teacher = db.StringProperty()
    school = db.StringProperty()
class Teacher(db.Model):
    """
    Record of valid teacher names
    """
    # display name of the teacher, offered in the form drop-downs
    name = db.TextProperty()
class School(db.Model):
    """
    Record of valid school names
    """
    # display name of the school, offered in the form drop-downs
    name = db.TextProperty()
class MainPageHandler(webapp.RequestHandler):
    """Serves the main page with the form to view or filter data."""

    def get(self):
        current_user = users.get_current_user()
        if not current_user:
            self.redirect(users.create_login_url(self.request.uri))
        else:
            context = {
                'error_message': self.request.get('error_message'),
                'teachers': db.GqlQuery('SELECT * FROM Teacher'),
                'schools': db.GqlQuery('SELECT * FROM School'),
            }
            self.response.out.write(template.render('mainpage.html', context))

    def post(self):
        # POST behaves exactly like GET for this page
        self.get()
class DataEntryPageHandler(webapp.RequestHandler):
    """Serves the data entry page with the form to enter new data points
    and buttons to add new teachers or schools.
    """

    def get(self):
        current_user = users.get_current_user()
        if not current_user:
            self.redirect(users.create_login_url(self.request.uri))
        else:
            context = {
                'error_message': self.request.get('error_message'),
                'teachers': db.GqlQuery('SELECT * FROM Teacher'),
                'schools': db.GqlQuery('SELECT * FROM School'),
            }
            self.response.out.write(
                template.render('dataentrypage.html', context))

    def post(self):
        # POST behaves exactly like GET for this page
        self.get()
class NewRecordHandler(webapp.RequestHandler):
    """
    Validates and stores a new diversity record
    """

    def is_numeric(self, numeric_text):
        """
        Verify that a string can be converted to an integer or float
        """
        try:
            float(numeric_text) if '.' in numeric_text else int(numeric_text)
            return True
        except (TypeError, ValueError):
            # bug fix: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; only conversion errors count
            return False

    def post(self):
        """Validate the submitted form fields and store a DiversityRecord,
        then redirect back to the main page with a status message.
        """
        error_message = ''
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
        else:
            sampled_area = self.request.get('sampled_area')
            species_count = self.request.get('species_count')
            teacher = self.request.get('teacher')
            school = self.request.get('school')
            month = self.request.get('month')
            year = self.request.get('year')
            # the form uses "None"/"0" as its not-selected sentinel values
            if teacher == "None":
                error_message = 'Invalid teacher name'
            elif school == "None":
                error_message = 'Invalid school name'
            elif month == "0":
                error_message = 'Invalid month'
            elif year == "0":
                error_message = 'Invalid year'
            elif not self.is_numeric(sampled_area):
                error_message = 'Invalid sampled area'
            elif not self.is_numeric(species_count):
                error_message = 'Invalid species count'
            else:
                # NOTE(review): a float species_count like "3.5" passes
                # is_numeric() but int() below would raise — confirm the
                # form restricts species_count to whole numbers.
                diversity_record = DiversityRecord()
                diversity_record.sampled_area = float(sampled_area)
                diversity_record.species_count = int(species_count)
                diversity_record.impact_rating = int(self.request.get('impact_rating'))
                diversity_record.month = month
                diversity_record.year = int(year)
                diversity_record.description = self.request.get('description')
                diversity_record.teacher = teacher
                diversity_record.school = school
                diversity_record.put()
                error_message = 'Success - record added'
            # NOTE(review): error_message is embedded unescaped into the
            # query string; all current messages are URL-safe text.
            redirect_string = '/?error_message=' + error_message
            self.redirect(redirect_string)
class ViewRecordsHandler(webapp.RequestHandler):
    """Displays a filtered subset of the diversity records together with a
    Google Chart of average species diversity per human-impact rating."""

    def post(self):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
        else:
            teacher = self.request.get('teacher')
            school = self.request.get('school')
            month = self.request.get('month')
            year = self.request.get('year')
            # Build the GQL filter with bound parameters (:1, :2, ...)
            # instead of string concatenation, which was open to query
            # injection through the form fields.  "0" means "no filter".
            conditions = []
            params = []
            if month != "0":
                conditions.append("month = :%d" % (len(params) + 1))
                params.append(month)
            if year != "0":
                # year is stored as an int (see NewRecordHandler), so bind
                # it as one; a non-numeric value now fails here instead of
                # producing a malformed query at execution time.
                conditions.append("year = :%d" % (len(params) + 1))
                params.append(int(year))
            if teacher != "0":
                conditions.append("teacher = :%d" % (len(params) + 1))
                params.append(teacher)
            if school != "0":
                conditions.append("school = :%d" % (len(params) + 1))
                params.append(school)
            query_string = "SELECT * FROM DiversityRecord"
            if conditions:
                query_string += " WHERE " + " AND ".join(conditions)
            records = db.GqlQuery(query_string, *params)
            # Aggregate record and species counts per impact rating (0..10).
            records_by_impact = [0] * 11
            species_by_impact = [0] * 11
            averages_by_impact = [0] * 11
            for record in records:
                impact_rating = record.impact_rating
                records_by_impact[impact_rating] += 1
                species_by_impact[impact_rating] += record.species_count
            max_yaxis = 0
            chart_values = ''
            for i in range(11):
                if records_by_impact[i] > 0:
                    averages_by_impact[i] = species_by_impact[i] / float(records_by_impact[i])
                else:
                    averages_by_impact[i] = 0
                chart_values += ',' + str(averages_by_impact[i])
                if averages_by_impact[i] > max_yaxis:
                    max_yaxis = averages_by_impact[i]
            chart_values = chart_values[1:]
            # Fixed: the chart title previously contained a raw space
            # ("impact rating"), which made the URL malformed; spaces in a
            # chtt value must be encoded as '+'.
            chart_url_string = '<img src="http://chart.apis.google.com/chart?chxr=0,0,%s&chxs=0,676767,10.833,0,t,676767&chxt=y,x&chbh=a,6&chs=500x300&cht=bvg&chco=A2C180,3D7930&chds=0,%s,-3.333,100&chd=t1:%s|-1&chma=|0,2&chtt=Average+species+diversity+by+human+impact+rating&chts=676767,20" width="500" height="300" alt="Species Diversity" />' % (max_yaxis, max_yaxis, chart_values)
            template_values = {'records': records,
                               'chart_url_string': chart_url_string}
            self.response.out.write(template.render('recordspage.html', template_values))
class NewTeacherSchoolHandler(webapp.RequestHandler):
    """Adds a new teacher and/or school to the datastore, skipping names
    that already exist."""

    def post(self):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
        else:
            teacher = self.request.get('teacher')
            school = self.request.get('school')
            message = ''
            if teacher:
                teachers = db.GqlQuery('SELECT * FROM Teacher')
                # any() stops scanning as soon as a match is found, unlike
                # the previous flag-setting loop that always walked the
                # whole result set.
                if any(each.name == teacher for each in teachers):
                    message = '%s was already in the list of teachers' % teacher
                else:
                    new_teacher = Teacher()
                    new_teacher.name = teacher
                    new_teacher.put()
                    message = 'Added %s to the list of teachers' % teacher
            if school:
                schools = db.GqlQuery('Select * FROM School')
                if any(each.name == school for each in schools):
                    message = '%s was already in the list of schools' % school
                else:
                    new_school = School()
                    new_school.name = school
                    new_school.put()
                    message = 'Added %s to the list of schools' % school
            # NOTE(review): when both fields are submitted only the school
            # message survives; presumably the form sends one at a time --
            # confirm against newteacherschoolpage.html.
            template_values = {'message': message}
            self.response.out.write(template.render('newteacherschoolpage.html', template_values))

    def get(self):
        # Allow the page to be reached via GET as well.
        self.post()
def main():
    """Wire up the URL routes and start the WSGI application."""
    routes = [('/', MainPageHandler),
              ('/addrecord', NewRecordHandler),
              ('/viewrecords', ViewRecordsHandler),
              ('/new', NewTeacherSchoolHandler),
              ('/data', DataEntryPageHandler)]
    run_wsgi_app(webapp.WSGIApplication(routes, debug=True))


if __name__ == '__main__':
    main()
|
gregoryjscott/please | refs/heads/master | app/Tests/Scripts/files/py/2_error.py | 3 | raise Exception("Error!")
|
taleinat/taltools | refs/heads/master | src/taltools/combinatorics.py | 1 | import functools
import operator
def n_choose_k(n, k):
    """Return the binomial coefficient C(n, k) using exact integer math.

    After the i-th iteration the running value is a product of i+1
    consecutive integers, which is always divisible by (i+1)!, so every
    floor division is exact.  Negative n yields the generalized binomial
    coefficient.  For negative k the result is 0 by convention; the
    previous version returned 1 (the empty product) in that case.
    """
    if k < 0:
        return 0
    result = 1
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def n_next_to_k(n, k):
    """Return the falling product n * (n-1) * ... * (k+1), i.e. n!/k!
    for 0 <= k <= n.

    The explicit initial value 1 makes the empty product (n <= k) return 1;
    previously reduce() raised TypeError on the empty range.
    """
    return functools.reduce(operator.mul, range(n, k, -1), 1)
|
mindnervestech/mnrp | refs/heads/master | addons/website_google_map/__openerp__.py | 354 | {
# Addon manifest entries, read by the OpenERP module loader.
'name': 'Website Google Map',
'category': 'Hidden',  # not listed in the apps menu
'summary': '',
'version': '1.0',
'description': """
OpenERP Website Google Map
==========================
""",
'author': 'OpenERP SA',
# Required modules that must be installed first.
'depends': ['base_geolocalize', 'website_partner', 'crm_partner_assign'],
# XML data files loaded on install/update.
'data': [
    'views/google_map.xml',
],
'installable': True,
'auto_install': False,  # only installed when explicitly selected
}
|
PagerDuty/dd-agent | refs/heads/master | tests/checks/mock/test_network.py | 6 | # 3p
import mock
# project
from tests.checks.common import AgentCheckTest, Fixtures
def ss_subprocess_mock(*args, **kwargs):
    """Fake `ss` output: pick the fixture matching the last CLI flag
    ('-4' -> ss_ipv4, '-6' -> ss_ipv6); any other invocation yields None."""
    fixture_by_flag = {'-4': 'ss_ipv4', '-6': 'ss_ipv6'}
    fixture = fixture_by_flag.get(args[0][-1])
    if fixture is not None:
        return Fixtures.read_file(fixture)
def netstat_subprocess_mock(*args, **kwargs):
    """Fake subprocess runner: `ss` fails with OSError (forcing the check's
    netstat fallback), `netstat` returns the recorded fixture output."""
    command = args[0][0]
    if command == 'ss':
        raise OSError
    if command == 'netstat':
        return Fixtures.read_file('netstat')
class TestCheckNetwork(AgentCheckTest):
    """Tests for the `network` check's connection-state gauges, driven by
    the ss/netstat fixture mocks defined above."""
    CHECK_NAME = 'network'

    def setUp(self):
        # Single instance with connection-state collection enabled.
        self.config = {
            "instances": [
                {
                    "collect_connection_state": True
                }
            ]
        }
        self.load_check(self.config)

    # Expected gauge values produced from the ss_ipv4/ss_ipv6 (and
    # equivalent netstat) fixtures.
    CX_STATE_GAUGES_VALUES = {
        'system.net.udp4.connections': 2,
        'system.net.udp6.connections': 3,
        'system.net.tcp4.established': 1,
        'system.net.tcp4.opening': 0,
        'system.net.tcp4.closing': 0,
        'system.net.tcp4.listening': 2,
        'system.net.tcp4.time_wait': 2,
        'system.net.tcp6.established': 1,
        'system.net.tcp6.opening': 0,
        'system.net.tcp6.closing': 1,
        'system.net.tcp6.listening': 1,
        'system.net.tcp6.time_wait': 1,
    }

    # NOTE(review): stacked @mock.patch decorators inject mock arguments
    # bottom-up, so `mock_subprocess` appears to receive the is_linux mock
    # and vice versa -- harmless here since neither argument is used, but
    # worth confirming before relying on the names.
    @mock.patch('network.get_subprocess_output', side_effect=ss_subprocess_mock)
    @mock.patch('network.Platform.is_linux', return_value=True)
    def test_cx_state_linux_ss(self, mock_subprocess, mock_platform):
        # Metrics collected via `ss`.
        self.run_check({})

        # Assert metrics
        for metric, value in self.CX_STATE_GAUGES_VALUES.iteritems():  # Python 2 dict iterator
            self.assertMetric(metric, value=value)

    @mock.patch('network.get_subprocess_output', side_effect=netstat_subprocess_mock)
    @mock.patch('network.Platform.is_linux', return_value=True)
    def test_cx_state_linux_netstat(self, mock_subprocess, mock_platform):
        # Metrics collected via the `netstat` fallback (ss raises OSError).
        self.run_check({})

        # Assert metrics
        for metric, value in self.CX_STATE_GAUGES_VALUES.iteritems():
            self.assertMetric(metric, value=value)
|
titasakgm/brc-stock | refs/heads/master | openerp/addons/account/wizard/account_move_line_unreconcile_select.py | 56 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_move_line_unreconcile_select(osv.osv_memory):
    """Wizard: pick an account, then open its reconciled move lines so the
    user can unreconcile them."""
    _name = "account.move.line.unreconcile.select"
    _description = "Unreconciliation"
    _columns = {
        'account_id': fields.many2one('account.account', 'Account', required=True),
    }

    def action_open_window(self, cr, uid, ids, context=None):
        """Return an act_window showing the selected account's reconciled,
        non-draft journal items."""
        record = self.read(cr, uid, ids, context=context)[0]
        domain = ("[('account_id','=',%d),('reconcile_id','<>',False),"
                  "('state','<>','draft')]") % record['account_id']
        return {
            'domain': domain,
            'name': 'Unreconciliation',
            'view_type': 'form',
            'view_mode': 'tree,form',
            'view_id': False,
            'res_model': 'account.move.line',
            'type': 'ir.actions.act_window',
        }

account_move_line_unreconcile_select()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mt2d2/servo | refs/heads/master | tests/wpt/harness/wptrunner/update/base.py | 196 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Unique sentinel objects a Step may return to stop the runner:
# exit_unclean signals an aborted update, exit_clean a deliberate stop.
exit_unclean = object()
exit_clean = object()
class Step(object):
    """Base class for state-creating steps.

    When a Step is run() the current state is checked to see if the state
    from this step has already been created.  If it has, restore() is
    invoked; otherwise create() is invoked, which is expected to add items
    for every key in __class__.provides to the state object.
    """

    provides = []

    def __init__(self, logger):
        self.logger = logger

    def run(self, step_index, state):
        """Execute (or replay) this step at position *step_index*."""
        step_name = type(self).__name__
        try:
            recorded = state.steps[step_index]
        except IndexError:
            recorded = None

        if recorded == step_name:
            # Already executed in a previous run: just rehydrate.
            self.restore(state)
            return
        if recorded is not None:
            raise ValueError("Expected a %s step, got a %s step" % (step_name, recorded))

        self.create(state)
        # Every promised key must now be present in the state.
        assert set(self.provides).issubset(set(state.keys()))
        state.steps = state.steps + [step_name]

    def create(self, data):
        raise NotImplementedError

    def restore(self, state):
        self.logger.debug("Step %s using stored state" % (type(self).__name__,))
        for key in self.provides:
            assert key in state
class StepRunner(object):
    """Runs a specified series of Steps with a common State."""

    steps = []

    def __init__(self, logger, state):
        self.logger = logger
        self.state = state
        if "steps" not in state:
            state.steps = []

    def run(self):
        """Run each step in order, stopping early on an exit sentinel."""
        result = None
        for index, step_cls in enumerate(self.steps):
            self.logger.debug("Starting step %s" % step_cls.__name__)
            result = step_cls(self.logger).run(index, self.state)
            if result in (exit_clean, exit_unclean):
                break
        return result
|
Bogh/django-oscar | refs/heads/master | src/oscar/apps/catalogue/migrations/0004_auto_20150217_1710.py | 67 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter Category.slug to a plain SlugField(max_length=255)."""

    dependencies = [
        ('catalogue', '0003_data_migration_slugs'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='slug',
            field=models.SlugField(max_length=255, verbose_name='Slug'),
            # keep the existing field value semantics; no schema default change
            preserve_default=True,
        ),
    ]
|
dezynetechnologies/odoo | refs/heads/8.0 | openerp/addons/base/res/res_config.py | 243 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
import re
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp import exceptions
from lxml import etree
_logger = logging.getLogger(__name__)
class res_config_module_installation_mixin(object):
    def _install_modules(self, cr, uid, modules, context):
        """Install the requested modules and return the next action to run.

        :param modules: list of ``(module_name, browse_record | None)``
            tuples; a ``None`` record means the module is unknown locally.
        """
        ir_module = self.pool.get('ir.module.module')
        # Partition the input: names with no local record vs. records that
        # still need installing.
        missing_names = [name for name, module in modules if not module]
        pending_ids = [module.id for name, module in modules
                       if module and module.state == 'uninstalled']

        result = None
        if pending_ids:
            result = ir_module.button_immediate_install(cr, uid, pending_ids, context=context)
        #FIXME: if result is not none, the corresponding todo will be skipped because it was just marked done
        if missing_names:
            # Hand the unknown module names to the Apps client action.
            return {
                'type': 'ir.actions.client',
                'tag': 'apps',
                'params': {'modules': missing_names},
            }

        return result
class res_config_configurable(osv.osv_memory):
    ''' Base classes for new-style configuration items

    Configuration items should inherit from this class, implement
    the execute method (and optionally the cancel one) and have
    their view inherit from the related res_config_view_base view.
    '''
    _name = 'res.config'

    def _next_action(self, cr, uid, context=None):
        """Return the first open 'automatic' todo visible to the current
        user (no group restriction, or at least one group in common), or
        None when the configuration sequence is exhausted."""
        Todos = self.pool['ir.actions.todo']
        _logger.info('getting next %s', Todos)

        active_todos = Todos.browse(cr, uid,
            Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
            context=context)

        # ids of the groups the current user belongs to
        user_groups = set(map(
            lambda g: g.id,
            self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))

        valid_todos_for_user = [
            todo for todo in active_todos
            if not todo.groups_id or bool(user_groups.intersection((
                group.id for group in todo.groups_id)))
        ]

        if valid_todos_for_user:
            return valid_todos_for_user[0]

        return None

    def _next(self, cr, uid, context=None):
        """Launch the next configuration todo, or ask the web client to
        reload once there is nothing left to configure."""
        _logger.info('getting next operation')
        next = self._next_action(cr, uid, context=context)  # NOTE: shadows builtin next()
        _logger.info('next action is %s', next)
        if next:
            res = next.action_launch(context=context)
            # keep the wizard dialog alive while chaining actions
            res['nodestroy'] = False
            return res
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }

    def start(self, cr, uid, ids, context=None):
        # entry point: identical to next()
        return self.next(cr, uid, ids, context)

    def next(self, cr, uid, ids, context=None):
        """ Returns the next todo action to execute (using the default
        sort order)
        """
        return self._next(cr, uid, context=context)

    def execute(self, cr, uid, ids, context=None):
        """ Method called when the user clicks on the ``Next`` button.

        Execute *must* be overloaded unless ``action_next`` is overloaded
        (which is something you generally don't need to do).

        If ``execute`` returns an action dictionary, that action is executed
        rather than just going to the next configuration item.
        """
        raise NotImplementedError(
            'Configuration items need to implement execute')

    def cancel(self, cr, uid, ids, context=None):
        """ Method called when the user click on the ``Skip`` button.

        ``cancel`` should be overloaded instead of ``action_skip``. As with
        ``execute``, if it returns an action dictionary that action is
        executed in stead of the default (going to the next configuration item)

        The default implementation is a NOOP.

        ``cancel`` is also called by the default implementation of
        ``action_cancel``.
        """
        pass

    def action_next(self, cr, uid, ids, context=None):
        """ Action handler for the ``next`` event.

        Calls ``execute`` and -- unless ``execute`` returned an action
        dictionary -- executes the action provided by calling ``next``.
        """
        next = self.execute(cr, uid, ids, context=context)
        if next: return next
        return self.next(cr, uid, ids, context=context)

    def action_skip(self, cr, uid, ids, context=None):
        """ Action handler for the ``skip`` event.

        Calls ``cancel`` and -- unless ``cancel`` returned an action
        dictionary -- executes the action provided by calling ``next``.
        """
        next = self.cancel(cr, uid, ids, context=context)
        if next: return next
        return self.next(cr, uid, ids, context=context)

    def action_cancel(self, cr, uid, ids, context=None):
        """ Action handler for the ``cancel`` event. That event isn't
        generated by the res.config.view.base inheritable view, the
        inherited view has to overload one of the buttons (or add one
        more).

        Calls ``cancel`` and -- unless ``cancel`` returned an action
        dictionary -- executes the action provided by calling ``next``.
        """
        next = self.cancel(cr, uid, ids, context=context)
        if next: return next
        return self.next(cr, uid, ids, context=context)
class res_config_installer(osv.osv_memory, res_config_module_installation_mixin):
    """ New-style configuration base specialized for addons selection
    and installation.

    Basic usage
    -----------

    Subclasses can simply define a number of _columns as
    fields.boolean objects. The keys (column names) should be the
    names of the addons to install (when selected). Upon action
    execution, selected boolean fields (and those only) will be
    interpreted as addons to install, and batch-installed.

    Additional addons
    -----------------

    It is also possible to require the installation of an additional
    addon set when a specific preset of addons has been marked for
    installation (in the basic usage only, additionals can't depend on
    one another).

    These additionals are defined through the ``_install_if``
    property. This property is a mapping of a collection of addons (by
    name) to a collection of addons (by name) [#]_, and if all the *key*
    addons are selected for installation, then the *value* ones will
    be selected as well. For example::

        _install_if = {
            ('sale','crm'): ['sale_crm'],
        }

    This will install the ``sale_crm`` addon if and only if both the
    ``sale`` and ``crm`` addons are selected for installation.

    You can define as many additionals as you wish, and additionals
    can overlap in key and value. For instance::

        _install_if = {
            ('sale','crm'): ['sale_crm'],
            ('sale','project'): ['sale_service'],
        }

    will install both ``sale_crm`` and ``sale_service`` if all of
    ``sale``, ``crm`` and ``project`` are selected for installation.

    Hook methods
    ------------

    Subclasses might also need to express dependencies more complex
    than that provided by additionals. In this case, it's possible to
    define methods of the form ``_if_%(name)s`` where ``name`` is the
    name of a boolean field. If the field is selected, then the
    corresponding module will be marked for installation *and* the
    hook method will be executed.

    Hook methods take the usual set of parameters (cr, uid, ids,
    context) and can return a collection of additional addons to
    install (if they return anything, otherwise they should not return
    anything, though returning any "falsy" value such as None or an
    empty collection will have the same effect).

    Complete control
    ----------------

    The last hook is to simply overload the ``modules_to_install``
    method, which implements all the mechanisms above. This method
    takes the usual set of parameters (cr, uid, ids, context) and
    returns a ``set`` of addons to install (addons selected by the
    above methods minus addons from the *basic* set which are already
    installed) [#]_ so an overloader can simply manipulate the ``set``
    returned by ``res_config_installer.modules_to_install`` to add or
    remove addons.

    Skipping the installer
    ----------------------

    Unless it is removed from the view, installers have a *skip*
    button which invokes ``action_skip`` (and the ``cancel`` hook from
    ``res.config``). Hooks and additionals *are not run* when skipping
    installation, even for already installed addons.

    Again, setup your hooks accordingly.

    .. [#] note that since a mapping key needs to be hashable, it's
           possible to use a tuple or a frozenset, but not a list or a
           regular set

    .. [#] because the already-installed modules are only pruned at
           the very end of ``modules_to_install``, additionals and
           hooks depending on them *are guaranteed to execute*. Setup
           your hooks accordingly.
    """
    _name = 'res.config.installer'
    _inherit = 'res.config'

    _install_if = {}

    def already_installed(self, cr, uid, context=None):
        """ For each module, check if it's already installed and if it
        is return its name

        :returns: a list of the already installed modules in this
                  installer
        :rtype: [str]
        """
        return map(attrgetter('name'),
                   self._already_installed(cr, uid, context=context))

    def _already_installed(self, cr, uid, context=None):
        """ For each module (boolean fields in a res.config.installer),
        check if it's already installed (either 'to install', 'to upgrade'
        or 'installed') and if it is return the module's record

        :returns: a list of all installed modules in this installer
        :rtype: recordset (collection of Record)
        """
        modules = self.pool['ir.module.module']

        # by convention, every boolean column of the wizard names a module
        selectable = [field for field in self._columns
                      if type(self._columns[field]) is fields.boolean]
        return modules.browse(
            cr, uid,
            modules.search(cr, uid,
                           [('name','in',selectable),
                            ('state','in',['to install', 'installed', 'to upgrade'])],
                           context=context),
            context=context)

    def modules_to_install(self, cr, uid, ids, context=None):
        """ selects all modules to install:

        * checked boolean fields
        * return values of hook methods. Hook methods are of the form
          ``_if_%(addon_name)s``, and are called if the corresponding
          addon is marked for installation. They take the arguments
          cr, uid, ids and context, and return an iterable of addon
          names
        * additionals, additionals are setup through the ``_install_if``
          class variable. ``_install_if`` is a dict of {iterable:iterable}
          where key and value are iterables of addon names.

          If all the addons in the key are selected for installation
          (warning: addons added through hooks don't count), then the
          addons in the value are added to the set of modules to install
        * not already installed
        """
        # 1. modules named by checked boolean fields
        base = set(module_name
                   for installer in self.read(cr, uid, ids, context=context)
                   for module_name, to_install in installer.iteritems()
                   if module_name != 'id'
                   if type(self._columns.get(module_name)) is fields.boolean
                   if to_install)

        # 2. modules returned by the _if_<module> hooks of selected modules
        hooks_results = set()
        for module in base:
            hook = getattr(self, '_if_%s'% module, None)
            if hook:
                hooks_results.update(hook(cr, uid, ids, context=None) or set())

        # 3. additionals whose full requirement set is selected (hooks excluded)
        additionals = set(
            module for requirements, consequences \
                       in self._install_if.iteritems()
            if base.issuperset(requirements)
            for module in consequences)

        # prune already-installed modules only at the very end (see class doc)
        return (base | hooks_results | additionals).difference(
                    self.already_installed(cr, uid, context))

    def default_get(self, cr, uid, fields_list, context=None):
        ''' If an addon is already installed, check it by default
        '''
        defaults = super(res_config_installer, self).default_get(
            cr, uid, fields_list, context=context)

        return dict(defaults,
                    **dict.fromkeys(
                        self.already_installed(cr, uid, context=context),
                        True))

    def fields_get(self, cr, uid, fields=None, context=None, write_access=True, attributes=None):
        """ If an addon is already installed, set it to readonly as
        res.config.installer doesn't handle uninstallations of already
        installed addons
        """
        fields = super(res_config_installer, self).fields_get(
            cr, uid, fields, context, write_access, attributes)

        for name in self.already_installed(cr, uid, context=context):
            if name not in fields:
                continue
            fields[name].update(
                readonly=True,
                help= ustr(fields[name].get('help', '')) +
                     _('\n\nThis addon is already installed on your system'))

        return fields

    def execute(self, cr, uid, ids, context=None):
        """Resolve the full module set and batch-install it (mixin)."""
        to_install = list(self.modules_to_install(
            cr, uid, ids, context=context))
        _logger.info('Selecting addons %s to install', to_install)

        ir_module = self.pool.get('ir.module.module')
        modules = []
        for name in to_install:
            mod_ids = ir_module.search(cr, uid, [('name', '=', name)])
            record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
            modules.append((name, record))

        return self._install_modules(cr, uid, modules, context=context)
class res_config_settings(osv.osv_memory, res_config_module_installation_mixin):
""" Base configuration wizard for application settings. It provides support for setting
default values, assigning groups to employee users, and installing modules.
To make such a 'settings' wizard, define a model like::
class my_config_wizard(osv.osv_memory):
_name = 'my.settings'
_inherit = 'res.config.settings'
_columns = {
'default_foo': fields.type(..., default_model='my.model'),
'group_bar': fields.boolean(..., group='base.group_user', implied_group='my.group'),
'module_baz': fields.boolean(...),
'other_field': fields.type(...),
}
The method ``execute`` provides some support based on a naming convention:
* For a field like 'default_XXX', ``execute`` sets the (global) default value of
the field 'XXX' in the model named by ``default_model`` to the field's value.
* For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
to/from the implied groups of 'group', depending on the field's value.
By default 'group' is the group Employee. Groups are given by their xml id.
The attribute 'group' may contain several xml ids, separated by commas.
* For a boolean field like 'module_XXX', ``execute`` triggers the immediate
installation of the module named 'XXX' if the field has value ``True``.
* For the other fields, the method ``execute`` invokes all methods with a name
that starts with 'set_'; such methods can be defined to implement the effect
of those fields.
The method ``default_get`` retrieves values that reflect the current status of the
fields like 'default_XXX', 'group_XXX' and 'module_XXX'. It also invokes all methods
with a name that starts with 'get_default_'; such methods can be defined to provide
current values for other fields.
"""
_name = 'res.config.settings'
def copy(self, cr, uid, id, values, context=None):
    # Settings wizards are transient one-shot records; duplicating one
    # makes no sense, so always refuse.
    raise osv.except_osv(_("Cannot duplicate configuration!"), "")
def fields_view_get(self, cr, user, view_id=None, view_type='form',
                    context=None, toolbar=False, submenu=False):
    """Post-process the view arch: attach an ``onchange_module`` handler
    to every ``module_XXX`` boolean that does not already define one, so
    unchecking a module can warn about dependent uninstalls."""
    ret_val = super(res_config_settings, self).fields_view_get(
        cr, user, view_id=view_id, view_type=view_type, context=context,
        toolbar=toolbar, submenu=submenu)

    doc = etree.XML(ret_val['arch'])

    for field in ret_val['fields']:
        if not field.startswith("module_"):
            continue
        for node in doc.xpath("//field[@name='%s']" % field):
            # respect an on_change handler already set in the view
            if 'on_change' not in node.attrib:
                node.set("on_change",
                "onchange_module(%s, '%s')" % (field, field))

    ret_val['arch'] = etree.tostring(doc)
    return ret_val
def onchange_module(self, cr, uid, ids, field_value, module_name, context=None):
    """Warn when an installed ``module_XXX`` field is being unchecked:
    list the modules that would be uninstalled along with it.

    Note: the default for ``context`` used to be a mutable ``{}`` shared
    across calls (a classic Python pitfall); ``None`` is the safe,
    backward-compatible default and is handled identically by the ORM
    methods called here.
    """
    module_pool = self.pool.get('ir.module.module')
    module_ids = module_pool.search(
        cr, uid, [('name', '=', module_name.replace("module_", '')),
                  ('state', 'in', ['to install', 'installed', 'to upgrade'])],
        context=context)
    if module_ids and not field_value:
        # names of the module itself plus everything depending on it
        dep_ids = module_pool.downstream_dependencies(cr, uid, module_ids, context=context)
        dep_name = [x.shortdesc for x in module_pool.browse(
            cr, uid, dep_ids + module_ids, context=context)]
        message = '\n'.join(dep_name)
        return {
            'warning': {
                'title': _('Warning!'),
                'message': _('Disabling this option will also uninstall the following modules \n%s') % message,
            }
        }
    return {}
def _get_classified_fields(self, cr, uid, context=None):
    """ return a dictionary with the fields classified by category::

       {  'default': [('default_foo', 'model', 'foo'), ...],
          'group':   [('group_bar', [browse_group], browse_implied_group), ...],
          'module':  [('module_baz', browse_module), ...],
          'other':   ['other_field', ...],
       }
    """
    ir_model_data = self.pool['ir.model.data']
    ir_module = self.pool['ir.module.module']

    def ref(xml_id):
        # resolve an xml id ("module.identifier") to a browse record
        mod, xml = xml_id.split('.', 1)
        return ir_model_data.get_object(cr, uid, mod, xml, context=context)

    defaults, groups, modules, others = [], [], [], []
    for name, field in self._columns.items():
        if name.startswith('default_') and hasattr(field, 'default_model'):
            # default_XXX: sets the global default of field XXX on default_model
            defaults.append((name, field.default_model, name[8:]))
        elif name.startswith('group_') and isinstance(field, fields.boolean) and hasattr(field, 'implied_group'):
            # group_XXX: toggles implied_group on the groups named in the
            # comma-separated 'group' attribute (base.group_user by default)
            field_groups = getattr(field, 'group', 'base.group_user').split(',')
            groups.append((name, map(ref, field_groups), ref(field.implied_group)))
        elif name.startswith('module_') and isinstance(field, fields.boolean):
            # module_XXX: install/uninstall toggle for the module named XXX
            mod_ids = ir_module.search(cr, uid, [('name', '=', name[7:])])
            record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
            modules.append((name, record))
        else:
            others.append(name)

    return {'default': defaults, 'group': groups, 'module': modules, 'other': others}
def default_get(self, cr, uid, fields, context=None):
    """Reflect the current system state into the convention-named fields
    (default_XXX, group_XXX, module_XXX) and run get_default_* hooks."""
    ir_values = self.pool['ir.values']
    classified = self._get_classified_fields(cr, uid, context)

    res = super(res_config_settings, self).default_get(cr, uid, fields, context)

    # defaults: take the corresponding default value they set
    for name, model, field in classified['default']:
        value = ir_values.get_default(cr, uid, model, field)
        if value is not None:
            res[name] = value

    # groups: which groups are implied by the group Employee
    for name, groups, implied_group in classified['group']:
        res[name] = all(implied_group in group.implied_ids for group in groups)

    # modules: which modules are installed/to install
    for name, module in classified['module']:
        res[name] = module and module.state in ('installed', 'to install', 'to upgrade')

    # other fields: call all methods that start with 'get_default_'
    for method in dir(self):
        if method.startswith('get_default_'):
            res.update(getattr(self, method)(cr, uid, fields, context))

    return res
def execute(self, cr, uid, ids, context=None):
    """Apply the wizard: write defaults, adjust implied groups, run the
    set_* hooks, then (un)install the selected modules.

    Module (un)installation happens last because it rebuilds the
    registry, which invalidates ``self.pool`` (see the note below).
    """
    if context is None:
        context = {}
    context = dict(context, active_test=False)
    # only admins (or members of base.group_erp_manager) may apply settings
    if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
        raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
    ir_values = self.pool['ir.values']
    ir_module = self.pool['ir.module.module']
    res_groups = self.pool['res.groups']
    classified = self._get_classified_fields(cr, uid, context=context)

    config = self.browse(cr, uid, ids[0], context)

    # default values fields
    for name, model, field in classified['default']:
        ir_values.set_default(cr, SUPERUSER_ID, model, field, config[name])

    # group fields: modify group / implied groups
    for name, groups, implied_group in classified['group']:
        gids = map(int, groups)
        if config[name]:
            # checked: link the implied group (ORM (4, id) = add m2m link)
            res_groups.write(cr, uid, gids, {'implied_ids': [(4, implied_group.id)]}, context=context)
        else:
            # unchecked: unlink it ((3, id) = remove m2m link) and drop the
            # groups' users from the implied group
            res_groups.write(cr, uid, gids, {'implied_ids': [(3, implied_group.id)]}, context=context)
            uids = set()
            for group in groups:
                uids.update(map(int, group.users))
            implied_group.write({'users': [(3, u) for u in uids]})

    # other fields: execute all methods that start with 'set_'
    for method in dir(self):
        if method.startswith('set_'):
            getattr(self, method)(cr, uid, ids, context)

    # module fields: install/uninstall the selected modules
    to_install = []
    to_uninstall_ids = []
    lm = len('module_')
    for name, module in classified['module']:
        if config[name]:
            to_install.append((name[lm:], module))
        else:
            if module and module.state in ('installed', 'to upgrade'):
                to_uninstall_ids.append(module.id)

    if to_uninstall_ids:
        ir_module.button_immediate_uninstall(cr, uid, to_uninstall_ids, context=context)

    action = self._install_modules(cr, uid, to_install, context=context)

    if action:
        return action

    # After the uninstall/install calls, the self.pool is no longer valid.
    # So we reach into the RegistryManager directly.
    res_config = openerp.modules.registry.RegistryManager.get(cr.dbname)['res.config']
    config = res_config.next(cr, uid, [], context=context) or {}
    if config.get('type') not in ('ir.actions.act_window_close',):
        return config

    # force client-side reload (update user menu and current view)
    return {
        'type': 'ir.actions.client',
        'tag': 'reload',
    }
def cancel(self, cr, uid, ids, context=None):
    """Discard the edited record and reopen the settings action."""
    act_window = self.pool['ir.actions.act_window']
    matching = act_window.search(cr, uid, [('res_model', '=', self._name)])
    if not matching:
        return {}
    return act_window.read(cr, uid, matching[0], [], context=context)
def name_get(self, cr, uid, ids, context=None):
    """ Override name_get method to return an appropriate configuration wizard
    name, and not the generated name."""
    if not ids:
        return []
    # name_get may receive int id instead of an id list
    if isinstance(ids, (int, long)):  # `long`: this is Python 2 code
        ids = [ids]
    act_window = self.pool['ir.actions.act_window']
    action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)], context=context)
    name = self._name
    if action_ids:
        # use the action's label (e.g. "General Settings") as the display name
        name = act_window.read(cr, uid, action_ids[0], ['name'], context=context)['name']
    return [(record.id, name) for record in self.browse(cr, uid, ids, context=context)]
def get_option_path(self, cr, uid, menu_xml_id, context=None):
    """Resolve a menuitem xml id into its full path and its action id.

    :param string menu_xml_id: fully qualified xml id of the menuitem,
        structured as ``module_name.menuitem_xml_id``
        (e.g. ``base.menu_sale_config``)
    :return tuple:
        - t[0]: string: full path to the menuitem
          (e.g. ``Settings/Configuration/Sales``)
        - t[1]: int or long: id of the menuitem's action
    """
    module, xml_id = menu_xml_id.split('.')
    # Translate the xml id into the database id of the menuitem.
    _model, menu_id = self.pool['ir.model.data'].get_object_reference(
        cr, uid, module, xml_id)
    menu = self.pool['ir.ui.menu'].browse(cr, uid, menu_id, context=context)
    return (menu.complete_name, menu.action.id)
def get_option_name(self, cr, uid, full_field_name, context=None):
    """Return the human readable label of a configuration field.

    :param string full_field_name: the full name of the field, structured as
        ``model_name.field_name``
        (e.g. ``sale.config.settings.fetchmail_lead``)
    :return string: human readable name of the field
        (e.g. ``Create leads from incoming mails``)
    """
    # rsplit: the model name itself contains dots, only the last segment
    # is the field name.
    model_name, field_name = full_field_name.rsplit('.', 1)
    model = self.pool[model_name]
    description = model.fields_get(cr, uid, allfields=[field_name], context=context)
    return description[field_name]['string']
def get_config_warning(self, cr, msg, context=None):
    """
    Helper: return a Warning exception with the given message where the %(field:xxx)s
    and/or %(menu:yyy)s are replaced by the human readable field's name and/or menuitem's
    full path.

    Usage:
    ------
    Just include in your error message %(field:model_name.field_name)s to obtain the human
    readable field's name, and/or %(menu:module_name.menuitem_xml_id)s to obtain the menuitem's
    full path.

    Example of use:
    ---------------
    from openerp.addons.base.res.res_config import get_warning_config
    raise get_warning_config(cr, _("Error: this action is prohibited. You should check the field %(field:sale.config.settings.fetchmail_lead)s in %(menu:base.menu_sale_config)s."), context=context)

    This will return an exception containing the following message:
        Error: this action is prohibited. You should check the field Create leads from incoming mails in Settings/Configuration/Sales.

    What if there is another substitution in the message already?
    -------------------------------------------------------------
    You could have a situation where the error message you want to upgrade already contains a substitution. Example:
        Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.
    What you want to do here is simply to replace the path by %(menu:account.menu_account_config)s, and leave the rest alone.
    In order to do that, you can use the double percent (%%) to escape your new substitution, like so:
        Cannot find any account journal of %s type for this company.\n\nYou can create one in the %%(menu:account.menu_account_config)s.
    """
    # self.pool may be stale at this point, so reach into the registry directly.
    res_config_obj = openerp.registry(cr.dbname)['res.config.settings']
    # Matches %(menu:xxx)s and %(field:xxx)s placeholders; captures "menu:xxx"
    # or "field:xxx" as a single group.
    regex_path = r'%\(((?:menu|field):[a-z_\.]*)\)s'

    # Process the message
    # 1/ find the menu and/or field references, put them in a list
    # (re.I so capitalized model/field names in the message still match).
    references = re.findall(regex_path, msg, flags=re.I)

    # 2/ fetch the menu and/or field replacement values (full path and
    # human readable field's name) and the action_id if any
    values = {}
    action_id = None
    for item in references:
        ref_type, ref = item.split(':')
        if ref_type == 'menu':
            # NOTE: if several menu references appear, only the action of the
            # last one is kept for the redirect button.
            values[item], action_id = res_config_obj.get_option_path(cr, SUPERUSER_ID, ref, context=context)
        elif ref_type == 'field':
            values[item] = res_config_obj.get_option_name(cr, SUPERUSER_ID, ref, context=context)

    # 3/ substitute and return the result: a RedirectWarning (with a button
    # to the configuration panel) when a menu action was found, otherwise a
    # plain Warning.
    if (action_id):
        return exceptions.RedirectWarning(msg % values, action_id, _('Go to the configuration panel'))
    return exceptions.Warning(msg % values)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
consulo/consulo-python | refs/heads/master | plugin/src/main/dist/helpers/python-skeletons/numpy/__init__.py | 80 | """Skeleton for 'numpy' module.
Project: NumPy 1.8.0 <http://www.numpy.org//>
"""
from . import core
from .core import *
__all__ = []
__all__.extend(core.__all__) |
rgeleta/odoo | refs/heads/8.0 | openerp/report/render/__init__.py | 381 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from simple import simple
from rml import rml, rml2html, rml2txt, odt2odt , html2html, makohtml2html
from render import render
try:
from PIL import Image
except ImportError:
import logging
_logger = logging.getLogger(__name__)
_logger.warning('Python Imaging not installed, you can use only .JPG pictures !')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kawamon/hue | refs/heads/master | desktop/core/src/desktop/lib/connectors/api_tests.py | 2 | #!/usr/bin/env python
## -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import unittest
from django.urls import reverse
from nose.plugins.skip import SkipTest
from nose.tools import assert_equal, assert_true, assert_false
from desktop.auth.backend import rewrite_user, is_admin
from desktop.conf import ENABLE_CONNECTORS, ENABLE_ORGANIZATIONS
from desktop.lib.connectors.api import _get_installed_connectors
from desktop.lib.django_test_util import make_logged_in_client
from useradmin.models import User, update_app_permissions, get_default_user_group, Connector
from useradmin.permissions import HuePermission, GroupPermission
if sys.version_info[0] > 2:
from unittest.mock import patch, Mock
else:
from mock import patch, Mock
class TestApi(object):
    """Tests for the connectors REST API (desktop.lib.connectors.api)."""

    def setUp(self):
        # Recreate a non-superuser admin for each test so the endpoint's
        # admin-permission checks are actually exercised.
        self.client = make_logged_in_client(username="admin_test_connector", recreate=True, is_superuser=False, is_admin=True)
        self.user = User.objects.get(username="admin_test_connector")

    @classmethod
    def setUpClass(cls):
        # Connector support is off by default; enable it for the whole class
        # and keep the reset callables so tearDownClass can restore the config.
        cls._class_resets = [
            ENABLE_CONNECTORS.set_for_testing(True),
        ]

    @classmethod
    def tearDownClass(cls):
        for reset in cls._class_resets:
            reset()

    def test_install_connector_examples(self):
        # Patch out the example creation and the permission refresh so this
        # test only verifies the endpoint's status code and message formatting.
        with patch('desktop.lib.connectors.api._create_connector_examples') as _create_connector_examples:
            with patch('desktop.lib.connectors.api.update_app_permissions') as update_app_permissions:
                # (added_connectors, skipped_connectors) as returned by the helper.
                _create_connector_examples.return_value = ['Connector 1'], ['Connector 2']

                response = self.client.post(
                    reverse('connectors.api.install_connector_examples')
                )

                data = json.loads(response.content)
                assert_equal(200, response.status_code)
                assert_equal(
                    'Added connectors: Connector 1. '
                    'Already installed connectors: Connector 2',
                    data['message'],
                    data
                )
|
girving/tensorflow | refs/heads/master | tensorflow/python/estimator/inputs/queues/feeding_functions.py | 20 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _fill_array(arr, seq, fillvalue=0):
"""Recursively fills padded arr with elements from seq.
If length of seq is less than arr padded length, fillvalue used.
Args:
arr: Padded tensor of shape [batch_size, ..., max_padded_dim_len].
seq: Non-padded list of data samples of shape
[batch_size, ..., padded_dim(None)]
fillvalue: Default fillvalue to use.
"""
if arr.ndim == 1:
try:
len_ = len(seq)
except TypeError:
len_ = 0
arr[:len_] = seq
arr[len_:] = fillvalue
else:
for subarr, subseq in six.moves.zip_longest(arr, seq, fillvalue=()):
_fill_array(subarr, subseq, fillvalue)
def _pad_if_needed(batch_key_item, fillvalue=0):
""" Returns padded batch.
Args:
batch_key_item: List of data samples of any type with shape
[batch_size, ..., padded_dim(None)].
fillvalue: Default fillvalue to use.
Returns:
Padded with zeros tensor of same type and shape
[batch_size, ..., max_padded_dim_len].
Raises:
ValueError if data samples have different shapes (except last padded dim).
"""
shapes = [
seq.shape[:-1] if len(seq.shape) > 0 else -1 for seq in batch_key_item
]
if not all(shapes[0] == x for x in shapes):
raise ValueError("Array shapes must match.")
last_length = [
seq.shape[-1] if len(seq.shape) > 0 else 0 for seq in batch_key_item
]
if all([x == last_length[0] for x in last_length]):
return batch_key_item
batch_size = len(batch_key_item)
max_sequence_length = max(last_length)
result_batch = np.zeros(
shape=[batch_size] + list(shapes[0]) + [max_sequence_length],
dtype=batch_key_item[0].dtype)
_fill_array(result_batch, batch_key_item, fillvalue)
return result_batch
def _get_integer_indices_for_next_batch(batch_indices_start, batch_size,
epoch_end, array_length, current_epoch,
total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
current_epoch: Integer, the epoch number has been emitted.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [
j % array_length for j in range(batch_indices_start, batch_indices_end)
]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# Now we might have emitted more data for expected epochs. Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
  """Creates feed dictionaries from `OrderedDict`s of numpy arrays."""

  def __init__(self,
               placeholders,
               ordered_dict_of_arrays,
               batch_size,
               random_start=False,
               seed=None,
               num_epochs=None):
    # One placeholder for the row index plus one per column.
    # NOTE(review): the message reports len(ordered_dict_of_arrays) as the
    # expected count but the check requires one more (the index placeholder).
    if len(placeholders) != len(ordered_dict_of_arrays) + 1:
      raise ValueError("Expected {} placeholders; got {}.".format(
          len(ordered_dict_of_arrays), len(placeholders)))
    self._index_placeholder = placeholders[0]
    self._col_placeholders = placeholders[1:]
    self._ordered_dict_of_arrays = ordered_dict_of_arrays
    # All columns must have the same number of rows; use the first as reference.
    self._max = len(next(iter(ordered_dict_of_arrays.values())))
    for _, v in ordered_dict_of_arrays.items():
      if len(v) != self._max:
        raise ValueError("Array lengths must match.")
    self._batch_size = batch_size
    self._num_epochs = num_epochs
    self._epoch = 0
    random.seed(seed)
    # Optionally start at a random row; the row just before the starting
    # point then marks the end of an epoch.
    self._trav = random.randrange(self._max) if random_start else 0
    self._epoch_end = (self._trav - 1) % self._max

  def __call__(self):
    # Compute this batch's index window, updating the epoch counter.
    integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
        batch_indices_start=self._trav,
        batch_size=self._batch_size,
        epoch_end=self._epoch_end,
        array_length=self._max,
        current_epoch=self._epoch,
        total_epochs=self._num_epochs)

    # Advance the traversal pointer past this batch, wrapping around.
    self._trav = (integer_indexes[-1] + 1) % self._max
    feed_dict = {self._index_placeholder: integer_indexes}
    cols = [
        column[integer_indexes]
        for column in self._ordered_dict_of_arrays.values()
    ]
    feed_dict.update(dict(zip(self._col_placeholders, cols)))
    return feed_dict
class _PandasFeedFn(object):
  """Creates feed dictionaries from pandas `DataFrames`."""

  def __init__(self,
               placeholders,
               dataframe,
               batch_size,
               random_start=False,
               seed=None,
               num_epochs=None):
    # One placeholder for the DataFrame index plus one per column.
    if len(placeholders) != len(dataframe.columns) + 1:
      raise ValueError("Expected {} placeholders; got {}.".format(
          len(dataframe.columns) + 1, len(placeholders)))
    self._index_placeholder = placeholders[0]
    self._col_placeholders = placeholders[1:]
    self._dataframe = dataframe
    self._max = len(dataframe)
    self._batch_size = batch_size
    self._num_epochs = num_epochs
    self._epoch = 0
    random.seed(seed)
    # Optionally start at a random row; the row just before the starting
    # point then marks the end of an epoch.
    self._trav = random.randrange(self._max) if random_start else 0
    self._epoch_end = (self._trav - 1) % self._max

  def __call__(self):
    # Compute this batch's positional index window, updating the epoch counter.
    integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
        batch_indices_start=self._trav,
        batch_size=self._batch_size,
        epoch_end=self._epoch_end,
        array_length=self._max,
        current_epoch=self._epoch,
        total_epochs=self._num_epochs)

    # Advance the traversal pointer past this batch, wrapping around.
    self._trav = (integer_indexes[-1] + 1) % self._max
    result = self._dataframe.iloc[integer_indexes]
    cols = [result[col].values for col in result.columns]
    # feed_dict maps each column placeholder to its batch of values; the
    # index placeholder receives the DataFrame index values (not positions).
    feed_dict = dict(zip(self._col_placeholders, cols))
    feed_dict[self._index_placeholder] = result.index.values
    return feed_dict
class _GeneratorFeedFn(object):
  """Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""

  def __init__(self,
               placeholders,
               generator,
               batch_size,
               random_start=False,
               seed=None,
               num_epochs=None,
               pad_value=None):
    # Peek at the first sample (from a throwaway generator instance) to
    # learn the set of keys; one placeholder is expected per key.
    first_sample = next(generator())
    if len(placeholders) != len(first_sample):
      raise ValueError("Expected {} placeholders; got {}.".format(
          len(first_sample), len(placeholders)))
    # Keys are sorted so that placeholders line up deterministically.
    self._keys = sorted(list(first_sample.keys()))
    self._col_placeholders = placeholders
    self._generator_function = generator
    self._iterator = generator()
    self._batch_size = batch_size
    self._num_epochs = num_epochs
    self._epoch = 0
    self._pad_value = pad_value
    random.seed(seed)

  def __call__(self):
    if self._num_epochs and self._epoch >= self._num_epochs:
      raise errors.OutOfRangeError(None, None,
                                   "Already emitted %s epochs." % self._epoch)

    # Accumulate rows per placeholder until a full batch is collected.
    # NOTE: a batch may span an epoch boundary; the generator is simply
    # restarted mid-batch when it is exhausted.
    list_dict = {}
    list_dict_size = 0
    while list_dict_size < self._batch_size:
      try:
        data_row = next(self._iterator)
      except StopIteration:
        self._epoch += 1
        self._iterator = self._generator_function()
        data_row = next(self._iterator)
      for index, key in enumerate(self._keys):
        if key not in data_row.keys():
          raise KeyError("key mismatch between dicts emitted by GenFun "
                         "Expected {} keys; got {}".format(
                             self._keys, data_row.keys()))
        list_dict.setdefault(self._col_placeholders[index], list()).append(
            data_row[key])
      list_dict_size += 1

    # Optionally pad each accumulated batch along its last dimension so
    # rows of different lengths can be stacked into one array.
    if self._pad_value is not None:
      feed_dict = {
          key: np.asarray(_pad_if_needed(item, self._pad_value))
          for key, item in list(list_dict.items())
      }
    else:
      feed_dict = {
          key: np.asarray(item)
          for key, item in list(list_dict.items())
      }
    return feed_dict
def _enqueue_data(data,
                  capacity,
                  shuffle=False,
                  min_after_dequeue=None,
                  num_threads=1,
                  seed=None,
                  name="enqueue_input",
                  enqueue_size=1,
                  num_epochs=None,
                  pad_value=None):
  """Creates a queue filled from a numpy array or pandas `DataFrame`.

  Returns a queue filled with the rows of the given (`OrderedDict` of) array
  or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
  `Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
  numpy arrays, the first enqueued `Tensor` contains the row number.

  Args:
    data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
      yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
      into the queue.
    capacity: the capacity of the queue.
    shuffle: whether or not to shuffle the rows of the array.
    min_after_dequeue: minimum number of elements that can remain in the queue
      after a dequeue operation. Only used when `shuffle` is true. If not set,
      defaults to `capacity` / 4.
    num_threads: number of threads used for reading and enqueueing.
    seed: used to seed shuffling and reader starting points.
    name: a scope name identifying the data.
    enqueue_size: the number of rows to enqueue per step.
    num_epochs: limit enqueuing to a specified number of epochs, if provided.
    pad_value: default value for dynamic padding of data samples, if provided.

  Returns:
    A queue filled with the rows of the given (`OrderedDict` of) array or
      `DataFrame`.

  Raises:
    TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
      arrays, a numpy `ndarray`, or a generator producing these.
    NotImplementedError: padding and shuffling data at the same time.
    NotImplementedError: padding usage with non generator data type.
  """
  with ops.name_scope(name):
    # Select queue dtypes/shapes and the feed-function class from the type
    # of `data`. The first slot is the row index, except for generators.
    if isinstance(data, np.ndarray):
      types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
      queue_shapes = [(), data.shape[1:]]
      get_feed_fn = _ArrayFeedFn
    elif isinstance(data, collections.OrderedDict):
      types = [dtypes.int64
              ] + [dtypes.as_dtype(col.dtype) for col in data.values()]
      queue_shapes = [()] + [col.shape[1:] for col in data.values()]
      get_feed_fn = _OrderedDictNumpyFeedFn
    elif isinstance(data, tp.FunctionType):
      # Generator: peek at the first sample to determine dtypes and shapes.
      x_first_el = six.next(data())
      x_first_keys = sorted(x_first_el.keys())
      x_first_values = [x_first_el[key] for key in x_first_keys]
      types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
      queue_shapes = [col.shape for col in x_first_values]
      get_feed_fn = _GeneratorFeedFn
    elif HAS_PANDAS and isinstance(data, pd.DataFrame):
      types = [
          dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
      ]
      queue_shapes = [() for _ in types]
      get_feed_fn = _PandasFeedFn
    else:
      raise TypeError(
          "data must be either a numpy array or pandas DataFrame if pandas is "
          "installed; got {}".format(type(data).__name__))

    pad_data = pad_value is not None
    if pad_data and get_feed_fn is not _GeneratorFeedFn:
      raise NotImplementedError(
          "padding is only available with generator usage")
    if shuffle and pad_data:
      raise NotImplementedError(
          "padding and shuffling data at the same time is not implemented")

    # TODO(jamieas): TensorBoard warnings for all warnings below once available.

    if num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with num_epochs and num_threads > 1. "
          "num_epochs is applied per thread, so this will produce more "
          "epochs than you probably intend. "
          "If you want to limit epochs, use one thread.")
    if shuffle and num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with shuffle=True, num_threads > 1, and "
          "num_epochs. This will create multiple threads, all reading the "
          "array/dataframe in order adding to the same shuffling queue; the "
          "results will likely not be sufficiently shuffled.")
    if not shuffle and num_threads > 1:
      logging.warning(
          "enqueue_data was called with shuffle=False and num_threads > 1. "
          "This will create multiple threads, all reading the "
          "array/dataframe in order. If you want examples read in order, use"
          " one thread; if you want multiple threads, enable shuffling.")

    # Choose the queue implementation matching the requested behavior.
    if shuffle:
      min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
                              min_after_dequeue)
      queue = data_flow_ops.RandomShuffleQueue(
          capacity,
          min_after_dequeue,
          dtypes=types,
          shapes=queue_shapes,
          seed=seed)
    elif pad_data:
      min_after_dequeue = 0  # just for the summary text
      # Replace the padded (last) dimension with None so the
      # PaddingFIFOQueue accepts variable-length rows.
      queue_shapes = list(
          map(lambda x: tuple(list(x[:-1]) + [None]) if len(x) > 0 else x,
              queue_shapes))
      queue = data_flow_ops.PaddingFIFOQueue(
          capacity, dtypes=types, shapes=queue_shapes)
    else:
      min_after_dequeue = 0  # just for the summary text
      queue = data_flow_ops.FIFOQueue(
          capacity, dtypes=types, shapes=queue_shapes)

    enqueue_ops = []
    feed_fns = []

    for i in range(num_threads):
      # Note the placeholders have no shapes, so they will accept any
      # enqueue_size. enqueue_many below will break them up.
      placeholders = [array_ops.placeholder(t) for t in types]

      enqueue_ops.append(queue.enqueue_many(placeholders))
      # Derive a distinct deterministic seed per thread from the base seed.
      seed_i = None if seed is None else (i + 1) * seed
      if not pad_data:
        feed_fns.append(
            get_feed_fn(
                placeholders,
                data,
                enqueue_size,
                random_start=shuffle,
                seed=seed_i,
                num_epochs=num_epochs))
      else:
        feed_fns.append(
            get_feed_fn(
                placeholders,
                data,
                enqueue_size,
                random_start=shuffle,
                seed=seed_i,
                num_epochs=num_epochs,
                pad_value=pad_value))

    runner = fqr._FeedingQueueRunner(  # pylint: disable=protected-access
        queue=queue,
        enqueue_ops=enqueue_ops,
        feed_fns=feed_fns)
    queue_runner.add_queue_runner(runner)

    # Summary: fraction of usable queue capacity (above the
    # min_after_dequeue floor) that is currently filled.
    full = (
        math_ops.cast(
            math_ops.maximum(0,
                             queue.size() - min_after_dequeue), dtypes.float32)
        * (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
                    (queue.name, min_after_dequeue,
                     capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    return queue
|
rajat1994/scikit-learn | refs/heads/master | examples/decomposition/plot_sparse_coding.py | 247 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
    """Discrete sub-sampled Ricker (Mexican hat) wavelet.

    Parameters
    ----------
    resolution : int
        Number of sample points.
    center : float
        Position of the wavelet's peak.
    width : float
        Width parameter of the wavelet.

    Returns
    -------
    x : ndarray of shape (resolution,)
        The sampled wavelet.
    """
    x = np.linspace(0, resolution - 1, resolution)
    # Normalization constant is 2 / (sqrt(3 * width) * pi ** (1 / 4)).
    # Bug fix: the previous `np.pi ** 1 / 4` parses as (pi ** 1) / 4 because
    # exponentiation binds tighter than division; `np.pi ** 0.25` is the
    # intended fourth root of pi.
    x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))
         * (1 - ((x - center) ** 2 / width ** 2))
         * np.exp((-(x - center) ** 2) / (2 * width ** 2)))
    return x
def ricker_matrix(width, resolution, n_components):
    """Dictionary of Ricker (Mexican hat) wavelets, one atom per row.

    Atoms are centered at evenly spaced positions and L2-normalized.
    """
    centers = np.linspace(0, resolution - 1, n_components)
    atoms = np.empty((n_components, resolution))
    for row_index, center in enumerate(centers):
        atoms[row_index] = ricker_function(resolution, center, width)
    # Normalize each atom to unit L2 norm.
    norms = np.sqrt(np.sum(atoms ** 2, axis=1))
    atoms /= norms[:, np.newaxis]
    return atoms
resolution = 1024
subsampling = 3  # subsampling factor
width = 100
# Use floor division so n_components stays an integer under both Python 2
# and Python 3 (plain `/` yields a float on Python 3, breaking array
# shape arguments downstream). The resulting value is unchanged.
n_components = resolution // subsampling

# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
                        n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
                for w in (10, 50, 100, 500, 1000))]

# Generate a signal: piecewise constant, 3.0 on the first quarter and
# -1.0 elsewhere.
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.

# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]

pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
                                         ('fixed width', 'multiple widths'))):
    pl.subplot(1, 2, subplot + 1)
    pl.title('Sparse coding against %s dictionary' % title)
    pl.plot(y, ls='dotted', label='Original signal')
    # Do a wavelet approximation
    for title, algo, alpha, n_nonzero in estimators:
        coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
                            transform_alpha=alpha, transform_algorithm=algo)
        x = coder.transform(y)
        density = len(np.flatnonzero(x))
        x = np.ravel(np.dot(x, D))
        squared_error = np.sum((y - x) ** 2)
        pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
                % (title, density, squared_error))

    # Soft thresholding debiasing: refit the amplitudes of the selected
    # atoms by least squares to remove the shrinkage bias.
    coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
                        transform_alpha=20)
    x = coder.transform(y)
    _, idx = np.where(x != 0)
    x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
    x = np.ravel(np.dot(x, D))
    squared_error = np.sum((y - x) ** 2)
    pl.plot(x,
            label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
            (len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
|
sandeepkbhat/pylearn2 | refs/heads/master | pylearn2/scripts/__init__.py | 49 | """
Some scripts define objects that we want to import via yaml files
that we pass to the script, so this directory must be a python
module, rather than just a directory full of scripts.
"""
|
br0ziliy/oh-my-vagrant | refs/heads/master | extras/centos-ci.py | 1 | #!/usr/bin/python
# modified from:
# https://github.com/kbsingh/centos-ci-scripts/blob/master/build_python_script.py
# usage: centos-ci.py giturl [branch [commands]]
import os
import sys
import json
import urllib
import subprocess
# NOTE: Python 2 script (print statements, urllib.urlopen).
# Provisions a CentOS CI (Duffy) host, installs oh-my-vagrant, clones the
# given git repo on it and runs a command, then releases the host.
url_base = 'http://admin.ci.centos.org:8080'
# API key comes from the environment, falling back to a local key file.
apikey = os.environ.get('DUFFY_API_KEY')
if apikey is None:
    apikey = open('duffy.key', 'r').read().strip()
ver = '7'
arch = 'x86_64'
count = 1
git_url = sys.argv[1]
branch = 'master'
if len(sys.argv) > 2: branch = sys.argv[2]
# Clone into a folder named after this script (without the .py extension).
folder = os.path.splitext(os.path.basename(__file__))[0]
run = 'make vtest' # the omv vtest cmd is a good option to run from this target
if len(sys.argv) > 3: run = ' '.join(sys.argv[3:])
get_nodes_url = "%s/Node/get?key=%s&ver=%s&arch=%s&i_count=%s" % (url_base, apikey, ver, arch, count)
data = json.loads(urllib.urlopen(get_nodes_url).read()) # request host(s)
hosts = data['hosts']
ssid = data['ssid']
done_nodes_url = "%s/Node/done?key=%s&ssid=%s" % (url_base, apikey, ssid)
host = hosts[0]
# Disable host-key checking: Duffy hands out fresh throwaway machines.
ssh = "ssh -t -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@%s" % host
yum = 'yum -y install git wget'
omv = 'wget https://github.com/purpleidea/oh-my-vagrant/raw/master/extras/install-omv.sh && chmod u+x install-omv.sh && ./install-omv.sh'
cmd = "%s '%s && %s'" % (ssh, yum, omv) # setup
print cmd
r = subprocess.call(cmd, shell=True)
if r != 0:
    # NOTE: we don't clean up the host here, so that it can be inspected!
    print "Error configuring omv on: %s" % host
    sys.exit(r)
# the second ssh call will run with the omv /etc/profile.d/ script loaded
git = "git clone --recursive %s %s && cd %s && git checkout %s" % (git_url, folder, folder, branch)
cmd = "%s '%s && %s'" % (ssh, git, run) # run
print cmd
r = subprocess.call(cmd, shell=True)
if r != 0:
    # Job failed: still release the host below, then exit with its status.
    print "Error running job on: %s" % host
output = urllib.urlopen(done_nodes_url).read() # free host(s)
if output != 'Done':
    print "Error freeing host: %s" % host
# Propagate the job's exit status.
sys.exit(r)
|
RagBillySandstone/google-python-exercises | refs/heads/master | basic/solution/mimic.py | 208 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next work.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
  """Returns mimic dict mapping each word to list of words which follow it."""
  # +++your code here+++
  # LAB(begin solution)
  # Read the whole file at once; a single split() then yields every word
  # (reading line by line would complicate tracking the previous word).
  with open(filename, 'r') as f:
    text = f.read()
  words = text.split()
  # 'word_map' rather than 'mimic_dict' -- the original name shadowed this
  # function's own name, which would break any recursive or reflective use.
  word_map = {}
  prev = ''  # by convention the empty string precedes the first word
  for word in words:
    # setdefault creates the list on first sight of 'prev', then appends.
    word_map.setdefault(prev, []).append(word)
    prev = word
  return word_map
  # LAB(replace solution)
  # return
  # LAB(end solution)
def print_mimic(mimic_dict, word):
  """Given mimic dict and start word, prints 200 random words.

  Starting from `word`, repeatedly picks a random follower from the dict;
  if the current word has no followers, restarts from the empty string.
  """
  # +++your code here+++
  # LAB(begin solution)
  # Python 2 print with trailing comma: words flow on one line, space-joined.
  for unused_i in range(200):
    print word,
    nexts = mimic_dict.get(word)  # Returns None if not found
    if not nexts:
      nexts = mimic_dict['']  # Fallback to '' if not found
    word = random.choice(nexts)
  # The 'unused_' prefix turns off the lint warning about the unused variable.
  # LAB(replace solution)
  # return
  # LAB(end solution)
# Provided main(), calls mimic_dict() and mimic()
def main():
  """Validate argv, build the mimic dict for the given file, print 200 words."""
  if len(sys.argv) != 2:
    # print(x) with a single argument behaves identically under Python 2.
    print('usage: ./mimic.py file-to-read')
    sys.exit(1)
  # 'word_dict' rather than 'dict' -- the original shadowed the builtin.
  word_dict = mimic_dict(sys.argv[1])
  print_mimic(word_dict, '')
if __name__ == '__main__':
main()
|
Nyancoins/NyanFaucet | refs/heads/master | nyanfaucet/cryptocoin/views.py | 6445 | from django.shortcuts import render
# Create your views here.
|
altairpearl/scikit-learn | refs/heads/master | sklearn/ensemble/tests/test_gradient_boosting.py | 43 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample: a linearly separable 2-D problem shared by many tests below.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# Shared module-level RNG; seeding makes the permutations below reproducible.
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
    """Check classification on the toy dataset for one presort/loss combo."""
    clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
                                     random_state=1, presort=presort)
    # predicting before fit must raise
    assert_raises(ValueError, clf.predict, T)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(10, len(clf.estimators_))
    # training loss should decrease at least once across stages
    deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
    assert_true(np.any(deviance_decrease >= 0.0))
    leaves = clf.apply(X)
    # (n_samples, n_estimators, n_trees_per_stage) for binary classification
    assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
    """Yield toy-classification checks for every presort/loss combination."""
    for presort, loss in product(('auto', True, False),
                                 ('deviance', 'exponential')):
        yield check_classification_toy, presort, loss
def test_parameter_checks():
    """Check input parameter validation: invalid values must raise ValueError."""
    assert_raises(ValueError,
                  GradientBoostingClassifier(n_estimators=0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(n_estimators=-1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(loss='foobar').fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
                  X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
                  X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(subsample=0.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(subsample=1.1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(subsample=-0.1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(max_depth=0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(init={}).fit, X, y)
    # test fit before feature importance
    assert_raises(ValueError,
                  lambda: GradientBoostingClassifier().feature_importances_)
    # deviance requires ``n_classes >= 2``.
    assert_raises(ValueError,
                  lambda X, y: GradientBoostingClassifier(
                      loss='deviance').fit(X, y),
                  X, [0, 0, 0, 0])
def test_loss_function():
    """Regression-only losses must be rejected by the classifier and
    classification-only losses by the regressor."""
    for reg_loss in ('ls', 'lad', 'quantile', 'huber'):
        est = GradientBoostingClassifier(loss=reg_loss)
        assert_raises(ValueError, est.fit, X, y)
    for clf_loss in ('deviance', 'exponential'):
        est = GradientBoostingRegressor(loss=clf_loss)
        assert_raises(ValueError, est.fit, X, y)
def check_classification_synthetic(presort, loss):
    """Test GradientBoostingClassifier on the synthetic dataset used by
    Hastie et al. in ESLII Example 12.7: error rates must stay low."""
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    X_train, X_test = X[:2000], X[2000:]
    y_train, y_test = y[:2000], y[2000:]
    gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
                                      max_depth=1, loss=loss,
                                      learning_rate=1.0, random_state=0)
    gbrt.fit(X_train, y_train)
    error_rate = (1.0 - gbrt.score(X_test, y_test))
    assert_less(error_rate, 0.09)
    # subsampling (stochastic gradient boosting) should do at least as well
    gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
                                      max_depth=1, loss=loss,
                                      learning_rate=1.0, subsample=0.5,
                                      random_state=0,
                                      presort=presort)
    gbrt.fit(X_train, y_train)
    error_rate = (1.0 - gbrt.score(X_test, y_test))
    assert_less(error_rate, 0.08)
def test_classification_synthetic():
    """Yield synthetic-dataset checks for every presort/loss combination."""
    for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
        yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
    """Check consistency on the Boston house-prices dataset.

    Also verifies that uniform sample weights (None, ones, 2*ones) all
    yield identical predictions.
    """
    ones = np.ones(len(boston.target))
    last_y_pred = None
    for sample_weight in None, ones, 2 * ones:
        clf = GradientBoostingRegressor(n_estimators=100,
                                        loss=loss,
                                        max_depth=4,
                                        subsample=subsample,
                                        min_samples_split=2,
                                        random_state=1,
                                        presort=presort)
        # predicting before fit must raise
        assert_raises(ValueError, clf.predict, boston.data)
        clf.fit(boston.data, boston.target,
                sample_weight=sample_weight)
        leaves = clf.apply(boston.data)
        assert_equal(leaves.shape, (506, 100))
        y_pred = clf.predict(boston.data)
        mse = mean_squared_error(boston.target, y_pred)
        assert_less(mse, 6.0)
        # uniform weights must not change the model
        if last_y_pred is not None:
            assert_array_almost_equal(last_y_pred, y_pred)
        last_y_pred = y_pred
def test_boston():
    """Yield Boston checks for every presort/loss/subsample combination."""
    for presort, loss, subsample in product(('auto', True, False),
                                            ('ls', 'lad', 'huber'),
                                            (1.0, 0.5)):
        yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
    """Check consistency on the iris dataset (multi-class)."""
    clf = GradientBoostingClassifier(n_estimators=100,
                                     loss='deviance',
                                     random_state=1,
                                     subsample=subsample,
                                     presort=presort)
    clf.fit(iris.data, iris.target, sample_weight=sample_weight)
    score = clf.score(iris.data, iris.target)
    assert_greater(score, 0.9)
    leaves = clf.apply(iris.data)
    # one tree per class per stage => last axis is n_classes == 3
    assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
    """Yield iris checks for every presort/subsample/sample_weight combo."""
    ones = np.ones(len(iris.target))
    for presort, subsample, sample_weight in product(('auto', True, False),
                                                     (1.0, 0.5),
                                                     (None, ones)):
        yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
    """MSE bounds on the three synthetic Friedman regression datasets.

    Datasets as used in Leo Breiman, "Bagging Predictors",
    Machine Learning 24(2): 123-140 (1996).
    """
    random_state = check_random_state(1)
    regression_params = {'n_estimators': 100, 'max_depth': 4,
                         'min_samples_split': 2, 'learning_rate': 0.1,
                         'loss': 'ls'}
    # Friedman1
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=random_state,
                                   noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    # default-parameter estimator for Friedman1
    for presort in True, False:
        clf = GradientBoostingRegressor(presort=presort)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 5.0)
    # Friedman2
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    for presort in True, False:
        regression_params['presort'] = presort
        clf = GradientBoostingRegressor(**regression_params)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 1700.0)
    # Friedman3
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    for presort in True, False:
        regression_params['presort'] = presort
        clf = GradientBoostingRegressor(**regression_params)
        clf.fit(X_train, y_train)
        mse = mean_squared_error(y_test, clf.predict(X_test))
        assert_less(mse, 0.015)
def test_feature_importances():
    """feature_importances_ must be available after fit and drive transform."""
    X = np.array(boston.data, dtype=np.float32)
    y = np.array(boston.target, dtype=np.float32)
    for presort in True, False:
        clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
                                        min_samples_split=2, random_state=1,
                                        presort=presort)
        clf.fit(X, y)
        assert_true(hasattr(clf, 'feature_importances_'))
        # XXX: Remove this test in 0.19 after transform support to estimators
        # is removed.
        X_new = assert_warns(
            DeprecationWarning, clf.transform, X, threshold="mean")
        assert_less(X_new.shape[1], X.shape[1])
        # transform keeps exactly the above-mean-importance features
        feature_mask = (
            clf.feature_importances_ > clf.feature_importances_.mean())
        assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
    """Predict probabilities with the default (deviance) loss."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # predict_proba before fit must raise
    assert_raises(ValueError, clf.predict_proba, T)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    # check if probabilities are in [0, 1].
    y_proba = clf.predict_proba(T)
    assert_true(np.all(y_proba >= 0.0))
    assert_true(np.all(y_proba <= 1.0))
    # derive predictions from probabilities
    y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result)
def test_check_inputs():
    """Mismatched lengths of X, y, or sample_weight must raise ValueError."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    assert_raises(ValueError, clf.fit, X, y + [0, 1])
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    assert_raises(ValueError, clf.fit, X, y,
                  sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
    """predict() must reject X whose shape differs from the training data."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    x = np.array([1.0, 2.0])[:, np.newaxis]
    assert_raises(ValueError, clf.predict, x)
    x = np.array([[]])
    assert_raises(ValueError, clf.predict, x)
    x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
    assert_raises(ValueError, clf.predict, x)
    # same checks for the regressor (note: consumes the module-level rng)
    clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
    clf.fit(X, rng.rand(len(X)))
    x = np.array([1.0, 2.0])[:, np.newaxis]
    assert_raises(ValueError, clf.predict, x)
    x = np.array([[]])
    assert_raises(ValueError, clf.predict, x)
    x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
    assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
    """Invalid max_features values (0, > n_features, negative) must raise."""
    clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
                                    max_features=0)
    assert_raises(ValueError, clf.fit, X, y)
    clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
                                    max_features=(len(X[0]) + 1))
    assert_raises(ValueError, clf.fit, X, y)
    clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
                                    max_features=-0.1)
    assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
    """Regression test: with max_features set, random state must be wired
    through so the deviance stays below a known bound."""
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    X_train, X_test = X[:2000], X[2000:]
    y_train, y_test = y[:2000], y[2000:]
    gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
                                      max_depth=2, learning_rate=.1,
                                      max_features=2, random_state=1)
    gbrt.fit(X_train, y_train)
    deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
    assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
    """max_features_ must resolve correctly for 'auto', floats and strings.

    'auto' means sqrt(n_features) for classification but n_features for
    regression; 'sqrt'/'log2'/fractions apply to both.
    """
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    _, n_features = X.shape
    X_train = X[:2000]
    y_train = y[:2000]
    gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, n_features)
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(n_features * 0.3))
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(np.log2(n_features)))
    # tiny fractions are clipped up to at least one feature
    gbrt = GradientBoostingRegressor(n_estimators=1,
                                     max_features=0.01 / X.shape[1])
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
    """staged_predict's final stage must equal predict()."""
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=1, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test = X[200:]
    clf = GradientBoostingRegressor()
    # test raise ValueError if not fitted
    assert_raises(ValueError, lambda X: np.fromiter(
        clf.staged_predict(X), dtype=np.float64), X_test)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # test if prediction for last stage equals ``predict``
    for y in clf.staged_predict(X_test):
        assert_equal(y.shape, y_pred.shape)
    assert_array_equal(y_pred, y)
def test_staged_predict_proba():
    """staged_predict_proba's final stage must equal predict_proba()."""
    X, y = datasets.make_hastie_10_2(n_samples=1200,
                                     random_state=1)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingClassifier(n_estimators=20)
    # test raise NotFittedError if not fitted
    assert_raises(NotFittedError, lambda X: np.fromiter(
        clf.staged_predict_proba(X), dtype=np.float64), X_test)
    clf.fit(X_train, y_train)
    # test if prediction for last stage equals ``predict``
    for y_pred in clf.staged_predict(X_test):
        assert_equal(y_test.shape, y_pred.shape)
    assert_array_equal(clf.predict(X_test), y_pred)
    # test if prediction for last stage equals ``predict_proba``
    for staged_proba in clf.staged_predict_proba(X_test):
        assert_equal(y_test.shape[0], staged_proba.shape[0])
        assert_equal(2, staged_proba.shape[1])
    assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
    """staged_* generators must yield defensive copies, not shared buffers."""
    rng = np.random.RandomState(0)  # local rng, shadows the module-level one
    X = rng.uniform(size=(10, 3))
    y = (4 * X[:, 0]).astype(np.int) + 1  # don't predict zeros
    for estimator in [GradientBoostingRegressor(),
                      GradientBoostingClassifier()]:
        estimator.fit(X, y)
        for func in ['predict', 'decision_function', 'predict_proba']:
            staged_func = getattr(estimator, "staged_" + func, None)
            if staged_func is None:
                # regressor has no staged_predict_proba
                continue
            with warnings.catch_warnings(record=True):
                staged_result = list(staged_func(X))
            # zeroing one stage must not corrupt the others
            staged_result[1][:] = 0
            assert_true(np.all(staged_result[0] != 0))
def test_serialization():
    """A fitted model must survive a pickle round-trip unchanged."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
    # cPickle is the fast Python 2 implementation; fall back on Python 3
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
    clf = None
    clf = pickle.loads(serialized_clf)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
    """All-equal targets: classifier must raise, regressor must fit."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # classifier should raise exception (single class)
    assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
    clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
    clf.fit(X, np.ones(len(X)))
    clf.predict([rng.rand(2)])
    # a constant-target regressor predicts that constant everywhere
    assert_array_equal(np.ones((1,), dtype=np.float64),
                       clf.predict([rng.rand(2)]))
def test_quantile_loss():
    """Quantile loss with alpha=0.5 (the median) must match lad."""
    clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
                                             max_depth=4, alpha=0.5,
                                             random_state=7)
    clf_quantile.fit(boston.data, boston.target)
    y_quantile = clf_quantile.predict(boston.data)
    clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
                                        max_depth=4, random_state=7)
    clf_lad.fit(boston.data, boston.target)
    y_lad = clf_lad.predict(boston.data)
    assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
    """Fit and predict with non-integer (string) class labels."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    symbol_y = tosequence(map(str, y))
    clf.fit(X, symbol_y)
    assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
    assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
    """Fit and predict with float class labels."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    float_y = np.asarray(y, dtype=np.float32)
    clf.fit(X, float_y)
    assert_array_equal(clf.predict(T),
                       np.asarray(true_result, dtype=np.float32))
    assert_equal(100, len(clf.estimators_))
def test_shape_y():
    """A column-vector y must warn (DataConversionWarning) but still fit."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    y_ = np.asarray(y, dtype=np.int32)
    y_ = y_[:, np.newaxis]
    # This will raise a DataConversionWarning that we want to
    # "always" raise, elsewhere the warnings gets ignored in the
    # later tests, and the tests that check for this warning fail
    assert_warns(DataConversionWarning, clf.fit, X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
def test_mem_layout():
    """Fortran- and C-contiguous layouts of X and y must give the same fit."""
    X_ = np.asfortranarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
    X_ = np.ascontiguousarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.ascontiguousarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.asfortranarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
    """oob_improvement_ must have one entry per stage; regression-test values."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=0.5)
    clf.fit(X, y)
    assert_equal(clf.oob_improvement_.shape[0], 100)
    # hard-coded regression test - change if modification in OOB computation
    assert_array_almost_equal(clf.oob_improvement_[:5],
                              np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
                              decimal=2)
def test_oob_improvement_raise():
    """Without subsampling there is no OOB set, so the attribute must raise."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=1.0)
    clf.fit(X, y)
    assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multilcass_iris():
    """Check OOB improvement on a multi-class dataset (iris)."""
    # NOTE(review): function name has a typo ("multilcass"); kept as-is since
    # renaming would change which tests runners collect by name.
    clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
                                     random_state=1, subsample=0.5)
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    assert_greater(score, 0.9)
    assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
    # hard-coded regression test - change if modification in OOB computation
    # FIXME: the following snippet does not yield the same results on 32 bits
    # assert_array_almost_equal(clf.oob_improvement_[:5],
    #                           np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
    #                           decimal=2)
def test_verbose_output():
    """verbose=1 with subsampling must print the OOB header and 19 rows."""
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    old_stdout = sys.stdout
    sys.stdout = StringIO()  # capture the verbose report
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     verbose=1, subsample=0.8)
    clf.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout
    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # with OOB
    true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
        'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
    assert_equal(true_header, header)
    n_lines = sum(1 for l in verbose_output.readlines())
    # one for 1-10 and then 9 for 20-100
    assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
    """verbose=2 must print the no-OOB header and one row per estimator."""
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    old_stdout = sys.stdout
    sys.stdout = StringIO()  # capture the verbose report
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     verbose=2)
    clf.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout
    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # no OOB
    true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
        'Iter', 'Train Loss', 'Remaining Time')
    assert_equal(true_header, header)
    n_lines = sum(1 for l in verbose_output.readlines())
    # 100 lines for n_estimators==100
    assert_equal(100, n_lines)
def test_warm_start():
    """Growing 100 -> 200 estimators with warm_start must equal one 200-fit."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=200, max_depth=1)
        est.fit(X, y)
        est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est_ws.fit(X, y)
        est_ws.set_params(n_estimators=200)
        est_ws.fit(X, y)
        assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
    """Growing 100 -> 300 estimators with warm_start must equal one 300-fit."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=300, max_depth=1)
        est.fit(X, y)
        est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est_ws.fit(X, y)
        est_ws.set_params(n_estimators=300)
        est_ws.fit(X, y)
        assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
    """Warm start may continue with trees of a different depth."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est.fit(X, y)
        est.set_params(n_estimators=110, max_depth=2)
        est.fit(X, y)
        # last 10 trees have different depth
        assert_equal(est.estimators_[0, 0].max_depth, 1)
        for i in range(1, 11):
            assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
    """Turning warm_start off must clear state so refit equals a fresh fit."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1)
        est.fit(X, y)
        est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est_2.fit(X, y)  # inits state
        est_2.set_params(warm_start=False)
        est_2.fit(X, y)  # clears old state and equals est
        assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
    """Warm start with n_estimators reduced to zero must raise ValueError."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est.fit(X, y)
        est.set_params(n_estimators=0)
        assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
    """Warm start with a smaller n_estimators must raise ValueError."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est.fit(X, y)
        est.set_params(n_estimators=99)
        assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
    """Warm-start refit with unchanged n_estimators must be a no-op."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1)
        est.fit(X, y)
        est2 = clone(est)
        est2.set_params(n_estimators=est.n_estimators, warm_start=True)
        est2.fit(X, y)
        assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
    """OOB tracking can be enabled mid-way through a warm start."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est.fit(X, y)
        est.set_params(n_estimators=110, subsample=0.5)
        est.fit(X, y)
        # stages fitted without subsampling report zero improvement
        assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
        # the last 10 are not zeros
        assert_array_equal(est.oob_improvement_[-10:] == 0.0,
                           np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
    """OOB improvements under warm start must match a single long fit."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
                  random_state=1)
        est.fit(X, y)
        est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
                     random_state=1, warm_start=True)
        est_ws.fit(X, y)
        est_ws.set_params(n_estimators=200)
        est_ws.fit(X, y)
        assert_array_almost_equal(est_ws.oob_improvement_[:100],
                                  est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
    """Monitor callback: request early stopping on the 10th iteration (i == 9)."""
    return i == 9
def test_monitor_early_stopping():
    """A monitor returning True must stop fitting; refit must recover.

    n_estimators stays at its requested value but only 10 stages are built;
    the same is checked for a warm-start estimator.
    """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
        est.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(est.n_estimators, 20)  # this is not altered
        assert_equal(est.estimators_.shape[0], 10)
        assert_equal(est.train_score_.shape[0], 10)
        assert_equal(est.oob_improvement_.shape[0], 10)
        # try refit
        est.set_params(n_estimators=30)
        est.fit(X, y)
        assert_equal(est.n_estimators, 30)
        assert_equal(est.estimators_.shape[0], 30)
        assert_equal(est.train_score_.shape[0], 30)
        est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
                  warm_start=True)
        est.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(est.n_estimators, 20)
        assert_equal(est.estimators_.shape[0], 10)
        assert_equal(est.train_score_.shape[0], 10)
        assert_equal(est.oob_improvement_.shape[0], 10)
        # try refit
        est.set_params(n_estimators=30, warm_start=False)
        est.fit(X, y)
        assert_equal(est.n_estimators, 30)
        assert_equal(est.train_score_.shape[0], 30)
        assert_equal(est.estimators_.shape[0], 30)
        assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    """With max_leaf_nodes=k+1 and unbounded depth, greedy trees reach
    depth k and exactly k+1 leaves."""
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
                                     random_state=1, max_leaf_nodes=k + 1)
    est.fit(X, y)
    tree = est.estimators_[0, 0].tree_
    assert_equal(tree.max_depth, k)
    assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
                 k + 1)
def test_complete_regression():
    """Regression counterpart: max_leaf_nodes=k+1 yields exactly k+1 leaves."""
    from sklearn.tree._tree import TREE_LEAF
    k = 4
    est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
                                    random_state=1, max_leaf_nodes=k + 1)
    est.fit(boston.data, boston.target)
    tree = est.estimators_[-1, 0].tree_
    assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
                 k + 1)
def test_zero_estimator_reg():
    """init=ZeroEstimator() and init='zero' must work for regression;
    an unknown init string must raise."""
    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init=ZeroEstimator())
    est.fit(boston.data, boston.target)
    y_pred = est.predict(boston.data)
    mse = mean_squared_error(boston.target, y_pred)
    assert_almost_equal(mse, 33.0, decimal=0)
    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init='zero')
    est.fit(boston.data, boston.target)
    y_pred = est.predict(boston.data)
    mse = mean_squared_error(boston.target, y_pred)
    assert_almost_equal(mse, 33.0, decimal=0)
    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init='foobar')
    assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
    """init=ZeroEstimator() and init='zero' must work for classification
    (multi-class and binary); an unknown init string must raise."""
    X = iris.data
    y = np.array(iris.target)
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init=ZeroEstimator())
    est.fit(X, y)
    assert_greater(est.score(X, y), 0.96)
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='zero')
    est.fit(X, y)
    assert_greater(est.score(X, y), 0.96)
    # binary clf: collapse iris to class 0 vs. the rest
    mask = y != 0
    y[mask] = 1
    y[~mask] = 0
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='zero')
    est.fit(X, y)
    assert_greater(est.score(X, y), 0.96)
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='foobar')
    assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    """max_leaf_nodes must take precedence over max_depth when both are set."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    all_estimators = [GradientBoostingRegressor,
                      GradientBoostingClassifier]
    k = 4
    for GBEstimator in all_estimators:
        est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        tree = est.estimators_[0, 0].tree_
        # max_leaf_nodes wins: trees grow deeper than max_depth=1
        assert_greater(tree.max_depth, 1)
        est = GBEstimator(max_depth=1).fit(X, y)
        tree = est.estimators_[0, 0].tree_
        assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
    """warm_start refit with unchanged n_estimators must do nothing.

    Regression test for scikit-learn issue #3513.
    """
    clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
    clf.fit([[0, 1], [2, 3]], [0, 1])
    assert_equal(clf.estimators_.shape[0], 10)
    clf.fit([[0, 1], [2, 3]], [0, 1])
    assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
    """Predict probabilities with the exponential loss.

    For exponential loss, proba is the logistic transform of twice the
    decision function: p = 1 / (1 + exp(-2 * score)).
    """
    clf = GradientBoostingClassifier(loss='exponential',
                                     n_estimators=100, random_state=1)
    assert_raises(ValueError, clf.predict_proba, T)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    # check if probabilities are in [0, 1].
    y_proba = clf.predict_proba(T)
    assert_true(np.all(y_proba >= 0.0))
    assert_true(np.all(y_proba <= 1.0))
    score = clf.decision_function(T).ravel()
    assert_array_almost_equal(y_proba[:, 1],
                              1.0 / (1.0 + np.exp(-2 * score)))
    # derive predictions from probabilities
    y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
    """Zero-weighted samples must not influence regression fits."""
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    # Zero weights make the first two samples invisible to the fit,
    # so [1, 0] should be pulled towards target 1.
    sample_weight = [0, 0, 1, 1]
    for loss in ('huber', 'ls', 'lad', 'quantile'):
        reg = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
                                        loss=loss)
        reg.fit(X, y, sample_weight=sample_weight)
        assert_greater(reg.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
    """Zero-weighted samples must be ignored for every classification loss."""
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1]]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1]
    for loss in ('deviance', 'exponential'):
        # BUG FIX: `loss` was previously never passed to the estimator,
        # so the loop only ever exercised the default 'deviance' loss.
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
        gb.fit(X, y, sample_weight=sample_weight)
        assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
    """Fitting on sparse input must match fitting on dense input.

    Compares apply/predict/feature_importances_ (and, for classifiers,
    the probability outputs) between a dense fit, a sparse fit with
    presort disabled, and a sparse fit with presort='auto'.
    """
    dense = EstimatorClass(n_estimators=10, random_state=0,
                           max_depth=2).fit(X, y)
    sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
                            presort=False).fit(X_sparse, y)
    auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
                          presort='auto').fit(X_sparse, y)
    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    assert_array_almost_equal(sparse.predict(X), dense.predict(X))
    assert_array_almost_equal(sparse.feature_importances_,
                              dense.feature_importances_)
    assert_array_almost_equal(sparse.apply(X), auto.apply(X))
    assert_array_almost_equal(sparse.predict(X), auto.predict(X))
    assert_array_almost_equal(sparse.feature_importances_,
                              auto.feature_importances_)
    # BUG FIX: EstimatorClass is a class, not an instance, so the previous
    # `isinstance(EstimatorClass, GradientBoostingClassifier)` check was
    # always False and the probability comparisons below never ran.
    if issubclass(EstimatorClass, GradientBoostingClassifier):
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))
        assert_array_almost_equal(sparse.predict_proba(X),
                                  auto.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
    """Generator test: every estimator x sparse-format combination."""
    estimator_classes = (GradientBoostingClassifier, GradientBoostingRegressor)
    matrix_types = (csr_matrix, csc_matrix, coo_matrix)
    y, X = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=50,
                                                   n_features=1,
                                                   n_classes=20)
    y = y[:, 0]  # reduce to a single-label problem
    for est_cls, to_sparse in product(estimator_classes, matrix_types):
        yield check_sparse_input, est_cls, X, to_sparse(X), y
|
ckohl/illumos-kvm-cmd | refs/heads/master | scripts/tracetool/backend/ust.py | 114 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
LTTng User Space Tracing backend.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
def c(events):
    """Generate the C source for the LTTng-UST tracing backend.

    For every trace event this emits a ``DEFINE_TRACE`` declaration plus a
    static probe function that forwards the arguments to ``trace_mark``,
    and finally a constructor that registers every probe at load time.
    """
    out('#include <ust/marker.h>',
        '#undef mutex_lock',
        '#undef mutex_unlock',
        '#undef inline',
        '#undef wmb',
        '#include "trace.h"')
    for e in events:
        argnames = ", ".join(e.args.names())
        if len(e.args) > 0:
            # Leading comma joins the names onto the format string below.
            argnames = ', ' + argnames
            out('DEFINE_TRACE(ust_%(name)s);',
                '',
                'static void ust_%(name)s_probe(%(args)s)',
                '{',
                '    trace_mark(ust, %(name)s, %(fmt)s%(argnames)s);',
                '}',
                name = e.name,
                args = e.args,
                fmt = e.fmt,
                argnames = argnames,
                )
        else:
            # Events without arguments use the UST no-args marker form.
            out('DEFINE_TRACE(ust_%(name)s);',
                '',
                'static void ust_%(name)s_probe(%(args)s)',
                '{',
                '    trace_mark(ust, %(name)s, UST_MARKER_NOARGS);',
                '}',
                name = e.name,
                args = e.args,
                )

    # register probes
    out('',
        'static void __attribute__((constructor)) trace_init(void)',
        '{')
    for e in events:
        out('    register_trace_ust_%(name)s(ust_%(name)s_probe);',
            name = e.name,
            )
    out('}')
def h(events):
    """Generate the C header for the LTTng-UST tracing backend.

    Declares one UST tracepoint per event and aliases the generic
    ``trace_<name>`` macro to the UST-specific ``trace_ust_<name>``.
    """
    out('#include <ust/tracepoint.h>',
        '#undef mutex_lock',
        '#undef mutex_unlock',
        '#undef inline',
        '#undef wmb')
    for e in events:
        if len(e.args) > 0:
            out('DECLARE_TRACE(ust_%(name)s, TP_PROTO(%(args)s), TP_ARGS(%(argnames)s));',
                '#define trace_%(name)s trace_ust_%(name)s',
                name = e.name,
                args = e.args,
                argnames = ", ".join(e.args.names()),
                )
        else:
            # No-argument events need the special NOARGS declaration.
            out('_DECLARE_TRACEPOINT_NOARGS(ust_%(name)s);',
                '#define trace_%(name)s trace_ust_%(name)s',
                name = e.name,
                )
    out()
|
googlearchive/cloud-playground | refs/heads/master | __pg/secret.py | 2 | """Class to maintain application secrets in the datastore."""
from webapp2_extras import security
from . import settings
from google.appengine.ext import ndb
class Secret(ndb.Model):
  """A model which stores secret keys."""

  # The random secret material; unindexed because entities are only
  # ever fetched by key, never queried by value.
  secret_key = ndb.StringProperty(indexed=False)
  # Audit timestamps, maintained automatically by ndb.
  created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
  updated = ndb.DateTimeProperty(auto_now=True, indexed=False)
def GenerateRandomString(entropy=128, pool=security.LOWERCASE_ALPHANUMERIC):
  """Return a random string with the requested bits of entropy.

  Args:
    entropy: desired entropy in bits.
    pool: the character pool to draw from.
  """
  return security.generate_random_string(entropy=entropy, pool=pool)
def GetSecret(key_name, entropy):
  """Return the application secret for key_name, creating it lazily."""
  # Fast path: a plain, transactionless get by id.
  secret = Secret.get_by_id(key_name, namespace=settings.PLAYGROUND_NAMESPACE)
  if not secret:
    # Slow path: transactional get_or_insert with a fresh candidate key.
    # A concurrent insert may win the race, in which case the winner's
    # entity (and secret) is returned.
    candidate = GenerateRandomString(entropy)
    secret = Secret.get_or_insert(key_name, secret_key=candidate,
                                  namespace=settings.PLAYGROUND_NAMESPACE)
  # The datastore holds the one true secret key.
  return str(secret.secret_key)
|
uberamd/NGECore2 | refs/heads/master | scripts/object/tangible/ship/components/weapon_capacitor/cap_sorosuub_dynamo_mk2.py | 85615 | import sys
def setup(core, object):
return |
VasuAgrawal/tartanHacks2015 | refs/heads/master | site/flask/lib/python2.7/site-packages/werkzeug/routing.py | 27 | # -*- coding: utf-8 -*-
"""
werkzeug.routing
~~~~~~~~~~~~~~~~
When it comes to combining multiple controller or view functions (however
you want to call them) you need a dispatcher. A simple way would be
applying regular expression tests on the ``PATH_INFO`` and calling
registered callback functions that return the value then.
This module implements a much more powerful system than simple regular
expression matching because it can also convert values in the URLs and
build URLs.
Here a simple example that creates an URL map for an application with
two subdomains (www and kb) and some URL rules:
>>> m = Map([
... # Static URLs
... Rule('/', endpoint='static/index'),
... Rule('/about', endpoint='static/about'),
... Rule('/help', endpoint='static/help'),
... # Knowledge Base
... Subdomain('kb', [
... Rule('/', endpoint='kb/index'),
... Rule('/browse/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
... ])
... ], default_subdomain='www')
If the application doesn't use subdomains it's perfectly fine to not set
the default subdomain and not use the `Subdomain` rule factory. The endpoint
in the rules can be anything, for example import paths or unique
identifiers. The WSGI application can use those endpoints to get the
handler for that URL. It doesn't have to be a string at all but it's
recommended.
Now it's possible to create a URL adapter for one of the subdomains and
build URLs:
>>> c = m.bind('example.com')
>>> c.build("kb/browse", dict(id=42))
'http://kb.example.com/browse/42/'
>>> c.build("kb/browse", dict())
'http://kb.example.com/browse/'
>>> c.build("kb/browse", dict(id=42, page=3))
'http://kb.example.com/browse/42/3'
>>> c.build("static/about")
'/about'
>>> c.build("static/index", force_external=True)
'http://www.example.com/'
>>> c = m.bind('example.com', subdomain='kb')
>>> c.build("static/about")
'http://www.example.com/about'
The first argument to bind is the server name *without* the subdomain.
Per default it will assume that the script is mounted on the root, but
often that's not the case so you can provide the real mount point as
second argument:
>>> c = m.bind('example.com', '/applications/example')
The third argument can be the subdomain, if not given the default
subdomain is used. For more details about binding have a look at the
documentation of the `MapAdapter`.
And here is how you can match URLs:
>>> c = m.bind('example.com')
>>> c.match("/")
('static/index', {})
>>> c.match("/about")
('static/about', {})
>>> c = m.bind('example.com', '/', 'kb')
>>> c.match("/")
('kb/index', {})
>>> c.match("/browse/42/23")
('kb/browse', {'id': 42, 'page': 23})
If matching fails you get a `NotFound` exception, if the rule thinks
it's a good idea to redirect (for example because the URL was defined
to have a slash at the end but the request was missing that slash) it
will raise a `RequestRedirect` exception. Both are subclasses of the
`HTTPException` so you can use those errors as responses in the
application.
If matching succeeded but the URL rule was incompatible to the given
method (for example there were only rules for `GET` and `HEAD` and
routing system tried to match a `POST` request) a `MethodNotAllowed`
method is raised.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import uuid
import posixpath
from pprint import pformat
from threading import Lock
from werkzeug.urls import url_encode, url_quote, url_join
from werkzeug.utils import redirect, format_string
from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed
from werkzeug._internal import _get_environ, _encode_idna
from werkzeug._compat import itervalues, iteritems, to_unicode, to_bytes, \
text_type, string_types, native_string_result, \
implements_to_string, wsgi_decoding_dance
from werkzeug.datastructures import ImmutableDict, MultiDict
_rule_re = re.compile(r'''
(?P<static>[^<]*) # static rule data
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<args>.*?)\))? # converter arguments
\: # variable delimiter
)?
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
>
''', re.VERBOSE)
_simple_rule_re = re.compile(r'<([^>]+)>')
_converter_args_re = re.compile(r'''
((?P<name>\w+)\s*=\s*)?
(?P<value>
True|False|
\d+.\d+|
\d+.|
\d+|
\w+|
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
)\s*,
''', re.VERBOSE | re.UNICODE)
_PYTHON_CONSTANTS = {
'None': None,
'True': True,
'False': False
}
def _pythonize(value):
    """Convert a converter-argument token into a Python value.

    Tries, in order: named constants, int, float, and finally a
    (possibly quote-stripped) text string.
    """
    if value in _PYTHON_CONSTANTS:
        return _PYTHON_CONSTANTS[value]
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    # Strip one pair of matching surrounding quotes, if present.
    if value[:1] == value[-1:] and value[0] in '"\'':
        value = value[1:-1]
    return text_type(value)
def parse_converter_args(argstr):
    """Split a converter argument string into ``(args, kwargs)``."""
    # Trailing comma lets the regex consume the final argument too.
    argstr += ','
    positional = []
    keyword = {}
    for match in _converter_args_re.finditer(argstr):
        raw = match.group('stringval')
        if raw is None:
            raw = match.group('value')
        converted = _pythonize(raw)
        name = match.group('name')
        if name:
            keyword[name] = converted
        else:
            positional.append(converted)
    return tuple(positional), keyword
def parse_rule(rule):
    """Parse a rule and return it as generator. Each iteration yields tuples
    in the form ``(converter, arguments, variable)``. If the converter is
    `None` it's a static url part, otherwise it's a dynamic one.

    :internal:
    """
    pos = 0
    end = len(rule)
    do_match = _rule_re.match  # hoisted bound method for the scan loop
    used_names = set()
    while pos < end:
        m = do_match(rule, pos)
        if m is None:
            break
        data = m.groupdict()
        if data['static']:
            yield None, None, data['static']
        variable = data['variable']
        converter = data['converter'] or 'default'
        # Each placeholder name may appear at most once per rule.
        if variable in used_names:
            raise ValueError('variable name %r used twice.' % variable)
        used_names.add(variable)
        yield converter, data['args'] or None, variable
        pos = m.end()
    if pos < end:
        # Whatever is left must be plain static text; stray angle
        # brackets indicate a malformed placeholder.
        remaining = rule[pos:]
        if '>' in remaining or '<' in remaining:
            raise ValueError('malformed url rule: %r' % rule)
        yield None, None, remaining
class RoutingException(Exception):
    """Special exceptions that require the application to redirect, notifying
    about missing urls, etc.

    :internal:
    """
class RequestRedirect(HTTPException, RoutingException):
    """Raise if the map requests a redirect. This is for example the case if
    `strict_slashes` are activated and an url that requires a trailing slash.

    The attribute `new_url` contains the absolute destination url.
    """
    # Permanent redirect.
    code = 301

    def __init__(self, new_url):
        RoutingException.__init__(self, new_url)
        self.new_url = new_url

    def get_response(self, environ):
        # Build a real HTTP redirect response for the new location.
        return redirect(self.new_url, self.code)
class RequestSlash(RoutingException):
    """Internal exception raised when a rule matched except for a missing
    trailing slash; the map converts it into a redirect."""
class RequestAliasRedirect(RoutingException):
    """This rule is an alias and wants to redirect to the canonical URL."""

    def __init__(self, matched_values):
        # The converted values matched by the alias rule, used to build
        # the canonical URL.
        self.matched_values = matched_values
class BuildError(RoutingException, LookupError):
    """Raised if the build system cannot find a URL for an endpoint with the
    values provided.
    """

    def __init__(self, endpoint, values, method):
        LookupError.__init__(self, endpoint, values, method)
        self.endpoint = endpoint
        self.values = values
        self.method = method
class ValidationError(ValueError):
    """Validation error. If a rule converter raises this exception the rule
    does not match the current URL and the next URL is tried.
    """
class RuleFactory(object):
    """As soon as you have more complex URL setups it's a good idea to use rule
    factories to avoid repetitive tasks. Some of them are builtin, others can
    be added by subclassing `RuleFactory` and overriding `get_rules`.
    """

    def get_rules(self, map):
        """Subclasses of `RuleFactory` have to override this method and return
        an iterable of rules."""
        raise NotImplementedError()
class Subdomain(RuleFactory):
    """All URLs provided by this factory have the subdomain set to a
    specific domain. For example if you want to use the subdomain for
    the current language this can be a good setup::

        url_map = Map([
            Rule('/', endpoint='#select_language'),
            Subdomain('<string(length=2):lang_code>', [
                Rule('/', endpoint='index'),
                Rule('/about', endpoint='about'),
                Rule('/help', endpoint='help')
            ])
        ])

    All the rules except for the ``'#select_language'`` endpoint will now
    listen on a two letter long subdomain that holds the language code
    for the current request.
    """

    def __init__(self, subdomain, rules):
        self.subdomain = subdomain
        self.rules = rules

    def get_rules(self, map):
        for rulefactory in self.rules:
            for rule in rulefactory.get_rules(map):
                # Copy the rule so the original (possibly shared) rule
                # object is left untouched.
                rule = rule.empty()
                rule.subdomain = self.subdomain
                yield rule
class Submount(RuleFactory):
    """Like `Subdomain` but prefixes the URL rule with a given string::

        url_map = Map([
            Rule('/', endpoint='index'),
            Submount('/blog', [
                Rule('/', endpoint='blog/index'),
                Rule('/entry/<entry_slug>', endpoint='blog/show')
            ])
        ])

    Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
    """

    def __init__(self, path, rules):
        # Strip the trailing slash so prefixing never yields '//'.
        self.path = path.rstrip('/')
        self.rules = rules

    def get_rules(self, map):
        for rulefactory in self.rules:
            for rule in rulefactory.get_rules(map):
                rule = rule.empty()
                rule.rule = self.path + rule.rule
                yield rule
class EndpointPrefix(RuleFactory):
    """Prefixes all endpoints (which must be strings for this factory) with
    another string. This can be useful for sub applications::

        url_map = Map([
            Rule('/', endpoint='index'),
            EndpointPrefix('blog/', [Submount('/blog', [
                Rule('/', endpoint='index'),
                Rule('/entry/<entry_slug>', endpoint='show')
            ])])
        ])
    """

    def __init__(self, prefix, rules):
        self.prefix = prefix
        self.rules = rules

    def get_rules(self, map):
        for rulefactory in self.rules:
            for rule in rulefactory.get_rules(map):
                rule = rule.empty()
                rule.endpoint = self.prefix + rule.endpoint
                yield rule
class RuleTemplate(object):
    """Returns copies of the rules wrapped and expands string templates in
    the endpoint, rule, defaults or subdomain sections.

    Here a small example for such a rule template::

        from werkzeug.routing import Map, Rule, RuleTemplate

        resource = RuleTemplate([
            Rule('/$name/', endpoint='$name.list'),
            Rule('/$name/<int:id>', endpoint='$name.show')
        ])

        url_map = Map([resource(name='user'), resource(name='page')])

    When a rule template is called the keyword arguments are used to
    replace the placeholders in all the string parameters.
    """

    def __init__(self, rules):
        self.rules = list(rules)

    def __call__(self, *args, **kwargs):
        # Calling the template binds the substitution context; the actual
        # substitution happens lazily in RuleTemplateFactory.get_rules.
        return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
class RuleTemplateFactory(RuleFactory):
    """A factory that fills in template variables into rules. Used by
    `RuleTemplate` internally.

    :internal:
    """

    def __init__(self, rules, context):
        self.rules = rules
        self.context = context

    def get_rules(self, map):
        for rulefactory in self.rules:
            for rule in rulefactory.get_rules(map):
                new_defaults = subdomain = None
                # Substitute template placeholders in every string-valued
                # default, the subdomain and the endpoint.
                if rule.defaults:
                    new_defaults = {}
                    for key, value in iteritems(rule.defaults):
                        if isinstance(value, string_types):
                            value = format_string(value, self.context)
                        new_defaults[key] = value
                if rule.subdomain is not None:
                    subdomain = format_string(rule.subdomain, self.context)
                new_endpoint = rule.endpoint
                if isinstance(new_endpoint, string_types):
                    new_endpoint = format_string(new_endpoint, self.context)
                yield Rule(
                    format_string(rule.rule, self.context),
                    new_defaults,
                    subdomain,
                    rule.methods,
                    rule.build_only,
                    new_endpoint,
                    rule.strict_slashes
                )
@implements_to_string
class Rule(RuleFactory):
    """A Rule represents one URL pattern. There are some options for `Rule`
    that change the way it behaves and are passed to the `Rule` constructor.
    Note that besides the rule-string all arguments *must* be keyword arguments
    in order to not break the application on Werkzeug upgrades.

    `string`
        Rule strings basically are just normal URL paths with placeholders in
        the format ``<converter(arguments):name>`` where the converter and the
        arguments are optional. If no converter is defined the `default`
        converter is used which means `string` in the normal configuration.

        URL rules that end with a slash are branch URLs, others are leaves.
        If you have `strict_slashes` enabled (which is the default), all
        branch URLs that are matched without a trailing slash will trigger a
        redirect to the same URL with the missing slash appended.

        The converters are defined on the `Map`.

    `endpoint`
        The endpoint for this rule. This can be anything. A reference to a
        function, a string, a number etc. The preferred way is using a string
        because the endpoint is used for URL generation.

    `defaults`
        An optional dict with defaults for other rules with the same endpoint.
        This is a bit tricky but useful if you want to have unique URLs::

            url_map = Map([
                Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
                Rule('/all/page/<int:page>', endpoint='all_entries')
            ])

        If a user now visits ``http://example.com/all/page/1`` he will be
        redirected to ``http://example.com/all/``. If `redirect_defaults` is
        disabled on the `Map` instance this will only affect the URL
        generation.

    `subdomain`
        The subdomain rule string for this rule. If not specified the rule
        only matches for the `default_subdomain` of the map. If the map is
        not bound to a subdomain this feature is disabled.

        Can be useful if you want to have user profiles on different subdomains
        and all subdomains are forwarded to your application::

            url_map = Map([
                Rule('/', subdomain='<username>', endpoint='user/homepage'),
                Rule('/stats', subdomain='<username>', endpoint='user/stats')
            ])

    `methods`
        A sequence of http methods this rule applies to. If not specified, all
        methods are allowed. For example this can be useful if you want different
        endpoints for `POST` and `GET`. If methods are defined and the path
        matches but the method matched against is not in this list or in the
        list of another rule for that path the error raised is of the type
        `MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
        list of methods and `HEAD` is not, `HEAD` is added automatically.

        .. versionchanged:: 0.6.1
           `HEAD` is now automatically added to the methods if `GET` is
           present. The reason for this is that existing code often did not
           work properly in servers not rewriting `HEAD` to `GET`
           automatically and it was not documented how `HEAD` should be
           treated. This was considered a bug in Werkzeug because of that.

    `strict_slashes`
        Override the `Map` setting for `strict_slashes` only for this rule. If
        not specified the `Map` setting is used.

    `build_only`
        Set this to True and the rule will never match but will create a URL
        that can be build. This is useful if you have resources on a subdomain
        or folder that are not handled by the WSGI application (like static data)

    `redirect_to`
        If given this must be either a string or callable. In case of a
        callable it's called with the url adapter that triggered the match and
        the values of the URL as keyword arguments and has to return the target
        for the redirect, otherwise it has to be a string with placeholders in
        rule syntax::

            def foo_with_slug(adapter, id):
                # ask the database for the slug for the old id. this of
                # course has nothing to do with werkzeug.
                return 'foo/' + Foo.get_slug_for_id(id)

            url_map = Map([
                Rule('/foo/<slug>', endpoint='foo'),
                Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
                Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
            ])

        When the rule is matched the routing system will raise a
        `RequestRedirect` exception with the target for the redirect.

        Keep in mind that the URL will be joined against the URL root of the
        script so don't use a leading slash on the target URL unless you
        really mean root of that domain.

    `alias`
        If enabled this rule serves as an alias for another rule with the same
        endpoint and arguments.

    `host`
        If provided and the URL map has host matching enabled this can be
        used to provide a match rule for the whole host. This also means
        that the subdomain feature is disabled.

    .. versionadded:: 0.7
       The `alias` and `host` parameters were added.
    """

    def __init__(self, string, defaults=None, subdomain=None, methods=None,
                 build_only=False, endpoint=None, strict_slashes=None,
                 redirect_to=None, alias=False, host=None):
        if not string.startswith('/'):
            raise ValueError('urls must start with a leading slash')
        self.rule = string
        # A leaf URL has no trailing slash; branches redirect on missing '/'.
        self.is_leaf = not string.endswith('/')
        self.map = None
        self.strict_slashes = strict_slashes
        self.subdomain = subdomain
        self.host = host
        self.defaults = defaults
        self.build_only = build_only
        self.alias = alias
        if methods is None:
            self.methods = None
        else:
            self.methods = set([x.upper() for x in methods])
            # GET implies HEAD unless HEAD is explicitly excluded.
            if 'HEAD' not in self.methods and 'GET' in self.methods:
                self.methods.add('HEAD')
        self.endpoint = endpoint
        self.redirect_to = redirect_to
        if defaults:
            self.arguments = set(map(str, defaults))
        else:
            self.arguments = set()
        # Populated by compile() once the rule is bound to a map.
        self._trace = self._converters = self._regex = self._weights = None

    def empty(self):
        """Return an unbound copy of this rule. This can be useful if you
        want to reuse an already bound URL for another map."""
        defaults = None
        if self.defaults:
            defaults = dict(self.defaults)
        return type(self)(self.rule, defaults, self.subdomain, self.methods,
                          self.build_only, self.endpoint, self.strict_slashes,
                          self.redirect_to, self.alias, self.host)

    def get_rules(self, map):
        # A rule is its own (single-item) rule factory.
        yield self

    def refresh(self):
        """Rebinds and refreshes the URL. Call this if you modified the
        rule in place.

        :internal:
        """
        self.bind(self.map, rebind=True)

    def bind(self, map, rebind=False):
        """Bind the url to a map and create a regular expression based on
        the information from the rule itself and the defaults from the map.

        :internal:
        """
        if self.map is not None and not rebind:
            raise RuntimeError('url rule %r already bound to map %r' %
                               (self, self.map))
        self.map = map
        # Per-rule settings fall back to the map-wide defaults.
        if self.strict_slashes is None:
            self.strict_slashes = map.strict_slashes
        if self.subdomain is None:
            self.subdomain = map.default_subdomain
        self.compile()

    def get_converter(self, variable_name, converter_name, args, kwargs):
        """Looks up the converter for the given parameter.

        .. versionadded:: 0.9
        """
        if converter_name not in self.map.converters:
            raise LookupError('the converter %r does not exist' % converter_name)
        return self.map.converters[converter_name](self.map, *args, **kwargs)

    def compile(self):
        """Compiles the regular expression and stores it."""
        assert self.map is not None, 'rule not bound'

        if self.map.host_matching:
            domain_rule = self.host or ''
        else:
            domain_rule = self.subdomain or ''

        self._trace = []
        self._converters = {}
        self._weights = []
        regex_parts = []

        def _build_regex(rule):
            for converter, arguments, variable in parse_rule(rule):
                if converter is None:
                    # Static part: escape it and weight by segment length.
                    regex_parts.append(re.escape(variable))
                    self._trace.append((False, variable))
                    for part in variable.split('/'):
                        if part:
                            self._weights.append((0, -len(part)))
                else:
                    if arguments:
                        c_args, c_kwargs = parse_converter_args(arguments)
                    else:
                        c_args = ()
                        c_kwargs = {}
                    convobj = self.get_converter(
                        variable, converter, c_args, c_kwargs)
                    regex_parts.append('(?P<%s>%s)' % (variable, convobj.regex))
                    self._converters[variable] = convobj
                    self._trace.append((True, variable))
                    self._weights.append((1, convobj.weight))
                    self.arguments.add(str(variable))

        # Domain and path are joined by a literal '|' in the match string.
        _build_regex(domain_rule)
        regex_parts.append('\\|')
        self._trace.append((False, '|'))
        _build_regex(self.is_leaf and self.rule or self.rule.rstrip('/'))
        if not self.is_leaf:
            self._trace.append((False, '/'))

        if self.build_only:
            return
        regex = r'^%s%s$' % (
            u''.join(regex_parts),
            (not self.is_leaf or not self.strict_slashes) and
            '(?<!/)(?P<__suffix__>/?)' or ''
        )
        self._regex = re.compile(regex, re.UNICODE)

    def match(self, path):
        """Check if the rule matches a given path. Path is a string in the
        form ``"subdomain|/path(method)"`` and is assembled by the map. If
        the map is doing host matching the subdomain part will be the host
        instead.

        If the rule matches a dict with the converted values is returned,
        otherwise the return value is `None`.

        :internal:
        """
        if not self.build_only:
            m = self._regex.search(path)
            if m is not None:
                groups = m.groupdict()
                # we have a folder like part of the url without a trailing
                # slash and strict slashes enabled. raise an exception that
                # tells the map to redirect to the same url but with a
                # trailing slash
                if self.strict_slashes and not self.is_leaf and \
                        not groups.pop('__suffix__'):
                    raise RequestSlash()
                # if we are not in strict slashes mode we have to remove
                # a __suffix__
                elif not self.strict_slashes:
                    del groups['__suffix__']

                result = {}
                for name, value in iteritems(groups):
                    try:
                        value = self._converters[name].to_python(value)
                    except ValidationError:
                        return
                    result[str(name)] = value
                if self.defaults:
                    result.update(self.defaults)

                if self.alias and self.map.redirect_defaults:
                    raise RequestAliasRedirect(result)

                return result

    def build(self, values, append_unknown=True):
        """Assembles the relative url for that rule and the subdomain.
        If building doesn't work for some reasons `None` is returned.

        :internal:
        """
        tmp = []
        add = tmp.append
        processed = set(self.arguments)
        for is_dynamic, data in self._trace:
            if is_dynamic:
                try:
                    add(self._converters[data].to_url(values[data]))
                except ValidationError:
                    return
                processed.add(data)
            else:
                add(url_quote(to_bytes(data, self.map.charset), safe='/:|+'))
        domain_part, url = (u''.join(tmp)).split(u'|', 1)

        if append_unknown:
            # Unconsumed values become query-string parameters.
            query_vars = MultiDict(values)
            for key in processed:
                if key in query_vars:
                    del query_vars[key]

            if query_vars:
                url += u'?' + url_encode(query_vars, charset=self.map.charset,
                                         sort=self.map.sort_parameters,
                                         key=self.map.sort_key)

        return domain_part, url

    def provides_defaults_for(self, rule):
        """Check if this rule has defaults for a given rule.

        :internal:
        """
        return not self.build_only and self.defaults and \
            self.endpoint == rule.endpoint and self != rule and \
            self.arguments == rule.arguments

    def suitable_for(self, values, method=None):
        """Check if the dict of values has enough data for url generation.

        :internal:
        """
        # if a method was given explicitly and that method is not supported
        # by this rule, this rule is not suitable.
        if method is not None and self.methods is not None \
                and method not in self.methods:
            return False

        defaults = self.defaults or ()

        # all arguments required must be either in the defaults dict or
        # the value dictionary otherwise it's not suitable
        for key in self.arguments:
            if key not in defaults and key not in values:
                return False

        # in case defaults are given we ensure that either the value was
        # skipped or the value is the same as the default value.
        if defaults:
            for key, value in iteritems(defaults):
                if key in values and value != values[key]:
                    return False

        return True

    def match_compare_key(self):
        """The match compare key for sorting.

        Current implementation:

        1.  rules without any arguments come first for performance
            reasons only as we expect them to match faster and some
            common ones usually don't have any arguments (index pages etc.)
        2.  The more complex rules come first so the second argument is the
            negative length of the number of weights.
        3.  lastly we order by the actual weights.

        :internal:
        """
        return bool(self.arguments), -len(self._weights), self._weights

    def build_compare_key(self):
        """The build compare key for sorting.

        :internal:
        """
        return self.alias and 1 or 0, -len(self.arguments), \
            -len(self.defaults or ())

    def __eq__(self, other):
        # Two rules are equal when their parsed traces are identical.
        return self.__class__ is other.__class__ and \
            self._trace == other._trace

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return self.rule

    @native_string_result
    def __repr__(self):
        if self.map is None:
            return u'<%s (unbound)>' % self.__class__.__name__
        tmp = []
        for is_dynamic, data in self._trace:
            if is_dynamic:
                tmp.append(u'<%s>' % data)
            else:
                tmp.append(data)
        return u'<%s %s%s -> %s>' % (
            self.__class__.__name__,
            repr((u''.join(tmp)).lstrip(u'|')).lstrip(u'u'),
            self.methods is not None and u' (%s)' %
            u', '.join(self.methods) or u'',
            self.endpoint
        )
class BaseConverter(object):
    """Base class for all converters."""
    # Default pattern: a single path segment (anything but '/').
    regex = '[^/]+'
    weight = 100

    def __init__(self, map):
        self.map = map

    def to_python(self, value):
        return value

    def to_url(self, value):
        return url_quote(value, charset=self.map.charset)
class UnicodeConverter(BaseConverter):
    """This converter is the default converter and accepts any string but
    only one path segment. Thus the string can not include a slash.

    This is the default validator.

    Example::

        Rule('/pages/<page>'),
        Rule('/<string(length=2):lang_code>')

    :param map: the :class:`Map`.
    :param minlength: the minimum length of the string. Must be greater
                      or equal 1.
    :param maxlength: the maximum length of the string.
    :param length: the exact length of the string.
    """

    def __init__(self, map, minlength=1, maxlength=None, length=None):
        BaseConverter.__init__(self, map)
        # `length` pins an exact length, otherwise a {min,max} range
        # is built (an empty max means "unbounded").
        if length is not None:
            length = '{%d}' % int(length)
        else:
            if maxlength is None:
                maxlength = ''
            else:
                maxlength = int(maxlength)
            length = '{%s,%s}' % (
                int(minlength),
                maxlength
            )
        self.regex = '[^/]' + length
class AnyConverter(BaseConverter):
    """Matches one of the items provided. Items can either be Python
    identifiers or strings::

        Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')

    :param map: the :class:`Map`.
    :param items: this function accepts the possible items as positional
                  arguments.
    """

    def __init__(self, map, *items):
        BaseConverter.__init__(self, map)
        # Build an alternation of the literal (escaped) items.
        self.regex = '(?:%s)' % '|'.join([re.escape(x) for x in items])
class PathConverter(BaseConverter):
    """Like the default :class:`UnicodeConverter`, but it also matches
    slashes. This is useful for wikis and similar applications::

        Rule('/<path:wikipage>')
        Rule('/<path:wikipage>/edit')

    :param map: the :class:`Map`.
    """
    # Must not start with '/', but may contain slashes afterwards.
    regex = '[^/].*?'
    weight = 200
class NumberConverter(BaseConverter):
    """Baseclass for `IntegerConverter` and `FloatConverter`.

    :internal:
    """
    weight = 50

    def __init__(self, map, fixed_digits=0, min=None, max=None):
        BaseConverter.__init__(self, map)
        self.fixed_digits = fixed_digits
        self.min = min
        self.max = max

    def to_python(self, value):
        # Enforce an exact digit count when fixed_digits is set.
        if (self.fixed_digits and len(value) != self.fixed_digits):
            raise ValidationError()
        value = self.num_convert(value)
        # Enforce the optional min/max range.
        if (self.min is not None and value < self.min) or \
                (self.max is not None and value > self.max):
            raise ValidationError()
        return value

    def to_url(self, value):
        value = self.num_convert(value)
        if self.fixed_digits:
            # Zero-pad to the fixed width.
            value = ('%%0%sd' % self.fixed_digits) % value
        return str(value)
class IntegerConverter(NumberConverter):
    """This converter only accepts integer values::

        Rule('/page/<int:page>')

    This converter does not support negative values.

    :param map: the :class:`Map`.
    :param fixed_digits: the number of fixed digits in the URL. If you set
                         this to ``4`` for example, the application will
                         only match if the url looks like ``/0001/``. The
                         default is variable length.
    :param min: the minimal value.
    :param max: the maximal value.
    """
    regex = r'\d+'
    num_convert = int
class FloatConverter(NumberConverter):
    """This converter only accepts floating point values::

        Rule('/probability/<float:probability>')

    This converter does not support negative values.

    :param map: the :class:`Map`.
    :param min: the minimal value.
    :param max: the maximal value.
    """
    # Requires an explicit decimal point; plain integers do not match.
    regex = r'\d+\.\d+'
    num_convert = float

    def __init__(self, map, min=None, max=None):
        # Floats never use fixed_digits, so it is pinned to 0 here.
        NumberConverter.__init__(self, map, 0, min, max)
class UUIDConverter(BaseConverter):
    """This converter only accepts UUID strings::

        Rule('/object/<uuid:identifier>')

    .. versionadded:: 0.10

    :param map: the :class:`Map`.
    """
    # Canonical 8-4-4-4-12 hex form, case-insensitive.
    regex = r'[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-' \
            r'[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}'

    def to_python(self, value):
        # The regex guarantees the string is a valid UUID literal.
        return uuid.UUID(value)

    def to_url(self, value):
        return str(value)
#: the default converter mapping for the map.  Keys are the names usable
#: inside rule placeholders (e.g. ``<int:page>``); a custom ``converters``
#: dict passed to :class:`Map` can extend or override these entries.
DEFAULT_CONVERTERS = {
    'default':          UnicodeConverter,
    'string':           UnicodeConverter,
    'any':              AnyConverter,
    'path':             PathConverter,
    'int':              IntegerConverter,
    'float':            FloatConverter,
    'uuid':             UUIDConverter,
}
class Map(object):
    """The map class stores all the URL rules and some configuration
    parameters.  Some of the configuration values are only stored on the
    `Map` instance since those affect all rules, others are just defaults
    and can be overridden for each rule.  Note that you have to specify all
    arguments besides the `rules` as keyword arguments!

    :param rules: sequence of url rules for this map.
    :param default_subdomain: The default subdomain for rules without a
                              subdomain defined.
    :param charset: charset of the url. defaults to ``"utf-8"``
    :param strict_slashes: Take care of trailing slashes.
    :param redirect_defaults: This will redirect to the default rule if it
                              wasn't visited that way. This helps creating
                              unique URLs.
    :param converters: A dict of converters that adds additional converters
                       to the list of converters. If you redefine one
                       converter this will override the original one.
    :param sort_parameters: If set to `True` the url parameters are sorted.
                            See `url_encode` for more details.
    :param sort_key: The sort key function for `url_encode`.
    :param encoding_errors: the error method to use for decoding
    :param host_matching: if set to `True` it enables the host matching
                          feature and disables the subdomain one.  If
                          enabled the `host` parameter to rules is used
                          instead of the `subdomain` one.

    .. versionadded:: 0.5
        `sort_parameters` and `sort_key` were added.

    .. versionadded:: 0.7
        `encoding_errors` and `host_matching` were added.
    """

    #: .. versionadded:: 0.6
    #: a dict of default converters to be used.
    default_converters = ImmutableDict(DEFAULT_CONVERTERS)

    def __init__(self, rules=None, default_subdomain='', charset='utf-8',
                 strict_slashes=True, redirect_defaults=True,
                 converters=None, sort_parameters=False, sort_key=None,
                 encoding_errors='replace', host_matching=False):
        # _rules holds every bound rule; _rules_by_endpoint indexes the
        # same rules by endpoint for building.  _remap marks the indexes
        # as dirty so update() re-sorts them lazily before use.
        self._rules = []
        self._rules_by_endpoint = {}
        self._remap = True
        self._remap_lock = Lock()

        self.default_subdomain = default_subdomain
        self.charset = charset
        self.encoding_errors = encoding_errors
        self.strict_slashes = strict_slashes
        self.redirect_defaults = redirect_defaults
        self.host_matching = host_matching

        # Start from the class-level defaults; user-supplied converters
        # extend or override them per map instance.
        self.converters = self.default_converters.copy()
        if converters:
            self.converters.update(converters)

        self.sort_parameters = sort_parameters
        self.sort_key = sort_key

        for rulefactory in rules or ():
            self.add(rulefactory)

    def is_endpoint_expecting(self, endpoint, *arguments):
        """Iterate over all rules and check if the endpoint expects
        the arguments provided.  This is for example useful if you have
        some URLs that expect a language code and others that do not and
        you want to wrap the builder a bit so that the current language
        code is automatically added if not provided but endpoints expect
        it.

        :param endpoint: the endpoint to check.
        :param arguments: this function accepts one or more arguments
                          as positional arguments.  Each one of them is
                          checked.
        """
        self.update()
        arguments = set(arguments)
        # True if at least one rule for the endpoint accepts all of
        # the given argument names.
        for rule in self._rules_by_endpoint[endpoint]:
            if arguments.issubset(rule.arguments):
                return True
        return False

    def iter_rules(self, endpoint=None):
        """Iterate over all rules or the rules of an endpoint.

        :param endpoint: if provided only the rules for that endpoint
                         are returned.
        :return: an iterator
        """
        self.update()
        if endpoint is not None:
            return iter(self._rules_by_endpoint[endpoint])
        return iter(self._rules)

    def add(self, rulefactory):
        """Add a new rule or factory to the map and bind it.  Requires that the
        rule is not bound to another map.

        :param rulefactory: a :class:`Rule` or :class:`RuleFactory`
        """
        # A factory may yield several concrete rules; each one is bound
        # to this map and indexed by its endpoint.
        for rule in rulefactory.get_rules(self):
            rule.bind(self)
            self._rules.append(rule)
            self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
        # Mark the indexes dirty so update() re-sorts before next use.
        self._remap = True

    def bind(self, server_name, script_name=None, subdomain=None,
             url_scheme='http', default_method='GET', path_info=None,
             query_args=None):
        """Return a new :class:`MapAdapter` with the details specified to the
        call.  Note that `script_name` will default to ``'/'`` if not further
        specified or `None`.  The `server_name` at least is a requirement
        because the HTTP RFC requires absolute URLs for redirects and so all
        redirect exceptions raised by Werkzeug will contain the full canonical
        URL.

        If no path_info is passed to :meth:`match` it will use the default path
        info passed to bind.  While this doesn't really make sense for
        manual bind calls, it's useful if you bind a map to a WSGI
        environment which already contains the path info.

        `subdomain` will default to the `default_subdomain` for this map if
        not defined.  If there is no `default_subdomain` you cannot use the
        subdomain feature.

        .. versionadded:: 0.7
           `query_args` added

        .. versionadded:: 0.8
           `query_args` can now also be a string.
        """
        server_name = server_name.lower()
        if self.host_matching:
            # Host matching and subdomain matching are mutually exclusive.
            if subdomain is not None:
                raise RuntimeError('host matching enabled and a '
                                   'subdomain was provided')
        elif subdomain is None:
            subdomain = self.default_subdomain
        if script_name is None:
            script_name = '/'
        # Punycode-encode the host so non-ASCII domains work in URLs.
        server_name = _encode_idna(server_name)
        return MapAdapter(self, server_name, script_name, subdomain,
                          url_scheme, path_info, default_method, query_args)

    def bind_to_environ(self, environ, server_name=None, subdomain=None):
        """Like :meth:`bind` but you can pass it an WSGI environment and it
        will fetch the information from that dictionary.  Note that because of
        limitations in the protocol there is no way to get the current
        subdomain and real `server_name` from the environment.  If you don't
        provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
        `HTTP_HOST` if provided) as used `server_name` with disabled subdomain
        feature.

        If `subdomain` is `None` but an environment and a server name is
        provided it will calculate the current subdomain automatically.
        Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
        in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
        subdomain will be ``'staging.dev'``.

        If the object passed as environ has an environ attribute, the value of
        this attribute is used instead.  This allows you to pass request
        objects.  Additionally `PATH_INFO` added as a default of the
        :class:`MapAdapter` so that you don't have to pass the path info to
        the match method.

        .. versionchanged:: 0.5
            previously this method accepted a bogus `calculate_subdomain`
            parameter that did not have any effect.  It was removed because
            of that.

        .. versionchanged:: 0.8
           This will no longer raise a ValueError when an unexpected server
           name was passed.

        :param environ: a WSGI environment.
        :param server_name: an optional server name hint (see above).
        :param subdomain: optionally the current subdomain (see above).
        """
        environ = _get_environ(environ)
        if server_name is None:
            if 'HTTP_HOST' in environ:
                server_name = environ['HTTP_HOST']
            else:
                server_name = environ['SERVER_NAME']
                # Only append the port when it is not the default for the
                # scheme, mirroring how HTTP_HOST would look.
                if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
                   in (('https', '443'), ('http', '80')):
                    server_name += ':' + environ['SERVER_PORT']
        elif subdomain is None and not self.host_matching:
            server_name = server_name.lower()
            if 'HTTP_HOST' in environ:
                wsgi_server_name = environ.get('HTTP_HOST')
            else:
                wsgi_server_name = environ.get('SERVER_NAME')
                if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
                   in (('https', '443'), ('http', '80')):
                    wsgi_server_name += ':' + environ['SERVER_PORT']
            wsgi_server_name = wsgi_server_name.lower()
            # Derive the subdomain by stripping the provided server name
            # suffix from the host the request actually arrived on.
            cur_server_name = wsgi_server_name.split('.')
            real_server_name = server_name.split('.')
            offset = -len(real_server_name)
            if cur_server_name[offset:] != real_server_name:
                # This can happen even with valid configs if the server was
                # accessed directly by IP address under some situations.
                # Instead of raising an exception like in Werkzeug 0.7 or
                # earlier we go by an invalid subdomain which will result
                # in a 404 error on matching.
                subdomain = '<invalid>'
            else:
                subdomain = '.'.join(filter(None, cur_server_name[:offset]))

        def _get_wsgi_string(name):
            # Decode a WSGI environ value into the map's charset.
            val = environ.get(name)
            if val is not None:
                return wsgi_decoding_dance(val, self.charset)

        script_name = _get_wsgi_string('SCRIPT_NAME')
        path_info = _get_wsgi_string('PATH_INFO')
        query_args = _get_wsgi_string('QUERY_STRING')
        return Map.bind(self, server_name, script_name,
                        subdomain, environ['wsgi.url_scheme'],
                        environ['REQUEST_METHOD'], path_info,
                        query_args=query_args)

    def update(self):
        """Called before matching and building to keep the compiled rules
        in the correct order after things changed.
        """
        # Fast path without taking the lock; re-check inside the lock so
        # concurrent callers do not sort twice (double-checked locking).
        if not self._remap:
            return

        with self._remap_lock:
            if not self._remap:
                return

            self._rules.sort(key=lambda x: x.match_compare_key())
            for rules in itervalues(self._rules_by_endpoint):
                rules.sort(key=lambda x: x.build_compare_key())
            self._remap = False

    def __repr__(self):
        rules = self.iter_rules()
        return '%s(%s)' % (self.__class__.__name__, pformat(list(rules)))
class MapAdapter(object):
    """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
    the URL matching and building based on runtime information.
    """

    def __init__(self, map, server_name, script_name, subdomain,
                 url_scheme, path_info, default_method, query_args=None):
        self.map = map
        self.server_name = to_unicode(server_name)
        script_name = to_unicode(script_name)
        # Normalize the script name to always end in a slash so that
        # joining with relative paths in build() works.
        if not script_name.endswith(u'/'):
            script_name += u'/'
        self.script_name = script_name
        self.subdomain = to_unicode(subdomain)
        self.url_scheme = to_unicode(url_scheme)
        self.path_info = to_unicode(path_info)
        self.default_method = to_unicode(default_method)
        self.query_args = query_args

    def dispatch(self, view_func, path_info=None, method=None,
                 catch_http_exceptions=False):
        """Does the complete dispatching process.  `view_func` is called with
        the endpoint and a dict with the values for the view.  It should
        look up the view function, call it, and return a response object
        or WSGI application.  http exceptions are not caught by default
        so that applications can display nicer error messages by just
        catching them by hand.  If you want to stick with the default
        error messages you can pass it ``catch_http_exceptions=True`` and
        it will catch the http exceptions.

        Here a small example for the dispatch usage::

            from werkzeug.wrappers import Request, Response
            from werkzeug.wsgi import responder
            from werkzeug.routing import Map, Rule

            def on_index(request):
                return Response('Hello from the index')

            url_map = Map([Rule('/', endpoint='index')])
            views = {'index': on_index}

            @responder
            def application(environ, start_response):
                request = Request(environ)
                urls = url_map.bind_to_environ(environ)
                return urls.dispatch(lambda e, v: views[e](request, **v),
                                     catch_http_exceptions=True)

        Keep in mind that this method might return exception objects, too, so
        use :class:`Response.force_type` to get a response object.

        :param view_func: a function that is called with the endpoint as
                          first argument and the value dict as second.  Has
                          to dispatch to the actual view function with this
                          information.  (see above)
        :param path_info: the path info to use for matching.  Overrides the
                          path info specified on binding.
        :param method: the HTTP method used for matching.  Overrides the
                       method specified on binding.
        :param catch_http_exceptions: set to `True` to catch any of the
                                      werkzeug :class:`HTTPException`\s.
        """
        try:
            try:
                endpoint, args = self.match(path_info, method)
            except RequestRedirect as e:
                # Redirects are returned (not raised) so they can be used
                # directly as WSGI responses by the caller.
                return e
            return view_func(endpoint, args)
        except HTTPException as e:
            if catch_http_exceptions:
                return e
            raise

    def match(self, path_info=None, method=None, return_rule=False,
              query_args=None):
        """The usage is simple: you just pass the match method the current
        path info as well as the method (which defaults to `GET`).  The
        following things can then happen:

        - you receive a `NotFound` exception that indicates that no URL is
          matching.  A `NotFound` exception is also a WSGI application you
          can call to get a default page not found page (happens to be the
          same object as `werkzeug.exceptions.NotFound`)

        - you receive a `MethodNotAllowed` exception that indicates that there
          is a match for this URL but not for the current request method.
          This is useful for RESTful applications.

        - you receive a `RequestRedirect` exception with a `new_url`
          attribute.  This exception is used to notify you that Werkzeug
          requests a redirect from your WSGI application.  This is for
          example the case if you request ``/foo`` although the correct
          URL is ``/foo/``.  You can use the `RequestRedirect` instance as
          response-like object similar to all other subclasses of
          `HTTPException`.

        - you get a tuple in the form ``(endpoint, arguments)`` if there is
          a match (unless `return_rule` is True, in which case you get a tuple
          in the form ``(rule, arguments)``)

        If the path info is not passed to the match method the default path
        info of the map is used (defaults to the root URL if not defined
        explicitly).

        All of the exceptions raised are subclasses of `HTTPException` so they
        can be used as WSGI responses.  They will all render generic error or
        redirect pages.

        Here is a small example for matching:

        >>> m = Map([
        ...     Rule('/', endpoint='index'),
        ...     Rule('/downloads/', endpoint='downloads/index'),
        ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
        ... ])
        >>> urls = m.bind("example.com", "/")
        >>> urls.match("/", "GET")
        ('index', {})
        >>> urls.match("/downloads/42")
        ('downloads/show', {'id': 42})

        And here is what happens on redirect and missing URLs:

        >>> urls.match("/downloads")
        Traceback (most recent call last):
          ...
        RequestRedirect: http://example.com/downloads/
        >>> urls.match("/missing")
        Traceback (most recent call last):
          ...
        NotFound: 404 Not Found

        :param path_info: the path info to use for matching.  Overrides the
                          path info specified on binding.
        :param method: the HTTP method used for matching.  Overrides the
                       method specified on binding.
        :param return_rule: return the rule that matched instead of just the
                            endpoint (defaults to `False`).
        :param query_args: optional query arguments that are used for
                           automatic redirects as string or dictionary.  It's
                           currently not possible to use the query arguments
                           for URL matching.

        .. versionadded:: 0.6
           `return_rule` was added.

        .. versionadded:: 0.7
           `query_args` was added.

        .. versionchanged:: 0.8
           `query_args` can now also be a string.
        """
        self.map.update()
        if path_info is None:
            path_info = self.path_info
        else:
            path_info = to_unicode(path_info, self.map.charset)
        if query_args is None:
            query_args = self.query_args
        method = (method or self.default_method).upper()

        # Rules match against "<domain part>|<path>" so host/subdomain and
        # path are matched in a single regex pass.
        path = u'%s|%s' % (
            self.map.host_matching and self.server_name or self.subdomain,
            path_info and '/%s' % path_info.lstrip('/')
        )

        # Collects the methods of rules whose path matched but whose
        # method did not, to produce an accurate 405 at the end.
        have_match_for = set()
        for rule in self.map._rules:
            try:
                rv = rule.match(path)
            except RequestSlash:
                # Rule wants a trailing slash: redirect to the slashed URL.
                raise RequestRedirect(self.make_redirect_url(
                    url_quote(path_info, self.map.charset,
                              safe='/:|+') + '/', query_args))
            except RequestAliasRedirect as e:
                # Alias rule matched: redirect to the canonical URL.
                raise RequestRedirect(self.make_alias_redirect_url(
                    path, rule.endpoint, e.matched_values, method, query_args))
            if rv is None:
                continue
            if rule.methods is not None and method not in rule.methods:
                have_match_for.update(rule.methods)
                continue
            if self.map.redirect_defaults:
                redirect_url = self.get_default_redirect(rule, method, rv,
                                                         query_args)
                if redirect_url is not None:
                    raise RequestRedirect(redirect_url)
            if rule.redirect_to is not None:
                if isinstance(rule.redirect_to, string_types):
                    # Template string: substitute ``%(name)s``-style rule
                    # placeholders using the matched values.
                    def _handle_match(match):
                        value = rv[match.group(1)]
                        return rule._converters[match.group(1)].to_url(value)
                    redirect_url = _simple_rule_re.sub(_handle_match,
                                                       rule.redirect_to)
                else:
                    # Callable target: build the redirect URL dynamically.
                    redirect_url = rule.redirect_to(self, **rv)
                raise RequestRedirect(str(url_join('%s://%s%s%s' % (
                    self.url_scheme or 'http',
                    self.subdomain and self.subdomain + '.' or '',
                    self.server_name,
                    self.script_name
                ), redirect_url)))
            if return_rule:
                return rule, rv
            else:
                return rule.endpoint, rv
        if have_match_for:
            raise MethodNotAllowed(valid_methods=list(have_match_for))
        raise NotFound()

    def test(self, path_info=None, method=None):
        """Test if a rule would match.  Works like `match` but returns `True`
        if the URL matches, or `False` if it does not exist.

        :param path_info: the path info to use for matching.  Overrides the
                          path info specified on binding.
        :param method: the HTTP method used for matching.  Overrides the
                       method specified on binding.
        """
        try:
            self.match(path_info, method)
        except RequestRedirect:
            # A redirect still means the URL exists.
            pass
        except HTTPException:
            return False
        return True

    def allowed_methods(self, path_info=None):
        """Returns the valid methods that match for a given path.

        .. versionadded:: 0.7
        """
        try:
            # '--' is never a real HTTP method, so a matching path is
            # guaranteed to raise MethodNotAllowed with the valid methods.
            self.match(path_info, method='--')
        except MethodNotAllowed as e:
            return e.valid_methods
        except HTTPException as e:
            pass
        return []

    def get_host(self, domain_part):
        """Figures out the full host name for the given domain part.  The
        domain part is a subdomain in case host matching is disabled or
        a full host name.
        """
        if self.map.host_matching:
            if domain_part is None:
                return self.server_name
            return to_unicode(domain_part, 'ascii')
        subdomain = domain_part
        if subdomain is None:
            subdomain = self.subdomain
        else:
            subdomain = to_unicode(subdomain, 'ascii')
        return (subdomain and subdomain + u'.' or u'') + self.server_name

    def get_default_redirect(self, rule, method, values, query_args):
        """A helper that returns the URL to redirect to if it finds one.
        This is used for default redirecting only.

        :internal:
        """
        assert self.map.redirect_defaults
        for r in self.map._rules_by_endpoint[rule.endpoint]:
            # every rule that comes after this one, including ourself
            # has a lower priority for the defaults.  We order the ones
            # with the highest priority up for building.
            if r is rule:
                break
            if r.provides_defaults_for(rule) and \
               r.suitable_for(values, method):
                values.update(r.defaults)
                domain_part, path = r.build(values)
                return self.make_redirect_url(
                    path, query_args, domain_part=domain_part)

    def encode_query_args(self, query_args):
        # Accept both a pre-encoded string and a mapping of arguments.
        if not isinstance(query_args, string_types):
            query_args = url_encode(query_args, self.map.charset)
        return query_args

    def make_redirect_url(self, path_info, query_args=None, domain_part=None):
        """Creates a redirect URL.

        :internal:
        """
        suffix = ''
        if query_args:
            suffix = '?' + self.encode_query_args(query_args)
        return str('%s://%s/%s%s' % (
            self.url_scheme or 'http',
            self.get_host(domain_part),
            posixpath.join(self.script_name[:-1].lstrip('/'),
                           path_info.lstrip('/')),
            suffix
        ))

    def make_alias_redirect_url(self, path, endpoint, values, method, query_args):
        """Internally called to make an alias redirect URL."""
        url = self.build(endpoint, values, method, append_unknown=False,
                         force_external=True)
        if query_args:
            url += '?' + self.encode_query_args(query_args)
        # An alias redirecting to itself would loop forever.
        assert url != path, 'detected invalid alias setting.  No canonical ' \
            'URL found'
        return url

    def _partial_build(self, endpoint, values, method, append_unknown):
        """Helper for :meth:`build`.  Returns subdomain and path for the
        rule that accepts this endpoint, values and method.

        :internal:
        """
        # in case the method is none, try with the default method first
        if method is None:
            rv = self._partial_build(endpoint, values, self.default_method,
                                     append_unknown)
            if rv is not None:
                return rv

        # default method did not match or a specific method is passed,
        # check all and go with first result.
        for rule in self.map._rules_by_endpoint.get(endpoint, ()):
            if rule.suitable_for(values, method):
                rv = rule.build(values, append_unknown)
                if rv is not None:
                    return rv

    def build(self, endpoint, values=None, method=None, force_external=False,
              append_unknown=True):
        """Building URLs works pretty much the other way round.  Instead of
        `match` you call `build` and pass it the endpoint and a dict of
        arguments for the placeholders.

        The `build` function also accepts an argument called `force_external`
        which, if you set it to `True` will force external URLs. Per default
        external URLs (include the server name) will only be used if the
        target URL is on a different subdomain.

        >>> m = Map([
        ...     Rule('/', endpoint='index'),
        ...     Rule('/downloads/', endpoint='downloads/index'),
        ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
        ... ])
        >>> urls = m.bind("example.com", "/")
        >>> urls.build("index", {})
        '/'
        >>> urls.build("downloads/show", {'id': 42})
        '/downloads/42'
        >>> urls.build("downloads/show", {'id': 42}, force_external=True)
        'http://example.com/downloads/42'

        Because URLs cannot contain non ASCII data you will always get
        bytestrings back.  Non ASCII characters are urlencoded with the
        charset defined on the map instance.

        Additional values are converted to unicode and appended to the URL as
        URL querystring parameters:

        >>> urls.build("index", {'q': 'My Searchstring'})
        '/?q=My+Searchstring'

        If a rule does not exist when building a `BuildError` exception is
        raised.

        The build method accepts an argument called `method` which allows you
        to specify the method you want to have an URL built for if you have
        different methods for the same endpoint specified.

        .. versionadded:: 0.6
           the `append_unknown` parameter was added.

        :param endpoint: the endpoint of the URL to build.
        :param values: the values for the URL to build.  Unhandled values are
                       appended to the URL as query parameters.
        :param method: the HTTP method for the rule if there are different
                       URLs for different methods on the same endpoint.
        :param force_external: enforce full canonical external URLs. If the URL
                               scheme is not provided, this will generate
                               a protocol-relative URL.
        :param append_unknown: unknown parameters are appended to the generated
                               URL as query string argument.  Disable this
                               if you want the builder to ignore those.
        """
        self.map.update()
        if values:
            # Normalize to a MultiDict and drop None values so they do
            # not end up as query string arguments.
            if not isinstance(values, MultiDict):
                values = MultiDict(values)
            valueiter = iteritems(values, multi=True)
            values = MultiDict((k, v) for k, v in valueiter if v is not None)
        else:
            values = {}

        rv = self._partial_build(endpoint, values, method, append_unknown)
        if rv is None:
            raise BuildError(endpoint, values, method)
        domain_part, path = rv

        host = self.get_host(domain_part)

        # shortcut this.
        if not force_external and (
            (self.map.host_matching and host == self.server_name) or
            (not self.map.host_matching and domain_part == self.subdomain)):
            return str(url_join(self.script_name, './' + path.lstrip('/')))
        return str('%s//%s%s/%s' % (
            self.url_scheme + ':' if self.url_scheme else '',
            host,
            self.script_name[:-1],
            path.lstrip('/')
        ))
|
vathpela/blivet | refs/heads/master | blivet/formats/multipath.py | 5 | # multipath.py
# multipath device formats
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Any Red Hat trademarks that are incorporated in the source code or
# documentation are not subject to the GNU General Public License and
# may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Peter Jones <pjones@redhat.com>
#
from ..storage_log import log_method_call
from ..errors import MultipathMemberError
from ..i18n import N_
from . import DeviceFormat, register_device_format
import logging
log = logging.getLogger("blivet")
class MultipathMember(DeviceFormat):
    """ A multipath member disk. """
    _type = "multipath_member"
    _name = N_("multipath member device")
    _udev_types = ["mpath_member"]
    _supported = True                        # is supported
    _packages = ["device-mapper-multipath"]  # required packages
    _hidden = True                           # hide devices with this formatting?

    def __init__(self, **kwargs):
        """
            :keyword device: path to the underlying device (required)
            :keyword uuid: this format's UUID
            :keyword exists: whether this is an existing format
            :type exists: bool
        """
        log_method_call(self, **kwargs)
        DeviceFormat.__init__(self, **kwargs)

        # Initialize the attribute that will hold the block object.
        self._member = None

    def __repr__(self):
        """Extend the base repr with the associated member object."""
        base_repr = DeviceFormat.__repr__(self)
        return base_repr + (" member = %r" % (self.member,))

    def _get_member(self):
        return self._member

    def _set_member(self, member):
        self._member = member

    # Accessors go through the _get/_set methods (via lambdas) so that
    # subclasses overriding them are respected.
    member = property(fget=lambda inst: inst._get_member(),
                      fset=lambda inst, value: inst._set_member(value))

    def create(self, **kwargs):
        """Multipath members are discovered, never created by us."""
        log_method_call(self, device=self.device,
                        type=self.type, status=self.status)
        raise MultipathMemberError("creation of multipath members is non-sense")

    def destroy(self, **kwargs):
        """Multipath members are discovered, never destroyed by us."""
        log_method_call(self, device=self.device,
                        type=self.type, status=self.status)
        raise MultipathMemberError("destruction of multipath members is non-sense")
register_device_format(MultipathMember)
|
prarthitm/edxplatform | refs/heads/master | common/djangoapps/microsite_configuration/tests/test_admin.py | 46 | """
Tests for microsite admin
"""
from django.contrib.admin.sites import AdminSite
from django.http import HttpRequest
from microsite_configuration.admin import MicrositeAdmin
from microsite_configuration.models import Microsite
from microsite_configuration.tests.tests import DatabaseMicrositeTestCase
class MicrositeAdminTests(DatabaseMicrositeTestCase):
    """
    Test class for MicrositeAdmin
    """

    def setUp(self):
        super(MicrositeAdminTests, self).setUp()
        # Build a standalone admin site and register the Microsite admin
        # against it, plus a bare request for get_form() calls.
        self.adminsite = AdminSite()
        self.microsite_admin = MicrositeAdmin(Microsite, self.adminsite)
        self.request = HttpRequest()

    def test_fields_in_admin_form(self):
        """
        Tests presence of form fields for Microsite.
        """
        form_class = self.microsite_admin.get_form(self.request, self.microsite)
        expected_fields = ["site", "key", "values"]
        self.assertEqual(list(form_class.base_fields), expected_fields)

    def test_save_action_admin_form(self):
        """
        Tests save action for Microsite model form.
        """
        new_values = {
            "domain_prefix": "test-site-new",
            "platform_name": "Test Site New",
        }
        form_data = {
            "key": self.microsite.key,
            "site": self.microsite.site.id,
            "values": new_values,
        }
        form_class = self.microsite_admin.get_form(self.request)
        microsite_form = form_class(instance=self.microsite, data=form_data)
        self.assertTrue(microsite_form.is_valid())
        microsite_form.save()
        # Reload from the database to verify the values were persisted.
        saved_microsite = Microsite.objects.get(key=self.microsite.key)
        self.assertEqual(saved_microsite.values, new_values)
|
ADemonisis/mitmproxy | refs/heads/master | libmproxy/contrib/jsbeautifier/unpackers/urlencode.py | 221 | #
# Trivial bookmarklet/escaped script detector for the javascript beautifier
# written by Einar Lielmanis <einar@jsbeautifier.org>
# rewritten in Python by Stefano Sanfilippo <a.little.coder@gmail.com>
#
# Will always return valid javascript: if `detect()` is false, `code` is
# returned, unmodified.
#
# usage:
#
# some_string = urlencode.unpack(some_string)
#
"""Bookmarklet/escaped script unpacker."""
# Python 2 retrocompatibility
# pylint: disable=F0401
# pylint: disable=E0611
try:
from urllib import unquote_plus
except ImportError:
from urllib.parse import unquote_plus
PRIORITY = 0
def detect(code):
    """Detects if a scriptlet is urlencoded."""
    # A urlencoded scriptlet contains no literal spaces: they show up as
    # %20 instead (or the text is otherwise dense with % escapes).
    if ' ' in code:
        return False
    return '%20' in code or code.count('%') > 3
def unpack(code):
    """URL decode `code` source string."""
    # Leave non-urlencoded input untouched so the result stays valid JS.
    if not detect(code):
        return code
    return unquote_plus(code)
|
brianwoo/django-tutorial | refs/heads/master | ENV/lib/python2.7/site-packages/django/utils/decorators.py | 118 | "Functions that help with dynamically creating decorators for views."
try:
from contextlib import ContextDecorator
except ImportError:
ContextDecorator = None
from functools import WRAPPER_ASSIGNMENTS, update_wrapper, wraps
from django.utils import six
class classonlymethod(classmethod):
    """A ``classmethod`` that refuses to be accessed through an instance."""
    def __get__(self, instance, owner):
        # Class-level access falls through to the normal classmethod
        # descriptor; instance access is a programming error.
        if instance is None:
            return super(classonlymethod, self).__get__(instance, owner)
        raise AttributeError("This method is available only on the class, not on instances.")
def method_decorator(decorator):
    """
    Converts a function decorator into a method decorator

    Returns a decorator (``_dec``) that, applied to a method, wraps it so
    that `decorator` sees a plain function (without ``self``) at call time.
    """
    # 'func' is a function at the time it is passed to _dec, but will eventually
    # be a method of the class it is defined on.
    def _dec(func):
        def _wrapper(self, *args, **kwargs):
            # Re-bind func to self on every call and decorate the bound
            # version, so 'decorator' never has to know about 'self'.
            @decorator
            def bound_func(*args2, **kwargs2):
                return func.__get__(self, type(self))(*args2, **kwargs2)
            # bound_func has the signature that 'decorator' expects i.e.  no
            # 'self' argument, but it is a closure over self so it can call
            # 'func' correctly.
            return bound_func(*args, **kwargs)
        # In case 'decorator' adds attributes to the function it decorates, we
        # want to copy those.  We don't have access to bound_func in this scope,
        # but we can cheat by using it on a dummy function.
        @decorator
        def dummy(*args, **kwargs):
            pass
        update_wrapper(_wrapper, dummy)
        # Need to preserve any existing attributes of 'func', including the name.
        update_wrapper(_wrapper, func)
        return _wrapper
    update_wrapper(_dec, decorator, assigned=available_attrs(decorator))
    # Change the name to aid debugging.
    if hasattr(decorator, '__name__'):
        _dec.__name__ = 'method_decorator(%s)' % decorator.__name__
    else:
        # 'decorator' may be a callable instance (e.g. a class-based
        # decorator) without a __name__; fall back to its class name.
        _dec.__name__ = 'method_decorator(%s)' % decorator.__class__.__name__
    return _dec
def decorator_from_middleware_with_args(middleware_class):
    """
    Like decorator_from_middleware, but returns a function
    that accepts the arguments to be passed to the middleware_class.
    Use like::

         cache_page = decorator_from_middleware_with_args(CacheMiddleware)
         # ...

         @cache_page(3600)
         def my_view(request):
             # ...
    """
    # Unlike decorator_from_middleware, the factory is returned uncalled so
    # the caller supplies the middleware constructor arguments.
    return make_middleware_decorator(middleware_class)
def decorator_from_middleware(middleware_class):
    """
    Given a middleware class (not an instance), returns a view decorator. This
    lets you use middleware functionality on a per-view basis. The middleware
    is created with no params passed.
    """
    # The trailing () instantiates the decorator factory immediately with
    # no middleware constructor arguments.
    return make_middleware_decorator(middleware_class)()
def available_attrs(fn):
    """
    Return the list of functools-wrappable attributes on a callable.
    This is required as a workaround for http://bugs.python.org/issue3445
    under Python 2.
    """
    if not six.PY3:
        # Python 2: some wrapper attributes (e.g. __name__ on partials)
        # may be missing, so only keep the ones that actually exist.
        return tuple(attr for attr in WRAPPER_ASSIGNMENTS if hasattr(fn, attr))
    return WRAPPER_ASSIGNMENTS
def make_middleware_decorator(middleware_class):
    """
    Build a decorator factory from a middleware class.

    Calling the returned ``_make_decorator`` instantiates the middleware
    (with any arguments given) and yields a view decorator that runs the
    middleware's hooks around the wrapped view in the standard order:
    process_request -> process_view -> view -> process_exception /
    process_template_response / process_response.
    """
    def _make_decorator(*m_args, **m_kwargs):
        # One middleware instance is shared by every call to the view.
        middleware = middleware_class(*m_args, **m_kwargs)
        def _decorator(view_func):
            @wraps(view_func, assigned=available_attrs(view_func))
            def _wrapped_view(request, *args, **kwargs):
                # A non-None result from process_request/process_view
                # short-circuits the view, just like real middleware.
                if hasattr(middleware, 'process_request'):
                    result = middleware.process_request(request)
                    if result is not None:
                        return result
                if hasattr(middleware, 'process_view'):
                    result = middleware.process_view(request, view_func, args, kwargs)
                    if result is not None:
                        return result
                try:
                    response = view_func(request, *args, **kwargs)
                except Exception as e:
                    # Give the middleware a chance to convert the exception
                    # into a response; otherwise re-raise it.
                    if hasattr(middleware, 'process_exception'):
                        result = middleware.process_exception(request, e)
                        if result is not None:
                            return result
                    raise
                if hasattr(response, 'render') and callable(response.render):
                    if hasattr(middleware, 'process_template_response'):
                        response = middleware.process_template_response(request, response)
                    # Defer running of process_response until after the template
                    # has been rendered:
                    if hasattr(middleware, 'process_response'):
                        callback = lambda response: middleware.process_response(request, response)
                        response.add_post_render_callback(callback)
                else:
                    if hasattr(middleware, 'process_response'):
                        return middleware.process_response(request, response)
                return response
            return _wrapped_view
        return _decorator
    return _make_decorator
if ContextDecorator is None:
    # ContextDecorator was introduced in Python 3.2
    # See https://docs.python.org/3/library/contextlib.html#contextlib.ContextDecorator
    class ContextDecorator(object):
        """
        A base class that enables a context manager to also be used as a decorator.
        """
        def __call__(self, func):
            @wraps(func, assigned=available_attrs(func))
            def inner(*args, **kwargs):
                # Run the decorated function inside the context manager
                # (self), entering on call and exiting on return/raise.
                with self:
                    return func(*args, **kwargs)
            return inner
|
townbull/keystone-dtrust | refs/heads/dev/domain-trusts | keystone/tests/test_sql_upgrade.py | 2 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
To run these tests against a live database:
1. Modify the file `keystone/tests/backend_sql.conf` to use the connection for
your live database
2. Set up a blank, live database.
3. run the tests using
./run_tests.sh -N test_sql_upgrade
WARNING::
Your database will be wiped.
Do not do this against a Database with valuable data as
all data will be lost.
"""
import copy
import json
import uuid
from migrate.versioning import api as versioning_api
import sqlalchemy
from keystone.common import sql
from keystone.common.sql import migration
from keystone.common import utils
from keystone import config
from keystone import credential
from keystone import exception
from keystone import tests
from keystone.tests import default_fixtures
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class SqlMigrateBase(tests.TestCase):
    """Base harness for SQL schema-migration tests.

    setUp() builds a private sqlalchemy engine, installs it as the global
    engine, and places the migrate repository under version control at
    version 0; upgrade()/downgrade() then walk the schema between versions.
    """

    def initialize_sql(self):
        # Fresh MetaData bound to the shared engine; called again after
        # migrations (see assertTableColumns) so reflected tables aren't
        # stale.
        self.metadata = sqlalchemy.MetaData()
        self.metadata.bind = self.engine

    _config_file_list = [tests.etcdir('keystone.conf.sample'),
                         tests.testsdir('test_overrides.conf'),
                         tests.testsdir('backend_sql.conf')]

    #override this to specify the complete list of configuration files
    def config_files(self):
        return self._config_file_list

    def repo_package(self):
        # None selects the default migrate repository.
        return None

    def setUp(self):
        super(SqlMigrateBase, self).setUp()
        self.config(self.config_files())
        self.base = sql.Base()

        # create and share a single sqlalchemy engine for testing
        self.engine = self.base.get_engine(allow_global_engine=False)
        sql.core.set_global_engine(self.engine)
        self.Session = self.base.get_sessionmaker(engine=self.engine,
                                                  autocommit=False)

        self.initialize_sql()
        self.repo_path = migration.find_migrate_repo(self.repo_package())
        # Put the repo under version control starting at schema version 0.
        self.schema = versioning_api.ControlledSchema.create(
            self.engine,
            self.repo_path, 0)

        # auto-detect the highest available schema version in the migrate_repo
        self.max_version = self.schema.repository.version().version

    def tearDown(self):
        sqlalchemy.orm.session.Session.close_all()
        table = sqlalchemy.Table("migrate_version", self.metadata,
                                 autoload=True)
        # Fully downgrade, then drop migrate's own bookkeeping table so
        # the next test starts from a clean database.
        self.downgrade(0)
        table.drop(self.engine, checkfirst=True)
        sql.core.set_global_engine(None)
        super(SqlMigrateBase, self).tearDown()

    def select_table(self, name):
        # Reflect the table and return a SELECT over all its columns;
        # raises NoSuchTableError if the table is absent.
        table = sqlalchemy.Table(name,
                                 self.metadata,
                                 autoload=True)
        s = sqlalchemy.select([table])
        return s

    def assertTableExists(self, table_name):
        """Asserts that a given table can be reflected by name."""
        try:
            self.select_table(table_name)
        except sqlalchemy.exc.NoSuchTableError:
            raise AssertionError('Table "%s" does not exist' % table_name)

    def assertTableDoesNotExist(self, table_name):
        """Asserts that a given table cannot be reflected by name."""
        # Switch to a different metadata otherwise you might still
        # detect renamed or dropped tables
        try:
            temp_metadata = sqlalchemy.MetaData()
            temp_metadata.bind = self.engine
            sqlalchemy.Table(table_name, temp_metadata, autoload=True)
        except sqlalchemy.exc.NoSuchTableError:
            pass
        else:
            raise AssertionError('Table "%s" already exists' % table_name)

    def upgrade(self, *args, **kwargs):
        self._migrate(*args, **kwargs)

    def downgrade(self, *args, **kwargs):
        self._migrate(*args, downgrade=True, **kwargs)

    def _migrate(self, version, repository=None, downgrade=False,
                 current_schema=None):
        """Run every changeset between the current and target versions."""
        repository = repository or self.repo_path
        err = ''
        # NOTE: _migrate_version is a private sqlalchemy-migrate API.
        version = versioning_api._migrate_version(self.schema,
                                                  version,
                                                  not downgrade,
                                                  err)
        if not current_schema:
            current_schema = self.schema
        changeset = current_schema.changeset(version)
        for ver, change in changeset:
            self.schema.runchange(ver, change, changeset.step)
        self.assertEqual(self.schema.version, version)

    def assertTableColumns(self, table_name, expected_cols):
        """Asserts that the table contains the expected set of columns."""
        # Re-reflect so schema changes from a just-run migration are seen.
        self.initialize_sql()
        table = self.select_table(table_name)
        actual_cols = [col.name for col in table.columns]
        self.assertEqual(expected_cols, actual_cols, '%s table' % table_name)
class SqlUpgradeTests(SqlMigrateBase):
def test_blank_db_to_start(self):
    """Before any migration runs, no 'user' table should exist."""
    self.assertTableDoesNotExist('user')
def test_start_version_0(self):
    """A freshly version-controlled repository reports schema version 0."""
    self.assertEqual(migration.db_version(), 0, "DB is at version 0")
def test_two_steps_forward_one_step_back(self):
    """You should be able to cleanly undo and re-apply all upgrades.

    Upgrades are run in the following order::

        0 -> 1 -> 0 -> 1 -> 2 -> 1 -> 2 -> 3 -> 2 -> 3 ...
             ^---------^    ^---------^    ^---------^
    """
    for version in range(1, self.max_version + 1):
        self.upgrade(version)
        self.downgrade(version - 1)
        self.upgrade(version)
def test_upgrade_add_initial_tables(self):
    """Migration 1 creates the initial identity tables."""
    self.upgrade(1)
    expected_schema = (
        ("user", ["id", "name", "extra"]),
        ("tenant", ["id", "name", "extra"]),
        ("role", ["id", "name"]),
        ("user_tenant_membership", ["user_id", "tenant_id"]),
        ("metadata", ["user_id", "tenant_id", "data"]),
    )
    for table_name, columns in expected_schema:
        self.assertTableColumns(table_name, columns)
    self.populate_user_table()
def test_upgrade_add_policy(self):
    """Migration 6 introduces the policy table."""
    policy_columns = ['id', 'type', 'blob', 'extra']
    self.upgrade(5)
    self.assertTableDoesNotExist('policy')
    self.upgrade(6)
    self.assertTableExists('policy')
    self.assertTableColumns('policy', policy_columns)
def test_upgrade_normalize_identity(self):
    """Migrations 9-10 promote password/enabled/description to real columns."""
    self.upgrade(8)
    self.populate_user_table()
    self.populate_tenant_table()
    self.upgrade(10)
    self.assertTableColumns("user",
                            ["id", "name", "extra",
                             "password", "enabled"])
    self.assertTableColumns("tenant",
                            ["id", "name", "extra", "description",
                             "enabled"])
    self.assertTableColumns("role", ["id", "name", "extra"])
    self.assertTableColumns("user_tenant_membership",
                            ["user_id", "tenant_id"])
    self.assertTableColumns("metadata", ["user_id", "tenant_id", "data"])
    session = self.Session()
    user_table = sqlalchemy.Table("user",
                                  self.metadata,
                                  autoload=True)
    # String filters like "id='foo'" are legacy SQLAlchemy text clauses;
    # the ids presumably come from the populate_* fixtures - confirm
    # against default_fixtures.
    a_user = session.query(user_table).filter("id='foo'").one()
    self.assertTrue(a_user.enabled)
    a_user = session.query(user_table).filter("id='badguy'").one()
    self.assertFalse(a_user.enabled)
    tenant_table = sqlalchemy.Table("tenant",
                                    self.metadata,
                                    autoload=True)
    a_tenant = session.query(tenant_table).filter("id='baz'").one()
    self.assertEqual(a_tenant.description, 'description')
    session.commit()
    session.close()
def test_upgrade_user_tenant_membership_to_metadata(self):
    """Migration 17 converts membership rows into user_project_metadata.

    A pre-17 user_project_membership row becomes a user_project_metadata
    row carrying the configured default member role.
    """
    self.upgrade(16)
    self.assertTableColumns(
        'user_project_membership',
        ['user_id', 'tenant_id'])
    user = {
        'id': uuid.uuid4().hex,
        'name': uuid.uuid4().hex,
        'domain_id': 'default',
        'extra': json.dumps({}),
    }
    project = {
        'id': uuid.uuid4().hex,
        'name': uuid.uuid4().hex,
        'domain_id': 'default',
        'extra': json.dumps({}),
    }
    # Membership row; the column is still named tenant_id at version 16.
    metadata = {
        'user_id': user['id'],
        'tenant_id': project['id'],
    }
    session = self.Session()
    self.insert_dict(session, 'user', user)
    self.insert_dict(session, 'project', project)
    self.insert_dict(session, 'user_project_membership', metadata)
    self.upgrade(17)
    user_project_metadata_table = sqlalchemy.Table(
        'user_project_metadata', self.metadata, autoload=True)
    result = session.query(user_project_metadata_table).one()
    self.assertEqual(result.user_id, user['id'])
    self.assertEqual(result.project_id, project['id'])
    # The migrated grant carries the default member role.
    self.assertEqual(
        json.loads(result.data),
        {'roles': [CONF.member_role_id]})
def test_normalized_enabled_states(self):
    """Migration 10 normalizes assorted 'enabled' encodings to booleans.

    bool, str, int, None and absent values of extra['enabled'] must all
    map onto the new boolean column; None and absent end up enabled.
    """
    self.upgrade(8)
    # One user per encoding of the legacy extra['enabled'] value.
    users = {
        'bool_enabled_user': {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'password': uuid.uuid4().hex,
            'extra': json.dumps({'enabled': True})},
        'bool_disabled_user': {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'password': uuid.uuid4().hex,
            'extra': json.dumps({'enabled': False})},
        'str_enabled_user': {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'password': uuid.uuid4().hex,
            'extra': json.dumps({'enabled': 'True'})},
        'str_disabled_user': {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'password': uuid.uuid4().hex,
            'extra': json.dumps({'enabled': 'False'})},
        'int_enabled_user': {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'password': uuid.uuid4().hex,
            'extra': json.dumps({'enabled': 1})},
        'int_disabled_user': {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'password': uuid.uuid4().hex,
            'extra': json.dumps({'enabled': 0})},
        'null_enabled_user': {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'password': uuid.uuid4().hex,
            'extra': json.dumps({'enabled': None})},
        'unset_enabled_user': {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'password': uuid.uuid4().hex,
            'extra': json.dumps({})}}
    session = self.Session()
    for user in users.values():
        self.insert_dict(session, 'user', user)
    session.commit()
    self.upgrade(10)
    user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
    q = session.query(user_table, 'enabled')
    user = q.filter_by(id=users['bool_enabled_user']['id']).one()
    self.assertTrue(user.enabled)
    user = q.filter_by(id=users['bool_disabled_user']['id']).one()
    self.assertFalse(user.enabled)
    user = q.filter_by(id=users['str_enabled_user']['id']).one()
    self.assertTrue(user.enabled)
    user = q.filter_by(id=users['str_disabled_user']['id']).one()
    self.assertFalse(user.enabled)
    user = q.filter_by(id=users['int_enabled_user']['id']).one()
    self.assertTrue(user.enabled)
    user = q.filter_by(id=users['int_disabled_user']['id']).one()
    self.assertFalse(user.enabled)
    # None and unset both normalize to enabled=True.
    user = q.filter_by(id=users['null_enabled_user']['id']).one()
    self.assertTrue(user.enabled)
    user = q.filter_by(id=users['unset_enabled_user']['id']).one()
    self.assertTrue(user.enabled)
def test_downgrade_10_to_8(self):
    """Downgrading below 9 drops the normalized identity columns."""
    self.upgrade(10)
    self.populate_user_table(with_pass_enab=True)
    self.populate_tenant_table(with_desc_enab=True)
    self.downgrade(8)
    self.assertTableColumns('user',
                            ['id', 'name', 'extra'])
    self.assertTableColumns('tenant',
                            ['id', 'name', 'extra'])
    session = self.Session()
    user_table = sqlalchemy.Table("user",
                                  self.metadata,
                                  autoload=True)
    # Rows survive the downgrade; names match the default fixtures.
    a_user = session.query(user_table).filter("id='badguy'").one()
    self.assertEqual(a_user.name, default_fixtures.USERS[2]['name'])
    tenant_table = sqlalchemy.Table("tenant",
                                    self.metadata,
                                    autoload=True)
    a_tenant = session.query(tenant_table).filter("id='baz'").one()
    self.assertEqual(a_tenant.name, default_fixtures.TENANTS[1]['name'])
    session.commit()
    session.close()
def test_upgrade_endpoints(self):
    """Upgrading to 13 splits one legacy endpoint into a row per interface."""
    self.upgrade(10)
    service_extra = {
        'name': uuid.uuid4().hex,
    }
    service = {
        'id': uuid.uuid4().hex,
        'type': uuid.uuid4().hex,
        'extra': json.dumps(service_extra),
    }
    # Legacy endpoints stored all three URLs inside the extra blob.
    endpoint_extra = {
        'publicurl': uuid.uuid4().hex,
        'internalurl': uuid.uuid4().hex,
        'adminurl': uuid.uuid4().hex,
    }
    endpoint = {
        'id': uuid.uuid4().hex,
        'region': uuid.uuid4().hex,
        'service_id': service['id'],
        'extra': json.dumps(endpoint_extra),
    }
    session = self.Session()
    self.insert_dict(session, 'service', service)
    self.insert_dict(session, 'endpoint', endpoint)
    session.commit()
    session.close()
    self.upgrade(13)
    self.assertTableColumns(
        'service',
        ['id', 'type', 'extra'])
    self.assertTableColumns(
        'endpoint',
        ['id', 'legacy_endpoint_id', 'interface', 'region', 'service_id',
         'url', 'extra'])
    endpoint_table = sqlalchemy.Table(
        'endpoint', self.metadata, autoload=True)
    session = self.Session()
    # The single legacy row fans out into public/internal/admin rows.
    self.assertEqual(session.query(endpoint_table).count(), 3)
    for interface in ['public', 'internal', 'admin']:
        q = session.query(endpoint_table)
        q = q.filter_by(legacy_endpoint_id=endpoint['id'])
        q = q.filter_by(interface=interface)
        ref = q.one()
        # Each new row gets a fresh id but remembers the legacy one.
        self.assertNotEqual(ref.id, endpoint['id'])
        self.assertEqual(ref.legacy_endpoint_id, endpoint['id'])
        self.assertEqual(ref.interface, interface)
        self.assertEqual(ref.region, endpoint['region'])
        self.assertEqual(ref.service_id, endpoint['service_id'])
        self.assertEqual(ref.url, endpoint_extra['%surl' % interface])
        self.assertEqual(ref.extra, '{}')
    session.commit()
    session.close()
def assertTenantTables(self):
    """Assert the pre-15 (tenant-based) table layout is in place."""
    for present in ('tenant', 'user_tenant_membership'):
        self.assertTableExists(present)
    for absent in ('project', 'user_project_membership'):
        self.assertTableDoesNotExist(absent)
def assertProjectTables(self):
    """Assert the post-15 (project-based) table layout is in place."""
    for present in ('project', 'user_project_membership'):
        self.assertTableExists(present)
    for absent in ('tenant', 'user_tenant_membership'):
        self.assertTableDoesNotExist(absent)
def test_upgrade_tenant_to_project(self):
    """Upgrading through 15 renames tenant tables to project tables."""
    for version, check in ((14, self.assertTenantTables),
                           (15, self.assertProjectTables)):
        self.upgrade(version)
        check()
def test_downgrade_project_to_tenant(self):
    """Downgrading from 15 restores the tenant-based tables."""
    # TODO(henry-nash): Debug why we need to re-load the tenant
    # or user_tenant_membership ahead of upgrading to project
    # in order for the assertProjectTables to work on sqlite
    # (MySQL is fine without it)
    steps = ((self.upgrade, 14, self.assertTenantTables),
             (self.upgrade, 15, self.assertProjectTables),
             (self.downgrade, 14, self.assertTenantTables))
    for migrate, version, check in steps:
        migrate(version)
        check()
def test_upgrade_add_group_tables(self):
    """Migration 14 introduces the group-related tables."""
    self.upgrade(13)
    self.upgrade(14)
    for table_name in ('group', 'group_project_metadata',
                       'group_domain_metadata', 'user_group_membership'):
        self.assertTableExists(table_name)
def test_upgrade_14_to_16(self):
    """Migrations 15-16 rename tenant to project and add domain_id."""
    self.upgrade(14)
    self.populate_user_table(with_pass_enab=True)
    self.populate_tenant_table(with_desc_enab=True)
    self.upgrade(16)
    self.assertTableColumns("user",
                            ["id", "name", "extra",
                             "password", "enabled", "domain_id"])
    session = self.Session()
    user_table = sqlalchemy.Table("user",
                                  self.metadata,
                                  autoload=True)
    # Pre-existing rows are assigned to the default domain.
    a_user = session.query(user_table).filter("id='foo'").one()
    self.assertTrue(a_user.enabled)
    self.assertEqual(a_user.domain_id, DEFAULT_DOMAIN_ID)
    a_user = session.query(user_table).filter("id='badguy'").one()
    self.assertEqual(a_user.name, default_fixtures.USERS[2]['name'])
    self.assertEqual(a_user.domain_id, DEFAULT_DOMAIN_ID)
    project_table = sqlalchemy.Table("project",
                                     self.metadata,
                                     autoload=True)
    a_project = session.query(project_table).filter("id='baz'").one()
    self.assertEqual(a_project.description,
                     default_fixtures.TENANTS[1]['description'])
    self.assertEqual(a_project.domain_id, DEFAULT_DOMAIN_ID)
    session.commit()
    session.close()
    # Also verify per-domain name uniqueness at this schema version.
    self.check_uniqueness_constraints()
def test_downgrade_16_to_14(self):
    """Downgrading below 15 drops domain_id and restores tenant tables."""
    self.upgrade(16)
    self.populate_user_table(with_pass_enab_domain=True)
    self.populate_tenant_table(with_desc_enab_domain=True)
    self.downgrade(14)
    self.assertTableColumns("user",
                            ["id", "name", "extra",
                             "password", "enabled"])
    session = self.Session()
    user_table = sqlalchemy.Table("user",
                                  self.metadata,
                                  autoload=True)
    # Data written at version 16 survives the downgrade.
    a_user = session.query(user_table).filter("id='foo'").one()
    self.assertTrue(a_user.enabled)
    a_user = session.query(user_table).filter("id='badguy'").one()
    self.assertEqual(a_user.name, default_fixtures.USERS[2]['name'])
    tenant_table = sqlalchemy.Table("tenant",
                                    self.metadata,
                                    autoload=True)
    a_tenant = session.query(tenant_table).filter("id='baz'").one()
    self.assertEqual(a_tenant.description,
                     default_fixtures.TENANTS[1]['description'])
    session.commit()
    session.close()
def test_downgrade_remove_group_tables(self):
    """Downgrading 14 -> 13 drops the group-related tables again."""
    self.upgrade(14)
    self.downgrade(13)
    for table_name in ('group', 'group_project_metadata',
                       'group_domain_metadata', 'user_group_membership'):
        self.assertTableDoesNotExist(table_name)
def test_downgrade_endpoints(self):
    """Downgrading to 9 folds per-interface endpoint rows back into one."""
    self.upgrade(13)
    service_extra = {
        'name': uuid.uuid4().hex,
    }
    service = {
        'id': uuid.uuid4().hex,
        'type': uuid.uuid4().hex,
        'extra': json.dumps(service_extra),
    }
    # All three interface rows share the same legacy id/region/service.
    common_endpoint_attrs = {
        'legacy_endpoint_id': uuid.uuid4().hex,
        'region': uuid.uuid4().hex,
        'service_id': service['id'],
        'extra': json.dumps({}),
    }
    endpoints = {
        'public': {
            'id': uuid.uuid4().hex,
            'interface': 'public',
            'url': uuid.uuid4().hex,
        },
        'internal': {
            'id': uuid.uuid4().hex,
            'interface': 'internal',
            'url': uuid.uuid4().hex,
        },
        'admin': {
            'id': uuid.uuid4().hex,
            'interface': 'admin',
            'url': uuid.uuid4().hex,
        },
    }
    session = self.Session()
    self.insert_dict(session, 'service', service)
    for endpoint in endpoints.values():
        endpoint.update(common_endpoint_attrs)
        self.insert_dict(session, 'endpoint', endpoint)
    session.commit()
    session.close()
    self.downgrade(9)
    self.assertTableColumns(
        'service',
        ['id', 'type', 'extra'])
    self.assertTableColumns(
        'endpoint',
        ['id', 'region', 'service_id', 'extra'])
    endpoint_table = sqlalchemy.Table(
        'endpoint', self.metadata, autoload=True)
    session = self.Session()
    # The three interface rows collapse into a single legacy row keyed
    # by the old legacy_endpoint_id.
    self.assertEqual(session.query(endpoint_table).count(), 1)
    q = session.query(endpoint_table)
    q = q.filter_by(id=common_endpoint_attrs['legacy_endpoint_id'])
    ref = q.one()
    self.assertEqual(ref.id, common_endpoint_attrs['legacy_endpoint_id'])
    # NOTE: 'endpoint' is the loop variable left over from the insert
    # loop above; region/service_id are shared by all three rows.
    self.assertEqual(ref.region, endpoint['region'])
    self.assertEqual(ref.service_id, endpoint['service_id'])
    extra = json.loads(ref.extra)
    # The per-interface URLs move back into the extra blob.
    for interface in ['public', 'internal', 'admin']:
        expected_url = endpoints[interface]['url']
        self.assertEqual(extra['%surl' % interface], expected_url)
    session.commit()
    session.close()
def insert_dict(self, session, table_name, d):
    """Naively inserts key-value pairs into a table, given a dictionary."""
    target = sqlalchemy.Table(table_name, self.metadata, autoload=True)
    target.insert().execute(d)
    session.commit()
def test_upgrade_31_to_32(self):
    """Migration 32 widens user.name to 255 characters."""
    self.upgrade(32)
    user_table = self.select_table("user")
    # assertEquals is a deprecated alias of assertEqual; use the
    # canonical spelling, consistent with the rest of this file.
    self.assertEqual(user_table.c.name.type.length, 255)
def test_downgrade_32_to_31(self):
    """Downgrading 32 -> 31 truncates user.name back to 64 characters."""
    self.upgrade(32)
    session = self.Session()
    # NOTE(aloga): we need a different metadata object
    user_table = sqlalchemy.Table('user',
                                  sqlalchemy.MetaData(),
                                  autoload=True,
                                  autoload_with=self.engine)
    user_id = uuid.uuid4().hex
    # Insert a name at the new maximum width so the downgrade has
    # something to truncate.
    ins = user_table.insert().values(
        {'id': user_id,
         'name': 'a' * 255,
         'password': uuid.uuid4().hex,
         'enabled': True,
         'domain_id': DEFAULT_DOMAIN_ID,
         'extra': '{}'})
    session.execute(ins)
    session.commit()
    self.downgrade(31)
    # Check that username has been truncated
    q = session.query(user_table.c.name)
    q = q.filter(user_table.c.id == user_id)
    r = q.one()
    user_name = r[0]
    # assertEquals is a deprecated alias of assertEqual; use the
    # canonical spelling, consistent with the rest of this file.
    self.assertEqual(len(user_name), 64)
    user_table = self.select_table("user")
    self.assertEqual(user_table.c.name.type.length, 64)
def test_downgrade_to_0(self):
    """A full downgrade to version 0 removes every core table."""
    self.upgrade(self.max_version)
    if self.engine.name == 'mysql':
        self._mysql_check_all_tables_innodb()
    self.downgrade(0)
    core_tables = ("user", "token", "role", "user_tenant_membership",
                   "metadata")
    for table_name in core_tables:
        self.assertTableDoesNotExist(table_name)
def test_upgrade_add_domain_tables(self):
    """Migration 7 adds the credential, domain and user_domain tables."""
    new_tables = (
        ('credential',
         ['id', 'user_id', 'project_id', 'blob', 'type', 'extra']),
        ('domain',
         ['id', 'name', 'enabled', 'extra']),
        ('user_domain_metadata',
         ['user_id', 'domain_id', 'data']),
    )
    self.upgrade(6)
    for table_name, _ in new_tables:
        self.assertTableDoesNotExist(table_name)
    self.upgrade(7)
    for table_name, columns in new_tables:
        self.assertTableExists(table_name)
        self.assertTableColumns(table_name, columns)
def test_metadata_table_migration(self):
    """Migrations 17 and 20 move role grants out of the metadata table.

    Grants recorded in the legacy 'metadata' table must end up in
    'user_project_metadata', merged with any conflicting rows already
    there, after which the 'metadata' table is dropped.
    """
    # Scaffolding
    session = self.Session()
    self.upgrade(16)
    domain_table = sqlalchemy.Table('domain', self.metadata, autoload=True)
    user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
    role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
    project_table = sqlalchemy.Table(
        'project', self.metadata, autoload=True)
    metadata_table = sqlalchemy.Table(
        'metadata', self.metadata, autoload=True)

    # Create a Domain
    domain = {'id': uuid.uuid4().hex,
              'name': uuid.uuid4().hex,
              'enabled': True}
    session.execute(domain_table.insert().values(domain))

    # Create a Project
    project = {'id': uuid.uuid4().hex,
               'name': uuid.uuid4().hex,
               'domain_id': domain['id'],
               'extra': "{}"}
    session.execute(project_table.insert().values(project))

    # Create another Project
    project2 = {'id': uuid.uuid4().hex,
                'name': uuid.uuid4().hex,
                'domain_id': domain['id'],
                'extra': "{}"}
    session.execute(project_table.insert().values(project2))

    # Create a User
    user = {'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'domain_id': domain['id'],
            'password': uuid.uuid4().hex,
            'enabled': True,
            'extra': json.dumps({})}
    session.execute(user_table.insert().values(user))

    # Create a Role
    role = {'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex}
    session.execute(role_table.insert().values(role))

    # And another role
    role2 = {'id': uuid.uuid4().hex,
             'name': uuid.uuid4().hex}
    session.execute(role_table.insert().values(role2))

    # Grant Role to User
    role_grant = {'user_id': user['id'],
                  'tenant_id': project['id'],
                  'data': json.dumps({"roles": [role['id']]})}
    session.execute(metadata_table.insert().values(role_grant))

    role_grant = {'user_id': user['id'],
                  'tenant_id': project2['id'],
                  'data': json.dumps({"roles": [role2['id']]})}
    session.execute(metadata_table.insert().values(role_grant))

    # Create another user to test the case where member_role_id is already
    # assigned.
    user2 = {'id': uuid.uuid4().hex,
             'name': uuid.uuid4().hex,
             'domain_id': domain['id'],
             'password': uuid.uuid4().hex,
             'enabled': True,
             'extra': json.dumps({})}
    session.execute(user_table.insert().values(user2))

    # Grant CONF.member_role_id to User2
    role_grant = {'user_id': user2['id'],
                  'tenant_id': project['id'],
                  'data': json.dumps({"roles": [CONF.member_role_id]})}
    session.execute(metadata_table.insert().values(role_grant))

    session.commit()
    self.upgrade(17)
    user_project_metadata_table = sqlalchemy.Table(
        'user_project_metadata', self.metadata, autoload=True)

    # The legacy metadata rows are still readable after 17.
    s = sqlalchemy.select([metadata_table.c.data]).where(
        (metadata_table.c.user_id == user['id']) &
        (metadata_table.c.tenant_id == project['id']))
    r = session.execute(s)
    test_project1 = json.loads(r.fetchone()['data'])
    self.assertEqual(len(test_project1['roles']), 1)
    self.assertIn(role['id'], test_project1['roles'])

    # Test user in project2 has role2
    s = sqlalchemy.select([metadata_table.c.data]).where(
        (metadata_table.c.user_id == user['id']) &
        (metadata_table.c.tenant_id == project2['id']))
    r = session.execute(s)
    test_project2 = json.loads(r.fetchone()['data'])
    self.assertEqual(len(test_project2['roles']), 1)
    self.assertIn(role2['id'], test_project2['roles'])

    # Test for user in project has role in user_project_metadata
    # Migration 17 does not properly migrate this data, so this should
    # be None.
    s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
        (user_project_metadata_table.c.user_id == user['id']) &
        (user_project_metadata_table.c.project_id == project['id']))
    r = session.execute(s)
    self.assertIsNone(r.fetchone())

    # Create a conflicting user-project in user_project_metadata with
    # a different role
    data = json.dumps({"roles": [role2['id']]})
    role_grant = {'user_id': user['id'],
                  'project_id': project['id'],
                  'data': data}
    cmd = user_project_metadata_table.insert().values(role_grant)
    self.engine.execute(cmd)

    # Create another conflicting user-project for User2
    data = json.dumps({"roles": [role2['id']]})
    role_grant = {'user_id': user2['id'],
                  'project_id': project['id'],
                  'data': data}
    cmd = user_project_metadata_table.insert().values(role_grant)
    self.engine.execute(cmd)
    # End Scaffolding

    session.commit()

    # Migrate to 20
    self.upgrade(20)

    # The user-project pairs should have all roles from the previous
    # metadata table in addition to any roles currently in
    # user_project_metadata
    s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
        (user_project_metadata_table.c.user_id == user['id']) &
        (user_project_metadata_table.c.project_id == project['id']))
    r = session.execute(s)
    role_ids = json.loads(r.fetchone()['data'])['roles']
    self.assertEqual(len(role_ids), 3)
    self.assertIn(CONF.member_role_id, role_ids)
    self.assertIn(role['id'], role_ids)
    self.assertIn(role2['id'], role_ids)

    # pairs that only existed in old metadata table should be in
    # user_project_metadata
    s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
        (user_project_metadata_table.c.user_id == user['id']) &
        (user_project_metadata_table.c.project_id == project2['id']))
    r = session.execute(s)
    role_ids = json.loads(r.fetchone()['data'])['roles']
    self.assertEqual(len(role_ids), 2)
    self.assertIn(CONF.member_role_id, role_ids)
    self.assertIn(role2['id'], role_ids)

    # The legacy metadata table is finally dropped.
    self.assertTableDoesNotExist('metadata')
def test_upgrade_default_roles(self):
    """Migration 17 creates (and downgrading removes) the member role."""
    def count_member_roles():
        # Count roles named CONF.member_role_name via raw SQL.  The
        # interpolated value comes from trusted test configuration,
        # not user input.
        session = self.Session()
        query_string = ("select count(*) as c from role "
                        "where name='%s'" % config.CONF.member_role_name)
        role_count = session.execute(query_string).fetchone()['c']
        session.close()
        return role_count

    self.upgrade(16)
    # assertEquals is a deprecated alias of assertEqual; use the
    # canonical spelling, consistent with the rest of this file.
    self.assertEqual(0, count_member_roles())
    self.upgrade(17)
    self.assertEqual(1, count_member_roles())
    self.downgrade(16)
    self.assertEqual(0, count_member_roles())
def check_uniqueness_constraints(self):
    """Verify name uniqueness is scoped per-domain for users/projects."""
    # Check uniqueness constraints for User & Project tables are
    # correct following schema modification. The Group table's
    # schema is never modified, so we don't bother to check that.
    domain_table = sqlalchemy.Table('domain',
                                    self.metadata,
                                    autoload=True)
    domain1 = {'id': uuid.uuid4().hex,
               'name': uuid.uuid4().hex,
               'enabled': True}
    domain2 = {'id': uuid.uuid4().hex,
               'name': uuid.uuid4().hex,
               'enabled': True}
    cmd = domain_table.insert().values(domain1)
    self.engine.execute(cmd)
    cmd = domain_table.insert().values(domain2)
    self.engine.execute(cmd)

    # First, the User table.
    this_table = sqlalchemy.Table('user',
                                  self.metadata,
                                  autoload=True)
    user = {'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'domain_id': domain1['id'],
            'password': uuid.uuid4().hex,
            'enabled': True,
            'extra': json.dumps({})}
    cmd = this_table.insert().values(user)
    self.engine.execute(cmd)
    # now insert a user with the same name into a different
    # domain - which should work.
    user['id'] = uuid.uuid4().hex
    user['domain_id'] = domain2['id']
    cmd = this_table.insert().values(user)
    self.engine.execute(cmd)
    # TODO(henry-nash): For now, as part of clean-up we delete one of these
    # users. Although not part of this test, unless we do so the
    # downgrade(16->15) that is part of teardown with fail due to having
    # two uses with clashing name as we try to revert to a single global
    # name space. This limitation is raised as Bug #1125046 and the delete
    # could be removed depending on how that bug is resolved.
    # NOTE(review): Table.delete(id=...) passes 'id' as a keyword
    # argument to delete(), not as a WHERE clause - confirm this
    # deletes only the intended row rather than the whole table.
    cmd = this_table.delete(id=user['id'])
    self.engine.execute(cmd)

    # Now, the Project table.
    this_table = sqlalchemy.Table('project',
                                  self.metadata,
                                  autoload=True)
    project = {'id': uuid.uuid4().hex,
               'name': uuid.uuid4().hex,
               'domain_id': domain1['id'],
               'description': uuid.uuid4().hex,
               'enabled': True,
               'extra': json.dumps({})}
    cmd = this_table.insert().values(project)
    self.engine.execute(cmd)
    # now insert a project with the same name into a different
    # domain - which should work.
    project['id'] = uuid.uuid4().hex
    project['domain_id'] = domain2['id']
    cmd = this_table.insert().values(project)
    self.engine.execute(cmd)
    # TODO(henry-nash): For now, we delete one of the projects for the same
    # reason as we delete one of the users (Bug #1125046). This delete
    # could be removed depending on that bug resolution.
    cmd = this_table.delete(id=project['id'])
    self.engine.execute(cmd)
def test_upgrade_trusts(self):
    """Migration 21 adds the trust tables and trust columns on token."""
    self.assertEqual(self.schema.version, 0, "DB is at version 0")
    self.upgrade(20)
    self.assertTableColumns("token",
                            ["id", "expires", "extra", "valid"])
    self.upgrade(21)
    expected_schema = (
        ("trust",
         ["id", "trustor_user_id", "trustee_user_id", "project_id",
          "impersonation", "deleted_at", "expires_at", "extra"]),
        ("trust_role",
         ["trust_id", "role_id"]),
        ("token",
         ["id", "expires", "extra", "valid", "trust_id", "user_id"]),
    )
    for table_name, columns in expected_schema:
        self.assertTableColumns(table_name, columns)
def test_fixup_role(self):
    """Migration 19 backfills NULL values in role.extra."""
    session = self.Session()
    self.assertEqual(self.schema.version, 0, "DB is at version 0")
    self.upgrade(1)
    # A version-1 role has no extra column; it will be NULL once the
    # column is added by a later migration.
    self.insert_dict(session, "role", {"id": "test", "name": "test"})
    self.upgrade(18)
    self.insert_dict(session, "role", {"id": "test2",
                                       "name": "test2",
                                       "extra": None})
    r = session.execute('select count(*) as c from role '
                        'where extra is null')
    self.assertEqual(r.fetchone()['c'], 2)
    session.commit()
    self.upgrade(19)
    # After the fixup migration, no role is left with a NULL extra.
    r = session.execute('select count(*) as c from role '
                        'where extra is null')
    self.assertEqual(r.fetchone()['c'], 0)
def test_legacy_endpoint_id(self):
    """Migration 22 promotes legacy_endpoint_id from extra to a column."""
    session = self.Session()
    self.upgrade(21)
    service = {
        'id': uuid.uuid4().hex,
        'name': 'keystone',
        'type': 'identity'}
    self.insert_dict(session, 'service', service)

    legacy_endpoint_id = uuid.uuid4().hex
    # At version 21 the legacy id still lives inside the extra blob.
    endpoint = {
        'id': uuid.uuid4().hex,
        'service_id': service['id'],
        'interface': uuid.uuid4().hex[:8],
        'url': uuid.uuid4().hex,
        'extra': json.dumps({
            'legacy_endpoint_id': legacy_endpoint_id})}
    self.insert_dict(session, 'endpoint', endpoint)
    session.commit()
    self.upgrade(22)

    endpoint_table = sqlalchemy.Table(
        'endpoint', self.metadata, autoload=True)
    self.assertEqual(session.query(endpoint_table).count(), 1)
    ref = session.query(endpoint_table).one()
    self.assertEqual(ref.id, endpoint['id'], ref)
    self.assertEqual(ref.service_id, endpoint['service_id'])
    self.assertEqual(ref.interface, endpoint['interface'])
    self.assertEqual(ref.url, endpoint['url'])
    # The id becomes a real column and is removed from extra.
    self.assertEqual(ref.legacy_endpoint_id, legacy_endpoint_id)
    self.assertEqual(ref.extra, '{}')
def test_group_project_FK_fixup(self):
    """Migration 28 fixes group_project_metadata foreign keys.

    Grants written before the FK breakage must survive the upgrade, and
    downgrading one step must keep the project table in place.
    """
    # To create test data we must start before we broke in the
    # group_project_metadata table in 015.
    self.upgrade(14)
    session = self.Session()
    domain_table = sqlalchemy.Table('domain', self.metadata, autoload=True)
    group_table = sqlalchemy.Table('group', self.metadata, autoload=True)
    tenant_table = sqlalchemy.Table('tenant', self.metadata, autoload=True)
    role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
    group_project_metadata_table = sqlalchemy.Table(
        'group_project_metadata', self.metadata, autoload=True)

    # Create a Domain
    domain = {'id': uuid.uuid4().hex,
              'name': uuid.uuid4().hex,
              'enabled': True}
    session.execute(domain_table.insert().values(domain))

    # Create two Tenants
    tenant = {'id': uuid.uuid4().hex,
              'name': uuid.uuid4().hex,
              'extra': "{}"}
    session.execute(tenant_table.insert().values(tenant))

    tenant1 = {'id': uuid.uuid4().hex,
               'name': uuid.uuid4().hex,
               'extra': "{}"}
    session.execute(tenant_table.insert().values(tenant1))

    # Create a Group
    group = {'id': uuid.uuid4().hex,
             'name': uuid.uuid4().hex,
             'domain_id': domain['id'],
             'extra': json.dumps({})}
    session.execute(group_table.insert().values(group))

    # Create roles
    role_list = []
    for _ in range(2):
        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
        session.execute(role_table.insert().values(role))
        role_list.append(role)

    # Grant Role to User on Project
    role_grant = {'group_id': group['id'],
                  'project_id': tenant['id'],
                  'data': json.dumps({'roles': [role_list[0]['id']]})}
    session.execute(
        group_project_metadata_table.insert().values(role_grant))

    role_grant = {'group_id': group['id'],
                  'project_id': tenant1['id'],
                  'data': json.dumps({'roles': [role_list[1]['id']]})}
    session.execute(
        group_project_metadata_table.insert().values(role_grant))
    session.commit()

    # Now upgrade and fix up the FKs
    self.upgrade(28)
    self.assertTableExists('group_project_metadata')
    self.assertTableExists('project')
    self.assertTableDoesNotExist('tenant')

    # Both grants must still be readable after the FK fixup.
    s = sqlalchemy.select([group_project_metadata_table.c.data]).where(
        (group_project_metadata_table.c.group_id == group['id']) &
        (group_project_metadata_table.c.project_id == tenant['id']))
    r = session.execute(s)
    data = json.loads(r.fetchone()['data'])
    self.assertEqual(len(data['roles']), 1)
    self.assertIn(role_list[0]['id'], data['roles'])

    s = sqlalchemy.select([group_project_metadata_table.c.data]).where(
        (group_project_metadata_table.c.group_id == group['id']) &
        (group_project_metadata_table.c.project_id == tenant1['id']))
    r = session.execute(s)
    data = json.loads(r.fetchone()['data'])
    self.assertEqual(len(data['roles']), 1)
    self.assertIn(role_list[1]['id'], data['roles'])

    # Downgrading one step keeps the project table in place.
    self.downgrade(27)
    self.assertTableExists('group_project_metadata')
    self.assertTableExists('project')
    self.assertTableDoesNotExist('tenant')
    def test_assignment_metadata_migration(self):
        """Round-trip migration 29 for role-assignment metadata.

        Upgrading to 29 rewrites the ``roles`` list stored in the
        ``data`` JSON of each *_metadata table from plain role-id
        strings into ``{'id': <role_id>}`` dicts (so extra flags such as
        ``inherited_to`` can be attached).  Downgrading back to 28
        flattens them again and drops inherited entries entirely.
        """
        self.upgrade(28)
        # Scaffolding
        session = self.Session()
        domain_table = sqlalchemy.Table('domain', self.metadata, autoload=True)
        user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
        group_table = sqlalchemy.Table('group', self.metadata, autoload=True)
        role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
        project_table = sqlalchemy.Table(
            'project', self.metadata, autoload=True)
        user_project_metadata_table = sqlalchemy.Table(
            'user_project_metadata', self.metadata, autoload=True)
        user_domain_metadata_table = sqlalchemy.Table(
            'user_domain_metadata', self.metadata, autoload=True)
        group_project_metadata_table = sqlalchemy.Table(
            'group_project_metadata', self.metadata, autoload=True)
        group_domain_metadata_table = sqlalchemy.Table(
            'group_domain_metadata', self.metadata, autoload=True)
        # Create a Domain
        domain = {'id': uuid.uuid4().hex,
                  'name': uuid.uuid4().hex,
                  'enabled': True}
        session.execute(domain_table.insert().values(domain))
        # Create another Domain
        domain2 = {'id': uuid.uuid4().hex,
                   'name': uuid.uuid4().hex,
                   'enabled': True}
        session.execute(domain_table.insert().values(domain2))
        # Create a Project
        project = {'id': uuid.uuid4().hex,
                   'name': uuid.uuid4().hex,
                   'domain_id': domain['id'],
                   'extra': "{}"}
        session.execute(project_table.insert().values(project))
        # Create another Project
        project2 = {'id': uuid.uuid4().hex,
                    'name': uuid.uuid4().hex,
                    'domain_id': domain['id'],
                    'extra': "{}"}
        session.execute(project_table.insert().values(project2))
        # Create a User
        user = {'id': uuid.uuid4().hex,
                'name': uuid.uuid4().hex,
                'domain_id': domain['id'],
                'password': uuid.uuid4().hex,
                'enabled': True,
                'extra': json.dumps({})}
        session.execute(user_table.insert().values(user))
        # Create a Group
        group = {'id': uuid.uuid4().hex,
                 'name': uuid.uuid4().hex,
                 'domain_id': domain['id'],
                 'extra': json.dumps({})}
        session.execute(group_table.insert().values(group))
        # Create roles
        role_list = []
        for _ in range(7):
            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
            session.execute(role_table.insert().values(role))
            role_list.append(role)
        # Grant Role to User on Project
        role_grant = {'user_id': user['id'],
                      'project_id': project['id'],
                      'data': json.dumps({'roles': [role_list[0]['id']]})}
        session.execute(
            user_project_metadata_table.insert().values(role_grant))
        role_grant = {'user_id': user['id'],
                      'project_id': project2['id'],
                      'data': json.dumps({'roles': [role_list[1]['id']]})}
        session.execute(
            user_project_metadata_table.insert().values(role_grant))
        # Grant Role to Group on different Project
        role_grant = {'group_id': group['id'],
                      'project_id': project2['id'],
                      'data': json.dumps({'roles': [role_list[2]['id']]})}
        session.execute(
            group_project_metadata_table.insert().values(role_grant))
        # Grant Role to User on Domain
        role_grant = {'user_id': user['id'],
                      'domain_id': domain['id'],
                      'data': json.dumps({'roles': [role_list[3]['id']]})}
        session.execute(user_domain_metadata_table.insert().values(role_grant))
        # Grant Role to Group on Domain, with a non-role key in the data
        # so we can check it survives the migration untouched.
        role_grant = {'group_id': group['id'],
                      'domain_id': domain['id'],
                      'data': json.dumps(
                          {'roles': [role_list[4]['id']],
                           'other': 'somedata'})}
        session.execute(
            group_domain_metadata_table.insert().values(role_grant))
        session.commit()
        # After upgrading, every role entry must be a {'id': ...} dict.
        self.upgrade(29)
        s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
            (user_project_metadata_table.c.user_id == user['id']) &
            (user_project_metadata_table.c.project_id == project['id']))
        r = session.execute(s)
        data = json.loads(r.fetchone()['data'])
        self.assertEqual(len(data['roles']), 1)
        self.assertIn({'id': role_list[0]['id']}, data['roles'])
        s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
            (user_project_metadata_table.c.user_id == user['id']) &
            (user_project_metadata_table.c.project_id == project2['id']))
        r = session.execute(s)
        data = json.loads(r.fetchone()['data'])
        self.assertEqual(len(data['roles']), 1)
        self.assertIn({'id': role_list[1]['id']}, data['roles'])
        s = sqlalchemy.select([group_project_metadata_table.c.data]).where(
            (group_project_metadata_table.c.group_id == group['id']) &
            (group_project_metadata_table.c.project_id == project2['id']))
        r = session.execute(s)
        data = json.loads(r.fetchone()['data'])
        self.assertEqual(len(data['roles']), 1)
        self.assertIn({'id': role_list[2]['id']}, data['roles'])
        s = sqlalchemy.select([user_domain_metadata_table.c.data]).where(
            (user_domain_metadata_table.c.user_id == user['id']) &
            (user_domain_metadata_table.c.domain_id == domain['id']))
        r = session.execute(s)
        data = json.loads(r.fetchone()['data'])
        self.assertEqual(len(data['roles']), 1)
        self.assertIn({'id': role_list[3]['id']}, data['roles'])
        s = sqlalchemy.select([group_domain_metadata_table.c.data]).where(
            (group_domain_metadata_table.c.group_id == group['id']) &
            (group_domain_metadata_table.c.domain_id == domain['id']))
        r = session.execute(s)
        data = json.loads(r.fetchone()['data'])
        self.assertEqual(len(data['roles']), 1)
        self.assertIn({'id': role_list[4]['id']}, data['roles'])
        self.assertIn('other', data)
        # Now add an entry that has one regular and one inherited role
        role_grant = {'user_id': user['id'],
                      'domain_id': domain2['id'],
                      'data': json.dumps(
                          {'roles': [{'id': role_list[5]['id']},
                                     {'id': role_list[6]['id'],
                                      'inherited_to': 'projects'}]})}
        session.execute(user_domain_metadata_table.insert().values(role_grant))
        session.commit()
        # Downgrading must flatten the dicts back into plain role-id strings.
        self.downgrade(28)
        s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
            (user_project_metadata_table.c.user_id == user['id']) &
            (user_project_metadata_table.c.project_id == project['id']))
        r = session.execute(s)
        data = json.loads(r.fetchone()['data'])
        self.assertEqual(len(data['roles']), 1)
        self.assertIn(role_list[0]['id'], data['roles'])
        s = sqlalchemy.select([user_project_metadata_table.c.data]).where(
            (user_project_metadata_table.c.user_id == user['id']) &
            (user_project_metadata_table.c.project_id == project2['id']))
        r = session.execute(s)
        data = json.loads(r.fetchone()['data'])
        self.assertEqual(len(data['roles']), 1)
        self.assertIn(role_list[1]['id'], data['roles'])
        s = sqlalchemy.select([group_project_metadata_table.c.data]).where(
            (group_project_metadata_table.c.group_id == group['id']) &
            (group_project_metadata_table.c.project_id == project2['id']))
        r = session.execute(s)
        data = json.loads(r.fetchone()['data'])
        self.assertEqual(len(data['roles']), 1)
        self.assertIn(role_list[2]['id'], data['roles'])
        s = sqlalchemy.select([user_domain_metadata_table.c.data]).where(
            (user_domain_metadata_table.c.user_id == user['id']) &
            (user_domain_metadata_table.c.domain_id == domain['id']))
        r = session.execute(s)
        data = json.loads(r.fetchone()['data'])
        self.assertEqual(len(data['roles']), 1)
        self.assertIn(role_list[3]['id'], data['roles'])
        s = sqlalchemy.select([group_domain_metadata_table.c.data]).where(
            (group_domain_metadata_table.c.group_id == group['id']) &
            (group_domain_metadata_table.c.domain_id == domain['id']))
        r = session.execute(s)
        data = json.loads(r.fetchone()['data'])
        self.assertEqual(len(data['roles']), 1)
        self.assertIn(role_list[4]['id'], data['roles'])
        self.assertIn('other', data)
        # For user-domain2, where we had one regular and one inherited role,
        # only the direct role should remain, the inherited role should
        # have been deleted during the downgrade
        s = sqlalchemy.select([user_domain_metadata_table.c.data]).where(
            (user_domain_metadata_table.c.user_id == user['id']) &
            (user_domain_metadata_table.c.domain_id == domain2['id']))
        r = session.execute(s)
        data = json.loads(r.fetchone()['data'])
        self.assertEqual(len(data['roles']), 1)
        self.assertIn(role_list[5]['id'], data['roles'])
def test_drop_credential_constraint(self):
ec2_credential = {
'id': '100',
'user_id': 'foo',
'project_id': 'bar',
'type': 'ec2',
'blob': json.dumps({
"access": "12345",
"secret": "12345"
})
}
user = {
'id': 'foo',
'name': 'FOO',
'password': 'foo2',
'enabled': True,
'email': 'foo@bar.com',
'extra': json.dumps({'enabled': True})
}
tenant = {
'id': 'bar',
'name': 'BAR',
'description': 'description',
'enabled': True,
'extra': json.dumps({'enabled': True})
}
session = self.Session()
self.upgrade(7)
self.insert_dict(session, 'user', user)
self.insert_dict(session, 'tenant', tenant)
self.insert_dict(session, 'credential', ec2_credential)
session.commit()
self.upgrade(30)
cred_table = sqlalchemy.Table('credential',
self.metadata,
autoload=True)
cred = session.query(cred_table).filter("id='100'").one()
self.assertEqual(cred.user_id,
ec2_credential['user_id'])
def test_drop_credential_indexes(self):
self.upgrade(31)
table = sqlalchemy.Table('credential', self.metadata, autoload=True)
self.assertEqual(len(table.indexes), 0)
def test_downgrade_30(self):
self.upgrade(31)
self.downgrade(30)
table = sqlalchemy.Table('credential', self.metadata, autoload=True)
index_data = [(idx.name, idx.columns.keys())
for idx in table.indexes]
if self.engine.name == 'mysql':
self.assertIn(('user_id', ['user_id']), index_data)
self.assertIn(('credential_project_id_fkey', ['project_id']),
index_data)
else:
self.assertEqual(len(index_data), 0)
    def test_migrate_ec2_credential(self):
        """Migration 33 folds ec2_credential rows into the credential table.

        The migrated credential id is the hash of the ec2 access key,
        and downgrading to 32 recreates the ec2_credential table with
        the original row.
        """
        user = {
            'id': 'foo',
            'name': 'FOO',
            'password': 'foo2',
            'enabled': True,
            'email': 'foo@bar.com',
            'extra': json.dumps({'enabled': True})
        }
        project = {
            'id': 'bar',
            'name': 'BAR',
            'description': 'description',
            'enabled': True,
            'extra': json.dumps({'enabled': True})
        }
        ec2_credential = {
            'access': uuid.uuid4().hex,
            'secret': uuid.uuid4().hex,
            'user_id': user['id'],
            'tenant_id': project['id'],
        }
        session = self.Session()
        self.upgrade(7)
        self.insert_dict(session, 'ec2_credential', ec2_credential)
        self.insert_dict(session, 'user', user)
        self.insert_dict(session, 'tenant', project)
        self.upgrade(33)
        self.assertTableDoesNotExist('ec2_credential')
        cred_table = sqlalchemy.Table('credential',
                                      self.metadata,
                                      autoload=True)
        # The new credential id is derived from the ec2 access key.
        expected_credential_id = utils.hash_access_key(
            ec2_credential['access'])
        cred = session.query(cred_table).filter_by(
            id=expected_credential_id).one()
        self.assertEqual(cred.user_id, ec2_credential['user_id'])
        self.assertEqual(cred.project_id, ec2_credential['tenant_id'])
        # test list credential using credential manager.
        credential_api = credential.Manager()
        self.assertNotEmpty(credential_api.
                            list_credentials(
                                user_id=ec2_credential['user_id']))
        self.downgrade(32)
        session.commit()
        self.assertTableExists('ec2_credential')
        ec2_cred_table = sqlalchemy.Table('ec2_credential',
                                          self.metadata,
                                          autoload=True)
        ec2_cred = session.query(ec2_cred_table).filter_by(
            access=ec2_credential['access']).one()
        self.assertEqual(ec2_cred.user_id, ec2_credential['user_id'])
    def test_migrate_ec2_credential_with_conflict_project(self):
        """Migration 33 aborts with Conflict on a project mismatch.

        A v3 credential already exists with the id the migration would
        assign (hash of the ec2 access key) but pointing at a different
        project, so the upgrade must raise exception.Conflict.
        """
        user = {
            'id': 'foo',
            'name': 'FOO',
            'password': 'foo2',
            'enabled': True,
            'email': 'foo@bar.com',
            'extra': json.dumps({'enabled': True})
        }
        project_1 = {
            'id': 'bar',
            'name': 'BAR',
            'description': 'description',
            'enabled': True,
            'extra': json.dumps({'enabled': True})
        }
        project_2 = {
            'id': 'baz',
            'name': 'BAZ',
            'description': 'description',
            'enabled': True,
            'extra': json.dumps({'enabled': True})
        }
        ec2_credential = {
            'access': uuid.uuid4().hex,
            'secret': uuid.uuid4().hex,
            'user_id': user['id'],
            'tenant_id': project_1['id'],
        }
        blob = {'access': ec2_credential['access'],
                'secret': ec2_credential['secret']}
        v3_credential = {
            'id': utils.hash_access_key(ec2_credential['access']),
            'user_id': user['id'],
            # set the project id to simulate a conflict
            'project_id': project_2['id'],
            'blob': json.dumps(blob),
            'type': 'ec2',
            'extra': json.dumps({})
        }
        session = self.Session()
        self.upgrade(7)
        self.insert_dict(session, 'ec2_credential', ec2_credential)
        self.insert_dict(session, 'user', user)
        self.insert_dict(session, 'tenant', project_1)
        self.insert_dict(session, 'tenant', project_2)
        self.upgrade(32)
        self.insert_dict(session, 'credential', v3_credential)
        self.assertRaises(exception.Conflict, self.upgrade, 33)
    def test_migrate_ec2_credential_with_conflict_secret(self):
        """Migration 33 aborts with Conflict on a secret mismatch.

        A v3 credential already exists with the id the migration would
        assign, on the same project, but whose blob carries a different
        secret -- the upgrade must raise exception.Conflict.
        """
        user = {
            'id': 'foo',
            'name': 'FOO',
            'password': 'foo2',
            'enabled': True,
            'email': 'foo@bar.com',
            'extra': json.dumps({'enabled': True})
        }
        project_1 = {
            'id': 'bar',
            'name': 'BAR',
            'description': 'description',
            'enabled': True,
            'extra': json.dumps({'enabled': True})
        }
        project_2 = {
            'id': 'baz',
            'name': 'BAZ',
            'description': 'description',
            'enabled': True,
            'extra': json.dumps({'enabled': True})
        }
        ec2_credential = {
            'access': uuid.uuid4().hex,
            'secret': uuid.uuid4().hex,
            'user_id': user['id'],
            'tenant_id': project_1['id'],
        }
        # Same access key, deliberately different secret.
        blob = {'access': ec2_credential['access'],
                'secret': 'different secret'}
        v3_cred_different_secret = {
            'id': utils.hash_access_key(ec2_credential['access']),
            'user_id': user['id'],
            'project_id': project_1['id'],
            'blob': json.dumps(blob),
            'type': 'ec2',
            'extra': json.dumps({})
        }
        session = self.Session()
        self.upgrade(7)
        self.insert_dict(session, 'ec2_credential', ec2_credential)
        self.insert_dict(session, 'user', user)
        self.insert_dict(session, 'tenant', project_1)
        self.insert_dict(session, 'tenant', project_2)
        self.upgrade(32)
        self.insert_dict(session, 'credential', v3_cred_different_secret)
        self.assertRaises(exception.Conflict, self.upgrade, 33)
    def test_migrate_ec2_credential_with_invalid_blob(self):
        """Migration 33 raises ValidationError on an unusable blob."""
        user = {
            'id': 'foo',
            'name': 'FOO',
            'password': 'foo2',
            'enabled': True,
            'email': 'foo@bar.com',
            'extra': json.dumps({'enabled': True})
        }
        project_1 = {
            'id': 'bar',
            'name': 'BAR',
            'description': 'description',
            'enabled': True,
            'extra': json.dumps({'enabled': True})
        }
        project_2 = {
            'id': 'baz',
            'name': 'BAZ',
            'description': 'description',
            'enabled': True,
            'extra': json.dumps({'enabled': True})
        }
        ec2_credential = {
            'access': uuid.uuid4().hex,
            'secret': uuid.uuid4().hex,
            'user_id': user['id'],
            'tenant_id': project_1['id'],
        }
        # NOTE(review): json.dumps() of this malformed string actually
        # produces a *valid* JSON string literal; presumably the migration
        # rejects the blob because it decodes to a string rather than the
        # expected dict -- confirm against the 033 migration code.
        blob = '{"abc":"def"d}'
        v3_cred_invalid_blob = {
            'id': utils.hash_access_key(ec2_credential['access']),
            'user_id': user['id'],
            'project_id': project_1['id'],
            'blob': json.dumps(blob),
            'type': 'ec2',
            'extra': json.dumps({})
        }
        session = self.Session()
        self.upgrade(7)
        self.insert_dict(session, 'ec2_credential', ec2_credential)
        self.insert_dict(session, 'user', user)
        self.insert_dict(session, 'tenant', project_1)
        self.insert_dict(session, 'tenant', project_2)
        self.upgrade(32)
        self.insert_dict(session, 'credential', v3_cred_invalid_blob)
        self.assertRaises(exception.ValidationError, self.upgrade, 33)
    def test_migrate_add_default_project_id_column_upgrade(self):
        """Migration 34 adds user.default_project_id from legacy extra keys.

        The column is populated from whichever of the legacy keys
        ``tenantId``, ``tenant_id`` or ``default_project_id`` is present
        in the user's ``extra`` JSON (with ``default_project_id`` taking
        precedence over ``tenantId`` -- see user4), and the consumed
        keys are removed from ``extra``.
        """
        user1 = {
            'id': 'foo1',
            'name': 'FOO1',
            'password': 'foo2',
            'enabled': True,
            'email': 'foo@bar.com',
            'extra': json.dumps({'tenantId': 'bar'}),
            'domain_id': DEFAULT_DOMAIN_ID
        }
        user2 = {
            'id': 'foo2',
            'name': 'FOO2',
            'password': 'foo2',
            'enabled': True,
            'email': 'foo@bar.com',
            'extra': json.dumps({'tenant_id': 'bar'}),
            'domain_id': DEFAULT_DOMAIN_ID
        }
        user3 = {
            'id': 'foo3',
            'name': 'FOO3',
            'password': 'foo2',
            'enabled': True,
            'email': 'foo@bar.com',
            'extra': json.dumps({'default_project_id': 'bar'}),
            'domain_id': DEFAULT_DOMAIN_ID
        }
        user4 = {
            'id': 'foo4',
            'name': 'FOO4',
            'password': 'foo2',
            'enabled': True,
            'email': 'foo@bar.com',
            'extra': json.dumps({'tenantId': 'baz',
                                 'default_project_id': 'bar'}),
            'domain_id': DEFAULT_DOMAIN_ID
        }
        session = self.Session()
        self.upgrade(33)
        self.insert_dict(session, 'user', user1)
        self.insert_dict(session, 'user', user2)
        self.insert_dict(session, 'user', user3)
        self.insert_dict(session, 'user', user4)
        self.assertTableColumns('user',
                                ['id', 'name', 'extra', 'password',
                                 'enabled', 'domain_id'])
        session.commit()
        session.close()
        self.upgrade(34)
        session = self.Session()
        self.assertTableColumns('user',
                                ['id', 'name', 'extra', 'password',
                                 'enabled', 'domain_id', 'default_project_id'])
        user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
        updated_user1 = session.query(user_table).filter_by(id='foo1').one()
        old_json_data = json.loads(user1['extra'])
        new_json_data = json.loads(updated_user1.extra)
        self.assertNotIn('tenantId', new_json_data)
        self.assertEqual(old_json_data['tenantId'],
                         updated_user1.default_project_id)
        updated_user2 = session.query(user_table).filter_by(id='foo2').one()
        old_json_data = json.loads(user2['extra'])
        new_json_data = json.loads(updated_user2.extra)
        self.assertNotIn('tenant_id', new_json_data)
        self.assertEqual(old_json_data['tenant_id'],
                         updated_user2.default_project_id)
        updated_user3 = session.query(user_table).filter_by(id='foo3').one()
        old_json_data = json.loads(user3['extra'])
        new_json_data = json.loads(updated_user3.extra)
        self.assertNotIn('default_project_id', new_json_data)
        self.assertEqual(old_json_data['default_project_id'],
                         updated_user3.default_project_id)
        # user4 had both keys: default_project_id wins over tenantId.
        updated_user4 = session.query(user_table).filter_by(id='foo4').one()
        old_json_data = json.loads(user4['extra'])
        new_json_data = json.loads(updated_user4.extra)
        self.assertNotIn('default_project_id', new_json_data)
        self.assertNotIn('tenantId', new_json_data)
        self.assertEqual(old_json_data['default_project_id'],
                         updated_user4.default_project_id)
    def test_migrate_add_default_project_id_column_downgrade(self):
        """Downgrading 34 -> 33 folds default_project_id back into extra.

        The column is dropped and its value is written into the user's
        ``extra`` JSON under all three legacy keys (``tenantId``,
        ``tenant_id`` and ``default_project_id``).
        """
        user1 = {
            'id': 'foo1',
            'name': 'FOO1',
            'password': 'foo2',
            'enabled': True,
            'email': 'foo@bar.com',
            'extra': json.dumps({}),
            'default_project_id': 'bar',
            'domain_id': DEFAULT_DOMAIN_ID
        }
        self.upgrade(34)
        session = self.Session()
        self.insert_dict(session, 'user', user1)
        self.assertTableColumns('user',
                                ['id', 'name', 'extra', 'password',
                                 'enabled', 'domain_id', 'default_project_id'])
        session.commit()
        session.close()
        self.downgrade(33)
        session = self.Session()
        self.assertTableColumns('user',
                                ['id', 'name', 'extra', 'password',
                                 'enabled', 'domain_id'])
        user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
        updated_user1 = session.query(user_table).filter_by(id='foo1').one()
        new_json_data = json.loads(updated_user1.extra)
        self.assertIn('tenantId', new_json_data)
        self.assertIn('default_project_id', new_json_data)
        self.assertEqual(user1['default_project_id'],
                         new_json_data['tenantId'])
        self.assertEqual(user1['default_project_id'],
                         new_json_data['default_project_id'])
        self.assertEqual(user1['default_project_id'],
                         new_json_data['tenant_id'])
def populate_user_table(self, with_pass_enab=False,
with_pass_enab_domain=False):
# Populate the appropriate fields in the user
# table, depending on the parameters:
#
# Default: id, name, extra
# pass_enab: Add password, enabled as well
# pass_enab_domain: Add password, enabled and domain as well
#
this_table = sqlalchemy.Table("user",
self.metadata,
autoload=True)
for user in default_fixtures.USERS:
extra = copy.deepcopy(user)
extra.pop('id')
extra.pop('name')
if with_pass_enab:
password = extra.pop('password', None)
enabled = extra.pop('enabled', True)
ins = this_table.insert().values(
{'id': user['id'],
'name': user['name'],
'password': password,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
if with_pass_enab_domain:
password = extra.pop('password', None)
enabled = extra.pop('enabled', True)
extra.pop('domain_id')
ins = this_table.insert().values(
{'id': user['id'],
'name': user['name'],
'domain_id': user['domain_id'],
'password': password,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
ins = this_table.insert().values(
{'id': user['id'],
'name': user['name'],
'extra': json.dumps(extra)})
self.engine.execute(ins)
def populate_tenant_table(self, with_desc_enab=False,
with_desc_enab_domain=False):
# Populate the appropriate fields in the tenant or
# project table, depending on the parameters
#
# Default: id, name, extra
# desc_enab: Add description, enabled as well
# desc_enab_domain: Add description, enabled and domain as well,
# plus use project instead of tenant
#
if with_desc_enab_domain:
# By this time tenants are now projects
this_table = sqlalchemy.Table("project",
self.metadata,
autoload=True)
else:
this_table = sqlalchemy.Table("tenant",
self.metadata,
autoload=True)
for tenant in default_fixtures.TENANTS:
extra = copy.deepcopy(tenant)
extra.pop('id')
extra.pop('name')
if with_desc_enab:
desc = extra.pop('description', None)
enabled = extra.pop('enabled', True)
ins = this_table.insert().values(
{'id': tenant['id'],
'name': tenant['name'],
'description': desc,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
if with_desc_enab_domain:
desc = extra.pop('description', None)
enabled = extra.pop('enabled', True)
extra.pop('domain_id')
ins = this_table.insert().values(
{'id': tenant['id'],
'name': tenant['name'],
'domain_id': tenant['domain_id'],
'description': desc,
'enabled': bool(enabled),
'extra': json.dumps(extra)})
else:
ins = this_table.insert().values(
{'id': tenant['id'],
'name': tenant['name'],
'extra': json.dumps(extra)})
self.engine.execute(ins)
def _mysql_check_all_tables_innodb(self):
database = self.engine.url.database
connection = self.engine.connect()
# sanity check
total = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='%(database)s'" %
dict(database=database))
self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
noninnodb = connection.execute("SELECT table_name "
"from information_schema.TABLES "
"where TABLE_SCHEMA='%(database)s' "
"and ENGINE!='InnoDB' "
"and TABLE_NAME!='migrate_version'" %
dict(database=database))
names = [x[0] for x in noninnodb]
self.assertEqual(names, [],
"Non-InnoDB tables exist")
connection.close()
|
clokep/pelican-plugins | refs/heads/master | extract_toc/__init__.py | 84 | from .extract_toc import *
|
heeraj123/oh-mainline | refs/heads/master | vendor/packages/twisted/twisted/plugins/twisted_portforward.py | 83 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.application.service import ServiceMaker
# ServiceMaker registers the "portforward" twistd subcommand; the actual
# service factory lives in twisted.tap.portforward.
TwistedPortForward = ServiceMaker(
    "Twisted Port-Forwarding",
    "twisted.tap.portforward",
    "A simple port-forwarder.",
    "portforward")
|
mugurrus/superdesk-core | refs/heads/master | apps/publish/odbc_test.py | 7 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
from superdesk.tests import TestCase
from superdesk.publish import init_app, SUBSCRIBER_TYPES
from superdesk.publish.transmitters.odbc import ODBCPublishService
class ODBCTests(TestCase):
    """Exercise ODBCPublishService transmission against a test DSN.

    The connection string is injected from the
    ``ODBC_TEST_CONNECTION_STRING`` config setting in setUp, and the
    transmit test only runs when ``ODBC_PUBLISH`` is enabled.
    """
    # One active wire subscriber whose destination publishes via ODBC.
    subscribers = [{"_id": "1", "name": "Test", "subscriber_type": SUBSCRIBER_TYPES.WIRE, "media_type": "media",
                    "is_active": True, "sequence_num_settings": {"max": 10, "min": 1},
                    "critical_errors": {"9004": True},
                    "destinations": [{"name": "AAP IPNEWS", "delivery_type": "odbc", "format": "AAP IPNEWS",
                                      "config": {"stored_procedure": "InsertNews"}
                                      }]
                    }]
    # A pending queue item carrying a pre-formatted AAP IPNews payload.
    queue_items = [{"_id": "1", "state": "pending", "content_type": "text", "headline": "test", "unique_name": "#2034",
                    "publishing_action": "published", "published_seq_num": 4,
                    "destination": {"name": "AAP IPNEWS", "delivery_type": "odbc", "format": "AAP IPNEWS",
                                    "config": {"stored_procedure": "InsertNews"}
                                    },
                    "formatted_item": {
                        "ident": "0",
                        "selector_codes": '3**',
                        "wordcount": 313,
                        "texttab": "x",
                        "originator": "AAP",
                        "service_level": "a",
                        "keyword": "ROSS",
                        "subject": "crime, law and justice",
                        "category": "a",
                        "take_key": "Take-that",
                        "subject_detail": "international court or tribunal",
                        "subject_reference": "02011001",
                        "article_text": "THIS IS A TEST PLEASE IGNORE",
                        "priority": "u",
                        "headline": "TEST HEADLINE",
                        "usn": 68147,
                        "subject_matter": "international law",
                        "sequence": 117,
                        "news_item_type": "News",
                        "author": "",
                        "genre": "Current",
                        "fullStory": 1
                    },
                    "subscriber_id": "1", "item_id": "1", "item_version": 6
                    }]
    def setUp(self):
        """Inject the test DSN into the fixtures and seed the app data."""
        self.subscribers[0]['destinations'][0]['config']['connection_string'] = \
            superdesk.app.config["ODBC_TEST_CONNECTION_STRING"]
        self.app.data.insert('subscribers', self.subscribers)
        self.queue_items[0]['destination']['config']['connection_string'] = \
            superdesk.app.config["ODBC_TEST_CONNECTION_STRING"]
        self.app.data.insert('publish_queue', self.queue_items)
        init_app(self.app)
    def test_transmit(self):
        """_transmit returns a positive value when ODBC publishing is on."""
        if superdesk.app.config['ODBC_PUBLISH']:
            subscriber = self.app.data.find('subscribers', None, None)[0]
            publish_service = ODBCPublishService()
            ret = publish_service._transmit(self.queue_items[0], subscriber)
            self.assertGreater(ret, 0)
|
asartori/mopidy-cd | refs/heads/master | mopidy_cd/__init__.py | 1 | from __future__ import unicode_literals
import os
from mopidy import config, ext
__version__ = '0.5'
class Extension(ext.Extension):
    """Mopidy extension entry point for audio CD playback."""

    dist_name = 'Mopidy-Cd'
    ext_name = 'cd'
    version = __version__

    def get_default_config(self):
        """Read the bundled ext.conf shipped next to this module."""
        directory = os.path.dirname(__file__)
        return config.read(os.path.join(directory, 'ext.conf'))

    def setup(self, registry):
        """Register the CD backend with Mopidy's extension registry."""
        from .backend import CdBackend
        registry.add('backend', CdBackend)
|
aavanian/bokeh | refs/heads/master | bokeh/protocol/messages/__init__.py | 8 | '''
'''
from __future__ import absolute_import
from ..exceptions import ProtocolError
index = {}

def register(cls):
    ''' Decorator that records a Message class in the protocol ``index``,
    keyed by its ``(msgtype, revision)`` pair, and returns the class
    unchanged so it can be used as a plain class decorator.

    Raises:
        ProtocolError: if a class with the same (msgtype, revision)
            key has already been registered.

    '''
    key = (cls.msgtype, cls.revision)
    if key not in index:
        index[key] = cls
        return cls
    raise ProtocolError("Duplicate message specification encountered: %r" % key)
from .ack import *
from .event import *
from .ok import *
from .patch_doc import *
from .pull_doc_req import *
from .pull_doc_reply import *
from .push_doc import *
from .error import *
from .server_info_reply import *
from .server_info_req import *
|
banga/powerline-shell | refs/heads/master | test/segments_test/git_stash_test.py | 3 | import unittest
import mock
import tempfile
import shutil
import sh
import powerline_shell.segments.git_stash as git_stash
from powerline_shell.utils import RepoStats
class GitStashTest(unittest.TestCase):
    """Tests for the git_stash powerline segment against a scratch repo."""
    def setUp(self):
        """Create a fresh git repo in a temp dir and build the segment."""
        self.powerline = mock.MagicMock()
        self.dirname = tempfile.mkdtemp()
        sh.cd(self.dirname)
        sh.git("init", ".")
        self.segment = git_stash.Segment(self.powerline, {})
    def tearDown(self):
        # Remove the scratch repo created in setUp.
        shutil.rmtree(self.dirname)
    def _add_and_commit(self, filename):
        """Create *filename* and commit it, leaving the work tree clean."""
        sh.touch(filename)
        sh.git("add", filename)
        sh.git("commit", "-m", "add file " + filename)
    def _overwrite_file(self, filename, content):
        """Replace *filename*'s contents, dirtying the work tree."""
        sh.echo(content, _out=filename)
    def _stash(self):
        sh.git("stash")
    @mock.patch('powerline_shell.utils.get_PATH')
    def test_git_not_installed(self, get_PATH):
        """Segment stays silent when git cannot be found on PATH."""
        get_PATH.return_value = ""  # so git can't be found
        self.segment.start()
        self.segment.add_to_powerline()
        self.assertEqual(self.powerline.append.call_count, 0)
    def test_non_git_directory(self):
        """Segment stays silent outside a git repository."""
        shutil.rmtree(".git")
        self.segment.start()
        self.segment.add_to_powerline()
        self.assertEqual(self.powerline.append.call_count, 0)
    def test_no_stashes(self):
        """Segment appends nothing when the stash is empty."""
        self._add_and_commit("foo")
        self.segment.start()
        self.segment.add_to_powerline()
        self.assertEqual(self.powerline.append.call_count, 0)
    def test_one_stash(self):
        """A single stash shows the stash symbol with no count."""
        self._add_and_commit("foo")
        self._overwrite_file("foo", "some new content")
        self._stash()
        self.segment.start()
        self.segment.add_to_powerline()
        expected = u' {} '.format(RepoStats.symbols["stash"])
        self.assertEqual(self.powerline.append.call_args[0][0], expected)
    def test_multiple_stashes(self):
        """Multiple stashes show a count before the stash symbol."""
        self._add_and_commit("foo")
        self._overwrite_file("foo", "some new content")
        self._stash()
        self._overwrite_file("foo", "some different content")
        self._stash()
        self._overwrite_file("foo", "more different content")
        self._stash()
        self.segment.start()
        self.segment.add_to_powerline()
        expected = u' 3{} '.format(RepoStats.symbols["stash"])
        self.assertEqual(self.powerline.append.call_args[0][0], expected)
|
amjadm61/bedrock | refs/heads/master | vendor-local/lib/python/south/management/commands/graphmigrations.py | 129 | """
Outputs a graphviz dot file of the dependencies.
"""
from __future__ import print_function
from optparse import make_option
import re
import textwrap
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from south.migration import Migrations, all_migrations
class Command(BaseCommand):
    help = "Outputs a GraphViz dot file of all migration dependencies to stdout."

    def handle(self, **options):
        """Emit a GraphViz ``digraph`` of every migration and its dependencies.

        Each app becomes a subgraph with its own node color; edges point
        from a dependency to the migration that requires it.  Intra-app
        edges get extra weight (helps vertical alignment) and cross-app
        edges are drawn bold.
        """
        # Resolve dependencies
        Migrations.calculate_dependencies()
        colors = ['crimson', 'darkgreen', 'darkgoldenrod', 'navy',
                  'brown', 'darkorange', 'aquamarine', 'blueviolet']
        color_index = 0
        wrapper = textwrap.TextWrapper(width=40)
        print("digraph G {")
        # Group each app in a subgraph
        for migrations in all_migrations():
            print(" subgraph %s {" % migrations.app_label())
            print(" node [color=%s];" % colors[color_index])
            for migration in migrations:
                # Munge the label - text wrap and change _ to spaces
                label = "%s - %s" % (
                    migration.app_label(), migration.name())
                label = re.sub(r"_+", " ", label)
                label = "\\n".join(wrapper.wrap(label))
                print(' "%s.%s" [label="%s"];' % (
                    migration.app_label(), migration.name(), label))
            print(" }")
            color_index = (color_index + 1) % len(colors)
        # For every migration, print its links.
        for migrations in all_migrations():
            for migration in migrations:
                for other in migration.dependencies:
                    # Added weight tends to keep migrations from the same app
                    # in vertical alignment
                    attrs = "[weight=2.0]"
                    # But the more interesting edges are those between apps
                    if other.app_label() != migration.app_label():
                        attrs = "[style=bold]"
                    print(' "%s.%s" -> "%s.%s" %s;' % (
                        other.app_label(), other.name(),
                        migration.app_label(), migration.name(),
                        attrs
                    ))
        print("}")
|
DelazJ/QGIS | refs/heads/master | tests/src/python/test_qgsfiledownloader.py | 16 | # -*- coding: utf-8 -*-
"""
Test the QgsFileDownloader class
Run test with:
LC_ALL=en_US.UTF-8 ctest -V -R PyQgsFileDownloader
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import tempfile
from functools import partial
from qgis.PyQt.QtCore import QEventLoop, QUrl
from qgis.core import (QgsFileDownloader, )
from qgis.testing import start_app, unittest
__author__ = 'Alessandro Pasotti'
__date__ = '08/11/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
start_app()
class TestQgsFileDownloader(unittest.TestCase):
"""
This class tests the QgsFileDownloader class
"""
    def _make_download(self, url, destination, cancel=False):
        """Run a download of *url* into *destination* and record signals.

        Blocks on a local QEventLoop until downloadExited is emitted.
        If *cancel* is True, the download is canceled on the first
        progress report.  The ``*_was_called`` flags set here (and,
        presumably, the ``*_args`` attributes set by ``_set_slot`` --
        defined outside this view, confirm) are inspected by the
        individual tests afterwards.
        """
        self.completed_was_called = False
        self.error_was_called = False
        self.canceled_was_called = False
        self.progress_was_called = False
        self.exited_was_called = False
        loop = QEventLoop()
        downloader = QgsFileDownloader(QUrl(url), destination)
        downloader.downloadCompleted.connect(partial(self._set_slot, 'completed'))
        downloader.downloadExited.connect(partial(self._set_slot, 'exited'))
        downloader.downloadCanceled.connect(partial(self._set_slot, 'canceled'))
        downloader.downloadError.connect(partial(self._set_slot, 'error'))
        downloader.downloadProgress.connect(partial(self._set_slot, 'progress'))
        # Quit the event loop as soon as the downloader reports it is done.
        downloader.downloadExited.connect(loop.quit)
        if cancel:
            downloader.downloadProgress.connect(downloader.cancelDownload)
        loop.exec_()
@unittest.skipIf(os.environ.get('QGIS_CONTINUOUS_INTEGRATION_RUN', 'true'),
'Test with http://www.qgis.org unstable. Needs local server.')
def test_validDownload(self):
"""Tests a valid download"""
destination = tempfile.mktemp()
self._make_download('http://www.qgis.org', destination)
self.assertTrue(self.exited_was_called)
self.assertTrue(self.completed_was_called)
self.assertTrue(self.progress_was_called)
self.assertFalse(self.canceled_was_called)
self.assertFalse(self.error_was_called)
self.assertTrue(os.path.isfile(destination))
self.assertGreater(os.path.getsize(destination), 0)
def test_inValidDownload(self):
"""Tests an invalid download"""
destination = tempfile.mktemp()
self._make_download('http://www.doesnotexistofthatimsure.qgis', destination)
self.assertTrue(self.exited_was_called)
self.assertFalse(self.completed_was_called)
self.assertTrue(self.progress_was_called)
self.assertFalse(self.canceled_was_called)
self.assertTrue(self.error_was_called)
self.assertEqual(self.error_args[1], [u'Download failed: Host www.doesnotexistofthatimsure.qgis not found'])
self.assertFalse(os.path.isfile(destination))
@unittest.skipIf(os.environ.get('QGIS_CONTINUOUS_INTEGRATION_RUN', 'true'),
'Test with http://www.github.com unstable. Needs local server.')
def test_dowloadCanceled(self):
"""Tests user canceled download"""
destination = tempfile.mktemp()
self._make_download('https://github.com/qgis/QGIS/archive/master.zip', destination, True)
self.assertTrue(self.exited_was_called)
self.assertFalse(self.completed_was_called)
self.assertTrue(self.canceled_was_called)
self.assertFalse(self.error_was_called)
self.assertFalse(os.path.isfile(destination))
def test_InvalidUrl(self):
destination = tempfile.mktemp()
self._make_download('xyz://www', destination)
self.assertTrue(self.exited_was_called)
self.assertFalse(self.completed_was_called)
self.assertFalse(self.canceled_was_called)
self.assertTrue(self.error_was_called)
self.assertFalse(os.path.isfile(destination))
self.assertEqual(self.error_args[1], [u"Download failed: Protocol \"xyz\" is unknown"])
@unittest.skipIf(os.environ.get('QGIS_CONTINUOUS_INTEGRATION_RUN', 'true'),
'Test with http://www.github.com unstable. Needs local server.')
def test_InvalidFile(self):
self._make_download('https://github.com/qgis/QGIS/archive/master.zip', "")
self.assertTrue(self.exited_was_called)
self.assertFalse(self.completed_was_called)
self.assertFalse(self.canceled_was_called)
self.assertTrue(self.error_was_called)
self.assertEqual(self.error_args[1], [u"No output filename specified"])
def test_BlankUrl(self):
destination = tempfile.mktemp()
self._make_download('', destination)
self.assertTrue(self.exited_was_called)
self.assertFalse(self.completed_was_called)
self.assertFalse(self.canceled_was_called)
self.assertTrue(self.error_was_called)
self.assertFalse(os.path.isfile(destination))
self.assertEqual(self.error_args[1], [u"Download failed: Protocol \"\" is unknown"])
def ssl_compare(self, name, url, error):
destination = tempfile.mktemp()
self._make_download(url, destination)
msg = "Failed in %s: %s" % (name, url)
self.assertTrue(self.exited_was_called)
self.assertFalse(self.completed_was_called, msg)
self.assertFalse(self.canceled_was_called, msg)
self.assertTrue(self.error_was_called, msg)
self.assertFalse(os.path.isfile(destination), msg)
result = sorted(self.error_args[1])
result = ';'.join(result)
self.assertTrue(result.startswith(error), msg + "expected:\n%s\nactual:\n%s\n" % (result, error))
@unittest.skipIf(os.environ.get('QGIS_CONTINUOUS_INTEGRATION_RUN', 'true'), 'Test with badssl.com unstable. Needs local server.')
def test_sslExpired(self):
self.ssl_compare("expired", "https://expired.badssl.com/", "SSL Errors: ;The certificate has expired")
self.ssl_compare("self-signed", "https://self-signed.badssl.com/",
"SSL Errors: ;The certificate is self-signed, and untrusted")
self.ssl_compare("untrusted-root", "https://untrusted-root.badssl.com/",
"No certificates could be verified;SSL Errors: ;The issuer certificate of a locally looked up certificate could not be found")
def _set_slot(self, *args, **kwargs):
# print('_set_slot(%s) called' % args[0])
setattr(self, args[0] + '_was_called', True)
setattr(self, args[0] + '_args', args)
# Run the test suite when the module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
CPedrini/TateTRES | refs/heads/master | handler.py | 1 | #-*- encoding: utf-8 -*-
import random, time, sys
# Librerias de mi autoria, permiten respectivamente: Scrapear eRepublik, scrapear datos de RW's, scrapear eventos de eRepublik de forma automatica
from erapi import ErAPI
from watcher import Watcher
from events import Events
from serversocket import ServerSocket
class FEAHandler:
# Metodo constructor, seteos basicos necesarios de configuracion, instancia objetos utiles
def __init__(self, bot):
    """Store the IRC bot reference and instantiate the helper services.

    bot -- the IRC bot instance used for queueing outgoing messages.
    """
    self.bot = bot
    # ErAPI scrapes eRepublik data; Watcher scrapes resistance-war data.
    self.erepublik = ErAPI()
    self.watcher = Watcher()
    # Requires bot for automatic messages
    self.events = Events(bot)
    self.serversocket = ServerSocket(bot)
# Manejador inutil, existe solo para probar paralelismo y control de errores de nivel superior al objeto
def HANDLE_SARASA(self, sender, params, is_pm):
    """Demo handler used only to exercise parallelism and error handling.

    It sleeps, then raises on purpose; the code after ``raise`` is
    intentionally unreachable and shows what would run otherwise.
    """
    # Intentional error to demonstrate that unexpected failures are
    # contained above this object and that multiple commands can run
    # at the same time (issue several .sarasa to verify).
    time.sleep(10)
    raise Exception('sarasa')
    if(is_pm):
        self.bot.queue_append(*['PRIVMSG %s :%s' % (sender, 'Mensajes de a bloques 1.'), 'PRIVMSG %s :%s\n' % (sender, 'Mensaje de a bloques 2.')])
    else:
        channel = params[0]
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, 'Hola.'))
# Manejador del MASS CALL, ilumina todos los nicks y envia mensaje
def HANDLE_CHE(self, sender, params, is_pm):
    """Alias for the mass-call command; delegates to HANDLE_KUAK."""
    self.HANDLE_KUAK(sender, params, is_pm)
# Manejador del MASS CALL, ilumina todos los nicks y envia mensaje
def HANDLE_KUAK(self, sender, params, is_pm):
    """Mass call: highlight every nick in the channel, then post the notice.

    Requires channel level '%' or global permission '%'. Ignored in PM.
    """
    if is_pm:
        return
    channel = params[0].lower()
    allowed = (self.bot.has_level(channel, sender, '%')
               or self.bot.has_permission(sender, '%'))
    if not allowed:
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, '%s: No tienes permiso para utilizar ese comando.' % sender))
        return
    # Ping the users in batches of 35 nicks per message.
    nicks = self.bot.data[channel]['users']
    for start in range(0, len(nicks), 35):
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, ' '.join(nicks[start:start + 35])))
    self.bot.queue_append('PRIVMSG %s :%s' % (channel, '\x030,4\02¡ATENCION!\02\x03\x031,0\02 %s\x03' % params[1]))
# Manejador de sorteo, sortea lo que se envie por parametro al azar entre los miembros del canal, incluso el bot puede ganar ^.^
def HANDLE_SORTEAR(self, sender, params, is_pm):
    """Raffle: draw a random channel member as winner of params[1].

    Anyone may trigger it; the bot itself can win. Ignored in PM.
    """
    if is_pm:
        return
    channel = params[0].lower()

    def announce(text):
        # All raffle output goes to the channel the command came from.
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, text))

    announce('\x030,4\02¡ATENCION!\02\x03\x031,7\02 Comienza el sorteo de: %s\02\x03' % params[1])
    announce('\x031,7\02Chan chan chan\02\x03')
    time.sleep(1)  # dramatic pause before the draw
    winner = random.choice(self.bot.data[channel]['users'])
    if winner == self.bot.nick:
        announce('\x030,3\02Muajaja, gane yo, denme el premio!\02\x03')
    else:
        announce('\x030,3\02Felicidades %s, ganaste: %s\02\x03' % (winner, params[1]))
# Manejador del SYNC, actualiza ordenes
def HANDLE_SYNC(self, sender, params, is_pm):
    """Reload battle orders from the backing store ('%' permission)."""
    target = sender if is_pm else params[0]
    if self.bot.has_permission(sender, '%'):
        self.bot.refresh_orders()
        # NOTE: the original wording differs between PM and channel.
        text = ('Ordenes actualizadas.' if is_pm
                else '%s: Ordenes actualizados.' % sender)
    else:
        text = ('No tienes permiso para utilizar ese comando.' if is_pm
                else '%s: No tienes permiso para utilizar ese comando.' % sender)
    self.bot.queue_append('PRIVMSG %s :%s' % (target, text))
# Manejador del ANUNCIOKUAK, hace un KUAK y luego muestra el ANUNCIO en todos los canales donde esta el bot
def HANDLE_ANUNCIOKUAK(self, sender, params, is_pm):
    """Global announcement with mass call ('&' permission).

    In every channel the bot is in: highlight all nicks (batches of 35)
    and then post the announcement from params[1].
    """
    if(self.bot.has_permission(sender, '&')):
        # Mirror the announcement through the companion server socket.
        self.serversocket.send('¡ANUNCIO! %s dice: %s' % (sender, params[1]))
        for channel in self.bot.data:
            # Chunk the nick list so each PRIVMSG stays within line limits.
            users = [self.bot.data[channel]['users'][i:i+35] for i in range(0, len(self.bot.data[channel]['users']), 35)]
            for sublist in users:
                self.bot.queue_append('PRIVMSG %s :%s' % (channel, ' '.join(sublist)))
            # sender[:-1] + "'" + last char splits the sender's nick with an
            # apostrophe — presumably to avoid self-highlighting; verify.
            self.bot.queue_append('PRIVMSG %s :%s' % (channel, '\x030,4\02¡ANUNCIO!\02\x03\x030,5 %s dice:\x03\x031,0\02 %s\02\x03' % ('%s\'%s' % (sender[:-1], sender[-1:]), params[1])))
    else:
        if(is_pm):
            self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'No tienes permiso para utilizar ese comando.'))
        else:
            self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: No tienes permiso para utilizar ese comando.' % sender))
# Manejador del ANUNCIO, muestra un mensaje en todos los canales donde esta el bot
def HANDLE_ANUNCIO(self, sender, params, is_pm):
    """Broadcast an announcement to every channel the bot is in ('@')."""
    if not self.bot.has_permission(sender, '@'):
        if is_pm:
            self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'No tienes permiso para utilizar ese comando.'))
        else:
            self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: No tienes permiso para utilizar ese comando.' % sender))
        return
    self.serversocket.send('¡ANUNCIO! %s dice: %s' % (sender, params[1]))
    # Apostrophe-split the sender's nick (apparently to avoid highlighting them).
    tagged = '%s\'%s' % (sender[:-1], sender[-1:])
    for channel in self.bot.data:
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, '\x030,4\02¡ANUNCIO!\02\x03\x030,5 %s dice:\x03\x031,0\02 %s\02\x03' % (tagged, params[1])))
# Manejador del DONAR, muestra los links de donacion del perfil de eR asociado al nick, o si se especifica, de otro nick
def HANDLE_DONAR(self, sender, params, is_pm):
    """Show donate-items / donate-money links for an eRepublik profile.

    Uses the sender's registered profile unless another nick is passed
    in params[1]. Replies in PM or in the originating channel.
    """
    channel = sender if is_pm == True else params[0]
    try:
        if len(params) > 1 and params[1].strip():
            # Optional argument: look up another nick instead of the sender.
            sender = params[1].strip(' \r\n')
        citizen_id = str(self.erepublik.get_id(sender))
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, '\x03\02%s\02 - \02Donar items:\02\x03 http://www.erepublik.com/es/economy/donate-items/%s\x03 - \02Donar dinero:\02 \x03http://www.erepublik.com/es/economy/donate-money/%s' % (sender, citizen_id, citizen_id)))
    except Exception:
        # Narrowed from a bare "except:" (which also swallowed SystemExit
        # and KeyboardInterrupt). get_id raises for unregistered nicks.
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, '\x031,4\02Error:\02\x03 Nick \02"%s"\02 no esta registrado.' % sender))
# Manejador del FC, muestra estadisticas del perfil de eR asociado al nick, o si se especifica, de otro nick
def HANDLE_FC(self, sender, params, is_pm):
    """Show the hit-damage table (Q0..Q7 weapons) for a citizen.

    Uses the sender's registered profile unless another nick is passed in
    params[1]. Emits one line with the damage per weapon quality plus Q7
    combined with natural-enemy (+10%) and +50% booster multipliers.
    """
    channel = sender if is_pm == True else params[0]
    try:
        if len(params) > 1 and params[1].strip():
            sender = params[1].strip(' \r\n')
        level = self.erepublik.get_level(sender)
        strength = self.erepublik.get_strength(sender)
        rank_points = self.erepublik.get_rank_points(sender)
        nick = self.erepublik.get_nick(sender)
        rank_name = self.erepublik.calculate_rank_name(rank_points)
        # One hit with each weapon quality; firepower values 0..200 map
        # to Q0..Q7 (removed the dead commented-out summary line and the
        # id/citizenship lookups it alone used).
        qs = [self.erepublik.calculate_damage(rank_points, strength, fp, level, 1)
              for fp in (0, 20, 40, 60, 80, 100, 120, 200)]
        q7 = qs[7]
        values = tuple([nick, rank_name, strength] + qs + [q7 * 1.1, q7 * 1.5, q7 * 1.65])
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, '\x03\02%s\02\x03 (%s, %.2f de fuerza): \x0314[Q0: \02%d\02]\x03 \x033[Q1: \02%d\02]\x03 \x0312[Q2: \02%d\02]\x03 \x032[Q3: \02%d\02]\x03 \x037[Q4: \02%d\02]\x03 \x034[Q5: \02%d\02]\x03 \x035[Q6: \02%d\02]\x03 \x036[Q7: \02%d\02]\x03 \x030,3[Q7 + NE: \02%d\02]\x03 \x030,3[Q7 + 50%%: \02%d\02]\x03 \x030,3[Q7 + NE + 50%%: \02%d\02]\x03' % values))
    except Exception:
        # Narrowed from a bare "except:"; profile getters raise for
        # unregistered nicks.
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, '\x031,4\02Error:\02\x03 Nick \02"%s"\02 no esta registrado.' % sender))
# Manejador del LP, muestra estadisticas del perfil de eR asociado al nick, o si se especifica, de otro nick
def HANDLE_LP(self, sender, params, is_pm):
    """Show the profile summary (level, strength, rank, citizenship).

    Uses the sender's registered profile unless another nick is passed
    in params[1].
    """
    channel = sender if is_pm == True else params[0]
    try:
        if len(params) > 1 and params[1].strip():
            sender = params[1].strip(' \r\n')
        citizen_id = self.erepublik.get_id(sender)  # renamed: 'id' shadowed the builtin
        level = self.erepublik.get_level(sender)
        strength = self.erepublik.get_strength(sender)
        rank_points = self.erepublik.get_rank_points(sender)
        citizenship = self.erepublik.get_citizenship(sender)
        nick = self.erepublik.get_nick(sender)
        rank_name = self.erepublik.calculate_rank_name(rank_points)
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, '\x03\02%s\02 [%d] | \02Nivel:\02 %d | \02Fuerza:\02 %.2f | \02Rango:\02 %s (%d) | \02Ciudadania:\02 %s\x03' % (nick, citizen_id, level, strength, rank_name, rank_points, citizenship)))
    except Exception:
        # Narrowed from a bare "except:"; getters raise for unknown nicks.
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, '\x031,4\02Error:\02\x03 Nick \02"%s"\02 no esta registrado.' % sender))
# Manejador del LINK, muestra el link del perfil de eR asociado al nick, o si se especifica, de otro nick
def HANDLE_LINK(self, sender, params, is_pm):
    """Show the eRepublik profile URL for a nick (sender's by default)."""
    channel = sender if is_pm == True else params[0]
    try:
        if len(params) > 1 and params[1].strip():
            sender = params[1].strip(' \r\n')
        citizen_id = self.erepublik.get_id(sender)  # renamed: 'id' shadowed the builtin
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, '\x03El link al perfil de \02%s\02 es:\x03 http://www.erepublik.com/es/citizen/profile/%d' % (sender, citizen_id)))
    except Exception:
        # Narrowed from a bare "except:"; get_id raises for unknown nicks.
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, '\x031,4\02Error:\02\x03 Nick \02"%s"\02 no esta registrado.' % sender))
# Manejador del REGID, registra el ID asociado al nick que envia el comando
def HANDLE_REGID(self, sender, params, is_pm):
    """Register the citizen ID in params[1] for the sender's nick."""
    channel = sender if is_pm == True else params[0]
    try:
        self.erepublik.reg_nick_write(sender, params[1])
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, 'Usuario registrado.'))
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are no longer silently reported as a registration failure.
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, 'Error al registrar usuario.'))
# Manejador del ORDENES, muestra las ordenes en lista segun prioridad
def HANDLE_ORDENES(self, sender, params, is_pm):
    """List the current battle orders, one colour-coded line per zone.

    Fix: the original rewrote self.bot.orders in place, permanently
    replacing the stored priority labels ('BAJA', 'ALTA', ...) with IRC
    colour-coded strings. Formatting is now done on local copies so the
    shared orders data is never mutated.
    """
    channel = sender if is_pm else params[0]
    # Colour markup for each priority label; unknown values pass through.
    styles = {
        'NO PEGAR': '\x030,1\02 NO PEGAR \02\x03',
        'BAJA': '\x030,9\02 BAJA \02\x03',
        'MEDIA': '\x030,8\02 MEDIA \02\x03',
        'ALTA': '\x030,7\02 ALTA \02\x03',
        'MAXIMA': '\x030,4\02 MAXIMA \02\x03',
    }
    for zone in self.bot.orders:
        order = self.bot.orders[zone]
        divs = [styles.get(order[div], order[div]) for div in ('d1', 'd2', 'd3', 'd4')]
        data = (zone, order['id'], divs[0], divs[1], divs[2], divs[3], order['side'])
        self.bot.queue_append('PRIVMSG %s :%s' % (channel, '\x030,4\02Zona:\02\x03 %s || \02Link: http://erpk.org/b/%d\02 || \02Div 1:\02 %s || \02Div 2:\02 %s || \02Div 3:\02 %s || \02Div 4:\02 %s || \02Side:\02 \x030,3%s\x03' % data))
# Manejador del RW, muestra datos del estado de la rebelion de el/la region/pais indicad@
def HANDLE_RW(self, sender, params, is_pm):
    """Report resistance-war (RW) status for a country or a region.

    params[1] has the form "-<type> <value>" where type is:
      p  country name  -> list all loaded regions of that country
      a  country abbreviation -> same, resolved to the full name first
      s  region initial -> status of one region
    """
    channel = sender if is_pm == True else params[0]
    if(params[1][0] == '-'):
        # Split "-<type> <value>" at the first space.
        type = params[1][1:params[1].find(' ')].lower()
        what = params[1][params[1].find(' ') + 1:].strip('\r\n ').lower()
        if(len(what) > 0 and what[0] != '-'):
            if(type == 'p' or type == 'a'):
                if(type == 'a'):
                    # Resolve the abbreviation to the full country name;
                    # returns False when unknown.
                    what = self.watcher.get_country_name(what)
                if(what != False):
                    self.bot.queue_append('PRIVMSG %s :Regiones cargadas en: %s' % (channel, what))
                    rws = self.watcher.get_by_country_name(what)
                    if(rws != False):
                        enabled = False
                        for rw in rws:
                            # Only list regions whose rebellion is active.
                            if(rw['enabled'] == True):
                                enabled = True
                                self.bot.queue_append('PRIVMSG %s :%s [%s] | Supporters: %d/10' % (channel, rw['region_name'], rw['region_initial'], len(rw['supporters'])))
                        if(enabled == False):
                            self.bot.queue_append('PRIVMSG %s :El pais no tiene rebeliones activas.' % (channel))
                    else:
                        self.bot.queue_append('PRIVMSG %s :\x031,4\02Error:\02\x03 Pais erroneo o no cargado en el sistema.' % channel)
                else:
                    self.bot.queue_append('PRIVMSG %s :\x031,4\02Error:\02\x03 Abreviacion de pais erronea o inexistente.' % channel)
            elif(type == 's'):
                # Single-region lookup by its initial.
                rw = self.watcher.get_by_region_initial(what)
                if(rw != False):
                    if(rw['enabled'] == True):
                        self.bot.queue_append('PRIVMSG %s :%s [%s] | Pais: %s [%s] | Supporters: %d/10' % (channel, rw['region_name'], rw['region_initial'], rw['country_name'], rw['country_initial'],len(rw['supporters'])))
                    else:
                        self.bot.queue_append('PRIVMSG %s :La region %s no tiene una rebelion activa.' % (channel, rw['region_name']))
                else:
                    self.bot.queue_append('PRIVMSG %s :\x031,4\02Error:\02\x03 Region incorrecta o no cargada en el sistema.' % channel)
            else:
                self.bot.queue_append('PRIVMSG %s :\x031,4\02Error:\02\x03 tipo de parametro incorrecto.' % channel)
        else:
            self.bot.queue_append('PRIVMSG %s :\x031,4\02Error:\02\x03 No se especifico valor del parametro.' % channel)
    else:
        self.bot.queue_append('PRIVMSG %s :\x031,4\02Error:\02\x03 No se especifico tipo de parametro.' % channel)
# Manejador del HELP, muestra link al HTML de ayuda del bot
def HANDLE_HELP(self, sender, params, is_pm):
    """Reply with the URL of the bot's help page (PM or channel)."""
    target = sender if is_pm else params[0]
    self.bot.queue_append('PRIVMSG %s :%s' % (target, 'Hoja de ayuda: %s' % self.bot.help_html))
# Manejador del JOIN, une el bot a un canal, parametro opcional de contraseña
def HANDLE_JOIN(self, sender, params, is_pm):
    """Join the channel named in params[1] (optional key included there).

    Requires the '&' permission; otherwise a denial is sent back.
    """
    if self.bot.has_permission(sender, '&'):
        self.bot.queue_append('JOIN %s\n' % params[1].strip('\r\n'))
        return
    denied = 'No tienes permiso para utilizar ese comando.'
    if is_pm:
        self.bot.queue_append('PRIVMSG %s :%s' % (sender, denied))
    else:
        self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: %s' % (sender, denied)))
# Manejador del PART, hace salir al bot de un canal y borra dicho canal de la lista temporal de canales
def HANDLE_PART(self, sender, params, is_pm):
    """Leave a channel and drop it from the temporary channel/event lists.

    Requires '&'; the removal becomes permanent only after ".update".
    """
    if not self.bot.has_permission(sender, '&'):
        if is_pm:
            self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'No tienes permiso para utilizar ese comando.'))
        else:
            self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: No tienes permiso para utilizar ese comando.' % sender))
        return
    name = params[1].strip('\r\n')
    self.bot.queue_append('PART %s\n' % name)
    self.bot.del_channel(name)
    self.bot.del_channel_event(name)
    self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'Canal eliminado de la lista de canales temporales y de canales de evento. No olvides hacerlo efectivo con ".update".'))
# Manejador del STAY, añade el canal especificado a la lista temporal de canales, parametro opcional la contraseña del canal
def HANDLE_STAY(self, sender, params, is_pm):
    """Register a channel (with optional key) for auto-join ('&' only).

    Becomes permanent only after ".update".
    """
    if self.bot.has_permission(sender, '&'):
        # partition() yields key == '' when no password was supplied,
        # matching the original split-based branching.
        name, _, key = params[1].strip('\r\n').partition(' ')
        self.bot.add_channel(name, key)
        ok = 'Canal agregado para autoinicio. No olvides hacerlo efectivo con ".update".'
        if is_pm:
            self.bot.queue_append('PRIVMSG %s :%s' % (sender, ok))
        else:
            self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: %s' % (sender, ok)))
    else:
        denied = 'No tienes permiso para utilizar ese comando.'
        if is_pm:
            self.bot.queue_append('PRIVMSG %s :%s' % (sender, denied))
        else:
            self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: %s' % (sender, denied)))
# Manejador del DEL, elimina el nick ESPECIFICADO de la lista temporal del permiso ESPECIFICADO
def HANDLE_DEL(self, sender, params, is_pm):
    """Remove a nick from the temporary list of a permission level.

    Syntax: <nick> <level>. Requires '&'; only owners ('~') may remove
    the top levels '~' and '&'. Persisted with ".update".
    """
    def reply(text):
        # Answer in PM, or in-channel with the usual "nick: " prefix.
        if is_pm:
            self.bot.queue_append('PRIVMSG %s :%s' % (sender, text))
        else:
            self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: %s' % (sender, text)))

    if not self.bot.has_permission(sender, '&'):
        reply('No tienes permiso para utilizar ese comando.')
        return
    parts = params[1].strip('\r\n').split(' ', 1)
    if len(parts) < 2:
        reply('El formato del comando es erroneo.')
        return
    if parts[1] in ('~', '&') and not self.bot.has_permission(sender, '~'):
        reply('No puedes eliminar permisos iguales o superiores al tuyo.')
        return
    self.bot.del_permission(parts[0], parts[1])
    reply('Permiso eliminado correctamente. No olvides hacerlo efectivo con ".update".')
# Manejador del ADD, agrega el nick ESPECIFICADO en la lista temporal del permiso ESPECIFICADO
def HANDLE_ADD(self, sender, params, is_pm):
    """Add a nick to the temporary list of a permission level.

    Syntax: <nick> <level>. Requires '&'; only owners ('~') may grant
    the top levels '~' and '&'. Persisted with ".update".
    """
    def reply(text):
        # Answer in PM, or in-channel with the usual "nick: " prefix.
        if is_pm:
            self.bot.queue_append('PRIVMSG %s :%s' % (sender, text))
        else:
            self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: %s' % (sender, text)))

    if not self.bot.has_permission(sender, '&'):
        reply('No tienes permiso para utilizar ese comando.')
        return
    parts = params[1].strip('\r\n').split(' ', 1)
    if len(parts) < 2:
        reply('El formato del comando es erroneo.')
        return
    if parts[1] in ('~', '&') and not self.bot.has_permission(sender, '~'):
        reply('Solo puedes asignar permisos inferiores al tuyo.')
        return
    self.bot.add_permission(parts[0], parts[1])
    reply('Permiso asignado correctamente. No olvides hacerlo efectivo con ".update".')
# Manejador del UPDATE, actualiza hojas de datos de permisos y canales con los datos actuales del bot
def HANDLE_UPDATE(self, sender, params, is_pm):
    """Persist the in-memory permission/channel lists to the data sheets.

    Requires '&' permission.
    """
    target = sender if is_pm else params[0]
    if self.bot.has_permission(sender, '&'):
        self.bot.update_permission()
        self.bot.update_channels()
        self.bot.update_channels_events()
        text = 'Base de datos de permisos y canales actualizadas.'
    else:
        text = 'No tienes permiso para utilizar ese comando.'
    if not is_pm:
        text = '%s: %s' % (sender, text)
    self.bot.queue_append('PRIVMSG %s :%s' % (target, text))
# Manejador del SAY, hace hablar al bot en el canal especificado diciendo el mensaje del parametro
def HANDLE_SAY(self, sender, params, is_pm):
    """Make the bot speak in a target channel/nick ('~' only).

    Silently ignores attempts to target the bot itself or to send a
    line starting with '.' (which would trigger another command).
    """
    if not self.bot.has_permission(sender, '~'):
        if is_pm:
            self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'No tienes permiso para utilizar ese comando.'))
        else:
            self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: No tienes permiso para utilizar ese comando.' % sender))
        return
    parts = params[1].split(' ', 1)
    if parts[0].lower() == self.bot.nick.lower():
        return
    if parts[1][0] == '.':
        return
    self.bot.queue_append('PRIVMSG %s :%s' % (parts[0], parts[1]))
# Manejador del ACT, hace actuar al bot en el canal especificado diciendo el mensaje del parametro
def HANDLE_ACT(self, sender, params, is_pm):
    """Make the bot perform a CTCP ACTION in a target channel ('~' only).

    Same guards as HANDLE_SAY: the bot never targets itself and never
    emits a line starting with '.'.
    """
    if not self.bot.has_permission(sender, '~'):
        if is_pm:
            self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'No tienes permiso para utilizar ese comando.'))
        else:
            self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: No tienes permiso para utilizar ese comando.' % sender))
        return
    parts = params[1].split(' ', 1)
    if parts[0].lower() == self.bot.nick.lower():
        return
    if parts[1][0] == '.':
        return
    self.bot.queue_append('PRIVMSG %s :\01ACTION %s\01' % (parts[0], parts[1].strip('\r\n')))
# Manejador del REBOOT, reinicia el bot, por consecuencia se actualizan listas de canales y permisos desde las hojas de calculo
def HANDLE_REBOOT(self, sender, params, is_pm):
    """Stop the main loop ('~' only); a restart reloads channel/permission sheets."""
    if self.bot.has_permission(sender, '~'):
        self.bot.running = False
        return
    if is_pm:
        self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'No tienes permiso para utilizar ese comando.'))
    else:
        self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: No tienes permiso para utilizar ese comando.' % sender))
# Manejador del SHUTDOWN, desconecta el bot de forma remota evitando su reinicio. Finaliza todos los threads
def HANDLE_SHUTDOWN(self, sender, params, is_pm):
    """Disconnect for good ('~' only): flag no-reconnect and send QUIT."""
    if self.bot.has_permission(sender, '~'):
        self.bot.dont_reconnect = True
        self.bot.queue_append('QUIT :%s' % 'Debo irme, mi planeta me necesita.')
        return
    if is_pm:
        self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'No tienes permiso para utilizar ese comando.'))
    else:
        self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: No tienes permiso para utilizar ese comando.' % sender))
# Manejador del MATAR, "mata" al nick indicado
def HANDLE_MATAR(self, sender, params, is_pm):
    """Playfully "kill" the given nick (channel only).

    Special cases: targeting the creator ('xxciro') bounces back on the
    sender, and the bot refuses to kill itself unless the creator asks.
    """
    if is_pm:
        return
    channel = params[0]
    # Fix: the original used params[1][:params[1].find(' ')], and find()
    # returns -1 when no text follows the nick, silently dropping the
    # target's last character. split() handles both cases correctly.
    target = params[1].split(' ', 1)[0].strip('\r\n')
    if 'xxciro' in target.lower():
        if 'xxciro' in sender.lower():
            self.bot.queue_append(*['PRIVMSG %s :No podria vivir sin usted, no me oblige, nooooooooooo!' % channel,
                                    'PRIVMSG %s :\01ACTION mata a %s y se suicida.\01' % (channel, target)])
        else:
            self.bot.queue_append(*['PRIVMSG %s :¿Osas intentar matar a mi creador?' % channel,
                                    'PRIVMSG %s :\01ACTION captura a %s y lo encierra en una jaula. El creador decidira que hacer contigo.\01' % (channel, sender)])
    elif self.bot.nick.lower() in target.lower():
        if 'xxciro' in sender.lower():
            self.bot.queue_append(*['PRIVMSG %s :Si mi amo!' % channel,
                                    'PRIVMSG %s :\01ACTION se suicida.\01' % channel])
        else:
            self.bot.queue_append(*['PRIVMSG %s :¿Me tomas por idiota?' % channel,
                                    'PRIVMSG %s :\01ACTION mata a %s y juega con sus tripas :3\01' % (channel, sender)])
    else:
        self.bot.queue_append('PRIVMSG %s :\01ACTION mata a %s y juega con sus tripas :3\01' % (channel, target))
# Manejador del ABRAZAR, "abraza" al nick indicado mediante un ACT
def HANDLE_ABRAZAR(self, sender, params, is_pm):
    """Hug the given nick via a CTCP ACTION (channel only).

    Hugging the creator or the bot itself gets a special reply.
    """
    if is_pm:
        return
    # Fix: params[1][:params[1].find(' ')] dropped the target's last
    # character when no text followed the nick (find() returns -1).
    target = params[1].split(' ', 1)[0].strip('\r\n')
    if 'xxciro' in target.lower():
        self.bot.queue_append('PRIVMSG %s :\01ACTION abraza a su creador :3\01' % params[0])
    elif self.bot.nick.lower() in target.lower():
        self.bot.queue_append('PRIVMSG %s :\01ACTION se autoabraza.\01' % params[0])
    else:
        self.bot.queue_append('PRIVMSG %s :\01ACTION abraza a %s :3\01' % (params[0], target))
# Manejador del EJECT, kickbanea al nick indicado durante x segundos
def HANDLE_EJECT(self, sender, params, is_pm):
    """Kick-ban a nick for a random 0-9 seconds (channel only).

    Both the bot and the caller need at least HOP ('%') in the channel.
    Targeting the creator ('xxciro') or the bot bounces the ban onto the
    sender instead. NOTE: time.sleep blocks this handler for the whole
    ban; kept for behavioral compatibility.
    """
    if is_pm:
        return
    channel = params[0]
    if not self.bot.has_level(channel.lower(), self.bot.nick, '%'):
        self.bot.queue_append('PRIVMSG %s :%s: No tengo accesos suficientes como para ejecutar este comando, necesito HOP.' % (channel, sender))
        return
    if not self.bot.has_level(channel.lower(), sender, '%'):
        self.bot.queue_append('PRIVMSG %s :%s: No tienes accesos suficientes como para ejecutar este comando.' % (channel, sender))
        return
    # Fix: split() instead of slicing with str.find(), which returned -1
    # (dropping the last character) when nothing followed the nick.
    target = params[1].split(' ', 1)[0].strip('\r\n')
    ban_time = random.randrange(10)

    def kickban(victim):
        # Ban, kick, wait out the ban, then lift it (was duplicated 4x).
        self.bot.queue_append('MODE %s +b %s' % (channel, victim))
        self.bot.queue_append('KICK %s %s :A VOOOLLLLAAARRR!!!!!!! Regresa dentro de %d segundos!!!' % (channel, victim, ban_time))
        time.sleep(ban_time)
        self.bot.queue_append('MODE %s -b %s' % (channel, victim))

    if 'xxciro' in target.lower():
        if 'xxciro' in sender.lower():
            self.bot.queue_append('PRIVMSG %s :No podria vivir sin usted, no me oblige, nooooooooooo!' % channel)
            self.bot.queue_append('PRIVMSG %s :El creador ha sido ejecutado durante %d segundos' % (channel, ban_time))
            kickban(target)
        else:
            self.bot.queue_append('PRIVMSG %s :¿Osas intentar matar a mi creador?' % channel)
            self.bot.queue_append('PRIVMSG %s :%s ha sido ejecutado por %s durante %d segundos' % (channel, sender, self.bot.nick, ban_time))
            kickban(sender)
    elif self.bot.nick.lower() in target.lower():
        self.bot.queue_append('PRIVMSG %s :¿Me tomas por idiota?' % channel)
        self.bot.queue_append('PRIVMSG %s :%s ha sido ejecutado por %s durante %d segundos' % (channel, sender, self.bot.nick, ban_time))
        kickban(sender)
    else:
        self.bot.queue_append('PRIVMSG %s :%s ha sido ejecutado por %s durante %d segundos' % (channel, target, sender, ban_time))
        kickban(target)
# Manejador del FUSILAR, kickbanea al nick indicado durante x segundos
def HANDLE_FUSILAR(self, sender, params, is_pm):
    """Firing-squad skit: kick-ban the nick and immediately unban it.

    Same protections as HANDLE_EJECT: bot and caller need HOP ('%'),
    and targeting the creator or the bot turns the squad on the sender.
    """
    if is_pm:
        return
    channel = params[0]
    if not self.bot.has_level(channel.lower(), self.bot.nick, '%'):
        self.bot.queue_append('PRIVMSG %s :%s: No tengo accesos suficientes como para ejecutar este comando, necesito HOP.' % (channel, sender))
        return
    if not self.bot.has_level(channel.lower(), sender, '%'):
        self.bot.queue_append('PRIVMSG %s :%s: No tienes accesos suficientes como para ejecutar este comando.' % (channel, sender))
        return
    # Fix: split() instead of slicing with str.find(), which returned -1
    # (dropping the last character) when nothing followed the nick.
    target = params[1].split(' ', 1)[0].strip('\r\n')

    def execute(victim):
        # The full skit + ban/kick/unban sequence (was duplicated 4x).
        self.bot.queue_append(*['PRIVMSG %s :\01ACTION le pone un cigarro a %s y lo prende\01' % (channel, victim),
                                'PRIVMSG %s :\01ACTION deja que %s diga sus ultimas palabras\01' % (channel, victim),
                                'PRIVMSG %s :\01ACTION Apunta!!!\01' % (channel),
                                'MODE %s +b %s' % (channel, victim),
                                'KICK %s %s :PUUUUMMMMMMM!!!!!! You\'re DEAD!' % (channel, victim),
                                'MODE %s -b %s' % (channel, victim)])

    if 'xxciro' in target.lower():
        if 'xxciro' in sender.lower():
            self.bot.queue_append('PRIVMSG %s :No podria vivir sin usted, no me oblige, nooooooooooo!' % channel)
            execute(target)
        else:
            self.bot.queue_append('PRIVMSG %s :¿Osas intentar matar a mi creador?' % channel)
            execute(sender)
    elif self.bot.nick.lower() in target.lower():
        self.bot.queue_append('PRIVMSG %s :¿Me tomas por idiota?' % channel)
        execute(sender)
    else:
        execute(target)
# Manejador del EVENTO, añade el canal especificado a la lista temporal de canales, parametro opcional la contraseña del canal
def HANDLE_EVENTO(self, sender, params, is_pm):
parts = params[1].strip(' \r\n').split(' ', 1)
if(self.bot.has_level(params[0].lower(), sender, '&') or self.bot.has_permission(sender, '~')):
if len(parts) == 2 and (parts[1].lower() == 'enable' or parts[1].lower() == 'disable'):
if parts[1].lower() == 'enable':
self.bot.add_channel_event(parts[0])
if(is_pm):
self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'Canal agregado temporalmente para evento. No olvides hacerlo efectivo con ".update".'))
else:
self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: Canal agregado temporalmente para evento. No olvides hacerlo efectivo con ".update".' % sender))
else:
self.bot.del_channel_event(parts[0])
if(is_pm):
self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'Canal eliminado temporalmente para evento. No olvides hacerlo efectivo con ".update".'))
else:
self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: Canal eliminado temporalmente para evento. No olvides hacerlo efectivo con ".update".' % sender))
else:
if(is_pm):
self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'Formato del comando invalido.'))
else:
self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: Formato del comando invalido.' % sender))
else:
if(is_pm):
self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'No tienes permiso para utilizar ese comando.'))
else:
self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: No tienes permiso para utilizar ese comando.' % sender))
# Manejador del ADMIN, permite visualizar valores de variables
def HANDLE_ADMIN(self, sender, params, is_pm):
if self.bot.has_permission(sender, '~'):
parts = params[1].strip(' \r\n').split(' ', 1)
str_exit = 'Formato de comando invalido'
if len(parts) >= 1:
if parts[0].lower() == 'evento':
str_exit = ', '.join(self.bot.channels_events)
elif parts[0].lower() == 'channels':
channels_list = []
for channel in self.bot.data:
channels_list.append(channel)
str_exit = ', '.join(channels_list)
elif parts[0].lower() == 'data':
str_exit = str(self.bot.data)
else:
str_exit = 'Valor de busqueda invalido'
if(is_pm):
self.bot.queue_append('PRIVMSG %s :%s' % (sender, str_exit))
else:
self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: %s' % (sender, str_exit)))
else:
if(is_pm):
self.bot.queue_append('PRIVMSG %s :%s' % (sender, 'No tienes permiso para utilizar ese comando.'))
else:
self.bot.queue_append('PRIVMSG %s :%s' % (params[0], '%s: No tienes permiso para utilizar ese comando.' % sender)) |
rowhit/h2o-2 | refs/heads/master | py/testdir_multi_jvm/test_rf_float_rand_fvec.py | 9 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_rf
# fyi max FP single precision (hex rep. 7f7f ffff) approx. 3.4028234 * 10**38"
def write_syn_dataset(csvPathname, rowCount, colCount, headerData):
    """Write a synthetic CSV: header line, then rowCount rows of random
    floats (via rand_rowData) plus a random 0/1 output column.

    Fixed: use a context manager so the file is closed even if a write
    fails, and open in plain "w" mode (the old "w+" read capability was
    never used).
    """
    with open(csvPathname, "w") as dsf:
        dsf.write(headerData + "\n")
        # add random output. just 0 or 1
        for _ in range(rowCount):
            rowData = rand_rowData(colCount)
            dsf.write(rowData + "," + str(random.randint(0, 1)) + "\n")
# append!
def append_syn_dataset(csvPathname, colCount, num):
    """Append *num* extra random rows (without the 0/1 output column) to an
    existing synthetic CSV file."""
    with open(csvPathname, "a") as dsf:
        for _ in range(num):
            dsf.write(rand_rowData(colCount) + "\n")
def rand_rowData(colCount):
    """Build one CSV row: a leading uniform value in [0, colCount] followed
    by colCount uniforms in +/-1e37.

    The range is kept within single-precision exponent limits; see
    https://0xdata.atlassian.net/browse/HEX-1315 for what happens with
    -1e59..1e59 (h2o used to only support single precision, HEX-638).
    """
    fields = [str(random.uniform(0, colCount))]
    fields.extend(str(random.uniform(-1e37, 1e37)) for _ in range(colCount))
    return ",".join(fields)
class Basic(unittest.TestCase):
    """Grow a synthetic CSV across five trials and run RF on a 2-node h2o
    cloud, checking that every input column parses as 8-byte float ('Real')."""

    def tearDown(self):
        # Fail the test if any h2o node logged errors while it ran.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        global SEED
        SEED = h2o.setup_random_seed()
        # Two-node cloud; the flatfile avoids multicast node discovery.
        h2o.init(2,java_heap_MB=1300,use_flatfile=True)

    @classmethod
    def tearDownClass(cls):
        if not h2o.browse_disable:
            # time.sleep(500000)
            pass
        h2o.tear_down_cloud()

    def test_rf_float_rand_fvec(self):
        SYNDATASETS_DIR = h2o.make_syn_dir()
        csvFilename = "syn_prostate.csv"
        csvPathname = SYNDATASETS_DIR + '/' + csvFilename
        headerData = "ID,CAPSULE,AGE,RACE,DPROS,DCAPS,PSA,VOL,GLEASON"
        totalRows = 1000
        colCount = 7
        write_syn_dataset(csvPathname, totalRows, colCount, headerData)
        for trial in range (5):
            # grow the data set
            rowData = rand_rowData(colCount)
            num = random.randint(4096, 10096)
            append_syn_dataset(csvPathname, colCount, num)
            totalRows += num
            # make sure all key names are unique, when we re-put and re-parse (h2o caching issues)
            hex_key = csvFilename + "_" + str(trial) + ".hex"
            # On EC2 once we get to 30 trials or so, do we see polling hang? GC or spill of heap or ??
            ntree = 2
            kwargs = {
                'ntrees': ntree,
                'mtries': None,
                'max_depth': 20,
                'sample_rate': 0.67,
                'destination_key': None,
                'nbins': 1024,
                'seed': 784834182943470027,
            }
            parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, doSummary=True)
            start = time.time()
            rfView = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=15, pollTimeoutSecs=5, **kwargs)
            print "trial #", trial, "totalRows:", totalRows, "num:", num, "RF end on ", csvFilename, \
                'took', time.time() - start, 'seconds'
            (classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfView, ntree=ntree)
            inspect = h2o_cmd.runInspect(key=hex_key)
            cols = inspect['cols']
            numCols = inspect['numCols']
            for i,c in enumerate(cols):
                if i < (numCols-1): # everything except the last col (output) should be 8 byte float
                    colType = c['type']
                    self.assertEqual(colType, 'Real', msg="col %d should be type Real: %s" % (i, colType))
            ### h2o_cmd.runInspect(key=hex_key)
            ### h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
            h2o.check_sandbox_for_errors()
if __name__ == '__main__':
h2o.unit_main()
|
rfguri/vimfiles | refs/heads/master | bundle/ycm/third_party/ycmd/third_party/JediHTTP/vendor/jedi/jedi/evaluate/compiled/mixed.py | 1 | """
Used only for REPL Completion.
"""
import inspect
from jedi import common
from jedi.parser.fast import FastParser
from jedi.evaluate import compiled
from jedi.cache import underscore_memoization
class MixedObject(object):
    """
    A ``MixedObject`` is used in two ways:

    1. It uses the default logic of ``parser.tree`` objects,
    2. except for getattr calls. The names dicts are generated in a fashion
       like ``CompiledObject``.

    This combined logic makes it possible to provide more powerful REPL
    completion. It allows side effects that are not noticable with the default
    parser structure to still be completeable.

    The biggest difference from CompiledObject to MixedObject is that we are
    generally dealing with Python code and not with C code. This will generate
    fewer special cases, because we in Python you don't have the same freedoms
    to modify the runtime.
    """
    def __init__(self, evaluator, obj, node_name):
        self._evaluator = evaluator
        # obj is the live runtime object; node_name is the parser tree name
        # that points at its source definition.
        self.obj = obj
        self.node_name = node_name
        self.definition = node_name.get_definition()

    @property
    def names_dict(self):
        # Built lazily so attribute access on the runtime object only happens
        # when a name is actually looked up.
        return LazyMixedNamesDict(self._evaluator, self)

    def names_dicts(self, search_global):
        # TODO is this needed?
        assert search_global is False
        return [self.names_dict]

    def api_type(self):
        # Map the parser node type of the source definition to jedi API types.
        mappings = {
            'expr_stmt': 'statement',
            'classdef': 'class',
            'funcdef': 'function',
            'file_input': 'module',
        }
        return mappings[self.definition.type]

    def __repr__(self):
        return '<%s: %s>' % (type(self).__name__, repr(self.obj))

    def __getattr__(self, name):
        # Everything not overridden above is delegated to the tree definition.
        return getattr(self.definition, name)
class MixedName(compiled.CompiledName):
    """
    The ``CompiledName._compiled_object`` is our MixedObject.
    """
    @property
    @underscore_memoization
    def parent(self):
        # Resolve the attribute on the live runtime object, then wrap the
        # result again (MixedObject or CompiledObject, whichever applies).
        return create(self._evaluator, getattr(self._compiled_obj.obj, self.name))

    @parent.setter
    def parent(self, value):
        pass  # Just ignore this, Name tries to overwrite the parent attribute.

    @property
    def start_pos(self):
        if isinstance(self.parent, MixedObject):
            return self.parent.node_name.start_pos
        # This means a start_pos that doesn't exist (compiled objects).
        return (0, 0)
class LazyMixedNamesDict(compiled.LazyNamesDict):
    # Same lazy names dict as for compiled objects, but producing MixedName
    # entries so lookups can fall back to source definitions.
    name_class = MixedName
def parse(grammar, path):
    """Read the file at *path*, normalize it to unicode and parse it with
    *grammar*, returning the FastParser instance."""
    with open(path) as f:
        source = f.read()
    source = common.source_to_unicode(source)
    return FastParser(grammar, source, path)
def _load_module(evaluator, path, python_object):
    """Parse *path* and register the resulting module tree in the evaluator's
    module cache under the runtime module's dotted name."""
    module = parse(evaluator.grammar, path).module
    python_module = inspect.getmodule(python_object)
    # Cache by runtime name so later lookups reuse the parsed tree.
    evaluator.modules[python_module.__name__] = module
    return module
def find_syntax_node_name(evaluator, python_object):
    """Map the runtime *python_object* back to the syntax tree node defining it.

    Returns the parsed module for module objects, the best-matching definition
    name otherwise, or None when no source file / definition can be found.
    """
    try:
        path = inspect.getsourcefile(python_object)
    except TypeError:
        # The type might not be known (e.g. class_with_dict.__weakref__)
        return None
    if path is None:
        return None

    module = _load_module(evaluator, path, python_object)
    if inspect.ismodule(python_object):
        # We don't need to check names for modules, because there's not really
        # a way to write a module in a module in Python (and also __name__ can
        # be something like ``email.utils``).
        return module

    try:
        names = module.used_names[python_object.__name__]
    except (AttributeError, KeyError):
        # BUG FIX: a missing entry in the used_names dict raises KeyError and
        # an object without __name__ raises AttributeError; the old
        # ``except NameError`` could never trigger, so either error escaped.
        return None

    names = [n for n in names if n.is_definition()]
    if not names:
        # Nothing in the file defines this name (e.g. stale source on disk).
        return None

    try:
        code = python_object.__code__
        # By using the line number of a code object we make the lookup in a
        # file pretty easy. There's still a possibility of people defining
        # stuff like ``a = 3; foo(a); a = 4`` on the same line, but if people
        # do so we just don't care.
        line_nr = code.co_firstlineno
    except AttributeError:
        pass
    else:
        line_names = [name for name in names if name.start_pos[0] == line_nr]
        # There's a chance that the object is not available anymore, because
        # the code has changed in the background.
        if line_names:
            return line_names[-1]

    # It's really hard to actually get the right definition, here as a last
    # resort we just return the last one. This chance might lead to odd
    # completions at some points but will lead to mostly correct type
    # inference, because people tend to define a public name in a module only
    # once.
    return names[-1]
@compiled.compiled_objects_cache('mixed_cache')
def create(evaluator, obj):
    """Wrap *obj* in a MixedObject when its source definition can be located,
    otherwise fall back to a plain CompiledObject."""
    name = find_syntax_node_name(evaluator, obj)
    if name is None:
        return compiled.create(evaluator, obj)
    else:
        return MixedObject(evaluator, obj, name)
|
tommybobbins/ChristmasUnicornHat | refs/heads/master | christmas.py | 1 | #!/usr/bin/env python
import unicornhat as unicorn
import time, colorsys
def christmas_tree():
    """Draw a green tree with trunk, star and baubles, blink the star white,
    then swap the bauble colours before clearing the display."""
    unicorn.brightness(0.4)
    # foliage colour (green)
    r = int(0)
    g = int(255)
    b = int(0)
    # trunk colour (brown)
    rt = int(102)
    gt = int(51)
    bt = int(0)
    # tree body, widest row at y=5 narrowing towards the star at y=0
    unicorn.set_pixel(0,5,255,0,0)
    unicorn.set_pixel(1,5,r,g,b)
    unicorn.set_pixel(2,5,r,g,b)
    unicorn.set_pixel(3,5,r,g,b)
    unicorn.set_pixel(4,5,r,g,b)
    unicorn.set_pixel(5,5,r,g,b)
    unicorn.set_pixel(6,5,r,g,b)
    unicorn.set_pixel(7,5,r,g,b)
    unicorn.set_pixel(1,4,r,g,b)
    unicorn.set_pixel(3,4,r,g,b)
    unicorn.set_pixel(5,4,r,g,b)
    unicorn.set_pixel(2,3,r,g,b)
    unicorn.set_pixel(4,3,r,g,b)
    unicorn.set_pixel(3,2,r,g,b)
    unicorn.set_pixel(3,1,r,g,b)
    #### Tree base
    unicorn.set_pixel(3,7,rt,gt,bt)
    unicorn.set_pixel(3,6,rt,gt,bt)
    #### Star
    unicorn.set_pixel(3,0,255,255,0)
    # first bauble arrangement: red on y=5, purple on y=4/y=2, red on y=3
    unicorn.set_pixel(0,5,255,0,b)
    unicorn.set_pixel(2,5,255,0,b)
    unicorn.set_pixel(4,5,255,0,b)
    unicorn.set_pixel(6,5,255,0,b)
    unicorn.set_pixel(0,4,51,0,153)
    unicorn.set_pixel(2,4,51,0,153)
    unicorn.set_pixel(4,4,51,0,153)
    unicorn.set_pixel(6,4,51,0,153)
    unicorn.set_pixel(1,3,255,0,0)
    unicorn.set_pixel(3,3,255,0,0)
    unicorn.set_pixel(5,3,255,0,0)
    unicorn.set_pixel(2,2,51,0,153)
    unicorn.set_pixel(4,2,51,0,153)
    #### Star
    unicorn.show()
    time.sleep(1)
    # blink the star: briefly brighter and white, then back to yellow
    unicorn.brightness(0.5)
    unicorn.set_pixel(3,0,255,255,255)
    unicorn.show()
    time.sleep(0.1)
    unicorn.set_pixel(3,0,255,255,0)
    unicorn.brightness(0.4)
    unicorn.show()
    time.sleep(2)
    # second bauble arrangement: swap the red and purple positions
    unicorn.set_pixel(0,5,51,0,153)
    unicorn.set_pixel(2,5,51,0,153)
    unicorn.set_pixel(4,5,51,0,153)
    unicorn.set_pixel(6,5,51,0,153)
    unicorn.set_pixel(0,4,255,0,0)
    unicorn.set_pixel(2,4,255,0,0)
    unicorn.set_pixel(4,4,255,0,0)
    unicorn.set_pixel(6,4,255,0,0)
    unicorn.set_pixel(1,3,51,0,153)
    unicorn.set_pixel(3,3,51,0,153)
    unicorn.set_pixel(5,3,51,0,153)
    unicorn.set_pixel(2,2,255,0,0)
    unicorn.set_pixel(4,2,255,0,0)
    unicorn.show()
    time.sleep(2)
    unicorn.clear()
def hattie1():
    """Draw the first 'hattie' frame.

    Some coordinates are painted twice in the original sequence; the later
    colour wins, so the ordered table below must be applied in order.
    """
    pixels = [
        (1,2,255,255,0), (1,3,255,0,0),   (1,4,255,0,0),   (1,5,0,0,255),
        (1,5,255,0,0),   (1,6,0,255,0),   (1,6,255,0,0),   (1,7,255,255,0),
        (2,0,0,0,255),   (2,2,0,255,0),   (2,2,255,0,0),   (2,3,0,255,0),
        (2,4,255,0,0),   (2,5,255,0,0),   (2,6,0,255,0),   (2,7,255,0,0),
        (3,1,0,0,255),   (3,2,255,0,0),   (3,3,255,0,0),   (3,4,255,255,0),
        (3,5,255,255,0), (3,6,255,0,0),   (3,7,255,0,0),   (4,1,0,0,255),
        (4,2,255,0,0),   (4,3,0,0,255),   (4,3,255,0,0),   (4,4,255,0,0),
        (4,4,255,255,0), (4,5,255,255,0), (4,6,255,0,0),   (4,7,255,0,0),
        (5,0,0,0,255),   (5,2,255,0,0),   (5,3,0,255,0),   (5,4,255,0,0),
        (5,5,255,0,0),   (5,6,0,255,0),   (5,7,255,0,0),   (6,2,255,255,0),
        (6,3,255,0,0),   (6,4,255,0,0),   (6,5,255,0,0),   (6,6,255,0,0),
        (6,7,255,255,0),
    ]
    for x, y, r, g, b in pixels:
        unicorn.set_pixel(x, y, r, g, b)
    unicorn.show()
    time.sleep(2)
    unicorn.clear()
def hattie2():
    """Draw the second 'hattie' frame: a red body with a white block.

    The red and white coordinate sets are disjoint, so painting them group
    by group leaves the framebuffer identical to the original call order.
    """
    red = [(1,5),(1,6),(1,7),(2,5),(2,6),(2,7),(3,5),(3,6),(3,7),
           (4,3),(4,4),(4,5),(4,6),(4,7),(5,3),(5,4),(5,5),(5,6),(5,7),
           (6,3),(6,4),(6,5),(6,6),(6,7)]
    white = [(3,0),(3,1),(3,2),(4,0),(4,1),(4,2),(5,0),(5,1),(5,2),
             (6,0),(6,1),(6,2),(7,0),(7,1),(7,2)]
    for x, y in red:
        unicorn.set_pixel(x, y, 255, 0, 0)
    for x, y in white:
        unicorn.set_pixel(x, y, 255, 255, 255)
    unicorn.show()
    time.sleep(2)
    unicorn.clear()
def hattie3():
    """Draw the third 'hattie' frame from an ordered pixel table (applied in
    the original paint order)."""
    pixels = [
        (0,3,255,0,0),     (0,4,255,0,0),     (0,5,255,0,0),     (1,2,255,0,0),
        (1,3,255,255,255), (1,4,255,0,0),     (1,5,0,0,255),     (1,6,255,0,0),
        (2,2,255,0,0),     (2,3,255,0,0),     (2,4,255,0,0),     (2,5,255,0,0),
        (2,6,0,0,255),     (2,7,255,0,0),     (3,0,255,255,255), (3,1,255,255,255),
        (3,2,255,255,255), (3,3,255,255,0),   (3,4,255,0,0),     (3,5,255,0,0),
        (3,6,255,0,0),     (3,7,255,0,0),     (4,0,255,255,255), (4,1,255,255,255),
        (4,2,255,255,255), (4,3,255,255,0),   (4,4,255,0,0),     (4,5,255,0,0),
        (4,6,255,0,0),     (4,7,255,0,0),     (5,2,255,0,0),     (5,3,255,0,0),
        (5,4,255,0,0),     (5,5,255,0,0),     (5,6,0,0,255),     (5,7,255,0,0),
        (6,2,255,0,0),     (6,3,255,255,255), (6,4,255,0,0),     (6,5,0,0,255),
        (6,6,255,0,0),     (7,3,255,0,0),     (7,4,255,0,0),     (7,5,255,0,0),
    ]
    for x, y, r, g, b in pixels:
        unicorn.set_pixel(x, y, r, g, b)
    unicorn.show()
    time.sleep(2)
    unicorn.clear()
def james1():
    """Draw the 'james' frame: a solid teal background overpainted with an
    ordered overlay of white/red/blue detail pixels."""
    # Solid teal background, painted row by row exactly as the original did.
    for y in range(8):
        for x in range(8):
            unicorn.set_pixel(x, y, 0, 152, 152)
    # Foreground pixels in the exact order they were originally painted;
    # later entries deliberately overwrite earlier ones.
    overlay = [
        (2,7,255,255,255), (3,7,255,255,255), (4,7,255,255,255),
        (5,6,255,255,255), (6,5,255,255,255), (6,4,255,255,255),
        (6,3,255,255,255), (5,2,255,255,255), (5,3,255,255,255),
        (4,3,255,255,255), (3,3,255,255,255), (2,4,0,132,132),
        (2,3,255,255,255), (1,4,255,255,255), (1,5,255,255,255),
        (2,6,255,255,255), (1,7,0,0,0),       (1,7,0,152,152),
        (4,2,0,152,152),   (4,1,0,152,152),   (5,2,0,152,152),
        (6,3,0,152,152),   (2,4,0,152,152),   (2,6,0,0,0),
        (1,5,0,0,0),       (1,4,0,0,0),       (1,6,255,255,255),
        (0,5,255,255,255), (0,4,255,255,255), (1,3,255,255,255),
        (3,4,255,255,255), (4,4,255,255,255), (5,4,255,255,255),
        (2,5,255,255,255), (3,5,255,255,255), (4,5,255,255,255),
        (5,5,255,255,255), (3,6,255,255,255), (4,6,255,255,255),
        (2,4,255,255,255), (1,4,255,255,255), (1,5,255,255,255),
        (2,6,255,255,255), (3,4,0,0,132),     (3,4,255,255,255),
        (3,3,0,0,132),     (3,5,0,0,132),     (3,7,0,0,132),
        (5,5,132,0,0),     (6,5,132,0,0),     (7,4,132,0,0),
        (7,6,132,0,0),     (2,2,255,255,255), (3,1,255,255,255),
        (4,2,255,255,255), (4,1,255,255,255), (2,1,255,255,255),
        (3,2,255,255,255), (4,0,255,255,255), (3,0,255,255,255),
        (2,0,255,255,255), (5,0,255,255,255), (5,1,255,255,255),
        (5,2,255,255,255), (1,2,255,255,255), (1,1,255,255,255),
        (1,0,255,255,255), (6,3,255,255,255), (0,3,255,255,255),
        (2,0,132,0,0),     (4,0,132,0,0),     (1,1,132,0,0),
        (1,1,255,255,255), (2,3,255,0,0),     (2,4,255,0,0),
        (1,2,255,0,0),     (2,2,255,0,0),     (3,2,255,0,0),
        (4,2,255,0,0),     (5,2,255,0,0),     (3,1,255,132,0),
        (5,1,255,255,255), (4,1,255,255,255),
    ]
    for x, y, r, g, b in overlay:
        unicorn.set_pixel(x, y, r, g, b)
    unicorn.show()
    time.sleep(2)
    unicorn.clear()
def mum1():
    """Draw a pale-blue cross/snowflake with white sparkles around it.

    The two coordinate groups are disjoint, so painting group by group gives
    the same final framebuffer as the original call sequence.
    """
    cross = [(3,0),(4,0),(3,1),(4,1),(4,2),(3,2),(3,3),(4,3),(3,4),(3,5),
             (3,7),(3,6),(4,4),(4,7),(4,6),(4,5),(5,3),(5,4),(6,3),(6,4),
             (7,3),(7,4),(2,3),(2,4),(1,3),(1,4),(0,3),(0,4)]
    sparks = [(6,1),(5,2),(1,1),(2,2),(2,5),(1,6),(5,5),(6,6)]
    for x, y in cross:
        unicorn.set_pixel(x, y, 140, 255, 255)
    for x, y in sparks:
        unicorn.set_pixel(x, y, 255, 255, 255)
    unicorn.show()
    time.sleep(2)
    unicorn.clear()
def mum2():
    """Draw a small green tree with a brown trunk and two red baubles."""
    foliage = [(4,0),(4,1),(3,1),(5,1),(5,2),(4,2),(3,2),(2,2),(6,2),(5,3),
               (4,3),(3,3),(2,4),(3,4),(5,4),(6,4),(3,5),(4,4),(4,5),(5,5)]
    for x, y in foliage:
        unicorn.set_pixel(x, y, 0, 132, 0)
    # trunk
    unicorn.set_pixel(4, 6, 132, 101, 0)
    unicorn.set_pixel(4, 7, 132, 101, 0)
    # baubles
    unicorn.set_pixel(3, 6, 255, 0, 0)
    unicorn.set_pixel(5, 6, 255, 0, 0)
    unicorn.show()
    time.sleep(2)
    unicorn.clear()
def mum3():
    """Draw a yellow star with an orange core on a white stand.

    The colour groups touch disjoint coordinates, so the grouped loops leave
    the framebuffer identical to the original interleaved call order.
    """
    yellow = [(4,0),(3,1),(2,2),(2,3),(3,4),(5,4),(4,5),(3,5),(2,4),(3,0),
              (5,2),(4,1),(5,3),(4,4)]
    white = [(4,6),(4,7),(3,6),(3,7)]
    orange = [(4,2),(3,2),(3,3),(4,3)]
    for x, y in yellow:
        unicorn.set_pixel(x, y, 255, 255, 0)
    for x, y in white:
        unicorn.set_pixel(x, y, 255, 255, 255)
    for x, y in orange:
        unicorn.set_pixel(x, y, 255, 118, 0)
    unicorn.show()
    time.sleep(2)
    unicorn.clear()
# Main loop: cycle through every frame forever, in the original order.
while True:
    for frame in (christmas_tree, hattie1, hattie2, mum1,
                  hattie3, james1, mum2, mum3):
        frame()
|
gooofy/voxforge | refs/heads/master | zamiaai/ai_context.py | 3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015, 2016, 2017, 2018 Guenter Bartsch
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# ai context - keep track of context semantics in current natural language processing
#
from __future__ import print_function
import os
import sys
import logging
import traceback
import time
import datetime
import numpy as np
from tzlocal import get_localzone # $ pip install tzlocal
from nltools import misc
from nltools.tokenizer import tokenize
from zamiaai import model
MAX_NER_RESULTS = 5
class AIContext(object):
    """Per-exchange natural language processing context.

    Tracks the current user input, a log of the dialog so far, the staged
    candidate responses (only the highest-scoring ones survive) and a cached
    named-entity index built lazily from the database.
    """

    def __init__(self, user, session, lang, realm, kernal, test_mode = False):
        self.dlg_log = []
        self.staged_resps = []   # [(response, score, action, action_arg), ...]
        self.high_score = 0.0    # score of the currently staged responses
        self.inp = u''
        self.user = user
        self.realm = realm
        self.ner_dict = {}       # DB cache: lang -> cls -> token -> entity -> set(token offsets)
        self.session = session
        self.lang = lang
        self.kernal = kernal
        self.test_mode = test_mode

        tz = get_localzone()
        self.current_dt = tz.localize(datetime.datetime.now())

    def set_inp(self, inp):
        """Set the current user input utterance."""
        self.inp = inp

    def resp(self, resp, score=0.0, action=None, action_arg=None):
        """Stage a candidate response.

        Responses scoring below the current best are dropped; a strictly
        higher score discards everything staged so far.
        """
        if score < self.high_score:
            return
        if score > self.high_score:
            self.high_score = score
            self.staged_resps = []
        self.staged_resps.append( (resp, score, action, action_arg) )

    def get_resps(self):
        """Return the staged (resp, score, action, action_arg) tuples."""
        return self.staged_resps

    def commit_resp(self, i):
        """Commit staged response *i*: log it, run its action (if any) and
        reset the staging area. Persists the prolog knowledge base."""
        self.dlg_log.append( { 'inp': self.inp,
                               'out': self.staged_resps[i][0] })

        action = self.staged_resps[i][2]
        action_arg = self.staged_resps[i][3]
        if action:
            if action_arg:
                action(self, action_arg)
            else:
                action(self)

        self.staged_resps = []
        self.high_score = 0.0

        self.kernal.prolog_persist()

    def _ner_learn(self, lang, cls):
        """Build the token -> entity -> token-offset index for (lang, cls)
        from the NERData table."""
        entities = []
        labels = []
        for nerdata in self.session.query(model.NERData).filter(model.NERData.lang==lang).filter(model.NERData.cls==cls):
            entities.append(nerdata.entity)
            labels.append(nerdata.label)

        if not lang in self.ner_dict:
            self.ner_dict[lang] = {}
        if not cls in self.ner_dict[lang]:
            self.ner_dict[lang][cls] = {}

        nd = self.ner_dict[lang][cls]
        for i, entity in enumerate(entities):
            label = labels[i]
            for j, token in enumerate(tokenize(label, lang=lang)):
                if not token in nd:
                    nd[token] = {}
                if not entity in nd[token]:
                    nd[token][entity] = set([])
                nd[token][entity].add(j)

        # Log a small sample of the index for debugging.
        cnt = 0
        for token in nd:
            logging.debug ('ner_learn: nd[%-20s]=%s' % (token, misc.limit_str(repr(nd[token]), 80)))
            cnt += 1
            if cnt > 10:
                break

    def ner(self, lang, cls, tstart, tend):
        """Score known entities of class *cls* against the tokens of the
        current input around the token window [tstart, tend).

        All windows shifted by +/- one token are tried and each entity keeps
        the best score it achieves in any of them. Returns at most
        MAX_NER_RESULTS (entity, score) tuples, best first.
        """
        if not lang in self.ner_dict:
            self.ner_dict[lang] = {}
        if not cls in self.ner_dict[lang]:
            self._ner_learn(lang, cls)

        nd = self.ner_dict[lang][cls]
        tokens = tokenize(self.inp, lang=lang)

        max_scores = {}
        # BUG FIX: the original loops re-used tstart/tend as loop variables,
        # so the search window drifted after the first outer iteration.
        for ts in range(tstart - 1, tstart + 2):
            if ts < 0:
                continue
            for te in range(tend - 1, tend + 2):
                if te > len(tokens):
                    continue
                scores = {}
                for tidx in range(ts, te):
                    toff = tidx - ts
                    token = tokens[tidx]
                    if not token in nd:
                        continue
                    for entity in nd[token]:
                        if not entity in scores:
                            scores[entity] = 0.0
                        for eidx in nd[token][entity]:
                            # Full points for a token at the expected offset,
                            # fewer the further away it is.
                            points = 2.0 - abs(eidx - toff)
                            if points > 0:
                                scores[entity] += points
                logging.debug('scores: %s' % repr(scores))
                for entity in scores:
                    if not entity in max_scores or scores[entity] > max_scores[entity]:
                        max_scores[entity] = scores[entity]

        # .items() instead of the py2-only .iteritems(); slicing fixes the old
        # off-by-one counter that returned up to six results instead of five.
        ranked = sorted(max_scores.items(), key=lambda x: x[1], reverse=True)
        return ranked[:MAX_NER_RESULTS]
|
kevinmel2000/sl4a | refs/heads/master | python/src/Lib/encodings/bz2_codec.py | 501 | """ Python 'bz2_codec' Codec - bz2 compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Adapted by Raymond Hettinger from zlib_codec.py which was written
by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import bz2 # this codec needs the optional bz2 module !
### Codec APIs
def bz2_encode(input, errors='strict'):
    """Compress *input* with bz2 and return ``(compressed, bytes_consumed)``.

    *errors* defines the error handling to apply; only 'strict' is
    supported by this codec.
    """
    assert errors == 'strict'
    return (bz2.compress(input), len(input))
def bz2_decode(input, errors='strict'):
    """Decompress bz2-compressed *input* and return
    ``(decompressed, bytes_consumed)``.

    *input* must support the buffer interface (strings, buffer objects,
    memory mapped files). Only 'strict' error handling is supported.
    """
    assert errors == 'strict'
    decompressed = bz2.decompress(input)
    return (decompressed, len(input))
class Codec(codecs.Codec):
    """Stateless bz2 codec: whole-payload compress on encode, decompress on
    decode. Only 'strict' error handling is supported."""

    def encode(self, input, errors='strict'):
        assert errors == 'strict'
        return (bz2.compress(input), len(input))

    def decode(self, input, errors='strict'):
        assert errors == 'strict'
        return (bz2.decompress(input), len(input))
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Feeds chunks into a single BZ2Compressor; the stream is flushed when
    ``final=True`` is passed to :meth:`encode`."""

    def __init__(self, errors='strict'):
        assert errors == 'strict'
        self.errors = errors
        self.compressobj = bz2.BZ2Compressor()

    def encode(self, input, final=False):
        data = self.compressobj.compress(input)
        if final:
            # Emit whatever the compressor is still buffering.
            data = data + self.compressobj.flush()
        return data

    def reset(self):
        self.compressobj = bz2.BZ2Compressor()
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Feeds chunks into a single BZ2Decompressor."""

    def __init__(self, errors='strict'):
        assert errors == 'strict'
        self.errors = errors
        self.decompressobj = bz2.BZ2Decompressor()

    def decode(self, input, final=False):
        try:
            decoded = self.decompressobj.decompress(input)
        except EOFError:
            # Data past the end of the stream: nothing more to emit.
            return ''
        return decoded

    def reset(self):
        self.decompressobj = bz2.BZ2Decompressor()
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream behaviour comes from codecs.StreamWriter; bz2 encoding from Codec.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream behaviour comes from codecs.StreamReader; bz2 decoding from Codec.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codecs registry uses for 'bz2'."""
    entry = {
        'name': "bz2",
        'encode': bz2_encode,
        'decode': bz2_decode,
        'incrementalencoder': IncrementalEncoder,
        'incrementaldecoder': IncrementalDecoder,
        'streamwriter': StreamWriter,
        'streamreader': StreamReader,
    }
    return codecs.CodecInfo(**entry)
|
jmesteve/saas3 | refs/heads/master | openerp/addons/base/tests/test_res_lang.py | 384 | import unittest2
import openerp.tests.common as common
class test_res_lang(common.TransactionCase):
    """Unit tests for the res.lang number-grouping helper."""

    def test_00_intersperse(self):
        # intersperse(numstr, groups, sep) inserts *sep* into *numstr* from the
        # right according to the group sizes and returns
        # (formatted_string, number_of_separators_inserted).
        from openerp.addons.base.res.res_lang import intersperse

        # No groups requested -> string unchanged, zero separators.
        assert intersperse("", []) == ("", 0)
        assert intersperse("0", []) == ("0", 0)
        assert intersperse("012", []) == ("012", 0)
        assert intersperse("1", []) == ("1", 0)
        assert intersperse("12", []) == ("12", 0)
        assert intersperse("123", []) == ("123", 0)
        assert intersperse("1234", []) == ("1234", 0)
        assert intersperse("123456789", []) == ("123456789", 0)
        assert intersperse("&ab%#@1", []) == ("&ab%#@1", 0)

        # Groups larger than the string leave it unchanged.
        assert intersperse("0", []) == ("0", 0)
        assert intersperse("0", [1]) == ("0", 0)
        assert intersperse("0", [2]) == ("0", 0)
        assert intersperse("0", [200]) == ("0", 0)

        # Group sizes apply right-to-left; the last size repeats, 0 means
        # "repeat previous", negative stops further grouping.
        assert intersperse("12345678", [1], '.') == ('1234567.8', 1)
        assert intersperse("12345678", [1], '.') == ('1234567.8', 1)
        assert intersperse("12345678", [2], '.') == ('123456.78', 1)
        assert intersperse("12345678", [2,1], '.') == ('12345.6.78', 2)
        assert intersperse("12345678", [2,0], '.') == ('12.34.56.78', 3)
        assert intersperse("12345678", [-1,2], '.') == ('12345678', 0)
        assert intersperse("12345678", [2,-1], '.') == ('123456.78', 1)
        assert intersperse("12345678", [2,0,1], '.') == ('12.34.56.78', 3)
        assert intersperse("12345678", [2,0,0], '.') == ('12.34.56.78', 3)
        assert intersperse("12345678", [2,0,-1], '.') == ('12.34.56.78', 3)
        assert intersperse("12345678", [3,3,3,3], '.') == ('12.345.678', 2)

        # Non-digit prefixes count toward the rightmost groups too...
        assert intersperse("abc1234567xy", [2], '.') == ('abc1234567.xy', 1)
        assert intersperse("abc1234567xy8", [2], '.') == ('abc1234567x.y8', 1) # ... w.r.t. here.
        assert intersperse("abc12", [3], '.') == ('abc12', 0)
        assert intersperse("abc12", [2], '.') == ('abc12', 0)
        assert intersperse("abc12", [1], '.') == ('abc1.2', 1)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
fillycheezstake/MissionPlanner | refs/heads/master | Lib/lib2to3/fixes/fix_getcwdu.py | 61 | """
Fixer that changes os.getcwdu() to os.getcwd().
"""
# Author: Victor Stinner
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixGetcwdu(fixer_base.BaseFix):
    """2to3 fixer rewriting os.getcwdu() calls to os.getcwd()."""
    # Eligible for the pattern-matching optimisation (bottom matcher).
    BM_compatible = True
    PATTERN = """
              power< 'os' trailer< dot='.' name='getcwdu' > any* >
              """
    def transform(self, node, results):
        name = results["name"]
        # Replace only the attribute name, keeping the original whitespace.
        name.replace(Name(u"getcwd", prefix=name.prefix))
|
mglukhikh/intellij-community | refs/heads/master | python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_0/_pkg1_0_0/_pkg1_0_0_1/_pkg1_0_0_1_0/_mod1_0_0_1_0_2.py | 30 | name1_0_0_1_0_2_0 = None
name1_0_0_1_0_2_1 = None
name1_0_0_1_0_2_2 = None
name1_0_0_1_0_2_3 = None
name1_0_0_1_0_2_4 = None |
bitmazk/django-document-library | refs/heads/master | document_library/cms_app.py | 1 | """CMS apphook for the ``document_library`` app."""
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
class DocumentLibraryApphook(CMSApp):
    """django-cms apphook attaching document_library URLs to a CMS page."""
    name = _("Document Library Apphook")
    urls = ["document_library.urls"]
# Register at import time so the apphook appears in the CMS page admin.
apphook_pool.register(DocumentLibraryApphook)
|
sabi0/intellij-community | refs/heads/master | python/testData/copyPaste/singleLine/IndentOnTopLevel.dst.py | 83 | class C:
def foo(self):
x = 1
y = 2
<caret>
def foo():
pass |
delhivery/django | refs/heads/master | tests/forms_tests/views.py | 452 | from django import forms
from django.views.generic.edit import UpdateView
from .models import Article
class ArticleForm(forms.ModelForm):
    """ModelForm for Article whose content field keeps surrounding whitespace."""
    # strip=False: preserve leading/trailing whitespace in submitted content.
    content = forms.CharField(strip=False, widget=forms.Textarea)
    class Meta:
        model = Article
        fields = '__all__'
class ArticleFormView(UpdateView):
    """Edit view for Article using ArticleForm; redirects to '/' on success."""
    model = Article
    success_url = '/'
    form_class = ArticleForm
|
paveldedik/blog | refs/heads/master | paveldedik/utils.py | 1 | # -*- coding: utf-8 -*-
import re
from unicodedata import normalize
from flask.views import MethodView
from . import app
_slug_regex = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-', length=60):
    """Generates an ASCII-only slug. A slug is the part of a URL which
    identifies a page using human-readable keywords.
    See `Generating Slugs <http://flask.pocoo.org/snippets/5/>`_.

    NOTE: this is Python 2 code (it calls the ``unicode`` builtin).

    :param text: Text to slugify.
    :type text: unicode
    :param delim: Separator for white space characters. Default is hyphen.
    :type delim: unicode
    :param length: Maximum lenght of the result. Default is `60`.
    :type length: integer
    :rtype: unicode
    """
    result = []
    # Truncate first so the slug never exceeds `length` input characters.
    text = text if length is None else text[:length]
    for word in _slug_regex.split(text.lower()):
        # NFKD decomposition + ASCII encode drops accents/diacritics.
        word = normalize('NFKD', word).encode('ascii', 'ignore')
        if word:
            result.append(word)
    return unicode(delim.join(result))
def underscore(string):
    """Convert a CamelCase identifier to snake_case.

    :param string: Camel-case string to be converted.
    :return: Snake-case string.
    """
    # First pass: split before a capitalised word (e.g. "HTTPResponse" ->
    # "HTTP_Response"); second pass: split a lower/digit followed by a capital.
    spaced = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', string)
    spaced = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', spaced)
    return spaced.lower()
class route(object):
    """Class decorator to simplify route registration.

    Registers a :class:`MethodView` subclass on the module-level ``app``
    under the given URL endpoint, deriving the view name from the class
    name converted to snake_case.
    """
    def __init__(self, endpoint):
        # URL rule (e.g. '/posts/<slug>') the decorated view is mounted on.
        self.endpoint = endpoint
    def __call__(self, cls):
        assert issubclass(cls, MethodView)
        view_name = underscore(cls.__name__)
        app.add_url_rule(self.endpoint, view_func=cls.as_view(view_name))
        # Return the class unchanged so it stays usable/importable.
        return cls
|
cpcloud/dynd-python | refs/heads/master | dynd/tests/test_nd_groupby.py | 3 | import sys
import unittest
from dynd import nd, ndt
"""
Todo: Fix this
class TestGroupBy(unittest.TestCase):
def test_immutable(self):
a = nd.array([
('x', 0),
('y', 1),
('x', 2),
('x', 3),
('y', 4)],
dtype='{A: string, B: int32}').eval_immutable()
gb = nd.groupby(a, nd.fields(a, 'A'))
self.assertEqual(nd.as_py(gb.groups), [{'A': 'x'}, {'A': 'y'}])
# TODO: This test fails since we modernized comparisons
# self.assertEqual(nd.as_py(gb), [
# [{'A': 'x', 'B': 0},
# {'A': 'x', 'B': 2},
# {'A': 'x', 'B': 3}],
# [{'A': 'y', 'B': 1},
# {'A': 'y', 'B': 4}]])
def test_grouped_slices(self):
a = nd.asarray([[1, 2, 3], [1, 4, 5]])
gb = nd.groupby(a[:, 1:], a[:, 0])
self.assertEqual(nd.as_py(gb.groups), [1])
self.assertEqual(nd.as_py(gb), [[[2, 3], [4, 5]]])
a = nd.asarray([[1, 2, 3], [3, 1, 7], [1, 4, 5], [2, 6, 7], [3, 2, 5]])
gb = nd.groupby(a[:, 1:], a[:, 0])
self.assertEqual(nd.as_py(gb.groups), [1, 2, 3])
self.assertEqual(nd.as_py(gb), [[[2, 3], [4, 5]],
[[6, 7]],
[[1, 7], [2, 5]]])
"""
if __name__ == '__main__':
unittest.main()
|
dezede/dezede | refs/heads/master | static_grouper/templatetags/static_grouper.py | 1 | from collections import defaultdict
from compressor.templatetags.compress import CompressorNode
from django.template import (
Library, Node, Template, TemplateSyntaxError, Context)
register = Library()
CONTEXT_VARIABLE_NAME = 'static_grouper_dict'
class AddStaticNode(Node):
    """``{% addstatic <type> [nocompress] %}...{% endaddstatic %}``.

    Collects the rendered inner content into a per-render registry
    (keyed by static type) stored in the root template context, so a
    later ``{% static_list %}`` tag can emit all collected snippets.
    Renders nothing itself.
    """
    def __init__(self, parser, token):
        contents = token.split_contents()
        if len(contents) not in (2, 3):
            raise TemplateSyntaxError
        if len(contents) == 3:
            # Optional third token must literally be 'nocompress'.
            assert contents[2] == 'nocompress'
            self.compress = False
        else:
            self.compress = True
        self.static_type = contents[1]
        # Consume everything up to the matching end tag, then drop the
        # end-tag token itself (standard block-tag parsing protocol).
        self.nodelist = parser.parse(('endaddstatic',))
        parser.delete_first_token()
    def render(self, context):
        output = self.nodelist.render(context).strip()
        static_grouper_dict = context.get(CONTEXT_VARIABLE_NAME)
        if static_grouper_dict is None:
            # Store on the root context dict so the registry survives
            # context pushes/pops during the rest of the render.
            root_context = context.dicts[0]
            root_context[CONTEXT_VARIABLE_NAME] = \
                static_grouper_dict = defaultdict(list)
        item = (self.compress, output)
        # De-duplicate identical snippets of the same type.
        if item not in static_grouper_dict[self.static_type]:
            static_grouper_dict[self.static_type].append(item)
        return ''
register.tag('addstatic', AddStaticNode)
class StaticListNode(Node):
    """``{% static_list <type> [compress] %}``.

    Emits every snippet previously collected by ``{% addstatic %}`` for
    the given static type, preserving order. Adjacent compressible
    snippets are merged and, when the ``compress`` flag is given, run
    through django-compressor; non-compressible snippets pass through
    verbatim.
    """
    def __init__(self, parser, token):
        contents = token.split_contents()
        if len(contents) not in (2, 3):
            raise TemplateSyntaxError
        self.static_type = contents[1]
        if len(contents) == 3:
            # Optional third token must literally be 'compress'.
            assert contents[2] == 'compress'
            self.compress = True
        else:
            self.compress = False
        # Parse (and later re-emit) the remainder of the template so the
        # collected statics can be rendered before the following nodes.
        self.following_nodelist = parser.parse()
    def groups_iterator(self, static_grouper_dict):
        # Yield (compressible, output) runs: consecutive compressible
        # snippets are concatenated into a single run.
        compressed_group = []
        for compress, output in static_grouper_dict[self.static_type]:
            if compress:
                compressed_group.append(output)
            else:
                if compressed_group:
                    yield True, ''.join(compressed_group)
                    compressed_group = []
                yield False, output
        if compressed_group:
            yield True, ''.join(compressed_group)
    def render(self, context):
        static_grouper_dict = context.get(CONTEXT_VARIABLE_NAME, defaultdict(list))
        # Render the rest of the template first so any addstatic tags in
        # it have populated the registry before we read it.
        following = self.following_nodelist.render(context)
        inner = ''
        for compress, output in self.groups_iterator(static_grouper_dict):
            if compress and self.compress:
                inner += CompressorNode(
                    nodelist=Template(output).nodelist, kind=self.static_type,
                    mode='file').render(context=context)
            else:
                inner += output
        return inner + following
register.tag('static_list', StaticListNode)
|
dhcmrlchtdj/toolkit | refs/heads/master | python/database.py | 1 | #!/usr/bin/env python3.4
# python 3.4.0
# mysql-connector 1.1.6
# mariadb 5.5.37
__version__ = "0.2.0"
import logging
import mysql.connector
logger = logging.getLogger(__name__)
class Connection:
    """MySQL connection wrapper with auto-reconnect and dict-style rows.

    Usage:
        >>> import database
        >>> db = database.Connection("user", "password", "database")

    The connection is opened eagerly in ``__init__``; a failure there is
    logged but not raised, and a reconnect is retried lazily on first use.
    """
    def __init__(self, user, password, database, host="127.0.0.1",
                 connect_timeout=10, time_zone="+0:00"):
        self.host = host
        config = {
            "user": user,
            "password": password,
            "database": database,
            "connect_timeout": connect_timeout,
            "time_zone": time_zone,
            "autocommit": True,
            "sql_mode": "TRADITIONAL",
            "use_unicode": True,
            "charset": "utf8",
        }
        # `host` may be a unix socket path, "host:port", or a bare hostname.
        if "/" in host:
            config["unix_socket"] = host
        else:
            pair = host.split(":")
            if len(pair) == 2:
                config["host"] = pair[0]
                config["port"] = int(pair[1])
            else:
                config["host"] = host
        self._db = None
        self._db_config = config
        try:
            self.reconnect()
        except Exception:
            # Deliberate best-effort: log and continue; the connection
            # will be re-attempted by _ensure_connected() on first use.
            logger.error(
                "Cannot connect to MySQL on %s" % self.host,
                exc_info=True)
    def __del__(self):
        self.close()
    def close(self):
        """Closes database connection"""
        if getattr(self, "_db", None) is not None:
            self._db.close()
            self._db = None
    def reconnect(self):
        """Closes the existing database connection and re-opens it"""
        self.close()
        self._db = mysql.connector.connect(**self._db_config)
    def _ensure_connected(self):
        # Reconnect if the server dropped the connection (e.g. idle timeout).
        if self._db is None or not self._db.is_connected():
            self.reconnect()
    def _cursor(self):
        self._ensure_connected()
        return self._db.cursor()
    def _execute(self, cursor, sql, parameters, kwparameters):
        """Execute `sql` on `cursor` with either kw or positional params.

        BUGFIX: this helper was referenced by execute_lastrowid() and
        execute_rowcount() but never defined, so execute()/insert()/
        update() raised AttributeError. Mirrors query()'s parameter
        handling (keyword parameters win when both are given).
        """
        return cursor.execute(sql, kwparameters or parameters)
    def query(self, sql, *parameters, **kwparameters):
        """Executes the query, returning a list of Row (dict-like) objects."""
        cursor = self._cursor()
        try:
            cursor.execute(sql, kwparameters or parameters)
            column_names = cursor.column_names
            return [Row(zip(column_names, row)) for row in cursor]
        finally:
            cursor.close()
    def get(self, sql, *parameters, **kwparameters):
        """Returns the first row returned for the given query."""
        rows = self.query(sql, *parameters, **kwparameters)
        if not rows:
            return None
        elif len(rows) > 1:
            raise Exception("Multiple rows returned for database.get() query")
        else:
            return rows[0]
    def execute(self, sql, *parameters, **kwparameters):
        """Executes the given query, returning the rowcount from the query."""
        return self.execute_rowcount(sql, *parameters, **kwparameters)
    def execute_lastrowid(self, sql, *parameters, **kwparameters):
        """Executes the given query, returning the lastrowid from the query."""
        cursor = self._cursor()
        try:
            self._execute(cursor, sql, parameters, kwparameters)
            return cursor.lastrowid
        finally:
            cursor.close()
    def execute_rowcount(self, sql, *parameters, **kwparameters):
        """Executes the given query, returning the rowcount from the query."""
        cursor = self._cursor()
        try:
            self._execute(cursor, sql, parameters, kwparameters)
            return cursor.rowcount
        finally:
            cursor.close()
    def executemany(self, sql, parameters):
        """Executes the given query against all the given param sequences.

        return the rowcount from the query.
        """
        return self.executemany_rowcount(sql, parameters)
    def executemany_lastrowid(self, sql, parameters):
        """Executes the given query against all the given param sequences.

        We return the lastrowid from the query.
        """
        cursor = self._cursor()
        try:
            cursor.executemany(sql, parameters)
            return cursor.lastrowid
        finally:
            cursor.close()
    def executemany_rowcount(self, query, parameters):
        """Executes the given query against all the given param sequences.

        We return the rowcount from the query.
        """
        cursor = self._cursor()
        try:
            cursor.executemany(query, parameters)
            return cursor.rowcount
        finally:
            cursor.close()
    # Convenience aliases matching the Tornado-style database API.
    update = execute_rowcount
    updatemany = executemany_rowcount
    insert = execute_lastrowid
    insertmany = executemany_lastrowid
class Row(dict):
    """A dict whose keys are also readable as attributes."""
    def __getattr__(self, name):
        # Only called for names not found as real attributes.
        if name in self:
            return self[name]
        raise AttributeError(name)
|
2ndQuadrant/ansible | refs/heads/master | lib/ansible/modules/cloud/docker/docker_image.py | 2 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_image
short_description: Manage docker images.
version_added: "1.5"
description:
- Build, load or pull an image, making the image available for creating containers. Also supports tagging an
image into a repository and archiving an image to a .tar file.
- Since Ansible 2.8, it is recommended to explicitly specify the image's source (I(source) can be C(build),
C(load), C(pull) or C(local)). This will be required from Ansible 2.12 on.
options:
source:
description:
- "Determines where the module will try to retrieve the image from."
- "Use C(build) to build the image from a C(Dockerfile). I(build.path) must
be specified when this value is used."
- "Use C(load) to load the image from a C(.tar) file. I(load_path) must
be specified when this value is used."
- "Use C(pull) to pull the image from a registry."
- "Use C(local) to make sure that the image is already available on the local
docker daemon, i.e. do not try to build, pull or load the image."
- "Before Ansible 2.12, the value of this option will be auto-detected
to be backwards compatible, but a warning will be issued if it is not
explicitly specified. From Ansible 2.12 on, auto-detection will be disabled
and this option will be made mandatory."
type: str
choices:
- build
- load
- pull
- local
version_added: "2.8"
build:
description:
- "Specifies options used for building images."
type: dict
suboptions:
cache_from:
description:
- List of image names to consider as cache source.
type: list
dockerfile:
description:
- Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
- This can also include a relative path (relative to I(path)).
type: str
http_timeout:
description:
- Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
seconds.
type: int
path:
description:
- Use with state 'present' to build an image. Will be the path to a directory containing the context and
Dockerfile for building an image.
type: path
required: yes
pull:
description:
- When building an image downloads any updates to the FROM image in Dockerfile.
- The default is currently C(yes). This will change to C(no) in Ansible 2.12.
type: bool
rm:
description:
- Remove intermediate containers after build.
type: bool
default: yes
network:
description:
- The network to use for C(RUN) build instructions.
type: str
nocache:
description:
- Do not use cache when building an image.
type: bool
default: no
args:
description:
- Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
- Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
- Requires Docker API >= 1.21.
type: dict
container_limits:
description:
- A dictionary of limits applied to each container created by the build process.
type: dict
suboptions:
memory:
description:
- Set memory limit for build.
type: int
memswap:
description:
- Total memory (memory + swap), -1 to disable swap.
type: int
cpushares:
description:
- CPU shares (relative weight).
type: int
cpusetcpus:
description:
- CPUs in which to allow execution, e.g., "0-3", "0,1".
type: str
use_config_proxy:
description:
- If set to C(yes) and a proxy configuration is specified in the docker client configuration
(by default C($HOME/.docker/config.json)), the corresponding environment variables will
be set in the container being built.
- Needs Docker SDK for Python >= 3.7.0.
type: bool
version_added: "2.8"
archive_path:
description:
- Use with state C(present) to archive an image to a .tar file.
type: path
version_added: "2.1"
load_path:
description:
- Use with state C(present) to load an image from a .tar file.
- Set I(source) to C(load) if you want to load the image. The option will
be set automatically before Ansible 2.12 if this option is used (except
if I(path) is specified as well, in which case building will take precedence).
From Ansible 2.12 on, you have to set I(source) to C(load).
type: path
version_added: "2.2"
dockerfile:
description:
- Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
- This can also include a relative path (relative to I(path)).
- Please use I(build.dockerfile) instead. This option will be removed in Ansible 2.12.
type: str
version_added: "2.0"
force:
description:
- Use with state I(absent) to un-tag and remove all images matching the specified name. Use with state
C(present) to build, load or pull an image when the image already exists. Also use with state C(present)
to force tagging an image.
- Please stop using this option, and use the more specialized force options
I(force_source), I(force_absent) and I(force_tag) instead.
- This option will be removed in Ansible 2.12.
type: bool
version_added: "2.1"
force_source:
description:
- Use with state C(present) to build, load or pull an image (depending on the
value of the I(source) option) when the image already exists.
type: bool
default: false
version_added: "2.8"
force_absent:
description:
- Use with state I(absent) to un-tag and remove all images matching the specified name.
type: bool
default: false
version_added: "2.8"
force_tag:
description:
- Use with state C(present) to force tagging an image.
type: bool
default: false
version_added: "2.8"
http_timeout:
description:
- Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
seconds.
- Please use I(build.http_timeout) instead. This option will be removed in Ansible 2.12.
type: int
version_added: "2.1"
name:
description:
- "Image name. Name format will be one of: name, repository/name, registry_server:port/name.
When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'."
- Note that image IDs (hashes) are not supported.
type: str
required: yes
path:
description:
- Use with state 'present' to build an image. Will be the path to a directory containing the context and
Dockerfile for building an image.
- Set I(source) to C(build) if you want to build the image. The option will
be set automatically before Ansible 2.12 if this option is used. From Ansible 2.12
on, you have to set I(source) to C(build).
- Please use I(build.path) instead. This option will be removed in Ansible 2.12.
type: path
aliases:
- build_path
pull:
description:
- When building an image downloads any updates to the FROM image in Dockerfile.
- Please use I(build.pull) instead. This option will be removed in Ansible 2.12.
- The default is currently C(yes). This will change to C(no) in Ansible 2.12.
type: bool
version_added: "2.1"
push:
description:
- Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
type: bool
default: no
version_added: "2.2"
rm:
description:
- Remove intermediate containers after build.
- Please use I(build.rm) instead. This option will be removed in Ansible 2.12.
type: bool
default: yes
version_added: "2.1"
nocache:
description:
- Do not use cache when building an image.
- Please use I(build.nocache) instead. This option will be removed in Ansible 2.12.
type: bool
default: no
repository:
description:
- Full path to a repository. Use with state C(present) to tag the image into the repository. Expects
format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest).
type: str
version_added: "2.1"
state:
description:
- Make assertions about the state of an image.
- When C(absent) an image will be removed. Use the force option to un-tag and remove all images
matching the provided name.
- When C(present) check if an image exists using the provided name and tag. If the image is not found or the
force option is used, the image will either be pulled, built or loaded, depending on the I(source) option.
- By default the image will be pulled from Docker Hub, or the registry specified in the image's name. Note that
this will change in Ansible 2.12, so to make sure that you are pulling, set I(source) to C(pull). To build
the image, provide a I(path) value set to a directory containing a context and Dockerfile, and set I(source)
to C(build). To load an image, specify I(load_path) to provide a path to an archive file. To tag an image to
a repository, provide a I(repository) path. If the name contains a repository path, it will be pushed.
- "*Note:* C(state=build) is DEPRECATED and will be removed in Ansible 2.11. Specifying C(build) will behave the
same as C(present)."
type: str
default: present
choices:
- absent
- present
- build
tag:
description:
- Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
I(latest).
- If I(name) parameter format is I(name:tag), then tag value from I(name) will take precedence.
type: str
default: latest
buildargs:
description:
- Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
- Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
- Requires Docker API >= 1.21.
- Please use I(build.args) instead. This option will be removed in Ansible 2.12.
type: dict
version_added: "2.2"
container_limits:
description:
- A dictionary of limits applied to each container created by the build process.
- Please use I(build.container_limits) instead. This option will be removed in Ansible 2.12.
type: dict
suboptions:
memory:
description:
- Set memory limit for build.
type: int
memswap:
description:
- Total memory (memory + swap), -1 to disable swap.
type: int
cpushares:
description:
- CPU shares (relative weight).
type: int
cpusetcpus:
description:
- CPUs in which to allow execution, e.g., "0-3", "0,1".
type: str
version_added: "2.1"
use_tls:
description:
- "DEPRECATED. Whether to use tls to connect to the docker daemon. Set to
C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that
the server's certificate is valid for the server."
- "*Note:* If you specify this option, it will set the value of the I(tls) or
I(validate_certs) parameters if not set to C(no)."
- Will be removed in Ansible 2.11.
type: str
choices:
- 'no'
- 'encrypt'
- 'verify'
version_added: "2.0"
extends_documentation_fragment:
- docker
- docker.docker_py_1_documentation
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- "Docker API >= 1.20"
author:
- Pavel Antonov (@softzilla)
- Chris Houseknecht (@chouseknecht)
'''
EXAMPLES = '''
- name: pull an image
docker_image:
name: pacur/centos-7
source: pull
- name: Tag and push to docker hub
docker_image:
name: pacur/centos-7:56
repository: dcoppenhagan/myimage:7.56
push: yes
source: local
- name: Tag and push to local registry
docker_image:
# Image will be centos:7
name: centos
# Will be pushed to localhost:5000/centos:7
repository: localhost:5000/centos
tag: 7
push: yes
source: local
- name: Add tag latest to image
docker_image:
name: myimage:7.1.2
repository: myimage:latest
# As 'latest' usually already is present, we need to enable overwriting of existing tags:
force_tag: yes
source: local
- name: Remove image
docker_image:
state: absent
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
- name: Build an image and push it to a private repo
docker_image:
build:
path: ./sinatra
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
push: yes
source: build
- name: Archive image
docker_image:
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
archive_path: my_sinatra.tar
source: local
- name: Load image from archive and push to a private registry
docker_image:
name: localhost:5000/myimages/sinatra
tag: v1
push: yes
load_path: my_sinatra.tar
source: load
- name: Build image and with build args
docker_image:
name: myimage
build:
path: /path/to/build/dir
args:
log_volume: /var/log/myapp
listen_port: 8080
source: build
- name: Build image using cache source
docker_image:
name: myimage:latest
build:
path: /path/to/build/dir
# Use as cache source for building myimage
cache_from:
- nginx:latest
- alpine:3.8
source: build
'''
RETURN = '''
image:
description: Image inspection results for the affected image.
returned: success
type: dict
sample: {}
'''
import os
import re
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.docker.common import (
docker_version,
AnsibleDockerClient,
DockerBaseClass,
is_image_name_id,
is_valid_tag,
RequestException,
)
from ansible.module_utils._text import to_native
if docker_version is not None:
try:
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
from docker.auth import resolve_repository_name
else:
from docker.auth.auth import resolve_repository_name
from docker.utils.utils import parse_repository_tag
from docker.errors import DockerException
except ImportError:
# missing Docker SDK for Python handled in module_utils.docker.common
pass
class ImageManager(DockerBaseClass):
    def __init__(self, client, results):
        """Configure the manager from module params and run the requested state.

        :param client: AnsibleDockerClient wrapping the docker SDK and module.
        :param results: mutable results dict updated in place ('changed',
            'actions', 'image') and returned to Ansible by the caller.
        """
        super(ImageManager, self).__init__()
        self.client = client
        self.results = results
        parameters = self.client.module.params
        self.check_mode = self.client.check_mode
        self.source = parameters['source']
        # Build options live in a nested dict; fall back to {} when absent.
        build = parameters['build'] or dict()
        self.archive_path = parameters.get('archive_path')
        self.cache_from = build.get('cache_from')
        self.container_limits = build.get('container_limits')
        self.dockerfile = build.get('dockerfile')
        self.force_source = parameters.get('force_source')
        self.force_absent = parameters.get('force_absent')
        self.force_tag = parameters.get('force_tag')
        self.load_path = parameters.get('load_path')
        self.name = parameters.get('name')
        self.network = build.get('network')
        self.nocache = build.get('nocache', False)
        self.build_path = build.get('path')
        self.pull = build.get('pull')
        self.repository = parameters.get('repository')
        self.rm = build.get('rm', True)
        self.state = parameters.get('state')
        self.tag = parameters.get('tag')
        self.http_timeout = build.get('http_timeout')
        self.push = parameters.get('push')
        self.buildargs = build.get('args')
        self.use_config_proxy = build.get('use_config_proxy')
        # If name contains a tag, it takes precedence over tag parameter.
        if not is_image_name_id(self.name):
            repo, repo_tag = parse_repository_tag(self.name)
            if repo_tag:
                self.name = repo
                self.tag = repo_tag
        # Dispatch immediately: constructing the manager performs the work.
        if self.state == 'present':
            self.present()
        elif self.state == 'absent':
            self.absent()
    def fail(self, msg):
        """Abort the module run with an error message (does not return)."""
        self.client.fail(msg)
    def present(self):
        '''
        Handles state = 'present', which includes building, loading or pulling an image,
        depending on user provided parameters.

        The image is only (re)acquired when it is missing locally or
        force_source is set; afterwards, optional archiving, pushing and
        tagging are applied regardless.

        :returns None
        '''
        image = self.client.find_image(name=self.name, tag=self.tag)
        if not image or self.force_source:
            if self.source == 'build':
                # Build the image
                if not os.path.isdir(self.build_path):
                    self.fail("Requested build path %s could not be found or you do not have access." % self.build_path)
                image_name = self.name
                if self.tag:
                    image_name = "%s:%s" % (self.name, self.tag)
                self.log("Building image %s" % image_name)
                self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path))
                self.results['changed'] = True
                if not self.check_mode:
                    self.results['image'] = self.build_image()
            elif self.source == 'load':
                # Load the image from an archive
                if not os.path.isfile(self.load_path):
                    self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
                                                                                             self.load_path))
                image_name = self.name
                if self.tag:
                    image_name = "%s:%s" % (self.name, self.tag)
                self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
                self.results['changed'] = True
                if not self.check_mode:
                    self.results['image'] = self.load_image()
            elif self.source == 'pull':
                # pull the image
                self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
                self.results['changed'] = True
                if not self.check_mode:
                    self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag)
            elif self.source == 'local':
                if image is None:
                    name = self.name
                    if self.tag:
                        name = "%s:%s" % (self.name, self.tag)
                    self.client.fail('Cannot find the image %s locally.' % name)
            # No change if the acquired image is identical to the pre-existing one.
            # NOTE(review): assumes self.results['image'] was populated with an
            # 'Id' key by the branch above — confirm for all sources.
            if not self.check_mode and image and image['Id'] == self.results['image']['Id']:
                self.results['changed'] = False
        if self.archive_path:
            self.archive_image(self.name, self.tag)
        if self.push and not self.repository:
            self.push_image(self.name, self.tag)
        elif self.repository:
            self.tag_image(self.name, self.tag, self.repository, push=self.push)
    def absent(self):
        '''
        Handles state = 'absent', which removes an image.

        Looks the image up by ID or by name:tag; when found, removes it
        (honoring check mode) and records the action in results.

        :return None
        '''
        name = self.name
        if is_image_name_id(name):
            image = self.client.find_image_by_id(name)
        else:
            image = self.client.find_image(name, self.tag)
            if self.tag:
                name = "%s:%s" % (self.name, self.tag)
        if image:
            if not self.check_mode:
                try:
                    # force_absent un-tags and removes all matching images.
                    self.client.remove_image(name, force=self.force_absent)
                except Exception as exc:
                    self.fail("Error removing image %s - %s" % (name, str(exc)))
            # Reported as changed even in check mode.
            self.results['changed'] = True
            self.results['actions'].append("Removed image %s" % (name))
            self.results['image']['state'] = 'Deleted'
    def archive_image(self, name, tag):
        '''
        Archive an image to a .tar file. Called when archive_path is passed.

        :param name - name of the image. Type: str
        :param tag - image tag; defaults to "latest" when falsy. Type: str
        :return None
        '''
        if not tag:
            tag = "latest"
        image = self.client.find_image(name=name, tag=tag)
        if not image:
            # Nothing to archive; silently skip (logged only).
            self.log("archive image: image %s:%s not found" % (name, tag))
            return
        image_name = "%s:%s" % (name, tag)
        self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
        self.results['changed'] = True
        if not self.check_mode:
            self.log("Getting archive of image %s" % image_name)
            try:
                image = self.client.get_image(image_name)
            except Exception as exc:
                self.fail("Error getting image %s - %s" % (image_name, str(exc)))
            try:
                with open(self.archive_path, 'wb') as fd:
                    # docker SDK >= 3.0 returns an iterable of chunks;
                    # older versions expose a .stream() method instead.
                    if self.client.docker_py_version >= LooseVersion('3.0.0'):
                        for chunk in image:
                            fd.write(chunk)
                    else:
                        for chunk in image.stream(2048, decode_content=False):
                            fd.write(chunk)
            except Exception as exc:
                self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc)))
        image = self.client.find_image(name=name, tag=tag)
        if image:
            self.results['image'] = image
    def push_image(self, name, tag=None):
        '''
        If the name of the image contains a repository path, then push the image.

        :param name Name of the image to push.
        :param tag Use a specific tag.
        :return: None
        '''
        repository = name
        if not tag:
            repository, tag = parse_repository_tag(name)
        registry, repo_name = resolve_repository_name(repository)
        self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
        # Push only happens when a registry could be resolved from the name.
        if registry:
            self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
            self.results['changed'] = True
            if not self.check_mode:
                status = None
                try:
                    # Only report 'changed' if the registry actually
                    # received layers (a 'Pushing' status line appeared).
                    changed = False
                    for line in self.client.push(repository, tag=tag, stream=True, decode=True):
                        self.log(line, pretty_print=True)
                        if line.get('errorDetail'):
                            raise Exception(line['errorDetail']['message'])
                        status = line.get('status')
                        if status == 'Pushing':
                            changed = True
                    self.results['changed'] = changed
                except Exception as exc:
                    # Give targeted hints for the common auth failures.
                    if re.search('unauthorized', str(exc)):
                        if re.search('authentication required', str(exc)):
                            self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
                                      (registry, repo_name, tag, str(exc), registry))
                        else:
                            self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
                                      (registry, repo_name, tag, str(exc)))
                    self.fail("Error pushing image %s: %s" % (repository, str(exc)))
                self.results['image'] = self.client.find_image(name=repository, tag=tag)
                if not self.results['image']:
                    self.results['image'] = dict()
                self.results['image']['push_status'] = status
def tag_image(self, name, tag, repository, push=False):
'''
Tag an image into a repository.
:param name: name of the image. required.
:param tag: image tag.
:param repository: path to the repository. required.
:param push: bool. push the image once it's tagged.
:return: None
'''
repo, repo_tag = parse_repository_tag(repository)
if not repo_tag:
repo_tag = "latest"
if tag:
repo_tag = tag
image = self.client.find_image(name=repo, tag=repo_tag)
found = 'found' if image else 'not found'
self.log("image %s was %s" % (repo, found))
if not image or self.force_tag:
self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag))
self.results['changed'] = True
self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag))
if not self.check_mode:
try:
# Finding the image does not always work, especially running a localhost registry. In those
# cases, if we don't set force=True, it errors.
image_name = name
if tag and not re.search(tag, name):
image_name = "%s:%s" % (name, tag)
tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True)
if not tag_status:
raise Exception("Tag operation failed.")
except Exception as exc:
self.fail("Error: failed to tag image - %s" % str(exc))
self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
if image and image['Id'] == self.results['image']['Id']:
self.results['changed'] = False
if push:
self.push_image(repo, repo_tag)
    def build_image(self):
        '''
        Build an image

        :return: image dict
        '''
        params = dict(
            path=self.build_path,
            tag=self.name,
            rm=self.rm,
            nocache=self.nocache,
            timeout=self.http_timeout,
            pull=self.pull,
            forcerm=self.rm,
            dockerfile=self.dockerfile,
            decode=True,
        )
        if self.client.docker_py_version < LooseVersion('3.0.0'):
            # docker-py >= 3.0 always streams; older releases must be asked to.
            params['stream'] = True
        build_output = []
        if self.tag:
            params['tag'] = "%s:%s" % (self.name, self.tag)
        if self.container_limits:
            params['container_limits'] = self.container_limits
        if self.buildargs:
            # Build args must be native strings before handing them to the daemon.
            for key, value in self.buildargs.items():
                self.buildargs[key] = to_native(value)
            params['buildargs'] = self.buildargs
        if self.cache_from:
            params['cache_from'] = self.cache_from
        if self.network:
            params['network_mode'] = self.network
        if self.use_config_proxy:
            params['use_config_proxy'] = self.use_config_proxy
            # Due to a bug in docker-py, it will crash if
            # use_config_proxy is True and buildargs is None
            if 'buildargs' not in params:
                params['buildargs'] = {}
        # Stream the build, collecting stdout-style lines so they can be
        # included in any failure message.
        for line in self.client.build(**params):
            # line = json.loads(line)
            self.log(line, pretty_print=True)
            if "stream" in line:
                build_output.append(line["stream"])
            if line.get('error'):
                if line.get('errorDetail'):
                    errorDetail = line.get('errorDetail')
                    self.fail(
                        "Error building %s - code: %s, message: %s, logs: %s" % (
                            self.name,
                            errorDetail.get('code'),
                            errorDetail.get('message'),
                            build_output))
                else:
                    self.fail("Error building %s - message: %s, logs: %s" % (
                        self.name, line.get('error'), build_output))
        return self.client.find_image(name=self.name, tag=self.tag)
def load_image(self):
'''
Load an image from a .tar archive
:return: image dict
'''
try:
self.log("Opening image %s" % self.load_path)
image_tar = open(self.load_path, 'rb')
except Exception as exc:
self.fail("Error opening image %s - %s" % (self.load_path, str(exc)))
try:
self.log("Loading image from %s" % self.load_path)
self.client.load_image(image_tar)
except Exception as exc:
self.fail("Error loading image %s - %s" % (self.name, str(exc)))
try:
image_tar.close()
except Exception as exc:
self.fail("Error closing image %s - %s" % (self.name, str(exc)))
return self.client.find_image(self.name, self.tag)
def main():
    """Entry point: declare module options, migrate legacy ones, run ImageManager."""
    argument_spec = dict(
        source=dict(type='str', choices=['build', 'load', 'pull', 'local']),
        build=dict(type='dict', suboptions=dict(
            cache_from=dict(type='list', elements='str'),
            container_limits=dict(type='dict', options=dict(
                memory=dict(type='int'),
                memswap=dict(type='int'),
                cpushares=dict(type='int'),
                cpusetcpus=dict(type='str'),
            )),
            dockerfile=dict(type='str'),
            http_timeout=dict(type='int'),
            network=dict(type='str'),
            nocache=dict(type='bool', default=False),
            path=dict(type='path', required=True),
            pull=dict(type='bool'),
            rm=dict(type='bool', default=True),
            args=dict(type='dict'),
            use_config_proxy=dict(type='bool'),
        )),
        archive_path=dict(type='path'),
        # The top-level copies of the build.* options are deprecated aliases.
        container_limits=dict(type='dict', options=dict(
            memory=dict(type='int'),
            memswap=dict(type='int'),
            cpushares=dict(type='int'),
            cpusetcpus=dict(type='str'),
        ), removed_in_version='2.12'),
        dockerfile=dict(type='str', removed_in_version='2.12'),
        force=dict(type='bool', removed_in_version='2.12'),
        force_source=dict(type='bool', default=False),
        force_absent=dict(type='bool', default=False),
        force_tag=dict(type='bool', default=False),
        http_timeout=dict(type='int', removed_in_version='2.12'),
        load_path=dict(type='path'),
        name=dict(type='str', required=True),
        nocache=dict(type='bool', default=False, removed_in_version='2.12'),
        path=dict(type='path', aliases=['build_path'], removed_in_version='2.12'),
        pull=dict(type='bool', removed_in_version='2.12'),
        push=dict(type='bool', default=False),
        repository=dict(type='str'),
        rm=dict(type='bool', default=True, removed_in_version='2.12'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'build']),
        tag=dict(type='str', default='latest'),
        use_tls=dict(type='str', choices=['no', 'encrypt', 'verify'], removed_in_version='2.11'),
        buildargs=dict(type='dict', removed_in_version='2.12'),
    )
    required_if = [
        # ('state', 'present', ['source']), -- enable in Ansible 2.12.
        # ('source', 'build', ['build']), -- enable in Ansible 2.12.
        ('source', 'load', ['load_path']),
    ]
    # Usage detectors: version requirements below only apply when the
    # corresponding build suboption is actually set by the user.
    def detect_build_cache_from(client):
        return client.module.params['build'] and client.module.params['build'].get('cache_from') is not None
    def detect_build_network(client):
        return client.module.params['build'] and client.module.params['build'].get('network') is not None
    def detect_use_config_proxy(client):
        return client.module.params['build'] and client.module.params['build'].get('use_config_proxy') is not None
    option_minimal_versions = dict()
    option_minimal_versions["build.cache_from"] = dict(docker_py_version='2.1.0', docker_api_version='1.25', detect_usage=detect_build_cache_from)
    option_minimal_versions["build.network"] = dict(docker_py_version='2.4.0', docker_api_version='1.25', detect_usage=detect_build_network)
    option_minimal_versions["build.use_config_proxy"] = dict(docker_py_version='3.7.0', detect_usage=detect_use_config_proxy)
    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        required_if=required_if,
        supports_check_mode=True,
        min_docker_version='1.8.0',
        min_docker_api_version='1.20',
        option_minimal_versions=option_minimal_versions,
    )
    # 'build' state is a deprecated alias of 'present'.
    if client.module.params['state'] == 'build':
        client.module.warn('The "build" state has been deprecated for a long time '
                           'and will be removed in Ansible 2.11. Please use '
                           '"present", which has the same meaning as "build".')
        client.module.params['state'] = 'present'
    if client.module.params['use_tls']:
        client.module.warn('The "use_tls" option has been deprecated for a long time '
                           'and will be removed in Ansible 2.11. Please use the'
                           '"tls" and "validate_certs" options instead.')
    if not is_valid_tag(client.module.params['tag'], allow_empty=True):
        client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag']))
    # Map of deprecated top-level option name -> its key inside 'build'.
    build_options = dict(
        container_limits='container_limits',
        dockerfile='dockerfile',
        http_timeout='http_timeout',
        nocache='nocache',
        path='path',
        pull='pull',
        rm='rm',
        buildargs='args',
    )
    # Copy legacy top-level options into the 'build' dict, warning about the
    # rename; specifying both the old and new form of an option is an error.
    for option, build_option in build_options.items():
        default_value = None
        if option in ('rm', ):
            default_value = True
        elif option in ('nocache', ):
            default_value = False
        if client.module.params[option] != default_value:
            if client.module.params['build'] is None:
                client.module.params['build'] = dict()
            if client.module.params['build'].get(build_option, default_value) != default_value:
                client.fail('Cannot specify both %s and build.%s!' % (option, build_option))
            client.module.params['build'][build_option] = client.module.params[option]
            client.module.warn('Please specify build.%s instead of %s. The %s option '
                               'has been renamed and will be removed in Ansible 2.12.' % (build_option, option, option))
    if client.module.params['source'] == 'build':
        if (not client.module.params['build'] or not client.module.params['build'].get('path')):
            client.fail('If "source" is set to "build", the "build.path" option must be specified.')
        if client.module.params['build'].get('pull') is None:
            client.module.warn("The default for build.pull is currently 'yes', but will be changed to 'no' in Ansible 2.12. "
                               "Please set build.pull explicitly to the value you need.")
            client.module.params['build']['pull'] = True  # TODO: change to False in Ansible 2.12
    if client.module.params['state'] == 'present' and client.module.params['source'] is None:
        # Autodetection. To be removed in Ansible 2.12.
        if (client.module.params['build'] or dict()).get('path'):
            client.module.params['source'] = 'build'
        elif client.module.params['load_path']:
            client.module.params['source'] = 'load'
        else:
            client.module.params['source'] = 'pull'
        client.module.warn('The value of the "source" option was determined to be "%s". '
                           'Please set the "source" option explicitly. Autodetection will '
                           'be removed in Ansible 2.12.' % client.module.params['source'])
    # Legacy catch-all 'force' fans out to the three specific force flags.
    if client.module.params['force']:
        client.module.params['force_source'] = True
        client.module.params['force_absent'] = True
        client.module.params['force_tag'] = True
        client.module.warn('The "force" option will be removed in Ansible 2.12. Please '
                           'use the "force_source", "force_absent" or "force_tag" option '
                           'instead, depending on what you want to force.')
    try:
        results = dict(
            changed=False,
            actions=[],
            image={}
        )
        # ImageManager performs the requested action and fills in 'results'.
        ImageManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
if __name__ == '__main__':
    main()
|
hortonworks/hortonworks-sandbox | refs/heads/master | desktop/core/ext-py/south/south/management/commands/test.py | 4 | from django.core.management.commands import test
import south.management.commands.syncdb
class Command(test.Command):
    """Django ``test`` command that applies South migrations to test databases.

    Patches syncdb so that creating the test database runs migrations
    instead of a plain syncdb, then defers to the stock test command.
    """
    def handle(self, *args, **kwargs):
        # Patching must happen before the runner creates the test database.
        south.management.commands.syncdb.patch_for_test_db_setup()
        super(Command, self).handle(*args, **kwargs)
|
peterwilletts24/Python-Scripts | refs/heads/master | plot_scripts/Rain/Diurnal/Diurnal_Surface_Flux_Embrace.py | 2 | """
Load npy xy, plot and save
"""
import os, sys
import matplotlib
#matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib import cm
import pdb
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rc('font', family = 'serif', serif = 'cmr10')
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 14}
matplotlib.rc('font', **font)
import numpy as np
from datetime import timedelta
import datetime
import math
import imp
import re
from textwrap import wrap
import iris.analysis.geometry
from shapely.geometry import Polygon
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
#unrotate = imp.load_source('util', '/home/pwille/python_scripts/modules/unrotate_pole.py')
# Offset used to shift model times from UTC to local (Indian Standard) time.
utc_to_local=datetime.timedelta(hours=5, minutes=30)
###############
# Things to change
top_dir='/nfs/a90/eepdw/Data/EMBRACE'
save_dir='/nfs/a90/eepdw/Figures/EMBRACE/Diurnal'
# UM pp/stash code selecting which surface-flux diagnostic to plot (see
# the pp_file -> diag_title mapping in main()).
pp_file = '2201'
types_of_plot=['large_domain_only', '8_and_12_km_only', 'all']
#types_of_plot=['8_and_12_km_only', 'all']
# NOTE: this second assignment deliberately overrides the list above.
types_of_plot=['all']
# lon_max = 101.866
# lon_min = 64.115
# lat_max= 33.
# lat_min=-6.79
#trmm_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/'
#trmm_file = "trmm_diurnal_average_lat_%s_%s_lon_%s_%s.npz" % (lat_min,lat_max, lon_min, lon_max)
#############
# Make own time x-axis (UTC)
#d = matplotlib.dates.drange(datetime.datetime(2011, 8, 21, 6,30), datetime.datetime(2011, 8, 22, 6, 30), timedelta(hours=1))
# Hour:minute tick labels for the time axis.
formatter = matplotlib.dates.DateFormatter('%H:%M')
def main():
for type_of_plot in types_of_plot:
if type_of_plot=='large_domain_only':
experiment_ids_p = [ 'djznw', 'dkjxq', 'djznq' ] # Params
experiment_ids_e = ['dkhgu', 'dkbhu'] # Explicit
if type_of_plot=='8_and_12_km_only':
experiment_ids_p = [ 'djznw' 'dkmbq', 'dklzq' ] # Params
experiment_ids_e = ['dklyu', 'dklwu', 'dkbhu'] # Explicit
if type_of_plot=='all':
experiment_ids_p = ['djznw', 'djzny', 'djznq', 'dklzq', 'dkmbq', 'dkjxq' ] # Most of Params
experiment_ids_e = ['dklwu', 'dklyu', 'djzns', 'dkbhu', 'djznu', 'dkhgu'] # Most of Explicit
NUM_COLOURS = 16
cmap=cm.get_cmap(cm.Set1, NUM_COLOURS)
#cgen = (cmap(1.*i/NUM_COLORS) for i in range(NUM_COLORS))
for ls in ['land', 'sea', 'total']:
# for ls in ['sea']:
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
#legendEntries=[]
#legendtext=[]
if ls=='sea':
bbox_anchor=-0.25
else:
bbox_anchor=0
# Change the legend label colors to almost black
#texts = l0.texts
#for t in texts:
#t.set_color('#262626')
legendEntries=[]
legendtext=[]
for c, experiment_id in enumerate(experiment_ids_p):
expmin1 = experiment_id[:-1]
if (experiment_id=='djznw'):
print experiment_id
colour = cmap(1.*1/NUM_COLOURS)
linewidth=0.2
linestylez='--'
if (experiment_id=='djzny'):
print experiment_id
colour = cmap(1.*3/NUM_COLOURS)
linewidth=0.5
linestylez='--'
if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=0.8
if (experiment_id=='djznq'):
linestylez='--'
if (experiment_id=='dkjxq'):
linestylez=':'
if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
print experiment_id
colour = cmap(1.*7/NUM_COLOURS)
linewidth=1
if (experiment_id=='dklzq'):
linestylez='--'
if (experiment_id=='dklwu'):
linestylez='-'
if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
print experiment_id
colour = cmap(1.*9/NUM_COLOURS)
linewidth=1.3
if (experiment_id=='dkmbq'):
linestylez='--'
if (experiment_id=='dklyu'):
linestylez='-'
if (experiment_id=='djzns'):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=1.6
linestylez='-'
if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
print experiment_id
colour = cmap(1.*13/NUM_COLOURS)
linewidth=1.9
if (experiment_id=='dkbhu'):
linestylez='-'
if (experiment_id=='dkhgu'):
linestylez=':'
if (experiment_id=='djznu'):
print experiment_id
colour = cmap(1.*15/NUM_COLOURS)
linewidth=2.
linestylez='-'
try:
plotnp = np.load('%s/%s/%s/%s_%s_diurnal_np.npy' % (top_dir, expmin1, experiment_id, pp_file, ls))
#if (ls != 'total'):
# Make own time x-axis (local)
hour_arg_sort=np.argsort(plotnp[1])
#time_sort = plotnp[1][hour_arg_sort]
data_sort = plotnp[0][hour_arg_sort]
minute_min,hour_min = math.modf(plotnp[1].min())
minute_max,hour_max = math.modf(plotnp[1].max())
#pdb.set_trace()
d = matplotlib.dates.drange(datetime.datetime(2011, 8, 21, int(hour_min), int(minute_min*60))+utc_to_local, datetime.datetime(2011, 8, 21, int(hour_max), int(minute_max*60) )+utc_to_local, timedelta(hours=1))
l, = plt.plot_date(d, data_sort, label=model_name_convert_legend.main(experiment_id), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
#else:
# l, = plt.plot_date(d, plotnp*3600, label=model_name_convert_legend.main(experiment_id), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
pass
l1=plt.legend(legendEntries, legendtext, title='Parametrised', loc=9, frameon=False, prop={'size':12},
bbox_to_anchor=(0+bbox_anchor, 0,1, 1))
# Change the legend label colors to almost black
texts = l1.texts
for t in texts:
t.set_color('#262626')
legendEntries=[]
legendtext=[]
c1=0
for c, experiment_id in enumerate(experiment_ids_e):
if (experiment_id=='djznw'):
print experiment_id
colour = cmap(1.*1/NUM_COLOURS)
linewidth=0.2
linestylez='--'
if (experiment_id=='djzny'):
print experiment_id
colour = cmap(1.*3/NUM_COLOURS)
linewidth=0.5
linestylez='--'
if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=0.8
if (experiment_id=='djznq'):
linestylez='--'
if (experiment_id=='dkjxq'):
linestylez=':'
if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
print experiment_id
colour = cmap(1.*7/NUM_COLOURS)
linewidth=1
if (experiment_id=='dklzq'):
linestylez='--'
if (experiment_id=='dklwu'):
linestylez='-'
if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
print experiment_id
colour = cmap(1.*9/NUM_COLOURS)
linewidth=1.3
if (experiment_id=='dkmbq'):
linestylez='--'
if (experiment_id=='dklyu'):
linestylez='-'
if (experiment_id=='djzns'):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=1.6
linestylez='-'
if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
print experiment_id
colour = cmap(1.*13/NUM_COLOURS)
linewidth=1.9
if (experiment_id=='dkbhu'):
linestylez='-'
if (experiment_id=='dkhgu'):
linestylez=':'
if (experiment_id=='djznu'):
print experiment_id
colour = cmap(1.*15/NUM_COLOURS)
linewidth=2.
linestylez='-'
expmin1 = experiment_id[:-1]
try:
plotnp = np.load('%s/%s/%s/%s_%s_diurnal_np.npy' % (top_dir, expmin1, experiment_id, pp_file, ls))
#plotnp = np.ort(pnp, axis=1)
#if (ls != 'total'):
# Make own time x-axis (local)
hour_arg_sort=np.argsort(plotnp[1])
#time_sort = plotnp[1][hour_arg_sort]
data_sort = plotnp[0][hour_arg_sort]
minute_min,hour_min = math.modf(plotnp[1].min())
minute_max,hour_max = math.modf(plotnp[1].max())
d = matplotlib.dates.drange(datetime.datetime(2011, 8, 21, int(hour_min), int(minute_min*60))+utc_to_local, datetime.datetime(2011, 8, 21, int(hour_max), int(minute_max*60) )+utc_to_local, timedelta(hours=1))
l, = plt.plot_date(d, data_sort, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
#else:
#l, = plt.plot_date(d, plotnp*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
pass
l2=plt.legend(legendEntries, legendtext, title='Explicit', loc=9, frameon=False, prop={'size':12},
bbox_to_anchor=(0.155+bbox_anchor, 0,1, 1))
plt.gca().add_artist(l1)
#plt.gca().add_artist(l0)
plt.gca().xaxis.set_major_formatter(formatter)
# Change the legend label colors to almost black
texts = l2.texts
for t in texts:
t.set_color('#262626')
plt.xlabel('Time (Local)')
plt.ylabel('${W m^-2}$')
if pp_file == '1201':
diag_title = 'Net Down Surface SW Flux'
if pp_file == '2201':
diag_title = 'Net Down Surface LW Flux'
if pp_file == '3217':
diag_title = 'Surface Heat Flux'
if pp_file == '3234':
diag_title = 'Surface Latent Heat Flux'
title='Domain Averaged %s - %s' % (diag_title, ls)
t=re.sub('(.{68} )', '\\1\n', str(title), 0, re.DOTALL)
t = re.sub(r'[(\']', ' ', t)
t = re.sub(r'[\',)]', ' ', t)
pp_filenodot= pp_file.replace(".", "")
# Bit of formatting
# Set colour of axis lines
spines_to_keep = ['bottom', 'left']
for spine in spines_to_keep:
ax.spines[spine].set_linewidth(0.5)
ax.spines[spine].set_color('#262626')
# Remove top and right axes lines ("spines")
spines_to_remove = ['top', 'right']
for spine in spines_to_remove:
ax.spines[spine].set_visible(False)
# Get rid of ticks. The position of the numbers is informative enough of
# the position of the value.
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
# Change the labels to the off-black
ax.xaxis.label.set_color('#262626')
ax.yaxis.label.set_color('#262626')
if not os.path.exists(save_dir): os.makedirs(save_dir)
plt.savefig('%s/EMBRACE_Diurnal_%s_%s_%s_notitle.png' % (save_dir, pp_filenodot, ls, type_of_plot),
format='png', bbox_inches='tight')
plt.title('\n'.join(wrap('%s' % (t.title()), 1000,replace_whitespace=False)), fontsize=16)
#plt.show()
plt.savefig('%s/EMBRACE_Diurnal_%s_%s_%s.png' % (save_dir, pp_filenodot, ls, type_of_plot),
format='png', bbox_inches='tight')
plt.close()
#except Exception, e:
#print e
if __name__ == '__main__':
main()
|
chandolia/python-social-auth | refs/heads/master | social/backends/yahoo.py | 70 | """
Yahoo OpenId, OAuth1 and OAuth2 backends, docs at:
http://psa.matiasaguirre.net/docs/backends/yahoo.html
"""
from requests.auth import HTTPBasicAuth
from social.utils import handle_http_errors
from social.backends.open_id import OpenIdAuth
from social.backends.oauth import BaseOAuth2, BaseOAuth1
class YahooOpenId(OpenIdAuth):
    """Yahoo OpenID authentication backend"""
    name = 'yahoo'
    # Fixed OpenID provider endpoint; the user is discovered from it.
    URL = 'http://me.yahoo.com'
class YahooOAuth(BaseOAuth1):
    """Yahoo OAuth authentication backend. DEPRECATED"""
    name = 'yahoo-oauth'
    ID_KEY = 'guid'
    AUTHORIZATION_URL = 'https://api.login.yahoo.com/oauth/v2/request_auth'
    REQUEST_TOKEN_URL = \
        'https://api.login.yahoo.com/oauth/v2/get_request_token'
    ACCESS_TOKEN_URL = 'https://api.login.yahoo.com/oauth/v2/get_token'
    EXTRA_DATA = [
        ('guid', 'id'),
        ('access_token', 'access_token'),
        ('expires', 'expires')
    ]

    def get_user_details(self, response):
        """Build the standard user-details dict from a Yahoo profile."""
        fullname, first_name, last_name = self.get_user_names(
            first_name=response.get('givenName'),
            last_name=response.get('familyName')
        )
        # Keep only entries with a usable address, primary address first.
        addresses = [entry for entry in response.get('emails', [])
                     if entry.get('handle')]
        addresses.sort(key=lambda entry: entry.get('primary', False),
                       reverse=True)
        email = addresses[0]['handle'] if addresses else ''
        return {'username': response.get('nickname'),
                'email': email,
                'fullname': fullname,
                'first_name': first_name,
                'last_name': last_name}

    def user_data(self, access_token, *args, **kwargs):
        """Fetch the profile document for the token's GUID."""
        guid = self._get_guid(access_token)
        profile_url = ('https://social.yahooapis.com/v1/user/'
                       '{0}/profile?format=json').format(guid)
        document = self.get_json(profile_url,
                                 auth=self.oauth_auth(access_token))
        return document['profile']

    def _get_guid(self, access_token):
        """
        Beause you have to provide GUID for every API request it's also
        returned during one of OAuth calls
        """
        document = self.get_json(
            'https://social.yahooapis.com/v1/me/guid?format=json',
            auth=self.oauth_auth(access_token)
        )
        return document['guid']['value']
class YahooOAuth2(BaseOAuth2):
    """Yahoo OAuth2 authentication backend"""
    name = 'yahoo-oauth2'
    ID_KEY = 'guid'
    AUTHORIZATION_URL = 'https://api.login.yahoo.com/oauth2/request_auth'
    ACCESS_TOKEN_URL = 'https://api.login.yahoo.com/oauth2/get_token'
    ACCESS_TOKEN_METHOD = 'POST'
    EXTRA_DATA = [
        ('xoauth_yahoo_guid', 'id'),
        ('access_token', 'access_token'),
        ('expires_in', 'expires'),
        ('refresh_token', 'refresh_token'),
        ('token_type', 'token_type'),
    ]

    def get_user_names(self, first_name, last_name):
        """Return (fullname, first_name, last_name), tolerating a missing part.

        BUG FIX: the original joined (first_name, last_name) directly, so a
        profile providing only one of the two raised TypeError (str.join
        cannot handle None). Only truthy parts are joined now.
        """
        if first_name or last_name:
            fullname = ' '.join(part for part in (first_name, last_name)
                                if part)
            return fullname, first_name, last_name
        return None, None, None

    def get_user_details(self, response):
        """
        Return user details from Yahoo Profile.
        To Get user email you need the profile private read permission.
        """
        fullname, first_name, last_name = self.get_user_names(
            first_name=response.get('givenName'),
            last_name=response.get('familyName')
        )
        emails = [email for email in response.get('emails', [])
                  if 'handle' in email]
        # Prefer the address flagged as primary; fall back to the GUID.
        emails.sort(key=lambda e: e.get('primary', False), reverse=True)
        email = emails[0]['handle'] if emails else response.get('guid', '')
        return {
            'username': response.get('nickname'),
            'email': email,
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name
        }

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        url = 'https://social.yahooapis.com/v1/user/{0}/profile?format=json' \
            .format(kwargs['response']['xoauth_yahoo_guid'])
        return self.get_json(url, headers={
            'Authorization': 'Bearer {0}'.format(access_token)
        }, method='GET')['profile']

    @handle_http_errors
    def auth_complete(self, *args, **kwargs):
        """Completes loging process, must return user instance"""
        self.process_error(self.data)
        # Yahoo requires HTTP Basic auth (client id/secret) on the token call.
        response = self.request_access_token(
            self.ACCESS_TOKEN_URL,
            auth=HTTPBasicAuth(*self.get_key_and_secret()),
            data=self.auth_complete_params(self.validate_state()),
            headers=self.auth_headers(),
            method=self.ACCESS_TOKEN_METHOD
        )
        self.process_error(response)
        return self.do_auth(response['access_token'], response=response,
                            *args, **kwargs)

    def refresh_token_params(self, token, *args, **kwargs):
        """Parameters for the refresh-token grant."""
        return {
            'refresh_token': token,
            'grant_type': 'refresh_token',
            'redirect_uri': 'oob',  # out of bounds
        }

    def refresh_token(self, token, *args, **kwargs):
        """Exchange a refresh token for a fresh access token."""
        params = self.refresh_token_params(token, *args, **kwargs)
        url = self.REFRESH_TOKEN_URL or self.ACCESS_TOKEN_URL
        method = self.REFRESH_TOKEN_METHOD
        # GET requests carry the grant in the query string, POST in the body.
        key = 'params' if method == 'GET' else 'data'
        request_args = {
            'headers': self.auth_headers(),
            'method': method,
            key: params
        }
        request = self.request(
            url,
            auth=HTTPBasicAuth(*self.get_key_and_secret()),
            **request_args
        )
        return self.process_refresh_token_response(request, *args, **kwargs)

    def auth_complete_params(self, state=None):
        """Parameters for the authorization-code token exchange."""
        return {
            'grant_type': 'authorization_code',  # request auth code
            'code': self.data.get('code', ''),  # server response code
            'redirect_uri': self.get_redirect_uri(state)
        }
|
cogeorg/black_rhino | refs/heads/master | examples/firesales_simple/networkx/algorithms/link_analysis/tests/test_pagerank.py | 22 | #!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
from nose.plugins.attrib import attr
import random
import networkx
# Example from
# A. Langville and C. Meyer, "A survey of eigenvector methods of web
# information retrieval." http://citeseer.ist.psu.edu/713792.html
class TestPageRank:
    """PageRank tests.

    Reference values come from A. Langville and C. Meyer, "A survey of
    eigenvector methods of web information retrieval."
    """

    def setUp(self):
        digraph = networkx.DiGraph()
        digraph.add_edges_from([(1, 2), (1, 3),
                                (3, 1), (3, 2), (3, 5),
                                (4, 5), (4, 6),
                                (5, 4), (5, 6),
                                (6, 4)])
        self.G = digraph
        self.G.pagerank = dict(zip(digraph,
                                   [0.03721197, 0.05395735, 0.04150565,
                                    0.37508082, 0.20599833, 0.28624589]))

    def test_pagerank(self):
        G = self.G
        ranks = networkx.pagerank(G, alpha=0.9, tol=1.e-08)
        for node in G:
            assert_almost_equal(ranks[node], G.pagerank[node], places=4)
        nstart = dict((node, random.random()) for node in G)
        ranks = networkx.pagerank(G, alpha=0.9, tol=1.e-08, nstart=nstart)
        for node in G:
            assert_almost_equal(ranks[node], G.pagerank[node], places=4)
        # Zero iterations can never converge, so this must raise.
        assert_raises(networkx.NetworkXError, networkx.pagerank, G,
                      max_iter=0)

    @attr('numpy')
    def test_numpy_pagerank(self):
        try:
            import numpy
        except ImportError:
            raise SkipTest('numpy not available.')
        G = self.G
        ranks = networkx.pagerank_numpy(G, alpha=0.9)
        for node in G:
            assert_almost_equal(ranks[node], G.pagerank[node], places=4)
        personalize = dict((node, random.random()) for node in G)
        ranks = networkx.pagerank_numpy(G, alpha=0.9,
                                        personalization=personalize)

    @attr('numpy')
    def test_google_matrix(self):
        try:
            import numpy.linalg
        except ImportError:
            raise SkipTest('numpy not available.')
        G = self.G
        M = networkx.google_matrix(G, alpha=0.9)
        # The dominant left eigenvector of the Google matrix is the PageRank.
        eigenvalues, eigenvectors = numpy.linalg.eig(M.T)
        p = numpy.array(eigenvectors[:, 0] / eigenvectors[:, 0].sum())[:, 0]
        for a, b in zip(p, self.G.pagerank.values()):
            assert_almost_equal(a, b)
        personalize = dict((node, random.random()) for node in G)
        M = networkx.google_matrix(G, alpha=0.9, personalization=personalize)
        _ = personalize.pop(1)
        # An incomplete personalization dict is an error.
        assert_raises(networkx.NetworkXError, networkx.google_matrix, G,
                      personalization=personalize)

    def test_scipy_pagerank(self):
        G = self.G
        try:
            import scipy
        except ImportError:
            raise SkipTest('scipy not available.')
        ranks = networkx.pagerank_scipy(G, alpha=0.9, tol=1.e-08)
        for node in G:
            assert_almost_equal(ranks[node], G.pagerank[node], places=4)
        personalize = dict((node, random.random()) for node in G)
        ranks = networkx.pagerank_scipy(G, alpha=0.9, tol=1.e-08,
                                        personalization=personalize)
        assert_raises(networkx.NetworkXError, networkx.pagerank_scipy, G,
                      max_iter=0)

    def test_personalization(self):
        graph = networkx.complete_graph(4)
        personalize = {0: 1, 1: 1, 2: 4, 3: 4}
        expected = {0: 0.1, 1: 0.1, 2: 0.4, 3: 0.4}
        # With alpha=0 the rank is the normalized personalization vector.
        ranks = networkx.pagerank(graph, alpha=0.0,
                                  personalization=personalize)
        for node in graph:
            assert_almost_equal(ranks[node], expected[node], places=4)
        _ = personalize.pop(0)
        assert_raises(networkx.NetworkXError, networkx.pagerank, graph,
                      personalization=personalize)

    @attr('numpy')
    def test_empty(self):
        try:
            import numpy
        except ImportError:
            raise SkipTest('numpy not available.')
        graph = networkx.Graph()
        assert_equal(networkx.pagerank(graph), {})
        assert_equal(networkx.pagerank_numpy(graph), {})
        assert_equal(networkx.google_matrix(graph).shape, (0, 0))

    def test_empty_scipy(self):
        try:
            import scipy
        except ImportError:
            raise SkipTest('scipy not available.')
        graph = networkx.Graph()
        assert_equal(networkx.pagerank_scipy(graph), {})
|
yeming233/rally | refs/heads/master | tests/unit/plugins/openstack/scenarios/zaqar/test_utils.py | 10 | # Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.zaqar import utils
from tests.unit import fakes
from tests.unit import test
# Import-path prefix used to build mock.patch targets for ZaqarScenario.
UTILS = "rally.plugins.openstack.scenarios.zaqar.utils."
class ZaqarScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for the atomic actions exposed by ZaqarScenario."""

    @mock.patch(UTILS + "ZaqarScenario.generate_random_name",
                return_value="kitkat")
    def test_queue_create(self, mock_generate_random_name):
        # Queue creation delegates to the zaqar client with the random name
        # and records the "zaqar.create_queue" atomic action.
        scenario = utils.ZaqarScenario(self.context)
        created = scenario._queue_create(fakearg="fakearg")
        self.assertEqual(self.clients("zaqar").queue.return_value, created)
        self.clients("zaqar").queue.assert_called_once_with("kitkat",
                                                            fakearg="fakearg")
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "zaqar.create_queue")

    def test_queue_delete(self):
        # Deletion must call the queue's delete() exactly once and be timed.
        fake_queue = fakes.FakeQueue()
        fake_queue.delete = mock.MagicMock()
        scenario = utils.ZaqarScenario(context=self.context)
        scenario._queue_delete(fake_queue)
        fake_queue.delete.assert_called_once_with()
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "zaqar.delete_queue")

    def test_messages_post(self):
        # Posting forwards the message batch unchanged to queue.post().
        fake_queue = fakes.FakeQueue()
        fake_queue.post = mock.MagicMock()
        batch = [{"body": {"id": "one"}, "ttl": 100},
                 {"body": {"id": "two"}, "ttl": 120},
                 {"body": {"id": "three"}, "ttl": 140}]
        min_msg_count = max_msg_count = len(batch)
        scenario = utils.ZaqarScenario(context=self.context)
        scenario._messages_post(fake_queue, batch,
                                min_msg_count, max_msg_count)
        fake_queue.post.assert_called_once_with(batch)

    def test_messages_list(self):
        # Listing calls queue.messages() once and records the atomic action.
        fake_queue = fakes.FakeQueue()
        fake_queue.messages = mock.MagicMock()
        scenario = utils.ZaqarScenario(context=self.context)
        scenario._messages_list(fake_queue)
        fake_queue.messages.assert_called_once_with()
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "zaqar.list_messages")
|
mfherbst/spack | refs/heads/develop | lib/spack/spack/test/cmd/spec.py | 2 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import re
import pytest
import spack.spec
from spack.main import SpackCommand
# Run every test in this module with the config and mock-packages fixtures.
pytestmark = pytest.mark.usefixtures('config', 'mutable_mock_packages')

# Callable wrapper around the ``spack spec`` command line.
spec = SpackCommand('spec')
def test_spec():
    """`spack spec mpileaks` concretizes the full DAG to known versions."""
    output = spec('mpileaks')
    for concretized in ('mpileaks@2.3', 'callpath@1.0', 'dyninst@8.2',
                        'libdwarf@20130729', 'libelf@0.8.1', 'mpich@3.0.4'):
        assert concretized in output
def test_spec_yaml():
    """`spack spec --yaml` output round-trips through Spec.from_yaml."""
    output = spec('--yaml', 'mpileaks')

    mpileaks = spack.spec.Spec.from_yaml(output)
    for dependency in ('mpileaks', 'callpath', 'dyninst',
                       'libdwarf', 'libelf', 'mpich'):
        assert dependency in mpileaks
def _parse_types(string):
"""Parse deptypes for specs from `spack spec -t` output."""
lines = string.strip().split('\n')
result = {}
for line in lines:
match = re.match(r'\[([^]]*)\]\s*\^?([^@]*)@', line)
if match:
types, name = match.groups()
result.setdefault(name, []).append(types)
result[name] = sorted(result[name])
return result
def test_spec_deptypes_nodes():
    """Deptypes shown by `spack spec -t --cover nodes` on the diamond DAG.

    With node coverage each package is printed once, so each name maps to a
    single deptype string.  Flag fields are fixed-width ('b' build, 'l' link,
    'r' run).
    """
    output = spec('--types', '--cover', 'nodes', 'dt-diamond')
    types = _parse_types(output)

    assert types['dt-diamond'] == ['    ']
    assert types['dt-diamond-left'] == ['bl  ']
    assert types['dt-diamond-right'] == ['bl  ']
    assert types['dt-diamond-bottom'] == ['blr ']
def test_spec_deptypes_edges():
    """Deptypes shown by `spack spec -t --cover edges` on the diamond DAG.

    With edge coverage a package reached along several edges is printed once
    per edge, so 'dt-diamond-bottom' appears with two deptype strings.
    """
    output = spec('--types', '--cover', 'edges', 'dt-diamond')
    types = _parse_types(output)

    assert types['dt-diamond'] == ['    ']
    assert types['dt-diamond-left'] == ['bl  ']
    assert types['dt-diamond-right'] == ['bl  ']
    assert types['dt-diamond-bottom'] == ['b   ', 'blr ']
def test_spec_returncode():
    """`spack spec` with no spec argument fails with exit status 1."""
    # ``spack.main`` is reachable here because ``from spack.main import
    # SpackCommand`` above loads the submodule onto the ``spack`` package.
    with pytest.raises(spack.main.SpackCommandError):
        spec()
    assert spec.returncode == 1
|
alirizakeles/zato | refs/heads/dsuch-f-gh723-add-exe-agent | code/alembic/versions/0028_ae3419a9_gh323_http_soap_migration.py | 7 | """gh323 http_soap migration
Revision ID: 0028_ae3419a9
Revises: 0027_e139a019
Create Date: 2014-11-03 19:31:51
"""
# revision identifiers, used by Alembic.
revision = '0028_ae3419a9'        # this migration's id
down_revision = '0027_e139a019'   # previous migration in the chain
def upgrade():
    """Apply the gh323 http_soap migration (intentionally a no-op)."""
    pass
def downgrade():
    """Revert the gh323 http_soap migration (intentionally a no-op)."""
    pass
|
AudreyFrancisco/AliPhysics | refs/heads/master | PWGJE/EMCALJetTasks/Tracks/analysis/base/struct/TriggerPatchTHnSparse.py | 41 | #**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
from PWGJE.EMCALJetTasks.Tracks.analysis.base.struct.THnSparseWrapper import THnSparseWrapper, AxisFormat
from copy import copy, deepcopy
class AxisFormatTriggerPatches(AxisFormat):
    '''
    Axis layout of the trigger patch THnSparse.
    '''

    def __init__(self):
        '''
        Register the six patch axes in their THnSparse dimension order.
        '''
        AxisFormat.__init__(self, "patches")
        for dimension, axisname in enumerate(
                ["energy", "energyRough", "amplitude", "eta", "phi", "isMain"]):
            self._axes[axisname] = dimension

    def __copy__(self):
        '''
        Create a shallow copy of this axis format.
        '''
        clone = AxisFormatTriggerPatches()
        clone._Copy(self)
        return clone

    def __deepcopy__(self, memo):
        '''
        Create a deep copy of this axis format.
        '''
        clone = AxisFormatTriggerPatches()
        clone._Deepcopy(self, memo)
        return clone
class TriggerPatchTHnSparse(THnSparseWrapper):
    '''
    Wrapper around the trigger patch THnSparse with axes
    - Offline Energy
    - Rough Energy
    - Amplitude
    - eta
    - phi
    - isMain
    '''

    def __init__(self, roothist):
        '''
        Wrap the ROOT histogram and attach the patch axis definition.
        '''
        THnSparseWrapper.__init__(self, roothist)
        self._axisdefinition = AxisFormatTriggerPatches()

    def __copy__(self):
        '''
        Shallow copy constructor.
        '''
        duplicate = TriggerPatchTHnSparse(copy(self._rootthnsparse))
        duplicate.CopyCuts(self, False)
        return duplicate

    def __deepcopy__(self, memo):
        '''
        Deep copy constructor.
        '''
        duplicate = TriggerPatchTHnSparse(deepcopy(self._rootthnsparse))
        duplicate.CopyCuts(self, True)
        return duplicate

    def SetEtaCut(self, etamin, etamax):
        '''
        Restrict the eta axis to [etamin, etamax].
        '''
        self.ApplyCut("eta", etamin, etamax)

    def SetPhiCut(self, phimin, phimax):
        '''
        Restrict the phi axis to [phimin, phimax].
        '''
        self.ApplyCut("phi", phimin, phimax)

    def SetMainPatch(self):
        '''
        Select only main patches (isMain == 1).
        '''
        self.ApplyCut("isMain", 1, 1)
akretion/hr | refs/heads/8.0 | __unported__/hr_policy_absence/__init__.py | 3 | #-*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import hr_policy_absence
|
krez13/scikit-learn | refs/heads/master | sklearn/ensemble/partial_dependence.py | 251 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
    """Generate a grid of points based on the ``percentiles`` of ``X``.

    The grid is generated by placing ``grid_resolution`` equally
    spaced points between the ``percentiles`` of each column
    of ``X``.

    Parameters
    ----------
    X : ndarray
        The data

    percentiles : tuple of floats
        The percentiles which are used to construct the extreme
        values of the grid axes.

    grid_resolution : int
        The number of equally spaced points that are placed
        on the grid.

    Returns
    -------
    grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution * X.shape[1]``.

    axes : seq of ndarray
        The axes with which the grid has been created.
    """
    if len(percentiles) != 2:
        raise ValueError('percentile must be tuple of len 2')
    if not all(0. <= x <= 1. for x in percentiles):
        raise ValueError('percentile values must be in [0, 1]')

    # The percentile bounds are the same for every iteration below; compute
    # them once instead of once per column (the original recomputed
    # ``mquantiles`` over all of X inside the loop: O(n_features^2) work).
    emp_percentiles = mquantiles(X, prob=percentiles, axis=0)

    axes = []
    for col in range(X.shape[1]):
        uniques = np.unique(X[:, col])
        if uniques.shape[0] < grid_resolution:
            # feature has low resolution: use its unique values directly
            axis = uniques
        else:
            # create axis based on percentiles and grid resolution
            axis = np.linspace(emp_percentiles[0, col],
                               emp_percentiles[1, col],
                               num=grid_resolution, endpoint=True)
        axes.append(axis)

    return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
                       percentiles=(0.05, 0.95), grid_resolution=100):
    """Partial dependence of ``target_variables``.

    Partial dependence plots show the dependence between the joint values
    of the ``target_variables`` and the function represented
    by the ``gbrt``.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.

    target_variables : array-like, dtype=int
        The target features for which the partial dependecy should be
        computed (size should be smaller than 3 for visual renderings).

    grid : array-like, shape=(n_points, len(target_variables))
        The grid of ``target_variables`` values for which the
        partial dependecy should be evaluated (either ``grid`` or ``X``
        must be specified).

    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained. It is used to generate
        a ``grid`` for the ``target_variables``. The ``grid`` comprises
        ``grid_resolution`` equally spaced points between the two
        ``percentiles``.

    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used create the extreme values
        for the ``grid``. Only if ``X`` is not None.

    grid_resolution : int, default=100
        The number of equally spaced points on the ``grid``.

    Returns
    -------
    pdp : array, shape=(n_classes, n_points)
        The partial dependence function evaluated on the ``grid``.
        For regression and binary classification ``n_classes==1``.

    axes : seq of ndarray or None
        The axes with which the grid has been created or None if
        the grid has been given.

    Examples
    --------
    >>> samples = [[0, 0, 2], [1, 0, 0]]
    >>> labels = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
    >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
    >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
    (array([[-4.52...,  4.52...]]), [array([ 0.,  1.])])
    """
    # Validate the estimator: it must be a fitted gradient boosting model,
    # and exactly one of ``grid`` / ``X`` must be provided.
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)
    if (grid is None and X is None) or (grid is not None and X is not None):
        raise ValueError('Either grid or X must be specified')

    # Normalize target feature indices to a flat C-ordered int32 array, as
    # expected by the Cython helper below.
    target_variables = np.asarray(target_variables, dtype=np.int32,
                                  order='C').ravel()

    if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
        raise ValueError('target_variables must be in [0, %d]'
                         % (gbrt.n_features - 1))

    if X is not None:
        # Derive the evaluation grid from the training data percentiles.
        X = check_array(X, dtype=DTYPE, order='C')
        grid, axes = _grid_from_X(X[:, target_variables], percentiles,
                                  grid_resolution)
    else:
        assert grid is not None
        # dont return axes if grid is given
        axes = None
        # grid must be 2d
        if grid.ndim == 1:
            grid = grid[:, np.newaxis]
        if grid.ndim != 2:
            raise ValueError('grid must be 2d but is %dd' % grid.ndim)

    grid = np.asarray(grid, dtype=DTYPE, order='C')
    assert grid.shape[1] == target_variables.shape[0]

    n_trees_per_stage = gbrt.estimators_.shape[1]
    n_estimators = gbrt.estimators_.shape[0]
    pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
                   order='C')
    # Accumulate every tree's contribution into pdp[k] (one row per output
    # class); gbrt.learning_rate is passed through to the Cython helper.
    for stage in range(n_estimators):
        for k in range(n_trees_per_stage):
            tree = gbrt.estimators_[stage, k].tree_
            _partial_dependence_tree(tree, grid, target_variables,
                                     gbrt.learning_rate, pdp[k])

    return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
                            label=None, n_cols=3, grid_resolution=100,
                            percentiles=(0.05, 0.95), n_jobs=1,
                            verbose=0, ax=None, line_kw=None,
                            contour_kw=None, **fig_kw):
    """Partial dependence plots for ``features``.

    The ``len(features)`` plots are arranged in a grid with ``n_cols``
    columns. Two-way partial dependence plots are plotted as contour
    plots.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.

    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained.

    features : seq of tuples or ints
        If seq[i] is an int or a tuple with one int value, a one-way
        PDP is created; if seq[i] is a tuple of two ints, a two-way
        PDP is created.

    feature_names : seq of str
        Name of each feature; feature_names[i] holds
        the name of the feature with index i.

    label : object
        The class label for which the PDPs should be computed.
        Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.

    n_cols : int
        The number of columns in the grid plot (default: 3).

    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the PDP axes.

    grid_resolution : int, default=100
        The number of equally spaced points on the axes.

    n_jobs : int
        The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
        Defaults to 1.

    verbose : int
        Verbose output during PD computations. Defaults to 0.

    ax : Matplotlib axis object, default None
        An axis object onto which the plots will be drawn.

    line_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For one-way partial dependence plots.

    contour_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For two-way partial dependence plots.

    fig_kw : dict
        Dict with keywords passed to the figure() call.
        Note that all keywords not recognized above will be automatically
        included here.

    Returns
    -------
    fig : figure
        The Matplotlib Figure object.

    axs : seq of Axis objects
        A seq of Axis objects, one for each subplot.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_friedman1()
    >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
    >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
    ...
    """
    # Matplotlib is imported lazily so the rest of the module can be used
    # without it installed.
    import matplotlib.pyplot as plt
    from matplotlib import transforms
    from matplotlib.ticker import MaxNLocator
    from matplotlib.ticker import ScalarFormatter

    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)

    # set label_idx for multi-class GBRT
    if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
        if label is None:
            raise ValueError('label is not given for multi-class PDP')
        label_idx = np.searchsorted(gbrt.classes_, label)
        if gbrt.classes_[label_idx] != label:
            raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
    else:
        # regression and binary classification
        label_idx = 0

    X = check_array(X, dtype=DTYPE, order='C')
    if gbrt.n_features != X.shape[1]:
        raise ValueError('X.shape[1] does not match gbrt.n_features')

    if line_kw is None:
        line_kw = {'color': 'green'}
    if contour_kw is None:
        contour_kw = {}

    # convert feature_names to list
    if feature_names is None:
        # if not feature_names use fx indices as name
        feature_names = [str(i) for i in range(gbrt.n_features)]
    elif isinstance(feature_names, np.ndarray):
        feature_names = feature_names.tolist()

    def convert_feature(fx):
        # Map a feature name to its column index; plain ints pass through.
        if isinstance(fx, six.string_types):
            try:
                fx = feature_names.index(fx)
            except ValueError:
                raise ValueError('Feature %s not in feature_names' % fx)
        return fx

    # convert features into a seq of int tuples
    tmp_features = []
    for fxs in features:
        if isinstance(fxs, (numbers.Integral,) + six.string_types):
            fxs = (fxs,)
        try:
            fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
        except TypeError:
            raise ValueError('features must be either int, str, or tuple '
                             'of int/str')
        if not (1 <= np.size(fxs) <= 2):
            raise ValueError('target features must be either one or two')

        tmp_features.append(fxs)

    features = tmp_features

    # Resolve the display name(s) of each requested feature (tuple).
    names = []
    try:
        for fxs in features:
            l = []
            # explicit loop so "i" is bound for exception below
            for i in fxs:
                l.append(feature_names[i])
            names.append(l)
    except IndexError:
        raise ValueError('features[i] must be in [0, n_features) '
                         'but was %d' % i)

    # compute PD functions
    pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(partial_dependence)(gbrt, fxs, X=X,
                                    grid_resolution=grid_resolution,
                                    percentiles=percentiles)
        for fxs in features)

    # get global min and max values of PD grouped by plot type
    pdp_lim = {}
    for pdp, axes in pd_result:
        min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
        n_fx = len(axes)
        old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
        min_pd = min(min_pd, old_min_pd)
        max_pd = max(max_pd, old_max_pd)
        pdp_lim[n_fx] = (min_pd, max_pd)

    # create contour levels for two-way plots
    if 2 in pdp_lim:
        Z_level = np.linspace(*pdp_lim[2], num=8)

    if ax is None:
        fig = plt.figure(**fig_kw)
    else:
        fig = ax.get_figure()
        fig.clear()

    n_cols = min(n_cols, len(features))
    n_rows = int(np.ceil(len(features) / float(n_cols)))
    axs = []
    for i, fx, name, (pdp, axes) in zip(count(), features, names,
                                        pd_result):
        ax = fig.add_subplot(n_rows, n_cols, i + 1)

        if len(axes) == 1:
            # one-way PDP: simple line plot
            ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
        else:
            # make contour plot
            assert len(axes) == 2
            XX, YY = np.meshgrid(axes[0], axes[1])
            Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
            CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
                            colors='k')
            ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
                        vmin=Z_level[0], alpha=0.75, **contour_kw)
            ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)

        # plot data deciles + axes labels
        deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
        trans = transforms.blended_transform_factory(ax.transData,
                                                     ax.transAxes)
        ylim = ax.get_ylim()
        ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
        ax.set_xlabel(name[0])
        ax.set_ylim(ylim)

        # prevent x-axis ticks from overlapping
        ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
        tick_formatter = ScalarFormatter()
        tick_formatter.set_powerlimits((-3, 4))
        ax.xaxis.set_major_formatter(tick_formatter)

        if len(axes) > 1:
            # two-way PDP - y-axis deciles + labels
            deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
            trans = transforms.blended_transform_factory(ax.transAxes,
                                                         ax.transData)
            xlim = ax.get_xlim()
            ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
            ax.set_ylabel(name[1])
            # hline erases xlim
            ax.set_xlim(xlim)
        else:
            ax.set_ylabel('Partial dependence')

        if len(axes) == 1:
            ax.set_ylim(pdp_lim[1])
        axs.append(ax)

    fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
                        hspace=0.3)
    return fig, axs
|
mehdidc/scikit-learn | refs/heads/master | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
                       np.ravel(yy)[:, np.newaxis]))

# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)

# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())

fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)

# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
                Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']

for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
    t = time()
    birch_model.fit(X)
    time_ = time() - t
    # Report the timing captured above; the original recomputed ``time() - t``
    # inside the print call and left ``time_`` unused.
    print("Birch %s as the final step took %0.2f seconds" % (info, time_))

    # Plot result
    labels = birch_model.labels_
    centroids = birch_model.subcluster_centers_
    n_clusters = np.unique(labels).size
    print("n_clusters : %d" % n_clusters)

    ax = fig.add_subplot(1, 3, ind + 1)
    for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
        mask = labels == k
        ax.plot(X[mask, 0], X[mask, 1], 'w',
                markerfacecolor=col, marker='.')
        if birch_model.n_clusters is None:
            ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
                    markeredgecolor='k', markersize=5)
    ax.set_ylim([-25, 25])
    ax.set_xlim([-25, 25])
    ax.set_autoscaley_on(False)
    ax.set_title('Birch %s' % info)

# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
                      n_init=10, max_no_improvement=10, verbose=0,
                      random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)

ax = fig.add_subplot(1, 3, 3)
# NOTE(review): ``n_clusters`` here is left over from the Birch loop above
# (100 after its last iteration), which happens to match mbk's 100 clusters;
# zip truncates to the shortest sequence either way.
for this_centroid, k, col in zip(mbk.cluster_centers_,
                                 range(n_clusters), colors_):
    mask = mbk.labels_ == k
    ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
    ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
            markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
|
uberamd/NGECore2 | refs/heads/master | scripts/object/tangible/hair/rodian/hair_rodian_female_s45.py | 85615 | import sys
def setup(core, object):
    # NOTE(review): engine hook, presumably invoked by NGECore when an object
    # of this template is created — confirm against the script loader.
    # No customization is applied for this hair style.  The ``object``
    # parameter shadows the builtin, but renaming it could break callers
    # that pass it by keyword.
    return
danielquinn/paperless | refs/heads/master | src/documents/views.py | 1 | from django.http import HttpResponse, HttpResponseBadRequest
from django.views.generic import DetailView, FormView, TemplateView
from django_filters.rest_framework import DjangoFilterBackend
from django.conf import settings
from django.utils import cache
from paperless.db import GnuPG
from paperless.mixins import SessionOrBasicAuthMixin
from paperless.views import StandardPagination
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework.mixins import (
DestroyModelMixin,
ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin
)
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import (
GenericViewSet,
ModelViewSet,
ReadOnlyModelViewSet
)
from .filters import CorrespondentFilterSet, DocumentFilterSet, TagFilterSet
from .forms import UploadForm
from .models import Correspondent, Document, Log, Tag
from .serialisers import (
CorrespondentSerializer,
DocumentSerializer,
LogSerializer,
TagSerializer
)
class IndexView(TemplateView):
    """Render the main documents page (documents/index.html)."""
    template_name = "documents/index.html"
class FetchView(SessionOrBasicAuthMixin, DetailView):
    """Serve the raw file (or thumbnail) of a single document."""

    model = Document

    def render_to_response(self, context, **response_kwargs):
        """
        Override the default to return the unencrypted image/PDF as raw data.
        """

        # Map Paperless document type constants to response MIME types.
        content_types = {
            Document.TYPE_PDF: "application/pdf",
            Document.TYPE_PNG: "image/png",
            Document.TYPE_JPG: "image/jpeg",
            Document.TYPE_GIF: "image/gif",
            Document.TYPE_TIF: "image/tiff",
            Document.TYPE_CSV: "text/csv",
            Document.TYPE_MD: "text/markdown",
            Document.TYPE_TXT: "text/plain"
        }

        if self.kwargs["kind"] == "thumb":
            # Thumbnails are always PNG; mark them privately cacheable for a
            # year (max_age=31536000 seconds).
            response = HttpResponse(
                self._get_raw_data(self.object.thumbnail_file),
                content_type=content_types[Document.TYPE_PNG]
            )
            cache.patch_cache_control(response, max_age=31536000, private=True)
            return response

        response = HttpResponse(
            self._get_raw_data(self.object.source_file),
            content_type=content_types[self.object.file_type]
        )

        # Render in the browser when configured to, or when previewing;
        # otherwise force a download.
        DISPOSITION = (
            'inline' if settings.INLINE_DOC or self.kwargs["kind"] == 'preview'
            else 'attachment'
        )
        response["Content-Disposition"] = '{}; filename="{}"'.format(
            DISPOSITION, self.object.file_name)

        return response

    def _get_raw_data(self, file_handle):
        # Unencrypted documents are returned as-is; encrypted ones are
        # decrypted with GnuPG first.
        if self.object.storage_type == Document.STORAGE_TYPE_UNENCRYPTED:
            return file_handle
        return GnuPG.decrypted(file_handle)
class PushView(SessionOrBasicAuthMixin, FormView):
    """
    A crude REST-ish API for creating documents.
    """

    form_class = UploadForm

    def form_valid(self, form):
        # Save the upload and answer 202 Accepted with a minimal body.
        form.save()
        return HttpResponse("1", status=202)

    def form_invalid(self, form):
        # Surface validation errors in the 400 response body.
        return HttpResponseBadRequest(str(form.errors))
class CorrespondentViewSet(ModelViewSet):
    """Full CRUD API for correspondents; filterable and orderable by
    name/slug, paginated, authenticated users only."""
    model = Correspondent
    queryset = Correspondent.objects.all()
    serializer_class = CorrespondentSerializer
    pagination_class = StandardPagination
    permission_classes = (IsAuthenticated,)
    filter_backends = (DjangoFilterBackend, OrderingFilter)
    filter_class = CorrespondentFilterSet
    ordering_fields = ("name", "slug")
class TagViewSet(ModelViewSet):
    """Full CRUD API for tags; filterable and orderable by name/slug,
    paginated, authenticated users only."""
    model = Tag
    queryset = Tag.objects.all()
    serializer_class = TagSerializer
    pagination_class = StandardPagination
    permission_classes = (IsAuthenticated,)
    filter_backends = (DjangoFilterBackend, OrderingFilter)
    filter_class = TagFilterSet
    ordering_fields = ("name", "slug")
class DocumentViewSet(RetrieveModelMixin,
                      UpdateModelMixin,
                      DestroyModelMixin,
                      ListModelMixin,
                      GenericViewSet):
    """List/retrieve/update/delete API for documents with full-text search.

    CreateModelMixin is deliberately absent: documents enter the system
    through other paths (e.g. PushView), not through this endpoint.
    """
    model = Document
    queryset = Document.objects.all()
    serializer_class = DocumentSerializer
    pagination_class = StandardPagination
    permission_classes = (IsAuthenticated,)
    filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
    filter_class = DocumentFilterSet
    # Search covers the title, the correspondent's name, and the OCR'd text.
    search_fields = ("title", "correspondent__name", "content")
    ordering_fields = (
        "id", "title", "correspondent__name", "created", "modified", "added")
class LogViewSet(ReadOnlyModelViewSet):
    """Read-only API for grouped log entries, orderable by time."""
    model = Log
    queryset = Log.objects.all().by_group()
    serializer_class = LogSerializer
    pagination_class = StandardPagination
    permission_classes = (IsAuthenticated,)
    filter_backends = (DjangoFilterBackend, OrderingFilter)
    ordering_fields = ("time",)
|
glu10/trough | refs/heads/master | newsStore.py | 1 | """
Trough - a GTK+ RSS news reader
Copyright (C) 2017 Andrew Asp
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see {http://www.gnu.org/licenses/}.
Trough homepage: https://github.com/glu10/trough
"""
from typing import List
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from item import Item
class NewsStore:
    """ A centralized data repository of all viewable fetched content. """

    # Item attributes backing the ListStore columns, in column order.
    _COLUMNS = ('feed_name', 'uri', 'title', 'description', 'article')

    def __init__(self):
        # One string column per Item field.
        self.store = Gtk.ListStore(str, str, str, str, str)

    def append(self, item: Item) -> None:
        """Add one fetched item as a new row."""
        self.store.append([getattr(item, column) for column in self._COLUMNS])

    @staticmethod
    def row_to_item(row: List[str]) -> Item:
        """Rebuild an Item from a raw store row."""
        return Item(*row)

    def model(self) -> Gtk.TreeModel:
        """Expose the underlying tree model for views to attach to."""
        return self.store

    def clear(self) -> None:
        """Remove every stored row."""
        self.store.clear()
|
portableant/open-context-py | refs/heads/master | opencontext_py/apps/contexts/tests.py | 9 | from django.test import TestCase
from opencontext_py.apps.ocitems.ocitem.models import OCitem
#from opencontext_py.apps.ldata.linkannotations.recursion import LinkRecursion
class TestPredicateValues(TestCase):
    """Checks the JSON-LD output of a known Open Context item."""

    def setUp(self):
        # Fetch the test item by UUID and cache its JSON-LD label.
        self.oc_item = OCitem(
        ).get_item('9E474B89-E36B-4B9D-2D38-7C7CCBDBB030')
        self.json_ld = self.oc_item.json_ld
        self.oc_label = self.json_ld['label']

    def test_get_oc_item_label(self):
        """The item's JSON-LD label matches the expected record."""
        #oc_label = self.json_ld['label']
        self.assertEqual(self.oc_label, 'Bone 4502')
|
alexgorban/models | refs/heads/master | research/syntaxnet/examples/dragnn/tutorial_2.py | 18 | """Second example: separate tagger and parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
from google.protobuf import text_format
from dragnn.protos import spec_pb2
from dragnn.python import graph_builder
from dragnn.python import lexicon
from dragnn.python import spec_builder
from dragnn.python import visualization
from syntaxnet import sentence_pb2
import dragnn.python.load_dragnn_cc_impl
import syntaxnet.load_parser_ops
# Bundled tutorial data lives next to this file; the lexicon is (re)built
# under /tmp on first run.
data_dir = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'tutorial_data')
lexicon_dir = '/tmp/tutorial/lexicon'
training_sentence = os.path.join(data_dir, 'sentence.prototext')

if not os.path.isdir(lexicon_dir):
  os.makedirs(lexicon_dir)
def main(argv):
  """Build a tagger+parser DRAGNN spec, trace one training step, write HTML."""
  del argv  # unused

  # Constructs lexical resources for SyntaxNet in the given resource path, from
  # the training data.
  lexicon.build_lexicon(
      lexicon_dir,
      training_sentence,
      training_corpus_format='sentence-prototext')

  # Construct the ComponentSpec for tagging. This is a simple left-to-right RNN
  # sequence tagger.
  tagger = spec_builder.ComponentSpecBuilder('tagger')
  tagger.set_network_unit(name='FeedForwardNetwork', hidden_layer_sizes='256')
  tagger.set_transition_system(name='tagger')
  tagger.add_fixed_feature(name='words', fml='input.word', embedding_dim=64)
  tagger.add_rnn_link(embedding_dim=-1)
  tagger.fill_from_resources(lexicon_dir)

  # Construct the ComponentSpec for parsing.
  parser = spec_builder.ComponentSpecBuilder('parser')
  parser.set_network_unit(
      name='FeedForwardNetwork',
      hidden_layer_sizes='256',
      layer_norm_hidden='True')
  parser.set_transition_system(name='arc-standard')
  # The parser reads the tagger's logits for the input token and the top two
  # stack tokens.
  parser.add_token_link(
      source=tagger,
      fml='input.focus stack.focus stack(1).focus',
      embedding_dim=32,
      source_layer='logits')

  # Recurrent connection for the arc-standard parser. For both tokens on the
  # stack, we connect to the last time step to either SHIFT or REDUCE that
  # token. This allows the parser to build up compositional representations of
  # phrases.
  parser.add_link(
      source=parser,  # recurrent connection
      name='rnn-stack',  # unique identifier
      fml='stack.focus stack(1).focus',  # look for both stack tokens
      source_translator='shift-reduce-step',  # maps token indices -> step
      embedding_dim=32)  # project down to 32 dims
  parser.fill_from_resources(lexicon_dir)

  master_spec = spec_pb2.MasterSpec()
  master_spec.component.extend([tagger.spec, parser.spec])
  hyperparam_config = spec_pb2.GridPoint()

  # Build the TensorFlow graph.
  graph = tf.Graph()
  with graph.as_default():
    builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)

    target = spec_pb2.TrainTarget()
    target.name = 'all'
    target.unroll_using_oracle.extend([True, True])
    # trace_only=True: no parameter update, only a traced forward pass.
    dry_run = builder.add_training_from_config(target, trace_only=True)

  # Read in serialized protos from training data.
  sentence = sentence_pb2.Sentence()
  text_format.Merge(open(training_sentence).read(), sentence)
  training_set = [sentence.SerializeToString()]

  with tf.Session(graph=graph) as sess:
    # Make sure to re-initialize all underlying state.
    sess.run(tf.initialize_all_variables())
    traces = sess.run(
        dry_run['traces'], feed_dict={dry_run['input_batch']: training_set})

  # NOTE(review): writing ``.encode('utf-8')`` bytes to a text-mode file is a
  # Python 2 idiom; on Python 3 this raises TypeError — confirm target version.
  with open('dragnn_tutorial_2.html', 'w') as f:
    f.write(
        visualization.trace_html(
            traces[0], height='400px', master_spec=master_spec).encode('utf-8'))
# tf.app.run() parses flags and dispatches to main().
if __name__ == '__main__':
  tf.app.run()
|
totalgood/nlpia | refs/heads/master | src/nlpia/book/examples/ch06_word2vec_embedding_viz.py | 1 | # script adopted from https://gist.github.com/lampts/026a4d6400b1efac9a13a3296f16e655
import gensim  # noqa: F401 -- used by the commented-out local-model loading variant below
import numpy as np
import tensorflow as tf
from nlpia.loaders import get_data
from tensorflow.contrib.tensorboard.plugins import projector

# Words whose word2vec embeddings will be visualized in TensorBoard
# (US state capitals and their states).
words = ('Sacramento', 'California', 'Oregon', 'Salem', 'Washington', 'Olympia')

# Load pretrained word2vec vectors; ``limit`` caps the vocabulary to save RAM.
# Alternative: load the raw GoogleNews vectors directly with gensim:
# model = gensim.models.KeyedVectors.load_word2vec_format('~/Downloads/GoogleNews-vectors-negative300.bin.gz', binary=True, limit=200000)
model = get_data('w2v', limit=300000)  # <1>

# One 300-dimensional vector per word of interest.
w2v_10K = np.zeros((6, 300))
# NOTE(review): hard-coded user-specific path -- adjust for your machine.
with open("/Users/hannes/Downloads/prefix_metadata.tsv", 'w+') as file_metadata:
    for i, word in enumerate(list(words)):
        w2v_10K[i] = model[word]
        # BUG FIX: the original wrote ``word.encode('utf-8') + '\n'`` which
        # concatenates bytes with str and raises TypeError on Python 3; the
        # file is opened in text mode, so write the string directly.
        file_metadata.write(word + '\n')

# define the model without training
sess = tf.InteractiveSession()
with tf.device("/cpu:0"):
    embedding = tf.Variable(w2v_10K, trainable=False, name='word2vec_embedding')
tf.global_variables_initializer().run()
saver = tf.train.Saver()
writer = tf.summary.FileWriter('/Users/hannes/Downloads', sess.graph)
# adding into projector
config = projector.ProjectorConfig()
embed = config.embeddings.add()
embed.tensor_name = 'word2vec_embedding'
embed.metadata_path = '/Users/hannes/Downloads//prefix_metadata.tsv'
projector.visualize_embeddings(writer, config)
saver.save(sess, '/Users/hannes/Downloads/prefix_model.ckpt', global_step=1000)
# open tensorboard with logdir, check localhost:6006 for viewing your embedding.
# tensorboard --logdir="./projector/"
|
gcode-mirror/audacity | refs/heads/master | lib-src/lv2/lv2/plugins/eg03-metro.lv2/waflib/Tools/c_config.py | 69 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re,shlex,sys
from waflib import Build,Utils,Task,Options,Logs,Errors,ConfigSet,Runner
from waflib.TaskGen import after_method,feature
from waflib.Configure import conf
WAF_CONFIG_H='config.h'
DEFKEYS='define_key'
INCKEYS='include_key'
cfg_ver={'atleast-version':'>=','exact-version':'==','max-version':'<=',}
SNIP_FUNCTION='''
int main(int argc, char **argv) {
void *p;
(void)argc; (void)argv;
p=(void*)(%s);
return 0;
}
'''
SNIP_TYPE='''
int main(int argc, char **argv) {
(void)argc; (void)argv;
if ((%(type_name)s *) 0) return 0;
if (sizeof (%(type_name)s)) return 0;
return 1;
}
'''
SNIP_EMPTY_PROGRAM='''
int main(int argc, char **argv) {
(void)argc; (void)argv;
return 0;
}
'''
SNIP_FIELD='''
int main(int argc, char **argv) {
char *off;
(void)argc; (void)argv;
off = (char*) &((%(type_name)s*)0)->%(field_name)s;
return (size_t) off < sizeof(%(type_name)s);
}
'''
MACRO_TO_DESTOS={'__linux__':'linux','__GNU__':'gnu','__FreeBSD__':'freebsd','__NetBSD__':'netbsd','__OpenBSD__':'openbsd','__sun':'sunos','__hpux':'hpux','__sgi':'irix','_AIX':'aix','__CYGWIN__':'cygwin','__MSYS__':'msys','_UWIN':'uwin','_WIN64':'win32','_WIN32':'win32','__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__':'darwin','__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__':'darwin','__QNX__':'qnx','__native_client__':'nacl'}
MACRO_TO_DEST_CPU={'__x86_64__':'x86_64','__amd64__':'x86_64','__i386__':'x86','__ia64__':'ia','__mips__':'mips','__sparc__':'sparc','__alpha__':'alpha','__aarch64__':'aarch64','__thumb__':'thumb','__arm__':'arm','__hppa__':'hppa','__powerpc__':'powerpc','__ppc__':'powerpc','__convex__':'convex','__m68k__':'m68k','__s390x__':'s390x','__s390__':'s390','__sh__':'sh',}
@conf
def parse_flags(self,line,uselib_store,env=None,force_static=False):
	"""Parse a pkg-config/compiler flag string and distribute the flags
	into the env variables (INCLUDES_*, DEFINES_*, LIB_*, LIBPATH_*,
	CFLAGS_*, LINKFLAGS_*, ...) suffixed with *uselib_store*."""
	assert(isinstance(line,str))
	env=env or self.env
	app=env.append_value
	appu=env.append_unique
	# posix=False keeps backslashes/quotes intact on Windows-style flags.
	lex=shlex.shlex(line,posix=False)
	lex.whitespace_split=True
	lex.commenters=''
	lst=list(lex)
	uselib=uselib_store
	# Consume tokens left to right; two-letter prefix dispatch (-I, -D, -l, -L ...).
	while lst:
		x=lst.pop(0)
		st=x[:2]
		ot=x[2:]
		if st=='-I'or st=='/I':
			if not ot:ot=lst.pop(0)
			appu('INCLUDES_'+uselib,[ot])
		elif st=='-include':
			tmp=[x,lst.pop(0)]
			app('CFLAGS',tmp)
			app('CXXFLAGS',tmp)
		elif st=='-D'or(env.CXX_NAME=='msvc'and st=='/D'):
			if not ot:ot=lst.pop(0)
			app('DEFINES_'+uselib,[ot])
		elif st=='-l':
			if not ot:ot=lst.pop(0)
			# force_static routes libraries into STLIB_* instead of LIB_*.
			prefix=force_static and'STLIB_'or'LIB_'
			appu(prefix+uselib,[ot])
		elif st=='-L':
			if not ot:ot=lst.pop(0)
			appu('LIBPATH_'+uselib,[ot])
		elif x.startswith('/LIBPATH:'):
			appu('LIBPATH_'+uselib,[x.replace('/LIBPATH:','')])
		elif x=='-pthread'or x.startswith('+')or x.startswith('-std'):
			# These flags affect compiling and linking alike.
			app('CFLAGS_'+uselib,[x])
			app('CXXFLAGS_'+uselib,[x])
			app('LINKFLAGS_'+uselib,[x])
		elif x=='-framework':
			appu('FRAMEWORK_'+uselib,[lst.pop(0)])
		elif x.startswith('-F'):
			appu('FRAMEWORKPATH_'+uselib,[x[2:]])
		elif x.startswith('-Wl'):
			app('LINKFLAGS_'+uselib,[x])
		elif x.startswith('-m')or x.startswith('-f')or x.startswith('-dynamic'):
			app('CFLAGS_'+uselib,[x])
			app('CXXFLAGS_'+uselib,[x])
		elif x.startswith('-bundle'):
			app('LINKFLAGS_'+uselib,[x])
		elif x.startswith('-undefined'):
			arg=lst.pop(0)
			app('LINKFLAGS_'+uselib,[x,arg])
		elif x.startswith('-arch')or x.startswith('-isysroot'):
			tmp=[x,lst.pop(0)]
			app('CFLAGS_'+uselib,tmp)
			app('CXXFLAGS_'+uselib,tmp)
			app('LINKFLAGS_'+uselib,tmp)
		elif x.endswith('.a')or x.endswith('.so')or x.endswith('.dylib')or x.endswith('.lib'):
			# Bare library files are passed straight through to the linker.
			appu('LINKFLAGS_'+uselib,[x])
@conf
def ret_msg(self,f,kw):
	"""Resolve a result message: return *f* unchanged when it is a plain
	string, otherwise treat it as a callable and invoke it with *kw*."""
	return f if isinstance(f,str) else f(kw)
@conf
def validate_cfg(self,kw):
	"""Fill in default 'path', 'msg', 'okmsg' and 'errmsg' values in *kw*
	before a check_cfg() run; locates pkg-config if no path was given."""
	if not'path'in kw:
		if not self.env.PKGCONFIG:
			self.find_program('pkg-config',var='PKGCONFIG')
		kw['path']=self.env.PKGCONFIG
	# Version-of-pkg-config-itself check: only a msg is needed.
	if'atleast_pkgconfig_version'in kw:
		if not'msg'in kw:
			kw['msg']='Checking for pkg-config version >= %r'%kw['atleast_pkgconfig_version']
		return
	if not'okmsg'in kw:
		kw['okmsg']='yes'
	if not'errmsg'in kw:
		kw['errmsg']='not found'
	if'modversion'in kw:
		if not'msg'in kw:
			kw['msg']='Checking for %r version'%kw['modversion']
		return
	# atleast-version / exact-version / max-version checks need a package name.
	for x in cfg_ver.keys():
		y=x.replace('-','_')
		if y in kw:
			if not'package'in kw:
				raise ValueError('%s requires a package'%x)
			if not'msg'in kw:
				kw['msg']='Checking for %r %s %s'%(kw['package'],cfg_ver[x],kw[y])
			return
	if not'msg'in kw:
		kw['msg']='Checking for %r'%(kw['package']or kw['path'])
@conf
def exec_cfg(self,kw):
	"""Run the pkg-config command described by *kw* and record the results
	(defines, versions, variables, parsed flags) in the configuration env."""
	def define_it():
		# Record HAVE_<PKG>=1 for the package that was found.
		self.define(self.have_define(kw.get('uselib_store',kw['package'])),1,0)
	if'atleast_pkgconfig_version'in kw:
		cmd=[kw['path'],'--atleast-pkgconfig-version=%s'%kw['atleast_pkgconfig_version']]
		self.cmd_and_log(cmd)
		if not'okmsg'in kw:
			kw['okmsg']='yes'
		return
	# Version comparison checks (atleast/exact/max-version).
	for x in cfg_ver:
		y=x.replace('-','_')
		if y in kw:
			self.cmd_and_log([kw['path'],'--%s=%s'%(x,kw[y]),kw['package']])
			if not'okmsg'in kw:
				kw['okmsg']='yes'
			define_it()
			break
	if'modversion'in kw:
		version=self.cmd_and_log([kw['path'],'--modversion',kw['modversion']]).strip()
		self.define('%s_VERSION'%Utils.quote_define_name(kw.get('uselib_store',kw['modversion'])),version)
		return version
	lst=[kw['path']]
	defi=kw.get('define_variable',None)
	if not defi:
		defi=self.env.PKG_CONFIG_DEFINES or{}
	for key,val in defi.items():
		lst.append('--define-variable=%s=%s'%(key,val))
	# Query individual pkg-config variables into env as <USELIB>_<var>.
	if'variables'in kw:
		env=kw.get('env',self.env)
		uselib=kw.get('uselib_store',kw['package'].upper())
		vars=Utils.to_list(kw['variables'])
		for v in vars:
			val=self.cmd_and_log(lst+['--variable='+v]).strip()
			var='%s_%s'%(uselib,v)
			env[var]=val
		if not'okmsg'in kw:
			kw['okmsg']='yes'
		return
	static=False
	if'args'in kw:
		args=Utils.to_list(kw['args'])
		# --static means libraries found must go into STLIB_* variables.
		if'--static'in args or'--static-libs'in args:
			static=True
		lst+=args
	lst.extend(Utils.to_list(kw['package']))
	ret=self.cmd_and_log(lst)
	if not'okmsg'in kw:
		kw['okmsg']='yes'
	define_it()
	self.parse_flags(ret,kw.get('uselib_store',kw['package'].upper()),kw.get('env',self.env),force_static=static)
	return ret
@conf
def check_cfg(self,*k,**kw):
	"""Front-end for pkg-config checks: validate arguments, run the check
	and display/record success or failure. Returns pkg-config output."""
	# Shorthand: check_cfg('pkg --cflags --libs') style positional call.
	if k:
		lst=k[0].split()
		kw['package']=lst[0]
		kw['args']=' '.join(lst[1:])
	self.validate_cfg(kw)
	if'msg'in kw:
		self.start_msg(kw['msg'])
	ret=None
	try:
		ret=self.exec_cfg(kw)
	except self.errors.WafError:
		if'errmsg'in kw:
			self.end_msg(kw['errmsg'],'YELLOW')
		# In verbose mode re-raise so the user sees the real traceback.
		if Logs.verbose>1:
			raise
		else:
			self.fatal('The configuration failed')
	else:
		kw['success']=ret
		if'okmsg'in kw:
			self.end_msg(self.ret_msg(kw['okmsg'],kw))
	return ret
@conf
def validate_c(self,kw):
	"""Normalize the keyword arguments of a conf.check() call: pick the
	compiler, build the C/C++ test code from snippets, and set default
	messages, defines and uselib stores."""
	if not'env'in kw:
		kw['env']=self.env.derive()
	env=kw['env']
	# Choose c vs cxx when the caller did not specify either.
	if not'compiler'in kw and not'features'in kw:
		kw['compiler']='c'
		if env['CXX_NAME']and Task.classes.get('cxx',None):
			kw['compiler']='cxx'
			if not self.env['CXX']:
				self.fatal('a c++ compiler is required')
		else:
			if not self.env['CC']:
				self.fatal('a c compiler is required')
	if not'compile_mode'in kw:
		kw['compile_mode']='c'
		if'cxx'in Utils.to_list(kw.get('features',[]))or kw.get('compiler','')=='cxx':
			kw['compile_mode']='cxx'
	if not'type'in kw:
		kw['type']='cprogram'
	if not'features'in kw:
		kw['features']=[kw['compile_mode'],kw['type']]
	else:
		kw['features']=Utils.to_list(kw['features'])
	if not'compile_filename'in kw:
		# test.c for C mode, test.cpp for C++ mode.
		kw['compile_filename']='test.c'+((kw['compile_mode']=='cxx')and'pp'or'')
	def to_header(dct):
		# Build the '#include <...>' prologue from header_name.
		if'header_name'in dct:
			dct=Utils.to_list(dct['header_name'])
			return''.join(['#include <%s>\n'%x for x in dct])
		return''
	# OSX framework checks: derive header name and uselib store.
	if'framework_name'in kw:
		fwkname=kw['framework_name']
		if not'uselib_store'in kw:
			kw['uselib_store']=fwkname.upper()
		if not kw.get('no_header',False):
			if not'header_name'in kw:
				kw['header_name']=[]
			fwk='%s/%s.h'%(fwkname,fwkname)
			if kw.get('remove_dot_h',None):
				fwk=fwk[:-2]
			kw['header_name']=Utils.to_list(kw['header_name'])+[fwk]
		kw['msg']='Checking for framework %s'%fwkname
		kw['framework']=fwkname
	# Build the test program from one of the snippet templates.
	if'function_name'in kw:
		fu=kw['function_name']
		if not'msg'in kw:
			kw['msg']='Checking for function %s'%fu
		kw['code']=to_header(kw)+SNIP_FUNCTION%fu
		if not'uselib_store'in kw:
			kw['uselib_store']=fu.upper()
		if not'define_name'in kw:
			kw['define_name']=self.have_define(fu)
	elif'type_name'in kw:
		tu=kw['type_name']
		if not'header_name'in kw:
			kw['header_name']='stdint.h'
		if'field_name'in kw:
			field=kw['field_name']
			kw['code']=to_header(kw)+SNIP_FIELD%{'type_name':tu,'field_name':field}
			if not'msg'in kw:
				kw['msg']='Checking for field %s in %s'%(field,tu)
			if not'define_name'in kw:
				kw['define_name']=self.have_define((tu+'_'+field).upper())
		else:
			kw['code']=to_header(kw)+SNIP_TYPE%{'type_name':tu}
			if not'msg'in kw:
				kw['msg']='Checking for type %s'%tu
			if not'define_name'in kw:
				kw['define_name']=self.have_define(tu.upper())
	elif'header_name'in kw:
		if not'msg'in kw:
			kw['msg']='Checking for header %s'%kw['header_name']
		l=Utils.to_list(kw['header_name'])
		assert len(l)>0,'list of headers in header_name is empty'
		kw['code']=to_header(kw)+SNIP_EMPTY_PROGRAM
		if not'uselib_store'in kw:
			kw['uselib_store']=l[0].upper()
		if not'define_name'in kw:
			kw['define_name']=self.have_define(l[0])
	if'lib'in kw:
		if not'msg'in kw:
			kw['msg']='Checking for library %s'%kw['lib']
		if not'uselib_store'in kw:
			kw['uselib_store']=kw['lib'].upper()
	if'stlib'in kw:
		if not'msg'in kw:
			kw['msg']='Checking for static library %s'%kw['stlib']
		if not'uselib_store'in kw:
			kw['uselib_store']=kw['stlib'].upper()
	if'fragment'in kw:
		# Raw user-supplied test code overrides the generated snippet.
		kw['code']=kw['fragment']
		if not'msg'in kw:
			kw['msg']='Checking for code snippet'
		if not'errmsg'in kw:
			kw['errmsg']='no'
	for(flagsname,flagstype)in[('cxxflags','compiler'),('cflags','compiler'),('linkflags','linker')]:
		if flagsname in kw:
			if not'msg'in kw:
				kw['msg']='Checking for %s flags %s'%(flagstype,kw[flagsname])
			if not'errmsg'in kw:
				kw['errmsg']='no'
	if not'execute'in kw:
		kw['execute']=False
	if kw['execute']:
		kw['features'].append('test_exec')
	if not'errmsg'in kw:
		kw['errmsg']='not found'
	if not'okmsg'in kw:
		kw['okmsg']='yes'
	if not'code'in kw:
		kw['code']=SNIP_EMPTY_PROGRAM
	# Prepend headers accumulated via auto_add_header_name in earlier checks.
	if self.env[INCKEYS]:
		kw['code']='\n'.join(['#include <%s>'%x for x in self.env[INCKEYS]])+'\n'+kw['code']
	if not kw.get('success'):kw['success']=None
	if'define_name'in kw:
		self.undefine(kw['define_name'])
	assert'msg'in kw,'invalid parameters, read http://freehackers.org/~tnagy/wafbook/single.html#config_helpers_c'
@conf
def post_check(self,*k,**kw):
	"""Interpret the result of a conf.check() run: compute success,
	record defines, remember auto-added headers and propagate the
	uselib variables (LIB_*, CFLAGS_*, ...) from *kw* into the env."""
	is_success=0
	if kw['execute']:
		if kw['success']is not None:
			if kw.get('define_ret',False):
				# Keep the program output itself as the result.
				is_success=kw['success']
			else:
				is_success=(kw['success']==0)
	else:
		is_success=(kw['success']==0)
	if'define_name'in kw:
		if'header_name'in kw or'function_name'in kw or'type_name'in kw or'fragment'in kw:
			if kw['execute']and kw.get('define_ret',None)and isinstance(is_success,str):
				# define_ret: store the captured output string as the define value.
				self.define(kw['define_name'],is_success,quote=kw.get('quote',1))
			else:
				self.define_cond(kw['define_name'],is_success)
		else:
			self.define_cond(kw['define_name'],is_success)
	if'header_name'in kw:
		if kw.get('auto_add_header_name',False):
			self.env.append_value(INCKEYS,Utils.to_list(kw['header_name']))
	if is_success and'uselib_store'in kw:
		from waflib.Tools import ccroot
		# Collect the env variable names relevant to the requested features.
		_vars=set([])
		for x in kw['features']:
			if x in ccroot.USELIB_VARS:
				_vars|=ccroot.USELIB_VARS[x]
		for k in _vars:
			lk=k.lower()
			if k=='INCLUDES':lk='includes'
			if k=='DEFINES':lk='defines'
			if lk in kw:
				val=kw[lk]
				if isinstance(val,str):
					val=val.rstrip(os.path.sep)
				self.env.append_unique(k+'_'+kw['uselib_store'],val)
	return is_success
@conf
def check(self,*k,**kw):
	"""Perform a configuration test: build (and possibly run) a small
	C/C++ program and record the outcome. Raises on failure."""
	self.validate_c(kw)
	self.start_msg(kw['msg'])
	ret=None
	try:
		ret=self.run_c_code(*k,**kw)
	except self.errors.ConfigurationError:
		self.end_msg(kw['errmsg'],'YELLOW')
		# Verbose mode propagates the original exception for debugging.
		if Logs.verbose>1:
			raise
		else:
			self.fatal('The configuration failed')
	else:
		kw['success']=ret
		ret=self.post_check(*k,**kw)
		if not ret:
			self.end_msg(kw['errmsg'],'YELLOW')
			self.fatal('The configuration failed %r'%ret)
		else:
			self.end_msg(self.ret_msg(kw['okmsg'],kw))
	return ret
class test_exec(Task.Task):
	"""Task that executes the compiled configuration test program and
	stores its result (exit code or captured output) on bld.retval."""
	color='PINK'
	def run(self):
		if getattr(self.generator,'rpath',None):
			# An rpath was set, so the binary can locate its libraries itself.
			if getattr(self.generator,'define_ret',False):
				self.generator.bld.retval=self.generator.bld.cmd_and_log([self.inputs[0].abspath()])
			else:
				self.generator.bld.retval=self.generator.bld.exec_command([self.inputs[0].abspath()])
		else:
			# No rpath: prepend the build folder to the library search paths.
			env=self.env.env or{}
			env.update(dict(os.environ))
			for var in('LD_LIBRARY_PATH','DYLD_LIBRARY_PATH','PATH'):
				env[var]=self.inputs[0].parent.abspath()+os.path.pathsep+env.get(var,'')
			if getattr(self.generator,'define_ret',False):
				self.generator.bld.retval=self.generator.bld.cmd_and_log([self.inputs[0].abspath()],env=env)
			else:
				self.generator.bld.retval=self.generator.bld.exec_command([self.inputs[0].abspath()],env=env)
@feature('test_exec')
@after_method('apply_link')
def test_exec_fun(self):
	"""Schedule a test_exec task to run the freshly linked test binary."""
	self.create_task('test_exec',self.link_task.outputs[0])
# Values for the --confcache option: reuse cached results / cache failures too.
CACHE_RESULTS=1
COMPILE_ERRORS=2
@conf
def run_c_code(self,*k,**kw):
	"""Create an isolated build folder, write the test code and compile it
	with a temporary BuildContext; results may be cached on disk keyed by
	a hash of the check parameters."""
	# Hash the parameters (minus env) to get a stable per-check folder name.
	lst=[str(v)for(p,v)in kw.items()if p!='env']
	h=Utils.h_list(lst)
	dir=self.bldnode.abspath()+os.sep+(not Utils.is_win32 and'.'or'')+'conf_check_'+Utils.to_hex(h)
	try:
		os.makedirs(dir)
	except OSError:
		pass
	try:
		os.stat(dir)
	except OSError:
		self.fatal('cannot use the configuration test folder %r'%dir)
	cachemode=getattr(Options.options,'confcache',None)
	if cachemode==CACHE_RESULTS:
		try:
			proj=ConfigSet.ConfigSet(os.path.join(dir,'cache_run_c_code'))
		except OSError:
			pass
		else:
			ret=proj['cache_run_c_code']
			# Cached build failures are re-raised instead of returned.
			if isinstance(ret,str)and ret.startswith('Test does not build'):
				self.fatal(ret)
			return ret
	bdir=os.path.join(dir,'testbuild')
	if not os.path.exists(bdir):
		os.makedirs(bdir)
	# Throwaway build context for the test program.
	self.test_bld=bld=Build.BuildContext(top_dir=dir,out_dir=bdir)
	bld.init_dirs()
	bld.progress_bar=0
	bld.targets='*'
	if kw['compile_filename']:
		node=bld.srcnode.make_node(kw['compile_filename'])
		node.write(kw['code'])
	bld.logger=self.logger
	bld.all_envs.update(self.all_envs)
	bld.env=kw['env']
	o=bld(features=kw['features'],source=kw['compile_filename'],target='testprog')
	# Forward all check parameters to the task generator.
	for k,v in kw.items():
		setattr(o,k,v)
	self.to_log("==>\n%s\n<=="%kw['code'])
	bld.targets='*'
	ret=-1
	try:
		try:
			bld.compile()
		except Errors.WafError:
			ret='Test does not build: %s'%Utils.ex_stack()
			self.fatal(ret)
		else:
			# retval is set by the test_exec task when execute=True.
			ret=getattr(bld,'retval',0)
	finally:
		# Always persist the outcome so a later --confcache run can reuse it.
		proj=ConfigSet.ConfigSet()
		proj['cache_run_c_code']=ret
		proj.store(os.path.join(dir,'cache_run_c_code'))
	return ret
@conf
def check_cxx(self,*k,**kw):
	"""Shortcut for conf.check() with the C++ compiler."""
	kw['compiler']='cxx'
	return self.check(*k,**kw)
@conf
def check_cc(self,*k,**kw):
	"""Shortcut for conf.check() with the C compiler."""
	kw['compiler']='c'
	return self.check(*k,**kw)
@conf
def define(self,key,val,quote=True):
	"""Store a 'key=value' pair in env DEFINES, replacing any previous
	value for the same key. Strings are quoted unless quote is falsy."""
	assert key and isinstance(key,str)
	# Normalize booleans/None to 1/0 like the C preprocessor expects.
	if val is True:
		val=1
	elif val in(False,None):
		val=0
	if isinstance(val,int)or isinstance(val,float):
		s='%s=%s'
	else:
		s=quote and'%s="%s"'or'%s=%s'
	app=s%(key,str(val))
	ban=key+'='
	lst=self.env['DEFINES']
	# Replace in place to keep the original ordering of the defines.
	for x in lst:
		if x.startswith(ban):
			lst[lst.index(x)]=app
			break
	else:
		self.env.append_value('DEFINES',app)
	self.env.append_unique(DEFKEYS,key)
@conf
def undefine(self,key):
	"""Remove *key* from env DEFINES while still recording it in DEFKEYS
	(so the config header can emit '/* #undef key */')."""
	assert key and isinstance(key,str)
	ban=key+'='
	lst=[x for x in self.env['DEFINES']if not x.startswith(ban)]
	self.env['DEFINES']=lst
	self.env.append_unique(DEFKEYS,key)
@conf
def define_cond(self,key,val):
	"""Define *key* as 1 when *val* is truthy, otherwise undefine it."""
	assert key and isinstance(key,str)
	if val:
		self.define(key,1)
	else:
		self.undefine(key)
@conf
def is_defined(self,key):
	"""Return True if *key* currently has a value in env DEFINES."""
	assert key and isinstance(key,str)
	ban=key+'='
	for x in self.env['DEFINES']:
		if x.startswith(ban):
			return True
	return False
@conf
def get_define(self,key):
	"""Return the current value of define *key*, or None if not defined."""
	assert key and isinstance(key,str)
	ban=key+'='
	for x in self.env['DEFINES']:
		if x.startswith(ban):
			return x[len(ban):]
	return None
@conf
def have_define(self,key):
	"""Return the sanitized HAVE_* define name for *key* (pattern may be
	overridden through env.HAVE_PAT)."""
	return(self.env.HAVE_PAT or'HAVE_%s')%Utils.quote_define_name(key)
@conf
def write_config_header(self,configfile='',guard='',top=False,env=None,defines=True,headers=False,remove=True,define_prefix=''):
	"""Write the accumulated defines/includes to a config header file
	wrapped in an include guard; by default the recorded defines are
	cleared afterwards (remove=True)."""
	if env:
		Logs.warn('Cannot pass env to write_config_header')
	if not configfile:configfile=WAF_CONFIG_H
	waf_guard=guard or'W_%s_WAF'%Utils.quote_define_name(configfile)
	# top=True writes at the root of the build directory.
	node=top and self.bldnode or self.path.get_bld()
	node=node.make_node(configfile)
	node.parent.mkdir()
	lst=['/* WARNING! All changes made to this file will be lost! */\n']
	lst.append('#ifndef %s\n#define %s\n'%(waf_guard,waf_guard))
	lst.append(self.get_config_header(defines,headers,define_prefix=define_prefix))
	lst.append('\n#endif /* %s */\n'%waf_guard)
	node.write('\n'.join(lst))
	self.env.append_unique(Build.CFG_FILES,[node.abspath()])
	if remove:
		# Reset so later write_config_header calls start from a clean slate.
		for key in self.env[DEFKEYS]:
			self.undefine(key)
		self.env[DEFKEYS]=[]
@conf
def get_config_header(self,defines=True,headers=False,define_prefix=''):
	"""Build and return the textual body of the configuration header:
	optional '#include' lines followed by '#define'/'#undef' lines."""
	lst=[]
	if headers:
		for x in self.env[INCKEYS]:
			lst.append('#include <%s>'%x)
	if defines:
		for x in self.env[DEFKEYS]:
			if self.is_defined(x):
				val=self.get_define(x)
				lst.append('#define %s %s'%(define_prefix+x,val))
			else:
				# Keys that were undefined are emitted as commented #undef.
				lst.append('/* #undef %s */'%(define_prefix+x))
	return"\n".join(lst)
@conf
def cc_add_flags(conf):
	"""Import CPPFLAGS and CFLAGS from the OS environment into the env."""
	conf.add_os_flags('CPPFLAGS','CFLAGS')
	conf.add_os_flags('CFLAGS')
@conf
def cxx_add_flags(conf):
	"""Import CPPFLAGS and CXXFLAGS from the OS environment into the env."""
	conf.add_os_flags('CPPFLAGS','CXXFLAGS')
	conf.add_os_flags('CXXFLAGS')
@conf
def link_add_flags(conf):
	"""Import LINKFLAGS and LDFLAGS from the OS environment into the env."""
	conf.add_os_flags('LINKFLAGS')
	conf.add_os_flags('LDFLAGS','LINKFLAGS')
@conf
def cc_load_tools(conf):
	"""Load the C tool, defaulting DEST_OS to the current platform."""
	if not conf.env.DEST_OS:
		conf.env.DEST_OS=Utils.unversioned_sys_platform()
	conf.load('c')
@conf
def cxx_load_tools(conf):
	"""Load the C++ tool, defaulting DEST_OS to the current platform."""
	if not conf.env.DEST_OS:
		conf.env.DEST_OS=Utils.unversioned_sys_platform()
	conf.load('cxx')
@conf
def get_cc_version(conf,cc,gcc=False,icc=False):
	"""Run the compiler preprocessor to dump its predefined macros, then
	derive DEST_OS/DEST_BINFMT/DEST_CPU and CC_VERSION from them.
	Returns the macro dictionary."""
	# -dM -E - : preprocess empty stdin and print all predefined macros.
	cmd=cc+['-dM','-E','-']
	env=conf.env.env or None
	try:
		p=Utils.subprocess.Popen(cmd,stdin=Utils.subprocess.PIPE,stdout=Utils.subprocess.PIPE,stderr=Utils.subprocess.PIPE,env=env)
		p.stdin.write('\n')
		out=p.communicate()[0]
	except Exception:
		conf.fatal('Could not determine the compiler version %r'%cmd)
	if not isinstance(out,str):
		out=out.decode(sys.stdout.encoding or'iso8859-1')
	# Sanity checks: reject mismatched compilers up front.
	if gcc:
		if out.find('__INTEL_COMPILER')>=0:
			conf.fatal('The intel compiler pretends to be gcc')
		if out.find('__GNUC__')<0 and out.find('__clang__')<0:
			conf.fatal('Could not determine the compiler type')
	if icc and out.find('__INTEL_COMPILER')<0:
		conf.fatal('Not icc/icpc')
	k={}
	if icc or gcc:
		# Parse '#define NAME VALUE' lines into the k dictionary.
		out=out.splitlines()
		for line in out:
			lst=shlex.split(line)
			if len(lst)>2:
				key=lst[1]
				val=lst[2]
				k[key]=val
	def isD(var):
		# Macro is defined at all.
		return var in k
	def isT(var):
		# Macro is defined and truthy (non-'0').
		return var in k and k[var]!='0'
	if not conf.env.DEST_OS:
		conf.env.DEST_OS=''
	for i in MACRO_TO_DESTOS:
		if isD(i):
			conf.env.DEST_OS=MACRO_TO_DESTOS[i]
			break
	else:
		# Fall back to generic detection when no known OS macro matched.
		if isD('__APPLE__')and isD('__MACH__'):
			conf.env.DEST_OS='darwin'
		elif isD('__unix__'):
			conf.env.DEST_OS='generic'
	if isD('__ELF__'):
		conf.env.DEST_BINFMT='elf'
	elif isD('__WINNT__')or isD('__CYGWIN__')or isD('_WIN32'):
		conf.env.DEST_BINFMT='pe'
		# On Windows DLLs live next to the executables.
		conf.env.LIBDIR=conf.env.BINDIR
	elif isD('__APPLE__'):
		conf.env.DEST_BINFMT='mac-o'
	if not conf.env.DEST_BINFMT:
		conf.env.DEST_BINFMT=Utils.destos_to_binfmt(conf.env.DEST_OS)
	for i in MACRO_TO_DEST_CPU:
		if isD(i):
			conf.env.DEST_CPU=MACRO_TO_DEST_CPU[i]
			break
	Logs.debug('ccroot: dest platform: '+' '.join([conf.env[x]or'?'for x in('DEST_OS','DEST_BINFMT','DEST_CPU')]))
	if icc:
		# icc encodes the version as e.g. 1210 -> (12, 1, 0).
		ver=k['__INTEL_COMPILER']
		conf.env['CC_VERSION']=(ver[:-2],ver[-2],ver[-1])
	else:
		if isD('__clang__'):
			conf.env['CC_VERSION']=(k['__clang_major__'],k['__clang_minor__'],k['__clang_patchlevel__'])
		else:
			conf.env['CC_VERSION']=(k['__GNUC__'],k['__GNUC_MINOR__'],k['__GNUC_PATCHLEVEL__'])
	return k
@conf
def get_xlc_version(conf,cc):
	"""Detect the IBM XL C/C++ compiler version via '-qversion' and store
	it as conf.env.CC_VERSION; fatal error if it cannot be parsed."""
	cmd=cc+['-qversion']
	try:
		out,err=conf.cmd_and_log(cmd,output=0)
	except Errors.WafError:
		conf.fatal('Could not find xlc %r'%cmd)
	# Single-pattern loop kept so a for/else can report parse failure.
	for v in(r"IBM XL C/C\+\+.* V(?P<major>\d*)\.(?P<minor>\d*)",):
		version_re=re.compile(v,re.I).search
		match=version_re(out or err)
		if match:
			k=match.groupdict()
			conf.env['CC_VERSION']=(k['major'],k['minor'])
			break
	else:
		conf.fatal('Could not determine the XLC version.')
@conf
def add_as_needed(self):
	"""Add the '--as-needed' linker flag on ELF platforms using gcc."""
	if self.env.DEST_BINFMT=='elf'and'gcc'in(self.env.CXX_NAME,self.env.CC_NAME):
		self.env.append_unique('LINKFLAGS','--as-needed')
class cfgtask(Task.TaskBase):
	"""A configuration check wrapped as a task so that multicheck() can
	run several checks in parallel; run() returns 1 on failure."""
	def display(self):
		# No per-task console output.
		return''
	def runnable_status(self):
		return Task.RUN_ME
	def uid(self):
		return Utils.SIG_NIL
	def run(self):
		conf=self.conf
		# Each check gets its own throwaway build context.
		bld=Build.BuildContext(top_dir=conf.srcnode.abspath(),out_dir=conf.bldnode.abspath())
		bld.env=conf.env
		bld.init_dirs()
		bld.in_msg=1
		bld.logger=self.logger
		try:
			bld.check(**self.args)
		except Exception:
			# Non-zero return marks the task as failed; details are in the log.
			return 1
@conf
def multicheck(self,*k,**kw):
	"""Run several configuration checks (one dict of check() keywords per
	positional argument) in parallel; fatal error if any of them fails."""
	self.start_msg(kw.get('msg','Executing %d configuration tests'%len(k)))
	# Minimal stand-in for a build context, just enough for Runner.Parallel.
	class par(object):
		def __init__(self):
			self.keep=False
			self.cache_global=Options.cache_global
			self.nocache=Options.options.nocache
			self.returned_tasks=[]
			self.task_sigs={}
		def total(self):
			return len(tasks)
		def to_log(self,*k,**kw):
			return
	bld=par()
	tasks=[]
	for dct in k:
		x=cfgtask(bld=bld)
		tasks.append(x)
		x.args=dct
		x.bld=bld
		x.conf=self
		x.args=dct
		# Per-task in-memory logger so outputs do not interleave.
		x.logger=Logs.make_mem_logger(str(id(x)),self.logger)
	def it():
		# Yield all tasks once, then empty batches until the runner stops.
		yield tasks
		while 1:
			yield[]
	p=Runner.Parallel(bld,Options.options.jobs)
	p.biter=it()
	p.start()
	# Flush the buffered per-task logs into the main config log.
	for x in tasks:
		x.logger.memhandler.flush()
	for x in tasks:
		if x.hasrun!=Task.SUCCESS:
			self.end_msg(kw.get('errmsg','no'),color='YELLOW')
			self.fatal(kw.get('fatalmsg',None)or'One of the tests has failed, see the config.log for more information')
	self.end_msg('ok')
|
larsmans/scipy | refs/heads/master | scipy/sparse/csgraph/tests/test_reordering.py | 93 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal
from scipy.sparse.csgraph import reverse_cuthill_mckee,\
maximum_bipartite_matching
from scipy.sparse import diags, csr_matrix, coo_matrix
def test_graph_reverse_cuthill_mckee():
    """RCM ordering of a small symmetric graph matches the known permutation,
    for both 32-bit and 64-bit index arrays."""
    adjacency = np.array([[1, 0, 0, 0, 1, 0, 0, 0],
                          [0, 1, 1, 0, 0, 1, 0, 1],
                          [0, 1, 1, 0, 1, 0, 0, 0],
                          [0, 0, 0, 1, 0, 0, 1, 0],
                          [1, 0, 1, 0, 1, 0, 0, 0],
                          [0, 1, 0, 0, 0, 1, 0, 1],
                          [0, 0, 0, 1, 0, 0, 1, 0],
                          [0, 1, 0, 0, 0, 1, 0, 1]], dtype=int)
    expected = np.array([6, 3, 7, 5, 1, 2, 4, 0])
    graph = csr_matrix(adjacency)
    assert_equal(reverse_cuthill_mckee(graph), expected)
    # The routine must also accept int64 index/indptr arrays.
    graph.indices = graph.indices.astype('int64')
    graph.indptr = graph.indptr.astype('int64')
    assert_equal(reverse_cuthill_mckee(graph, True), expected)
def test_graph_reverse_cuthill_mckee_ordering():
    """RCM ordering of a larger symmetric graph assembled from COO triplets."""
    weights = np.ones(63, dtype=int)
    row_idx = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2,
                        2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
                        6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,
                        9, 10, 10, 10, 10, 10, 11, 11, 11, 11,
                        12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
                        14, 15, 15, 15, 15, 15])
    col_idx = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2,
                        7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13,
                        15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13,
                        1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11,
                        4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14,
                        5, 7, 10, 13, 15])
    expected = np.array([12, 14, 4, 6, 10, 8, 2, 15,
                         0, 13, 7, 5, 9, 11, 1, 3])
    sparse_graph = coo_matrix((weights, (row_idx, col_idx))).tocsr()
    assert_equal(reverse_cuthill_mckee(sparse_graph), expected)
def test_graph_maximum_bipartite_matching():
    """Matching a randomly permuted identity matrix must recover the inverse
    permutations, for both row and column matching and for int64 indices."""
    A = diags(np.ones(25), offsets=0, format='csc')
    # NOTE(review): unseeded RNG -- the assertions hold for any permutation,
    # but runs are not reproducible.
    rand_perm = np.random.permutation(25)
    rand_perm2 = np.random.permutation(25)
    Rrow = np.arange(25)
    Rcol = rand_perm
    Rdata = np.ones(25,dtype=int)
    Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc()
    Crow = rand_perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25,dtype=int)
    Cmat = coo_matrix((Cdata,(Crow,Ccol))).tocsc()
    # Randomly permute identity matrix
    B = Rmat*A*Cmat
    # Row permute
    perm = maximum_bipartite_matching(B,perm_type='row')
    Rrow = np.arange(25)
    Rcol = perm
    Rdata = np.ones(25,dtype=int)
    Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc()
    C1 = Rmat*B
    # Column permute
    perm2 = maximum_bipartite_matching(B,perm_type='column')
    Crow = perm2
    Ccol = np.arange(25)
    Cdata = np.ones(25,dtype=int)
    Cmat = coo_matrix((Cdata,(Crow,Ccol))).tocsc()
    C2 = B*Cmat
    # Should get identity matrix back
    assert_equal(any(C1.diagonal() == 0), False)
    assert_equal(any(C2.diagonal() == 0), False)
    # Test int64 indices input
    B.indices = B.indices.astype('int64')
    B.indptr = B.indptr.astype('int64')
    perm = maximum_bipartite_matching(B,perm_type='row')
    Rrow = np.arange(25)
    Rcol = perm
    Rdata = np.ones(25,dtype=int)
    Rmat = coo_matrix((Rdata,(Rrow,Rcol))).tocsc()
    C3 = Rmat*B
    assert_equal(any(C3.diagonal() == 0), False)
|
torchbox/wagtail | refs/heads/master | wagtail/documents/migrations/0005_document_collection.py | 24 | # -*- coding: utf-8 -*-
from django.db import migrations, models
import wagtail.core.models
class Migration(migrations.Migration):
    """Add a ``collection`` foreign key to Document, defaulting to the root
    collection so existing documents remain valid."""
    dependencies = [
        ('wagtailcore', '0025_collection_initial_data'),
        ('wagtaildocs', '0004_capitalizeverbose'),
    ]
    operations = [
        migrations.AddField(
            model_name='document',
            name='collection',
            # Default is a callable resolving the root collection's id at
            # migration time rather than a hard-coded primary key.
            field=models.ForeignKey(related_name='+', to='wagtailcore.Collection', verbose_name='collection', default=wagtail.core.models.get_root_collection_id, on_delete=models.CASCADE),
            preserve_default=True,
        ),
    ]
|
msadat/python-scripts | refs/heads/master | delete_atoms_GB_point.py | 1 | #Author: Mohammad R. Sadat
#PhD candidate
#Dept of CEEM, U of A
#this script randomly deletes atoms from a grain boundary while maintaining stoichiometry and writes data in 'full' style format after deletion
def delAtoms(x, t):
    """Delete atoms (rows) from an atom data array.

    Parameters
    ----------
    x : numpy.ndarray
        2-D array of atom records, one atom per row (type, x, y, z, ...).
    t : sequence of int
        Row indices of the atoms to delete.

    Returns
    -------
    numpy.ndarray
        A new array equal to ``x`` with the rows in ``t`` removed.
    """
    # The original contained a dead, triple-quoted block that selected rows
    # by atom type; it was never executed and has been removed.
    return np.delete(x, t, 0)
import numpy as np
import glob
import random
# NOTE: Python 2 script (raw_input, print statements).
natoms_initial = int(raw_input("Enter the original no. of atoms: ")) #int(sys.argv[1])
filename = 'data.GB-test' # name of the input data file
outputfile = 'data.GB-test-defected' # name of output data file
flist2 = glob.glob(filename)
#the 1st col. is string so its loaded seperately
# Load atom records: type + xyz coordinates (columns 1-4 of the data file).
for f in flist2:
    load2 = np.genfromtxt(f, skip_header=16, skip_footer=natoms_initial+1, dtype=float, usecols=(1,2,3,4)) #dtype=("|S10", float, float, float),
dataovito1=np.array(load2)
# Second pass: atom types only, as integers.
for f in flist2:
    load2 = np.genfromtxt(f, skip_header=16, skip_footer=natoms_initial+1, dtype=int, usecols=(1)) #dtype=("|S10", float, float, float),
dataovito2=np.array(load2)
# Count atoms of each species (type 1 = Zn, type 2 = S).
nZn = 0
nS = 0
for j in range(len(dataovito2)):
    if dataovito2[j] == 1:
        nZn +=1
    elif dataovito2[j] ==2:
        nS +=1
total = nZn+nS
print "total of type 1: ", nZn
print "total of type 2: ", nS
#to_delete = total*1/100
#extent for deletion (around GB plane)
# Simulation box bounds, padded by 1 on the low side.
xmin = np.min(dataovito1[:,1]) -1.0
xmax = np.max(dataovito1[:,1])
ymin = np.min(dataovito1[:,2]) -1.0
ymax = np.max(dataovito1[:,2])
zmin = np.min(dataovito1[:,3]) -1.0
zmax = np.max(dataovito1[:,3])
# Slab around the grain-boundary plane (y in [-2, 2]) eligible for deletion.
ylo = -2
yhi = 2
# Delete 1% of all atoms, split evenly between the two species to keep
# stoichiometry.
ndel = total*1/100
ids= [] #final list of atoms to be deleted
idsin1 = [] #list of type 1 atom within chosen boundary
idsin2 = [] #list of type 2 atom within chosen boundary
for j in range(len(dataovito1)):
    #if yhi < dataovito1[j,2] or dataovito1[j,2] < ylo and dataovito1[j,0]==atomtype:
    if ylo < dataovito1[j,2] < yhi and dataovito1[j,0]==1:
        idsin1.append(j)
for j in range(len(dataovito1)):
    #if yhi < dataovito1[j,2] or dataovito1[j,2] < ylo and dataovito1[j,0]==atomtype:
    if ylo < dataovito1[j,2] < yhi and dataovito1[j,0]==2:
        idsin2.append(j)
#print idsin1
#print idsin2
# NOTE(review): random.choice can pick the same index twice, so slightly
# fewer than ndel atoms may actually be removed -- confirm intended.
for j in range(len(idsin1)):
    ids.append(random.choice(idsin1))
    if len(ids) >=ndel/2:
        break
#print ids
for j in range(len(idsin2)):
    ids.append(random.choice(idsin2))
    if len(ids)/2 >=ndel/2:
        break
#print ids
dataovito1= delAtoms(dataovito1, ids)
print "total deleted: ", len(ids)
#print total
print "final no of atoms: ", len(dataovito1)
#print dataovito1
natoms = len(dataovito1)
# Write the defected structure in LAMMPS 'full'-style data format.
outFile = open(outputfile, 'w')
outFile.write('LAMMPS data file written using Python script\n')
outFile.write('\n')
outFile.write('%i %s \n' %(natoms, 'atoms'))
outFile.write('2 atom types \n')
outFile.write('\n')
outFile.write('%f %f %s %s \n' %(xmin, xmax, 'xlo', 'xhi'))
outFile.write('%f %f %s %s \n' %(ymin, ymax, 'ylo', 'yhi'))
outFile.write('%f %f %s %s \n' %(zmin, zmax, 'zlo', 'zhi'))
outFile.write('\n')
outFile.write('%s \n' %('Masses'))
outFile.write('\n')
outFile.write('%i %f \n' %(1, 65.39))
outFile.write('%i %f \n' %(2, 32.066))
outFile.write('\n')
outFile.write('Atoms\n')
outFile.write('\n')
# One line per atom: id, molecule (0), type, charge (0), x, y, z.
for j in range(len(dataovito1)):
    if dataovito1[j,0]==1:
        outFile.write('%i %i %i %i %f %f %f \n' %(j+1, 0, 1, 0, dataovito1[j,1], dataovito1[j,2], dataovito1[j,3]))
    elif dataovito1[j,0]==2:
        outFile.write('%i %i %i %i %f %f %f \n' %(j+1, 0, 2, 0, dataovito1[j,1], dataovito1[j,2], dataovito1[j,3]))
outFile.close()
print "All done!"
|
krishauser/Klampt | refs/heads/master | Python/klampt/io/open3d_convert.py | 1 | """Conversions to and from the Open3D library.
Open3D is useful for doing various point cloud processing routines.
It can be installed using pip as follows::
pip install open3d-python
"""
import open3d
from klampt import PointCloud,TriangleMesh,VolumeGrid,Geometry3D
from ..model import geometry
def to_open3d(obj):
    """Converts Klamp't geometry to an open3d geometry.

    Geometry3D objects are converted applying the current transform.

    If the VolumeGrid is considered to be an occupancy grid (all values
    between 0 and 1), then it is converted to a VoxelGrid containing a
    voxel for any positive items.  If it is considered to be a SDF,
    then it contains a voxel of any non-positive items. There may be
    issues converting from VolumeGrids whose cells are non-cubic.
    """
    if isinstance(obj,PointCloud):
        pc = open3d.geometry.PointCloud()
        for i in range(obj.numPoints()):
            k = i*3
            pc.points.append((obj.vertices[k],obj.vertices[k+1],obj.vertices[k+2]))
        #TODO: other properties
        colors = geometry.point_cloud_colors(obj,('r','g','b'))
        if colors is not None:
            for i in range(obj.numPoints()):
                pc.colors.append(colors[i])
        return pc
    elif isinstance(obj,TriangleMesh):
        m = open3d.geometry.TriangleMesh()
        # Klamp't stores vertices and indices as flat arrays of triples.
        for i in range(len(obj.vertices)//3):
            k = i*3
            m.vertices.append((obj.vertices[k],obj.vertices[k+1],obj.vertices[k+2]))
        for i in range(len(obj.indices)//3):
            k = i*3
            m.triangles.append((obj.indices[k],obj.indices[k+1],obj.indices[k+2]))
        return m
    elif isinstance(obj,VolumeGrid):
        import numpy as np
        origin = np.array([obj.bbox[0],obj.bbox[1],obj.bbox[2]])
        cx = (obj.bbox[3]-obj.bbox[0])/obj.dims[0]
        cy = (obj.bbox[4]-obj.bbox[1])/obj.dims[1]
        cz = (obj.bbox[5]-obj.bbox[2])/obj.dims[2]
        # Geometric mean of the cell dimensions (open3d voxels are cubic).
        voxel_size = pow(cx*cy*cz,1.0/3.0)
        values = np.array(obj.values)
        # BUG FIX: the original computed (min, min); the upper bound must be
        # the maximum, otherwise the occupancy-grid / SDF classification
        # below never sees values above 1.
        vrange = np.min(values),np.max(values)
        if vrange[0] < 0 or vrange[1] > 1:
            #treat as SDF: occupied cells are those at or below the surface
            indices = np.nonzero(values <= 0)
        else:
            #treat as occupancy grid: occupied cells are those above 0.5
            indices = np.nonzero(values > 0.5)
        pc = open3d.geometry.PointCloud()
        for i in indices[0]:
            # Unflatten the linear index into (ix, iy, iz) cell coordinates.
            cell = np.array([i//(obj.dims[2]*obj.dims[1]), (i//obj.dims[2]) % obj.dims[1], i%obj.dims[2]])
            pt = (cell + 0.5)*voxel_size + origin
            pc.points.append(pt)
        return open3d.geometry.create_surface_voxel_grid_from_point_cloud(pc,voxel_size)
    elif isinstance(obj,Geometry3D):
        # Bake the current transform into the concrete geometry first.
        if obj.type() == 'PointCloud':
            pc = obj.getPointCloud()
            pc.transform(*obj.getCurrentTransform())
            return to_open3d(pc)
        elif obj.type() == 'TriangleMesh':
            m = obj.getTriangleMesh()
            m.transform(*obj.getCurrentTransform())
            return to_open3d(m)
    raise TypeError("Invalid type")
def from_open3d(obj):
    """Converts an open3d geometry (PointCloud, TriangleMesh, or VoxelGrid)
    into the corresponding Klamp't geometry.

    Raises:
        TypeError: if obj is not one of the supported open3d types.
    """
    if isinstance(obj,open3d.geometry.PointCloud):
        out = PointCloud()
        #flatten the open3d point list into Klamp't's x,y,z,... layout
        for pt in obj.points:
            out.vertices.append(pt[0])
            out.vertices.append(pt[1])
            out.vertices.append(pt[2])
        if obj.has_colors():
            geometry.point_cloud_set_colors(out,obj.colors,('r','g','b'),'rgb')
        #TODO: other properties
        return out
    elif isinstance(obj,open3d.geometry.TriangleMesh):
        mesh = TriangleMesh()
        for vert in obj.vertices:
            mesh.vertices.append(vert[0])
            mesh.vertices.append(vert[1])
            mesh.vertices.append(vert[2])
        for tri in obj.triangles:
            mesh.indices.append(int(tri[0]))
            mesh.indices.append(int(tri[1]))
            mesh.indices.append(int(tri[2]))
        return mesh
    elif isinstance(obj,open3d.geometry.VoxelGrid):
        import numpy as np
        res = VolumeGrid()
        occupied_cells = np.array(obj.voxels,dtype=np.int32)
        lo = np.min(occupied_cells,axis=0)
        hi = np.max(occupied_cells,axis=0)
        assert lo.shape == (3,)
        assert hi.shape == (3,)
        #pad the bounding box by one voxel on every side
        corner_min = obj.origin + (lo-1)*obj.voxel_size
        corner_max = obj.origin + (hi+2)*obj.voxel_size
        for axis in range(3):
            res.bbox.append(corner_min[axis])
        for axis in range(3):
            res.bbox.append(corner_max[axis])
        for axis in range(3):
            res.dims.append(hi[axis]-lo[axis]+3)
        #initialize every cell to -1 (empty), then mark occupied cells with 1
        res.values.resize(res.dims[0]*res.dims[1]*res.dims[2],-1.0)
        for cell in occupied_cells:
            res.set(int(cell[0]-lo[0]+1),int(cell[1]-lo[1]+1),int(cell[2]-lo[2]+1),1.0)
        return res
    raise TypeError("Invalid type")
|
timm/timmnix | refs/heads/master | pypy3-v5.5.0-linux64/lib-python/3/test/test_cmd_line_script.py | 1 | # tests command line execution of scripts
import importlib
import importlib.machinery
import zipimport
import unittest
import sys
import os
import os.path
import py_compile
import textwrap
from test import support
from test.script_helper import (
make_pkg, make_script, make_zip_pkg, make_zip_script,
assert_python_ok, assert_python_failure, temp_dir,
spawn_python, kill_python)
verbose = support.verbose
example_args = ['test1', 'test2', 'test3']
test_source = """\
# Script may be run with optimisation enabled, so don't rely on assert
# statements being executed
def assertEqual(lhs, rhs):
if lhs != rhs:
raise AssertionError('%r != %r' % (lhs, rhs))
def assertIdentical(lhs, rhs):
if lhs is not rhs:
raise AssertionError('%r is not %r' % (lhs, rhs))
# Check basic code execution
result = ['Top level assignment']
def f():
result.append('Lower level reference')
f()
assertEqual(result, ['Top level assignment', 'Lower level reference'])
# Check population of magic variables
assertEqual(__name__, '__main__')
from importlib.machinery import BuiltinImporter
_loader = __loader__ if __loader__ is BuiltinImporter else type(__loader__)
print('__loader__==%a' % _loader)
print('__file__==%a' % __file__)
if __cached__ is not None:
# XXX: test_script_compiled on PyPy
assertEqual(__file__, __cached__)
if not __cached__.endswith(('pyc', 'pyo')):
raise AssertionError('has __cached__ but not compiled')
print('__package__==%r' % __package__)
# Check the sys module
import sys
assertIdentical(globals(), sys.modules[__name__].__dict__)
from test import test_cmd_line_script
example_args_list = test_cmd_line_script.example_args
assertEqual(sys.argv[1:], example_args_list)
print('sys.argv[0]==%a' % sys.argv[0])
print('sys.path[0]==%a' % sys.path[0])
# Check the working directory
import os
print('cwd==%a' % os.getcwd())
"""
def _make_test_script(script_dir, script_basename, source=test_source):
    """Write a test script to disk and make the import system notice it."""
    path = make_script(script_dir, script_basename, source)
    importlib.invalidate_caches()
    return path
def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
                       source=test_source, depth=1):
    """Build a zipped test package and invalidate import caches afterwards."""
    names = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
                         source, depth)
    importlib.invalidate_caches()
    return names
# There's no easy way to pass the script directory in to get
# -m to work (avoiding that is the whole point of making
# directories and zipfiles executable!)
# So we fake it for testing purposes with a custom launch script
launch_source = """\
import sys, os.path, runpy
sys.path.insert(0, %s)
runpy._run_module_as_main(%r)
"""
def _make_launch_script(script_dir, script_basename, module_name, path=None):
    """Create a stub script that extends sys.path and runs *module_name*.

    If *path* is None the stub adds its own directory; otherwise it adds
    the given path (inserted into the template as a repr literal).
    """
    path_expr = "os.path.dirname(__file__)" if path is None else repr(path)
    source = launch_source % (path_expr, module_name)
    result = make_script(script_dir, script_basename, source)
    importlib.invalidate_caches()
    return result
class CmdLineTest(unittest.TestCase):
    """Tests of how the interpreter executes scripts, directories, zipfiles
    and -m module invocations from the command line, checking the magic
    module attributes (__file__, __loader__, __package__) and sys state."""

    def _check_output(self, script_name, exit_code, data,
                      expected_file, expected_argv0,
                      expected_path0, expected_package,
                      expected_loader):
        # Verify that the captured output of a run of the test script
        # contains the expected values for the magic attributes and sys
        # state that test_source prints.
        if verbose > 1:
            print("Output from test script %r:" % script_name)
            print(data)
        self.assertEqual(exit_code, 0)
        printed_loader = '__loader__==%a' % expected_loader
        printed_file = '__file__==%a' % expected_file
        printed_package = '__package__==%r' % expected_package
        printed_argv0 = 'sys.argv[0]==%a' % expected_argv0
        printed_path0 = 'sys.path[0]==%a' % expected_path0
        printed_cwd = 'cwd==%a' % os.getcwd()
        if verbose > 1:
            print('Expected output:')
            print(printed_file)
            print(printed_package)
            print(printed_argv0)
            print(printed_cwd)
        # Output is bytes (captured from a child process), so encode the
        # expected strings before the membership checks.
        self.assertIn(printed_loader.encode('utf-8'), data)
        self.assertIn(printed_file.encode('utf-8'), data)
        self.assertIn(printed_package.encode('utf-8'), data)
        self.assertIn(printed_argv0.encode('utf-8'), data)
        self.assertIn(printed_path0.encode('utf-8'), data)
        self.assertIn(printed_cwd.encode('utf-8'), data)

    def _check_script(self, script_name, expected_file,
                      expected_argv0, expected_path0,
                      expected_package, expected_loader,
                      *cmd_line_switches):
        # Run the script in a child interpreter (expected to succeed) and
        # validate its output via _check_output.
        if not __debug__:
            # Propagate this interpreter's optimization level to the child.
            cmd_line_switches += ('-' + 'O' * sys.flags.optimize,)
        run_args = cmd_line_switches + (script_name,) + tuple(example_args)
        rc, out, err = assert_python_ok(*run_args)
        self._check_output(script_name, rc, out + err, expected_file,
                           expected_argv0, expected_path0,
                           expected_package, expected_loader)

    def _check_import_error(self, script_name, expected_msg,
                            *cmd_line_switches):
        # Run the script (expected to fail) and require that the error
        # message appears on stderr.
        run_args = cmd_line_switches + (script_name,)
        rc, out, err = assert_python_failure(*run_args)
        if verbose > 1:
            print('Output from test script %r:' % script_name)
            print(err)
            print('Expected output: %r' % expected_msg)
        self.assertIn(expected_msg.encode('utf-8'), err)

    def test_dash_c_loader(self):
        # Code run via -c reports the built-in importer as its __loader__.
        rc, out, err = assert_python_ok("-c", "print(__loader__)")
        expected = repr(importlib.machinery.BuiltinImporter).encode("utf-8")
        self.assertIn(expected, out)

    def test_stdin_loader(self):
        # Unfortunately, there's no way to automatically test the fully
        # interactive REPL, since that code path only gets executed when
        # stdin is an interactive tty.
        p = spawn_python()
        try:
            p.stdin.write(b"print(__loader__)\n")
            p.stdin.flush()
        finally:
            out = kill_python(p)
        expected = repr(importlib.machinery.BuiltinImporter).encode("utf-8")
        self.assertIn(expected, out)

    def test_basic_script(self):
        # A plain .py script run directly from the command line.
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, 'script')
            package = '' if support.check_impl_detail(pypy=True) else None
            self._check_script(script_name, script_name, script_name,
                               script_dir, package,
                               importlib.machinery.SourceFileLoader)

    def test_script_compiled(self):
        # A legacy .pyc file run directly from the command line.
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, 'script')
            py_compile.compile(script_name, doraise=True)
            os.remove(script_name)
            pyc_file = support.make_legacy_pyc(script_name)
            package = '' if support.check_impl_detail(pypy=True) else None
            self._check_script(pyc_file, pyc_file,
                               pyc_file, script_dir, package,
                               importlib.machinery.SourcelessFileLoader)

    def test_directory(self):
        # A directory containing __main__.py is executable.
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, '__main__')
            self._check_script(script_dir, script_name, script_dir,
                               script_dir, '',
                               importlib.machinery.SourceFileLoader)

    def test_directory_compiled(self):
        # A directory containing only a compiled __main__ is executable.
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, '__main__')
            py_compile.compile(script_name, doraise=True)
            os.remove(script_name)
            pyc_file = support.make_legacy_pyc(script_name)
            self._check_script(script_dir, pyc_file, script_dir,
                               script_dir, '',
                               importlib.machinery.SourcelessFileLoader)

    def test_directory_error(self):
        # Running a directory without __main__.py reports a clear error.
        with temp_dir() as script_dir:
            msg = "can't find '__main__' module in %r" % script_dir
            self._check_import_error(script_dir, msg)

    def test_zipfile(self):
        # A zipfile containing __main__.py is executable.
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, '__main__')
            zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name)
            self._check_script(zip_name, run_name, zip_name, zip_name, '',
                               zipimport.zipimporter)

    def test_zipfile_compiled(self):
        # A zipfile containing a compiled __main__ is executable.
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, '__main__')
            compiled_name = py_compile.compile(script_name, doraise=True)
            zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name)
            self._check_script(zip_name, run_name, zip_name, zip_name, '',
                               zipimport.zipimporter)

    def test_zipfile_error(self):
        # Running a zipfile without __main__.py reports a clear error.
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, 'not_main')
            zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name)
            msg = "can't find '__main__' module in %r" % zip_name
            self._check_import_error(zip_name, msg)

    def test_module_in_package(self):
        # -m style execution of a module inside a package (via launch stub).
        with temp_dir() as script_dir:
            pkg_dir = os.path.join(script_dir, 'test_pkg')
            make_pkg(pkg_dir)
            script_name = _make_test_script(pkg_dir, 'script')
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script')
            self._check_script(launch_name, script_name, script_name,
                               script_dir, 'test_pkg',
                               importlib.machinery.SourceFileLoader)

    def test_module_in_package_in_zipfile(self):
        with temp_dir() as script_dir:
            zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script')
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name)
            self._check_script(launch_name, run_name, run_name,
                               zip_name, 'test_pkg', zipimport.zipimporter)

    def test_module_in_subpackage_in_zipfile(self):
        with temp_dir() as script_dir:
            zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2)
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name)
            self._check_script(launch_name, run_name, run_name,
                               zip_name, 'test_pkg.test_pkg',
                               zipimport.zipimporter)

    def test_package(self):
        # Executing a package runs its __main__ submodule.
        with temp_dir() as script_dir:
            pkg_dir = os.path.join(script_dir, 'test_pkg')
            make_pkg(pkg_dir)
            script_name = _make_test_script(pkg_dir, '__main__')
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
            self._check_script(launch_name, script_name,
                               script_name, script_dir, 'test_pkg',
                               importlib.machinery.SourceFileLoader)

    def test_package_compiled(self):
        # Executing a package works when __main__ is only available compiled.
        with temp_dir() as script_dir:
            pkg_dir = os.path.join(script_dir, 'test_pkg')
            make_pkg(pkg_dir)
            script_name = _make_test_script(pkg_dir, '__main__')
            compiled_name = py_compile.compile(script_name, doraise=True)
            os.remove(script_name)
            pyc_file = support.make_legacy_pyc(script_name)
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
            self._check_script(launch_name, pyc_file,
                               pyc_file, script_dir, 'test_pkg',
                               importlib.machinery.SourcelessFileLoader)

    def test_package_error(self):
        # A package without __main__ cannot be executed directly.
        with temp_dir() as script_dir:
            pkg_dir = os.path.join(script_dir, 'test_pkg')
            make_pkg(pkg_dir)
            msg = ("'test_pkg' is a package and cannot "
                   "be directly executed")
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
            self._check_import_error(launch_name, msg)

    def test_package_recursion(self):
        # A package whose __main__ is itself a package cannot be executed.
        with temp_dir() as script_dir:
            pkg_dir = os.path.join(script_dir, 'test_pkg')
            make_pkg(pkg_dir)
            main_dir = os.path.join(pkg_dir, '__main__')
            make_pkg(main_dir)
            msg = ("Cannot use package as __main__ module; "
                   "'test_pkg' is a package and cannot "
                   "be directly executed")
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
            self._check_import_error(launch_name, msg)

    def test_issue8202(self):
        # Make sure package __init__ modules see "-m" in sys.argv0 while
        # searching for the module to execute
        with temp_dir() as script_dir:
            with support.change_cwd(path=script_dir):
                pkg_dir = os.path.join(script_dir, 'test_pkg')
                make_pkg(pkg_dir, "import sys; print('init_argv0==%r' % sys.argv[0])")
                script_name = _make_test_script(pkg_dir, 'script')
                rc, out, err = assert_python_ok('-m', 'test_pkg.script', *example_args)
                if verbose > 1:
                    print(out)
                expected = "init_argv0==%r" % '-m'
                self.assertIn(expected.encode('utf-8'), out)
                self._check_output(script_name, rc, out,
                                   script_name, script_name, '', 'test_pkg',
                                   importlib.machinery.SourceFileLoader)

    def test_issue8202_dash_c_file_ignored(self):
        # Make sure a "-c" file in the current directory
        # does not alter the value of sys.path[0]
        with temp_dir() as script_dir:
            with support.change_cwd(path=script_dir):
                with open("-c", "w") as f:
                    f.write("data")
                    rc, out, err = assert_python_ok('-c',
                        'import sys; print("sys.path[0]==%r" % sys.path[0])')
                    if verbose > 1:
                        print(out)
                    expected = "sys.path[0]==%r" % ''
                    self.assertIn(expected.encode('utf-8'), out)

    def test_issue8202_dash_m_file_ignored(self):
        # Make sure a "-m" file in the current directory
        # does not alter the value of sys.path[0]
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, 'other')
            with support.change_cwd(path=script_dir):
                with open("-m", "w") as f:
                    f.write("data")
                    rc, out, err = assert_python_ok('-m', 'other', *example_args)
                    self._check_output(script_name, rc, out,
                                      script_name, script_name, '', '',
                                      importlib.machinery.SourceFileLoader)

    def test_dash_m_error_code_is_one(self):
        # If a module is invoked with the -m command line flag
        # and results in an error that the return code to the
        # shell is '1'
        with temp_dir() as script_dir:
            with support.change_cwd(path=script_dir):
                pkg_dir = os.path.join(script_dir, 'test_pkg')
                make_pkg(pkg_dir)
                script_name = _make_test_script(pkg_dir, 'other',
                                                "if __name__ == '__main__': raise ValueError")
                rc, out, err = assert_python_failure('-m', 'test_pkg.other', *example_args)
                if verbose > 1:
                    print(out)
                self.assertEqual(rc, 1)

    def test_pep_409_verbiage(self):
        # Make sure PEP 409 syntax properly suppresses
        # the context of an exception
        script = textwrap.dedent("""\
            try:
                raise ValueError
            except:
                raise NameError from None
            """)
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, 'script', script)
            exitcode, stdout, stderr = assert_python_failure(script_name)
            text = stderr.decode('ascii').split('\n')
            # A suppressed context means exactly one traceback: header,
            # location line, source line, and the final NameError line.
            self.assertEqual(len(text), 4)
            self.assertTrue(text[0].startswith('Traceback'))
            self.assertTrue(text[1].startswith('  File '))
            self.assertTrue(text[3].startswith('NameError'))

    def test_non_ascii(self):
        # Mac OS X denies the creation of a file with an invalid UTF-8 name.
        # Windows allows to create a name with an arbitrary bytes name, but
        # Python cannot a undecodable bytes argument to a subprocess.
        if (support.TESTFN_UNDECODABLE
        and sys.platform not in ('win32', 'darwin')):
            name = os.fsdecode(support.TESTFN_UNDECODABLE)
        elif support.TESTFN_NONASCII:
            name = support.TESTFN_NONASCII
        else:
            self.skipTest("need support.TESTFN_NONASCII")

        # Issue #16218
        source = 'print(ascii(__file__))\n'
        script_name = _make_test_script(os.curdir, name, source)
        self.addCleanup(support.unlink, script_name)
        rc, stdout, stderr = assert_python_ok(script_name)
        self.assertEqual(
            ascii(script_name),
            stdout.rstrip().decode('ascii'),
            'stdout=%r stderr=%r' % (stdout, stderr))
        self.assertEqual(0, rc)
def test_main():
    """Run the command line test suite and reap any spawned children."""
    support.run_unittest(CmdLineTest)
    support.reap_children()

if __name__ == '__main__':
    test_main()
|
NMGRL/pychron | refs/heads/develop | pychron/entry/export/xml_irradiation_exporter.py | 2 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from pychron.entry.export.base_irradiation_exporter import BaseIrradiationExporter
class XMLIrradiationExporter(BaseIrradiationExporter):
    """
    Exporter that serializes irradiations from the pychron database to an
    XML file.

    NOTE(review): no methods are overridden here yet, so all behavior is
    presumably inherited from BaseIrradiationExporter — confirm against
    the base class before relying on XML-specific output.
    """
# ============= EOF =============================================
|
AkA84/edx-platform | refs/heads/master | lms/djangoapps/survey/tests/test_utils.py | 62 | """
Python tests for the Survey models
"""
from collections import OrderedDict
from django.test.client import Client
from django.contrib.auth.models import User
from survey.models import SurveyForm
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from survey.utils import is_survey_required_for_course, must_answer_survey
class SurveyModelsTests(ModuleStoreTestCase):
    """
    All tests for the survey utils.py helpers
    (is_survey_required_for_course and must_answer_survey).
    """
    def setUp(self):
        """
        Set up the test data used in the specific tests
        """
        super(SurveyModelsTests, self).setUp()
        self.client = Client()

        # Create two student accounts and one staff account
        self.password = 'abc'
        self.student = User.objects.create_user('student', 'student@test.com', self.password)
        self.student2 = User.objects.create_user('student2', 'student2@test.com', self.password)
        self.staff = User.objects.create_user('staff', 'staff@test.com', self.password)
        self.staff.is_staff = True
        self.staff.save()
        self.test_survey_name = 'TestSurvey'
        self.test_form = '<input name="foo"></input>'
        self.student_answers = OrderedDict({
            'field1': 'value1',
            'field2': 'value2',
        })
        self.student2_answers = OrderedDict({
            'field1': 'value3'
        })
        # The course requires a survey whose name matches an existing form
        self.course = CourseFactory.create(
            course_survey_required=True,
            course_survey_name=self.test_survey_name
        )
        self.survey = SurveyForm.create(self.test_survey_name, self.test_form)

    def test_is_survey_required_for_course(self):
        """
        Assert that a course survey is required when both the required flag
        is set and a survey name is set on the course descriptor
        """
        self.assertTrue(is_survey_required_for_course(self.course))

    def test_is_survey_not_required_for_course(self):
        """
        Assert that if various data is not available or if the survey is not found
        then the survey is not considered required
        """
        course = CourseFactory.create()
        self.assertFalse(is_survey_required_for_course(course))
        course = CourseFactory.create(
            course_survey_required=False
        )
        self.assertFalse(is_survey_required_for_course(course))
        # Required flag set, but the named survey does not exist
        course = CourseFactory.create(
            course_survey_required=True,
            course_survey_name="NonExisting"
        )
        self.assertFalse(is_survey_required_for_course(course))
        # Survey exists, but the required flag is off
        course = CourseFactory.create(
            course_survey_required=False,
            course_survey_name=self.test_survey_name
        )
        self.assertFalse(is_survey_required_for_course(course))

    def test_user_not_yet_answered_required_survey(self):
        """
        Assert that a user must answer the survey of a new course which
        requires one, and that courses without a valid required survey
        do not demand answers
        """
        self.assertTrue(must_answer_survey(self.course, self.student))
        temp_course = CourseFactory.create(
            course_survey_required=False
        )
        self.assertFalse(must_answer_survey(temp_course, self.student))
        temp_course = CourseFactory.create(
            course_survey_required=True,
            course_survey_name="NonExisting"
        )
        self.assertFalse(must_answer_survey(temp_course, self.student))

    def test_user_has_answered_required_survey(self):
        """
        Assert that a user who has answered a course's required survey
        no longer has to answer it
        """
        self.survey.save_user_answers(self.student, self.student_answers)
        self.assertFalse(must_answer_survey(self.course, self.student))

    def test_staff_must_answer_survey(self):
        """
        Assert that someone with staff level permissions does not have to answer the survey
        """
        # NOTE(review): despite the test name, this verifies that staff are
        # EXEMPT from the survey requirement
        self.assertFalse(must_answer_survey(self.course, self.staff))
|
mmbtba/odoo | refs/heads/8.0 | addons/l10n_be_intrastat/__init__.py | 258 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Business Applications
# Copyright (C) 2014-2015 Odoo S.A. <http://www.odoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import l10n_be_intrastat
import wizard
|
digideskio/st2contrib | refs/heads/master | packs/github/actions/add_status.py | 15 | from github import GithubObject
from lib.base import BaseGithubAction
__all__ = [
'AddCommitStatusAction'
]
class AddCommitStatusAction(BaseGithubAction):
    """Action that attaches a commit status to a given SHA of a repository."""

    def run(self, user, repo, sha, state, target_url=None, description=None):
        """Create a commit status and return True on success.

        Falsy target_url/description values are translated to
        GithubObject.NotSet so PyGithub omits the optional fields.
        """
        target_url = GithubObject.NotSet if not target_url else target_url
        description = GithubObject.NotSet if not description else description

        account = self._client.get_user(user)
        repository = account.get_repo(repo)
        target_commit = repository.get_commit(sha)
        target_commit.create_status(state=state, target_url=target_url,
                                    description=description)
        return True
|
mhollick/strider | refs/heads/master | lib/strider/utils/__init__.py | 4 | # Copyright 2015 Michael DeHaan <michael.dehaan/gmail>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class memoize:
    """Decorator class that caches a function's results by its arguments.

    All positional arguments (and keyword arguments, if used) must be
    hashable.  Results are cached for the lifetime of the decorator
    instance with no eviction, so this is only appropriate for functions
    with a bounded argument space.
    """

    def __init__(self, fn):
        # fn: the callable to wrap; result: cache mapping arg key -> value.
        self.fn = fn
        self.result = {}

    def __call__(self, *args, **kwargs):
        # Generalization: the original raised TypeError on any keyword
        # argument.  Keyword args are folded into the cache key in sorted
        # order; pure-positional calls keep the original plain-args key.
        key = (args, tuple(sorted(kwargs.items()))) if kwargs else args
        try:
            return self.result[key]
        except KeyError:
            self.result[key] = self.fn(*args, **kwargs)
            return self.result[key]
|
Maccimo/intellij-community | refs/heads/master | python/testData/stubs/variableAnnotationsInExternalFiles/main.py | 34 | from .lib import attr
x = attr
|
scottpurdy/nupic | refs/heads/master | tests/integration/nupic/algorithms/tm_test.py | 10 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file performs a variety of tests on the reference temporal memory code.
basic_test
==========
Tests creation and serialization of the TM class. Sets parameters and ensures
they are the same after a serialization and de-serialization step. Runs learning
and inference on a small number of random patterns and ensures it doesn't crash.
===============================================================================
Basic First Order Sequences
===============================================================================
These tests ensure the most basic (first order) sequence learning mechanism is
working.
Parameters: Use a "fast learning mode": turn off global decay, temporal pooling
and hilo (make minThreshold really high). initPerm should be greater than
connectedPerm and permanenceDec should be zero. With these settings sequences
should be learned in one pass:
minThreshold = newSynapseCount
globalDecay = 0
temporalPooling = False
initialPerm = 0.8
connectedPerm = 0.7
permanenceDec = 0
permanenceInc = 0.4
Other Parameters:
numCols = 100
cellsPerCol = 1
newSynapseCount=11
activationThreshold = 8
permanenceMax = 1
Note: this is not a high order sequence, so one cell per column is fine.
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 21 and 25 columns. The sequences are
constructed so that consecutive patterns within a sequence don't share any
columns.
Training: The TM is trained with P passes of the M sequences. There
should be a reset between sequences. The total number of iterations during
training is P*N*M.
Testing: Run inference through the same set of sequences, with a reset before
each sequence. For each sequence the system should accurately predict the
pattern at the next time step up to and including the N-1'st pattern. A perfect
prediction consists of getting every column correct in the prediction, with no
extra columns. We report the number of columns that are incorrect and report a
failure if more than 2 columns are incorrectly predicted.
We can also calculate the number of segments and synapses that should be
learned. We raise an error if too many or too few were learned.
B1) Basic sequence learner. M=1, N=100, P=1.
B2) Same as above, except P=2. Test that permanences go up and that no
additional synapses or segments are learned.
B3) N=300, M=1, P=1. (See how high we can go with M)
B4) N=100, M=3, P=1 (See how high we can go with N*M)
B5) Like B1) but only have newSynapseCount columns ON in each pattern (instead of
between 21 and 25), and set activationThreshold to newSynapseCount.
B6) Like B1 but with cellsPerCol = 4. First order sequences should still work
just fine.
B7) Like B1 but with slower learning. Set the following parameters differently:
activationThreshold = newSynapseCount
minThreshold = activationThreshold
initialPerm = 0.2
connectedPerm = 0.7
permanenceInc = 0.2
Now we train the TM with the B1 sequence 4 times (P=4). This will increment
the permanences to be above 0.8 and at that point the inference will be correct.
This test will ensure the basic match function and segment activation rules are
working correctly.
B8) Like B7 but with 4 cells per column. Should still work.
B9) Like B7 but present the sequence less than 4 times: the inference should be
incorrect.
B10) Like B2, except that cells per column = 4. Should still add zero additional
synapses.
===============================================================================
High Order Sequences
===============================================================================
These tests ensure that high order sequences can be learned in a multiple cells
per column instantiation.
Parameters: Same as Basic First Order Tests above, but with varying cells per
column.
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 21 and 25 columns (except for H0). The
sequences are constructed so that consecutive patterns within a sequence don't
share any columns. The sequences are constructed to contain shared subsequences,
such as:
A B C D E F G H I J
K L M D E F N O P Q
The position and length of shared subsequences are parameters in the tests.
Training: Identical to basic first order tests above.
Testing: Identical to basic first order tests above unless noted.
We can also calculate the number of segments and synapses that should be
learned. We raise an error if too many or too few were learned.
H0) Two simple high order sequences, each of length 7, with a shared
subsequence in positions 2-4. Each pattern has a consecutive set of 5 bits on.
No pattern shares any columns with the others. These sequences are easy to
visualize and is very useful for debugging.
H1) Learn two sequences with a short shared pattern. Parameters
should be the same as B1. This test will FAIL since cellsPerCol == 1. No
consecutive patterns share any column.
H2) As above but with cellsPerCol == 4. This test should PASS. No consecutive
patterns share any column.
H2a) Same as above, except P=2. Test that permanences go up and that no
additional synapses or segments are learned.
H3) Same parameters as H.2 except sequences are created such that they share a
single significant sub-sequence. Subsequences should be reasonably long and in
the middle of sequences. No consecutive patterns share any column.
H4) Like H.3, except the shared subsequence is in the beginning. (e.g.
"ABCDEF" and "ABCGHIJ". At the point where the shared subsequence ends, all
possible next patterns should be predicted. As soon as you see the first unique
pattern, the predictions should collapse to be a perfect prediction.
H5) Shared patterns. Similar to H3 except that patterns are shared between
sequences. All sequences are different shufflings of the same set of N
patterns (there is no shared subsequence). Care should be taken such that the
same three patterns never follow one another in two sequences.
H6) Combination of H5) and H3). Shared patterns in different sequences, with a
shared subsequence.
H7) Stress test: every other pattern is shared. [Unimplemented]
H8) Start predicting in the middle of a sequence. [Unimplemented]
H9) Hub capacity. How many patterns can use that hub?
[Implemented, but does not run by default.]
H10) Sensitivity to small amounts of noise during inference. [Unimplemented]
H11) Higher order patterns with alternating elements.
Create the following 4 sequences:
A B A B A C
A B A B D E
A B F G H I
A J K L M N
After training we should verify that the expected transitions are in the
model. Prediction accuracy should be perfect. In addition, during inference,
after the first element is presented, the columns should not burst any more.
Need to verify, for the first sequence, that the high order representation
when presented with the second A and B is different from the representation
in the first presentation.
===============================================================================
Temporal Pooling Tests [UNIMPLEMENTED]
===============================================================================
Parameters: Use a "fast learning mode": With these settings sequences should be
learned in one pass:
minThreshold = newSynapseCount
globalDecay = 0
initialPerm = 0.8
connectedPerm = 0.7
permanenceDec = 0
permanenceInc = 0.4
Other Parameters:
cellsPerCol = 4
newSynapseCount=11
activationThreshold = 11
permanenceMax = 1
doPooling = True
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 17 and 21 columns. The sequences are
constructed so that consecutive patterns within a sequence don't share any
columns.
Note: for pooling tests the density of input patterns should be pretty low
since each pooling step increases the output density. At the same time, we need
enough bits on in the input for the temporal memory to find enough synapses. So,
for the tests, constraints should be something like:
(Input Density) * (Number of pooling steps) < 25 %.
AND
sum(Input) > newSynapseCount*1.5
Training: The TM is trained with P passes of the M sequences. There
should be a reset between sequences. The total number of iterations during
training is P*N*M.
Testing: Run inference through the same set of sequences, with a reset before
each sequence. For each sequence the system should accurately predict the
pattern at the next P time steps, up to and including the N-P'th pattern. A
perfect prediction consists of getting every column correct in the prediction,
with no extra columns. We report the number of columns that are incorrect and
report a failure if more than 2 columns are incorrectly predicted.
P1) Train the TM two times (P=2) on a single long sequence consisting of random
patterns (N=20, M=1). There should be no overlapping columns between successive
patterns. During inference, the TM should be able to reliably predict the pattern
two time steps in advance. numCols should be about 350 to meet the above
constraints and also to maintain consistency with test P2.
P2) Increase TM rate to 3 time steps in advance (P=3). At each step during
inference, the TM should be able to reliably predict the pattern coming up at
t+1, t+2, and t+3.
P3) Set segUpdateValidDuration to 2 and set P=3. This should behave almost
identically to P1. It should only predict the next time step correctly and not
two time steps in advance. (Check off by one error in this logic.)
P4) As above, but with multiple sequences.
P5) Same as P3 but with shared subsequences.
Continuous mode tests
=====================
Slow changing inputs.
Orphan Decay Tests
==================
HiLo Tests
==========
A high order sequence memory like the TM can memorize very long sequences. In
many applications though you don't want to memorize. You see a long sequence of
patterns but there are actually lower order repeating sequences embedded within
it. A simplistic example is words in a sentence. You'd like the TM to learn those embedded sequences.
Tests should capture number of synapses learned and compare against
theoretically optimal numbers to pass/fail.
HL0a) For debugging, similar to H0. We want to learn a 3 pattern long sequence presented
with noise before and after, with no resets. Two steps of noise will be presented.
The noise will be 20 patterns, presented in random order. Every pattern has a
consecutive set of 5 bits on, so the vector will be 115 bits long. No pattern
shares any columns with the others. These sequences are easy to visualize and is
very useful for debugging.
TM parameters should be the same as B7 except that permanenceDec should be 0.05:
activationThreshold = newSynapseCount
minThreshold = activationThreshold
initialPerm = 0.2
connectedPerm = 0.7
permanenceInc = 0.2
permanenceDec = 0.05
So, this means it should learn a sequence after 4 repetitions. It will take
4 orphan decay steps to get an incorrect synapse to go away completely.
HL0b) Like HL0a, but after the 3-sequence is learned, try to learn a 4-sequence that
builds on the 3-sequence. For example, if learning A-B-C we train also on
D-A-B-C. It should learn that ABC is separate from DABC. Note: currently this
test is disabled in the code. It is a bit tricky to test this. When you present DAB,
you should predict the same columns as when you present AB (i.e. in both cases
C should be predicted). However, the representation for C in DABC should be
different than the representation for C in ABC. Furthermore, when you present
AB, the representation for C should be an OR of the representation in DABC and ABC
since you could also be starting in the middle of the DABC sequence. All this is
actually happening in the code, but verified by visual inspection only.
HL1) Noise + sequence + noise + sequence repeatedly without resets until it has
learned that sequence. Train the TM repeatedly with N random sequences that all
share a single subsequence. Each random sequence can be 10 patterns long,
sharing a subsequence that is 5 patterns long. There should be no resets
between presentations. Inference should then be on that 5 long shared subsequence.
Example (3-long shared subsequence):
A B C D E F G H I J
K L M D E F N O P Q
R S T D E F U V W X
Y Z 1 D E F 2 3 4 5
TM parameters should be the same as HL0.
HL2) Like HL1, but after A B C has learned, try to learn D A B C . It should learn
ABC is separate from DABC.
HL3) Like HL2, but test with resets.
HL4) Like HL1 but with minThreshold high. This should FAIL and learn a ton
of synapses.
HiLo but with true high order sequences embedded in noise
Present 25 sequences in random order with no resets but noise between
sequences (1-20 samples). Learn all 25 sequences. Test global decay vs non-zero
permanenceDec .
Pooling + HiLo Tests [UNIMPLEMENTED]
====================
Needs to be defined.
Global Decay Tests [UNIMPLEMENTED]
==================
Simple tests to ensure global decay is actually working.
Sequence Likelihood Tests
=========================
These tests are in the file TMLikelihood.py
Segment Learning Tests [UNIMPLEMENTED]
======================
Multi-attribute sequence tests.
SL1) Train the TM repeatedly using a single (multiple) sequence plus noise. The
sequence can be relatively short, say 20 patterns. No two consecutive patterns
in the sequence should share columns. Add random noise each time a pattern is
presented. The noise should be different for each presentation and can be equal
to the number of on bits in the pattern. After N iterations of the noisy
sequences, the TM should achieve perfect inference on the true sequence.
There should be resets between each presentation of the sequence.
Check predictions in the sequence only. And test with clean sequences.
Vary percentage of bits that are signal vs noise.
Noise can be a fixed alphabet instead of being randomly generated.
HL2) As above, but with no resets.
Shared Column Tests [UNIMPLEMENTED]
===================
Carefully test what happens when consecutive patterns in a sequence share
columns.
Sequence Noise Tests [UNIMPLEMENTED]
====================
Note: I don't think these will work with the current logic. Need to discuss
whether we want to accommodate sequence noise like this.
SN1) Learn sequence with pooling up to T timesteps. Run inference on a sequence
and occasionally drop elements of a sequence. Inference should still work.
SN2) As above, but occasionally add a random pattern into a sequence.
SN3) A combination of the above two.
Capacity Tests [UNIMPLEMENTED]
==============
These are stress tests that verify that the temporal memory can learn a large
number of sequences and can predict a large number of possible next steps. Some
research needs to be done first to understand the capacity of the system as it
relates to the number of columns, cells per column, etc.
Token Prediction Tests: Test how many predictions of individual tokens we can
superimpose and still recover.
Online Learning Tests [UNIMPLEMENTED]
=====================
These tests will verify that the temporal memory continues to work even if
sequence statistics (and the actual sequences) change slowly over time. The TM
should adapt to the changes and learn to recognize newer sequences (and forget
the older sequences?).
"""
import cPickle
import numpy
import pickle
import pprint
import random
import sys
from numpy import *
from nupic.algorithms import fdrutilities as fdrutils
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
#---------------------------------------------------------------------------------
# Global test configuration.
TEST_CPP_TM = 1 # whether to also exercise the C++ TM (comment says temporarily disabled until it can be updated)
VERBOSITY = 0 # how chatty the unit tests should be
SEED = 33 # the random seed used throughout
TMClass = BacktrackingTM # the TM implementation class under test
checkSynapseConsistency = False # if True, TMs are built with consistency checking (slower)
rgen = numpy.random.RandomState(SEED) # always call this rgen, NOT random

#---------------------------------------------------------------------------------
# Helper routines
#--------------------------------------------------------------------------------
def printOneTrainingVector(x):
print ''.join('1' if k != 0 else '.' for k in x)
def printAllTrainingSequences(trainingSequences, upTo = 99999):
for t in xrange(min(len(trainingSequences[0]), upTo)):
print 't=',t,
for i,trainingSequence in enumerate(trainingSequences):
print "\tseq#",i,'\t',
printOneTrainingVector(trainingSequences[i][t])
def generatePattern(numCols = 100,
                    minOnes = 21,
                    maxOnes = 25,
                    colSet = None,
                    prevPattern = None):
  """Generate a single random test pattern with the given parameters.

  Parameters:
  --------------------------------------------
  numCols:     Number of columns in the pattern.
  minOnes:     The minimum number of 1's in the pattern.
  maxOnes:     Upper bound for the number of 1's (exclusive, per
               rgen.randint semantics).
  colSet:      The set of column indices eligible for the pattern.
               If None, all columns [0, numCols) are eligible.
               (The previous default, a mutable list [], had no
               .difference method and would have crashed if ever used.)
  prevPattern: Pattern to avoid (null intersection with the result);
               None means no restriction.

  Returns a numpy float32 vector of length numCols.
  """
  if colSet is None:
    colSet = set(xrange(numCols))
  if prevPattern is None:
    prevPattern = numpy.array([])
  assert minOnes < maxOnes
  assert maxOnes < numCols
  nOnes = rgen.randint(minOnes, maxOnes)
  # Exclude the columns active in prevPattern so consecutive patterns
  # can be made disjoint.
  candidates = list(colSet.difference(set(prevPattern.nonzero()[0])))
  rgen.shuffle(candidates)
  ind = candidates[:nOnes]
  x = numpy.zeros(numCols, dtype='float32')
  x[ind] = 1
  return x
def buildTrainingSet(numSequences = 2,
                     sequenceLength = 100,
                     pctShared = 0.2,
                     seqGenMode = 'shared sequence',
                     subsequenceStartPos = 10,
                     numCols = 100,
                     minOnes=21,
                     maxOnes = 25,
                     disjointConsecutive =True):
  """Build random high order test sequences.

  Parameters:
  --------------------------------------------
  numSequences:        The number of sequences created.
  sequenceLength:      The length of each sequence.
  pctShared:           The percentage of sequenceLength that is shared across
                       every sequence. If sequenceLength is 100 and pctShared
                       is 0.2, then a subsequence consisting of 20 patterns
                       will be in every sequence. Can also be the keyword
                       'one pattern', in which case a single time step is shared.
  seqGenMode:          What kind of sequence to generate. If contains 'shared'
                       generates shared subsequence. If contains 'no shared',
                       does not generate any shared subsequence. If contains
                       'shuffle', will use common patterns shuffle among the
                       different sequences. If contains 'beginning', will
                       place shared subsequence at the beginning.
  subsequenceStartPos: The position where the shared subsequence starts
  numCols:             Number of columns in each pattern.
  minOnes:             The minimum number of 1's in each pattern.
  maxOnes:             The maximum number of 1's in each pattern.
  disjointConsecutive: Whether to generate disjoint consecutive patterns or not.

  Returns a 2-tuple (trainingSequences, hubEnd) where hubEnd is the time step
  just past the end of the shared subsequence, or -1 if there is none.
  """
  # Calculate the set of column indexes once to be used in each call to generatePattern()
  colSet = set(range(numCols))

  # 'beginning' only makes sense combined with a shared subsequence.
  if 'beginning' in seqGenMode:
    assert 'shared' in seqGenMode and 'no shared' not in seqGenMode

  # A single sequence cannot share anything with another sequence.
  if 'no shared' in seqGenMode or numSequences == 1:
    pctShared = 0.0

  #--------------------------------------------------------------------------------
  # Build shared subsequence
  if 'no shared' not in seqGenMode and 'one pattern' not in seqGenMode:
    sharedSequenceLength = int(pctShared*sequenceLength)
  elif 'one pattern' in seqGenMode:
    sharedSequenceLength = 1
  else:
    sharedSequenceLength = 0
  assert sharedSequenceLength + subsequenceStartPos < sequenceLength

  sharedSequence = []
  for i in xrange(sharedSequenceLength):
    # When disjointConsecutive, pass the previous pattern so the new one
    # shares no columns with it.
    if disjointConsecutive and i > 0:
      x = generatePattern(numCols, minOnes, maxOnes, colSet, sharedSequence[i-1])
    else:
      x = generatePattern(numCols, minOnes, maxOnes, colSet)
    sharedSequence.append(x)

  #--------------------------------------------------------------------------------
  # Build random training set, splicing in the shared subsequence
  trainingSequences = []
  if 'beginning' not in seqGenMode:
    trailingLength = sequenceLength - sharedSequenceLength - subsequenceStartPos
  else:
    trailingLength = sequenceLength - sharedSequenceLength

  for k,s in enumerate(xrange(numSequences)):
    # TODO: implement no repetitions
    if len(trainingSequences) > 0 and 'shuffle' in seqGenMode:
      # 'shuffle' mode: permute the non-shared positions of the previous
      # sequence while keeping the shared subsequence fixed in place.
      r = range(subsequenceStartPos) \
          + range(subsequenceStartPos + sharedSequenceLength, sequenceLength)
      rgen.shuffle(r)
      r = r[:subsequenceStartPos] \
          + range(subsequenceStartPos, subsequenceStartPos + sharedSequenceLength) \
          + r[subsequenceStartPos:]
      sequence = [trainingSequences[k-1][j] for j in r]
    else:
      sequence = []
      # Leading random patterns, unless the shared subsequence goes first.
      if 'beginning' not in seqGenMode:
        for i in xrange(subsequenceStartPos):
          if disjointConsecutive and i > 0:
            x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-1])
          else:
            x = generatePattern(numCols, minOnes, maxOnes, colSet)
          sequence.append(x)
      # Splice in the shared subsequence.
      if 'shared' in seqGenMode and 'no shared' not in seqGenMode:
        sequence.extend(sharedSequence)
      # Trailing random patterns.
      # NOTE(review): the first trailing pattern (i == 0) is not forced to be
      # disjoint from the last shared pattern.
      for i in xrange(trailingLength):
        if disjointConsecutive and i > 0:
          x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-1])
        else:
          x = generatePattern(numCols, minOnes, maxOnes, colSet)
        sequence.append(x)
    assert len(sequence) == sequenceLength
    trainingSequences.append(sequence)

  assert len(trainingSequences) == numSequences
  if VERBOSITY >= 2:
    print "Training Sequences"
    pprint.pprint(trainingSequences)

  if sharedSequenceLength > 0:
    return (trainingSequences, subsequenceStartPos + sharedSequenceLength)
  else:
    return (trainingSequences, -1)
def getSimplePatterns(numOnes, numPatterns):
  """Very simple patterns. Pattern i has bits [i*numOnes, (i+1)*numOnes)
  on; every vector is numPatterns*numOnes bits long, so no two patterns
  share any columns."""
  numCols = numOnes * numPatterns
  patterns = []
  start = 0
  for _ in range(numPatterns):
    v = numpy.zeros(numCols, dtype='float32')
    v[start:start + numOnes] = 1
    patterns.append(v)
    start += numOnes
  return patterns
def buildSimpleTrainingSet(numOnes=5):
  """Two very simple high order sequences for debugging. Each pattern in the
  sequence has a series of 1's in a specific set of columns; the two
  sequences share the middle subsequence p2 p3 p4."""
  p = getSimplePatterns(numOnes, 11)
  seqA = [p[i] for i in (0, 1, 2, 3, 4, 5, 6)]
  seqB = [p[i] for i in (7, 8, 2, 3, 4, 9, 10)]
  return ([seqA, seqB], 5)
def buildAlternatingTrainingSet(numOnes=5):
  """High order sequences that alternate elements. Pattern i has one's in
  i*numOnes to (i+1)*numOnes.
  The sequences are:
  A B A B A C
  A B A B D E
  A B F G H I
  A J K L M N
  """
  p = getSimplePatterns(numOnes, 14)
  indexLists = [
      [0, 1, 0, 1, 0, 2],
      [0, 1, 0, 1, 3, 4],
      [0, 1, 5, 6, 7, 8],
      [0, 9, 10, 11, 12, 13],
  ]
  trainingSequences = [[p[i] for i in idxs] for idxs in indexLists]
  return (trainingSequences, 5)
def buildHL0aTrainingSet(numOnes=5):
  """Simple sequences for HL0. Each pattern in the sequence has a series of
  1's in a specific set of columns.
  There are 23 patterns, p0 to p22.
  The sequence we want to learn is p0->p1->p2, embedded in noise:
  N N p0 p1 p2 N N p0 p1 p2 ...  where N is randomly chosen from p3 to p22.
  Returns ([sequence], [[p0, p1, p2]]).
  """
  p = getSimplePatterns(numOnes, 23)
  def noise():
    # One random noise pattern drawn from p3..p22.
    return p[rgen.randint(3, 23)]
  s = [noise()]
  for _ in xrange(20):
    s += [noise(), p[0], p[1], p[2], noise()]
  return ([s], [[p[0], p[1], p[2]]])
def buildHL0bTrainingSet(numOnes=5):
  """Simple sequences for HL0b. Each pattern in the sequence has a series of 1's
  in a specific set of columns.
  There are 23 patterns, p0 to p22.
  The sequences we want to learn are p1->p2->p3 and p0->p1->p2->p4.
  We create a very long sequence consisting of these two sub-sequences
  intermixed with noise, such as:
  N N p0 p1 p2 p4 N N p1 p2 p3 N N p1 p2 p3
  N is randomly chosen from p5 to p22

  Returns ([sequence], [[p0, p1, p2, p4], [p1, p2, p3]]).
  Side effect: prints the chosen noise indices and S1/S2 markers to stdout.
  """
  numPatterns = 23
  p = getSimplePatterns(numOnes, numPatterns)
  s = []
  # Leading noise pattern.
  s.append(p[rgen.randint(5,numPatterns)])
  for i in xrange(50):
    # Noise, then one of the two embedded sub-sequences (50/50), then noise.
    r = rgen.randint(5,numPatterns)
    print r,
    s.append(p[r])
    if rgen.binomial(1, 0.5) > 0:
      print "S1",
      s.append(p[0])
      s.append(p[1])
      s.append(p[2])
      s.append(p[4])
    else:
      print "S2",
      s.append(p[1])
      s.append(p[2])
      s.append(p[3])
    r = rgen.randint(5,numPatterns)
    s.append(p[r])
    print r,
  print
  return ([s], [ [p[0], p[1], p[2], p[4]], [p[1], p[2], p[3]] ])
# Basic test (creation, pickling, basic run of learning and inference)
def basicTest():
  """Smoke test: create a TM, pickle/unpickle it and verify the parameters
  survive the round trip, then run a few steps of learning and inference on
  random inputs. Failures surface as AssertionErrors."""
  global TMClass, SEED, VERBOSITY, checkSynapseConsistency

  #--------------------------------------------------------------------------------
  # Create TM object
  numberOfCols =10
  cellsPerColumn =3
  initialPerm =.2
  connectedPerm =.8
  minThreshold =2
  newSynapseCount =5
  permanenceInc =.1
  permanenceDec =.05
  permanenceMax =1
  globalDecay =.05
  activationThreshold =4 # low for those basic tests on purpose
  doPooling =True
  segUpdateValidDuration =5
  seed =SEED
  verbosity =VERBOSITY

  tm = TMClass(numberOfCols, cellsPerColumn,
               initialPerm, connectedPerm,
               minThreshold, newSynapseCount,
               permanenceInc, permanenceDec, permanenceMax,
               globalDecay, activationThreshold,
               doPooling, segUpdateValidDuration,
               seed=seed, verbosity=verbosity,
               pamLength = 1000,
               checkSynapseConsistency=checkSynapseConsistency)
  print "Creation ok"

  #--------------------------------------------------------------------------------
  # Save and reload, then check the round-tripped parameters are unchanged.
  pickle.dump(tm, open("test_tm.pkl", "wb"))
  tm2 = pickle.load(open("test_tm.pkl"))
  assert tm2.numberOfCols == numberOfCols
  assert tm2.cellsPerColumn == cellsPerColumn
  print tm2.initialPerm
  # Float parameters compare against numpy.float32 — presumably the TM stores
  # them as float32 internally; verify against the TM implementation.
  assert tm2.initialPerm == numpy.float32(.2)
  assert tm2.connectedPerm == numpy.float32(.8)
  assert tm2.minThreshold == minThreshold
  assert tm2.newSynapseCount == newSynapseCount
  assert tm2.permanenceInc == numpy.float32(.1)
  assert tm2.permanenceDec == numpy.float32(.05)
  assert tm2.permanenceMax == 1
  assert tm2.globalDecay == numpy.float32(.05)
  assert tm2.activationThreshold == activationThreshold
  assert tm2.doPooling == doPooling
  assert tm2.segUpdateValidDuration == segUpdateValidDuration
  assert tm2.seed == SEED
  assert tm2.verbosity == verbosity
  print "Save/load ok"

  #--------------------------------------------------------------------------------
  # Learn on a few random binary vectors.
  for i in xrange(5):
    xi = rgen.randint(0,2,(numberOfCols))
    x = numpy.array(xi, dtype="uint32")
    y = tm.learn(x)

  #--------------------------------------------------------------------------------
  # Infer on random vectors; _checkPrediction is exercised for coverage only,
  # its result is not asserted on here.
  patterns = rgen.randint(0,2,(4,numberOfCols))
  for i in xrange(10):
    xi = rgen.randint(0,2,(numberOfCols))
    x = numpy.array(xi, dtype="uint32")
    y = tm.infer(x)
    if i > 0:
      p = tm._checkPrediction([pattern.nonzero()[0] for pattern in patterns])
  print "basicTest ok"
#---------------------------------------------------------------------------------
# Figure out acceptable patterns if none were passed to us.
def findAcceptablePatterns(tm, t, whichSequence, trainingSequences, nAcceptable = 1):
  """
  Tries to infer the set of acceptable patterns for prediction at the given
  time step and for the give sequence. Acceptable patterns are: the current one,
  plus a certain number of patterns after timeStep, in the sequence that the TM
  is currently tracking. Any other pattern is not acceptable.

  TODO:
  ====
  - Doesn't work for noise cases.
  - Might run in trouble if shared subsequence at the beginning.

  Parameters:
  ==========
  tm                 the whole TM, so that we can look at its parameters
  t                  the current time step
  whichSequence      the sequence we are currently tracking
  trainingSequences  all the training sequences
  nAcceptable        the number of steps forward from the current timeStep
                     we are willing to consider acceptable. In the case of
                     pooling, it is less than or equal to the min of the
                     number of training reps and the segUpdateValidDuration
                     parameter of the TM, depending on the test case.
                     The default value is 1, because by default, the pattern
                     after the current one should always be predictable.

  Return value:
  ============
  acceptablePatterns A list of acceptable patterns for prediction.
  """
  # Determine how many steps forward we want to see in the prediction
  upTo = t + 2 # always predict current and next
  # If the TM is pooling, more steps can be predicted
  if tm.doPooling:
    upTo += min(tm.segUpdateValidDuration, nAcceptable)
  assert upTo <= len(trainingSequences[whichSequence])
  acceptablePatterns = []
  # Check whether we were in a shared subsequence at the beginning.
  # If so, at the point of exiting the shared subsequence (t), we should
  # be predicting multiple patterns for 1 time step, then collapse back
  # to a single sequence.
  if len(trainingSequences) == 2 and \
     (trainingSequences[0][0] == trainingSequences[1][0]).all():
    if (trainingSequences[0][t] == trainingSequences[1][t]).all() \
        and (trainingSequences[0][t+1] != trainingSequences[1][t+1]).any():
      acceptablePatterns.append(trainingSequences[0][t+1])
      acceptablePatterns.append(trainingSequences[1][t+1])
  # Add patterns going forward.
  # NOTE(review): the comprehension variable shadows the parameter t; this is
  # the last use of t so it is harmless, but easy to trip over when editing.
  acceptablePatterns += [trainingSequences[whichSequence][t] \
                         for t in xrange(t,upTo)]
  return acceptablePatterns
def _testSequence(trainingSequences,
                  nTrainingReps = 1,
                  numberOfCols = 40,
                  cellsPerColumn =5,
                  initialPerm =.8,
                  connectedPerm =.7,
                  minThreshold = 11,
                  newSynapseCount =5,
                  permanenceInc =.4,
                  permanenceDec =0.0,
                  permanenceMax =1,
                  globalDecay =0.0,
                  pamLength = 1000,
                  activationThreshold =5,
                  acceptablePatterns = [], # if empty, try to infer what they are
                  doPooling = False,
                  nAcceptable = -1, # if doPooling, number of acceptable steps
                  noiseModel = None,
                  noiseLevel = 0,
                  doResets = True,
                  shouldFail = False,
                  testSequences = None,
                  predJustAfterHubOnly = None,
                  compareToPy = False,
                  nMultiStepPrediction = 0,
                  highOrder = False):
  """Test a single set of sequences once and return the number of
  prediction failures, the number of errors, and the number of perfect
  predictions.

  trainingSequences is the 2-tuple returned by buildTrainingSet; the list of
  sequences is extracted below via trainingSequences[0].

  Returns (numFailures, numStrictErrors, numPerfect, tm), with an extra
  numPerfectAtHub element before tm when predJustAfterHubOnly is not None.
  """
  global BacktrackingTM, SEED, checkSynapseConsistency, VERBOSITY

  numPerfect = 0      # When every column is correct in the prediction
  numStrictErrors = 0 # When at least one column is incorrect
  numFailures = 0     # When > 2 columns are incorrect

  # NOTE(review): trainingSequences is still the (sequences, hubEnd) tuple
  # here, so this is the number of sequences, not a sequence length; the
  # variable is not read afterwards.
  sequenceLength = len(trainingSequences[0])
  segUpdateValidDuration =5
  verbosity = VERBOSITY

  # override default maxSeqLEngth value for high-order sequences
  if highOrder:
    tm = TMClass(numberOfCols, cellsPerColumn,
                 initialPerm, connectedPerm,
                 minThreshold, newSynapseCount,
                 permanenceInc, permanenceDec, permanenceMax,
                 globalDecay, activationThreshold,
                 doPooling, segUpdateValidDuration,
                 seed=SEED, verbosity=verbosity,
                 checkSynapseConsistency=checkSynapseConsistency,
                 pamLength=pamLength,
                 maxSeqLength=0
                 )
  else:
    tm = TMClass(numberOfCols, cellsPerColumn,
                 initialPerm, connectedPerm,
                 minThreshold, newSynapseCount,
                 permanenceInc, permanenceDec, permanenceMax,
                 globalDecay, activationThreshold,
                 doPooling, segUpdateValidDuration,
                 seed=SEED, verbosity=verbosity,
                 checkSynapseConsistency=checkSynapseConsistency,
                 pamLength=pamLength
                 )

  # Optionally build a pure-Python TM for step-by-step comparison.
  if compareToPy:
    # override default maxSeqLEngth value for high-order sequences
    if highOrder:
      py_tm = BacktrackingTM(numberOfCols, cellsPerColumn,
                             initialPerm, connectedPerm,
                             minThreshold, newSynapseCount,
                             permanenceInc, permanenceDec, permanenceMax,
                             globalDecay, activationThreshold,
                             doPooling, segUpdateValidDuration,
                             seed=SEED, verbosity=verbosity,
                             pamLength=pamLength,
                             maxSeqLength=0
                             )
    else:
      py_tm = BacktrackingTM(numberOfCols, cellsPerColumn,
                             initialPerm, connectedPerm,
                             minThreshold, newSynapseCount,
                             permanenceInc, permanenceDec, permanenceMax,
                             globalDecay, activationThreshold,
                             doPooling, segUpdateValidDuration,
                             seed=SEED, verbosity=verbosity,
                             pamLength=pamLength,
                             )

  # Unpack the actual list of sequences from the buildTrainingSet tuple.
  trainingSequences = trainingSequences[0]
  if testSequences == None: testSequences = trainingSequences

  # If no acceptable patterns were supplied, infer them per time step below.
  inferAcceptablePatterns = acceptablePatterns == []

  #--------------------------------------------------------------------------------
  # Learn
  for r in xrange(nTrainingReps):
    if VERBOSITY > 1:
      print "============= Learning round",r,"================="
    for sequenceNum, trainingSequence in enumerate(trainingSequences):
      if VERBOSITY > 1:
        print "============= New sequence ================="
      if doResets:
        tm.reset()
        if compareToPy:
          py_tm.reset()
      for t,x in enumerate(trainingSequence):
        # Optionally XOR binomial noise into the training input.
        if noiseModel is not None and \
           'xor' in noiseModel and 'binomial' in noiseModel \
           and 'training' in noiseModel:
          noise_vector = rgen.binomial(len(x), noiseLevel, (len(x)))
          x = logical_xor(x, noise_vector)
        if VERBOSITY > 2:
          print "Time step",t, "learning round",r, "sequence number", sequenceNum
          print "Input: ",tm.printInput(x)
          print "NNZ:", x.nonzero()
        x = numpy.array(x).astype('float32')
        y = tm.learn(x)
        if compareToPy:
          py_y = py_tm.learn(x)
          # Compare implementations periodically only, for speed.
          if t % 25 == 0: # To track bugs, do that every iteration, but very slow
            assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True
        if VERBOSITY > 3:
          tm.printStates(printPrevious = (VERBOSITY > 4))
          print
      if VERBOSITY > 3:
        print "Sequence finished. Complete state after sequence"
        tm.printCells()
        print

  numPerfectAtHub = 0

  if compareToPy:
    print "End of training"
    assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True

  #--------------------------------------------------------------------------------
  # Infer
  if VERBOSITY > 1: print "============= Inference ================="
  for s,testSequence in enumerate(testSequences):
    if VERBOSITY > 1: print "============= New sequence ================="
    if doResets:
      tm.reset()
      if compareToPy:
        py_tm.reset()
    slen = len(testSequence)
    for t,x in enumerate(testSequence):
      # Generate noise (optional)
      if noiseModel is not None and \
         'xor' in noiseModel and 'binomial' in noiseModel \
         and 'inference' in noiseModel:
        noise_vector = rgen.binomial(len(x), noiseLevel, (len(x)))
        x = logical_xor(x, noise_vector)
      if VERBOSITY > 2: print "Time step",t, '\nInput:', tm.printInput(x)
      x = numpy.array(x).astype('float32')
      y = tm.infer(x)
      if compareToPy:
        py_y = py_tm.infer(x)
        assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True
      # if t == predJustAfterHubOnly:
      #   z = sum(y, axis = 1)
      #   print '\t\t',
      #   print ''.join('.' if z[i] == 0 else '1' for i in xrange(len(z)))
      if VERBOSITY > 3: tm.printStates(printPrevious = (VERBOSITY > 4),
                                       printLearnState = False); print

      # Multi-step prediction error accounting (only when requested).
      if nMultiStepPrediction > 0:
        y_ms = tm.predict(nSteps=nMultiStepPrediction)
        if VERBOSITY > 3:
          print "Multi step prediction at Time step", t
          for i in range(nMultiStepPrediction):
            print "Prediction at t+", i+1
            tm.printColConfidence(y_ms[i])
        # Error Checking
        for i in range(nMultiStepPrediction):
          predictedTimeStep = t+i+1
          if predictedTimeStep < slen:
            input = testSequence[predictedTimeStep].nonzero()[0]
            prediction = y_ms[i].nonzero()[0]
            foundInInput, totalActiveInInput, \
            missingFromInput, totalActiveInPrediction = \
                fdrutils.checkMatch(input, prediction, sparse=True)
            falseNegatives = totalActiveInInput - foundInInput
            falsePositives = missingFromInput
            if VERBOSITY > 2:
              print "Predition from %d to %d" % (t, t+i+1)
              print "\t\tFalse Negatives:", falseNegatives
              print "\t\tFalse Positivies:", falsePositives
            if falseNegatives > 0 or falsePositives > 0:
              numStrictErrors += 1
              if falseNegatives > 0 and VERBOSITY > 1:
                print "Multi step prediction from t=", t, "to t=", t+i+1,\
                      "false negative with error=",falseNegatives,
                print "out of", totalActiveInInput,"ones"
              if falsePositives > 0 and VERBOSITY > 1:
                print "Multi step prediction from t=", t, "to t=", t+i+1,\
                      "false positive with error=",falsePositives,
                print "out of",totalActiveInInput,"ones"
              # A "failure" is a grossly wrong prediction (> 3 bad columns).
              if falsePositives > 3 or falseNegatives > 3:
                numFailures += 1
                # Analyze the failure if we care about it
                if VERBOSITY > 1 and not shouldFail:
                  print 'Input at t=', t
                  print '\t\t',; printOneTrainingVector(testSequence[t])
                  print 'Prediction for t=', t+i+1
                  print '\t\t',; printOneTrainingVector(y_ms[i])
                  print 'Actual input at t=', t+i+1
                  print '\t\t',; printOneTrainingVector(testSequence[t+i+1])

      # Single-step prediction error accounting (skip the last time step,
      # which has no successor to predict).
      if t < slen-1:
        # If no acceptable patterns were passed to us, we need to infer them
        # for the current sequence and time step by looking at the testSequences.
        # nAcceptable is used to reduce the number of automatically determined
        # acceptable patterns.
        if inferAcceptablePatterns:
          acceptablePatterns = findAcceptablePatterns(tm, t, s, testSequences,
                                                      nAcceptable)
        scores = tm._checkPrediction([pattern.nonzero()[0] \
                                      for pattern in acceptablePatterns])
        falsePositives, falseNegatives = scores[0], scores[1]
        # We report an error if FN or FP is > 0.
        # We report a failure if number of FN or number of FP is > 2 for any
        # pattern. We also count the number of perfect predictions.
        if falseNegatives > 0 or falsePositives > 0:
          numStrictErrors += 1
          if falseNegatives > 0 and VERBOSITY > 1:
            print "Pattern",s,"time",t,\
                  "prediction false negative with error=",falseNegatives,
            print "out of",int(testSequence[t+1].sum()),"ones"
          if falsePositives > 0 and VERBOSITY > 1:
            print "Pattern",s,"time",t,\
                  "prediction false positive with error=",falsePositives,
            print "out of",int(testSequence[t+1].sum()),"ones"
          if falseNegatives > 3 or falsePositives > 3:
            numFailures += 1
            # Analyze the failure if we care about it
            if VERBOSITY > 1 and not shouldFail:
              print 'Test sequences'
              if len(testSequences) > 1:
                printAllTrainingSequences(testSequences, t+1)
              else:
                print '\t\t',; printOneTrainingVector(testSequence[t])
                print '\t\t',; printOneTrainingVector(testSequence[t+1])
              print 'Acceptable'
              for p in acceptablePatterns:
                print '\t\t',; printOneTrainingVector(p)
              print 'Output'
              diagnostic = ''
              output = sum(tm.currentOutput,axis=1)
              print '\t\t',; printOneTrainingVector(output)
        else:
          numPerfect += 1
          # Track perfect predictions right after the hub, if requested.
          if predJustAfterHubOnly is not None and predJustAfterHubOnly == t:
            numPerfectAtHub += 1

  if predJustAfterHubOnly is None:
    return numFailures, numStrictErrors, numPerfect, tm
  else:
    return numFailures, numStrictErrors, numPerfect, numPerfectAtHub, tm
def TestB1(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B1"):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [1]:
print "Test "+name+" (sequence memory - 1 repetition - 1 sequence)"
for k in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = 1,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 8,
newSynapseCount = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
activationThreshold = 8,
doPooling = False)
if numFailures == 0:
print "Test "+name+" ok"
else:
print "Test "+name+" failed"
nFailed = nFailed + 1
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return nFailed
def TestB7(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B7"):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [1]:
print "Test "+name+" (sequence memory - 4 repetition - 1 sequence - slow learning)"
for k in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = 4,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
minThreshold = 11,
newSynapseCount = 11,
activationThreshold = 11,
initialPerm = .2,
connectedPerm = .6,
permanenceInc = .2,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
doPooling = False)
if numFailures == 0:
print "Test "+name+" ok"
else:
print "Test "+name+" failed"
nFailed = nFailed + 1
print "numFailures=", numFailures,
print "numStrictErrors=", numStrictErrors,
print "numPerfect=", numPerfect
return nFailed
def TestB2(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B2"):
    """Sequence memory test: training the same sequence a second time must
    not grow the network (same segment and synapse counts as after one pass).

    Trains two independent TMs on the same training set -- one with a single
    pass, one with two passes -- and compares their getSegmentInfo() output.
    Returns the number of failed trials."""
    numCols = 100
    sequenceLength = numUniquePatterns
    nFailed = 0
    for numSequences in [1]: # TestC has multiple sequences
        print "Test",name,"(sequence memory - second repetition of the same sequence" +\
              " should not add synapses)"
        print "Num patterns in sequence =", numUniquePatterns,
        print "cellsPerColumn=",cellsPerColumn
        for k in range(nTests): # Test that configuration several times
            trainingSet = buildTrainingSet(numSequences =numSequences,
                                           sequenceLength = sequenceLength,
                                           pctShared = 0.0,
                                           subsequenceStartPos = 0,
                                           numCols = numCols,
                                           minOnes = 15, maxOnes = 20)
            # Do one pass through the training set
            numFailures1, numStrictErrors1, numPerfect1, tm1 = \
                _testSequence(trainingSet,
                              nTrainingReps = 1,
                              numberOfCols = numCols,
                              cellsPerColumn = cellsPerColumn,
                              initialPerm = .8,
                              connectedPerm = .7,
                              minThreshold = 8,
                              newSynapseCount = 11,
                              permanenceInc = .4,
                              permanenceDec = 0,
                              permanenceMax = 1,
                              globalDecay = .0,
                              activationThreshold = 8)
            # Do two passes through the training set
            numFailures, numStrictErrors, numPerfect, tm2 = \
                _testSequence(trainingSet,
                              nTrainingReps = 2,
                              numberOfCols = numCols,
                              cellsPerColumn = cellsPerColumn,
                              initialPerm = .8,
                              connectedPerm = .7,
                              minThreshold = 8,
                              newSynapseCount = 11,
                              permanenceInc = .4,
                              permanenceDec = 0,
                              permanenceMax = 1,
                              globalDecay = .0,
                              activationThreshold = 8)
            # Check that training with a second pass did not result in more synapses
            segmentInfo1 = tm1.getSegmentInfo()
            segmentInfo2 = tm2.getSegmentInfo()
            # [0] is the total segment count, [1] the total synapse count.
            if (segmentInfo1[0] != segmentInfo2[0]) or \
               (segmentInfo1[1] != segmentInfo2[1]) :
                print "Training twice incorrectly resulted in more segments or synapses"
                print "Number of segments: ", segmentInfo1[0], segmentInfo2[0]
                numFailures += 1
            if numFailures == 0:
                print "Test",name,"ok"
            else:
                print "Test",name,"failed"
                nFailed = nFailed + 1
                print "numFailures=", numFailures
                print "numStrictErrors=", numStrictErrors
                print "numPerfect=", numPerfect
    return nFailed
def TestB3(numUniquePatterns, nTests):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [2,5]:
print "Test B3 (sequence memory - 2 repetitions -", numSequences, "sequences)"
for k in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = 2,
numberOfCols = numCols,
cellsPerColumn = 4,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 11,
activationThreshold = 8,
doPooling = False)
if numFailures == 0:
print "Test B3 ok"
else:
print "Test B3 failed"
nFailed = nFailed + 1
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return nFailed
def TestH0(numOnes = 5,nMultiStepPrediction=0):
cellsPerColumn = 4
print "Higher order test 0 with cellsPerColumn=",cellsPerColumn
trainingSet = buildSimpleTrainingSet(numOnes)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = 20,
numberOfCols = trainingSet[0][0][0].size,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 6,
permanenceInc = .4,
permanenceDec = .2,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 5,
activationThreshold = 4,
doPooling = False,
nMultiStepPrediction=nMultiStepPrediction)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == len(trainingSet[0])*(len(trainingSet[0][0]) - 1):
print "Test PASS"
return 0
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
def TestH(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2],
          pctShared = 0.1, seqGenMode = 'shared sequence', nTrainingReps = 2,
          shouldFail = False, compareToPy = False, highOrder = False):
    """Generic higher-order sequence test: multiple sequences that may share
    a subsequence starting at position 10.

    When shouldFail is True the test PASSES only if the TM fails (used to
    check that an under-provisioned configuration cannot learn the task).
    Returns the number of failed trials."""
    nFailed = 0
    subsequenceStartPos = 10
    assert subsequenceStartPos < sequenceLength
    for numSequences in nSequences:
        print "Higher order test with sequenceLength=",sequenceLength,
        print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,
        print "numSequences=",numSequences, "pctShared=", pctShared
        for k in range(nTests): # Test that configuration several times
            trainingSet = buildTrainingSet(numSequences = numSequences,
                                           sequenceLength = sequenceLength,
                                           pctShared = pctShared, seqGenMode = seqGenMode,
                                           subsequenceStartPos = subsequenceStartPos,
                                           numCols = numCols,
                                           minOnes = 21, maxOnes = 25)
            numFailures, numStrictErrors, numPerfect, tm = \
                _testSequence(trainingSet,
                              nTrainingReps = nTrainingReps,
                              numberOfCols = numCols,
                              cellsPerColumn = cellsPerColumn,
                              initialPerm = .8,
                              connectedPerm = .7,
                              minThreshold = 12,
                              permanenceInc = .4,
                              permanenceDec = .1,
                              permanenceMax = 1,
                              globalDecay = .0,
                              newSynapseCount = 11,
                              activationThreshold = 8,
                              doPooling = False,
                              shouldFail = shouldFail,
                              compareToPy = compareToPy,
                              highOrder = highOrder)
            # Pass when (no failures and we expected success) or
            # (failures and this configuration was expected to fail).
            if numFailures == 0 and not shouldFail \
               or numFailures > 0 and shouldFail:
                print "Test PASS",
                if shouldFail:
                    print '(should fail, and failed)'
                else:
                    print
            else:
                print "Test FAILED"
                nFailed = nFailed + 1
                print "numFailures=", numFailures
                print "numStrictErrors=", numStrictErrors
                print "numPerfect=", numPerfect
    return nFailed
def TestH11(numOnes = 3):
cellsPerColumn = 4
print "Higher order test 11 with cellsPerColumn=",cellsPerColumn
trainingSet = buildAlternatingTrainingSet(numOnes= 3)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = 1,
numberOfCols = trainingSet[0][0][0].size,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 6,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 1,
activationThreshold = 1,
doPooling = False)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == len(trainingSet[0])*(len(trainingSet[0][0]) - 1):
print "Test PASS"
return 0
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2],
            pctShared = 0.02, seqGenMode = 'shared sequence',
            shouldFail = False):
    """
    Still need to test:
    Two overlapping sequences. OK to get new segments but check that we can
    get correct high order prediction after multiple reps.

    Trains three independent TMs on the same data (10, 2 and 1 repetitions)
    and checks that extra repetitions do not grow the network beyond a small
    tolerance (3 segments / 3*15 synapses between the 1-rep and 2-rep runs).
    Returns the number of failed trials.
    """
    print "Test H2a - second repetition of the same sequence should not add synapses"
    nFailed = 0
    subsequenceStartPos = 10
    assert subsequenceStartPos < sequenceLength
    for numSequences in nSequences:
        print "Higher order test with sequenceLength=",sequenceLength,
        print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,"numCols=", numCols
        print "numSequences=",numSequences, "pctShared=", pctShared,
        print "sharing mode=", seqGenMode
        for k in range(nTests): # Test that configuration several times
            trainingSet = buildTrainingSet(numSequences = numSequences,
                                           sequenceLength = sequenceLength,
                                           pctShared = pctShared, seqGenMode = seqGenMode,
                                           subsequenceStartPos = subsequenceStartPos,
                                           numCols = numCols,
                                           minOnes = 21, maxOnes = 25)
            # Reference run: many repetitions with slow learning.
            print "============== 10 ======================"
            numFailures3, numStrictErrors3, numPerfect3, tm3 = \
                _testSequence(trainingSet,
                              nTrainingReps = 10,
                              numberOfCols = numCols,
                              cellsPerColumn = cellsPerColumn,
                              initialPerm = .4,
                              connectedPerm = .7,
                              minThreshold = 12,
                              permanenceInc = .1,
                              permanenceDec = 0.1,
                              permanenceMax = 1,
                              globalDecay = .0,
                              newSynapseCount = 15,
                              activationThreshold = 12,
                              doPooling = False,
                              shouldFail = shouldFail)
            # Two repetitions, fast learning (no decrement).
            print "============== 2 ======================"
            numFailures, numStrictErrors, numPerfect, tm2 = \
                _testSequence(trainingSet,
                              nTrainingReps = 2,
                              numberOfCols = numCols,
                              cellsPerColumn = cellsPerColumn,
                              initialPerm = .8,
                              connectedPerm = .7,
                              minThreshold = 12,
                              permanenceInc = .1,
                              permanenceDec = 0,
                              permanenceMax = 1,
                              globalDecay = .0,
                              newSynapseCount = 15,
                              activationThreshold = 12,
                              doPooling = False,
                              shouldFail = shouldFail)
            # Single repetition, same parameters as the 2-rep run.
            print "============== 1 ======================"
            numFailures1, numStrictErrors1, numPerfect1, tm1 = \
                _testSequence(trainingSet,
                              nTrainingReps = 1,
                              numberOfCols = numCols,
                              cellsPerColumn = cellsPerColumn,
                              initialPerm = .8,
                              connectedPerm = .7,
                              minThreshold = 12,
                              permanenceInc = .1,
                              permanenceDec = 0,
                              permanenceMax = 1,
                              globalDecay = .0,
                              newSynapseCount = 15,
                              activationThreshold = 12,
                              doPooling = False,
                              shouldFail = shouldFail)
            # Check that training with a second pass did not result in more synapses
            # (getSegmentInfo()[0] = segment count, [1] = synapse count).
            segmentInfo1 = tm1.getSegmentInfo()
            segmentInfo2 = tm2.getSegmentInfo()
            if (abs(segmentInfo1[0] - segmentInfo2[0]) > 3) or \
               (abs(segmentInfo1[1] - segmentInfo2[1]) > 3*15) :
                print "Training twice incorrectly resulted in too many segments or synapses"
                print segmentInfo1
                print segmentInfo2
                print tm3.getSegmentInfo()
                tm3.trimSegments()
                print tm3.getSegmentInfo()
                print "Failures for 1, 2, and N reps"
                print numFailures1, numStrictErrors1, numPerfect1
                print numFailures, numStrictErrors, numPerfect
                print numFailures3, numStrictErrors3, numPerfect3
                numFailures += 1
            if numFailures == 0 and not shouldFail \
               or numFailures > 0 and shouldFail:
                print "Test PASS",
                if shouldFail:
                    print '(should fail, and failed)'
                else:
                    print
            else:
                print "Test FAILED"
                nFailed = nFailed + 1
                print "numFailures=", numFailures
                print "numStrictErrors=", numStrictErrors
                print "numPerfect=", numPerfect
    return nFailed
def TestP(sequenceLength, nTests, cellsPerColumn, numCols =300, nSequences =[2],
          pctShared = 0.1, seqGenMode = 'shared subsequence', nTrainingReps = 2):
    """Pooling test: like TestH but with doPooling enabled, requiring a
    perfect prediction for every transition of every sequence.

    Returns the number of failed trials."""
    nFailed = 0
    newSynapseCount = 7
    activationThreshold = newSynapseCount - 2
    minOnes = 1.5 * newSynapseCount
    maxOnes = .3 * numCols / nTrainingReps
    for numSequences in nSequences:
        print "Pooling test with sequenceLength=",sequenceLength,
        print 'numCols=', numCols,
        print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,
        print "numSequences=",numSequences, "pctShared=", pctShared,
        print "nTrainingReps=", nTrainingReps, "minOnes=", minOnes,
        print "maxOnes=", maxOnes
        for k in range(nTests): # Test that configuration several times
            # NOTE(review): this re-assignment is redundant -- minOnes is
            # never changed inside the loop; kept to preserve behavior.
            minOnes = 1.5 * newSynapseCount
            trainingSet = buildTrainingSet(numSequences =numSequences,
                                           sequenceLength = sequenceLength,
                                           pctShared = pctShared, seqGenMode = seqGenMode,
                                           subsequenceStartPos = 10,
                                           numCols = numCols,
                                           minOnes = minOnes, maxOnes = maxOnes)
            numFailures, numStrictErrors, numPerfect, tm = \
                _testSequence(trainingSet,
                              nTrainingReps = nTrainingReps,
                              numberOfCols = numCols,
                              cellsPerColumn = cellsPerColumn,
                              initialPerm = .8,
                              connectedPerm = .7,
                              minThreshold = 11,
                              permanenceInc = .4,
                              permanenceDec = 0,
                              permanenceMax = 1,
                              globalDecay = .0,
                              newSynapseCount = newSynapseCount,
                              activationThreshold = activationThreshold,
                              doPooling = True)
            if numFailures == 0 and \
               numStrictErrors == 0 and \
               numPerfect == numSequences*(sequenceLength - 1):
                print "Test PASS"
            else:
                print "Test FAILED"
                print "numFailures=", numFailures
                print "numStrictErrors=", numStrictErrors
                print "numPerfect=", numPerfect
                nFailed = nFailed + 1
    return nFailed
def TestHL0a(numOnes = 5):
    """HiLo test 0a: single pass over the HL0a training set, then verify the
    trimmed network stays small (<= 20 segments and <= 100 synapses).

    Returns 0 on pass, 1 on failure."""
    cellsPerColumn = 4
    newSynapseCount = 5
    activationThreshold = newSynapseCount
    print "HiLo test 0a with cellsPerColumn=",cellsPerColumn
    # NOTE(review): numOnes is currently unused -- buildHL0aTrainingSet() is
    # called without arguments. Confirm whether it should be forwarded.
    trainingSet, testSet = buildHL0aTrainingSet()
    numCols = trainingSet[0][0].size
    numFailures, numStrictErrors, numPerfect, tm = \
        _testSequence([trainingSet],
                      nTrainingReps = 1,
                      numberOfCols = numCols,
                      cellsPerColumn = cellsPerColumn,
                      initialPerm = .2,
                      connectedPerm = .7,
                      permanenceInc = .2,
                      permanenceDec = 0.05,
                      permanenceMax = 1,
                      globalDecay = .0,
                      minThreshold = activationThreshold,
                      newSynapseCount = newSynapseCount,
                      activationThreshold = activationThreshold,
                      pamLength = 2,
                      doPooling = False,
                      testSequences = testSet)
    tm.trimSegments()
    # getSegmentInfo()[0] = segment count, [1] = synapse count.
    retAfter = tm.getSegmentInfo()
    print retAfter[0], retAfter[1]
    if retAfter[0] > 20:
        print "Too many segments"
        numFailures += 1
    if retAfter[1] > 100:
        print "Too many synapses"
        numFailures += 1
    if numFailures == 0:
        print "Test HL0a ok"
        return 0
    else:
        print "Test HL0a failed"
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect
        return 1
def TestHL0b(numOnes = 5):
    """HiLo test 0b: single pass over the HL0b training set; prints the
    resulting cells for manual inspection.

    Returns 0 on pass, 1 on failure."""
    cellsPerColumn = 4
    newSynapseCount = 5
    activationThreshold = newSynapseCount
    print "HiLo test 0b with cellsPerColumn=",cellsPerColumn
    # NOTE(review): numOnes is currently unused -- buildHL0bTrainingSet() is
    # called without arguments. Confirm whether it should be forwarded.
    trainingSet, testSet = buildHL0bTrainingSet()
    numCols = trainingSet[0][0].size
    print "numCols=", numCols
    numFailures, numStrictErrors, numPerfect, tm = \
        _testSequence([trainingSet],
                      nTrainingReps = 1,
                      numberOfCols = numCols,
                      cellsPerColumn = cellsPerColumn,
                      initialPerm = .2,
                      connectedPerm = .7,
                      permanenceInc = .2,
                      permanenceDec = 0.05,
                      permanenceMax = 1,
                      globalDecay = .0,
                      minThreshold = activationThreshold,
                      newSynapseCount = newSynapseCount,
                      activationThreshold = activationThreshold,
                      doPooling = False,
                      testSequences = testSet)
    tm.trimSegments()
    # retAfter is computed but only printed indirectly via printCells().
    retAfter = tm.getSegmentInfo()
    tm.printCells()
    if numFailures == 0:
        print "Test HL0 ok"
        return 0
    else:
        print "Test HL0 failed"
        print "numFailures=", numFailures
        print "numStrictErrors=", numStrictErrors
        print "numPerfect=", numPerfect
        return 1
def TestHL(sequenceLength, nTests, cellsPerColumn, numCols =200, nSequences =[2],
pctShared = 0.1, seqGenMode = 'shared subsequence', nTrainingReps = 3,
noiseModel = 'xor binomial in learning only', noiseLevel = 0.1,
hiloOn = True):
nFailed = 0
newSynapseCount = 8
activationThreshold = newSynapseCount
minOnes = 1.5 * newSynapseCount
maxOnes = 0.3 * numCols / nTrainingReps
if hiloOn == False:
minThreshold = 0.9
for numSequences in nSequences:
print "Hilo test with sequenceLength=", sequenceLength,
print "cellsPerColumn=", cellsPerColumn, "nTests=", nTests,
print "numSequences=", numSequences, "pctShared=", pctShared,
print "nTrainingReps=", nTrainingReps, "minOnes=", minOnes,
print "maxOnes=", maxOnes,
print 'noiseModel=', noiseModel, 'noiseLevel=', noiseLevel
for k in range(nTests): # Test that configuration several times
minOnes = 1.5 * newSynapseCount
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = pctShared, seqGenMode = seqGenMode,
subsequenceStartPos = 10,
numCols = numCols,
minOnes = minOnes, maxOnes = maxOnes)
numFailures, numStrictErrors, numPerfect, tm = \
_testSequence(trainingSet,
nTrainingReps = nTrainingReps,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .2,
connectedPerm = .7,
minThreshold = activationThreshold,
newSynapseCount = newSynapseCount,
activationThreshold = activationThreshold,
permanenceInc = .2,
permanenceDec = 0.05,
permanenceMax = 1,
globalDecay = .0,
doPooling = False,
noiseModel = noiseModel,
noiseLevel = noiseLevel)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == numSequences*(sequenceLength - 1):
print "Test PASS"
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
nFailed = nFailed + 1
return nFailed
def worker(x):
    """Worker function to use in parallel hub capacity test below.

    x is a (cellsPerColumn, numSequences) pair. Runs two experiments -- one
    with a shared one-pattern hub subsequence, one with no sharing -- and
    returns the flattened results of both."""
    cellsPerColumn, numSequences = x[0], x[1]
    nTrainingReps = 1
    sequenceLength = 10
    numCols = 200
    print 'Started', cellsPerColumn, numSequences
    # Experiment 1: sequences share a single hub pattern at position 5.
    seqGenMode = 'shared subsequence, one pattern'
    subsequenceStartPos = 5
    trainingSet = buildTrainingSet(numSequences = numSequences,
                                   sequenceLength = sequenceLength,
                                   pctShared = .1, seqGenMode = seqGenMode,
                                   subsequenceStartPos = subsequenceStartPos,
                                   numCols = numCols,
                                   minOnes = 21, maxOnes = 25)
    # NOTE: with predJustAfterHubOnly set, _testSequence returns an extra
    # "atHub" element (5 values instead of the usual 4).
    numFailures1, numStrictErrors1, numPerfect1, atHub, tm = \
        _testSequence(trainingSet,
                      nTrainingReps = nTrainingReps,
                      numberOfCols = numCols,
                      cellsPerColumn = cellsPerColumn,
                      initialPerm = .8,
                      connectedPerm = .7,
                      minThreshold = 11,
                      permanenceInc = .4,
                      permanenceDec = 0,
                      permanenceMax = 1,
                      globalDecay = .0,
                      newSynapseCount = 8,
                      activationThreshold = 8,
                      doPooling = False,
                      shouldFail = False,
                      predJustAfterHubOnly = 5)
    # Experiment 2: control run with no shared subsequence.
    seqGenMode = 'no shared subsequence'
    trainingSet = buildTrainingSet(numSequences = numSequences,
                                   sequenceLength = sequenceLength,
                                   pctShared = 0, seqGenMode = seqGenMode,
                                   subsequenceStartPos = 0,
                                   numCols = numCols,
                                   minOnes = 21, maxOnes = 25)
    numFailures2, numStrictErrors2, numPerfect2, tm = \
        _testSequence(trainingSet,
                      nTrainingReps = nTrainingReps,
                      numberOfCols = numCols,
                      cellsPerColumn = cellsPerColumn,
                      initialPerm = .8,
                      connectedPerm = .7,
                      minThreshold = 11,
                      permanenceInc = .4,
                      permanenceDec = 0,
                      permanenceMax = 1,
                      globalDecay = .0,
                      newSynapseCount = 8,
                      activationThreshold = 8,
                      doPooling = False,
                      shouldFail = False)
    print 'Completed',
    print cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub, \
          numFailures2, numStrictErrors2, numPerfect2
    return cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub, \
           numFailures2, numStrictErrors2, numPerfect2
def hubCapacity():
    """
    Study hub capacity. Figure out how many sequences can share a pattern
    for a given number of cells per column until the system fails.
    DON'T RUN IN BUILD SYSTEM!!! (takes too long)

    Fans the worker() experiments out over a 2-process pool across the cross
    product of cellsPerColumn in 1..8 and numSequences in 1,201,401,...,1801,
    and writes one result tuple per line to a text file.
    """
    from multiprocessing import Pool
    import itertools
    print "Hub capacity test"
    # scalar value on predictions by looking at max perm over column
    p = Pool(2)
    results = p.map(worker, itertools.product([1,2,3,4,5,6,7,8], xrange(1,2000,200)))
    f = open('results-numPerfect.11.22.10.txt', 'w')
    for i,r in enumerate(results):
        print >>f, '{%d,%d,%d,%d,%d,%d,%d,%d,%d},' % r
    f.close()
def runTests(testLength = "short"):
    """Run the configured TM test battery.

    testLength is "short", "autobuild" or "long"; "long" enables the extra
    B-series tests and larger workloads. Relies on the module globals
    numUniquePatterns and nTests set in the __main__ block. The many
    if True: / if False: wrappers are manual toggles for test groups."""
    # Data structure to collect results of tests
    # TODO: put numFailures, numStrictErrors and numPerfect in here for reporting
    tests = {}

    # always run this one: if that one fails, we can't do anything
    basicTest()
    print

    #---------------------------------------------------------------------------------
    if testLength == "long":
        tests['B1'] = TestB1(numUniquePatterns, nTests)
        tests['B2'] = TestB2(numUniquePatterns, nTests)
        tests['B8'] = TestB7(4, nTests, cellsPerColumn = 4, name="B8")
        tests['B10'] = TestB2(numUniquePatterns, nTests, cellsPerColumn = 4,
                              name = "B10")

    # Run these always
    tests['B3'] = TestB3(numUniquePatterns, nTests)
    tests['B6'] = TestB1(numUniquePatterns, nTests,
                         cellsPerColumn = 4, name="B6")
    tests['B7'] = TestB7(numUniquePatterns, nTests)
    print

    #---------------------------------------------------------------------------------
    #print "Test H11"
    #tests['H11'] = TestH11()

    if True:
        print "Test H0"
        tests['H0'] = TestH0(numOnes = 5)
        print "Test H2"
        #tests['H2'] = TestH(numUniquePatterns, nTests, cellsPerColumn = 4,
        #                    nTrainingReps = numUniquePatterns, compareToPy = False)
        print "Test H3"
        tests['H3'] = TestH(numUniquePatterns, nTests,
                            numCols = 200,
                            cellsPerColumn = 20,
                            pctShared = 0.3, nTrainingReps=numUniquePatterns,
                            compareToPy = False,
                            highOrder = True)
        print "Test H4" # Produces 3 false positives, but otherwise fine.
        # TODO: investigate initial false positives?
        tests['H4'] = TestH(numUniquePatterns, nTests,
                            cellsPerColumn = 20,
                            pctShared = 0.1,
                            seqGenMode='shared subsequence at beginning')

    if True:
        print "Test H0 with multistep prediction"
        tests['H0_MS'] = TestH0(numOnes = 5, nMultiStepPrediction=2)

    if True:
        print "Test H1" # - Should Fail
        tests['H1'] = TestH(numUniquePatterns, nTests,
                            cellsPerColumn = 1, nTrainingReps = 1,
                            shouldFail = True)
        # Also fails in --long mode. See H2 above
        #print "Test H2a"
        #tests['H2a'] = TestH2a(numUniquePatterns,
        #                       nTests, pctShared = 0.02, numCols = 300, cellsPerColumn = 4)

    if False:
        print "Test H5" # make sure seqs are good even with shuffling, fast learning
        tests['H5'] = TestH(numUniquePatterns, nTests,
                            cellsPerColumn = 10,
                            pctShared = 0.0,
                            seqGenMode='shuffle, no shared subsequence')

        print "Test H6" # should work
        tests['H6'] = TestH(numUniquePatterns, nTests,
                            cellsPerColumn = 10,
                            pctShared = 0.4,
                            seqGenMode='shuffle, shared subsequence')

        # Try with 2 sequences, then 3 sequences interleaved so that there is
        # always a shared pattern, but it belongs to 2 different sequences each
        # time!
        #print "Test H7"
        #tests['H7'] = TestH(numUniquePatterns, nTests,
        #                    cellsPerColumn = 10,
        #                    pctShared = 0.4,
        #                    seqGenMode='shuffle, shared subsequence')

        # tricky: if start predicting in middle of subsequence, several predictions
        # are possible
        #print "Test H8"
        #tests['H8'] = TestH(numUniquePatterns, nTests,
        #                    cellsPerColumn = 10,
        #                    pctShared = 0.4,
        #                    seqGenMode='shuffle, shared subsequence')

        print "Test H9" # plot hub capacity
        tests['H9'] = TestH(numUniquePatterns, nTests,
                            cellsPerColumn = 10,
                            pctShared = 0.4,
                            seqGenMode='shuffle, shared subsequence')

        #print "Test H10" # plot
        #tests['H10'] = TestH(numUniquePatterns, nTests,
        #                     cellsPerColumn = 10,
        #                     pctShared = 0.4,
        #                     seqGenMode='shuffle, shared subsequence')
    print

    #---------------------------------------------------------------------------------
    if False:
        print "Test P1"
        tests['P1'] = TestP(numUniquePatterns, nTests,
                            cellsPerColumn = 4,
                            pctShared = 0.0,
                            seqGenMode = 'no shared subsequence',
                            nTrainingReps = 3)

    if False:
        print "Test P2"
        tests['P2'] = TestP(numUniquePatterns, nTests,
                            cellsPerColumn = 4,
                            pctShared = 0.0,
                            seqGenMode = 'no shared subsequence',
                            nTrainingReps = 5)

        print "Test P3"
        tests['P3'] = TestP(numUniquePatterns, nTests,
                            cellsPerColumn = 4,
                            pctShared = 0.0,
                            seqGenMode = 'no shared subsequence',
                            nSequences = [2] if testLength == 'short' else [2,5],
                            nTrainingReps = 5)

        print "Test P4"
        tests['P4'] = TestP(numUniquePatterns, nTests,
                            cellsPerColumn = 4,
                            pctShared = 0.0,
                            seqGenMode = 'shared subsequence',
                            nSequences = [2] if testLength == 'short' else [2,5],
                            nTrainingReps = 5)
    print

    #---------------------------------------------------------------------------------
    if True:
        print "Test HL0a"
        tests['HL0a'] = TestHL0a(numOnes = 5)

    if False:
        print "Test HL0b"
        tests['HL0b'] = TestHL0b(numOnes = 5)

        print "Test HL1"
        tests['HL1'] = TestHL(sequenceLength = 20,
                              nTests = nTests,
                              numCols = 100,
                              nSequences = [1],
                              nTrainingReps = 3,
                              cellsPerColumn = 1,
                              seqGenMode = 'no shared subsequence',
                              noiseModel = 'xor binomial in learning only',
                              noiseLevel = 0.1,
                              doResets = False)

        print "Test HL2"
        tests['HL2'] = TestHL(numUniquePatterns = 20,
                              nTests = nTests,
                              numCols = 200,
                              nSequences = [1],
                              nTrainingReps = 3,
                              cellsPerColumn = 1,
                              seqGenMode = 'no shared subsequence',
                              noiseModel = 'xor binomial in learning only',
                              noiseLevel = 0.1,
                              doResets = False)

        print "Test HL3"
        tests['HL3'] = TestHL(numUniquePatterns = 30,
                              nTests = nTests,
                              numCols = 200,
                              nSequences = [2],
                              pctShared = 0.66,
                              nTrainingReps = 3,
                              cellsPerColumn = 1,
                              seqGenMode = 'shared subsequence',
                              noiseModel = None,
                              noiseLevel = 0.0,
                              doResets = True)

        print "Test HL4"
        tests['HL4'] = TestHL(numUniquePatterns = 30,
                              nTests = nTests,
                              numCols = 200,
                              nSequences = [2],
                              pctShared = 0.66,
                              nTrainingReps = 3,
                              cellsPerColumn = 1,
                              seqGenMode = 'shared subsequence',
                              noiseModel = None,
                              noiseLevel = 0.0,
                              doResets = False)

        print "Test HL5"
        tests['HL5'] = TestHL(numUniquePatterns = 30,
                              nTests = nTests,
                              numCols = 200,
                              nSequences = [2],
                              pctShared = 0.66,
                              nTrainingReps = 3,
                              cellsPerColumn = 1,
                              seqGenMode = 'shared subsequence',
                              noiseModel = 'xor binomial in learning only',
                              noiseLevel = 0.1,
                              doResets = False)

        print "Test HL6"
        tests['HL6'] = nTests - TestHL(numUniquePatterns = 20,
                                       nTests = nTests,
                                       numCols = 200,
                                       nSequences = [1],
                                       nTrainingReps = 3,
                                       cellsPerColumn = 1,
                                       seqGenMode = 'no shared subsequence',
                                       noiseModel = 'xor binomial in learning only',
                                       noiseLevel = 0.1,
                                       doResets = True,
                                       hiloOn = False)
    print

    #---------------------------------------------------------------------------------
    # Aggregate: each test returned its number of failed trials.
    nFailures = 0
    for k,v in tests.iteritems():
        nFailures = nFailures + v
    if nFailures > 0: # 1 to account for H1
        print "There are failed tests"
        print "Test\tn failures"
        for k,v in tests.iteritems():
            print k, "\t", v
        assert 0
    else:
        print "All tests pass"

    #---------------------------------------------------------------------------------
    # Keep
    if False:
        import hotshot, hotshot.stats
        prof = hotshot.Profile("profile.prof")
        prof.runcall(TestB2, numUniquePatterns=100, nTests=2)
        prof.close()
        stats = hotshot.stats.load("profile.prof")
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(50)
if __name__=="__main__":
    # TEST_CPP_TM, SEED and VERBOSITY are module globals defined above this
    # chunk -- TODO confirm their defaults at the top of the file.
    if not TEST_CPP_TM:
        print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
        print "!! WARNING: C++ TM testing is DISABLED until it can be updated."
        print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"

    # Three different test lengths are passed in through the command line.
    # Developer tests use --short. Autobuild does not pass in anything.
    # Acceptance tests pass in --long. testLength reflects these possibilities
    # as "autobuild", "short", and "long"
    testLength = "autobuild"

    # Scan command line arguments to see what to do for the seed
    # TODO: make default be a random seed, once we're sure it will pass reliably!
    for i,arg in enumerate(sys.argv):
        if 'seed' in arg:
            try:
                # used specified seed
                # NOTE(review): raises IndexError (uncaught) when 'seed' is
                # the last argument with no value following it.
                SEED = int(sys.argv[i+1])
            except ValueError as e:
                # random seed
                SEED = numpy.random.randint(100)
        if 'verbosity' in arg:
            VERBOSITY = int(sys.argv[i+1])
        if 'help' in arg:
            print "TMTest.py --short|long --seed number|'rand' --verbosity number"
            sys.exit()
        if "short" in arg:
            testLength = "short"
        if "long" in arg:
            testLength = "long"

    rgen = numpy.random.RandomState(SEED) # always call this rgen, NOT random

    # Setup the severity and length of the tests
    if testLength == "short":
        numUniquePatterns = 50
        nTests = 1
    elif testLength == "autobuild":
        print "Running autobuild tests"
        numUniquePatterns = 50
        nTests = 1
    elif testLength == "long":
        numUniquePatterns = 100
        nTests = 3

    print "TM tests", testLength, "numUniquePatterns=", numUniquePatterns, "nTests=", nTests,
    print "seed=", SEED
    print

    # In --long mode, first exercise the pure-Python TM implementation.
    if testLength == "long":
        print 'Testing Python TM'
        TMClass = BacktrackingTM
        runTests(testLength)

    if testLength != 'long':
        checkSynapseConsistency = False
    else:
        # Setting this to True causes test to take way too long
        # Temporarily turned off so we can investigate
        checkSynapseConsistency = False

    if TEST_CPP_TM:
        print 'Testing C++ TM'
        TMClass = BacktrackingTMCPP
        runTests(testLength)
|
HewlettPackard/oneview-ansible | refs/heads/master | library/oneview_certificates_server.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_certificates_server
short_description: Manage OneView Server Certificate resources.
description:
- Provides an interface to manage Server Certificate resources. Can create, update, and delete.
version_added: "2.4"
requirements:
- "python >= 3.4.2"
- "hpeOneView >= 5.4.0"
author: "Venkatesh Ravula (@VenkateshRavula)"
options:
state:
description:
- Indicates the desired state for the Server Certificate resource.
C(present) will ensure data properties are compliant with OneView.
C(absent) will remove the resource from OneView, if it exists.
choices: ['present', 'absent']
data:
description:
- List with the Server Certificate properties.
required: true
extends_documentation_fragment:
- oneview
- oneview.validateetag
'''
EXAMPLES = '''
- name: Create a Server Certificate
oneview_certificates_server:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: present
name: "172.18.13.11"
data:
certificateDetails:
- aliasName: 'vcenter'
base64Data: '--- Certificate ---'
- name: Update the Server Certificate name to 'vcenter Renamed'
oneview_certificates_server:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: present
name: "172.18.13.11"
data:
name: 'vcenter renamed'
certificateDetails:
- aliasName: 'vcenter'
base64Data: '--- Certificate ---'
- name: Ensure that the Server Certificate is absent
oneview_certificates_server:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
state: absent
name: "172.18.13.11"
data:
alias_name: 'vcenter'
'''
RETURN = '''
certificate_server:
description: Has the facts about the managed OneView Server Certificate.
returned: On state 'present'. Can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule
class CertificatesServerModule(OneViewModule):
    """Ansible module implementation managing OneView Server Certificate
    resources: create/update on state 'present', delete on state 'absent'."""
    MSG_CREATED = 'Server Certificate created successfully.'
    MSG_UPDATED = 'Server Certificate updated successfully.'
    MSG_DELETED = 'Server Certificate deleted successfully.'
    MSG_ALREADY_PRESENT = 'Server Certificate is already present.'
    MSG_ALREADY_ABSENT = 'Server Certificate is already absent.'
    RESOURCE_FACT_NAME = 'certificate_server'

    def __init__(self):
        additional_arg_spec = dict(data=dict(required=True, type='dict'),
                                   name=dict(required=False, type='str'),
                                   state=dict(
                                       required=True,
                                       choices=['present', 'absent']))

        super(CertificatesServerModule, self).__init__(additional_arg_spec=additional_arg_spec, validate_etag_support=True)
        self.__set_current_resource(self.oneview_client.certificates_server)

    def execute_module(self):
        """Dispatch on the requested state; returns the result dict that
        OneViewModule.run() reports back to Ansible."""
        if self.state == 'present':
            return self.resource_present(self.RESOURCE_FACT_NAME)
        elif self.state == 'absent':
            return self.resource_absent()

    def __set_current_resource(self, resource_client):
        """Cache the resource client and look up any existing certificate by
        its alias name (the module's 'name' parameter)."""
        self.resource_client = resource_client
        aliasname = self.module.params.get('name')
        if aliasname:
            # Fetch once and reuse: the original called get_by_alias_name()
            # twice, doubling the REST round-trips for the same lookup.
            resource = self.resource_client.get_by_alias_name(aliasname)
            if resource:
                self.current_resource = resource
def main():
    """Module entry point: build the module object and hand control to the
    OneViewModule run loop."""
    module = CertificatesServerModule()
    module.run()


if __name__ == '__main__':
    main()
|
darina/omim | refs/heads/master | tools/python/InstrumentsTraceParser.py | 24 | #!/usr/bin/env python2.7
from __future__ import print_function
import struct
import sys
import numpy
class Analyzer:
    """Accumulates statistics from an Instruments energy-trace file.

    The binary format is a flat sequence of 24-byte records, each three
    big-endian doubles:
        time since the beginning of the measurement : double
        unknown and irrelevant field : double
        momentary consumption calculated for the current time segment : double
    """
    def __init__(self):
        self.duration = 0.0    # elapsed time of the last record parsed
        self.consumption = []  # per-segment consumption values
        self.mean = 0.0        # mean of per-segment average rates
        self.std = 0.0         # standard deviation of per-segment rates
        self.avg = 0.0         # total consumption divided by total duration
        self.averages = []     # consumption / segment-duration, per record

    def read_file(self, file_path):
        """Parse the trace at *file_path* and update the statistics.

        Bug fixes versus the original:
        - the file is opened in binary mode ("rb"); text mode corrupts the
          binary payload on some platforms and breaks under Python 3;
        - the loop bound was len(binary) - 24, which silently dropped the
          final record (and read nothing at all from a one-record file).
        """
        with open(file_path, "rb") as f:
            binary = bytearray(f.read())
        # Ignore a trailing partial record, if any.
        usable = len(binary) - len(binary) % 24
        for i in range(0, usable, 24):
            current_duration, _, current_consumption = \
                struct.unpack_from(">ddd", binary, i)
            if not current_duration > self.duration:
                print("Unexpected elapsed time value, lower than the previous one.")
                sys.exit(2)  # should never happen: the file is written sequentially
            self.averages.append(
                current_consumption / (current_duration - self.duration))
            self.duration = current_duration
            self.consumption.append(current_consumption)
        self.calculate_stats()

    def calculate_stats(self):
        """Recompute mean/std of the segment rates and the overall average."""
        if not self.averages or self.duration <= 0.0:
            # Empty trace: keep the zero-initialized statistics rather than
            # dividing by zero or averaging an empty array.
            return
        self.mean = numpy.mean(self.averages)
        self.std = numpy.std(self.averages)
        self.avg = sum(self.consumption) / self.duration
if __name__ == "__main__":
    # Each command-line argument is a trace file; print its aggregate stats.
    for file_path in sys.argv[1:]:
        analyzer = Analyzer()
        analyzer.read_file(file_path)
        print("{}\n\tavg: {}\n\tmean: {}\n\tstd: {}".format(file_path, analyzer.avg, analyzer.mean, analyzer.std))
|
faust93/franco_geehrc_f93 | refs/heads/nightlies-4.4 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# futex(2) operation codes and flags, mirroring <linux/futex.h>.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
    """Return the arithmetic mean of a running total over n samples."""
    return total / n
def nsecs(secs, nsecs):
    """Combine a (seconds, nanoseconds) pair into one nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
    """Return the whole-seconds part of a nanosecond count."""
    return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
    """Return the sub-second remainder of a nanosecond count."""
    return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    """Format a nanosecond count as 'seconds.nanoseconds'.

    Bug fix: the previous version had a stray trailing comma after the
    formatted string, so it returned a 1-tuple instead of the string.
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Fold value into dict[key], tracking (min, max, running avg, count).

    Note: the "avg" slot is a smoothed running average, not a true mean.
    (Parameter names are kept for compatibility even though `dict` shadows
    the builtin.)
    """
    # `key not in dict` works on Python 2 and 3; dict.has_key() was removed in 3.x.
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        avg = (avg + value) / 2
        dict[key] = (min, max, avg, count + 1)
def clear_term():
    """Clear the terminal by printing the ANSI home + erase-display codes."""
    ansi_home_and_clear = "\x1b[H" + "\x1b[2J"
    print(ansi_home_and_clear)
audit_package_warned = False

try:
    import audit
    # Map uname machine names to audit's machine-id constants.
    machine_to_id = {
            'x86_64': audit.MACH_86_64,
            'alpha' : audit.MACH_ALPHA,
            'ia64'  : audit.MACH_IA64,
            'ppc'   : audit.MACH_PPC,
            'ppc64' : audit.MACH_PPC64,
            's390'  : audit.MACH_S390,
            's390x' : audit.MACH_S390X,
            'i386'  : audit.MACH_X86,
            'i586'  : audit.MACH_X86,
            'i686'  : audit.MACH_X86,
    }
    try:
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except AttributeError:
        # Older audit bindings do not define MACH_ARMEB.
        pass
    machine_id = machine_to_id[os.uname()[4]]
except (ImportError, KeyError, AttributeError):
    # audit is missing, the machine is not in the table, or os.uname is
    # unavailable on this platform.
    if not audit_package_warned:
        audit_package_warned = True
        # Parenthesized so this works under both Python 2 and 3
        # (the original used the Python-2-only print statement).
        print("Install the audit-libs-python package to get syscall names")
def syscall_name(id):
    """Return the symbolic name of syscall *id*, or str(id) as a fallback."""
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # audit/machine_id may be undefined (see the import guard above) or
        # the id may be unknown; fall back to the numeric representation.
        # `except Exception` (not bare except) avoids swallowing
        # KeyboardInterrupt/SystemExit.
        return str(id)
def strerror(nr):
    """Return the errno symbol (e.g. 'ENOENT') for nr, or a fallback string."""
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        # Narrowed from a bare except: only an unknown errno is expected here.
        return "Unknown %d errno" % nr
|
lenw/ansible-modules-core | refs/heads/devel | cloud/amazon/ec2_lc.py | 47 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_lc
short_description: Create or delete AWS Autoscaling Launch Configurations
description:
    - Can create or delete AWS Autoscaling Configurations
- Works with the ec2_asg module to manage Autoscaling Groups
notes:
- "Amazon ASG Autoscaling Launch Configurations are immutable once created, so modifying the configuration
after it is changed will not modify the launch configuration on AWS. You must create a new config and assign
it to the ASG instead."
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for configuration
required: true
instance_type:
description:
- instance type to use for the instance
required: true
default: null
aliases: []
image_id:
description:
- The AMI unique identifier to be used for the group
required: false
key_name:
description:
- The SSH key name to be used for access to managed instances
required: false
security_groups:
description:
      - A list of security groups into which instances should be launched
required: false
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
volumes:
description:
- a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
required: false
default: null
aliases: []
user_data:
description:
- opaque blob of data which is made available to the ec2 instance
required: false
default: null
aliases: []
kernel_id:
description:
- Kernel id for the EC2 instance
required: false
default: null
aliases: []
spot_price:
description:
- The spot price you are bidding. Only applies for an autoscaling group with spot instances.
required: false
default: null
instance_monitoring:
description:
- whether instances in group are launched with detailed monitoring.
required: false
default: false
aliases: []
assign_public_ip:
description:
- Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC.
required: false
aliases: []
version_added: "1.8"
ramdisk_id:
description:
- A RAM disk id for the instances.
required: false
default: null
aliases: []
version_added: "1.8"
instance_profile_name:
description:
- The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instances.
required: false
default: null
aliases: []
version_added: "1.8"
ebs_optimized:
description:
- Specifies whether the instance is optimized for EBS I/O (true) or not (false).
required: false
default: false
aliases: []
version_added: "1.8"
classic_link_vpc_id:
description:
- Id of ClassicLink enabled VPC
required: false
default: null
version_added: "2.0"
classic_link_vpc_security_groups:
description:
- A list of security group id's with which to associate the ClassicLink VPC instances.
required: false
default: null
version_added: "2.0"
extends_documentation_fragment: aws
"""
EXAMPLES = '''
- ec2_lc:
name: special
image_id: ami-XXX
key_name: default
security_groups: ['group', 'group2' ]
instance_type: t1.micro
volumes:
- device_name: /dev/sda1
volume_size: 100
device_type: io1
iops: 3000
delete_on_termination: true
'''
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
import boto.ec2.autoscale
from boto.ec2.autoscale import LaunchConfiguration
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_block_device(module, volume):
    """Validate a volume dict and turn it into a boto BlockDeviceType."""
    # Not aware of a way to determine this programatically
    # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
    MAX_IOPS_TO_SIZE_RATIO = 30
    has_snapshot = 'snapshot' in volume
    has_ephemeral = 'ephemeral' in volume
    if not has_snapshot and not has_ephemeral and 'volume_size' not in volume:
        module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
    if has_snapshot and volume.get('device_type') == 'io1' and 'iops' not in volume:
        module.fail_json(msg='io1 volumes must have an iops value set')
    if has_ephemeral and has_snapshot:
        module.fail_json(msg='Cannot set both ephemeral and snapshot')
    return BlockDeviceType(snapshot_id=volume.get('snapshot'),
                           ephemeral_name=volume.get('ephemeral'),
                           size=volume.get('volume_size'),
                           volume_type=volume.get('device_type'),
                           delete_on_termination=volume.get('delete_on_termination', False),
                           iops=volume.get('iops'))
def create_launch_config(connection, module):
    """Create the launch configuration if it does not already exist.

    Exits the module via module.exit_json() with the (possibly pre-existing)
    launch configuration's attributes; `changed` is True only when a new
    configuration was actually created.
    """
    name = module.params.get('name')
    image_id = module.params.get('image_id')
    key_name = module.params.get('key_name')
    security_groups = module.params['security_groups']
    user_data = module.params.get('user_data')
    volumes = module.params['volumes']
    instance_type = module.params.get('instance_type')
    spot_price = module.params.get('spot_price')
    instance_monitoring = module.params.get('instance_monitoring')
    assign_public_ip = module.params.get('assign_public_ip')
    kernel_id = module.params.get('kernel_id')
    ramdisk_id = module.params.get('ramdisk_id')
    instance_profile_name = module.params.get('instance_profile_name')
    ebs_optimized = module.params.get('ebs_optimized')
    classic_link_vpc_id = module.params.get('classic_link_vpc_id')
    classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups')
    bdm = BlockDeviceMapping()
    if volumes:
        for volume in volumes:
            if 'device_name' not in volume:
                module.fail_json(msg='Device name must be set for volume')
            # Minimum volume size is 1GB. We'll use volume size explicitly set to 0
            # to be a signal not to create this volume
            if 'volume_size' not in volume or int(volume['volume_size']) > 0:
                bdm[volume['device_name']] = create_block_device(module, volume)
    lc = LaunchConfiguration(
        name=name,
        image_id=image_id,
        key_name=key_name,
        security_groups=security_groups,
        user_data=user_data,
        block_device_mappings=[bdm],
        instance_type=instance_type,
        kernel_id=kernel_id,
        spot_price=spot_price,
        instance_monitoring=instance_monitoring,
        associate_public_ip_address=assign_public_ip,
        ramdisk_id=ramdisk_id,
        instance_profile_name=instance_profile_name,
        ebs_optimized=ebs_optimized,
        classic_link_vpc_security_groups=classic_link_vpc_security_groups,
        classic_link_vpc_id=classic_link_vpc_id,
    )
    launch_configs = connection.get_all_launch_configurations(names=[name])
    changed = False
    if not launch_configs:
        try:
            connection.create_launch_configuration(lc)
            launch_configs = connection.get_all_launch_configurations(names=[name])
            changed = True
        # "as e" replaces the Python-2-only "except E, e" comma syntax.
        except BotoServerError as e:
            module.fail_json(msg=str(e))
    result = dict(
        (a[0], a[1]) for a in vars(launch_configs[0]).items()
        if a[0] not in ('connection', 'created_time', 'instance_monitoring', 'block_device_mappings')
    )
    result['created_time'] = str(launch_configs[0].created_time)
    # Looking at boto's launchconfig.py, it looks like this could be a boolean
    # value or an object with an enabled attribute. The enabled attribute
    # could be a boolean or a string representation of a boolean. Since
    # I can't test all permutations myself to see if my reading of the code is
    # correct, have to code this *very* defensively
    if launch_configs[0].instance_monitoring is True:
        result['instance_monitoring'] = True
    else:
        try:
            result['instance_monitoring'] = module.boolean(launch_configs[0].instance_monitoring.enabled)
        except AttributeError:
            result['instance_monitoring'] = False
    if launch_configs[0].block_device_mappings is not None:
        result['block_device_mappings'] = []
        for bdm in launch_configs[0].block_device_mappings:
            result['block_device_mappings'].append(dict(device_name=bdm.device_name, virtual_name=bdm.virtual_name))
            if bdm.ebs is not None:
                result['block_device_mappings'][-1]['ebs'] = dict(snapshot_id=bdm.ebs.snapshot_id, volume_size=bdm.ebs.volume_size)
    module.exit_json(changed=changed, name=result['name'], created_time=result['created_time'],
                     image_id=result['image_id'], arn=result['launch_configuration_arn'],
                     security_groups=result['security_groups'],
                     instance_type=result['instance_type'],
                     result=result)
def delete_launch_config(connection, module):
    """Delete the named launch configuration when it exists."""
    name = module.params.get('name')
    matches = connection.get_all_launch_configurations(names=[name])
    if not matches:
        module.exit_json(changed=False)
    else:
        matches[0].delete()
        module.exit_json(changed=True)
def main():
    """Module entry point: build the argument spec, connect, dispatch on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            image_id=dict(type='str'),
            key_name=dict(type='str'),
            security_groups=dict(type='list'),
            user_data=dict(type='str'),
            kernel_id=dict(type='str'),
            volumes=dict(type='list'),
            instance_type=dict(type='str'),
            state=dict(default='present', choices=['present', 'absent']),
            spot_price=dict(type='float'),
            ramdisk_id=dict(type='str'),
            instance_profile_name=dict(type='str'),
            ebs_optimized=dict(default=False, type='bool'),
            associate_public_ip_address=dict(type='bool'),
            instance_monitoring=dict(default=False, type='bool'),
            assign_public_ip=dict(type='bool'),
            classic_link_vpc_security_groups=dict(type='list'),
            classic_link_vpc_id=dict(type='str')
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    try:
        connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
    # "as e" replaces the Python-2-only comma syntax. NOTE(review):
    # StandardError itself is Python-2-only; this module targets Python 2.
    except (boto.exception.NoAuthHandlerFound, StandardError) as e:
        module.fail_json(msg=str(e))
    state = module.params.get('state')
    if state == 'present':
        create_launch_config(connection, module)
    elif state == 'absent':
        delete_launch_config(connection, module)

main()
|
guewen/odoo | refs/heads/master | addons/base_setup/res_config.py | 38 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import re
from openerp.report.render.rml2pdf import customfonts
class base_config_settings(osv.osv_memory):
    # Transient model backing the "General Settings" wizard; each module_*
    # boolean (un)installs the corresponding addon when the settings are applied.
    _name = 'base.config.settings'
    _inherit = 'res.config.settings'
    _columns = {
        'module_multi_company': fields.boolean('Manage multiple companies',
            help='Work in multi-company environments, with appropriate security access between companies.\n'
                '-This installs the module multi_company.'),
        'module_share': fields.boolean('Allow documents sharing',
            help="""Share or embbed any screen of openerp."""),
        'module_portal': fields.boolean('Activate the customer portal',
            help="""Give your customers access to their documents."""),
        'module_auth_oauth': fields.boolean('Use external authentication providers, sign in with google, facebook, ...'),
        'module_base_import': fields.boolean("Allow users to import data from CSV files"),
        'module_google_drive': fields.boolean('Attach Google documents to any record',
                                              help="""This installs the module google_docs."""),
        'module_google_calendar': fields.boolean('Allow the users to synchronize their calendar  with Google Calendar',
                                                 help="""This installs the module google_calendar."""),
        # Company-wide default font used by the RML report engine.
        'font': fields.many2one('res.font', string="Report Font", domain=[('mode', 'in', ('Normal', 'Regular', 'all', 'Book'))],
            help="Set the font into the report header, it will be used as default font in the RML reports of the user company"),
    }
    _defaults= {
        # Default to the current user's company font.
        'font': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.font.id,
    }
    def open_company(self, cr, uid, ids, context=None):
        """Open the current user's company in a form view."""
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        return {
            'type': 'ir.actions.act_window',
            'name': 'Your Company',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'res.company',
            'res_id': user.company_id.id,
            'target': 'current',
        }
    def _change_header(self, header,font):
        """ Replace default fontname use in header and setfont tag """
        # First rewrite fontName="..." attributes, then <setFont name="..."> tags.
        default_para = re.sub('fontName.?=.?".*"', 'fontName="%s"'% font,header)
        return re.sub('(<setFont.?name.?=.?)(".*?")(.)', '\g<1>"%s"\g<3>'% font,default_para)
    def set_base_defaults(self, cr, uid, ids, context=None):
        """Persist the chosen font on the company and rewrite its RML headers."""
        ir_model_data = self.pool.get('ir.model.data')
        wizard = self.browse(cr, uid, ids, context)[0]
        if wizard.font:
            user = self.pool.get('res.users').browse(cr, uid, uid, context)
            font_name = wizard.font.name
            user.company_id.write({'font': wizard.font.id,'rml_header': self._change_header(user.company_id.rml_header,font_name), 'rml_header2': self._change_header(user.company_id.rml_header2, font_name), 'rml_header3': self._change_header(user.company_id.rml_header3, font_name)})
        return {}
    def act_discover_fonts(self, cr, uid, ids, context=None):
        """Trigger a rescan of fonts available on the server."""
        return self.pool.get("res.font").font_scan(cr, uid, context=context)
# Preferences wizard for Sales & CRM.
# It is defined here because it is inherited independently in modules sale, crm,
# plugin_outlook and plugin_thunderbird.
class sale_config_settings(osv.osv_memory):
    # Preferences wizard for Sales & CRM; defined here (base_setup) because it
    # is inherited independently by the sale, crm and plugin modules.
    _name = 'sale.config.settings'
    _inherit = 'res.config.settings'
    _columns = {
        'module_web_linkedin': fields.boolean('Get contacts automatically from linkedIn',
            help="""When you create a new contact (person or company), you will be able to load all the data from LinkedIn (photos, address, etc)."""),
        'module_crm': fields.boolean('CRM'),
        'module_sale' : fields.boolean('SALE'),
        'module_plugin_thunderbird': fields.boolean('Enable Thunderbird plug-in',
            help='The plugin allows you archive email and its attachments to the selected '
                 'OpenERP objects. You can select a partner, or a lead and '
                 'attach the selected mail as a .eml file in '
                 'the attachment of a selected record. You can create documents for CRM Lead, '
                 'Partner from the selected emails.\n'
                 '-This installs the module plugin_thunderbird.'),
        'module_plugin_outlook': fields.boolean('Enable Outlook plug-in',
            help='The Outlook plugin allows you to select an object that you would like to add '
                 'to your email and its attachments from MS Outlook. You can select a partner, '
                 'or a lead object and archive a selected email into an OpenERP mail message with attachments.\n'
                 '-This installs the module plugin_outlook.'),
        'module_mass_mailing': fields.boolean(
            'Manage mass mailing campaigns',
            help='Get access to statistics with your mass mailing, manage campaigns.'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
nevir/plexability | refs/heads/master | extern/depot_tools/git_try.py | 18 | #!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper for trychange.py for git checkout."""
import logging
import sys
import breakpad # pylint: disable=W0611
from scm import GIT
import subprocess2
import third_party.upload
import trychange
import git_cl
def GetRietveldIssueNumber():
  """Return the Rietveld issue number recorded for the current branch, or None."""
  try:
    key = 'branch.%s.rietveldissue' % GIT.GetBranch('.')
    return GIT.Capture(['config', key], '.').strip()
  except subprocess2.CalledProcessError:
    return None
def GetRietveldPatchsetNumber():
  """Return the Rietveld patchset number recorded for the current branch, or None."""
  try:
    key = 'branch.%s.rietveldpatchset' % GIT.GetBranch('.')
    return GIT.Capture(['config', key], '.').strip()
  except subprocess2.CalledProcessError:
    return None
def GetRietveldServerUrl():
  """Return the configured Rietveld server URL, or None when unset."""
  try:
    url = GIT.Capture(['config', 'rietveld.server'], '.')
  except subprocess2.CalledProcessError:
    return None
  return url.strip()
if __name__ == '__main__':
  args = sys.argv[1:]
  patchset = GetRietveldPatchsetNumber()
  if patchset:
    args.extend([
        '--issue', GetRietveldIssueNumber(),
        '--patchset', patchset,
    ])
  else:
    rietveld_url = GetRietveldServerUrl()
    if rietveld_url:
      # Reuse the value fetched above instead of shelling out to git again.
      args.extend(['--rietveld_url', rietveld_url])
  try:
    cl = git_cl.Changelist()
    change = cl.GetChange(cl.GetUpstreamBranch(), None)
    # Hack around a limitation in logging.
    logging.getLogger().handlers = []
    sys.exit(trychange.TryChange(
        args, change, swallow_exception=False,
        prog='git try',
        extra_epilog='\n'
                     'git try will diff against your tracked branch and will '
                     'detect your rietveld\n'
                     'code review if you are using git-cl\n'))
  # "as e" replaces the Python-2-only "except E, e" comma syntax.
  except third_party.upload.ClientLoginError as e:
    print('Got an exception while trying to log in to Rietveld.')
    print(str(e))
|
eddyb/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/wptserve/wptserve/pipes.py | 38 | from cgi import escape
import gzip as gzip_module
import re
import time
import types
import uuid
from cStringIO import StringIO
def resolve_content(response):
    """Materialize the full response body as a single byte string."""
    chunks = response.iter_content(read_file=True)
    return b"".join(chunks)
class Pipeline(object):
    """A parsed chain of pipe functions applied to a response in order."""

    # Registry of available pipe functions, populated by the @pipe decorator.
    pipes = {}

    def __init__(self, pipe_string):
        self.pipe_functions = self.parse(pipe_string)

    def parse(self, pipe_string):
        """Turn 'name(arg,...)|name2' syntax into [(func, [args...]), ...]."""
        parsed = []
        for token in PipeTokenizer().tokenize(pipe_string):
            if not token:
                break
            kind, value = token
            if kind == "function":
                parsed.append((self.pipes[value], []))
            elif kind == "argument":
                parsed[-1][1].append(value)
        return parsed

    def __call__(self, request, response):
        for func, args in self.pipe_functions:
            response = func(request, response, *args)
        return response
class PipeTokenizer(object):
    # Hand-written state machine that yields ("function", name) and
    # ("argument", value) tokens from a 'name(a,b)|name2' pipe string,
    # followed by a final None sentinel.
    def __init__(self):
        #This whole class can likely be replaced by some regexps
        self.state = None
    def tokenize(self, string):
        """Yield tokens from *string*; the current state function produces each one."""
        self.string = string
        self.state = self.func_name_state
        self._index = 0
        while self.state:
            yield self.state()
        yield None
    def get_char(self):
        # Return the next character, or None at end of input.
        if self._index >= len(self.string):
            return None
        rv = self.string[self._index]
        self._index += 1
        return rv
    def func_name_state(self):
        # Accumulate a function name until '(' (switch to argument state),
        # '|' (next function) or end of input.
        rv = ""
        while True:
            char = self.get_char()
            if char is None:
                self.state = None
                if rv:
                    return ("function", rv)
                else:
                    return None
            elif char == "(":
                self.state = self.argument_state
                return ("function", rv)
            elif char == "|":
                if rv:
                    return ("function", rv)
            else:
                rv += char
    def argument_state(self):
        # Accumulate an argument until ',' (next argument), ')' (back to
        # function-name state) or end of input; backslash escapes one char.
        rv = ""
        while True:
            char = self.get_char()
            if char is None:
                self.state = None
                return ("argument", rv)
            elif char == "\\":
                # NOTE(review): get_escape() returns None when the input ends
                # right after a backslash, so `rv += ...` would raise
                # TypeError before the `rv is None` check below — confirm.
                rv += self.get_escape()
                if rv is None:
                    #This should perhaps be an error instead
                    return ("argument", rv)
            elif char == ",":
                return ("argument", rv)
            elif char == ")":
                self.state = self.func_name_state
                return ("argument", rv)
            else:
                rv += char
    def get_escape(self):
        # Translate \n, \r, \t; any other escaped character maps to itself.
        char = self.get_char()
        escapes = {"n": "\n",
                   "r": "\r",
                   "t": "\t"}
        return escapes.get(char, char)
class pipe(object):
    # Decorator that registers a function in Pipeline.pipes and converts its
    # string arguments using the given converters; wrap a converter in opt()
    # to make the corresponding argument optional.
    def __init__(self, *arg_converters):
        self.arg_converters = arg_converters
        self.max_args = len(self.arg_converters)
        self.min_args = 0
        opt_seen = False
        # Required converters must all precede optional (opt-wrapped) ones.
        for item in self.arg_converters:
            if not opt_seen:
                if isinstance(item, opt):
                    opt_seen = True
                else:
                    self.min_args += 1
            else:
                if not isinstance(item, opt):
                    raise ValueError("Non-optional argument cannot follow optional argument")
    def __call__(self, f):
        def inner(request, response, *args):
            # Validate the argument count against the converter list.
            if not (self.min_args <= len(args) <= self.max_args):
                raise ValueError("Expected between %d and %d args, got %d" %
                                 (self.min_args, self.max_args, len(args)))
            # The genexp's loop variable f shadows the decorated f only inside
            # the generator's own scope, so the outer call below is unaffected.
            arg_values = tuple(f(x) for f, x in zip(self.arg_converters, args))
            return f(request, response, *arg_values)
        Pipeline.pipes[f.__name__] = inner
        #We actually want the undecorated function in the main namespace
        return f
class opt(object):
    # Marker wrapper: flags a pipe argument converter as optional; calling
    # the wrapper simply delegates to the wrapped converter.
    def __init__(self, f):
        self.f = f
    def __call__(self, arg):
        return self.f(arg)
def nullable(func):
    """Wrap a converter so the literal string "null" (any case) maps to None."""
    def convert(arg):
        if arg.lower() == "null":
            return None
        return func(arg)
    return convert
def boolean(arg):
    """Parse "true"/"1" or "false"/"0" (case-insensitive); raise ValueError otherwise."""
    lowered = arg.lower()
    if lowered in ("true", "1"):
        return True
    if lowered in ("false", "0"):
        return False
    raise ValueError
@pipe(int)
def status(request, response, code):
    """Alter the status code.

    :param code: Status code to use for the response."""
    response.status = code
    return response
@pipe(str, str, opt(boolean))
def header(request, response, name, value, append=False):
    """Set a HTTP header.

    Replaces any existing HTTP header of the same name unless
    append is set, in which case the header is appended without
    replacement.

    :param name: Name of the header to set.
    :param value: Value to use for the header.
    :param append: True if existing headers should not be replaced
    """
    if append:
        response.headers.append(name, value)
    else:
        response.headers.set(name, value)
    return response
@pipe(str)
def trickle(request, response, delays):
    """Send the response in parts, with time delays.

    :param delays: A string of delays and amounts, in bytes, of the
                   response to send. Each component is separated by
                   a colon. Amounts in bytes are plain integers, whilst
                   delays are floats prefixed with a single d e.g.
                   d1:100:d2
                   Would cause a 1 second delay, would then send 100 bytes
                   of the file, and then cause a 2 second delay, before sending
                   the remainder of the file.

                   If the last token is of the form rN, instead of sending the
                   remainder of the file, the previous N instructions will be
                   repeated until the whole file has been sent e.g.
                   d1:100:d2:r2
                   Causes a delay of 1s, then 100 bytes to be sent, then a 2s delay
                   and then a further 100 bytes followed by a two second delay
                   until the response has been fully sent.
    """
    def parse_delays():
        # Parse e.g. "d1:100:r2" into [("delay", 1.0), ("bytes", 100),
        # ("repeat", 2)].
        parts = delays.split(":")
        rv = []
        for item in parts:
            if item.startswith("d"):
                item_type = "delay"
                item = item[1:]
                value = float(item)
            elif item.startswith("r"):
                item_type = "repeat"
                value = int(item[1:])
                # Repeat counts must be even (delay/bytes pairs).
                if not value % 2 == 0:
                    raise ValueError
            else:
                item_type = "bytes"
                value = int(item)
            # NOTE(review): this merge branch does `rv[-1][1] += value` on a
            # tuple, which would raise TypeError if two adjacent components
            # had the same type — confirm whether that input is reachable.
            if len(rv) and rv[-1][0] == item_type:
                rv[-1][1] += value
            else:
                rv.append((item_type, value))
        return rv
    delays = parse_delays()
    if not delays:
        return response
    content = resolve_content(response)
    # One-element list so the nested generator can mutate the offset
    # (Python 2 has no `nonlocal`).
    offset = [0]
    def add_content(delays, repeat=False):
        # Lazily yield byte chunks, sleeping for "delay" items; a trailing
        # "repeat" item re-runs the previous instructions until exhausted.
        for i, (item_type, value) in enumerate(delays):
            if item_type == "bytes":
                yield content[offset[0]:offset[0] + value]
                offset[0] += value
            elif item_type == "delay":
                time.sleep(value)
            elif item_type == "repeat":
                if i != len(delays) - 1:
                    continue
                while offset[0] < len(content):
                    for item in add_content(delays[-(value + 1):-1], True):
                        yield item
        if not repeat and offset[0] < len(content):
            yield content[offset[0]:]
    response.content = add_content(delays)
    return response
@pipe(nullable(int), opt(nullable(int)))
def slice(request, response, start, end=None):
    """Send a byte range of the response body

    :param start: The starting offset. Follows python semantics including
                  negative numbers.
    :param end: The ending offset, again with python semantics and None
                (spelled "null" in a query string) to indicate the end of
                the file.
    """
    # Note: intentionally shadows the builtin `slice` — the function name is
    # the pipe's registered name.
    content = resolve_content(response)
    response.content = content[start:end]
    return response
class ReplacementTokenizer(object):
    # Tokenizes {{...}} substitution bodies into ("var", ...), ("ident", ...)
    # and ("index", ...) tuples. The ident/index/var callables are used as
    # re.Scanner callbacks, so their first argument is the scanner, not self.
    def ident(scanner, token):
        return ("ident", token)
    def index(scanner, token):
        # Strip the surrounding brackets; numeric indices become ints,
        # everything else a text key (py2-only `unicode`).
        token = token[1:-1]
        try:
            token = int(token)
        except ValueError:
            token = unicode(token, "utf8")
        return ("index", token)
    def var(scanner, token):
        # Drop the trailing ':' of a "$name:" assignment prefix.
        token = token[:-1]
        return ("var", token)
    def tokenize(self, string):
        return self.scanner.scan(string)[0]
    scanner = re.Scanner([(r"\$\w+:", var),
                          (r"\$?\w+(?:\(\))?", ident),
                          (r"\[[^\]]*\]", index)])
class FirstWrapper(object):
    """Dict-like view over request params: returns the first value, or ""."""

    def __init__(self, params):
        self.params = params

    def __getitem__(self, key):
        try:
            first = self.params.first(key)
        except KeyError:
            return ""
        return first
@pipe(opt(nullable(str)))
def sub(request, response, escape_type="html"):
    """Substitute environment information about the server and request into the script.

    :param escape_type: String detailing the type of escaping to use. Known values are
                        "html" and "none", with "html" the default for historic reasons.

    The format is a very limited template language. Substitutions are
    enclosed by {{ and }}. There are several avaliable substitutions:

    host
      A simple string value and represents the primary host from which the
      tests are being run.
    domains
      A dictionary of available domains indexed by subdomain name.
    ports
      A dictionary of lists of ports indexed by protocol.
    location
      A dictionary of parts of the request URL. Valid keys are
      'server, 'scheme', 'host', 'hostname', 'port', 'path' and 'query'.
      'server' is scheme://host:port, 'host' is hostname:port, and query
      includes the leading '?', but other delimiters are omitted.
    headers
      A dictionary of HTTP headers in the request.
    GET
      A dictionary of query parameters supplied with the request.
    uuid()
      A pesudo-random UUID suitable for usage with stash

    So for example in a setup running on localhost with a www
    subdomain and a http server on ports 80 and 81::

      {{host}} => localhost
      {{domains[www]}} => www.localhost
      {{ports[http][1]}} => 81

    It is also possible to assign a value to a variable name, which must start with
    the $ character, using the ":" syntax e.g.

      {{$id:uuid()}}

    Later substitutions in the same file may then refer to the variable
    by name e.g.

      {{$id}}
    """
    # Materialize the body, run the template engine over it, and replace it.
    content = resolve_content(response)
    new_content = template(request, content, escape_type=escape_type)
    response.content = new_content
    return response
def template(request, content, escape_type="html"):
    """Expand {{...}} substitutions in *content* against the request.

    Python 2 only: relies on `unicode` and `types.StringTypes`.
    """
    #TODO: There basically isn't any error handling here
    tokenizer = ReplacementTokenizer()
    # Variables assigned via {{$name:...}}, visible to later substitutions.
    variables = {}
    def config_replacement(match):
        # Called by re.sub for each {{...}} occurrence; returns its expansion.
        content, = match.groups()
        tokens = tokenizer.tokenize(content)
        if tokens[0][0] == "var":
            variable = tokens[0][1]
            tokens = tokens[1:]
        else:
            variable = None
        assert tokens[0][0] == "ident" and all(item[0] == "index" for item in tokens[1:]), tokens
        field = tokens[0][1]
        # Resolution order: previously assigned variables, then the known
        # builtin fields, then the server config.
        if field in variables:
            value = variables[field]
        elif field == "headers":
            value = request.headers
        elif field == "GET":
            value = FirstWrapper(request.GET)
        elif field in request.server.config:
            value = request.server.config[tokens[0][1]]
        elif field == "location":
            value = {"server": "%s://%s:%s" % (request.url_parts.scheme,
                                               request.url_parts.hostname,
                                               request.url_parts.port),
                     "scheme": request.url_parts.scheme,
                     "host": "%s:%s" % (request.url_parts.hostname,
                                        request.url_parts.port),
                     "hostname": request.url_parts.hostname,
                     "port": request.url_parts.port,
                     "path": request.url_parts.path,
                     "pathname": request.url_parts.path,
                     "query": "?%s" % request.url_parts.query}
        elif field == "uuid()":
            value = str(uuid.uuid4())
        elif field == "url_base":
            value = request.url_base
        else:
            raise Exception("Undefined template variable %s" % field)
        # Apply any [index] accessors in order.
        for item in tokens[1:]:
            value = value[item[1]]
        assert isinstance(value, (int,) + types.StringTypes), tokens
        if variable is not None:
            variables[variable] = value
        escape_func = {"html": lambda x:escape(x, quote=True),
                       "none": lambda x:x}[escape_type]
        #Should possibly support escaping for other contexts e.g. script
        #TODO: read the encoding of the response
        return escape_func(unicode(value)).encode("utf-8")
    template_regexp = re.compile(r"{{([^}]*)}}")
    new_content = template_regexp.sub(config_replacement, content)
    return new_content
@pipe()
def gzip(request, response):
    """This pipe gzip-encodes response data.

    It sets (or overwrites) these HTTP headers:
    Content-Encoding is set to gzip
    Content-Length is set to the length of the compressed content
    """
    content = resolve_content(response)
    response.headers.set("Content-Encoding", "gzip")
    # StringIO here is cStringIO.StringIO (Python 2), so it holds bytes.
    out = StringIO()
    with gzip_module.GzipFile(fileobj=out, mode="w") as f:
        f.write(content)
    response.content = out.getvalue()
    response.headers.set("Content-Length", len(response.content))
    return response
|
faywong/FFPlayer | refs/heads/trunk | project/jni/python/src/Demo/threads/Coroutine.py | 42 | # Coroutine implementation using Python threads.
#
# Combines ideas from Guido's Generator module, and from the coroutine
# features of Icon and Simula 67.
#
# To run a collection of functions as coroutines, you need to create
# a Coroutine object to control them:
# co = Coroutine()
# and then 'create' a subsidiary object for each function in the
# collection:
# cof1 = co.create(f1 [, arg1, arg2, ...]) # [] means optional,
# cof2 = co.create(f2 [, arg1, arg2, ...]) #... not list
# cof3 = co.create(f3 [, arg1, arg2, ...])
# etc. The functions need not be distinct; 'create'ing the same
# function multiple times gives you independent instances of the
# function.
#
# To start the coroutines running, use co.tran on one of the create'd
# functions; e.g., co.tran(cof2). The routine that first executes
# co.tran is called the "main coroutine". It's special in several
# respects: it existed before you created the Coroutine object; if any of
# the create'd coroutines exits (does a return, or suffers an unhandled
# exception), EarlyExit error is raised in the main coroutine; and the
# co.detach() method transfers control directly to the main coroutine
# (you can't use co.tran() for this because the main coroutine doesn't
# have a name ...).
#
# Coroutine objects support these methods:
#
# handle = .create(func [, arg1, arg2, ...])
# Creates a coroutine for an invocation of func(arg1, arg2, ...),
# and returns a handle ("name") for the coroutine so created. The
# handle can be used as the target in a subsequent .tran().
#
# .tran(target, data=None)
# Transfer control to the create'd coroutine "target", optionally
# passing it an arbitrary piece of data. To the coroutine A that does
# the .tran, .tran acts like an ordinary function call: another
# coroutine B can .tran back to it later, and if it does A's .tran
# returns the 'data' argument passed to B's tran. E.g.,
#
# in coroutine coA in coroutine coC in coroutine coB
# x = co.tran(coC) co.tran(coB) co.tran(coA,12)
# print x # 12
#
# The data-passing feature is taken from Icon, and greatly cuts
# the need to use global variables for inter-coroutine communication.
#
# .back( data=None )
# The same as .tran(invoker, data=None), where 'invoker' is the
# coroutine that most recently .tran'ed control to the coroutine
# doing the .back. This is akin to Icon's "&source".
#
# .detach( data=None )
# The same as .tran(main, data=None), where 'main' is the
# (unnameable!) coroutine that started it all. 'main' has all the
# rights of any other coroutine: upon receiving control, it can
# .tran to an arbitrary coroutine of its choosing, go .back to
# the .detach'er, or .kill the whole thing.
#
# .kill()
# Destroy all the coroutines, and return control to the main
# coroutine. None of the create'ed coroutines can be resumed after a
# .kill(). An EarlyExit exception does a .kill() automatically. It's
# a good idea to .kill() coroutines you're done with, since the
# current implementation consumes a thread for each coroutine that
# may be resumed.
import thread
import sync
class _CoEvent:
    # Handle for one coroutine: pairs the coroutine's function with the
    # sync.event used to suspend/resume its thread. Python 2 only
    # (func_name, __cmp__, cmp).
    def __init__(self, func):
        self.f = func          # None identifies the main coroutine
        self.e = sync.event()  # the thread blocks on this event until resumed
    def __repr__(self):
        if self.f is None:
            return 'main coroutine'
        else:
            return 'coroutine for func ' + self.f.func_name
    def __hash__(self):
        # Identity-based hashing/ordering so handles can be dict keys.
        return id(self)
    def __cmp__(x,y):
        return cmp(id(x), id(y))
    def resume(self):
        # Wake the thread blocked in wait().
        self.e.post()
    def wait(self):
        # Block until resumed, then reset the event for the next suspension.
        self.e.wait()
        self.e.clear()
# Raised inside subsidiary coroutines when the whole group has been killed.
class Killed(Exception): pass
# Raised in the main coroutine when a create'd coroutine returns or fails.
class EarlyExit(Exception): pass
class Coroutine:
    """Controller for a group of coroutines implemented on top of threads.

    See the module header comment for the full create/tran/back/detach/kill
    semantics. Modernized to forms valid on both Python 2 and 3: `f(*args)`
    instead of apply(), `in` instead of dict.has_key(), and call-style raise
    instead of the comma form.
    """
    def __init__(self):
        self.active = self.main = _CoEvent(None)
        # Maps each coroutine to the coroutine that most recently tran'ed to it.
        self.invokedby = {self.main: None}
        self.killed = 0
        self.value = None           # datum carried by the most recent tran
        self.terminated_by = None   # coroutine whose return/exception ended the group
    def create(self, func, *args):
        """Create (but do not start) a coroutine for func(*args); return its handle."""
        me = _CoEvent(func)
        self.invokedby[me] = None
        thread.start_new_thread(self._start, (me,) + args)
        return me
    def _start(self, me, *args):
        # Thread body: wait to be resumed the first time, run the function,
        # and kill the group if the function returns or raises.
        me.wait()
        if not self.killed:
            try:
                try:
                    me.f(*args)
                except Killed:
                    pass
            finally:
                if not self.killed:
                    self.terminated_by = me
                    self.kill()
    def kill(self):
        """Destroy all coroutines; wake every blocked thread so it can exit."""
        if self.killed:
            raise TypeError('kill() called on dead coroutines')
        self.killed = 1
        for coroutine in self.invokedby.keys():
            coroutine.resume()
    def back(self, data=None):
        """Transfer control back to whichever coroutine invoked the active one."""
        return self.tran( self.invokedby[self.active], data )
    def detach(self, data=None):
        """Transfer control to the (unnameable) main coroutine."""
        return self.tran( self.main, data )
    def tran(self, target, data=None):
        """Transfer control (and optionally *data*) to coroutine *target*."""
        if target not in self.invokedby:
            raise TypeError('.tran target %r is not an active coroutine' % (target,))
        if self.killed:
            raise TypeError('.tran target %r is killed' % (target,))
        self.value = data
        me = self.active
        self.invokedby[target] = me
        self.active = target
        target.resume()
        me.wait()
        # Woken again: either a normal tran back to us, or the group died.
        if self.killed:
            if self.main is not me:
                raise Killed
        if self.terminated_by is not None:
            raise EarlyExit('%r terminated early' % (self.terminated_by,))
        return self.value
# end of module
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.