| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
#
# THIS FILE IS PART OF THE JOKOSHER PROJECT AND LICENSED UNDER THE GPL. SEE
# THE 'COPYING' FILE FOR DETAILS
#
# EffectPresets.py
#
# This module implements support for effects presets. These presets are used
# to store settings for single effects and multiple effects strung together
# (called a 'chain').
#
# The way this works is that we have a LADSPA_FACTORY_REGISTRY filled with
# the system's LADSPA effects, LADSPA_NAME_MAP which maps LADSPA element
# factory names (such as ladspa-delay-5s) to the effect name (such as
# Simple Delay) and self.effectpresetsregistry which contains a generated
# dictionary of effects. This dictionary is searched with list comprehensions
# to get the relevant presets out.
#
#-------------------------------------------------------------------------------
import pygst
pygst.require("0.10")
import gst
import gobject
import xml.dom.minidom as xml
import os
import Globals
from Utils import *
import glob
import string
#=========================================================================
class EffectPresets(gobject.GObject):
"""
This class implements support for effects presets. These presets are used
to store settings for single effects and multiple effects strung together
(called a 'chain').
Signals:
"single-preset" -- The waveform date for this event has changed.
"chain-preset" -- The starting position of this event has changed.
"""
__gsignals__ = {
"single-preset" : ( gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, () ),
"chain-preset" : ( gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, () )
}
#_____________________________________________________________________
def __init__(self):
"""
Creates a new instance of EffectPresets. If needed, it populates the
LADSPA and effect presets registries.
"""
gobject.GObject.__init__(self)
# Version of the preset files xml format
Globals.EFFECT_PRESETS_VERSION = "0.2"
"""
This is the main dictionary of presets. It has the following structure when filled:
effectpresetregistry[presetType][elementName][presetname][property]
where:
presetType = "instruments" or "effects"
elementName = unique ladspa or instrument name (i.e. ladspa-eq or guitar)
presetName = name of the preset (i.e. Chorus + Delay)
property = a specific preset property (i.e. dependencies or file)
*Note: all these 4 fields are dictionaries
Diagram:
effectpresetregistry
|
+--instruments
| |
| +--guitar
| | |
| | +--Chorus + Delay
| | | |
| | | +--instrument: guitar
| | | +--dependencies: ["effect1", "effect2"]
| | | +--file: guitar - Chorus + Delay.jpreset
| | |
| | +--Heavy Metal
| | |
| | +-- (...)
| |
| +--audiofile
| |
| +--Delay chamber
| | |
| | +-- (...)
| |
| +--Hum removal
| |
| +-- (...)
|
+--effects
|
+--ladspa-eq
| |
| +--Rock
| | |
| | +--dependencies: ["effect1", "effect2"]
| | +--file: ladspa-eq - Rock.jpreset
| |
| +--Jazz
| | |
| | +-- (...)
| |
| +--Pop
| |
| +-- (...)
|
+--ladspa-chorus
|
+--Full depth
| |
| +-- (...)
|
+--Bubbly dream
|
+-- (...)
"""
self.effectpresetregistry = {}
# string used to separate the preset type from its name when generating
# a preset filename
self.separator = " - "
# fill the different data structures with information if necessary. The LADSPA
# structures are part of Globals.py
if not Globals.LADSPA_NAME_MAP or not Globals.LADSPA_FACTORY_REGISTRY:
self.FillLADSPARegistry()
self.FillEffectsPresetsRegistry()
#_____________________________________________________________________
def SaveSingleEffect(self, label, effectdict, effectelement, effecttype):
"""
This method will write a single effect preset to a preset file.
Parameters:
label -- the name to save the preset under.
effectdict -- the effect dictionary.
effectelement -- the effect that the user selected.
effecttype -- the type of the effect the user selected.
"""
self.effectelement = effectelement
self.effecttype = effecttype
if not Globals.EFFECT_PRESETS_PATH:
raise Exception("No preset save path specified!")
doc = xml.Document()
head = doc.createElement("JokosherPreset")
doc.appendChild(head)
head.setAttribute("version", Globals.EFFECT_PRESETS_VERSION)
effectblock = doc.createElement("Effect")
#effectblock.setAttribute("element", effectelement)
#effectblock.setAttribute("effectype", effecttype)
head.appendChild(effectblock)
paramsblock = doc.createElement("Parameters")
effectblock.appendChild(paramsblock)
paramslist = ["effectelement", "effecttype"]
StoreParametersToXML(self, doc, paramsblock, paramslist)
settingsblock = doc.createElement("Settings")
effectblock.appendChild(settingsblock)
StoreDictionaryToXML(doc, settingsblock, effectdict)
filename = self._PresetFilename(effectelement, label)
presetfile = open(Globals.EFFECT_PRESETS_PATH + filename, "w")
presetfile.write(doc.toprettyxml())
presetfile.close()
self.emit("single-preset")
#_____________________________________________________________________
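# Sketch of a possible caller (assumed, not part of the original module):
#   presets = EffectPresets()
#   presets.SaveSingleEffect("Deep Echo", {"delay": 0.5, "feedback": 0.3},
#                            "ladspa-delay-5s", "ladspa")
# This would write "ladspa-delay-5s - Deep Echo.jpreset" under
# Globals.EFFECT_PRESETS_PATH and emit the "single-preset" signal.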
def SaveEffectChain(self, label, effectlist, instrumenttype):
"""
Write an effect chain to a preset file.
Parameters:
label -- the name to save the preset under.
effectlist -- the list of effects.
instrumenttype -- the type of instrument currently being used.
"""
self.effectelement = None
self.effecttype = None
if not Globals.EFFECT_PRESETS_PATH:
raise Exception("No effect chain preset save path specified!")
doc = xml.Document()
head = doc.createElement("JokosherPreset")
doc.appendChild(head)
head.setAttribute("version", Globals.EFFECT_PRESETS_VERSION)
# effect chain preset files have an extra <Chain> block which mainly
# serves to indicate which type of instrument the effect is for
chainblock = doc.createElement("Chain")
head.appendChild(chainblock)
chaindict = {}
chaindict["instrument"] = instrumenttype
StoreDictionaryToXML(doc, chainblock, chaindict)
# the structure of each <Effect> tag is the same as in single effect
# presets; there is simply one <Effect> block for each effect in
# the chain
for effect in effectlist:
self.effectelement = effect["effectelement"]
self.effecttype = effect["effecttype"]
Globals.debug(self.effectelement)
effectblock = doc.createElement("Effect")
head.appendChild(effectblock)
paramsblock = doc.createElement("Parameters")
effectblock.appendChild(paramsblock)
paramslist = ["effectelement", "effecttype"]
StoreParametersToXML(self, doc, paramsblock, paramslist)
settingsblock = doc.createElement("Settings")
effectblock.appendChild(settingsblock)
StoreDictionaryToXML(doc, settingsblock, effect["settings"])
filename = self._PresetFilename(instrumenttype, label)
presetfile = open(os.path.realpath(Globals.EFFECT_PRESETS_PATH + filename), "w")
presetfile.write(doc.toprettyxml())
presetfile.close()
self.emit("chain-preset")
#_____________________________________________________________________
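# Rough shape of the XML written by SaveEffectChain, for orientation only
# (element values are hypothetical; the exact serialisation is handled by
# StoreDictionaryToXML/StoreParametersToXML in Utils):
#   <JokosherPreset version="0.2">
#     <Chain>
#       <instrument type="str" value="guitar"/>
#     </Chain>
#     <Effect>
#       <Parameters>...</Parameters>
#       <Settings>...</Settings>
#     </Effect>
#     <!-- one <Effect> block per effect in the chain -->
#   </JokosherPreset>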
def LoadSingleEffect(self, presetName, effectelement):
"""
Load effect settings from a preset file for a single effect.
Parameters:
presetName -- the name of the preset to be loaded.
effectelement -- the effect element to be loaded.
Returns:
a settings dictionary with the loaded settings for the effect or
False if the preset file doesn't exist.
"""
filename = self._PresetFilename(effectelement, presetName)
presetfile = Globals.EFFECT_PRESETS_PATH + filename
Globals.debug(presetfile)
if not os.path.exists(presetfile):
Globals.debug("preset file does not exist")
return False
else:
xmlfile = open(presetfile, "r")
doc = xml.parse(presetfile)
settingstags = doc.getElementsByTagName('Effect')[0].getElementsByTagName('Settings')[0]
settdict = LoadDictionaryFromXML(settingstags)
return settdict
#____________________________________________________________________
def LoadEffectChain(self, presetName, instrType):
"""
Load settings from the preset file for an Instrument's effects chain.
Parameters:
presetName -- name of the preset to be loaded.
instrType -- type of the Instrument the preset belongs to.
Returns:
a settings dictionary with the loaded settings for the effects.
"""
filename = self._PresetFilename(instrType, presetName)
presetfile = Globals.EFFECT_PRESETS_PATH + filename
if not os.path.exists(presetfile):
Globals.debug("preset file does not exist")
else:
xmlfile = open(presetfile, "r")
doc = xml.parse(presetfile)
settdict = {}
for effect in doc.getElementsByTagName('Effect'):
preftags = effect.getElementsByTagName('Parameters')[0]
prefs = LoadDictionaryFromXML(preftags)
settingstags = effect.getElementsByTagName('Settings')[0]
setts = LoadDictionaryFromXML(settingstags)
elementname = setts["name"]
settdict[str(elementname)] = {'preferences': prefs, 'settings': setts}
return settdict
#_____________________________________________________________________
def LoadSingleEffectList(self):
"""
TODO -- This method is not yet implemented.
"""
pass
#_____________________________________________________________________
def LoadEffectChainList(self):
"""
TODO -- This method is not yet implemented.
"""
pass
#_____________________________________________________________________
def DeleteSingleEffect(self, presetName, effectName):
"""
Removes a single effect preset.
Parameters:
presetName -- name of the preset to be removed.
effectName -- ladspa unique name of the effect the preset
belongs to.
"""
self._DeletePresetFile(self._PresetFilename(effectName, presetName))
self.emit("single-preset")
#_____________________________________________________________________
def DeleteEffectChain(self, presetName, instrType):
"""
Removes an effect chain preset.
Parameters:
presetName -- name of the preset to be removed.
instrType -- type of the Instrument the preset belongs to.
"""
self._DeletePresetFile(self._PresetFilename(instrType, presetName))
self.emit("chain-preset")
#_____________________________________________________________________
def _DeletePresetFile(self, filename):
"""
Removes a preset file.
Parameters:
filename -- name of the preset file to remove.
"""
presetFile = os.path.expanduser(Globals.EFFECT_PRESETS_PATH + filename)
if os.path.isfile(presetFile):
os.remove(presetFile)
#_____________________________________________________________________
def _PresetFilename(self, prefix, name):
"""
Creates the correct preset filename according to the parameters.
Examples:
PresetFilename("Guitar", "Soloist") will output:
"/Guitar %separator% Soloist.jpreset"
PresetFilename("ladspa-delay", "5ms deep delay") will output:
"/ladspa-delay %separator% 5ms deep delay.jpreset"
where %separator% is the separator string defined inside __init__
Parameters:
prefix -- unique ladspa shortname or instrType.
name -- name of the preset.
Returns:
a properly formatted preset filename string.
"""
return ("/%s%s%s.jpreset") % (prefix, self.separator, name)
#_____________________________________________________________________
def FillEffectsPresetsRegistry(self):
"""
Load all chain/effect presets into the main presets registry.
"""
Globals.debug("\tReading in presets...")
presetsfiles = glob.glob(Globals.EFFECT_PRESETS_PATH + "/*.jpreset")
self.effectpresetregistry = {}
self.effectpresetregistry["instruments"] = {}
self.effectpresetregistry["effects"] = {}
for file_ in presetsfiles:
preset = {}
depslist = []
presetname = None
effectName = None
if not os.path.exists(file_):
Globals.debug("preset file does not exist")
else:
xmlfile = open(file_, "r")
doc = xml.parse(file_)
# True if the loaded preset corresponds to an effect chain, False otherwise
isChain = None
try:
instrument = doc.getElementsByTagName('Chain')[0].getElementsByTagName('instrument')[0].getAttribute('value')
isChain = True
except:
instrument = None
for effect in doc.getElementsByTagName("Effect"):
paramtags = effect.getElementsByTagName("Parameters")[0]
for node in paramtags.childNodes:
if node.nodeType == xml.Node.ELEMENT_NODE:
if node.getAttribute("type") == "int":
pass
elif node.getAttribute("type") == "float":
pass
else:
if node.tagName == "effectelement":
depslist.append(str(node.getAttribute("value")))
effectName = str(node.getAttribute("value"))
presetname = file_.replace(str(Globals.EFFECT_PRESETS_PATH + "/"), "")
presetfile = presetname
# extract the preset name from the prefix
presetname = presetname.split(self.separator, 1)
if len(presetname) == 1:
# the filename doesn't have a prefix. Could be an old or non-compliant file
# TODO: should upgrade the filename or it won't load
presetname = presetname[0]
else:
presetname = presetname[1]
presetname = presetname.replace(".jpreset", "")
preset["dependencies"] = set(depslist)
preset["file"] = str(presetfile)
if isChain:
preset["instrument"] = str(instrument)
presetType = "instruments"
elementName = instrument
else:
presetType = "effects"
elementName = effectName
# create the elementName dir if it doesn't exist
try:
self.effectpresetregistry[presetType][elementName]
except KeyError:
self.effectpresetregistry[presetType][elementName] = {}
self.effectpresetregistry[presetType][elementName][presetname] = preset
Globals.debug("\t...done.")
#_____________________________________________________________________
def FillLADSPARegistry(self):
"""
Fill Globals.LADSPA_FACTORY_REGISTRY with effects on the system. This
is to ensure that only presets with effects on the current system are listed.
"""
Globals.debug("Filling LADSPA Registry")
# make sure all the structures are empty before we append to them
Globals.LADSPA_NAME_MAP = []
Globals.LADSPA_FACTORY_REGISTRY = None
effects = []
ladspaFactoryList = gst.registry_get_default().get_feature_list_by_plugin("ladspa")
for factory in ladspaFactoryList:
if isinstance(factory, gst.ElementFactory):
# from the list of LADSPA effects we check which ones only
# have a single sink and a single src so we know they work
if factory.get_num_pad_templates() == 2:
pads = factory.get_static_pad_templates()
sinkpads = len( [pad for pad in pads if pad.direction == gst.PAD_SINK] )
srcpads = len( [pad for pad in pads if pad.direction == gst.PAD_SRC] )
if srcpads == 1 and sinkpads == 1:
effects.append(factory.get_name())
Globals.LADSPA_NAME_MAP.append((factory.get_name(), factory.get_longname()))
Globals.debug("\t", len(effects), "LADSPA effects loaded")
Globals.LADSPA_FACTORY_REGISTRY = set(effects)
#_____________________________________________________________________
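# Sketch (assumption, not in the original code): with both registries filled,
# whether a preset can actually be used on this system could be checked by
# comparing its dependencies against the available LADSPA factories:
#   available = preset["dependencies"].issubset(Globals.LADSPA_FACTORY_REGISTRY)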
#=========================================================================
| mjumbewu/jokosher | Jokosher/EffectPresets.py | Python | gpl-2.0 | 15,541 |
#! /usr/bin/env python
# Decompress a jpeg or jpeggrey video file to rgb format
# Usage:
#
# Vunjpeg [infile [outfile]]
# Options:
#
# infile : input file (default film.video)
# outfile : output file (default out.video)
import sys
import jpeg
sys.path.append('/ufs/guido/src/video')
import VFile
# Main program -- mostly command line parsing
def main():
args = sys.argv[1:]
if len(args) < 1:
args.append('film.video')
if len(args) < 2:
args.append('out.video')
if len(args) > 2:
sys.stderr.write('usage: Vunjpeg [infile [outfile]]\n')
sys.exit(2)
sts = process(args[0], args[1])
sys.exit(sts)
# Copy one file to another
def process(infilename, outfilename):
try:
vin = VFile.BasicVinFile(infilename)
except IOError, msg:
sys.stderr.write(infilename + ': I/O error: ' + `msg` + '\n')
return 1
except VFile.Error, msg:
sys.stderr.write(msg + '\n')
return 1
except EOFError:
sys.stderr.write(infilename + ': EOF in video file\n')
return 1
try:
vout = VFile.BasicVoutFile(outfilename)
except IOError, msg:
sys.stderr.write(outfilename + ': I/O error: ' + `msg` + '\n')
return 1
info = vin.getinfo()
if info[0] == 'jpeg':
format = 'rgb'
width, height = vin.getsize()
bytes = 4
elif info[0] == 'jpeggrey':
format = 'grey'
width, height = vin.getsize()
pf = vin.packfactor
width, height = width/pf, height/pf
bytes = 1
else:
sys.stderr.write('Vunjpeg: input not in jpeg[grey] format\n')
return 1
info = (format,) + info[1:]
vout.setinfo(info)
vout.writeheader()
sts = 0
n = 0
try:
while 1:
t, data, cdata = vin.getnextframe()
n = n + 1
sys.stderr.write('Frame ' + `n` + '...')
data, w, h, b = jpeg.decompress(data)
if (w, h, b) <> (width, height, bytes):
sys.stderr.write('jpeg data has wrong size\n')
sts = 1
else:
vout.writeframe(t, data, None)
sys.stderr.write('\n')
except EOFError:
pass
return sts
# Don't forget to call the main program
main()
| sensysnetworks/uClinux | user/python/Demo/sgi/video/Vunjpeg.py | Python | gpl-2.0 | 1,978 |
{ "fedora_rpm":
{
'(_perl-5.10.0-62.17.1_, _Fedora release 11 (Leonidas)_)':dict(
setProductKey=('perl-5.10.0-62.17.1', 'Fedora release 11 (Leonidas)'),
setDescription="The Perl interpreter",
setInstallDate="2009/6/9",
),
}
}
| zenoss/ZenPacks.community.Fedora | ZenPacks/community/Fedora/tests/plugindata/linux/server1/fedora_rpm.py | Python | gpl-2.0 | 262 |
# We import importlib *ASAP* in order to test #15386
import importlib
from test.libregrtest.cmdline import _parse_args, RESOURCE_NAMES, ALL_RESOURCES
from test.libregrtest.main import main
| FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/test/libregrtest/__init__.py | Python | gpl-2.0 | 190 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2009-2010 Benny Malengier <benny.malengier@gramps-project.org>
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2010 Tim Lyons
# Copyright (C) 2011 Adam Stein <adam@csh.rit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Report output generator for html documents, based on Html and HtmlBackend
"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
import os
import shutil
import logging
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.utils.image import resize_to_jpeg
from gramps.gen.const import DATA_DIR, IMAGE_DIR, PROGRAM_NAME, URL_HOMEPAGE
from gramps.gen.errors import ReportError
from gramps.version import VERSION
from gramps.gen.plug.docgen import BaseDoc, TextDoc, URL_PATTERN
from gramps.plugins.lib.libhtmlbackend import HtmlBackend, process_spaces
from gramps.plugins.lib.libhtml import Html
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
LOG = logging.getLogger(".htmldoc")
_TEXTDOCSCREEN = 'grampstextdoc.css'
_HTMLSCREEN = 'grampshtml.css'
#------------------------------------------------------------------------
#
# Set up to make links clickable
#
#------------------------------------------------------------------------
_CLICKABLE = r'''<a href="\1">\1</a>'''
#------------------------------------------------------------------------
#
# HtmlDoc
#
#------------------------------------------------------------------------
class HtmlDoc(BaseDoc, TextDoc):
"""Implementation of the BaseDoc and TextDoc gen.plug.docgen api for the
creation of Html files. This is achieved by writing on a HtmlBackend
object
div id's defined here:
id="grampstextdoc" : the entire text report
id="grampsheading" : a small defined heading, but not h1 to h6 !
id="grampsstylednote" : start of part with a styled note, divided in
paragraphs
id="grampsnote" : start of part with a note. This id is normally not
used
The styles as defined in the stylesheet of the textdoc will be converted
to css classes. Color is removed to avoid conflicts with the css. Also
Fontface is removed. Size, italic, bold, margins, borders are retained.
"""
def __init__(self, styles, paper_style, uistate=None):
BaseDoc.__init__(self, styles, None, uistate=uistate)
self.style_declaration = ''
self.htmllist = []
self._backend = None
self.css_filename = ''
self.warn_dir = True
self._col = 0
self._tbl = None
self._empty = 1
self.title = ''
self.__title_written = -1 # -1 = not written, 0 = writing, 1 = written
self.__link_attrs = {} # additional link attrs, eg {"style": "...", "class": "..."}
self.use_table_headers = False # th, td
self.first_row = True
def set_css_filename(self, css_filename):
"""
Set the css file to use. The path must be included.
Note: DocReportDialog sets this for html doc
"""
if css_filename and os.path.basename(css_filename):
self.css_filename = css_filename
else:
self.css_filename = ''
def open(self, filename):
"""
Overwrite base method
"""
self._backend = HtmlBackend(filename)
self._backend.open()
self.htmllist += [self._backend.html_body]
#start a gramps report
self.htmllist += [Html('div', id="grampstextdoc")]
self.build_header()
def build_header(self):
"""
Build up the header of the html file over the defaults of Html()
"""
# add additional meta tags and stylesheet links to head section
# create additional meta tags
_meta1 = 'name="generator" content="%s %s %s"' % (
PROGRAM_NAME, VERSION, URL_HOMEPAGE)
meta = Html('meta', attr=_meta1)
#set styles of the report as inline css
self.build_style_declaration()
# Gramps favicon and css
fname1 = '/'.join([self._backend.datadir(), 'favicon.ico'])
fname2 = '/'.join([self._backend.datadir(), _TEXTDOCSCREEN])
fname3 = '/'.join([self._backend.datadir(), _HTMLSCREEN])
# links for Gramps favicon and stylesheets
links = Html('link', rel='shortcut icon', href=fname1,
type='image/x-icon') + (
Html('link', rel='stylesheet', href=fname2,
type='text/css', media='screen', indent=False),)
if self.css_filename:
links += (Html('link', rel='stylesheet', href=fname3,
type='text/css', media='screen', indent=False),)
self._backend.html_header += (meta, links)
def build_style_declaration(self, id="grampstextdoc"):
"""
Convert the styles of the report into inline css for the html doc
"""
styles = self.get_style_sheet()
text = []
for sname in sorted(styles.get_cell_style_names()):
style = styles.get_cell_style(sname)
pad = "%.3fcm" % style.get_padding()
top = bottom = left = right = 'none'
if style.get_top_border():
top = 'thin solid #000000'
if style.get_bottom_border():
bottom = 'thin solid #000000'
if style.get_left_border():
left = 'thin solid #000000'
if style.get_right_border():
right = 'thin solid #000000'
text.append('#%s .%s {\n'
'\tpadding: %s %s %s %s;\n'
'\tborder-top:%s; border-bottom:%s;\n'
'\tborder-left:%s; border-right:%s;\n}'
% (id, sname, pad, pad, pad, pad, top, bottom,
left, right))
for style_name in sorted(styles.get_paragraph_style_names()):
style = styles.get_paragraph_style(style_name)
font = style.get_font()
font_size = font.get_size()
#font_color = '#%02x%02x%02x' % font.get_color()
align = style.get_alignment_text()
text_indent = "%.2f" % style.get_first_indent()
right_margin = "%.2f" % style.get_right_margin()
left_margin = "%.2f" % style.get_left_margin()
top_margin = "%.2f" % style.get_top_margin()
bottom_margin = "%.2f" % style.get_bottom_margin()
top = bottom = left = right = 'none'
if style.get_top_border():
top = 'thin solid #000000'
if style.get_bottom_border():
bottom = 'thin solid #000000'
if style.get_left_border():
left = 'thin solid #000000'
if style.get_right_border():
right = 'thin solid #000000'
italic = bold = ''
if font.get_italic():
italic = 'font-style:italic; '
if font.get_bold():
bold = 'font-weight:bold; '
#if font.get_type_face() == FONT_SANS_SERIF:
# family = '"Helvetica","Arial","sans-serif"'
#else:
# family = '"Times New Roman","Times","serif"'
# do not allow color, set in base css !
# so no : 'color: %s' % font_color
# so no : 'font-family:%s;' % family
text.append('#%s .%s {\n'
'\tfont-size: %dpt;\n'
'\ttext-align: %s; text-indent: %scm;\n'
'\tmargin-right: %scm; margin-left: %scm;\n'
'\tmargin-top: %scm; margin-bottom: %scm;\n'
'\tborder-top:%s; border-bottom:%s;\n'
'\tborder-left:%s; border-right:%s;\n'
'\t%s%s\n}'
% (id, style_name, font_size,
align, text_indent,
right_margin, left_margin,
top_margin, bottom_margin,
top, bottom, left, right,
italic, bold))
self.style_declaration = '\n'.join(text)
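# For illustration only (values are hypothetical), a paragraph style named
# "Default" would produce a rule roughly of the form:
#   #grampstextdoc .Default {
#       font-size: 12pt;
#       text-align: left; text-indent: 0.00cm;
#       margin-right: 0.00cm; margin-left: 0.00cm;
#       margin-top: 0.25cm; margin-bottom: 0.25cm;
#       border-top:none; border-bottom:none;
#       border-left:none; border-right:none;
#   }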
def close(self):
"""
Overwrite base method
"""
while len(self.htmllist) > 1:
self.__reduce_list()
#now write the actual file
self._backend.close()
self.write_support_files()
def copy_file(self, from_fname, to_fname, to_dir=''):
"""
Copy a file from a source to a (report) destination. If to_dir is not
present, then the destination directory will be created.
Normally 'to_fname' will be just a filename, without directory path.
'to_dir' is the relative path name in the destination root. It will
be prepended before 'to_fname'.
"""
#build absolute path
dest = os.path.join(self._backend.datadirfull(), to_dir, to_fname)
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
if from_fname != dest:
shutil.copyfile(from_fname, dest)
elif self.warn_dir:
raise ReportError(
_("Possible destination error"),
_("You appear to have set your target directory "
"to a directory used for data storage. This "
"could create problems with file management. "
"It is recommended that you consider using "
"a different directory to store your generated "
"web pages."))
self.warn_dir = False
def write_support_files(self):
"""
Copy support files to the datadir that needs to hold them
"""
#css of textdoc styles
with open(os.path.join(self._backend.datadirfull(),
_TEXTDOCSCREEN), 'w') as tdfile:
tdfile.write(self.style_declaration)
#css file
if self.css_filename:
#we do an extra check in case file does not exist, eg cli call
fullpath = os.path.join(DATA_DIR, self.css_filename)
if os.path.exists(fullpath):
self.copy_file(fullpath, _HTMLSCREEN)
#favicon
self.copy_file(os.path.join(IMAGE_DIR, 'webstuff', 'favicon.ico'),
'favicon.ico')
def __reduce_list(self):
"""
Takes the internal list of html objects, and adds the last to the
previous. This closes the upper tag
"""
self.htmllist[-2] += self.htmllist[-1]
self.htmllist.pop()
def __write_text(self, text, mark=None, markup=False, links=False):
"""
@param text: text to write.
@param mark: IndexMark to use for indexing (not supported)
@param markup: True if text already contains markup info.
Then text will no longer be escaped
@param links: make URLs clickable if True
"""
if not markup:
text = self._backend.ESCAPE_FUNC()(text)
if self.__title_written == 0:
self.title += text
if links is True:
import re
text = re.sub(URL_PATTERN, _CLICKABLE, text)
self.htmllist[-1] += text
def __empty_char(self):
"""
Output a non-breaking space so that the browser behaves correctly on
empty content
"""
self.__write_text(' ', markup=True)
def write_text(self, text, mark=None, links=False):
"""
Overwrite base method
"""
if text != "":
self._empty = 0
self.__write_text(text, mark, links=links)
def write_title(self):
"""
Add title field to header
"""
self._backend.html_header += Html('title', self.title,
inline=True)
def start_table(self, name, style):
"""
Overwrite base method
"""
self.first_row = True
styles = self.get_style_sheet()
self._tbl = styles.get_table_style(style)
self.htmllist += [Html('table', width=str(self._tbl.get_width())+'%',
cellspacing='0')]
def end_table(self):
"""
Overwrite base method
"""
self.__reduce_list()
def start_row(self):
"""
Overwrite base method
"""
self.htmllist += [Html('tr')]
self._col = 0
def end_row(self):
"""
Overwrite base method
"""
self.first_row = False
self.__reduce_list()
def start_cell(self, style_name, span=1):
"""
Overwrite base method
"""
if self.use_table_headers and self.first_row:
tag = "th"
else:
tag = "td"
self._empty = 1
if span > 1:
self.htmllist += (Html(tag, colspan=str(span), class_=style_name),)
self._col += span
else:
self.htmllist += (Html(tag, colspan=str(span),
width=str(self._tbl.get_column_width(
self._col))+ '%',
class_=style_name),)
self._col += 1
def end_cell(self):
"""
Overwrite base method
"""
self.__reduce_list()
def start_paragraph(self, style_name, leader=None):
"""
Overwrite base method
"""
style_sheet = self.get_style_sheet()
style = style_sheet.get_paragraph_style(style_name)
level = style.get_header_level()
if level == 0:
#a normal paragraph
self.htmllist += (Html('p', class_=style_name, inline=True),)
elif level == 1:
if self.__title_written == -1 and \
style_name.upper().find('TITLE') != -1:
self.__title_written = 0
self.htmllist += (Html('div', id="header"),)
self.htmllist += (Html('h1', class_=style_name, id='SiteTitle',
inline=True),)
else:
self.htmllist += (Html('h1', class_=style_name, inline=True),)
elif 2 <= level <= 5:
tag = 'h'+str(level+1)
self.htmllist += (Html(tag, class_=style_name, inline=True),)
else:
# a low level header
self.htmllist += (Html('div', id='grampsheading',
class_=style_name),)
if leader is not None:
self.write_text(leader+' ')
def end_paragraph(self):
"""
Overwrite base method
"""
if self._empty == 1:
self.__empty_char()
self._empty = 0
self.__reduce_list()
if self.__title_written == 0:
self.__title_written = 1
#close div statement
self.__reduce_list()
self.write_title()
def start_bold(self):
"""
Overwrite base method
"""
self.htmllist += [Html('strong')]
def end_bold(self):
"""
Overwrite base method
"""
self.__reduce_list()
def start_superscript(self):
"""
Overwrite base method
"""
self.htmllist += [Html('sup')]
def end_superscript(self):
"""
Overwrite base method
"""
self.__reduce_list()
def write_styled_note(self, styledtext, format, style_name,
contains_html=False, links=False):
"""
Convenience function to write a styledtext to the html doc.
styledtext : assumed a StyledText object to write
format : = 0 : Flowed, = 1 : Preformatted
style_name : name of the style to use for default presentation
contains_html: bool, the backend should not check if html is present.
If contains_html=True, then the textdoc is free to handle that in
some way. Eg, a textdoc could remove all tags, or could make sure
a link is clickable. HtmlDoc will show the html as pure text, so
no escaping will happen.
links: bool, make URLs clickable if True
"""
text = str(styledtext)
self.htmllist += [Html('div', id='grampsstylednote')]
if contains_html:
#just dump the note out as it is. Adding markup would be dangerous
# as it could destroy the html. If html code, one can do the
self.start_paragraph(style_name)
self.__write_text(text, markup=True, links=links)
self.end_paragraph()
else:
s_tags = styledtext.get_tags()
markuptext = self._backend.add_markup_from_styled(text, s_tags,
split='\n')
self.start_paragraph(style_name)
inpara = True
self._empty = 1 # para is empty
# we explicitly set _empty because start and end para do not seem
# to do a very good job at setting them
linenb = 1
# The code is tricky here, because we don't want to start a new para
# at the end of the last line if there is no newline there.
# Instead, we want to just end the current para.
for line in markuptext.split('\n'):
[line, sigcount] = process_spaces(line, format)
if sigcount == 0:
if inpara is False:
# needed for runs of three or more newlines
self.start_paragraph(style_name)
inpara = True
self._empty = 1 # para is empty
self.end_paragraph()
inpara = False
linenb = 1
else:
if inpara is False:
self.start_paragraph(style_name)
inpara = True
self._empty = 1 # para is empty
if linenb > 1:
self.htmllist[-1] += Html('br')
self.__write_text(line, markup=True, links=links)
self._empty = 0 # para is not empty
linenb += 1
if inpara is True:
self.end_paragraph()
if sigcount == 0:
# if the last line was blank, then as well as outputting the
# previous para, which we have just done, we also output a new
# blank para
self.start_paragraph(style_name)
self._empty = 1 # para is empty
self.end_paragraph()
#end div element
self.__reduce_list()
def add_media(self, name, pos, w_cm, h_cm, alt='', style_name=None,
crop=None):
"""
Overwrite base method
"""
self._empty = 0
size = int(max(w_cm, h_cm) * float(150.0/2.54))
refname = "is%s" % os.path.basename(name)
imdir = self._backend.datadirfull()
try:
resize_to_jpeg(name, imdir + os.sep + refname, size, size,
crop=crop)
except:
LOG.warning(_("Could not create jpeg version of image %(name)s"),
name)
return
if len(alt):
alt = '<br />'.join(alt)
if pos not in ["right", "left"]:
if len(alt):
self.htmllist[-1] += Html('div') + (
Html('img', src=imdir + os.sep + refname,
border='0', alt=alt),
Html('p', class_="DDR-Caption") + alt
)
else:
self.htmllist[-1] += Html('img', src=imdir + os.sep + refname,
border='0', alt=alt)
else:
if len(alt):
self.htmllist[-1] += Html(
'div', style_="float: %s; padding: 5px; margin: 0;" % pos
) + (Html('img', src=imdir + os.sep + refname,
border='0', alt=alt),
Html('p', class_="DDR-Caption") + alt)
else:
self.htmllist[-1] += Html('img', src=imdir + os.sep + refname,
border='0', alt=alt, align=pos)
def page_break(self):
"""
overwrite base method so page break has no effect
"""
pass
def start_link(self, link):
"""
Starts a section to add a link. Link is a URI.
"""
self.htmllist += [Html('a', href=link, **self.__link_attrs)]
def stop_link(self):
"""
Stop a section of a link.
"""
self.__reduce_list()
def start_underline(self):
"""
Starts a section of underlining.
"""
self.htmllist += [Html('u')]
def stop_underline(self):
"""
Stop underlining.
"""
self.__reduce_list()
def set_link_attrs(self, attrs):
"""
Set some anchor (<a>) attributes/values. attrs is a dictionary, eg
{"style": "...", "class": "..."}
"""
self.__link_attrs = attrs
| jralls/gramps | gramps/plugins/docgen/htmldoc.py | Python | gpl-2.0 | 22,595 |
PLUGIN_NAME = 'Release Type'
PLUGIN_AUTHOR = 'Elliot Chance'
PLUGIN_DESCRIPTION = 'Appends information to EPs and Singles'
PLUGIN_VERSION = '1.2'
PLUGIN_API_VERSIONS = ["0.9.0", "0.10", "0.15"]
from picard.metadata import register_album_metadata_processor
import re
#==================
# options
#==================
_SINGLE = " (single)"
_EP = " EP"
def add_release_type(tagger, metadata, release):
# make sure "EP" (or "single", ...) is not already a word in the name
words = metadata["album"].lower().split(" ")
for word in ["ep", "e.p.", "single", "(single)"]:
if word in words:
return
# check release type
if metadata["releasetype"] == "ep":
rs = _EP;
elif metadata["releasetype"] == "single":
rs = _SINGLE;
else:
rs = ""
# append title
metadata["album"] = metadata["album"] + rs
register_album_metadata_processor(add_release_type)
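# Sketch of the effect on tags (illustrative values only):
#   releasetype "ep",     album "Blue Skies"  ->  album "Blue Skies EP"
#   releasetype "single", album "Blue Skies"  ->  album "Blue Skies (single)"
#   album already containing "EP"/"single"    ->  left unchanged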
| lalinsky/picard-debian | contrib/plugins/release_type.py | Python | gpl-2.0 | 884 |
#!/usr/bin/env python
"""
Show how to make date plots in matplotlib using date tick locators and
formatters. See major_minor_demo1.py for more information on
controlling major and minor ticks
"""
import datetime
from pylab import figure, show
from matplotlib.dates import MONDAY, SATURDAY
from matplotlib.finance import quotes_historical_yahoo
from matplotlib.dates import MonthLocator, WeekdayLocator, DateFormatter
date1 = datetime.date( 2002, 1, 5 )
date2 = datetime.date( 2003, 12, 1 )
# every monday
mondays = WeekdayLocator(MONDAY)
# every 3rd month
months = MonthLocator(range(1,13), bymonthday=1, interval=3)
monthsFmt = DateFormatter("%b '%y")
quotes = quotes_historical_yahoo('INTC', date1, date2)
if len(quotes) == 0:
print 'Found no quotes'
raise SystemExit
dates = [q[0] for q in quotes]
opens = [q[1] for q in quotes]
fig = figure()
ax = fig.add_subplot(111)
ax.plot_date(dates, opens, '-')
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(monthsFmt)
ax.xaxis.set_minor_locator(mondays)
ax.autoscale_view()
#ax.xaxis.grid(False, 'major')
#ax.xaxis.grid(True, 'minor')
ax.grid(True)
fig.autofmt_xdate()
show()
| ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/pylab_examples/date_demo2.py | Python | gpl-2.0 | 1,162 |
##
# Copyright 2014 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Go, implemented as an easyblock
@author: Adam DeConinck (NVIDIA)
@author: Kenneth Hoste (HPC-UGent)
"""
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.filetools import rmtree2, run_cmd
class EB_Go(ConfigureMake):
"""
Build Go compiler
"""
def configure_step(self):
"""No dedicated configure step."""
pass
def build_step(self):
"""No dedicated build step, building & installing is done in one go."""
pass
def install_step(self):
"""
Execute the all.bash script to build and install the Go compiler,
specifying the final installation prefix by setting $GOROOT_FINAL.
"""
srcdir = os.path.join(self.cfg['start_dir'], 'src')
try:
os.chdir(srcdir)
except OSError, err:
self.log.error("Failed to move to %s: %s" % (srcdir, err))
# $GOROOT_FINAL only specifies the location of the final installation, which gets baked into the binaries
# the installation itself is *not* done by the all.bash script, that needs to be done manually
cmd = "GOROOT_FINAL=%s ./all.bash" % self.installdir
run_cmd(cmd, log_all=True, simple=False)
try:
rmtree2(self.installdir)
shutil.copytree(self.cfg['start_dir'], self.installdir, symlinks=self.cfg['keepsymlinks'])
except OSError, err:
self.log.error("Failed to copy installation to %s: %s" % (self.installdir, err))
| geimer/easybuild-easyblocks | easybuild/easyblocks/g/go.py | Python | gpl-2.0 | 2,638 |
#!/usr/bin/env python
x = 0
print 'x is currently:', x
while x < 5:
x += 1
print 'incrementing x to:', x
| opensvn/test | src/study/python/cpp/ch14/alt/xcount.py | Python | gpl-2.0 | 114 |
##
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Unit tests for easyblock.py
@author: Jens Timmerman (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import os
import re
import shutil
import sys
import tempfile
from test.framework.utilities import EnhancedTestCase, init_config
from unittest import TestLoader, main
from easybuild.framework.easyblock import EasyBlock, get_easyblock_instance
from easybuild.framework.easyconfig import CUSTOM
from easybuild.framework.easyconfig.easyconfig import EasyConfig
from easybuild.framework.easyconfig.tools import process_easyconfig
from easybuild.framework.extensioneasyblock import ExtensionEasyBlock
from easybuild.tools import config
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir, read_file, write_file
from easybuild.tools.modules import modules_tool
class EasyBlockTest(EnhancedTestCase):
""" Baseclass for easyblock testcases """
def writeEC(self):
""" create temporary easyconfig file """
write_file(self.eb_file, self.contents)
def setUp(self):
""" setup """
super(EasyBlockTest, self).setUp()
fd, self.eb_file = tempfile.mkstemp(prefix='easyblock_test_file_', suffix='.eb')
os.close(fd)
self.orig_tmp_logdir = os.environ.get('EASYBUILD_TMP_LOGDIR', None)
self.test_tmp_logdir = tempfile.mkdtemp()
os.environ['EASYBUILD_TMP_LOGDIR'] = self.test_tmp_logdir
def test_empty(self):
self.contents = "# empty"
self.writeEC()
""" empty files should not parse! """
self.assertRaises(EasyBuildError, EasyConfig, self.eb_file)
self.assertErrorRegex(EasyBuildError, "Value of incorrect type passed", EasyBlock, "")
def test_easyblock(self):
""" make sure easyconfigs defining extensions work"""
def check_extra_options_format(extra_options):
"""Make sure extra_options value is of correct format."""
# EasyBuild v2.0: dict with <string> keys and <list> values
# (breaks backward compatibility compared to v1.x)
self.assertTrue(isinstance(extra_options, dict)) # conversion to a dict works
extra_options.items()
extra_options.keys()
extra_options.values()
for key in extra_options.keys():
self.assertTrue(isinstance(extra_options[key], list))
self.assertEqual(len(extra_options[key]), 3)
name = "pi"
version = "3.14"
self.contents = '\n'.join([
'easyblock = "ConfigureMake"',
'name = "%s"' % name,
'version = "%s"' % version,
'homepage = "http://example.com"',
'description = "test easyconfig"',
'toolchain = {"name":"dummy", "version": "dummy"}',
'exts_list = ["ext1"]',
])
self.writeEC()
stdoutorig = sys.stdout
sys.stdout = open("/dev/null", 'w')
ec = EasyConfig(self.eb_file)
eb = EasyBlock(ec)
self.assertEqual(eb.cfg['name'], name)
self.assertEqual(eb.cfg['version'], version)
self.assertRaises(NotImplementedError, eb.run_all_steps, True)
check_extra_options_format(eb.extra_options())
sys.stdout.close()
sys.stdout = stdoutorig
# check whether 'This is easyblock' log message is there
tup = ('EasyBlock', 'easybuild.framework.easyblock', '.*easybuild/framework/easyblock.pyc*')
eb_log_msg_re = re.compile(r"INFO This is easyblock %s from module %s (%s)" % tup, re.M)
logtxt = read_file(eb.logfile)
self.assertTrue(eb_log_msg_re.search(logtxt), "Pattern '%s' found in: %s" % (eb_log_msg_re.pattern, logtxt))
# test extensioneasyblock, as extension
exeb1 = ExtensionEasyBlock(eb, {'name': 'foo', 'version': '0.0'})
self.assertEqual(exeb1.cfg['name'], 'foo')
extra_options = exeb1.extra_options()
check_extra_options_format(extra_options)
self.assertTrue('options' in extra_options)
# test extensioneasyblock, as easyblock
exeb2 = ExtensionEasyBlock(ec)
self.assertEqual(exeb2.cfg['name'], 'pi')
self.assertEqual(exeb2.cfg['version'], '3.14')
extra_options = exeb2.extra_options()
check_extra_options_format(extra_options)
self.assertTrue('options' in extra_options)
class TestExtension(ExtensionEasyBlock):
@staticmethod
def extra_options():
return ExtensionEasyBlock.extra_options({'extra_param': [None, "help", CUSTOM]})
texeb = TestExtension(eb, {'name': 'bar'})
self.assertEqual(texeb.cfg['name'], 'bar')
extra_options = texeb.extra_options()
check_extra_options_format(extra_options)
self.assertTrue('options' in extra_options)
self.assertEqual(extra_options['extra_param'], [None, "help", CUSTOM])
# cleanup
eb.close_log()
os.remove(eb.logfile)
def test_fake_module_load(self):
"""Testcase for fake module load"""
self.contents = '\n'.join([
'easyblock = "ConfigureMake"',
'name = "pi"',
'version = "3.14"',
'homepage = "http://example.com"',
'description = "test easyconfig"',
'toolchain = {"name": "dummy", "version": "dummy"}',
])
self.writeEC()
eb = EasyBlock(EasyConfig(self.eb_file))
eb.installdir = config.build_path()
fake_mod_data = eb.load_fake_module()
eb.clean_up_fake_module(fake_mod_data)
# cleanup
eb.close_log()
os.remove(eb.logfile)
def test_make_module_req(self):
"""Testcase for make_module_req"""
self.contents = '\n'.join([
'easyblock = "ConfigureMake"',
'name = "pi"',
'version = "3.14"',
'homepage = "http://example.com"',
'description = "test easyconfig"',
'toolchain = {"name":"dummy", "version": "dummy"}',
])
self.writeEC()
eb = EasyBlock(EasyConfig(self.eb_file))
eb.installdir = config.install_path()
# create fake directories and files that should be guessed
os.makedirs(eb.installdir)
open(os.path.join(eb.installdir, 'foo.jar'), 'w').write('foo.jar')
open(os.path.join(eb.installdir, 'bla.jar'), 'w').write('bla.jar')
os.mkdir(os.path.join(eb.installdir, 'bin'))
os.mkdir(os.path.join(eb.installdir, 'share'))
os.mkdir(os.path.join(eb.installdir, 'share', 'man'))
# this is not a path that should be picked up
os.mkdir(os.path.join(eb.installdir, 'CPATH'))
guess = eb.make_module_req()
self.assertTrue(re.search("^prepend-path\s+CLASSPATH\s+\$root/bla.jar$", guess, re.M))
self.assertTrue(re.search("^prepend-path\s+CLASSPATH\s+\$root/foo.jar$", guess, re.M))
self.assertTrue(re.search("^prepend-path\s+MANPATH\s+\$root/share/man$", guess, re.M))
self.assertTrue(re.search("^prepend-path\s+PATH\s+\$root/bin$", guess, re.M))
self.assertFalse(re.search("^prepend-path\s+CPATH\s+.*$", guess, re.M))
# cleanup
eb.close_log()
os.remove(eb.logfile)
def test_extensions_step(self):
"""Test the extensions_step"""
self.contents = '\n'.join([
'easyblock = "ConfigureMake"',
'name = "pi"',
'version = "3.14"',
'homepage = "http://example.com"',
'description = "test easyconfig"',
'toolchain = {"name": "dummy", "version": "dummy"}',
'exts_list = ["ext1"]',
])
self.writeEC()
"""Testcase for extensions"""
# test for proper error message without the exts_defaultclass set
eb = EasyBlock(EasyConfig(self.eb_file))
eb.installdir = config.install_path()
self.assertRaises(EasyBuildError, eb.extensions_step, fetch=True)
self.assertErrorRegex(EasyBuildError, "No default extension class set", eb.extensions_step, fetch=True)
# test if everything works fine if set
self.contents += "\nexts_defaultclass = 'DummyExtension'"
self.writeEC()
eb = EasyBlock(EasyConfig(self.eb_file))
eb.builddir = config.build_path()
eb.installdir = config.install_path()
eb.extensions_step(fetch=True)
# test for proper error message when skip is set, but no exts_filter is set
self.assertRaises(EasyBuildError, eb.skip_extensions)
self.assertErrorRegex(EasyBuildError, "no exts_filter set", eb.skip_extensions)
# cleanup
eb.close_log()
os.remove(eb.logfile)
def test_skip_extensions_step(self):
"""Test the skip_extensions_step"""
self.contents = '\n'.join([
'easyblock = "ConfigureMake"',
'name = "pi"',
'version = "3.14"',
'homepage = "http://example.com"',
'description = "test easyconfig"',
'toolchain = {"name": "dummy", "version": "dummy"}',
'exts_list = ["ext1", "ext2"]',
'exts_filter = ("if [ %(ext_name)s == \'ext2\' ]; then exit 0; else exit 1; fi", "")',
'exts_defaultclass = "DummyExtension"',
])
# check if skip skips correct extensions
self.writeEC()
eb = EasyBlock(EasyConfig(self.eb_file))
eb.builddir = config.build_path()
eb.installdir = config.install_path()
eb.skip = True
eb.extensions_step(fetch=True)
# 'ext1' should be in eb.exts
self.assertTrue('ext1' in [y for x in eb.exts for y in x.values()])
# 'ext2' should not
self.assertFalse('ext2' in [y for x in eb.exts for y in x.values()])
# cleanup
eb.close_log()
os.remove(eb.logfile)
def test_make_module_step(self):
"""Test the make_module_step"""
name = "pi"
version = "3.14"
deps = [('GCC', '4.6.4')]
hiddendeps = [('toy', '0.0-deps')]
alldeps = deps + hiddendeps # hidden deps must be included in list of deps
modextravars = {'PI': '3.1415', 'FOO': 'bar'}
modextrapaths = {'PATH': 'pibin', 'CPATH': 'pi/include'}
self.contents = '\n'.join([
'easyblock = "ConfigureMake"',
'name = "%s"' % name,
'version = "%s"' % version,
'homepage = "http://example.com"',
'description = "test easyconfig"',
"toolchain = {'name': 'dummy', 'version': 'dummy'}",
"dependencies = %s" % str(alldeps),
"hiddendependencies = %s" % str(hiddendeps),
"builddependencies = [('OpenMPI', '1.6.4-GCC-4.6.4')]",
"modextravars = %s" % str(modextravars),
"modextrapaths = %s" % str(modextrapaths),
])
test_dir = os.path.dirname(os.path.abspath(__file__))
os.environ['MODULEPATH'] = os.path.join(test_dir, 'modules')
# test if module is generated correctly
self.writeEC()
ec = EasyConfig(self.eb_file)
eb = EasyBlock(ec)
eb.installdir = os.path.join(config.install_path(), 'pi', '3.14')
eb.check_readiness_step()
modpath = os.path.join(eb.make_module_step(), name, version)
self.assertTrue(os.path.exists(modpath), "%s exists" % modpath)
# verify contents of module
f = open(modpath, 'r')
txt = f.read()
f.close()
self.assertTrue(re.search("^#%Module", txt.split('\n')[0]))
self.assertTrue(re.search("^conflict\s+%s$" % name, txt, re.M))
self.assertTrue(re.search("^set\s+root\s+%s$" % eb.installdir, txt, re.M))
self.assertTrue(re.search('^setenv\s+EBROOT%s\s+".root"\s*$' % name.upper(), txt, re.M))
self.assertTrue(re.search('^setenv\s+EBVERSION%s\s+"%s"$' % (name.upper(), version), txt, re.M))
for (key, val) in modextravars.items():
regex = re.compile('^setenv\s+%s\s+"%s"$' % (key, val), re.M)
self.assertTrue(regex.search(txt), "Pattern %s found in %s" % (regex.pattern, txt))
for (key, val) in modextrapaths.items():
regex = re.compile('^prepend-path\s+%s\s+\$root/%s$' % (key, val), re.M)
self.assertTrue(regex.search(txt), "Pattern %s found in %s" % (regex.pattern, txt))
for (name, ver) in deps:
regex = re.compile('^\s*module load %s\s*$' % os.path.join(name, ver), re.M)
self.assertTrue(regex.search(txt), "Pattern %s found in %s" % (regex.pattern, txt))
for (name, ver) in hiddendeps:
regex = re.compile('^\s*module load %s/.%s\s*$' % (name, ver), re.M)
self.assertTrue(regex.search(txt), "Pattern %s found in %s" % (regex.pattern, txt))
def test_gen_dirs(self):
"""Test methods that generate/set build/install directory names."""
self.contents = '\n'.join([
'easyblock = "ConfigureMake"',
"name = 'pi'",
"version = '3.14'",
"homepage = 'http://example.com'",
"description = 'test easyconfig'",
"toolchain = {'name': 'dummy', 'version': 'dummy'}",
])
self.writeEC()
stdoutorig = sys.stdout
sys.stdout = open("/dev/null", 'w')
eb = EasyBlock(EasyConfig(self.eb_file))
resb = eb.gen_builddir()
resi = eb.gen_installdir()
eb.make_builddir()
eb.make_installdir()
# doesn't return anything
self.assertEqual(resb, None)
self.assertEqual(resi, None)
# directories are set, and exist
self.assertTrue(os.path.isdir(eb.builddir))
self.assertTrue(os.path.isdir(eb.installdir))
# make sure cleaning up old build dir is default
self.assertTrue(eb.cfg['cleanupoldbuild'] or eb.cfg.get('cleanupoldbuild', True))
builddir = eb.builddir
eb.gen_builddir()
self.assertEqual(builddir, eb.builddir)
eb.cfg['cleanupoldbuild'] = True
eb.gen_builddir()
self.assertEqual(builddir, eb.builddir)
# make sure build dir is unique
eb.cfg['cleanupoldbuild'] = False
builddir = eb.builddir
for i in range(3):
eb.gen_builddir()
self.assertEqual(eb.builddir, "%s.%d" % (builddir, i))
eb.make_builddir()
# cleanup
sys.stdout.close()
sys.stdout = stdoutorig
eb.close_log()
def test_get_easyblock_instance(self):
"""Test get_easyblock_instance function."""
# adjust PYTHONPATH such that test easyblocks are found
testdir = os.path.abspath(os.path.dirname(__file__))
import easybuild
eb_blocks_path = os.path.join(testdir, 'sandbox')
if eb_blocks_path not in sys.path:
sys.path.append(eb_blocks_path)
easybuild = reload(easybuild)
import easybuild.easyblocks
reload(easybuild.easyblocks)
from easybuild.easyblocks.toy import EB_toy
ec = process_easyconfig(os.path.join(testdir, 'easyconfigs', 'toy-0.0.eb'))[0]
eb = get_easyblock_instance(ec)
self.assertTrue(isinstance(eb, EB_toy))
# check whether 'This is easyblock' log message is there
tup = ('EB_toy', 'easybuild.easyblocks.toy', '.*test/framework/sandbox/easybuild/easyblocks/toy.pyc*')
eb_log_msg_re = re.compile(r"INFO This is easyblock %s from module %s (%s)" % tup, re.M)
logtxt = read_file(eb.logfile)
self.assertTrue(eb_log_msg_re.search(logtxt), "Pattern '%s' found in: %s" % (eb_log_msg_re.pattern, logtxt))
def test_fetch_patches(self):
"""Test fetch_patches method."""
# adjust PYTHONPATH such that test easyblocks are found
testdir = os.path.abspath(os.path.dirname(__file__))
ec = process_easyconfig(os.path.join(testdir, 'easyconfigs', 'toy-0.0.eb'))[0]
eb = get_easyblock_instance(ec)
eb.fetch_patches()
self.assertEqual(len(eb.patches), 1)
self.assertEqual(eb.patches[0]['name'], 'toy-0.0_typo.patch')
self.assertFalse('level' in eb.patches[0])
# reset
eb.patches = []
patches = [
('toy-0.0_typo.patch', 0), # should also be level 0 (not None or something else)
('toy-0.0_typo.patch', 4), # should be level 4
('toy-0.0_typo.patch', 'foobar'), # sourcepath should be set to 'foobar'
('toy-0.0.tar.gz', 'some/path'), # copy mode (not a .patch file)
]
# check if patch levels are parsed correctly
eb.fetch_patches(patch_specs=patches)
self.assertEqual(len(eb.patches), 4)
self.assertEqual(eb.patches[0]['name'], 'toy-0.0_typo.patch')
self.assertEqual(eb.patches[0]['level'], 0)
self.assertEqual(eb.patches[1]['name'], 'toy-0.0_typo.patch')
self.assertEqual(eb.patches[1]['level'], 4)
self.assertEqual(eb.patches[2]['name'], 'toy-0.0_typo.patch')
self.assertEqual(eb.patches[2]['sourcepath'], 'foobar')
self.assertEqual(eb.patches[3]['name'], 'toy-0.0.tar.gz')
self.assertEqual(eb.patches[3]['copy'], 'some/path')
patches = [
('toy-0.0_level4.patch', False), # should throw an error, only ints and strings are allowed here
]
self.assertRaises(EasyBuildError, eb.fetch_patches, patch_specs=patches)
def test_obtain_file(self):
"""Test obtain_file method."""
toy_tarball = 'toy-0.0.tar.gz'
testdir = os.path.abspath(os.path.dirname(__file__))
sandbox_sources = os.path.join(testdir, 'sandbox', 'sources')
toy_tarball_path = os.path.join(sandbox_sources, 'toy', toy_tarball)
tmpdir = tempfile.mkdtemp()
tmpdir_subdir = os.path.join(tmpdir, 'testing')
mkdir(tmpdir_subdir, parents=True)
del os.environ['EASYBUILD_SOURCEPATH'] # defined by setUp
ec = process_easyconfig(os.path.join(testdir, 'easyconfigs', 'toy-0.0.eb'))[0]
eb = EasyBlock(ec['ec'])
# 'downloading' a file to (first) sourcepath works
init_config(args=["--sourcepath=%s:/no/such/dir:%s" % (tmpdir, testdir)])
shutil.copy2(toy_tarball_path, tmpdir_subdir)
res = eb.obtain_file(toy_tarball, urls=['file://%s' % tmpdir_subdir])
self.assertEqual(res, os.path.join(tmpdir, 't', 'toy', toy_tarball))
# finding a file in sourcepath works
init_config(args=["--sourcepath=%s:/no/such/dir:%s" % (sandbox_sources, tmpdir)])
res = eb.obtain_file(toy_tarball)
self.assertEqual(res, toy_tarball_path)
# sourcepath has preference over downloading
res = eb.obtain_file(toy_tarball, urls=['file://%s' % tmpdir_subdir])
self.assertEqual(res, toy_tarball_path)
# obtain_file yields error for non-existing files
fn = 'thisisclearlyanonexistingfile'
error_regex = "Couldn't find file %s anywhere, and downloading it didn't work either" % fn
self.assertErrorRegex(EasyBuildError, error_regex, eb.obtain_file, fn, urls=['file://%s' % tmpdir_subdir])
# file specifications via URL also work, are downloaded to (first) sourcepath
init_config(args=["--sourcepath=%s:/no/such/dir:%s" % (tmpdir, sandbox_sources)])
file_url = "http://hpcugent.github.io/easybuild/index.html"
fn = os.path.basename(file_url)
res = None
try:
res = eb.obtain_file(file_url)
except EasyBuildError, err:
# if this fails, it should be because there's no online access
download_fail_regex = re.compile('socket error')
self.assertTrue(download_fail_regex.search(str(err)))
# result may be None during offline testing
if res is not None:
loc = os.path.join(tmpdir, 't', 'toy', fn)
self.assertEqual(res, loc)
self.assertTrue(os.path.exists(loc), "%s file is found at %s" % (fn, loc))
txt = open(loc, 'r').read()
eb_regex = re.compile("EasyBuild: building software with ease")
self.assertTrue(eb_regex.search(txt))
else:
print "ignoring failure to download %s in test_obtain_file, testing offline?" % file_url
shutil.rmtree(tmpdir)
def test_check_readiness(self):
"""Test check_readiness method."""
init_config(build_options={'validate': False})
# check that check_readiness step works (adding dependencies, etc.)
ec_file = 'OpenMPI-1.6.4-GCC-4.6.4.eb'
ec_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', ec_file)
ec = EasyConfig(ec_path)
eb = EasyBlock(ec)
eb.check_readiness_step()
# a proper error should be thrown for dependencies that can't be resolved (module should be there)
tmpdir = tempfile.mkdtemp()
shutil.copy2(ec_path, tmpdir)
ec_path = os.path.join(tmpdir, ec_file)
f = open(ec_path, 'a')
f.write("\ndependencies += [('nosuchsoftware', '1.2.3')]\n")
f.close()
ec = EasyConfig(ec_path)
eb = EasyBlock(ec)
try:
eb.check_readiness_step()
except EasyBuildError, err:
err_regex = re.compile("no module 'nosuchsoftware/1.2.3-GCC-4.6.4' found for dependency .*")
self.assertTrue(err_regex.search(str(err)), "Pattern '%s' not found in '%s'" % (err_regex.pattern, err))
shutil.rmtree(tmpdir)
def test_exclude_path_to_top_of_module_tree(self):
"""
Make sure that modules under the HierarchicalMNS are correct,
w.r.t. not including any load statements for modules that build up the path to the top of the module tree.
"""
self.orig_module_naming_scheme = config.get_module_naming_scheme()
test_ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
all_stops = [x[0] for x in EasyBlock.get_steps()]
build_options = {
'check_osdeps': False,
'robot_path': [test_ecs_path],
'valid_stops': all_stops,
'validate': False,
}
os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = 'HierarchicalMNS'
init_config(build_options=build_options)
self.setup_hierarchical_modules()
modfile_prefix = os.path.join(self.test_installpath, 'modules', 'all')
mkdir(os.path.join(modfile_prefix, 'Compiler', 'GCC', '4.8.3'), parents=True)
mkdir(os.path.join(modfile_prefix, 'MPI', 'intel', '2013.5.192-GCC-4.8.3', 'impi', '4.1.3.049'), parents=True)
impi_modfile_path = os.path.join('Compiler', 'intel', '2013.5.192-GCC-4.8.3', 'impi', '4.1.3.049')
imkl_modfile_path = os.path.join('MPI', 'intel', '2013.5.192-GCC-4.8.3', 'impi', '4.1.3.049', 'imkl', '11.1.2.144')
# example: for imkl on top of the iimpi toolchain with HierarchicalMNS, no module load statements should be
# included for the toolchain or any of the toolchain components,
# since both icc/ifort and impi form the path to the top of the module tree
tests = [
('impi-4.1.3.049-iccifort-2013.5.192-GCC-4.8.3.eb', impi_modfile_path, ['icc', 'ifort', 'iccifort']),
('imkl-11.1.2.144-iimpi-5.5.3-GCC-4.8.3.eb', imkl_modfile_path, ['icc', 'ifort', 'impi', 'iccifort', 'iimpi']),
]
for ec_file, modfile_path, excluded_deps in tests:
ec = EasyConfig(os.path.join(test_ecs_path, ec_file))
eb = EasyBlock(ec)
eb.toolchain.prepare()
modpath = eb.make_module_step()
modfile_path = os.path.join(modpath, modfile_path)
modtxt = read_file(modfile_path)
for imkl_dep in excluded_deps:
tup = (imkl_dep, modfile_path, modtxt)
failmsg = "No 'module load' statement found for '%s' not found in module %s: %s" % tup
self.assertFalse(re.search("module load %s" % imkl_dep, modtxt), failmsg)
os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = self.orig_module_naming_scheme
init_config(build_options=build_options)
def test_patch_step(self):
"""Test patch step."""
ec = process_easyconfig(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'easyconfigs', 'toy-0.0.eb'))[0]
orig_sources = ec['ec']['sources'][:]
# test applying patches without sources
ec['ec']['sources'] = []
eb = EasyBlock(ec['ec'])
eb.fetch_step()
eb.extract_step()
self.assertErrorRegex(EasyBuildError, '.*', eb.patch_step)
# test actual patching of unpacked sources
ec['ec']['sources'] = orig_sources
eb = EasyBlock(ec['ec'])
eb.fetch_step()
eb.extract_step()
eb.patch_step()
def tearDown(self):
""" make sure to remove the temporary file """
super(EasyBlockTest, self).tearDown()
os.remove(self.eb_file)
if self.orig_tmp_logdir is not None:
os.environ['EASYBUILD_TMP_LOGDIR'] = self.orig_tmp_logdir
shutil.rmtree(self.test_tmp_logdir, True)
def suite():
""" return all the tests in this file """
return TestLoader().loadTestsFromTestCase(EasyBlockTest)
if __name__ == '__main__':
main()
|
pneerincx/easybuild-framework
|
test/framework/easyblock.py
|
Python
|
gpl-2.0
| 26,390
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgsrulebasedrenderer.py
---------------------
Date : September 2015
Copyright : (C) 2015 by Matthias Kuhn
Email : matthias at opengis dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'September 2015'
__copyright__ = '(C) 2015, Matthias Kuhn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QSize
from qgis.core import (QgsVectorLayer,
QgsProject,
QgsRectangle,
QgsMultiRenderChecker,
QgsRuleBasedRenderer,
QgsFillSymbol,
QgsMarkerSymbol,
QgsRendererCategory,
QgsCategorizedSymbolRenderer,
QgsGraduatedSymbolRenderer,
QgsRendererRange
)
from qgis.testing import start_app, unittest
from qgis.testing.mocked import get_iface
from utilities import unitTestDataPath
# Convenience instances in case you may need them
# not used in this test
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsRulebasedRenderer(unittest.TestCase):
def setUp(self):
self.iface = get_iface()
myShpFile = os.path.join(TEST_DATA_DIR, 'rectangles.shp')
layer = QgsVectorLayer(myShpFile, 'Points', 'ogr')
QgsProject.instance().addMapLayer(layer)
# Create rulebased style
sym1 = QgsFillSymbol.createSimple({'color': '#fdbf6f', 'outline_color': 'black'})
sym2 = QgsFillSymbol.createSimple({'color': '#71bd6c', 'outline_color': 'black'})
sym3 = QgsFillSymbol.createSimple({'color': '#1f78b4', 'outline_color': 'black'})
self.r1 = QgsRuleBasedRenderer.Rule(sym1, 0, 0, '"id" = 1')
self.r2 = QgsRuleBasedRenderer.Rule(sym2, 0, 0, '"id" = 2')
self.r3 = QgsRuleBasedRenderer.Rule(sym3, 0, 0, 'ELSE')
self.rootrule = QgsRuleBasedRenderer.Rule(None)
self.rootrule.appendChild(self.r1)
self.rootrule.appendChild(self.r2)
self.rootrule.appendChild(self.r3)
self.renderer = QgsRuleBasedRenderer(self.rootrule)
layer.setRenderer(self.renderer)
self.mapsettings = self.iface.mapCanvas().mapSettings()
self.mapsettings.setOutputSize(QSize(400, 400))
self.mapsettings.setOutputDpi(96)
self.mapsettings.setExtent(QgsRectangle(-163, 22, -70, 52))
rendered_layers = [layer]
self.mapsettings.setLayers(rendered_layers)
def tearDown(self):
QgsProject.instance().removeAllMapLayers()
def testElse(self):
# Setup rendering check
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlName('expected_rulebased_else')
result = renderchecker.runTest('rulebased_else')
assert result
def testDisabledElse(self):
# Disable a rule and assert that it's hidden, not rendered by the ELSE rule
self.r2.setActive(False)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlName('expected_rulebased_disabled_else')
result = renderchecker.runTest('rulebased_disabled_else')
assert result
def testRefineWithCategories(self):
# Test refining rule with categories (refs #10815)
# First, try with a field based category (id)
cats = []
cats.append(QgsRendererCategory(1, QgsMarkerSymbol(), "id 1"))
cats.append(QgsRendererCategory(2, QgsMarkerSymbol(), "id 2"))
c = QgsCategorizedSymbolRenderer("id", cats)
QgsRuleBasedRenderer.refineRuleCategories(self.r2, c)
assert self.r2.children()[0].filterExpression() == '"id" = 1'
assert self.r2.children()[1].filterExpression() == '"id" = 2'
# Next try with an expression based category
cats = []
cats.append(QgsRendererCategory(1, QgsMarkerSymbol(), "result 1"))
cats.append(QgsRendererCategory(2, QgsMarkerSymbol(), "result 2"))
c = QgsCategorizedSymbolRenderer("id + 1", cats)
QgsRuleBasedRenderer.refineRuleCategories(self.r1, c)
assert self.r1.children()[0].filterExpression() == 'id + 1 = 1'
assert self.r1.children()[1].filterExpression() == 'id + 1 = 2'
# Last try with an expression which is just a quoted field name
cats = []
cats.append(QgsRendererCategory(1, QgsMarkerSymbol(), "result 1"))
cats.append(QgsRendererCategory(2, QgsMarkerSymbol(), "result 2"))
c = QgsCategorizedSymbolRenderer('"id"', cats)
QgsRuleBasedRenderer.refineRuleCategories(self.r3, c)
assert self.r3.children()[0].filterExpression() == '"id" = 1'
assert self.r3.children()[1].filterExpression() == '"id" = 2'
def testRefineWithRanges(self):
# Test refining rule with ranges (refs #10815)
# First, try with a field based category (id)
ranges = []
ranges.append(QgsRendererRange(0, 1, QgsMarkerSymbol(), "0-1"))
ranges.append(QgsRendererRange(1, 2, QgsMarkerSymbol(), "1-2"))
g = QgsGraduatedSymbolRenderer("id", ranges)
QgsRuleBasedRenderer.refineRuleRanges(self.r2, g)
assert self.r2.children()[0].filterExpression() == '"id" >= 0.0000 AND "id" <= 1.0000'
assert self.r2.children()[1].filterExpression() == '"id" > 1.0000 AND "id" <= 2.0000'
# Next try with an expression based range
ranges = []
ranges.append(QgsRendererRange(0, 1, QgsMarkerSymbol(), "0-1"))
ranges.append(QgsRendererRange(1, 2, QgsMarkerSymbol(), "1-2"))
g = QgsGraduatedSymbolRenderer("id / 2", ranges)
QgsRuleBasedRenderer.refineRuleRanges(self.r1, g)
assert self.r1.children()[0].filterExpression() == '(id / 2) >= 0.0000 AND (id / 2) <= 1.0000'
assert self.r1.children()[1].filterExpression() == '(id / 2) > 1.0000 AND (id / 2) <= 2.0000'
# Last try with an expression which is just a quoted field name
ranges = []
ranges.append(QgsRendererRange(0, 1, QgsMarkerSymbol(), "0-1"))
ranges.append(QgsRendererRange(1, 2, QgsMarkerSymbol(), "1-2"))
g = QgsGraduatedSymbolRenderer('"id"', ranges)
QgsRuleBasedRenderer.refineRuleRanges(self.r3, g)
assert self.r3.children()[0].filterExpression() == '"id" >= 0.0000 AND "id" <= 1.0000'
assert self.r3.children()[1].filterExpression() == '"id" > 1.0000 AND "id" <= 2.0000'
def testConvertFromCategorisedRenderer(self):
# Test converting categorised renderer to rule based
# First, try with a field based category (id)
cats = []
cats.append(QgsRendererCategory(1, QgsMarkerSymbol(), "id 1"))
cats.append(QgsRendererCategory(2, QgsMarkerSymbol(), "id 2"))
cats.append(QgsRendererCategory('a\'b', QgsMarkerSymbol(), "id a'b"))
cats.append(QgsRendererCategory('a\nb', QgsMarkerSymbol(), "id a\\nb"))
cats.append(QgsRendererCategory('a\\b', QgsMarkerSymbol(), "id a\\\\b"))
cats.append(QgsRendererCategory('a\tb', QgsMarkerSymbol(), "id a\\tb"))
c = QgsCategorizedSymbolRenderer("id", cats)
r = QgsRuleBasedRenderer.convertFromRenderer(c)
self.assertEqual(r.rootRule().children()[0].filterExpression(), '"id" = 1')
self.assertEqual(r.rootRule().children()[1].filterExpression(), '"id" = 2')
self.assertEqual(r.rootRule().children()[2].filterExpression(), '"id" = \'a\'\'b\'')
self.assertEqual(r.rootRule().children()[3].filterExpression(), '"id" = \'a\\nb\'')
self.assertEqual(r.rootRule().children()[4].filterExpression(), '"id" = \'a\\\\b\'')
self.assertEqual(r.rootRule().children()[5].filterExpression(), '"id" = \'a\\tb\'')
# Next try with an expression based category
cats = []
cats.append(QgsRendererCategory(1, QgsMarkerSymbol(), "result 1"))
cats.append(QgsRendererCategory(2, QgsMarkerSymbol(), "result 2"))
c = QgsCategorizedSymbolRenderer("id + 1", cats)
r = QgsRuleBasedRenderer.convertFromRenderer(c)
self.assertEqual(r.rootRule().children()[0].filterExpression(), 'id + 1 = 1')
self.assertEqual(r.rootRule().children()[1].filterExpression(), 'id + 1 = 2')
# Last try with an expression which is just a quoted field name
cats = []
cats.append(QgsRendererCategory(1, QgsMarkerSymbol(), "result 1"))
cats.append(QgsRendererCategory(2, QgsMarkerSymbol(), "result 2"))
c = QgsCategorizedSymbolRenderer('"id"', cats)
r = QgsRuleBasedRenderer.convertFromRenderer(c)
self.assertEqual(r.rootRule().children()[0].filterExpression(), '"id" = 1')
self.assertEqual(r.rootRule().children()[1].filterExpression(), '"id" = 2')
def testConvertFromGraduatedRenderer(self):
# Test converting graduated renderer to rule based
# First, try with a field based category (id)
ranges = []
ranges.append(QgsRendererRange(0, 1, QgsMarkerSymbol(), "0-1"))
ranges.append(QgsRendererRange(1, 2, QgsMarkerSymbol(), "1-2"))
g = QgsGraduatedSymbolRenderer("id", ranges)
r = QgsRuleBasedRenderer.convertFromRenderer(g)
self.assertEqual(r.rootRule().children()[0].filterExpression(), '"id" >= 0.000000 AND "id" <= 1.000000')
self.assertEqual(r.rootRule().children()[1].filterExpression(), '"id" > 1.000000 AND "id" <= 2.000000')
# Next try with an expression based range
ranges = []
ranges.append(QgsRendererRange(0, 1, QgsMarkerSymbol(), "0-1"))
ranges.append(QgsRendererRange(1, 2, QgsMarkerSymbol(), "1-2"))
g = QgsGraduatedSymbolRenderer("id / 2", ranges)
r = QgsRuleBasedRenderer.convertFromRenderer(g)
self.assertEqual(r.rootRule().children()[0].filterExpression(), '(id / 2) >= 0.000000 AND (id / 2) <= 1.000000')
self.assertEqual(r.rootRule().children()[1].filterExpression(), '(id / 2) > 1.000000 AND (id / 2) <= 2.000000')
# Last try with an expression which is just a quoted field name
ranges = []
ranges.append(QgsRendererRange(0, 1, QgsMarkerSymbol(), "0-1"))
ranges.append(QgsRendererRange(1, 2, QgsMarkerSymbol(), "1-2"))
g = QgsGraduatedSymbolRenderer('"id"', ranges)
r = QgsRuleBasedRenderer.convertFromRenderer(g)
self.assertEqual(r.rootRule().children()[0].filterExpression(), '"id" >= 0.000000 AND "id" <= 1.000000')
self.assertEqual(r.rootRule().children()[1].filterExpression(), '"id" > 1.000000 AND "id" <= 2.000000')
if __name__ == '__main__':
unittest.main()
|
stevenmizuno/QGIS
|
tests/src/python/test_qgsrulebasedrenderer.py
|
Python
|
gpl-2.0
| 11,534
|
#!/usr/bin/env python
from numpy import reshape, dot
from ase.visualize import view
from ase.lattice.surface import fcc111, add_adsorbate
from gpaw import GPAW
from gpaw.mixer import MixerSum
from gpaw import dscf
filename='lumo'
#-------------------------------------------
c_mol = GPAW(nbands=9, h=0.2, xc='RPBE', kpts=(8,6,1),
spinpol=True,
convergence={'energy': 100,
'density': 100,
'eigenstates': 1.0e-9,
'bands': -2}, txt='CO_lumo.txt')
calc = GPAW(nbands=60, h=0.2, xc='RPBE', kpts=(8,6,1),
eigensolver='cg',
spinpol=True,
mixer=MixerSum(nmaxold=5, beta=0.1, weight=100),
convergence={'energy': 100,
'density': 100,
'eigenstates': 1.0e-7,
'bands': -10}, txt=filename+'.txt')
#----------------------------------------
# Import Slab with relaxed CO
#slab = Calculator('gs.gpw').get_atoms()
slab = fcc111('Pt', size=(1, 2, 3), orthogonal=True)
add_adsorbate(slab, 'C', 2.0, 'ontop')
add_adsorbate(slab, 'O', 3.15, 'ontop')
slab.center(axis=2, vacuum=4.0)
view(slab)
molecule = slab.copy()
del molecule[:-2]
# Molecule
#----------------
molecule.set_calculator(c_mol)
molecule.get_potential_energy()
#Find band corresponding to lumo
lumo = c_mol.get_pseudo_wave_function(band=5, kpt=0, spin=1)
lumo = reshape(lumo, -1)
wf1_k = [c_mol.get_pseudo_wave_function(band=5, kpt=k, spin=1)
for k in range(len(c_mol.wfs.weight_k))]
wf2_k = [c_mol.get_pseudo_wave_function(band=6, kpt=k, spin=1)
for k in range(len(c_mol.wfs.weight_k))]
band_k = []
for k in range(len(c_mol.wfs.weight_k)):
wf1 = reshape(wf1_k[k], -1)
wf2 = reshape(wf2_k[k], -1)
p1 = abs(dot(wf1, lumo))
p2 = abs(dot(wf2, lumo))
if p1 > p2:
band_k.append(5)
else:
band_k.append(6)
#Lumo wavefunction
wf_u = [kpt.psit_nG[band_k[kpt.k]] for kpt in c_mol.wfs.kpt_u]
#Lumo projector overlaps
mol = range(len(slab))[-2:]
p_uai = [dict([(mol[a], P_ni[band_k[kpt.k]]) for a, P_ni in kpt.P_ani.items()])
for kpt in c_mol.wfs.kpt_u]
# Slab with adsorbed molecule
#-----------------------------------
slab.set_calculator(calc)
orbital = dscf.AEOrbital(calc, wf_u, p_uai)
dscf.dscf_calculation(calc, [[1.0, orbital, 1]], slab)
slab.get_potential_energy()
|
qsnake/gpaw
|
doc/documentation/dscf/lumo.py
|
Python
|
gpl-3.0
| 2,428
|
################# Sample 1 #################
"""
>>> import pandas as pd
>>> df = pd.read_csv('data/SMSSpamCollection', delimiter='\t', header=None)
>>> print df.head()
0 1
0 ham Go until jurong point, crazy.. Available only ...
1 ham Ok lar... Joking wif u oni...
2 spam Free entry in 2 a wkly comp to win FA Cup fina...
3 ham U dun say so early hor... U c already then say...
4 ham Nah I don't think he goes to usf, he lives aro...
[5 rows x 2 columns]
>>> print 'Number of spam messages:', df[df[0] == 'spam'][0].count()
>>> print 'Number of ham messages:', df[df[0] == 'ham'][0].count()
Number of spam messages: 747
Number of ham messages: 4825
"""
import pandas as pd
df = pd.read_csv('data/SMSSpamCollection', delimiter='\t', header=None)
print df.head()
print 'Number of spam messages:', df[df[0] == 'spam'][0].count()
print 'Number of ham messages:', df[df[0] == 'ham'][0].count()
################# Sample 2 #################
"""
>>> import pandas as pd
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> from sklearn.linear_model.logistic import LogisticRegression
>>> from sklearn.cross_validation import train_test_split
>>> from sklearn.metrics.metrics import precision_score, recall_score, confusion_matrix
>>> df = pd.read_csv('sms/sms.csv')
>>> X_train_raw, X_test_raw, y_train, y_test = train_test_split(df['message'], df['label'])
>>> vectorizer = TfidfVectorizer()
>>> X_train = vectorizer.fit_transform(X_train_raw)
>>> X_test = vectorizer.transform(X_test_raw)
>>> classifier = LogisticRegression()
>>> classifier.fit(X_train, y_train)
>>> predictions = classifier.predict(X_test)
>>> for i, prediction in enumerate(predictions[:5]):
>>> print X_test_raw[i], 'prediction:', prediction
Were trying to find a Chinese food place around here prediction: 0
all the lastest from Stereophonics, Marley, Dizzee Racal, Libertines and The Strokes! Win Nookii games with Flirt!! Click TheMob WAP Bookmark or text WAP to 82468 prediction: 1
We have pizza if u want prediction: 0
I like you peoples very much:) but am very shy pa. prediction: 0
Dun need to use dial up juz open da browser n surf... prediction: 0
"""
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.cross_validation import train_test_split
df = pd.read_csv('data/sms.csv')
X_train_raw, X_test_raw, y_train, y_test = train_test_split(df['message'], df['label'])
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train_raw)
X_test = vectorizer.transform(X_test_raw)
classifier = LogisticRegression()
classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
for i, prediction in enumerate(predictions[:5]):
print X_test_raw[i], 'prediction:', prediction
################# Sample 3 #################
"""
>>> from sklearn.metrics import confusion_matrix
>>> import matplotlib.pyplot as plt
>>> y_test = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
>>> y_pred = [0, 1, 0, 0, 0, 0, 0, 1, 1, 1]
>>> confusion_matrix = confusion_matrix(y_test, y_pred)
>>> print(confusion_matrix)
>>> plt.matshow(confusion_matrix)
>>> plt.title('Confusion matrix')
>>> plt.colorbar()
>>> plt.ylabel('True label')
>>> plt.xlabel('Predicted label')
>>> plt.show()
[[4 1]
[2 3]]
"""
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
y_test = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 0, 0, 0, 0, 1, 1, 1]
confusion_matrix = confusion_matrix(y_test, y_pred)
print(confusion_matrix)
plt.matshow(confusion_matrix)
plt.title('Confusion matrix')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
################# Sample 4 #################
"""
>>> from sklearn.metrics import accuracy_score
>>> y_pred, y_true = [0, 1, 1, 0], [1, 1, 1, 1]
>>> print 'Accuracy:', accuracy_score(y_true, y_pred)
Accuracy: 0.5
"""
from sklearn.metrics import accuracy_score
y_pred, y_true = [0, 1, 1, 0], [1, 1, 1, 1]
print 'Accuracy:', accuracy_score(y_true, y_pred)
################# Sample: Evaluating the SMS Classifier #################
"""
>>> import numpy as np
>>> import pandas as pd
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> from sklearn.linear_model.logistic import LogisticRegression
>>> from sklearn.cross_validation import train_test_split, cross_val_score
>>> df = pd.read_csv('data/sms.csv')
>>> X_train_raw, X_test_raw, y_train, y_test = train_test_split(df['message'], df['label'])
>>> vectorizer = TfidfVectorizer()
>>> X_train = vectorizer.fit_transform(X_train_raw)
>>> X_test = vectorizer.transform(X_test_raw)
>>> classifier = LogisticRegression()
>>> classifier.fit(X_train, y_train)
>>> scores = cross_val_score(classifier, X_train, y_train, cv=5)
>>> print np.mean(scores), scores
Accuracy 0.956217208018 [ 0.96057348 0.95334928 0.96411483 0.95454545 0.94850299]
"""
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.cross_validation import train_test_split, cross_val_score
df = pd.read_csv('data/sms.csv')
X_train_raw, X_test_raw, y_train, y_test = train_test_split(df['message'], df['label'])
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train_raw)
X_test = vectorizer.transform(X_test_raw)
classifier = LogisticRegression()
classifier.fit(X_train, y_train)
scores = cross_val_score(classifier, X_train, y_train, cv=5)
print np.mean(scores), scores
################# Sample 6 #################
"""
>>> import numpy as np
>>> import pandas as pd
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> from sklearn.linear_model.logistic import LogisticRegression
>>> from sklearn.cross_validation import train_test_split, cross_val_score
>>> df = pd.read_csv('data/sms.csv')
>>> X_train_raw, X_test_raw, y_train, y_test = train_test_split(df['message'], df['label'])
>>> vectorizer = TfidfVectorizer()
>>> X_train = vectorizer.fit_transform(X_train_raw)
>>> X_test = vectorizer.transform(X_test_raw)
>>> classifier = LogisticRegression()
>>> classifier.fit(X_train, y_train)
>>> precisions = cross_val_score(classifier, X_train, y_train, cv=5, scoring='precision')
>>> print 'Precision', np.mean(precisions), precisions
>>> recall = cross_val_score(classifier, X_train, y_train, cv=5, scoring='recall')
>>> print 'Recall', np.mean(recall), recall
Precision 0.992137651822 [ 0.98717949 0.98666667 1. 0.98684211 1. ]
Recall 0.677114261885 [ 0.7 0.67272727 0.6 0.68807339 0.72477064]
"""
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.cross_validation import train_test_split, cross_val_score
df = pd.read_csv('data/sms.csv')
X_train_raw, X_test_raw, y_train, y_test = train_test_split(df['message'], df['label'])
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train_raw)
X_test = vectorizer.transform(X_test_raw)
classifier = LogisticRegression()
classifier.fit(X_train, y_train)
precisions = cross_val_score(classifier, X_train, y_train, cv=5, scoring='precision')
print 'Precision', np.mean(precisions), precisions
recalls = cross_val_score(classifier, X_train, y_train, cv=5, scoring='recall')
print 'Recall', np.mean(recalls), recalls
################# Sample: F1 Score #################
"""
>>> f1s = cross_val_score(classifier, X_train, y_train, cv=5, scoring='f1')
>>> print 'F1', np.mean(f1s), f1s
F1 0.80261302628 [ 0.82539683 0.8 0.77348066 0.83157895 0.7826087 ]
"""
f1s = cross_val_score(classifier, X_train, y_train, cv=5, scoring='f1')
print 'F1', np.mean(f1s), f1s
################# Sample: ROC AUC #################
"""
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.metrics import roc_curve, auc
df = pd.read_csv('data/sms.csv')
X_train_raw, X_test_raw, y_train, y_test = train_test_split(df['message'], df['label'])
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train_raw)
X_test = vectorizer.transform(X_test_raw)
classifier = LogisticRegression()
classifier.fit(X_train, y_train)
predictions = classifier.predict_proba(X_test)
false_positive_rate, recall, thresholds = roc_curve(y_test, predictions[:, 1])
roc_auc = auc(false_positive_rate, recall)
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate, recall, 'b', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.ylabel('Recall')
plt.xlabel('Fall-out')
plt.show()
################# Sample 8 #################
"""
>>> import numpy as np
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> from sklearn.linear_model.logistic import LogisticRegression
>>> from sklearn.metrics import roc_curve, auc
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> from sklearn.cross_validation import train_test_split
>>> df = pd.read_csv('sms/sms.csv')
>>> X_train_raw, X_test_raw, y_train, y_test = train_test_split(df['message'], df['label'])
>>> vectorizer = TfidfVectorizer()
>>> X_train = vectorizer.fit_transform(X_train_raw)
>>> X_test = vectorizer.transform(X_test_raw)
>>> classifier = LogisticRegression().fit_transform(X_train, y_train)
>>> clf = LogisticRegression()
>>> clf.fit_transform(X_train, y_train)
>>> predictions = clf.predict_proba(X_test)
>>> false_positive_rate, recall, threhsolds = roc_curve(y_test, predictions[:, 1])
>>> roc_auc = auc(false_positive_rate, recall)
>>> plt.title('Receiver Operating Characteristic')
>>> plt.plot(false_positive_rate, recall, 'b', label='AUC = %0.2f' % roc_auc)
>>> plt.legend(loc='lower right')
>>> plt.plot([0, 1], [0, 1], 'r--')
>>> plt.xlim([0.0, 1.0])
>>> plt.ylim([0.0, 1.0])
>>> plt.ylabel('Recall')
>>> plt.xlabel('Fall-out')
>>> plt.show()
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.metrics import roc_curve, auc
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cross_validation import train_test_split
df = pd.read_csv('sms/sms.csv')
X_train_raw, X_test_raw, y_train, y_test = train_test_split(df['message'], df['label'])
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train_raw)
X_test = vectorizer.transform(X_test_raw)
clf = LogisticRegression()
clf.fit(X_train, y_train)
predictions = clf.predict_proba(X_test)
false_positive_rate, recall, thresholds = roc_curve(y_test, predictions[:, 1])
roc_auc = auc(false_positive_rate, recall)
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate, recall, 'b', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.ylabel('Recall')
plt.xlabel('Fall-out')
plt.show()
################# Sample 9 #################
"""
"""
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import train_test_split
from sklearn.metrics import precision_score, recall_score, accuracy_score
pipeline = Pipeline([
('vect', TfidfVectorizer(stop_words='english')),
('clf', LogisticRegression())
])
parameters = {
'vect__max_df': (0.25, 0.5, 0.75),
'vect__stop_words': ('english', None),
'vect__max_features': (2500, 5000, 10000, None),
'vect__ngram_range': ((1, 1), (1, 2)),
'vect__use_idf': (True, False),
'vect__norm': ('l1', 'l2'),
'clf__penalty': ('l1', 'l2'),
'clf__C': (0.01, 0.1, 1, 10),
}
if __name__ == "__main__":
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, scoring='accuracy', cv=3)
df = pd.read_csv('data/sms.csv')
X, y, = df['message'], df['label']
X_train, X_test, y_train, y_test = train_test_split(X, y)
grid_search.fit(X_train, y_train)
print 'Best score: %0.3f' % grid_search.best_score_
print 'Best parameters set:'
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
predictions = grid_search.predict(X_test)
print 'Accuracy:', accuracy_score(y_test, predictions)
print 'Precision:', precision_score(y_test, predictions)
print 'Recall:', recall_score(y_test, predictions)
Fitting 3 folds for each of 1536 candidates, totalling 4608 fits
[Parallel(n_jobs=-1)]: Done 1 jobs | elapsed: 0.2s
[Parallel(n_jobs=-1)]: Done 50 jobs | elapsed: 4.0s
[Parallel(n_jobs=-1)]: Done 200 jobs | elapsed: 16.9s
[Parallel(n_jobs=-1)]: Done 450 jobs | elapsed: 36.7s
[Parallel(n_jobs=-1)]: Done 800 jobs | elapsed: 1.1min
[Parallel(n_jobs=-1)]: Done 1250 jobs | elapsed: 1.7min
[Parallel(n_jobs=-1)]: Done 1800 jobs | elapsed: 2.5min
[Parallel(n_jobs=-1)]: Done 2450 jobs | elapsed: 3.4min
[Parallel(n_jobs=-1)]: Done 3200 jobs | elapsed: 4.4min
[Parallel(n_jobs=-1)]: Done 4050 jobs | elapsed: 7.7min
[Parallel(n_jobs=-1)]: Done 4608 out of 4608 | elapsed: 8.5min finished
Best score: 0.983
Best parameters set:
clf__C: 10
clf__penalty: 'l2'
vect__max_df: 0.5
vect__max_features: None
vect__ngram_range: (1, 2)
vect__norm: 'l2'
vect__stop_words: None
vect__use_idf: True
Accuracy: 0.989956958393
Precision: 0.988095238095
Recall: 0.932584269663
"""
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import train_test_split
from sklearn.metrics import precision_score, recall_score, accuracy_score
pipeline = Pipeline([
('vect', TfidfVectorizer(stop_words='english')),
('clf', LogisticRegression())
])
parameters = {
'vect__max_df': (0.25, 0.5, 0.75),
'vect__stop_words': ('english', None),
'vect__max_features': (2500, 5000, 10000, None),
'vect__ngram_range': ((1, 1), (1, 2)),
'vect__use_idf': (True, False),
'vect__norm': ('l1', 'l2'),
'clf__penalty': ('l1', 'l2'),
'clf__C': (0.01, 0.1, 1, 10),
}
if __name__ == "__main__":
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, scoring='accuracy', cv=3)
df = pd.read_csv('data/sms.csv')
X, y, = df['message'], df['label']
X_train, X_test, y_train, y_test = train_test_split(X, y)
grid_search.fit(X_train, y_train)
print 'Best score: %0.3f' % grid_search.best_score_
print 'Best parameters set:'
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
predictions = grid_search.predict(X_test)
print 'Accuracy:', accuracy_score(y_test, predictions)
print 'Precision:', precision_score(y_test, predictions)
print 'Recall:', recall_score(y_test, predictions)
"""
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import train_test_split
from sklearn.metrics import precision_score, recall_score, accuracy_score
pipeline = Pipeline([
('vect', TfidfVectorizer(stop_words='english')),
('clf', LogisticRegression())
])
parameters = {
'vect__max_df': (0.25, 0.5, 0.75),
'vect__stop_words': ('english', None),
'vect__max_features': (2500, 5000, 10000, None),
'vect__ngram_range': ((1, 1), (1, 2)),
'vect__use_idf': (True, False),
'vect__norm': ('l1', 'l2'),
'clf__penalty': ('l1', 'l2'),
'clf__C': (0.01, 0.1, 1, 10),
}
if __name__ == "__main__":
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, scoring='accuracy', cv=3)
df = pd.read_csv('data/sms.csv')
X, y, = df['message'], df['label']
X_train, X_test, y_train, y_test = train_test_split(X, y)
grid_search.fit(X_train, y_train)
print 'Best score: %0.3f' % grid_search.best_score_
print 'Best parameters set:'
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
predictions = grid_search.predict(X_test)
print 'Accuracy:', accuracy_score(y_test, predictions)
print 'Precision:', precision_score(y_test, predictions)
print 'Recall:', recall_score(y_test, predictions)
################# Sample 10 #################
"""
>>> import pandas as pd
>>> df = pd.read_csv('movie-reviews/train.tsv', header=0, delimiter='\t')
>>> print df.count()
PhraseId 156060
SentenceId 156060
Phrase 156060
Sentiment 156060
dtype: int64
>>> print df.head()
PhraseId SentenceId Phrase \
0 1 1 A series of escapades demonstrating the adage ...
1 2 1 A series of escapades demonstrating the adage ...
2 3 1 A series
3 4 1 A
4 5 1 series
Sentiment
0 1
1 2
2 2
3 2
4 2
[5 rows x 4 columns]
>>> print df['Phrase'].head(10)
0 A series of escapades demonstrating the adage ...
1 A series of escapades demonstrating the adage ...
2 A series
3 A
4 series
5 of escapades demonstrating the adage that what...
6 of
7 escapades demonstrating the adage that what is...
8 escapades
9 demonstrating the adage that what is good for ...
Name: Phrase, dtype: object
>>> print df['Sentiment'].describe()
count 156060.000000
mean 2.063578
std 0.893832
min 0.000000
25% 2.000000
50% 2.000000
75% 3.000000
max 4.000000
Name: Sentiment, dtype: float64
>>> print df['Sentiment'].value_counts()
2 79582
3 32927
1 27273
4 9206
0 7072
dtype: int64
>>> print df['Sentiment'].value_counts()/df['Sentiment'].count()
2 0.509945
3 0.210989
1 0.174760
4 0.058990
0 0.045316
dtype: float64
"""
import pandas as pd
df = pd.read_csv('data/train.tsv', header=0, delimiter='\t')
print df.count()
print df.head()
print df['Phrase'].head(10)
print df['Sentiment'].describe()
print df['Sentiment'].value_counts()
print df['Sentiment'].value_counts()/df['Sentiment'].count()
################# Sample: Multi-Class Classification of Movie Review Sentiments #################
"""
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn.metrics.metrics import classification_report, accuracy_score, confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
def main():
pipeline = Pipeline([
('vect', TfidfVectorizer(stop_words='english')),
('clf', LogisticRegression())
])
parameters = {
'vect__max_df': (0.25, 0.5),
'vect__ngram_range': ((1, 1), (1, 2)),
'vect__use_idf': (True, False),
'clf__C': (0.1, 1, 10),
}
df = pd.read_csv('data/train.tsv', header=0, delimiter='\t')
X, y = df['Phrase'], df['Sentiment'].as_matrix()
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5)
grid_search = GridSearchCV(pipeline, parameters, n_jobs=3, verbose=1, scoring='accuracy')
grid_search.fit(X_train, y_train)
print 'Best score: %0.3f' % grid_search.best_score_
print 'Best parameters set:'
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
if __name__ == '__main__':
main()
Fitting 3 folds for each of 24 candidates, totalling 72 fits
[Parallel(n_jobs=3)]: Done 1 jobs | elapsed: 3.3s
[Parallel(n_jobs=3)]: Done 50 jobs | elapsed: 1.1min
[Parallel(n_jobs=3)]: Done 68 out of 72 | elapsed: 1.9min remaining: 6.8s
[Parallel(n_jobs=3)]: Done 72 out of 72 | elapsed: 2.1min finished
Best score: 0.620
Best parameters set:
clf__C: 10
vect__max_df: 0.25
vect__ngram_range: (1, 2)
vect__use_idf: False
"""
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn.metrics.metrics import classification_report, accuracy_score, confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
def main():
pipeline = Pipeline([
('vect', TfidfVectorizer(stop_words='english')),
('clf', LogisticRegression())
])
parameters = {
'vect__max_df': (0.25, 0.5),
'vect__ngram_range': ((1, 1), (1, 2)),
'vect__use_idf': (True, False),
'clf__C': (0.1, 1, 10),
}
df = pd.read_csv('data/train.tsv', header=0, delimiter='\t')
X, y = df['Phrase'], df['Sentiment'].as_matrix()
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.5)
grid_search = GridSearchCV(pipeline, parameters, n_jobs=3, verbose=1, scoring='accuracy')
grid_search.fit(X_train, y_train)
print 'Best score: %0.3f' % grid_search.best_score_
print 'Best parameters set:'
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
if __name__ == '__main__':
main()
################# Sample 11 #################
# Multi-Class Classification Performance Metrics
"""
>>> predictions = grid_search.predict(X_test)
>>> print 'Accuracy:', accuracy_score(y_test, predictions)
>>> print 'Confusion Matrix:', confusion_matrix(y_test, predictions)
>>> print 'Classification Report:', classification_report(y_test, predictions)
Accuracy: 0.611521209791
Confusion Matrix: [[ 443 1576 1367 93 4]
[ 248 3892 9021 534 16]
[ 54 1699 35836 2026 67]
[ 5 409 9024 6693 440]
[ 1 88 1112 2529 853]]
Classification Report: precision recall f1-score support
0 0.59 0.13 0.21 3483
1 0.51 0.28 0.36 13711
2 0.64 0.90 0.75 39682
3 0.56 0.40 0.47 16571
4 0.62 0.19 0.29 4583
avg / total 0.59 0.61 0.57 78030
"""
predictions = grid_search.predict(X_test)
print 'Accuracy:', accuracy_score(y_test, predictions)
print 'Confusion Matrix:', confusion_matrix(y_test, predictions)
print 'Classification Report:', classification_report(y_test, predictions)
################# Sample 12 #################
# Applying Multi-label Classification
"""
"""
################# Sample 13 #################
# Multi-Label Classification Performance Metrics
"""
>>> import numpy as np
>>> from sklearn.metrics import hamming_loss
>>> print hamming_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.array([[0.0, 1.0], [1.0, 1.0]]))
0.0
>>> print hamming_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.array([[1.0, 1.0], [1.0, 1.0]]))
0.25
>>> print hamming_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.array([[1.0, 1.0], [0.0, 1.0]]))
0.5
>>> print jaccard_similarity_score(np.array([[0.0, 1.0], [1.0, 1.0]]), np.array([[0.0, 1.0], [1.0, 1.0]]))
1.0
>>> print jaccard_similarity_score(np.array([[0.0, 1.0], [1.0, 1.0]]), np.array([[1.0, 1.0], [1.0, 1.0]]))
0.75
>>> print jaccard_similarity_score(np.array([[0.0, 1.0], [1.0, 1.0]]), np.array([[1.0, 1.0], [0.0, 1.0]]))
0.5
"""
import numpy as np
from sklearn.metrics import hamming_loss, jaccard_similarity_score
print hamming_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.array([[0.0, 1.0], [1.0, 1.0]]))
print hamming_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.array([[1.0, 1.0], [1.0, 1.0]]))
print hamming_loss(np.array([[0.0, 1.0], [1.0, 1.0]]), np.array([[1.0, 1.0], [0.0, 1.0]]))
print jaccard_similarity_score(np.array([[0.0, 1.0], [1.0, 1.0]]), np.array([[0.0, 1.0], [1.0, 1.0]]))
print jaccard_similarity_score(np.array([[0.0, 1.0], [1.0, 1.0]]), np.array([[1.0, 1.0], [1.0, 1.0]]))
print jaccard_similarity_score(np.array([[0.0, 1.0], [1.0, 1.0]]), np.array([[1.0, 1.0], [0.0, 1.0]]))
|
moonbury/notebooks
|
github/MasteringMLWithScikit-learn/8365OS_04_Codes/ch4.py
|
Python
|
gpl-3.0
| 25,282
|
#!/usr/bin/python
#
# Copyright (C) 2016 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# SPDX-License-Identifier: GPL-2.0+
#
import fdt_util
import libfdt
import sys
# This deals with a device tree, presenting it as a list of Node and Prop
# objects, representing nodes and properties, respectively.
#
# This implementation uses a libfdt Python library to access the device tree,
# so it is fairly efficient.
class Prop:
"""A device tree property
Properties:
name: Property name (as per the device tree)
value: Property value as a string of bytes, or a list of strings of
bytes
type: Value type
"""
def __init__(self, name, bytes):
self.name = name
self.value = None
if not bytes:
self.type = fdt_util.TYPE_BOOL
self.value = True
return
self.type, self.value = fdt_util.BytesToValue(bytes)
def GetPhandle(self):
"""Get a (single) phandle value from a property
Gets the phandle value from a property and returns it as an integer
"""
return fdt_util.fdt32_to_cpu(self.value[:4])
def Widen(self, newprop):
"""Figure out which property type is more general
Given a current property and a new property, this function returns the
one that is less specific as to type. The less specific property will
be able to represent the data in the more specific property. This is
used for things like:
node1 {
compatible = "fred";
value = <1>;
};
node1 {
compatible = "fred";
value = <1 2>;
};
Here we want to use an int array for 'value'. The first property
suggests that a single int is enough, but the second one shows that
it is not. Calling this function with these two properties would
update the current property to be like the second, since it is less
specific.
"""
if newprop.type < self.type:
self.type = newprop.type
if type(newprop.value) == list and type(self.value) != list:
self.value = [self.value]
if type(self.value) == list and len(newprop.value) > len(self.value):
val = fdt_util.GetEmpty(self.type)
while len(self.value) < len(newprop.value):
self.value.append(val)
class Node:
"""A device tree node
Properties:
offset: Integer offset in the device tree
name: Device tree node name
path: Full path to node, along with the node name itself
_fdt: Device tree object
subnodes: A list of subnodes for this node, each a Node object
props: A dict of properties for this node, each a Prop object.
Keyed by property name
"""
def __init__(self, fdt, offset, name, path):
self.offset = offset
self.name = name
self.path = path
self._fdt = fdt
self.subnodes = []
self.props = {}
def Scan(self):
"""Scan a node's properties and subnodes
This fills in the props and subnodes properties, recursively
searching into subnodes so that the entire tree is built.
"""
self.props = self._fdt.GetProps(self.path)
offset = libfdt.fdt_first_subnode(self._fdt.GetFdt(), self.offset)
while offset >= 0:
sep = '' if self.path[-1] == '/' else '/'
name = libfdt.Name(self._fdt.GetFdt(), offset)
path = self.path + sep + name
node = Node(self._fdt, offset, name, path)
self.subnodes.append(node)
node.Scan()
offset = libfdt.fdt_next_subnode(self._fdt.GetFdt(), offset)
class Fdt:
"""Provides simple access to a flat device tree blob.
Properties:
fname: Filename of fdt
_root: Root of device tree (a Node object)
"""
def __init__(self, fname):
self.fname = fname
with open(fname) as fd:
self._fdt = fd.read()
def GetFdt(self):
"""Get the contents of the FDT
Returns:
The FDT contents as a string of bytes
"""
return self._fdt
def Scan(self):
"""Scan a device tree, building up a tree of Node objects
This fills in the self._root property
"""
self._root = Node(self, 0, '/', '/')
self._root.Scan()
def GetRoot(self):
"""Get the root Node of the device tree
Returns:
The root Node object
"""
return self._root
def GetProps(self, node):
"""Get all properties from a node.
Args:
node: Full path to node name to look in.
Returns:
A dictionary containing all the properties, indexed by property name.
The entries are Prop objects.
Raises:
ValueError: if the node does not exist.
"""
offset = libfdt.fdt_path_offset(self._fdt, node)
if offset < 0:
libfdt.Raise(offset)
props_dict = {}
poffset = libfdt.fdt_first_property_offset(self._fdt, offset)
while poffset >= 0:
dprop, plen = libfdt.fdt_get_property_by_offset(self._fdt, poffset)
prop = Prop(libfdt.String(self._fdt, dprop.nameoff), libfdt.Data(dprop))
props_dict[prop.name] = prop
poffset = libfdt.fdt_next_property_offset(self._fdt, poffset)
return props_dict
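# A minimal usage sketch (an assumption, not part of the original module): scan a
# compiled device tree blob passed on the command line and list each top-level
# node's properties using the classes above.
if __name__ == '__main__':
    fdt = Fdt(sys.argv[1])
    fdt.Scan()
    for node in fdt.GetRoot().subnodes:
        print node.path, sorted(node.props.keys())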
|
Jerry-X-Meng/AlphaBoxPlus
|
u-boot/tools/dtoc/fdt.py
|
Python
|
gpl-3.0
| 5,540
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe import _
from frappe.utils import flt
from frappe.utils.nestedset import NestedSet, get_root_of
from erpnext import get_default_currency
class SalesPerson(NestedSet):
nsm_parent_field = 'parent_sales_person'
def validate(self):
if not self.parent_sales_person:
self.parent_sales_person = get_root_of("Sales Person")
for d in self.get('targets') or []:
if not flt(d.target_qty) and not flt(d.target_amount):
frappe.throw(_("Either target qty or target amount is mandatory."))
self.validate_employee_id()
def onload(self):
self.load_dashboard_info()
def load_dashboard_info(self):
company_default_currency = get_default_currency()
allocated_amount = frappe.db.sql("""
select sum(allocated_amount)
from `tabSales Team`
where sales_person = %s and docstatus=1 and parenttype = 'Sales Order'
""",(self.sales_person_name))
info = {}
info["allocated_amount"] = flt(allocated_amount[0][0]) if allocated_amount else 0
info["currency"] = company_default_currency
self.set_onload('dashboard_info', info)
def on_update(self):
super(SalesPerson, self).on_update()
self.validate_one_root()
def get_email_id(self):
if self.employee:
user = frappe.db.get_value("Employee", self.employee, "user_id")
if not user:
frappe.throw(_("User ID not set for Employee {0}").format(self.employee))
else:
return frappe.db.get_value("User", user, "email") or user
def validate_employee_id(self):
if self.employee:
sales_person = frappe.db.get_value("Sales Person", {"employee": self.employee})
if sales_person and sales_person != self.name:
frappe.throw(_("Another Sales Person {0} exists with the same Employee id").format(sales_person))
def on_doctype_update():
frappe.db.add_index("Sales Person", ["lft", "rgt"])
def get_timeline_data(doctype, name):
out = {}
out.update(dict(frappe.db.sql('''select
unix_timestamp(dt.transaction_date), count(st.parenttype)
from
`tabSales Order` dt, `tabSales Team` st
where
st.sales_person = %s and st.parent = dt.name and dt.transaction_date > date_sub(curdate(), interval 1 year)
group by dt.transaction_date ''', name)))
sales_invoice = dict(frappe.db.sql('''select
unix_timestamp(dt.posting_date), count(st.parenttype)
from
`tabSales Invoice` dt, `tabSales Team` st
where
st.sales_person = %s and st.parent = dt.name and dt.posting_date > date_sub(curdate(), interval 1 year)
group by dt.posting_date ''', name))
for key in sales_invoice:
if out.get(key):
out[key] += sales_invoice[key]
else:
out[key] = sales_invoice[key]
delivery_note = dict(frappe.db.sql('''select
unix_timestamp(dt.posting_date), count(st.parenttype)
from
`tabDelivery Note` dt, `tabSales Team` st
where
st.sales_person = %s and st.parent = dt.name and dt.posting_date > date_sub(curdate(), interval 1 year)
group by dt.posting_date ''', name))
for key in delivery_note:
if out.get(key):
out[key] += delivery_note[key]
else:
out[key] = delivery_note[key]
return out
|
mhbu50/erpnext
|
erpnext/setup/doctype/sales_person/sales_person.py
|
Python
|
gpl-3.0
| 3,186
|
# Unix SMB/CIFS implementation.
# Copyright (C) Kai Blin <kai@samba.org> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import struct
import random
import socket
import samba.ndr as ndr
import samba.dcerpc.dns as dns
from samba import credentials, param
from samba.tests import TestCase
from samba.dcerpc import dnsp, dnsserver
class DNSTest(TestCase):
def errstr(self, errcode):
"Return a readable error code"
string_codes = [
"OK",
"FORMERR",
"SERVFAIL",
"NXDOMAIN",
"NOTIMP",
"REFUSED",
"YXDOMAIN",
"YXRRSET",
"NXRRSET",
"NOTAUTH",
"NOTZONE",
]
return string_codes[errcode]
def assert_dns_rcode_equals(self, packet, rcode):
"Helper function to check return code"
p_errcode = packet.operation & 0x000F
self.assertEquals(p_errcode, rcode, "Expected RCODE %s, got %s" %
(self.errstr(rcode), self.errstr(p_errcode)))
def assert_dns_opcode_equals(self, packet, opcode):
"Helper function to check opcode"
p_opcode = packet.operation & 0x7800
self.assertEquals(p_opcode, opcode, "Expected OPCODE %s, got %s" %
(opcode, p_opcode))
def make_name_packet(self, opcode, qid=None):
"Helper creating a dns.name_packet"
p = dns.name_packet()
if qid is None:
p.id = random.randint(0x0, 0xffff)
p.operation = opcode
p.questions = []
return p
def finish_name_packet(self, packet, questions):
"Helper to finalize a dns.name_packet"
packet.qdcount = len(questions)
packet.questions = questions
def make_name_question(self, name, qtype, qclass):
"Helper creating a dns.name_question"
q = dns.name_question()
q.name = name
q.question_type = qtype
q.question_class = qclass
return q
def get_dns_domain(self):
"Helper to get dns domain"
return os.getenv('REALM', 'example.com').lower()
def dns_transaction_udp(self, packet, host=os.getenv('SERVER_IP'), dump=False):
"send a DNS query and read the reply"
s = None
try:
send_packet = ndr.ndr_pack(packet)
if dump:
print self.hexdump(send_packet)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
s.connect((host, 53))
s.send(send_packet, 0)
recv_packet = s.recv(2048, 0)
if dump:
print self.hexdump(recv_packet)
return ndr.ndr_unpack(dns.name_packet, recv_packet)
finally:
if s is not None:
s.close()
def dns_transaction_tcp(self, packet, host=os.getenv('SERVER_IP'), dump=False):
"send a DNS query and read the reply"
s = None
try:
send_packet = ndr.ndr_pack(packet)
if dump:
print self.hexdump(send_packet)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect((host, 53))
tcp_packet = struct.pack('!H', len(send_packet))
tcp_packet += send_packet
s.send(tcp_packet, 0)
recv_packet = s.recv(0xffff + 2, 0)
if dump:
print self.hexdump(recv_packet)
return ndr.ndr_unpack(dns.name_packet, recv_packet[2:])
finally:
if s is not None:
s.close()
class TestSimpleQueries(DNSTest):
def test_one_a_query(self):
"create a query packet containing one query record"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rdata,
os.getenv('SERVER_IP'))
def test_one_a_query_tcp(self):
"create a query packet containing one query record via TCP"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_tcp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rdata,
os.getenv('SERVER_IP'))
def test_one_mx_query(self):
"create a query packet causing an empty RCODE_OK answer"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_MX, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 0)
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "invalid-%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_MX, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 0)
def test_two_queries(self):
"create a query packet containing two query records"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
questions.append(q)
name = "%s.%s" % ('bogusname', self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_FORMERR)
def test_qtype_all_query(self):
"create a QTYPE_ALL query"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_ALL, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
num_answers = 1
dc_ipv6 = os.getenv('SERVER_IPV6')
if dc_ipv6 is not None:
num_answers += 1
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, num_answers)
self.assertEquals(response.answers[0].rdata,
os.getenv('SERVER_IP'))
if dc_ipv6 is not None:
self.assertEquals(response.answers[1].rdata, dc_ipv6)
def test_qclass_none_query(self):
"create a QCLASS_NONE query"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_ALL, dns.DNS_QCLASS_NONE)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NOTIMP)
# Only returns an authority section entry in BIND and Win DNS
# FIXME: Enable once Samba implements this feature
# def test_soa_hostname_query(self):
# "create a SOA query for a hostname"
# p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
# questions = []
#
# name = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
# q = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
# questions.append(q)
#
# self.finish_name_packet(p, questions)
# response = self.dns_transaction_udp(p)
# self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
# # We don't get SOA records for single hosts
# self.assertEquals(response.ancount, 0)
def test_soa_domain_query(self):
"create a SOA query for a domain"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = self.get_dns_domain()
q = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rdata.minimum, 3600)
class TestDNSUpdates(DNSTest):
def test_two_updates(self):
"create two update requests"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
u = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
updates.append(u)
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_FORMERR)
def test_update_wrong_qclass(self):
"create update with DNS_QCLASS_NONE"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_NONE)
updates.append(u)
self.finish_name_packet(p, updates)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NOTIMP)
def test_update_prereq_with_non_null_ttl(self):
"test update with a non-null TTL"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
prereqs = []
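        # RFC 2136 requires prerequisite RRs to carry TTL 0; a non-zero TTL should be rejected with FORMERR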
r = dns.res_rec()
r.name = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_NONE
r.ttl = 1
r.length = 0
prereqs.append(r)
p.ancount = len(prereqs)
p.answers = prereqs
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_FORMERR)
# I'd love to test this one, but it segfaults. :)
# def test_update_prereq_with_non_null_length(self):
# "test update with a non-null length"
# p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
# updates = []
#
# name = self.get_dns_domain()
#
# u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
# updates.append(u)
# self.finish_name_packet(p, updates)
#
# prereqs = []
# r = dns.res_rec()
# r.name = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
# r.rr_type = dns.DNS_QTYPE_TXT
# r.rr_class = dns.DNS_QCLASS_ANY
# r.ttl = 0
# r.length = 1
# prereqs.append(r)
#
# p.ancount = len(prereqs)
# p.answers = prereqs
#
# response = self.dns_transaction_udp(p)
# self.assert_dns_rcode_equals(response, dns.DNS_RCODE_FORMERR)
def test_update_prereq_nonexisting_name(self):
"test update with a nonexisting name"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
prereqs = []
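        # CLASS ANY with empty RDATA asserts "RRset exists" (RFC 2136); a missing name should yield NXRRSET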
r = dns.res_rec()
r.name = "idontexist.%s" % self.get_dns_domain()
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_ANY
r.ttl = 0
r.length = 0
prereqs.append(r)
p.ancount = len(prereqs)
p.answers = prereqs
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXRRSET)
def test_update_add_txt_record(self):
"test adding records works"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = "textrec.%s" % self.get_dns_domain()
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
rdata = dns.txt_record()
rdata.txt = '"This is a test"'
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "textrec.%s" % self.get_dns_domain()
q = self.make_name_question(name, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rdata.txt, '"This is a test"')
def test_update_add_two_txt_records(self):
"test adding two txt records works"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = "textrec2.%s" % self.get_dns_domain()
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
rdata = dns.txt_record()
rdata.txt = '"This is a test" "and this is a test, too"'
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "textrec2.%s" % self.get_dns_domain()
q = self.make_name_question(name, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rdata.txt, '"This is a test" "and this is a test, too"')
def test_delete_record(self):
"Test if deleting records works"
NAME = "deleterec.%s" % self.get_dns_domain()
# First, create a record to make sure we have a record to delete.
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = NAME
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
rdata = dns.txt_record()
rdata.txt = '"This is a test"'
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# Now check the record is around
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# Now delete the record
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
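        # RFC 2136 encodes "delete this RR" as class NONE with TTL 0 plus the RDATA to remove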
r = dns.res_rec()
r.name = NAME
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_NONE
r.ttl = 0
r.length = 0xffff
rdata = dns.txt_record()
rdata.txt = '"This is a test"'
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# And finally check it's gone
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
def test_readd_record(self):
"Test if adding, deleting and then readding a records works"
NAME = "readdrec.%s" % self.get_dns_domain()
# Create the record
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = NAME
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
rdata = dns.txt_record()
rdata.txt = '"This is a test"'
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# Now check the record is around
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# Now delete the record
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = NAME
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_NONE
r.ttl = 0
r.length = 0xffff
rdata = dns.txt_record()
rdata.txt = '"This is a test"'
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# check it's gone
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
# recreate the record
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = NAME
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
rdata = dns.txt_record()
rdata.txt = '"This is a test"'
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# Now check the record is around
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
def test_update_add_mx_record(self):
"test adding MX records works"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = "%s" % self.get_dns_domain()
r.rr_type = dns.DNS_QTYPE_MX
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
rdata = dns.mx_record()
rdata.preference = 10
rdata.exchange = 'mail.%s' % self.get_dns_domain()
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s" % self.get_dns_domain()
q = self.make_name_question(name, dns.DNS_QTYPE_MX, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assertEqual(response.ancount, 1)
ans = response.answers[0]
self.assertEqual(ans.rr_type, dns.DNS_QTYPE_MX)
self.assertEqual(ans.rdata.preference, 10)
self.assertEqual(ans.rdata.exchange, 'mail.%s' % self.get_dns_domain())
class TestComplexQueries(DNSTest):
def setUp(self):
super(TestComplexQueries, self).setUp()
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
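        # Register a CNAME (cname_test -> the DC's own name) so the query tests can exercise CNAME chasing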
r = dns.res_rec()
r.name = "cname_test.%s" % self.get_dns_domain()
r.rr_type = dns.DNS_QTYPE_CNAME
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
r.rdata = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
def tearDown(self):
super(TestComplexQueries, self).tearDown()
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = "cname_test.%s" % self.get_dns_domain()
r.rr_type = dns.DNS_QTYPE_CNAME
r.rr_class = dns.DNS_QCLASS_NONE
r.ttl = 0
r.length = 0xffff
r.rdata = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
def test_one_a_query(self):
"create a query packet containing one query record"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "cname_test.%s" % self.get_dns_domain()
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 2)
self.assertEquals(response.answers[0].rr_type, dns.DNS_QTYPE_CNAME)
self.assertEquals(response.answers[0].rdata, "%s.%s" %
(os.getenv('SERVER'), self.get_dns_domain()))
self.assertEquals(response.answers[1].rr_type, dns.DNS_QTYPE_A)
self.assertEquals(response.answers[1].rdata,
os.getenv('SERVER_IP'))
class TestInvalidQueries(DNSTest):
def test_one_a_query(self):
"send 0 bytes follows by create a query packet containing one query record"
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
s.connect((os.getenv('SERVER_IP'), 53))
s.send("", 0)
finally:
if s is not None:
s.close()
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rdata,
os.getenv('SERVER_IP'))
def test_one_a_reply(self):
"send a reply instead of a query"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % ('fakefakefake', self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
p.operation |= dns.DNS_FLAG_REPLY
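        # Mark the packet as a reply; the test expects the server to drop it without answering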
s = None
try:
send_packet = ndr.ndr_pack(p)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
host=os.getenv('SERVER_IP')
s.connect((host, 53))
tcp_packet = struct.pack('!H', len(send_packet))
tcp_packet += send_packet
s.send(tcp_packet, 0)
recv_packet = s.recv(0xffff + 2, 0)
self.assertEquals(0, len(recv_packet))
finally:
if s is not None:
s.close()
class TestZones(DNSTest):
def get_loadparm(self):
lp = param.LoadParm()
lp.load(os.getenv("SMB_CONF_PATH"))
return lp
def get_credentials(self, lp):
creds = credentials.Credentials()
creds.guess(lp)
creds.set_machine_account(lp)
creds.set_krb_forwardable(credentials.NO_KRB_FORWARDABLE)
return creds
def setUp(self):
super(TestZones, self).setUp()
self.lp = self.get_loadparm()
self.creds = self.get_credentials(self.lp)
self.server = os.getenv("SERVER_IP")
self.zone = "test.lan"
self.rpc_conn = dnsserver.dnsserver("ncacn_ip_tcp:%s" % (self.server),
self.lp, self.creds)
def tearDown(self):
super(TestZones, self).tearDown()
try:
self.delete_zone(self.zone)
except RuntimeError, (num, string):
if num != 9601: #WERR_DNS_ERROR_ZONE_DOES_NOT_EXIST
raise
def create_zone(self, zone):
zone_create = dnsserver.DNS_RPC_ZONE_CREATE_INFO_LONGHORN()
zone_create.pszZoneName = zone
zone_create.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY
zone_create.fAllowUpdate = dnsp.DNS_ZONE_UPDATE_SECURE
zone_create.fAging = 0
zone_create.dwDpFlags = dnsserver.DNS_DP_DOMAIN_DEFAULT
self.rpc_conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0,
self.server,
None,
0,
'ZoneCreate',
dnsserver.DNSSRV_TYPEID_ZONE_CREATE,
zone_create)
def delete_zone(self, zone):
self.rpc_conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0,
self.server,
zone,
0,
'DeleteZoneFromDs',
dnsserver.DNSSRV_TYPEID_NULL,
None)
def test_soa_query(self):
zone = "test.lan"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(zone, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 0)
self.create_zone(zone)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rr_type, dns.DNS_QTYPE_SOA)
self.delete_zone(zone)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 0)
if __name__ == "__main__":
import unittest
unittest.main()
|
msimacek/samba
|
python/samba/tests/dns.py
|
Python
|
gpl-3.0
| 33,091
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import getopt
import h5py
import os
import sys
import plot_compose as pc
import pvf_settings as ps
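# Default plotting options; most of them can be overridden from the command line (see cli_params below)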
exten = ps.f_exten
plotdir = ps.f_plotdir
cmap = ps.plot2d_colormap
sctype = ps.plot2d_sctype
psize = ps.particles_size
pcolor = 'default'
gcolor = ''
linstyl = ''
axcuts = 'default'
cu, center = False, [0.0, 0.0, 0.0]
zoom = False, [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]
zmin, zmax = 0.0, 0.0
draw_part = False
draw_data = False
draw_grid = False
draw_uni, draw_amr = False, False
plotlevels, gridlist = '', ''
dnames = ''
uaxes = ''
nbins = 1
player = True, '0', '0', '0'
print('PIERNIK VISUALIZATION FACILITY')
def print_usage():
print('Usage: ./pvf.py <HDF5 files> [options]')
print('')
print('Usage for grid structure: ./pvf.py <HDF5 files> -g COLORS [options]')
print('Usage for grid datafields: ./pvf.py <HDF5 files> -d VARS [options]')
print('Usage for particles: ./pvf.py <HDF5 files> -p [options]')
print('')
print('Options:')
print(' -h, \t\t\t--help \t\t\t\tprint this help')
print(' -a CUT1[,CUT2], \t--axes CUT1[,CUT2] \t\tselect plot cuts from the following: 1x, 1y, 1z, xy, xz, yz, 1D, 2D [default: all 2D cuts, otherwise all 1D]')
print('\t\t\t--amr\t\t\t\tcollect all refinement levels of grid to plot [default: True while AMR refinement level structure exists]')
print(' -b BINS, \t\t--bins BINS \t\t\tmake a 2D histogram plot using BINS number instead of scattering particles [default: 1, which leads to scattering]')
print(' -c CX,CY,CZ, \t\t--center CX,CY,CZ \t\tplot cuts across given point coordinates CX, CY, CZ [default: computed domain center]')
print(' -d VAR[,VAR2], \t--dataset VAR[,VAR2] \t\tspecify one or more datafield(s) to plot [default: print available datafields; all or _all_ to plot all available datafields]')
print(' -D COLORMAP, \t\t--colormap COLORMAP \t\tuse COLORMAP palette [default: %s]' % ps.plot2d_colormap)
print(' -e EXTENSION, \t\t--extension EXTENSION \t\tsave plot in file using filename extension EXTENSION [default: %s]' % ps.f_exten[1:])
print(' -g COLOR, \t\t--gridcolor COLOR \t\tshow grids in color COLOR; possible list of colors for different grid refinement levels [default: none]')
print('\t\t\t--grid-list GRID1[,GRID2] \tplot only selected numbered grid blocks [default: all existing blocks]')
print(' -l LEVEL1[,LEVEL2], \t--level LEVEL1[,LEVEL2] \tplot only requested grid levels [default: 0 for --uniform, all for --amr]')
print('\t\t\t--linestyle STYLELIST \t\tline styles list for different refinement levels in 1D plots [default: %s]' % ps.plot1d_linestyle)
print(' -o OUTPUT, \t\t--output OUTPUT \t\tdump plot files into OUTPUT directory [default: %s]' % ps.f_plotdir)
print(' -p,\t\t\t--particles\t\t\tscatter particles onto slices [default: switched-off]')
print(' -P,\t\t\t--particle-color\t\tuse color for particles scattering or colormap for particles histogram plot [default: %s or %s]' % (ps.particles_color, ps.hist2d_colormap))
print(' -r W1[,W2,W3],\t\t--particle-slice W1[,W2,W3]\tread particles from layers +/-W1 around center; uses different widths for different projections if W1,W2,W3 requested [default: all particles]')
print(' -R W1[,W2,W3],\t\t--particle-space W1[,W2,W3]\tread particles from square +/-W1 around center or cuboid if W1,W2,W3 requested [default: no limits]')
print(' -s,\t\t\t--particle-sizes\t\tmarker sizes for scattering particles onto slices [default: switched-off]')
    print(' -t SCALETYPE, \t\t--scale SCALETYPE \t\tuse SCALETYPE scale type for displaying data (possible values: 0 | linear, 1 | symlin, 2 | log, 3 | symlog) [default: %s]' % ps.plot2d_sctype)
print(' -u UNIT, \t\t--units UNIT \t\t\tscale plot axes with UNIT [default: dataset units]')
print('\t\t\t--uniform\t\t\treconstruct uniform grid to plot [default: True while no AMR refinement level structure exists]')
print(' -z ZMIN,ZMAX, \t\t--zlim ZMIN,ZMAX \t\tlimit colorscale to ZMIN and ZMAX [default: computed data maxima symmetrized]')
print('\t\t\t--zoom XL,XR,YL,YR,ZL,ZR \tset plot axes ranges [default: domain edges]')
def cli_params(argv):
try:
opts, args = getopt.getopt(argv, "a:b:c:d:D:e:g:hl:o:pP:r:R:s:t:u:z:", ["help", "amr", "axes=", "bins=", "center=", "colormap=", "dataset=", "extension=", "gridcolor=", "grid-list=", "level=", "linestyle=", "output=", "particles", "particle-color=", "particle-space=", "particle-sizes=", "particle-slice=", "scale=", "uniform", "units=", "zlim=", "zoom="])
except getopt.GetoptError:
print("Unrecognized options: %s \n" % argv)
print_usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print_usage()
sys.exit()
elif opt in ("-a", "--axes"):
global axcuts
axcuts = arg.split(',')
elif opt in ("-b", "--bins"):
global nbins
nbins = int(arg)
elif opt in ("-c", "--center"):
global center, cu
cx, cy, cz = arg.split(',')
cu, center = True, [float(cx), float(cy), float(cz)]
elif opt in ("-d", "--dataset"):
global dnames
global draw_data
dnames = str(arg)
draw_data = True
elif opt in ("-D", "--colormap"):
global cmap
cmap = str(arg)
elif opt in ("-e", "--extension"):
global exten
exten = '.' + str(arg)
print(exten)
elif opt in ("-g", "--gridcolor"):
global gcolor, draw_grid
gcolor = str(arg)
draw_grid = True
elif opt in ("-l", "--level"):
global plotlevels
plotlevels = [int(i) for i in arg.split(',')]
elif opt in ("--linestyle"):
global linstyl
linstyl = arg.split(',')
elif opt in ("-o", "--output"):
global plotdir
plotdir = str(arg)
print('PLOTDIR: ', plotdir)
elif opt in ("-p", "--particles"):
global draw_part
draw_part = True
elif opt in ("-P", "--particle-color"):
global pcolor
pcolor = str(arg)
elif opt in ("-r", "--particle-slice"):
global player
aux = arg.split(',')
if len(aux) >= 3:
player = True, aux[0], aux[1], aux[2]
else:
player = True, aux[0], aux[0], aux[0]
elif opt in ("-R", "--particle-space"):
aux = arg.split(',')
if len(aux) >= 3:
player = False, aux[0], aux[1], aux[2]
else:
player = False, aux[0], aux[0], aux[0]
elif opt in ("-s", "--particle-sizes"):
global psize
psize = float(arg)
elif opt in ("-t", "--scale"):
global sctype
sctype = str(arg)
elif opt in ("-u", "--units"):
global uaxes
uaxes = str(arg)
elif opt in ("-z", "--zlim"):
global zmin, zmax
zmin, zmax = arg.split(',')
zmin = float(zmin)
zmax = float(zmax)
print("zmin, zmax = ", zmin, zmax)
elif opt in ("--amr",):
global draw_amr
draw_amr = True
elif opt in ("--grid-list",):
global gridlist
gridlist = [int(i) for i in arg.split(',')]
elif opt in ("--uniform",):
global draw_uni
draw_uni = True
elif opt in ("--zoom",):
global zoom
aux = arg.split(',')
zoom = True, [float(aux[0]), float(aux[2]), float(aux[4])], [float(aux[1]), float(aux[3]), float(aux[5])]
print("ZOOM: xmin, xmax = ", zoom[1][0], zoom[2][0], 'ymin, ymax = ', zoom[1][1], zoom[2][1], 'zmin, zmax = ', zoom[1][2], zoom[2][2])
if (len(sys.argv) < 2):
print_usage()
exit()
files_list = []
optilist = []
for word in sys.argv[1:]:
if word.split('.')[-1] == 'h5':
files_list.append(word)
else:
optilist.append(word)
if files_list == []:
print('No h5 files selected. See ./pvf.py -h for help.')
cli_params(optilist)
if pcolor == 'default':
if nbins > 1:
pcolor = ps.hist2d_colormap
else:
pcolor = ps.particles_color
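# Translate the --axes tokens into boolean flags for the 1D cuts and 2D planes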
p1x, p1y, p1z, p2xy, p2xz, p2yz = False, False, False, False, False, False
if 'all' in axcuts:
p1x, p1y, p1z, p2xy, p2xz, p2yz = True, True, True, True, True, True
if '2D' in axcuts:
p2xy, p2xz, p2yz = True, True, True
if 'xy' in axcuts:
p2xy = True
if 'xz' in axcuts:
p2xz = True
if 'yz' in axcuts:
p2yz = True
if '1D' in axcuts:
p1x, p1y, p1z = True, True, True
if '1x' in axcuts:
p1x = True
if '1y' in axcuts:
p1y = True
if '1z' in axcuts:
p1z = True
axc = [p1x, p1y, p1z], [p2yz, p2xz, p2xy]
options = axc, zmin, zmax, cmap, pcolor, player, psize, sctype, cu, center, draw_grid, draw_data, draw_uni, draw_amr, draw_part, nbins, uaxes, zoom, plotlevels, gridlist, gcolor, linstyl
if not os.path.exists(plotdir):
os.makedirs(plotdir)
for pthfilen in files_list:
print('')
file_exists = os.path.exists(pthfilen)
if not file_exists:
print('The file %s does not exist!' % pthfilen)
continue
h5f = h5py.File(pthfilen, 'r')
particles_in_file = 'particle_types' in list(h5f)
if not (draw_data or draw_part or draw_grid) or (draw_data and dnames == '') or (not draw_data and not draw_grid and draw_part and not particles_in_file):
partincl = ''
if particles_in_file:
partincl = 'and particles'
else:
if draw_part:
print('Particles not available in the file!')
print('Available datafields in the file %s: \n' % pthfilen, list(h5f['field_types'].keys()), partincl)
h5f.close()
continue
filen = pthfilen.split('/')[-1]
print("Reading file: %s" % pthfilen)
prd, prp, prg = '', '', ''
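    # Decide what to read from this file: requested datafields, particles, and/or the grid structure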
if draw_data:
if dnames == "_all_" or dnames == "all":
varlist = h5f['field_types'].keys()
else:
varlist = dnames.split(',')
prd = 'datasets: %s' % varlist
if draw_part:
if particles_in_file:
prp = 'particles and '
else:
print('Particles not available in the file!')
elif particles_in_file:
varlist = [ps.particles_output]
prp = 'particles only'
elif draw_grid:
varlist = [ps.grid_output]
prg = 'grid only'
else:
varlist = []
if varlist != []:
print('Going to read ' + prp + prd + prg)
for var in varlist:
if (not draw_data or var in list(h5f['field_types'].keys())):
# output = plotdir+'/'+filen.split('/')[-1].replace('.h5',"_%s.png" % var)
fnl = filen.split('/')[-1]
output = [plotdir + '/' + '_'.join(fnl.split('_')[:-1]) + '_' + var + '_', fnl.split('_')[-1].replace('.h5', exten)]
pc.plotcompose(pthfilen, var, output, options)
else:
print(var, ' is not available in the file ', pthfilen)
h5f.close()
|
mogrodnik/piernik
|
visual/pvf.py
|
Python
|
gpl-3.0
| 11,241
|
import logging
import newrelic.agent
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from treeherder.etl.common import (fetch_json,
generate_revision_hash,
to_timestamp)
from treeherder.etl.push import store_push_data
from treeherder.model.models import Repository
logger = logging.getLogger(__name__)
class PushLoader:
"""Transform and load a list of pushes"""
def process(self, message_body, exchange):
transformer = self.get_transformer_class(exchange)(message_body)
try:
newrelic.agent.add_custom_parameter("url", transformer.repo_url)
newrelic.agent.add_custom_parameter("branch", transformer.branch)
repo = Repository.objects.get(url=transformer.repo_url,
branch=transformer.branch,
active_status="active")
newrelic.agent.add_custom_parameter("repository", repo.name)
except ObjectDoesNotExist:
repo_info = transformer.get_info()
repo_info.update({
"url": transformer.repo_url,
"branch": transformer.branch,
})
newrelic.agent.record_custom_event("skip_unknown_repository",
repo_info)
logger.warn("Skipping unsupported repo: {} {}".format(
transformer.repo_url,
transformer.branch))
return
transformed_data = transformer.transform(repo.name)
logger.info("Storing push for {} {} {}".format(
repo.name,
transformer.repo_url,
transformer.branch))
store_push_data(repo, [transformed_data])
def get_transformer_class(self, exchange):
if "github" in exchange:
if exchange.endswith("push"):
return GithubPushTransformer
elif exchange.endswith("pull-request"):
return GithubPullRequestTransformer
elif "/hgpushes/" in exchange:
return HgPushTransformer
raise PulsePushError(
"Unsupported push exchange: {}".format(exchange))
class GithubTransformer:
CREDENTIALS = {
"client_id": settings.GITHUB_CLIENT_ID,
"client_secret": settings.GITHUB_CLIENT_SECRET
}
def __init__(self, message_body):
self.message_body = message_body
self.repo_url = message_body["details"]["event.head.repo.url"].replace(
".git", "")
self.branch = self.get_branch()
def get_branch(self):
return self.message_body["details"]["event.base.repo.branch"]
def get_info(self):
# flatten the data a bit so it will show in new relic as fields
info = self.message_body["details"].copy()
info.update({
"organization": self.message_body["organization"],
"repository": self.message_body["repository"]
})
return info
def fetch_push(self, url, repository, sha=None):
params = {"sha": sha} if sha else {}
params.update(self.CREDENTIALS)
logger.info("Fetching push details: {}".format(url))
newrelic.agent.add_custom_parameter("sha", sha)
commits = self.get_cleaned_commits(fetch_json(url, params))
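        # The last commit in the cleaned list is the push head; it supplies revision, author and timestamp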
head_commit = commits[-1]
push = {
"revision": head_commit["sha"],
"push_timestamp": to_timestamp(
head_commit["commit"]["author"]["date"]),
"author": head_commit["commit"]["author"]["email"],
}
revisions = []
for commit in commits:
revisions.append({
"comment": commit["commit"]["message"],
"author": u"{} <{}>".format(
commit["commit"]["author"]["name"],
commit["commit"]["author"]["email"]),
"revision": commit["sha"]
})
push["revisions"] = revisions
return push
def get_cleaned_commits(self, commits):
"""Allow a subclass to change the order of the commits"""
return commits
class GithubPushTransformer(GithubTransformer):
# {
# organization:mozilla - services
# details:{
# event.type:push
# event.base.repo.branch:master
# event.head.repo.branch:master
# event.head.user.login:mozilla-cloudops-deploy
# event.head.repo.url:https://github.com/mozilla-services/cloudops-jenkins.git
# event.head.sha:845aa1c93726af92accd9b748ea361a37d5238b6
# event.head.ref:refs/heads/master
# event.head.user.email:mozilla-cloudops-deploy@noreply.github.com
# }
# repository:cloudops-jenkins
# version:1
# }
URL_BASE = "https://api.github.com/repos/{}/{}/commits"
def transform(self, repository):
commit = self.message_body["details"]["event.head.sha"]
push_url = self.URL_BASE.format(
self.message_body["organization"],
self.message_body["repository"]
)
return self.fetch_push(push_url, repository, sha=commit)
def get_cleaned_commits(self, commits):
        # The list of commits will include ones not in the push; we
        # need to trim the list.
base_sha = self.message_body["details"]["event.base.sha"]
for idx, commit in enumerate(commits):
if commit["sha"] == base_sha:
commits = commits[:idx]
return list(reversed(commits))
class GithubPullRequestTransformer(GithubTransformer):
# {
# "organization": "mozilla",
# "action": "synchronize",
# "details": {
# "event.type": "pull_request.synchronize",
# "event.base.repo.branch": "master",
# "event.pullNumber": "1692",
# "event.base.user.login": "mozilla",
# "event.base.repo.url": "https: // github.com / mozilla / treeherder.git",
# "event.base.sha": "ff6a66a27c2c234e5820b8ffe48f17d85f1eb2db",
# "event.base.ref": "master",
# "event.head.user.login": "mozilla",
# "event.head.repo.url": "https: // github.com / mozilla / treeherder.git",
# "event.head.repo.branch": "github - pulse - pushes",
# "event.head.sha": "0efea0fa1396369b5058e16139a8ab51cdd7bd29",
# "event.head.ref": "github - pulse - pushes",
# "event.head.user.email": "mozilla@noreply.github.com",
# },
# "repository": "treeherder",
# "version": 1
# }
URL_BASE = "https://api.github.com/repos/{}/{}/pulls/{}/commits"
def get_branch(self):
"""
Pull requests don't use the actual branch, just the string "pull request"
"""
return "pull request"
def transform(self, repository):
pr_url = self.URL_BASE.format(
self.message_body["organization"],
self.message_body["repository"],
self.message_body["details"]["event.pullNumber"]
)
return self.fetch_push(pr_url, repository)
class HgPushTransformer:
# {
# "root": {
# "payload": {
# "pushlog_pushes": [
# {
# "time": 14698302460,
# "push_full_json_url": "https://hg.mozilla.org/try/json-pushes?version=2&full=1&startID=136597&endID=136598",
# "pushid": 136598,
# "push_json_url": " https: //hg.mozilla.org/try/json-pushes?version=2&startID=136597&endID=136598",
# "user": " james@hoppipolla.co.uk"
# }
# ],
# "heads": [
# "2f77bc4f354d9ba67ea5270b2fc789f4b0521287"
# ],
# "repo_url": "https://hg.mozilla.org/try",
# "_meta": {
# "sent": "2016-07-29T22:11:18.503365",
# "routing_key": "try",
# "serializer": "json",
# "exchange": "exchange/hgpushes/v1"
# }
# }
# }
# }
def __init__(self, message_body):
self.message_body = message_body
self.repo_url = message_body["payload"]["repo_url"]
self.branch = None
def get_info(self):
return self.message_body["payload"]
def transform(self, repository):
logger.info("transforming for {}".format(repository))
url = self.message_body["payload"]["pushlog_pushes"][0]["push_full_json_url"]
return self.fetch_push(url, repository)
def fetch_push(self, url, repository, sha=None):
newrelic.agent.add_custom_parameter("sha", sha)
logger.info("fetching for {} {}".format(repository, url))
# there will only ever be one, with this url
push = fetch_json(url)["pushes"].values()[0]
commits = []
# TODO: Remove this when bug 1257602 is addressed
rev_hash_components = []
# we only want to ingest the last 200 commits for each push,
# to protect against the 5000+ commit merges on release day uplift.
for commit in push['changesets'][-200:]:
commits.append({
"revision": commit["node"],
"author": commit["author"],
"comment": commit["desc"],
})
rev_hash_components.append(commit['node'])
rev_hash_components.append(commit['branch'])
return {
"revision": commits[-1]["revision"],
'revision_hash': generate_revision_hash(rev_hash_components),
"author": push["user"],
"push_timestamp": push["date"],
"revisions": commits,
}
class PulsePushError(ValueError):
pass
|
MikeLing/treeherder
|
treeherder/etl/push_loader.py
|
Python
|
mpl-2.0
| 9,763
|
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('contentstore.git_export_utils', 'cms.djangoapps.contentstore.git_export_utils')
from cms.djangoapps.contentstore.git_export_utils import *
|
eduNEXT/edunext-platform
|
import_shims/studio/contentstore/git_export_utils.py
|
Python
|
agpl-3.0
| 410
|
# flake8: noqa: F811
from inbox.test.util.base import (generic_account, gmail_account, db,
add_fake_yahoo_account)
from inbox.test.api.base import api_client, new_api_client
__all__ = ['db', 'api_client', 'generic_account', 'gmail_account']
def test_account(db, api_client, generic_account, gmail_account):
# Because we're using the generic_account namespace
api_client = new_api_client(db, generic_account.namespace)
resp_data = api_client.get_data('/account')
assert resp_data['id'] == generic_account.namespace.public_id
assert resp_data['object'] == 'account'
assert resp_data['account_id'] == generic_account.namespace.public_id
assert resp_data['email_address'] == generic_account.email_address
assert resp_data['name'] == generic_account.name
assert resp_data['organization_unit'] == 'folder'
assert 'sync_state' in resp_data
assert 'server_settings' not in resp_data
# Because we're using the gmail account namespace
api_client = new_api_client(db, gmail_account.namespace)
resp_data = api_client.get_data('/account')
assert resp_data['id'] == gmail_account.namespace.public_id
assert resp_data['provider'] == 'gmail'
assert resp_data['organization_unit'] == 'label'
assert 'sync_state' in resp_data
assert 'server_settings' not in resp_data
def test_account_expanded(db, api_client, generic_account, gmail_account):
# Generic accounts expose a `server_settings` attribute
# Custom IMAP
api_client = new_api_client(db, generic_account.namespace)
resp_data = api_client.get_data('/account/?view=expanded')
assert resp_data['provider'] == 'custom'
assert 'server_settings' in resp_data
assert set(resp_data['server_settings']) == set({
'imap_host': 'imap.custom.com',
'smtp_host': 'smtp.custom.com',
'imap_port': 993,
'smtp_port': 587,
'ssl_required': True})
# Yahoo
yahoo_account = add_fake_yahoo_account(db.session)
api_client = new_api_client(db, yahoo_account.namespace)
resp_data = api_client.get_data('/account/?view=expanded')
assert resp_data['provider'] == 'yahoo'
assert 'server_settings' in resp_data
assert set(resp_data['server_settings']) == set({
'imap_host': 'imap.mail.yahoo.com',
'smtp_host': 'smtp.mail.yahoo.com',
'imap_port': 993,
'smtp_port': 587,
'ssl_required': True})
# Gmail accounts don't expose a `server_settings` attribute
api_client = new_api_client(db, gmail_account.namespace)
resp_data = api_client.get_data('/account/?view=expanded')
assert resp_data['provider'] == 'gmail'
assert 'server_settings' not in resp_data
def test_account_repr_for_new_account(db):
account = add_fake_yahoo_account(db.session)
# Sync for the account has not started yet.
assert account.sync_state is None
# However the API-returned account object has `sync_state=running`
# so API clients can do the right thing.
api_client = new_api_client(db, account.namespace)
resp_data = api_client.get_data('/account')
assert resp_data['id'] == account.namespace.public_id
assert resp_data['sync_state'] == 'running'
# Verify other sync_states are not masked.
account.sync_state = 'invalid'
db.session.commit()
api_client = new_api_client(db, account.namespace)
resp_data = api_client.get_data('/account')
assert resp_data['id'] == account.namespace.public_id
assert resp_data['sync_state'] == 'invalid'
|
jobscore/sync-engine
|
inbox/test/api/test_account.py
|
Python
|
agpl-3.0
| 3,554
|
"""Fix Category column defaults.
Revision ID: 516024977fc5
Revises: 59e1cc690da9
Create Date: 2016-04-13 00:05:16.542436
"""
# revision identifiers, used by Alembic.
revision = '516024977fc5'
down_revision = '59e1cc690da9'
from alembic import op
from sqlalchemy.sql import text
def upgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("ALTER TABLE category "
"MODIFY COLUMN name VARCHAR(191) NOT NULL DEFAULT '', "
"MODIFY COLUMN deleted_at DATETIME NOT NULL DEFAULT '1970-01-01 00:00:00'"))
conn.execute(text("ALTER TABLE folder "
"MODIFY COLUMN name VARCHAR(191) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL, "
"MODIFY COLUMN canonical_name VARCHAR(191) NOT NULL DEFAULT '', "
"DROP INDEX account_id, "
"ADD CONSTRAINT UNIQUE account_id (account_id, name, canonical_name)"))
conn.execute(text("ALTER TABLE label "
"MODIFY COLUMN canonical_name VARCHAR(191) NOT NULL DEFAULT ''"))
def downgrade():
pass
|
nylas/sync-engine
|
migrations/versions/221_fix_category_column_defaults.py
|
Python
|
agpl-3.0
| 1,148
|
from __future__ import absolute_import
import random
from .hashed import murmur2
class DefaultPartitioner(object):
"""Default partitioner.
Hashes key to partition using murmur2 hashing (from java client)
If key is None, selects partition randomly from available,
or from all partitions if none are currently available
"""
@classmethod
def __call__(cls, key, all_partitions, available):
"""
Get the partition corresponding to key
:param key: partitioning key
:param all_partitions: list of all partitions sorted by partition ID
:param available: list of available partitions in no particular order
:return: one of the values from all_partitions or available
"""
if key is None:
if available:
return random.choice(available)
return random.choice(all_partitions)
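        # Hash the key with murmur2, clear the sign bit, and map the result onto the partition list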
idx = murmur2(key)
idx &= 0x7fffffff
idx %= len(all_partitions)
return all_partitions[idx]
|
OpenBankProject/OBP-Kafka-Python
|
lib/kafka/partitioner/default.py
|
Python
|
agpl-3.0
| 1,021
|
'''Test cases for PySide API2 support'''
import unittest
import sys
from PySide2.QtGui import QIntValidator, QValidator
from PySide2.QtWidgets import QWidget, QSpinBox, QApplication
from helper import UsesQApplication
class WidgetValidatorQInt(QWidget, QIntValidator):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
QIntValidator.__init__(self, parent)
class WidgetValidatorQSpinBox(QSpinBox):
def __init__(self, parent=None):
QSpinBox.__init__(self, parent)
def fixup(self, text):
print("It was called!")
class DoubleQObjectInheritanceTest(UsesQApplication):
def testDouble(self):
'''Double inheritance from QObject classes'''
obj = WidgetValidatorQInt()
#QIntValidator methods
state, string, number = obj.validate('Test', 0)
self.assertEqual(state, QValidator.Invalid)
state, string, number = obj.validate('33', 0)
self.assertEqual(state, QValidator.Acceptable)
def testQSpinBox(self):
obj = WidgetValidatorQSpinBox()
obj.setRange(1, 10)
obj.setValue(0)
self.assertEqual(obj.value(), 1)
class QClipboardTest(UsesQApplication):
def testQClipboard(self):
#skip this test on MacOS because the clipboard is not available during the ssh session
#this cause problems in the buildbot
if sys.platform == 'darwin':
return
clip = QApplication.clipboard()
clip.setText("Testing this thing!")
text, subtype = clip.text("")
self.assertEqual(subtype, "plain")
self.assertEqual(text, "Testing this thing!")
if __name__ == '__main__':
unittest.main()
|
BadSingleton/pyside2
|
tests/QtWidgets/api2_test.py
|
Python
|
lgpl-2.1
| 1,698
|
#
# This file is part of Mapnik (c++ mapping toolkit)
#
# Copyright (C) 2015 Artem Pavlenko
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
Import ('env')
can_build = False
if env.get('BOOST_LIB_VERSION_FROM_HEADER'):
boost_version_from_header = int(env['BOOST_LIB_VERSION_FROM_HEADER'].split('_')[1])
if boost_version_from_header >= 56:
can_build = True
if not can_build:
print 'WARNING: skipping building the optional geojson datasource plugin which requires boost >= 1.56'
else:
Import ('plugin_base')
PLUGIN_NAME = 'geojson'
plugin_env = plugin_base.Clone()
plugin_sources = Split(
"""
%(PLUGIN_NAME)s_datasource.cpp
%(PLUGIN_NAME)s_featureset.cpp
%(PLUGIN_NAME)s_index_featureset.cpp
%(PLUGIN_NAME)s_memory_index_featureset.cpp
""" % locals()
)
# Link Library to Dependencies
libraries = []
libraries.append('mapnik-json')
if env['PLUGIN_LINKING'] == 'shared':
libraries.append('boost_system%s' % env['BOOST_APPEND'])
libraries.insert(0,env['MAPNIK_NAME'])
libraries.append(env['ICU_LIB_NAME'])
TARGET = plugin_env.SharedLibrary('../%s' % PLUGIN_NAME,
SHLIBPREFIX='',
SHLIBSUFFIX='.input',
source=plugin_sources,
LIBS=libraries)
# if the plugin links to libmapnik ensure it is built first
Depends(TARGET, env.subst('../../../src/%s' % env['MAPNIK_LIB_NAME']))
Depends(TARGET, env.subst('../../../src/json/libmapnik-json${LIBSUFFIX}'))
if 'uninstall' not in COMMAND_LINE_TARGETS:
env.Install(env['MAPNIK_INPUT_PLUGINS_DEST'], TARGET)
env.Alias('install', env['MAPNIK_INPUT_PLUGINS_DEST'])
plugin_obj = {
'LIBS': libraries,
'SOURCES': plugin_sources,
}
Return('plugin_obj')
|
zerebubuth/mapnik
|
plugins/input/geojson/build.py
|
Python
|
lgpl-2.1
| 2,641
|
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
"""
common code used by all flumotion processes
"""
__version__ = "$Rev$"
|
timvideos/flumotion
|
flumotion/common/__init__.py
|
Python
|
lgpl-2.1
| 725
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This file is part of the prometeo project.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
__author__ = 'Emanuele Bertoldi <emanuele.bertoldi@gmail.com>'
__copyright__ = 'Copyright (c) 2011 Emanuele Bertoldi'
__version__ = '0.0.5'
from django.conf.urls.defaults import *
urlpatterns = patterns('prometeo.core.registration.views',
url(r'^users/register/$', view='user_register', name='user_register'),
url(r'^users/activate/(?P<activation_key>[\w\d]+)/$', view='user_activate', name='user_activate'),
)
|
zuck/prometeo-erp
|
core/registration/urls.py
|
Python
|
lgpl-3.0
| 1,146
|
# encoding: utf-8
"""Tests for io.py"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from StringIO import StringIO
from subprocess import Popen, PIPE
import nose.tools as nt
from IPython.testing import decorators as dec
from IPython.utils.io import Tee, capture_output
from IPython.utils.py3compat import doctest_refactor_print
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_tee_simple():
"Very simple check with stdout only"
chan = StringIO()
text = 'Hello'
tee = Tee(chan, channel='stdout')
print >> chan, text
nt.assert_equal(chan.getvalue(), text+"\n")
class TeeTestCase(dec.ParametricTestCase):
def tchan(self, channel, check='close'):
trap = StringIO()
chan = StringIO()
text = 'Hello'
std_ori = getattr(sys, channel)
setattr(sys, channel, trap)
tee = Tee(chan, channel=channel)
print >> chan, text,
setattr(sys, channel, std_ori)
trap_val = trap.getvalue()
nt.assert_equals(chan.getvalue(), text)
if check=='close':
tee.close()
else:
del tee
def test(self):
for chan in ['stdout', 'stderr']:
for check in ['close', 'del']:
yield self.tchan(chan, check)
def test_io_init():
"""Test that io.stdin/out/err exist at startup"""
for name in ('stdin', 'stdout', 'stderr'):
cmd = doctest_refactor_print("from IPython.utils import io;print io.%s.__class__"%name)
p = Popen([sys.executable, '-c', cmd],
stdout=PIPE)
p.wait()
classname = p.stdout.read().strip().decode('ascii')
# __class__ is a reference to the class object in Python 3, so we can't
# just test for string equality.
assert 'IPython.utils.io.IOStream' in classname, classname
def test_capture_output():
"""capture_output() context works"""
with capture_output() as io:
print 'hi, stdout'
print >> sys.stderr, 'hi, stderr'
nt.assert_equals(io.stdout, 'hi, stdout\n')
nt.assert_equals(io.stderr, 'hi, stderr\n')
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/utils/tests/test_io.py
|
Python
|
lgpl-3.0
| 2,724
|
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import ssl
import mock
from oslo_config import cfg
import six.moves.urllib.request as urlrequest
import testtools
import webob
import webob.exc
from neutron.common import exceptions as exception
from neutron.db import api
from neutron.tests import base
from neutron.tests.common import helpers
from neutron import wsgi
CONF = cfg.CONF
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'var'))
def open_no_proxy(*args, **kwargs):
# NOTE(jamespage):
# Deal with more secure certification chain verification
# introduced in python 2.7.9 under PEP-0476
# https://github.com/python/peps/blob/master/pep-0476.txt
if hasattr(ssl, "_create_unverified_context"):
opener = urlrequest.build_opener(
urlrequest.ProxyHandler({}),
urlrequest.HTTPSHandler(context=ssl._create_unverified_context())
)
else:
opener = urlrequest.build_opener(urlrequest.ProxyHandler({}))
return opener.open(*args, **kwargs)
class TestServiceBase(base.BaseTestCase):
"""Service tests base."""
@mock.patch("neutron.policy.refresh")
@mock.patch("neutron.common.config.setup_logging")
def _test_reset(self, worker_service, setup_logging_mock, refresh_mock):
worker_service.reset()
setup_logging_mock.assert_called_once_with()
refresh_mock.assert_called_once_with()
class TestWorkerService(TestServiceBase):
"""WorkerService tests."""
@mock.patch('neutron.db.api.get_engine')
def test_start_withoutdb_call(self, apimock):
# clear engine from other tests
api._FACADE = None
_service = mock.Mock()
_service.pool.spawn.return_value = None
_app = mock.Mock()
workerservice = wsgi.WorkerService(_service, _app)
workerservice.start()
self.assertFalse(apimock.called)
def test_reset(self):
_service = mock.Mock()
_app = mock.Mock()
worker_service = wsgi.WorkerService(_service, _app)
self._test_reset(worker_service)
class TestWSGIServer(base.BaseTestCase):
"""WSGI server tests."""
def test_start_random_port(self):
server = wsgi.Server("test_random_port")
server.start(None, 0, host="127.0.0.1")
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
@mock.patch('oslo_service.service.ProcessLauncher')
def test_start_multiple_workers(self, ProcessLauncher):
launcher = ProcessLauncher.return_value
server = wsgi.Server("test_multiple_processes")
server.start(None, 0, host="127.0.0.1", workers=2)
launcher.launch_service.assert_called_once_with(mock.ANY, workers=2)
server.stop()
launcher.stop.assert_called_once_with()
server.wait()
launcher.wait.assert_called_once_with()
def test_start_random_port_with_ipv6(self):
server = wsgi.Server("test_random_port")
server.start(None, 0, host="::1")
self.assertEqual("::1", server.host)
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
def test_ipv6_listen_called_with_scope(self):
server = wsgi.Server("test_app")
with mock.patch.object(wsgi.eventlet, 'listen') as mock_listen:
with mock.patch.object(socket, 'getaddrinfo') as mock_get_addr:
mock_get_addr.return_value = [
(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'',
('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2))
]
with mock.patch.object(server, 'pool') as mock_pool:
server.start(None,
1234,
host="fe80::204:acff:fe96:da87%eth0")
mock_get_addr.assert_called_once_with(
"fe80::204:acff:fe96:da87%eth0",
1234,
socket.AF_UNSPEC,
socket.SOCK_STREAM
)
mock_listen.assert_called_once_with(
('fe80::204:acff:fe96:da87%eth0', 1234, 0, 2),
family=socket.AF_INET6,
backlog=cfg.CONF.backlog
)
mock_pool.spawn.assert_has_calls([
mock.call(
server._run,
None,
mock_listen.return_value.dup.return_value)
])
def test_app(self):
greetings = 'Hello, World!!!'
def hello_world(env, start_response):
if env['PATH_INFO'] != '/':
start_response('404 Not Found',
[('Content-Type', 'text/plain')])
return ['Not Found\r\n']
start_response('200 OK', [('Content-Type', 'text/plain')])
return [greetings]
server = wsgi.Server("test_app")
server.start(hello_world, 0, host="127.0.0.1")
response = open_no_proxy('http://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings.encode('utf-8'), response.read())
server.stop()
def test_disable_ssl(self):
CONF.set_default('use_ssl', True)
greetings = 'Hello, World!!!'
def hello_world(env, start_response):
if env['PATH_INFO'] != '/':
start_response('404 Not Found',
[('Content-Type', 'text/plain')])
return ['Not Found\r\n']
start_response('200 OK', [('Content-Type', 'text/plain')])
return [greetings]
server = wsgi.Server("test_app", disable_ssl=True)
server.start(hello_world, 0, host="127.0.0.1")
response = open_no_proxy('http://127.0.0.1:%d/' % server.port)
self.assertEqual(greetings.encode('utf-8'), response.read())
server.stop()
@mock.patch.object(wsgi, 'eventlet')
def test__run(self, eventlet_mock):
server = wsgi.Server('test')
server._run("app", "socket")
eventlet_mock.wsgi.server.assert_called_once_with(
'socket',
'app',
max_size=server.num_threads,
log=mock.ANY,
keepalive=CONF.wsgi_keep_alive,
socket_timeout=server.client_socket_timeout
)
class SerializerTest(base.BaseTestCase):
def test_serialize_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
input_dict = {'servers': {'test': 'pass'}}
content_type = 'application/unknown'
serializer = wsgi.Serializer()
self.assertRaises(
exception.InvalidContentType, serializer.serialize,
input_dict, content_type)
def test_get_deserialize_handler_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
content_type = 'application/unknown'
serializer = wsgi.Serializer()
self.assertRaises(
exception.InvalidContentType,
serializer.get_deserialize_handler, content_type)
def test_serialize_content_type_json(self):
"""Test serialize with content type json."""
input_data = {'servers': ['test=pass']}
content_type = 'application/json'
serializer = wsgi.Serializer()
result = serializer.serialize(input_data, content_type)
self.assertEqual(b'{"servers": ["test=pass"]}', result)
def test_deserialize_raise_bad_request(self):
"""Test serialize verifies that exception is raises."""
content_type = 'application/unknown'
data_string = 'test'
serializer = wsgi.Serializer()
self.assertRaises(
webob.exc.HTTPBadRequest,
serializer.deserialize, data_string, content_type)
def test_deserialize_json_content_type(self):
"""Test Serializer.deserialize with content type json."""
content_type = 'application/json'
data_string = '{"servers": ["test=pass"]}'
serializer = wsgi.Serializer()
result = serializer.deserialize(data_string, content_type)
self.assertEqual({'body': {u'servers': [u'test=pass']}}, result)
class RequestDeserializerTest(testtools.TestCase):
def setUp(self):
super(RequestDeserializerTest, self).setUp()
class JSONDeserializer(object):
def deserialize(self, data, action='default'):
return 'pew_json'
self.body_deserializers = {'application/json': JSONDeserializer()}
self.deserializer = wsgi.RequestDeserializer(self.body_deserializers)
def test_get_deserializer(self):
"""Test RequestDeserializer.get_body_deserializer."""
expected_json_serializer = self.deserializer.get_body_deserializer(
'application/json')
self.assertEqual(
expected_json_serializer,
self.body_deserializers['application/json'])
def test_get_expected_content_type(self):
"""Test RequestDeserializer.get_expected_content_type."""
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/json'
self.assertEqual('application/json',
self.deserializer.get_expected_content_type(request))
def test_get_action_args(self):
"""Test RequestDeserializer.get_action_args."""
env = {
'wsgiorg.routing_args': [None, {
'controller': None,
'format': None,
'action': 'update',
'id': 12}]}
expected = {'action': 'update', 'id': 12}
self.assertEqual(expected,
self.deserializer.get_action_args(env))
def test_deserialize(self):
"""Test RequestDeserializer.deserialize."""
with mock.patch.object(
self.deserializer, 'get_action_args') as mock_method:
mock_method.return_value = {'action': 'create'}
request = wsgi.Request.blank('/')
request.headers['Accept'] = 'application/json'
deserialized = self.deserializer.deserialize(request)
expected = ('create', {}, 'application/json')
self.assertEqual(expected, deserialized)
def test_get_body_deserializer_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
content_type = 'application/unknown'
deserializer = wsgi.RequestDeserializer()
self.assertRaises(
exception.InvalidContentType,
deserializer.get_body_deserializer, content_type)
class ResponseSerializerTest(testtools.TestCase):
def setUp(self):
super(ResponseSerializerTest, self).setUp()
class JSONSerializer(object):
def serialize(self, data, action='default'):
return b'pew_json'
class HeadersSerializer(object):
def serialize(self, response, data, action):
response.status_int = 404
self.body_serializers = {'application/json': JSONSerializer()}
self.serializer = wsgi.ResponseSerializer(
self.body_serializers, HeadersSerializer())
def test_serialize_unknown_content_type(self):
"""Verify that exception InvalidContentType is raised."""
self.assertRaises(
exception.InvalidContentType,
self.serializer.serialize,
{}, 'application/unknown')
def test_get_body_serializer(self):
"""Verify that exception InvalidContentType is raised."""
self.assertRaises(
exception.InvalidContentType,
self.serializer.get_body_serializer, 'application/unknown')
def test_get_serializer(self):
"""Test ResponseSerializer.get_body_serializer."""
content_type = 'application/json'
self.assertEqual(self.body_serializers[content_type],
self.serializer.get_body_serializer(content_type))
def test_serialize_json_response(self):
response = self.serializer.serialize({}, 'application/json')
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertEqual(b'pew_json', response.body)
self.assertEqual(404, response.status_int)
def test_serialize_response_None(self):
response = self.serializer.serialize(
None, 'application/json')
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertEqual(b'', response.body)
self.assertEqual(404, response.status_int)
class RequestTest(base.BaseTestCase):
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = b"<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = b"fake<br />"
self.assertIsNone(request.get_content_type())
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual("application/json", result)
def test_content_type_with_given_content_types(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/new-type;"
self.assertIsNone(request.get_content_type())
def test_content_type_from_accept(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3")
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_with_given_content_types(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/new_type"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
class ActionDispatcherTest(base.BaseTestCase):
def test_dispatch(self):
"""Test ActionDispatcher.dispatch."""
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x
self.assertEqual('pants',
serializer.dispatch('pants', action='create'))
def test_dispatch_action_None(self):
"""Test ActionDispatcher.dispatch with none action."""
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x + ' pants'
serializer.default = lambda x: x + ' trousers'
self.assertEqual('Two trousers',
serializer.dispatch('Two', action=None))
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: x + ' pants'
serializer.default = lambda x: x + ' trousers'
self.assertEqual('Two trousers',
serializer.dispatch('Two', action='update'))
class ResponseHeadersSerializerTest(base.BaseTestCase):
def test_default(self):
serializer = wsgi.ResponseHeaderSerializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'fake')
self.assertEqual(200, response.status_int)
def test_custom(self):
class Serializer(wsgi.ResponseHeaderSerializer):
def update(self, response, data):
response.status_int = 404
response.headers['X-Custom-Header'] = data['v']
serializer = Serializer()
response = webob.Response()
serializer.serialize(response, {'v': '123'}, 'update')
self.assertEqual(404, response.status_int)
self.assertEqual('123', response.headers['X-Custom-Header'])
class DictSerializerTest(base.BaseTestCase):
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
self.assertEqual('',
serializer.serialize({}, 'NonExistentAction'))
class JSONDictSerializerTest(base.BaseTestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = b'{"servers":{"a":[2,3]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace(b'\n', b'').replace(b' ', b'')
self.assertEqual(expected_json, result)
# The tested behaviour is only meant to be witnessed in Python 2, so it is
# OK to skip this test with Python 3.
@helpers.requires_py2
def test_json_with_utf8(self):
input_dict = dict(servers=dict(a=(2, '\xe7\xbd\x91\xe7\xbb\x9c')))
expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace(b'\n', b'').replace(b' ', b'')
self.assertEqual(expected_json, result)
def test_json_with_unicode(self):
input_dict = dict(servers=dict(a=(2, u'\u7f51\u7edc')))
expected_json = b'{"servers":{"a":[2,"\\u7f51\\u7edc"]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace(b'\n', b'').replace(b' ', b'')
self.assertEqual(expected_json, result)
class TextDeserializerTest(base.BaseTestCase):
def test_dispatch_default(self):
deserializer = wsgi.TextDeserializer()
self.assertEqual({},
deserializer.deserialize({}, 'update'))
class JSONDeserializerTest(base.BaseTestCase):
def test_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1'}}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
def test_default_raise_Malformed_Exception(self):
"""Test JsonDeserializer.default.
Test verifies JsonDeserializer.default raises exception
MalformedRequestBody correctly.
"""
data_string = ""
deserializer = wsgi.JSONDeserializer()
self.assertRaises(
exception.MalformedRequestBody, deserializer.default, data_string)
def test_json_with_utf8(self):
data = b'{"a": "\xe7\xbd\x91\xe7\xbb\x9c"}'
as_dict = {'body': {'a': u'\u7f51\u7edc'}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
def test_json_with_unicode(self):
data = b'{"a": "\u7f51\u7edc"}'
as_dict = {'body': {'a': u'\u7f51\u7edc'}}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict,
deserializer.deserialize(data))
class RequestHeadersDeserializerTest(base.BaseTestCase):
def test_default(self):
deserializer = wsgi.RequestHeadersDeserializer()
req = wsgi.Request.blank('/')
self.assertEqual({},
deserializer.deserialize(req, 'nonExistent'))
def test_custom(self):
class Deserializer(wsgi.RequestHeadersDeserializer):
def update(self, request):
return {'a': request.headers['X-Custom-Header']}
deserializer = Deserializer()
req = wsgi.Request.blank('/')
req.headers['X-Custom-Header'] = 'b'
self.assertEqual({'a': 'b'},
deserializer.deserialize(req, 'update'))
class ResourceTest(base.BaseTestCase):
@staticmethod
def my_fault_body_function():
return 'off'
class Controller(object):
def index(self, request, index=None):
return index
def test_dispatch(self):
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
actual = resource.dispatch(
resource.controller, 'index', action_args={'index': 'off'})
expected = 'off'
self.assertEqual(expected, actual)
def test_dispatch_unknown_controller_action(self):
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
self.assertRaises(
AttributeError, resource.dispatch,
resource.controller, 'create', {})
def test_malformed_request_body_throws_bad_request(self):
resource = wsgi.Resource(None, self.my_fault_body_function)
request = wsgi.Request.blank(
"/", body=b"{mal:formed", method='POST',
headers={'Content-Type': "application/json"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_wrong_content_type_throws_unsupported_media_type_error(self):
resource = wsgi.Resource(None, self.my_fault_body_function)
request = wsgi.Request.blank(
"/", body=b"{some:json}", method='POST',
headers={'Content-Type': "xxx"})
response = resource(request)
self.assertEqual(400, response.status_int)
def test_wrong_content_type_server_error(self):
resource = wsgi.Resource(None, self.my_fault_body_function)
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "unknow"})
response = resource(request)
self.assertEqual(500, response.status_int)
def test_call_resource_class_bad_request(self):
class FakeRequest(object):
def __init__(self):
self.url = 'http://where.no'
self.environ = 'environ'
self.body = 'body'
def method(self):
pass
def best_match_content_type(self):
return 'best_match_content_type'
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
request = FakeRequest()
result = resource(request)
self.assertEqual(400, result.status_int)
def test_type_error(self):
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "json"})
response = resource.dispatch(
request, action='index', action_args='test')
self.assertEqual(400, response.status_int)
def test_call_resource_class_internal_error(self):
class FakeRequest(object):
def __init__(self):
self.url = 'http://where.no'
self.environ = 'environ'
self.body = '{"Content-Type": "json"}'
def method(self):
pass
def best_match_content_type(self):
return 'application/json'
resource = wsgi.Resource(self.Controller(),
self.my_fault_body_function)
request = FakeRequest()
result = resource(request)
self.assertEqual(500, result.status_int)
class FaultTest(base.BaseTestCase):
def test_call_fault(self):
class MyException(object):
status_int = 415
explanation = 'test'
my_exceptions = MyException()
my_fault = wsgi.Fault(exception=my_exceptions)
request = wsgi.Request.blank(
"/", method='POST', headers={'Content-Type': "unknow"})
response = my_fault(request)
self.assertEqual(415, response.status_int)
|
wolverineav/neutron
|
neutron/tests/unit/test_wsgi.py
|
Python
|
apache-2.0
| 25,906
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This script is used to configure openvswitch flows on XenServer hosts.
"""
import os
import sys
# This is written to Python 2.4, since that is what is available on XenServer
import netaddr
import simplejson as json
from novalib import execute, execute_get_output
OVS_OFCTL = '/usr/bin/ovs-ofctl'
class OvsFlow(object):
def __init__(self, bridge, params):
self.bridge = bridge
self.params = params
def add(self, rule):
execute(OVS_OFCTL, 'add-flow', self.bridge, rule % self.params)
def clear_flows(self, ofport):
execute(OVS_OFCTL, 'del-flows', self.bridge, "in_port=%s" % ofport)
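# A minimal usage sketch (hypothetical values, not part of this script): the
# OvsFlow helper substitutes its params dict into each rule template before
# handing the result to ovs-ofctl, e.g.:
#
#   params = dict(VIF_NAME='vif5.0', MAC='aa:bb:cc:dd:ee:ff', OF_PORT='3')
#   flow = OvsFlow('xenbr0', params)
#   flow.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,arp,actions=normal")
#   # runs: ovs-ofctl add-flow xenbr0 \
#   #           "priority=3,in_port=3,dl_src=aa:bb:cc:dd:ee:ff,arp,actions=normal"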
def main(command, vif_raw, net_type):
if command not in ('online', 'offline'):
return
vif_name, dom_id, vif_index = vif_raw.split('-')
vif = "%s%s.%s" % (vif_name, dom_id, vif_index)
bridge = "xenbr%s" % vif_index
xsls = execute_get_output('/usr/bin/xenstore-ls',
'/local/domain/%s/vm-data/networking' % dom_id)
macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
for mac in macs:
xsread = execute_get_output('/usr/bin/xenstore-read',
'/local/domain/%s/vm-data/networking/%s' %
(dom_id, mac))
data = json.loads(xsread)
if data["label"] == "public":
this_vif = "vif%s.0" % dom_id
else:
this_vif = "vif%s.1" % dom_id
if vif == this_vif:
vif_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get',
'Interface', vif, 'ofport')
params = dict(VIF_NAME=vif,
MAC=data['mac'],
OF_PORT=vif_ofport)
ovs = OvsFlow(bridge, params)
if command == 'offline':
# I haven't found a way to clear only IPv4 or IPv6 rules.
ovs.clear_flows(vif_ofport)
if command == 'online':
if net_type in ('ipv4', 'all') and 'ips' in data:
for ip4 in data['ips']:
ovs.params.update({'IPV4_ADDR': ip4['ip']})
apply_ovs_ipv4_flows(ovs, bridge, params)
if net_type in ('ipv6', 'all') and 'ip6s' in data:
for ip6 in data['ip6s']:
link_local = str(netaddr.EUI(data['mac']).eui64()\
.ipv6_link_local())
ovs.params.update({'IPV6_LINK_LOCAL_ADDR': link_local})
ovs.params.update({'IPV6_GLOBAL_ADDR': ip6['ip']})
apply_ovs_ipv6_flows(ovs, bridge, params)
def apply_ovs_ipv4_flows(ovs, bridge, params):
# allow valid ARP outbound (both request / reply)
ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,arp,"
"arp_sha=%(MAC)s,nw_src=%(IPV4_ADDR)s,actions=normal")
ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,arp,"
"arp_sha=%(MAC)s,nw_src=0.0.0.0,actions=normal")
# allow valid IPv4 outbound
ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,ip,"
"nw_src=%(IPV4_ADDR)s,actions=normal")
def apply_ovs_ipv6_flows(ovs, bridge, params):
# allow valid IPv6 ND outbound (are both global and local IPs needed?)
# Neighbor Solicitation
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s,"
"actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s,"
"actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,actions=normal")
# Neighbor Advertisement
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,"
"nd_target=%(IPV6_LINK_LOCAL_ADDR)s,actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,"
"nd_target=%(IPV6_GLOBAL_ADDR)s,actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,actions=normal")
    # drop all other neighbor discovery (required because we permit all icmp6 below)
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=135,actions=drop")
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=136,actions=drop")
    # do not allow sending specific ICMPv6 types
# Router Advertisement
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=134,actions=drop")
# Redirect Gateway
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=137,actions=drop")
# Mobile Prefix Solicitation
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=146,actions=drop")
# Mobile Prefix Advertisement
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=147,actions=drop")
# Multicast Router Advertisement
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=151,actions=drop")
# Multicast Router Solicitation
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=152,actions=drop")
# Multicast Router Termination
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=153,actions=drop")
# allow valid IPv6 outbound, by type
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,tcp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,tcp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,udp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,udp6,actions=normal")
# all else will be dropped ...
if __name__ == "__main__":
if len(sys.argv) != 4:
print "usage: %s [online|offline] vif-domid-idx [ipv4|ipv6|all] " % \
os.path.basename(sys.argv[0])
sys.exit(1)
else:
command, vif_raw, net_type = sys.argv[1:4]
main(command, vif_raw, net_type)
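# Example invocation (hypothetical values, matching the usage string above):
#
#   ./ovs_configure_vif_flows.py online vif-5-0 all
#
# which programs flows for vif5.0 on bridge xenbr0 for both IPv4 and IPv6.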
|
xushiwei/nova
|
plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py
|
Python
|
apache-2.0
| 7,617
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationcertaction(base_resource) :
""" Configuration for CERT action resource. """
def __init__(self) :
self._name = ""
self._twofactor = ""
self._usernamefield = ""
self._groupnamefield = ""
self._defaultauthenticationgroup = ""
self.___count = 0
@property
def name(self) :
ur"""Name for the client cert authentication server profile (action).
		Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.), pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after the certificate action is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication action" or 'my authentication action').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name for the client cert authentication server profile (action).
		Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.), pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after the certificate action is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication action" or 'my authentication action').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def twofactor(self) :
ur"""Enables or disables two-factor authentication.
Two factor authentication is client cert authentication followed by password authentication.<br/>Default value: OFF<br/>Possible values = ON, OFF.
"""
try :
return self._twofactor
except Exception as e:
raise e
@twofactor.setter
def twofactor(self, twofactor) :
ur"""Enables or disables two-factor authentication.
Two factor authentication is client cert authentication followed by password authentication.<br/>Default value: OFF<br/>Possible values = ON, OFF
"""
try :
self._twofactor = twofactor
except Exception as e:
raise e
@property
def usernamefield(self) :
ur"""Client-cert field from which the username is extracted. Must be set to either ""Subject"" and ""Issuer"" (include both sets of double quotation marks).
Format: <field>:<subfield>.<br/>Minimum length = 1.
"""
try :
return self._usernamefield
except Exception as e:
raise e
@usernamefield.setter
def usernamefield(self, usernamefield) :
ur"""Client-cert field from which the username is extracted. Must be set to either ""Subject"" and ""Issuer"" (include both sets of double quotation marks).
Format: <field>:<subfield>.<br/>Minimum length = 1
"""
try :
self._usernamefield = usernamefield
except Exception as e:
raise e
@property
def groupnamefield(self) :
ur"""Client-cert field from which the group is extracted. Must be set to either ""Subject"" and ""Issuer"" (include both sets of double quotation marks).
Format: <field>:<subfield>.<br/>Minimum length = 1.
"""
try :
return self._groupnamefield
except Exception as e:
raise e
@groupnamefield.setter
def groupnamefield(self, groupnamefield) :
ur"""Client-cert field from which the group is extracted. Must be set to either ""Subject"" and ""Issuer"" (include both sets of double quotation marks).
Format: <field>:<subfield>.<br/>Minimum length = 1
"""
try :
self._groupnamefield = groupnamefield
except Exception as e:
raise e
@property
def defaultauthenticationgroup(self) :
ur"""This is the default group that is chosen when the authentication succeeds in addition to extracted groups.<br/>Maximum length = 64.
"""
try :
return self._defaultauthenticationgroup
except Exception as e:
raise e
@defaultauthenticationgroup.setter
def defaultauthenticationgroup(self, defaultauthenticationgroup) :
ur"""This is the default group that is chosen when the authentication succeeds in addition to extracted groups.<br/>Maximum length = 64
"""
try :
self._defaultauthenticationgroup = defaultauthenticationgroup
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationcertaction_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationcertaction
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add authenticationcertaction.
"""
try :
if type(resource) is not list :
addresource = authenticationcertaction()
addresource.name = resource.name
addresource.twofactor = resource.twofactor
addresource.usernamefield = resource.usernamefield
addresource.groupnamefield = resource.groupnamefield
addresource.defaultauthenticationgroup = resource.defaultauthenticationgroup
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ authenticationcertaction() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].twofactor = resource[i].twofactor
addresources[i].usernamefield = resource[i].usernamefield
addresources[i].groupnamefield = resource[i].groupnamefield
addresources[i].defaultauthenticationgroup = resource[i].defaultauthenticationgroup
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete authenticationcertaction.
"""
try :
if type(resource) is not list :
deleteresource = authenticationcertaction()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ authenticationcertaction() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ authenticationcertaction() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update authenticationcertaction.
"""
try :
if type(resource) is not list :
updateresource = authenticationcertaction()
updateresource.name = resource.name
updateresource.twofactor = resource.twofactor
updateresource.usernamefield = resource.usernamefield
updateresource.groupnamefield = resource.groupnamefield
updateresource.defaultauthenticationgroup = resource.defaultauthenticationgroup
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ authenticationcertaction() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].twofactor = resource[i].twofactor
updateresources[i].usernamefield = resource[i].usernamefield
updateresources[i].groupnamefield = resource[i].groupnamefield
updateresources[i].defaultauthenticationgroup = resource[i].defaultauthenticationgroup
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of authenticationcertaction resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = authenticationcertaction()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ authenticationcertaction() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ authenticationcertaction() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the authenticationcertaction resources that are configured on netscaler.
"""
try :
if not name :
obj = authenticationcertaction()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = authenticationcertaction()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [authenticationcertaction() for _ in range(len(name))]
obj = [authenticationcertaction() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = authenticationcertaction()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of authenticationcertaction resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationcertaction()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the authenticationcertaction resources configured on NetScaler.
"""
try :
obj = authenticationcertaction()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of authenticationcertaction resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationcertaction()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Twofactor:
ON = "ON"
OFF = "OFF"
class authenticationcertaction_response(base_response) :
def __init__(self, length=1) :
self.authenticationcertaction = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationcertaction = [authenticationcertaction() for _ in range(length)]
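# A minimal usage sketch (hypothetical session and values, not part of this
# module): the add/get/delete classmethods accept either a single resource or
# a list of resources, e.g.:
#
#   action = authenticationcertaction()
#   action.name = "cert_action_1"
#   action.twofactor = Twofactor.OFF
#   action.usernamefield = "Subject:CN"
#   authenticationcertaction.add(nitro_client, action)    # nitro_client: an authenticated nitro_service session
#   fetched = authenticationcertaction.get(nitro_client, "cert_action_1")
#   authenticationcertaction.delete(nitro_client, "cert_action_1")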
|
benfinke/ns_python
|
nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationcertaction.py
|
Python
|
apache-2.0
| 13,239
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from libcloud.utils.py3 import httplib
from libcloud.container.base import ContainerImage
from libcloud.container.drivers.kubernetes import KubernetesContainerDriver
from libcloud.test.secrets import CONTAINER_PARAMS_KUBERNETES
from libcloud.test.common.test_kubernetes import KubernetesAuthTestCaseMixin
from libcloud.test.file_fixtures import ContainerFileFixtures
from libcloud.test import MockHttp
from libcloud.test import unittest
class KubernetesContainerDriverTestCase(unittest.TestCase,
KubernetesAuthTestCaseMixin):
driver_cls = KubernetesContainerDriver
def setUp(self):
KubernetesContainerDriver.connectionCls.conn_class = KubernetesMockHttp
KubernetesMockHttp.type = None
KubernetesMockHttp.use_param = 'a'
self.driver = KubernetesContainerDriver(*CONTAINER_PARAMS_KUBERNETES)
def test_list_containers(self):
containers = self.driver.list_containers()
self.assertEqual(len(containers), 1)
self.assertEqual(containers[0].id,
'docker://3c48b5cda79bce4c8866f02a3b96a024edb8f660d10e7d1755e9ced49ef47b36')
self.assertEqual(containers[0].name, 'hello-world')
def test_list_clusters(self):
clusters = self.driver.list_clusters()
self.assertEqual(len(clusters), 2)
self.assertEqual(clusters[0].id,
'default')
self.assertEqual(clusters[0].name, 'default')
def test_get_cluster(self):
cluster = self.driver.get_cluster('default')
self.assertEqual(cluster.id,
'default')
self.assertEqual(cluster.name, 'default')
def test_create_cluster(self):
cluster = self.driver.create_cluster('test')
self.assertEqual(cluster.id,
'test')
self.assertEqual(cluster.name, 'test')
def test_destroy_cluster(self):
cluster = self.driver.get_cluster('default')
result = self.driver.destroy_cluster(cluster)
self.assertTrue(result)
def test_deploy_container(self):
image = ContainerImage(
id=None,
name='hello-world',
path=None,
driver=self.driver,
version=None
)
container = self.driver.deploy_container('hello-world', image=image)
self.assertEqual(container.name, 'hello-world')
def test_get_container(self):
container = self.driver.get_container('docker://3c48b5cda79bce4c8866f02a3b96a024edb8f660d10e7d1755e9ced49ef47b36')
assert container.id == 'docker://3c48b5cda79bce4c8866f02a3b96a024edb8f660d10e7d1755e9ced49ef47b36'
class KubernetesMockHttp(MockHttp):
fixtures = ContainerFileFixtures('kubernetes')
def _api_v1_pods(
self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('_api_v1_pods.json')
else:
raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_v1_namespaces(
self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('_api_v1_namespaces.json')
elif method == 'POST':
body = self.fixtures.load('_api_v1_namespaces_test.json')
else:
raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_v1_namespaces_default(
self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('_api_v1_namespaces_default.json')
elif method == 'DELETE':
body = self.fixtures.load('_api_v1_namespaces_default_DELETE.json')
else:
raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_v1_namespaces_default_pods(
self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('_api_v1_namespaces_default_pods.json')
elif method == 'POST':
body = self.fixtures.load('_api_v1_namespaces_default_pods_POST.json')
else:
raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
Kami/libcloud
|
libcloud/test/container/test_kubernetes.py
|
Python
|
apache-2.0
| 5,199
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA, Junki MIZUSHIMA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ikalog.utils.image_filters.filters import *
|
hasegaw/IkaLog
|
ikalog/utils/image_filters/__init__.py
|
Python
|
apache-2.0
| 732
|
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import re
import time
from proboscis.asserts import fail
from six.moves.urllib.parse import urlparse
from troveclient.compat.client import TroveHTTPClient
from trove.tests.config import CONFIG
print_req = True
def shorten_url(url):
parsed = urlparse(url)
if parsed.query:
method_url = parsed.path + '?' + parsed.query
else:
method_url = parsed.path
return method_url
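# e.g. (hypothetical URL) shorten_url("http://10.0.0.1:8779/v1.0/1234/instances?limit=2")
# returns "/v1.0/1234/instances?limit=2"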
class SnippetWriter(object):
def __init__(self, conf, get_replace_list):
self.conf = conf
self.get_replace_list = get_replace_list
def output_request(self, user_details, name, url, output_headers, body,
content_type, method, static_auth_token=True):
headers = []
parsed = urlparse(url)
method_url = shorten_url(url)
headers.append("%s %s HTTP/1.1" % (method, method_url))
headers.append("User-Agent: %s" % output_headers['User-Agent'])
headers.append("Host: %s" % parsed.netloc)
# static_auth_token option for documentation purposes
if static_auth_token:
output_token = '87c6033c-9ff6-405f-943e-2deb73f278b7'
else:
output_token = output_headers['X-Auth-Token']
headers.append("X-Auth-Token: %s" % output_token)
headers.append("Accept: %s" % output_headers['Accept'])
print("OUTPUT HEADERS: %s" % output_headers)
headers.append("Content-Type: %s" % output_headers['Content-Type'])
self.write_file(user_details, name, "-%s-http.txt" % content_type, url,
method, "request", output='\n'.join(headers))
pretty_body = self.format_body(body, content_type)
self.write_file(user_details, name, ".%s" % content_type, url,
method, "request", output=pretty_body)
def output_response(self, user_details, name, content_type, url, method,
resp, body):
version = "1.1" # if resp.version == 11 else "1.0"
lines = [
["HTTP/%s %s %s" % (version, resp.status, resp.reason)],
["Content-Type: %s" % resp['content-type']],
]
if 'via' in resp:
lines.append(["Via: %s" % resp['via']])
lines.append(["Content-Length: %s" % resp['content-length']])
lines.append(["Date: Mon, 18 Mar 2013 19:09:17 GMT"])
if 'server' in resp:
lines.append(["Server: %s" % resp["server"]])
new_lines = [x[0] for x in lines]
joined_lines = '\n'.join(new_lines)
self.write_file(user_details, name, "-%s-http.txt" % content_type, url,
method, "response", output=joined_lines)
if body:
pretty_body = self.format_body(body, content_type)
self.write_file(user_details, name, ".%s" % content_type, url,
method, "response", output=pretty_body)
def format_body(self, body, content_type):
assert content_type == 'json'
try:
if self.conf['replace_dns_hostname']:
before = r'\"hostname\": \"[a-zA-Z0-9-_\.]*\"'
after = '\"hostname\": \"%s\"' % self.conf[
'replace_dns_hostname']
body = re.sub(before, after, body)
return json.dumps(json.loads(body), sort_keys=True, indent=4)
except Exception:
return body or ''
def write_request_file(self, user_details, name, content_type, url, method,
req_headers, request_body):
if print_req:
print("\t%s req url:%s" % (content_type, url))
print("\t%s req method:%s" % (content_type, method))
print("\t%s req headers:%s" % (content_type, req_headers))
print("\t%s req body:%s" % (content_type, request_body))
self.output_request(user_details, name, url, req_headers, request_body,
content_type, method)
def write_response_file(self, user_details, name, content_type, url,
method, resp, resp_content):
if print_req:
print("\t%s resp:%s" % (content_type, resp))
print("\t%s resp content:%s" % (content_type, resp_content))
self.output_response(user_details, name, content_type, url, method,
resp, resp_content)
def write_file(self, user_details, name, content_type, url, method,
in_or_out, output):
output = output.replace(user_details['tenant'], '1234')
if self.conf['replace_host']:
output = output.replace(user_details['api_url'],
self.conf['replace_host'])
pre_host_port = urlparse(user_details['service_url']).netloc
post_host = urlparse(self.conf['replace_host']).netloc
output = output.replace(pre_host_port, post_host)
output = output.replace("fake_host", "hostname")
output = output.replace("FAKE_", "")
for resource in self.get_replace_list():
output = output.replace(str(resource[0]), str(resource[1]))
filename = "%s/db-%s-%s%s" % (self.conf['directory'],
name.replace('_', '-'), in_or_out,
content_type)
self._write_file(filename, output)
def _write_file(self, filename, output):
empty = len(output.strip()) == 0
# Manipulate actual data to appease doc niceness checks
actual = [line.rstrip() for line in output.split("\n")]
if not empty and actual[len(actual) - 1] != '':
actual.append("")
def goofy_diff(a, b):
diff = []
for i in range(len(a)):
if i < len(b):
if a[i].rstrip() != b[i].rstrip():
diff.append('Expected line %d :%s\n'
' Actual line %d :%s'
% (i + 1, a[i], i + 1, b[i]))
else:
diff.append("Expected line %d :%s" % (i + 1, a[i]))
for j in range(len(b) - len(a)):
i2 = len(a) + j
diff.append(" Actual line %d :%s" % (i2 + 1, b[i2]))
return diff
def write_actual_file():
# Always write the file.
with open(filename, "w") as file:
for line in actual:
file.write("%s\n" % line)
def assert_output_matches():
if os.path.isfile(filename):
with open(filename, 'r') as original_file:
original = original_file.read()
if empty:
fail('Error: output missing in new snippet generation '
'for %s. Old content follows:\n"""%s"""'
% (filename, original))
elif filename.endswith('.json'):
assert_json_matches(original)
else:
assert_file_matches(original)
elif not empty:
fail('Error: new file necessary where there was no file '
'before. Filename=%s\nContent follows:\n"""%s"""'
% (filename, output))
def assert_file_matches(original):
expected = original.split('\n')
# Remove the last item which will look like a duplicated
# file ending newline
expected.pop()
diff = '\n'.join(goofy_diff(expected, actual))
if diff:
fail('Error: output files differ for %s:\n%s'
% (filename, diff))
def order_json(json_obj):
"""Sort the json object so that it can be compared properly."""
if isinstance(json_obj, list):
return sorted(order_json(elem) for elem in json_obj)
if isinstance(json_obj, dict):
return sorted(
(key, order_json(value))
for key, value in json_obj.items())
else:
return json_obj
def assert_json_matches(original):
try:
expected_json = json.loads(original)
actual_json = json.loads(output)
except ValueError:
fail('Invalid json!\nExpected: %s\nActual: %s'
% (original, output))
if order_json(expected_json) != order_json(actual_json):
# Re-Use the same failure output if the json is different
assert_file_matches(original)
if not os.environ.get('TESTS_FIX_EXAMPLES'):
assert_output_matches()
elif not empty:
write_actual_file()
# This method is mixed into the client class.
# It requires the following fields: snippet_writer, content_type, and
# "name," the last of which must be set before each call.
def write_to_snippet(self, args, kwargs, resp, body):
if self.name is None:
raise RuntimeError("'name' not set before call.")
url = args[0]
method = args[1]
request_headers = kwargs['headers']
request_body = kwargs.get('body', None)
response_headers = resp
response_body = body
# Log request
user_details = {
'api_url': self.service_url,
'service_url': self.service_url,
'tenant': self.tenant,
}
self.snippet_writer.write_request_file(user_details, self.name,
self.content_type, url, method,
request_headers, request_body)
self.snippet_writer.write_response_file(user_details, self.name,
self.content_type, url, method,
response_headers, response_body)
# Create a short url to assert against.
short_url = url
base_url = self.service_url
    # note: the trailing comma makes this a one-element tuple of URL prefixes;
    # without it the loop would iterate over the characters of base_url
    for prefix in (base_url,):
if short_url.startswith(prefix):
short_url = short_url[len(prefix):]
self.old_info = {
'url': shorten_url(short_url),
'method': method,
'request_headers': request_headers,
'request_body': request_body,
'response_headers': response_headers,
'response_body': response_body
}
def add_fake_response_headers(headers):
"""
    Fakes other response items ('via', 'server', 'date') that would appear if
    you were using, for example, a proxy.
"""
conf = CONFIG.examples
if 'via' in conf and 'via' not in headers:
headers['via'] = conf['via']
if 'server' in conf and 'server' not in headers:
headers['server'] = conf['server']
if 'date' not in headers:
date_string = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
headers['date'] = date_string
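# A minimal sketch of the effect (values hypothetical; CONFIG.examples is
# assumed to define 'via' and 'server'):
#
#   headers = {'content-type': 'application/json', 'content-length': '42'}
#   add_fake_response_headers(headers)
#   # headers now also carries 'via', 'server' and a freshly formatted 'date',
#   # e.g. {'via': '1.1 example-proxy', 'server': 'Apache',
#   #       'date': 'Mon, 18 Mar 2013 19:09:17 GMT', ...}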
class JsonClient(TroveHTTPClient):
content_type = 'json'
def http_log(self, args, kwargs, resp, body):
add_fake_response_headers(resp)
self.pretty_log(args, kwargs, resp, body)
def write_snippet():
return write_to_snippet(self, args, kwargs, resp, body)
self.write_snippet = write_snippet
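# A minimal wiring sketch (hypothetical objects, not part of this module):
# write_to_snippet is mixed into the client class, and the fields it needs
# are set before the logged call, e.g.:
#
#   JsonClient.write_to_snippet = write_to_snippet
#   client = ...                          # a JsonClient built by the test harness
#   client.snippet_writer = SnippetWriter(CONFIG.examples, lambda: [])
#   client.name = "instance-list"         # must be set before each call
#   # each request then passes through http_log(), which stores a
#   # write_snippet() closure; invoking it emits the request/response files.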
|
zhangg/trove
|
trove/tests/examples/client.py
|
Python
|
apache-2.0
| 11,824
|
# a small 2-node example, just a producer and consumer
import os
import stat
import sys
print("Executing python script.")
file = open('test-2.sh','w')
file.write('#!/bin/sh\n')
#file.write('echo "Executing binary for 2" >'+sys.argv[-1][8:-1]+'\n')
file.write('echo "Executing binary for 2" >'+sys.argv[-1]+'\n')
file.close()
os.chmod('test-2.sh', stat.S_IRWXU)
|
mF2C/COMPSs
|
tests/sources/python/3_module_decaf_constrained/src/decaf/test-2.py
|
Python
|
apache-2.0
| 366
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from target_kinetis import Kinetis
from .memory_map import (FlashRegion, RamRegion, MemoryMap)
import logging
class KL46Z(Kinetis):
memoryMap = MemoryMap(
FlashRegion( start=0, length=0x40000, blocksize=0x400, isBootMemory=True),
RamRegion( start=0x1fffe000, length=0x8000)
)
def __init__(self, link):
super(KL46Z, self).__init__(link, self.memoryMap)
self.mdm_idr = 0x001c0020
|
tgarc/pyOCD
|
pyOCD/target/target_kl46z.py
|
Python
|
apache-2.0
| 1,060
|
from twisted.internet import reactor
from rackattack.tcp import transportserver
import sys
import simplejson
class Server:
def handle(self, string, respondCallback, peer):
obj = simplejson.loads(string)
respondCallback(simplejson.dumps(["Echoing", obj]))
server = Server()
factory = transportserver.TransportFactory(server.handle)
reactor.listenTCP(int(sys.argv[1]), factory)
reactor.run()
|
eliran-stratoscale/rackattack-api
|
test/twistedserver_echojson.py
|
Python
|
apache-2.0
| 414
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.ports.tables import\
get_fixed_ips
LOG = logging.getLogger(__name__)
def get_device_owner(port):
if port['device_owner'] == 'network:router_gateway':
return _('External Gateway')
elif port['device_owner'] == 'network:router_interface':
return _('Internal Interface')
else:
return ' '
class AddInterface(tables.LinkAction):
name = "create"
verbose_name = _("Add Interface")
url = "horizon:project:routers:addinterface"
classes = ("ajax-modal", "btn-create")
def get_link_url(self, datum=None):
router_id = self.table.kwargs['router_id']
return reverse(self.url, args=(router_id,))
class RemoveInterface(tables.DeleteAction):
data_type_singular = _("Interface")
data_type_plural = _("Interfaces")
failure_url = 'horizon:project:routers:detail'
def delete(self, request, obj_id):
try:
router_id = self.table.kwargs['router_id']
port = api.quantum.port_get(request, obj_id)
if port['device_owner'] == 'network:router_gateway':
api.quantum.router_remove_gateway(request, router_id)
else:
api.quantum.router_remove_interface(request,
router_id,
port_id=obj_id)
        except Exception:
msg = _('Failed to delete interface %s') % obj_id
LOG.info(msg)
router_id = self.table.kwargs['router_id']
redirect = reverse(self.failure_url,
args=[router_id])
exceptions.handle(request, msg, redirect=redirect)
def allowed(self, request, datum=None):
if datum and datum['device_owner'] == 'network:router_gateway':
return False
return True
class PortsTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:networks:ports:detail")
fixed_ips = tables.Column(get_fixed_ips, verbose_name=_("Fixed IPs"))
status = tables.Column("status", verbose_name=_("Status"))
device_owner = tables.Column(get_device_owner,
verbose_name=_("Type"))
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"))
def get_object_display(self, port):
return port.id
class Meta:
name = "interfaces"
verbose_name = _("Interfaces")
table_actions = (AddInterface, RemoveInterface)
row_actions = (RemoveInterface, )
|
Frostman/eho-horizon
|
openstack_dashboard/dashboards/project/routers/ports/tables.py
|
Python
|
apache-2.0
| 3,558
|
import pytest
from rancher import ApiError
from .common import random_str
from .conftest import wait_for
from .alert_common import MockReceiveAlert
dingtalk_config = {
"type": "/v3/schemas/dingtalkConfig",
"url": "http://127.0.0.1:4050/dingtalk/test/",
}
microsoft_teams_config = {
"type": "/v3/schemas/msTeamsConfig",
"url": "http://127.0.0.1:4050/microsoftTeams",
}
MOCK_RECEIVER_ALERT_PORT = 4050
def test_alert_access(admin_mc, admin_pc, admin_cc, user_mc, remove_resource):
"""Tests that a user with read-only access is not
able to deactivate an alert.
"""
prtb = admin_mc.client.create_project_role_template_binding(
name="prtb-" + random_str(),
userId=user_mc.user.id,
projectId=admin_pc.project.id,
roleTemplateId="read-only")
remove_resource(prtb)
# we get some project defaults, wait for them to come up
wait_for(projectAlertRules(user_mc.client),
fail_handler=lambda: "failed waiting for project alerts",
timeout=120)
# list with admin_mc to get action not available to user
alerts = admin_mc.client.list_projectAlertRule(
projectId=admin_pc.project.id
)
with pytest.raises(ApiError) as e:
user_mc.client.action(obj=alerts.data[0], action_name="deactivate")
assert e.value.error.status == 404
def projectAlertRules(client):
"""Wait for the crtb to have the userId populated"""
def cb():
return len(client.list_projectAlertRule().data) > 0
return cb
@pytest.fixture(scope="module")
def mock_receiver_alert():
server = MockReceiveAlert(port=MOCK_RECEIVER_ALERT_PORT)
server.start()
yield server
server.shutdown_server()
def test_add_notifier(admin_mc, remove_resource, mock_receiver_alert):
client = admin_mc.client
# Add the notifier dingtalk and microsoftTeams
notifier_dingtalk = client.create_notifier(name="dingtalk",
clusterId="local",
dingtalkConfig=dingtalk_config)
notifier_microsoft_teams = client.create_notifier(
name="microsoftTeams",
clusterId="local",
msteamsConfig=microsoft_teams_config)
client.action(obj=notifier_microsoft_teams,
action_name="send",
msteamsConfig=microsoft_teams_config)
client.action(obj=notifier_dingtalk,
action_name="send",
dingtalkConfig=dingtalk_config)
# Remove the notifiers
remove_resource(notifier_dingtalk)
remove_resource(notifier_microsoft_teams)
|
rancher/rancher
|
tests/integration/suite/test_alert.py
|
Python
|
apache-2.0
| 2,622
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from airflow.models.taskinstance import SimpleTaskInstance
class CallbackRequest:
"""
Base Class with information about the callback to be executed.
:param full_filepath: File Path to use to run the callback
:param msg: Additional Message that can be used for logging
"""
def __init__(self, full_filepath: str, msg: Optional[str] = None):
self.full_filepath = full_filepath
self.msg = msg
def __eq__(self, other):
if isinstance(other, CallbackRequest):
return self.__dict__ == other.__dict__
return False
def __repr__(self):
return str(self.__dict__)
class TaskCallbackRequest(CallbackRequest):
"""
A Class with information about the success/failure TI callback to be executed. Currently, only failure
callbacks (when tasks are externally killed) and Zombies are run via DagFileProcessorProcess.
:param full_filepath: File Path to use to run the callback
:param simple_task_instance: Simplified Task Instance representation
:param is_failure_callback: Flag to determine whether it is a Failure Callback or Success Callback
:param msg: Additional Message that can be used for logging to determine failure/zombie
"""
def __init__(
self,
full_filepath: str,
simple_task_instance: SimpleTaskInstance,
is_failure_callback: Optional[bool] = True,
msg: Optional[str] = None,
):
super().__init__(full_filepath=full_filepath, msg=msg)
self.simple_task_instance = simple_task_instance
self.is_failure_callback = is_failure_callback
class DagCallbackRequest(CallbackRequest):
"""
A Class with information about the success/failure DAG callback to be executed.
:param full_filepath: File Path to use to run the callback
:param dag_id: DAG ID
:param run_id: Run ID for the DagRun
:param is_failure_callback: Flag to determine whether it is a Failure Callback or Success Callback
:param msg: Additional Message that can be used for logging
"""
def __init__(
self,
full_filepath: str,
dag_id: str,
run_id: str,
is_failure_callback: Optional[bool] = True,
msg: Optional[str] = None,
):
super().__init__(full_filepath=full_filepath, msg=msg)
self.dag_id = dag_id
self.run_id = run_id
self.is_failure_callback = is_failure_callback
class SlaCallbackRequest(CallbackRequest):
"""
A class with information about the SLA callback to be executed.
:param full_filepath: File Path to use to run the callback
:param dag_id: DAG ID
"""
def __init__(self, full_filepath: str, dag_id: str):
super().__init__(full_filepath)
self.dag_id = dag_id
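# A minimal usage sketch (hypothetical values, not part of this module):
#
#   request = DagCallbackRequest(
#       full_filepath="/dags/example_dag.py",
#       dag_id="example_dag",
#       run_id="manual__2021-01-01T00:00:00+00:00",
#       is_failure_callback=True,
#       msg="dag run failed",
#   )
#   # __eq__ compares instance dictionaries, so a request rebuilt from the
#   # same fields compares equal:
#   assert request == DagCallbackRequest(**request.__dict__)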
|
apache/incubator-airflow
|
airflow/utils/callback_requests.py
|
Python
|
apache-2.0
| 3,597
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Report.report_filter_fields'
db.delete_column('autoreports_report', 'report_filter_fields')
# Deleting field 'Report.advanced_options'
db.delete_column('autoreports_report', 'advanced_options')
# Deleting field 'Report.report_display_fields'
db.delete_column('autoreports_report', 'report_display_fields')
# Adding field 'Report.options'
db.add_column('autoreports_report', 'options', self.gf('configfield.dbfields.JSONField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Adding field 'Report.report_filter_fields'
db.add_column('autoreports_report', 'report_filter_fields', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
# Adding field 'Report.advanced_options'
db.add_column('autoreports_report', 'advanced_options', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
# Adding field 'Report.report_display_fields'
db.add_column('autoreports_report', 'report_display_fields', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
# Deleting field 'Report.options'
db.delete_column('autoreports_report', 'options')
models = {
'autoreports.report': {
'Meta': {'object_name': 'Report'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'options': ('configfield.dbfields.JSONField', [], {'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['autoreports']
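# --- Illustrative sketch (editor's addition, not part of the upstream migration) ---
# After this migration the three removed text columns are consolidated into the
# single JSON ``options`` column. A hedged example of reading it back; the key
# name used below is an assumption for illustration, not defined by this file.
def _example_read_filter_fields(report):
    """Return the filter fields stored in the consolidated ``options`` JSON."""
    options = report.options or {}
    return options.get('report_filter_fields', [])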
|
fatihzkaratana/intranet
|
backend/autoreports/migrations/0002_auto__del_field_report_report_filter_fields__del_field_report_advanced.py
|
Python
|
apache-2.0
| 2,623
|
# Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base as test_base
from testtools import matchers
import webob
import webob.dec
from oslo.middleware import request_id
class RequestIdTest(test_base.BaseTestCase):
def test_generate_request_id(self):
@webob.dec.wsgify
def application(req):
return req.environ[request_id.ENV_REQUEST_ID]
app = request_id.RequestId(application)
req = webob.Request.blank('/test')
res = req.get_response(app)
res_req_id = res.headers.get(request_id.HTTP_RESP_HEADER_REQUEST_ID)
self.assertThat(res_req_id, matchers.StartsWith(b'req-'))
# request-id in request environ is returned as response body
self.assertEqual(res_req_id, res.body)
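# --- Illustrative usage sketch (editor's addition, mirroring the test above) ---
# Wrapping any WSGI application with the middleware stores a generated
# ``req-...`` identifier in the request environ and echoes it back in the
# response headers.
def _example_wrapped_app():
    @webob.dec.wsgify
    def application(req):
        # Return the request id that the middleware stored in the environ.
        return req.environ[request_id.ENV_REQUEST_ID]

    app = request_id.RequestId(application)
    response = webob.Request.blank('/example').get_response(app)
    return response.headers.get(request_id.HTTP_RESP_HEADER_REQUEST_ID)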
|
varunarya10/oslo.middleware
|
tests/test_request_id.py
|
Python
|
apache-2.0
| 1,364
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Sleuthkit (TSK) bodyfile (or mactime) event formatter."""
import unittest
from plaso.formatters import mactime
from tests.formatters import test_lib
class MactimeFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the mactime event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = mactime.MactimeFormatter()
self.assertNotEqual(event_formatter, None)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = mactime.MactimeFormatter()
expected_attribute_names = [u'filename']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
if __name__ == '__main__':
unittest.main()
|
ostree/plaso
|
tests/formatters/mactime.py
|
Python
|
apache-2.0
| 870
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class ImageMembersClient(rest_client.RestClient):
api_version = "v1"
def list_image_members(self, image_id):
"""List all members of an image."""
url = 'images/%s/members' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def list_shared_images(self, tenant_id):
"""List image memberships for the given tenant.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/image/v1/#list-shared-images
"""
url = 'shared-images/%s' % tenant_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def create_image_member(self, image_id, member_id, **kwargs):
"""Add a member to an image.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/image/v1/#add-member-to-image
"""
url = 'images/%s/members/%s' % (image_id, member_id)
body = json.dumps({'member': kwargs})
resp, __ = self.put(url, body)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
def delete_image_member(self, image_id, member_id):
"""Removes a membership from the image.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/image/v1/#remove-member
"""
url = 'images/%s/members/%s' % (image_id, member_id)
resp, __ = self.delete(url)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
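# --- Illustrative usage sketch (editor's addition, not part of the upstream client) ---
# A hedged example of driving this client. Constructing it for real requires a
# configured Tempest auth provider; the auth_provider, region, image_id and
# member_id below are placeholders.
def _example_share_image(auth_provider, image_id, member_id):
    client = ImageMembersClient(auth_provider, 'image', 'regionOne')
    client.create_image_member(image_id, member_id, can_share=True)
    return client.list_image_members(image_id)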
|
openstack/tempest
|
tempest/lib/services/image/v1/image_members_client.py
|
Python
|
apache-2.0
| 2,551
|
# -*- coding: utf-8 -*-
'''
Manage chassis via Salt Proxies.
.. versionadded:: 2015.8.2
Below is an example state that sets basic parameters:
.. code-block:: yaml
my-dell-chassis:
dellchassis.chassis:
- chassis_name: my-dell-chassis
- datacenter: dc-1-us
- location: my-location
- mode: 2
- idrac_launch: 1
- slot_names:
- server-1: my-slot-name
- server-2: my-other-slot-name
- blade_power_states:
- server-1: on
- server-2: off
- server-3: powercycle
However, it is possible to place the entire set of chassis configuration
data in pillar. Here's an example pillar structure:
.. code-block:: yaml
proxy:
host: 10.27.20.18
admin_username: root
fallback_admin_username: root
passwords:
- super-secret
- old-secret
proxytype: fx2
chassis:
name: fx2-1
username: root
password: saltstack1
datacenter: london
location: rack-1-shelf-3
management_mode: 2
idrac_launch: 0
slot_names:
- 'server-1': blade1
- 'server-2': blade2
servers:
server-1:
idrac_password: saltstack1
ipmi_over_lan: True
ip: 172.17.17.132
netmask: 255.255.0.0
gateway: 172.17.17.1
server-2:
idrac_password: saltstack1
ipmi_over_lan: True
ip: 172.17.17.2
netmask: 255.255.0.0
gateway: 172.17.17.1
server-3:
idrac_password: saltstack1
ipmi_over_lan: True
ip: 172.17.17.20
netmask: 255.255.0.0
gateway: 172.17.17.1
server-4:
idrac_password: saltstack1
ipmi_over_lan: True
ip: 172.17.17.2
netmask: 255.255.0.0
gateway: 172.17.17.1
switches:
switch-1:
ip: 192.168.1.2
netmask: 255.255.255.0
gateway: 192.168.1.1
snmp: nonpublic
password: saltstack1
switch-2:
ip: 192.168.1.3
netmask: 255.255.255.0
gateway: 192.168.1.1
snmp: nonpublic
password: saltstack1
And to go with it, here's an example state that pulls the data from the
pillar stated above:
.. code-block:: yaml
{% set details = pillar.get('proxy:chassis', {}) %}
standup-step1:
dellchassis.chassis:
- name: {{ details['name'] }}
- location: {{ details['location'] }}
- mode: {{ details['management_mode'] }}
- idrac_launch: {{ details['idrac_launch'] }}
- slot_names:
{% for entry in details['slot_names'] %}
- {{ entry.keys()[0] }}: {{ entry[entry.keys()[0]] }}
{% endfor %}
blade_powercycle:
dellchassis.chassis:
- blade_power_states:
- server-1: powercycle
- server-2: powercycle
- server-3: powercycle
- server-4: powercycle
# Set idrac_passwords for blades. racadm needs them to be called 'server-x'
{% for k, v in details['servers'].iteritems() %}
{{ k }}:
dellchassis.blade_idrac:
- idrac_password: {{ v['idrac_password'] }}
{% endfor %}
# Set management ip addresses, passwords, and snmp strings for switches
{% for k, v in details['switches'].iteritems() %}
{{ k }}-switch-setup:
dellchassis.switch:
- name: {{ k }}
- ip: {{ v['ip'] }}
- netmask: {{ v['netmask'] }}
- gateway: {{ v['gateway'] }}
- password: {{ v['password'] }}
- snmp: {{ v['snmp'] }}
{% endfor %}
.. note::
This state module relies on the dracr.py execution module, which runs racadm commands on
the chassis, blades, etc. The racadm command runs very slowly and, depending on your state,
the proxy minion return might time out before the racadm commands have completed. If you
are repeatedly seeing minions time out after state calls, please use the ``-t`` CLI argument
to increase the timeout value.
For example:
.. code-block:: bash
salt '*' state.sls my-dell-chassis-state-name -t 60
.. note::
The Dell CMC units perform adequately but many iDRACs are **excruciatingly**
slow. Some functions can take minutes to execute.
'''
# Import python libs
from __future__ import absolute_import
import logging
import os
log = logging.getLogger(__name__)
from salt.exceptions import CommandExecutionError
def __virtual__():
return 'chassis.cmd' in __salt__
def blade_idrac(name, idrac_password=None, idrac_ipmi=None,
idrac_ip=None, idrac_netmask=None, idrac_gateway=None,
idrac_dnsname=None,
idrac_dhcp=None):
'''
Set parameters for iDRAC in a blade.
:param idrac_password: Password to use to connect to the iDRACs directly
(idrac_ipmi and idrac_dnsname must be set directly on the iDRAC. They
can't be set through the CMC. If this password is present, use it
instead of the CMC password)
:param idrac_ipmi: Enable/Disable IPMI over LAN
:param idrac_ip: Set IP address for iDRAC
:param idrac_netmask: Set netmask for iDRAC
:param idrac_gateway: Set gateway for iDRAC
:param idrac_dhcp: Turn on DHCP for iDRAC (True turns on, False does nothing
because setting a static IP will disable DHCP).
:return: A standard Salt changes dictionary
NOTE: If any of the IP address settings is configured, all of ip, netmask,
and gateway must be present
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
if not idrac_password:
(username, password) = __salt__['chassis.chassis_credentials']()
else:
password = idrac_password
module_network = __salt__['chassis.cmd']('network_info', module=name)
current_idrac_ip = module_network['Network']['IP Address']
if idrac_ipmi is not None:
if idrac_ipmi is True or idrac_ipmi == 1:
idrac_ipmi = '1'
if idrac_ipmi is False or idrac_ipmi == 0:
idrac_ipmi = '0'
current_ipmi = __salt__['dracr.get_general']('cfgIpmiLan', 'cfgIpmiLanEnable',
host=current_idrac_ip, admin_username='root',
admin_password=password)
if current_ipmi != idrac_ipmi:
ch = {'Old': current_ipmi, 'New': idrac_ipmi}
ret['changes']['IPMI'] = ch
if idrac_dnsname is not None:
dnsret = __salt__['dracr.get_dns_dracname'](host=current_idrac_ip,
admin_username='root',
admin_password=password)
current_dnsname = dnsret['[Key=iDRAC.Embedded.1#NIC.1]']['DNSRacName']
if current_dnsname != idrac_dnsname:
ch = {'Old': current_dnsname,
'New': idrac_dnsname}
ret['changes']['DNSRacName'] = ch
if idrac_dhcp is not None or idrac_ip or idrac_netmask or idrac_gateway:
if idrac_dhcp is True or idrac_dhcp == 1:
idrac_dhcp = 1
else:
idrac_dhcp = 0
if str(module_network['Network']['DHCP Enabled']) == '0' and idrac_dhcp == 1:
ch = {'Old': module_network['Network']['DHCP Enabled'],
'New': idrac_dhcp}
ret['changes']['DRAC DHCP'] = ch
if idrac_dhcp == 0 and all([idrac_ip, idrac_netmask, idrac_gateway]):
current_network = __salt__['chassis.cmd']('network_info',
module=name)
old_ipv4 = {}
new_ipv4 = {}
if current_network['Network']['IP Address'] != idrac_ip:
old_ipv4['ip'] = current_network['Network']['IP Address']
new_ipv4['ip'] = idrac_ip
if current_network['Network']['Subnet Mask'] != idrac_netmask:
old_ipv4['netmask'] = current_network['Network']['Subnet Mask']
new_ipv4['netmask'] = idrac_netmask
if current_network['Network']['Gateway'] != idrac_gateway:
old_ipv4['gateway'] = current_network['Network']['Gateway']
new_ipv4['gateway'] = idrac_gateway
if new_ipv4 != {}:
ret['changes']['Network'] = {}
ret['changes']['Network']['Old'] = old_ipv4
ret['changes']['Network']['New'] = new_ipv4
if ret['changes'] == {}:
ret['comment'] = 'iDRAC on blade is already in the desired state.'
return ret
if __opts__['test'] and ret['changes'] != {}:
ret['result'] = None
ret['comment'] = 'iDRAC on blade will change.'
return ret
if 'IPMI' in ret['changes']:
ipmi_result = __salt__['dracr.set_general']('cfgIpmiLan',
'cfgIpmiLanEnable',
idrac_ipmi,
host=current_idrac_ip,
admin_username='root',
admin_password=password)
if not ipmi_result:
ret['result'] = False
ret['changes']['IPMI']['success'] = False
if 'DNSRacName' in ret['changes']:
dnsracname_result = __salt__['dracr.set_dns_dracname'](idrac_dnsname,
host=current_idrac_ip,
admin_username='root',
admin_password=password)
if dnsracname_result['retcode'] == 0:
ret['changes']['DNSRacName']['success'] = True
else:
ret['result'] = False
ret['changes']['DNSRacName']['success'] = False
ret['changes']['DNSRacName']['return'] = dnsracname_result
if 'DRAC DHCP' in ret['changes']:
dhcp_result = __salt__['chassis.cmd']('set_niccfg', dhcp=idrac_dhcp)
if dhcp_result['retcode'] == 0:
ret['changes']['DRAC DHCP']['success'] = True
else:
ret['result'] = False
ret['changes']['DRAC DHCP']['success'] = False
ret['changes']['DRAC DHCP']['return'] = dhcp_result
if 'Network' in ret['changes']:
network_result = __salt__['chassis.cmd']('set_niccfg', ip=idrac_ip,
netmask=idrac_netmask,
gateway=idrac_gateway,
module=name)
if network_result['retcode'] == 0:
ret['changes']['Network']['success'] = True
else:
ret['result'] = False
ret['changes']['Network']['success'] = False
ret['changes']['Network']['return'] = network_result
return ret
def chassis(name, chassis_name=None, password=None, datacenter=None,
location=None, mode=None, idrac_launch=None, slot_names=None,
blade_power_states=None):
'''
Manage a Dell Chassis.
chassis_name
The name of the chassis.
datacenter
The datacenter in which the chassis is located
location
The location of the chassis.
password
Password for the chassis. Note: If this password is set for the chassis,
the current implementation of this state will set this password both on
the chassis and as the iDRAC password on any configured blades. If the
passwords for the blades should be distinct, they should be set separately
with the blade_idrac function.
mode
The management mode of the chassis. Viable options are:
- 0: None
- 1: Monitor
- 2: Manage and Monitor
idrac_launch
The iDRAC launch method of the chassis. Viable options are:
- 0: Disabled (launch iDRAC using IP address)
- 1: Enabled (launch iDRAC using DNS name)
slot_names
The names of the slots, provided as a list in which each entry is
identified by its slot number.
blade_power_states
The power states of the blade servers, provided as a list in which each
entry is identified by its server number. Viable options are:
- on: Ensure the blade server is powered on.
- off: Ensure the blade server is powered off.
- powercycle: Power cycle the blade server.
Example:
.. code-block:: yaml
my-dell-chassis:
dellchassis.chassis:
- chassis_name: my-dell-chassis
- location: my-location
- datacenter: london
- mode: 2
- idrac_launch: 1
- slot_names:
- 1: my-slot-name
- 2: my-other-slot-name
- blade_power_states:
- server-1: on
- server-2: off
- server-3: powercycle
'''
ret = {'name': chassis_name,
'chassis_name': chassis_name,
'result': True,
'changes': {},
'comment': ''}
chassis_cmd = 'chassis.cmd'
cfg_tuning = 'cfgRacTuning'
mode_cmd = 'cfgRacTuneChassisMgmtAtServer'
launch_cmd = 'cfgRacTuneIdracDNSLaunchEnable'
inventory = __salt__[chassis_cmd]('inventory')
if idrac_launch:
idrac_launch = str(idrac_launch)
current_name = __salt__[chassis_cmd]('get_chassis_name')
if chassis_name != current_name:
ret['changes'].update({'Name':
{'Old': current_name,
'New': chassis_name}})
current_dc = __salt__[chassis_cmd]('get_chassis_datacenter')
if datacenter and datacenter != current_dc:
ret['changes'].update({'Datacenter':
{'Old': current_dc,
'New': datacenter}})
if password:
ret['changes'].update({'Password':
{'Old': '******',
'New': '******'}})
if location:
current_location = __salt__[chassis_cmd]('get_chassis_location')
if location != current_location:
ret['changes'].update({'Location':
{'Old': current_location,
'New': location}})
if mode:
current_mode = __salt__[chassis_cmd]('get_general', cfg_tuning, mode_cmd)
if mode != current_mode:
ret['changes'].update({'Management Mode':
{'Old': current_mode,
'New': mode}})
if idrac_launch:
current_launch_method = __salt__[chassis_cmd]('get_general', cfg_tuning, launch_cmd)
if idrac_launch != current_launch_method:
ret['changes'].update({'iDrac Launch Method':
{'Old': current_launch_method,
'New': idrac_launch}})
if slot_names:
current_slot_names = __salt__[chassis_cmd]('list_slotnames')
for s in slot_names:
key = s.keys()[0]
new_name = s[key]
if key.startswith('slot-'):
key = key[5:]
current_slot_name = current_slot_names.get(key).get('slotname')
if current_slot_name != new_name:
old = {key: current_slot_name}
new = {key: new_name}
if ret['changes'].get('Slot Names') is None:
ret['changes'].update({'Slot Names':
{'Old': {},
'New': {}}})
ret['changes']['Slot Names']['Old'].update(old)
ret['changes']['Slot Names']['New'].update(new)
current_power_states = {}
target_power_states = {}
if blade_power_states:
for b in blade_power_states:
key = b.keys()[0]
status = __salt__[chassis_cmd]('server_powerstatus', module=key)
current_power_states[key] = status.get('status', -1)
if b[key] in ('powerdown', 'off'):
if current_power_states[key] != -1 and current_power_states[key]:
target_power_states[key] = 'powerdown'
if b[key] in ('powerup', 'on'):
if current_power_states[key] != -1 and not current_power_states[key]:
target_power_states[key] = 'powerup'
if b[key] == 'powercycle':
if current_power_states[key] != -1 and not current_power_states[key]:
target_power_states[key] = 'powerup'
if current_power_states[key] != -1 and current_power_states[key]:
target_power_states[key] = 'powercycle'
for k, v in target_power_states.iteritems():
old = {k: current_power_states[k]}
new = {k: v}
if ret['changes'].get('Blade Power States') is None:
ret['changes'].update({'Blade Power States':
{'Old': {},
'New': {}}})
ret['changes']['Blade Power States']['Old'].update(old)
ret['changes']['Blade Power States']['New'].update(new)
if ret['changes'] == {}:
ret['comment'] = 'Dell chassis is already in the desired state.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Dell chassis configuration will change.'
return ret
# Finally, set the necessary configurations on the chassis.
name = __salt__[chassis_cmd]('set_chassis_name', chassis_name)
if location:
location = __salt__[chassis_cmd]('set_chassis_location', location)
pw_result = True
if password:
pw_single = True
if __salt__[chassis_cmd]('change_password', username='root', uid=1,
password=password):
for blade in inventory['server'].keys():
pw_single = __salt__[chassis_cmd]('deploy_password',
username='root',
password=password,
module=blade)
if not pw_single:
pw_result = False
else:
pw_result = False
if datacenter:
datacenter_result = __salt__[chassis_cmd]('set_chassis_datacenter',
datacenter)
if mode:
mode = __salt__[chassis_cmd]('set_general', cfg_tuning, mode_cmd, mode)
if idrac_launch:
idrac_launch = __salt__[chassis_cmd]('set_general', cfg_tuning, launch_cmd, idrac_launch)
if ret['changes'].get('Slot Names') is not None:
slot_rets = []
for s in slot_names:
key = s.keys()[0]
new_name = s[key]
if key.startswith('slot-'):
key = key[5:]
slot_rets.append(__salt__[chassis_cmd]('set_slotname', key, new_name))
if any(slot_rets) is False:
slot_names = False
else:
slot_names = True
powerchange_all_ok = True
for k, v in target_power_states.iteritems():
powerchange_ok = __salt__[chassis_cmd]('server_power', v, module=k)
if not powerchange_ok:
powerchange_all_ok = False
if any(result is False for result in (name, location, mode, idrac_launch,
                                      slot_names, powerchange_all_ok, pw_result)):
    ret['result'] = False
    ret['comment'] = 'There was an error setting the Dell chassis.'
else:
    ret['comment'] = 'Dell chassis was updated.'
return ret
def switch(name, ip=None, netmask=None, gateway=None, dhcp=None,
password=None, snmp=None):
'''
Manage switches in a Dell Chassis.
name
The switch designation (e.g. switch-1, switch-2)
ip
The Static IP Address of the switch
netmask
The netmask for the static IP
gateway
The gateway for the static IP
dhcp
True: Enable DHCP
False: Do not change DHCP setup
(disabling DHCP is automatic when a static IP is set)
password
The access (root) password for the switch
snmp
The SNMP community string for the switch
Example:
.. code-block:: yaml
my-dell-chassis:
dellchassis.switch:
- switch: switch-1
- ip: 192.168.1.1
- netmask: 255.255.255.0
- gateway: 192.168.1.254
- dhcp: True
- password: secret
- snmp: public
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
current_nic = __salt__['chassis.cmd']('network_info', module=name)
try:
if current_nic.get('retcode', 0) != 0:
ret['result'] = False
ret['comment'] = current_nic['stdout']
return ret
if ip or netmask or gateway:
if not ip:
ip = current_nic['Network']['IP Address']
if not netmask:
    netmask = current_nic['Network']['Subnet Mask']
if not gateway:
    gateway = current_nic['Network']['Gateway']
if current_nic['Network']['DHCP Enabled'] == '0' and dhcp:
ret['changes'].update({'DHCP': {'Old': {'DHCP Enabled': current_nic['Network']['DHCP Enabled']},
'New': {'DHCP Enabled': dhcp}}})
if ((ip or netmask or gateway) and not dhcp and (ip != current_nic['Network']['IP Address'] or
netmask != current_nic['Network']['Subnet Mask'] or
gateway != current_nic['Network']['Gateway'])):
ret['changes'].update({'IP': {'Old': current_nic['Network'],
'New': {'IP Address': ip,
'Subnet Mask': netmask,
'Gateway': gateway}}})
if password:
if 'New' not in ret['changes']:
ret['changes']['New'] = {}
ret['changes']['New'].update({'Password': '*****'})
if snmp:
if 'New' not in ret['changes']:
ret['changes']['New'] = {}
ret['changes']['New'].update({'SNMP': '*****'})
if ret['changes'] == {}:
ret['comment'] = 'Switch ' + name + ' is already in desired state'
return ret
except AttributeError:
ret['changes'] = {}
ret['comment'] = 'Something went wrong retrieving the switch details'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Switch ' + name + ' configuration will change'
return ret
# Finally, set the necessary configurations on the chassis.
dhcp_ret = net_ret = password_ret = snmp_ret = True
if dhcp:
dhcp_ret = __salt__['chassis.cmd']('set_niccfg', module=name, dhcp=dhcp)
if ip or netmask or gateway:
net_ret = __salt__['chassis.cmd']('set_niccfg', ip, netmask, gateway, module=name)
if password:
password_ret = __salt__['chassis.cmd']('deploy_password', 'root', password, module=name)
if snmp:
snmp_ret = __salt__['chassis.cmd']('deploy_snmp', snmp, module=name)
if any(result is False for result in (password_ret, snmp_ret, net_ret, dhcp_ret)):
    ret['result'] = False
    ret['comment'] = 'There was an error setting the switch {0}.'.format(name)
else:
    ret['comment'] = 'Dell chassis switch {0} was updated.'.format(name)
return ret
def _firmware_update(firmwarefile='', host='',
directory=''):
'''
Update firmware for a single host
'''
dest = os.path.join(directory, firmwarefile[7:])
__salt__['cp.get_file'](firmwarefile, dest)
username = __pillar__['proxy']['admin_user']
password = __pillar__['proxy']['admin_password']
__salt__['dracr.update_firmware'](dest,
host=host,
admin_username=username,
admin_password=password)
def firmware_update(hosts=None, directory=''):
'''
State to update the firmware on one or more hosts
using the ``racadm`` command.
hosts
Mapping of host name (as supplied to the ``racadm`` command) to the
firmware file for that host, given as a ``salt://`` path.
directory
Directory into which each firmware file will be downloaded before the
update is submitted.
.. code-block:: yaml
dell-chassis-firmware-update:
dellchassis.firmware_update:
hosts:
cmc:
salt://firmware_cmc.exe
server-1:
salt://firmware.exe
directory: /opt/firmwares
'''
ret = {}
ret['changes'] = {}
success = True
for host, firmwarefile in hosts.items():
try:
_firmware_update(firmwarefile, host, directory)
ret['changes'].update({
host: {
'comment': 'Firmware update submitted for {0}'.format(host),
'success': True,
}
})
except CommandExecutionError as err:
success = False
ret['changes'].update({
host: {
'comment': 'FAILED to update firmware for {0}'.format(host),
'success': False,
'reason': str(err),
}
})
ret['result'] = success
return ret
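# --- Illustrative sketch (editor's addition, not part of the upstream module) ---
# A hedged example of calling firmware_update() directly with a made-up hosts
# mapping; the file names and directory below are placeholders. Each state in
# this module returns the usual Salt dictionary of name/result/changes/comment.
def _example_firmware_update_call():
    hosts = {
        'cmc': 'salt://firmware_cmc.exe',
        'server-1': 'salt://firmware.exe',
    }
    return firmware_update(hosts=hosts, directory='/opt/firmwares')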
|
smallyear/linuxLearn
|
salt/salt/states/dellchassis.py
|
Python
|
apache-2.0
| 25,828
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import alembic
from oslo_serialization import jsonutils
import six
import sqlalchemy as sa
from nailgun.db import db
from nailgun.db import dropdb
from nailgun.db.migration import ALEMBIC_CONFIG
from nailgun.extensions.consts import extensions_migration_buffer_table_name
from nailgun.test import base
_prepare_revision = '37608259013'
_test_revision = '1e50a4903910'
def setup_module(module):
dropdb()
alembic.command.upgrade(ALEMBIC_CONFIG, _prepare_revision)
prepare()
alembic.command.upgrade(ALEMBIC_CONFIG, _test_revision)
def prepare():
meta = base.reflect_db_metadata()
db.execute(
meta.tables['plugins'].insert(),
[{
'name': 'test_plugin',
'title': 'Test plugin',
'version': '1.0.0',
'description': 'Test plugin for Fuel',
'homepage': 'http://fuel_plugins.test_plugin.com',
'package_version': '3.0.0',
'groups': jsonutils.dumps(['tgroup']),
'authors': jsonutils.dumps(['tauthor']),
'licenses': jsonutils.dumps(['tlicense']),
'releases': jsonutils.dumps([
{'repository_path': 'repositories/ubuntu'}
]),
'fuel_version': jsonutils.dumps(['6.1', '7.0']),
}])
result = db.execute(
meta.tables['releases'].insert(),
[{
'name': 'test_name',
'version': '2014.2.2-6.1',
'operating_system': 'ubuntu',
'state': 'available',
'roles': jsonutils.dumps([
'controller',
'compute',
'mongo',
]),
'roles_metadata': jsonutils.dumps({
'controller': {
'name': 'Controller',
'description': 'Controller role',
'has_primary': True,
},
'zabbix-server': {
'name': 'Zabbix Server',
'description': 'Zabbix Server role'
},
'cinder': {
'name': 'Cinder',
'description': 'Cinder role'
},
'mongo': {
'name': 'Telemetry - MongoDB',
'description': 'mongo is',
'has_primary': True,
}
}),
'attributes_metadata': jsonutils.dumps({}),
'networks_metadata': jsonutils.dumps({
'bonding': {
'properties': {
'linux': {
'mode': [
{
"values": ["balance-rr",
"active-backup",
"802.3ad"]
},
{
"values": ["balance-xor",
"broadcast",
"balance-tlb",
"balance-alb"],
"condition": "'experimental' in "
"version:feature_groups"
}
]
}
}
},
}),
'is_deployable': True,
}])
releaseid = result.inserted_primary_key[0]
result = db.execute(
meta.tables['releases'].insert(),
[{
'name': 'test_name_2',
'version': '2014.2-6.0',
'operating_system': 'ubuntu',
'state': 'available',
'roles': jsonutils.dumps([
'controller',
'compute',
'mongo',
]),
'roles_metadata': jsonutils.dumps({}),
'attributes_metadata': jsonutils.dumps({}),
'networks_metadata': jsonutils.dumps({
'bonding': {
'properties': {
'ovs': {
'mode': [
{
"values": ["active-backup",
"balance-slb",
"lacp-balance-tcp"]
}
]
}
}
},
}),
'is_deployable': True
}])
result = db.execute(
meta.tables['clusters'].insert(),
[{
'name': 'test_env',
'release_id': releaseid,
'mode': 'ha_compact',
'status': 'new',
'net_provider': 'neutron',
'grouping': 'roles',
'fuel_version': '6.1',
}])
clusterid = result.inserted_primary_key[0]
result = db.execute(
meta.tables['networking_configs'].insert(),
[{
'cluster_id': None,
'dns_nameservers': ['8.8.8.8'],
'floating_ranges': [],
'configuration_template': None,
}])
db.execute(
meta.tables['neutron_config'].insert(),
[{
'id': result.inserted_primary_key[0],
'vlan_range': [],
'gre_id_range': [],
'base_mac': '00:00:00:00:00:00',
'internal_cidr': '10.10.10.00/24',
'internal_gateway': '10.10.10.01',
'segmentation_type': 'vlan',
'net_l23_provider': 'ovs'
}])
result = db.execute(
meta.tables['nodes'].insert(),
[
{
'uuid': 'one',
'cluster_id': clusterid,
'group_id': None,
'status': 'ready',
'meta': '{}',
'mac': 'aa:aa:aa:aa:aa:aa',
'pending_addition': True,
'pending_deletion': False,
'timestamp': datetime.datetime.utcnow(),
}
])
nodeid_a = result.inserted_primary_key[0]
result = db.execute(
meta.tables['nodes'].insert(),
[
{
'uuid': 'two',
'cluster_id': clusterid,
'group_id': None,
'status': 'discover',
'meta': '{}',
'mac': 'bb:bb:bb:bb:bb:bb',
'pending_addition': True,
'pending_deletion': False,
'timestamp': datetime.datetime.utcnow(),
}
])
nodeid_b = result.inserted_primary_key[0]
result = db.execute(
meta.tables['nodes'].insert(),
[
{
'uuid': 'three',
'cluster_id': None,
'group_id': None,
'status': 'discover',
'meta': '{}',
'mac': 'cc:cc:cc:cc:cc:cc',
'pending_addition': True,
'pending_deletion': False,
'timestamp': datetime.datetime.utcnow(),
}
])
nodeid_c = result.inserted_primary_key[0]
db.execute(
meta.tables['node_attributes'].insert(),
[
{
'node_id': nodeid_a,
'volumes': jsonutils.dumps([{'volume': nodeid_a}])
},
{
'node_id': nodeid_b,
'volumes': jsonutils.dumps([{'volume': nodeid_b}])
},
{
'node_id': nodeid_c,
'volumes': jsonutils.dumps([{'volume': nodeid_c}])
},
])
result = db.execute(
meta.tables['roles'].insert(),
[
{'release_id': releaseid, 'name': 'controller'},
])
controllerroleid = result.inserted_primary_key[0]
result = db.execute(
meta.tables['roles'].insert(),
[
{'release_id': releaseid, 'name': 'mongo'},
])
mongoroleid = result.inserted_primary_key[0]
result = db.execute(
meta.tables['node_roles'].insert(),
[
{'role': controllerroleid, 'node': nodeid_a, 'primary': False},
{'role': controllerroleid, 'node': nodeid_b, 'primary': False},
{'role': controllerroleid, 'node': nodeid_c, 'primary': True},
{'role': mongoroleid, 'node': nodeid_a, 'primary': False},
])
result = db.execute(
meta.tables['pending_node_roles'].insert(),
[
{'role': mongoroleid, 'node': nodeid_b, 'primary': True},
{'role': mongoroleid, 'node': nodeid_c, 'primary': False},
])
db.execute(
meta.tables['node_nic_interfaces'].insert(),
[
{
'id': 1,
'node_id': nodeid_a,
'name': 'test_interface',
'mac': '00:00:00:00:00:01',
'max_speed': 200,
'current_speed': 100,
'ip_addr': '10.20.0.2',
'netmask': '255.255.255.0',
'state': 'test_state',
'interface_properties': jsonutils.dumps(
{'test_property': 'test_value'}),
'driver': 'test_driver',
'bus_info': 'some_test_info'
},
{
'id': 2,
'node_id': nodeid_a,
'name': 'test_interface_2',
'mac': '00:00:00:00:00:02',
'max_speed': 200,
'current_speed': 100,
'ip_addr': '10.30.0.2',
'netmask': '255.255.255.0',
'state': 'test_state',
'interface_properties': jsonutils.dumps(
{'test_property': 'test_value'}),
'driver': 'test_driver',
'bus_info': 'some_test_info'
},
{
'id': 3,
'node_id': nodeid_a,
'name': 'test_interface_3',
'mac': '00:00:00:00:00:03',
'max_speed': 200,
'current_speed': 100,
'ip_addr': '10.30.0.2',
'netmask': '255.255.255.0',
'state': 'test_state',
'interface_properties': jsonutils.dumps(
{'test_property': 'test_value'}),
'driver': 'test_driver',
'bus_info': 'some_test_info'
}])
db.execute(
meta.tables['node_bond_interfaces'].insert(),
[{
'node_id': nodeid_a,
'name': 'test_bond_interface',
'mode': 'active-backup',
'bond_properties': jsonutils.dumps(
{'test_property': 'test_value'})
}])
db.execute(
meta.tables['network_groups'].insert(),
[
{
'id': 1,
'name': 'fuelweb_admin',
'vlan_start': None,
'cidr': '10.20.0.0/24',
'gateway': '10.20.0.200',
},
{
'id': 2,
'name': 'public',
'vlan_start': None,
'cidr': '10.30.0.0/24',
'gateway': '10.30.0.200'
}
]
)
db.execute(
meta.tables['net_nic_assignments'].insert(),
[
{
'network_id': 1,
'interface_id': 1
},
{
'network_id': 2,
'interface_id': 2
},
{
'network_id': 2,
'interface_id': 3
}
]
)
db.commit()
class TestPluginAttributesMigration(base.BaseAlembicMigrationTest):
def test_old_fields_exists(self):
result = db.execute(
sa.select([self.meta.tables['plugins'].c.name]))
self.assertEqual(
result.fetchone()[0], 'test_plugin')
result = db.execute(
sa.select([self.meta.tables['plugins'].c.title]))
self.assertEqual(
result.fetchone()[0], 'Test plugin')
result = db.execute(
sa.select([self.meta.tables['plugins'].c.version]))
self.assertEqual(
result.fetchone()[0], '1.0.0')
result = db.execute(
sa.select([self.meta.tables['plugins'].c.description]))
self.assertEqual(
result.fetchone()[0], 'Test plugin for Fuel')
result = db.execute(
sa.select([self.meta.tables['plugins'].c.homepage]))
self.assertEqual(
result.fetchone()[0], 'http://fuel_plugins.test_plugin.com')
result = db.execute(
sa.select([self.meta.tables['plugins'].c.package_version]))
self.assertEqual(
result.fetchone()[0], '3.0.0')
result = db.execute(
sa.select([self.meta.tables['plugins'].c.groups]))
self.assertEqual(
jsonutils.loads(result.fetchone()[0]), ['tgroup'])
result = db.execute(
sa.select([self.meta.tables['plugins'].c.authors]))
self.assertEqual(
jsonutils.loads(result.fetchone()[0]), ['tauthor'])
result = db.execute(
sa.select([self.meta.tables['plugins'].c.licenses]))
self.assertEqual(
jsonutils.loads(result.fetchone()[0]), ['tlicense'])
result = db.execute(
sa.select([self.meta.tables['plugins'].c.releases]))
self.assertEqual(
jsonutils.loads(result.fetchone()[0]),
[{'repository_path': 'repositories/ubuntu'}])
result = db.execute(
sa.select([self.meta.tables['plugins'].c.fuel_version]))
self.assertEqual(
jsonutils.loads(result.fetchone()[0]), ['6.1', '7.0'])
def test_new_fields_exists_and_empty(self):
# check attributes_metadata field exists
plugin_table = self.meta.tables['plugins']
column_values = [
(plugin_table.c.attributes_metadata, {}),
(plugin_table.c.volumes_metadata, {}),
(plugin_table.c.roles_metadata, {}),
(plugin_table.c.network_roles_metadata, []),
(plugin_table.c.deployment_tasks, []),
(plugin_table.c.tasks, []),
]
result = db.execute(sa.select(
[item[0] for item in column_values]))
db_values = result.fetchone()
for idx, db_value in enumerate(db_values):
self.assertEqual(jsonutils.loads(db_value), column_values[idx][1])
class TestPublicIpRequired(base.BaseAlembicMigrationTest):
def test_public_ip_required(self):
result = db.execute(
sa.select([self.meta.tables['releases'].c.roles_metadata]))
roles_metadata = jsonutils.loads(result.fetchone()[0])
for role, role_info in six.iteritems(roles_metadata):
if role in ['controller', 'zabbix-server']:
self.assertTrue(role_info['public_ip_required'])
else:
self.assertFalse(role_info.get('public_ip_required'))
class TestInterfacesOffloadingModesMigration(base.BaseAlembicMigrationTest):
def test_old_fields_exists(self):
# check node_nic_interfaces fields
nic_table = self.meta.tables['node_nic_interfaces']
result = db.execute(
sa.select([nic_table.c.node_id, nic_table.c.name, nic_table.c.mac,
nic_table.c.max_speed, nic_table.c.current_speed,
nic_table.c.ip_addr, nic_table.c.netmask,
nic_table.c.state, nic_table.c.interface_properties,
nic_table.c.driver, nic_table.c.bus_info]).
where(nic_table.c.id == 1))
res = result.fetchone()
check_res = [1, u'test_interface', u'00:00:00:00:00:01', 200, 100,
u'10.20.0.2', u'255.255.255.0', u'test_state',
u'{"test_property": "test_value"}',
u'test_driver', u'some_test_info']
self.assertListEqual(list(res), check_res)
def test_new_fields_exists_and_empty(self):
# check node_nic_interfaces fields
result = db.execute(
sa.select([self.meta.tables['node_nic_interfaces']
.c.offloading_modes]))
self.assertEqual(
jsonutils.loads(result.fetchone()[0]), [])
# the same for bond interfaces
result = db.execute(
sa.select([self.meta.tables['node_bond_interfaces']
.c.offloading_modes]))
self.assertEqual(
jsonutils.loads(result.fetchone()[0]), [])
class TestNetworkingTemplatesMigration(base.BaseAlembicMigrationTest):
def test_new_fields_exists_and_empty(self):
result = db.execute(
sa.select([self.meta.tables['networking_configs']
.c.configuration_template]))
self.assertIsNone(result.fetchone()[0])
result = db.execute(
sa.select([self.meta.tables['nodes']
.c.network_template]))
self.assertIsNone(result.fetchone()[0])
class TestNodeHostnamePropertyMigration(base.BaseAlembicMigrationTest):
def test_hostname_field_exists_and_contains_correct_values(self):
result = db.execute(
sa.select([self.meta.tables['nodes'].c.id,
self.meta.tables['nodes'].c.hostname]))
for node_id, hostname in result:
self.assertEqual(
"node-{0}".format(node_id),
hostname)
def test_fqdn_field_is_dropped(self):
node_table = self.meta.tables['nodes']
self.assertNotIn('fqdn', node_table.c)
class TestInterfacesPxePropertyMigration(base.BaseAlembicMigrationTest):
def test_old_fields_exists(self):
# check node_nic_interfaces fields
ng_table = self.meta.tables['network_groups']
result = db.execute(
sa.select([ng_table.c.name, ng_table.c.vlan_start,
ng_table.c.cidr, ng_table.c.gateway]).
where(ng_table.c.id == 1))
res = result.fetchone()
check_res = [u'fuelweb_admin', None, u'10.20.0.0/24', u'10.20.0.200']
self.assertListEqual(list(res), check_res)
result = db.execute(
sa.select([self.meta.tables['net_nic_assignments'].c.network_id]))
self.assertEqual(
result.fetchone()[0], 1)
def test_new_field_exists_and_filled(self):
nic_table = self.meta.tables['node_nic_interfaces']
result = db.execute(
sa.select([nic_table.c.pxe]).where(nic_table.c.id == 1))
# check 'pxe' property is true for admin interfaces
self.assertTrue(result.fetchone()[0])
result = db.execute(
sa.select([nic_table.c.pxe]).where(nic_table.c.id != 1))
# and 'false' for any others
for res in result.fetchall():
self.assertFalse(res[0])
class TestMigrateVolumesIntoExtension(base.BaseAlembicMigrationTest):
def test_data_are_moved_into_buffer_table(self):
# "volumes" column got deleted
columns = [t.name for t in self.meta.tables['node_attributes'].columns]
self.assertItemsEqual(columns, ['id', 'node_id', 'interfaces',
'vms_conf'])
# The data are stored in the buffer
table_name = extensions_migration_buffer_table_name
result = db.execute(
sa.select([
self.meta.tables[table_name].c.id,
self.meta.tables[table_name].c.extension_name,
self.meta.tables[table_name].c.data]))
records = list(result)
# Extension name is volume_manager
names = [r[1] for r in records]
self.assertEqual(
list(names),
['volume_manager'] * 3)
# Check the data, each dict has node_id and volumes
volumes = [jsonutils.loads(r[2]) for r in records]
for volume in volumes:
self.assertEqual(
volume['volumes'],
[{'volume': volume['node_id']}])
class TestSchemalessRoles(base.BaseAlembicMigrationTest):
def test_nodes_has_roles_attrs(self):
result = db.execute(
sa.select([
self.meta.tables['nodes'].c.roles,
self.meta.tables['nodes'].c.pending_roles,
self.meta.tables['nodes'].c.primary_roles,
]).order_by(self.meta.tables['nodes'].c.id))
nodes = [
(roles, pending_roles, primary_roles)
for roles, pending_roles, primary_roles in result
]
# node_a
roles, pending_roles, primary_roles = nodes[0]
self.assertItemsEqual(['controller', 'mongo'], roles)
self.assertItemsEqual([], pending_roles)
self.assertItemsEqual([], primary_roles)
# node_b
roles, pending_roles, primary_roles = nodes[1]
self.assertItemsEqual(['controller'], roles)
self.assertItemsEqual(['mongo'], pending_roles)
self.assertItemsEqual(['mongo'], primary_roles)
# node_c
roles, pending_roles, primary_roles = nodes[2]
self.assertItemsEqual(['controller'], roles)
self.assertItemsEqual(['mongo'], pending_roles)
self.assertItemsEqual(['controller'], primary_roles)
def test_old_tables_are_dropped(self):
self.assertNotIn('node_roles', self.meta.tables)
self.assertNotIn('pending_node_roles', self.meta.tables)
self.assertNotIn('roles', self.meta.tables)
def test_network_roles_metadata_exists_and_empty(self):
# check attributes_metadata field exists
result = db.execute(
sa.select([self.meta.tables['releases'].c.network_roles_metadata]))
# check attributes_metadata value is empty
self.assertEqual(jsonutils.loads(result.fetchone()[0]), [])
def test_weight_is_injected_to_roles_meta(self):
result = db.execute(
sa.select([self.meta.tables['releases'].c.roles_metadata])
)
rel_row = result.fetchone()
r_meta = jsonutils.loads(rel_row[0])
default_roles_weight = {
"controller": 10,
"compute": 20,
"cinder": 30,
"cinder-vmware": 40,
"ceph-osd": 50,
"mongo": 60,
"base-os": 70,
"virt": 80
}
for r_name in r_meta:
r_weight = r_meta[r_name].get('weight')
self.assertIsNotNone(r_weight)
if r_name in default_roles_weight:
self.assertEquals(
r_weight, default_roles_weight.get(r_name)
)
# role which is not present in list of default ones
else:
self.assertEquals(r_weight, 10000)
class TestClusterUISettingsMigration(base.BaseAlembicMigrationTest):
def test_grouping_field_removed(self):
clusters_table = self.meta.tables['clusters']
self.assertNotIn('grouping', clusters_table.c)
def test_ui_settings_field_exists_and_has_default_value(self):
clusters_table = self.meta.tables['clusters']
self.assertIn('ui_settings', clusters_table.c)
ui_settings = jsonutils.loads(
db.execute(
sa.select([clusters_table.c.ui_settings])
).fetchone()[0]
)
self.assertItemsEqual(ui_settings['view_mode'], 'standard')
self.assertItemsEqual(ui_settings['filter'], {})
self.assertItemsEqual(ui_settings['sort'], [{'roles': 'asc'}])
self.assertItemsEqual(ui_settings['filter_by_labels'], {})
self.assertItemsEqual(ui_settings['sort_by_labels'], [])
self.assertItemsEqual(ui_settings['search'], '')
class TestClusterBondMetaMigration(base.BaseAlembicMigrationTest):
def test_cluster_bond_meta_field_exists_and_has_proper_value_lnx(self):
lnx_meta = [
{
"values": ["balance-rr", "active-backup", "802.3ad"],
"condition": "interface:pxe == false"
},
{
"values": ["balance-xor", "broadcast", "balance-tlb",
"balance-alb"],
"condition": "interface:pxe == false and "
"'experimental' in version:feature_groups"
}
]
# check data for linux bonds (fuel 6.1 version)
result = db.execute(
sa.select([self.meta.tables['releases'].c.networks_metadata]).
where(self.meta.tables['releases'].c.name == 'test_name'))
bond_meta = jsonutils.loads(result.fetchone()[0])['bonding']
self.assertEqual(bond_meta['properties']['linux']['mode'], lnx_meta)
def test_cluster_bond_meta_field_exists_and_has_proper_value_ovs(self):
ovs_meta = [
{
"values": ["active-backup", "balance-slb",
"lacp-balance-tcp"],
"condition": "interface:pxe == false"
}
]
# check data for ovs bonds (fuel < 6.1 version)
result = db.execute(
sa.select([self.meta.tables['releases'].c.networks_metadata]).
where(self.meta.tables['releases'].c.name == 'test_name_2'))
bond_meta = jsonutils.loads(result.fetchone()[0])['bonding']
self.assertEqual(bond_meta['properties']['ovs']['mode'], ovs_meta)
class TestExtensionsField(base.BaseAlembicMigrationTest):
def test_extensions_field_with_default_data(self):
cluster_result = db.execute(
sa.select([self.meta.tables['clusters'].c.extensions])).fetchone()
release_result = db.execute(
sa.select([self.meta.tables['releases'].c.extensions])).fetchone()
self.assertEqual(list(cluster_result)[0], ['volume_manager'])
self.assertEqual(list(release_result)[0], ['volume_manager'])
class TestOldReleasesIsUndeployable(base.BaseAlembicMigrationTest):
def test_old_releases_has_deployable_false(self):
result = db.execute(
sa.select([self.meta.tables['releases'].c.is_deployable]).
where(self.meta.tables['releases'].c.version == '2014.2.2-6.1'))
for (is_deployable, ) in result:
self.assertFalse(is_deployable)
class TestNodeLabelsMigration(base.BaseAlembicMigrationTest):
def test_node_labels_field_exists_and_has_default_value(self):
nodes_table = self.meta.tables['nodes']
self.assertIn('labels', nodes_table.c)
default_labels = jsonutils.loads(
db.execute(
sa.select([nodes_table.c.labels])
).fetchone()[0]
)
self.assertEqual(default_labels, {})
class TestTunSegmentType(base.BaseAlembicMigrationTest):
def test_tun_segment_type_added(self):
result = db.execute(
self.meta.tables['networking_configs'].insert(),
[{
'cluster_id': None,
'dns_nameservers': ['8.8.8.8'],
'floating_ranges': [],
'configuration_template': None,
}])
db.execute(
self.meta.tables['neutron_config'].insert(),
[{
'id': result.inserted_primary_key[0],
'vlan_range': [],
'gre_id_range': [],
'base_mac': '00:00:00:00:00:00',
'internal_cidr': '10.10.10.00/24',
'internal_gateway': '10.10.10.01',
'segmentation_type': 'tun',
'net_l23_provider': 'ovs'
}])
types = db.execute(
sa.select(
[self.meta.tables['neutron_config'].c.segmentation_type])).\
fetchall()
self.assertIn(('tun',), types)
class TestStringNetworkGroupName(base.BaseAlembicMigrationTest):
def test_tun_segment_type_added(self):
db.execute(
self.meta.tables['network_groups'].insert(),
[{
'id': 3,
'name': 'custom_name',
'vlan_start': None,
'cidr': '10.20.0.0/24',
'gateway': '10.20.0.200',
}])
names = db.execute(
sa.select(
[self.meta.tables['network_groups'].c.name])). \
fetchall()
self.assertIn(('custom_name',), names)
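# --- Illustrative sketch (editor's addition, not part of the upstream tests) ---
# The query pattern the test classes above rely on: reflect the schema (as
# base.reflect_db_metadata() does for self.meta) and select columns from the
# reflected tables. The table and column names below come from this file.
def _example_query_node_roles(meta):
    """Fetch (id, roles) pairs from the migrated ``nodes`` table."""
    nodes_table = meta.tables['nodes']
    result = db.execute(sa.select([nodes_table.c.id, nodes_table.c.roles]))
    return list(result)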
|
huntxu/fuel-web
|
nailgun/nailgun/test/unit/test_migration_fuel_7_0.py
|
Python
|
apache-2.0
| 29,060
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import resnet_utils
from nets import resnet_v1
slim = tf.contrib.slim
def create_test_input(batch_size, height, width, channels):
"""Create test input tensor.
Args:
batch_size: The number of images per batch or `None` if unknown.
height: The height of each image or `None` if unknown.
width: The width of each image or `None` if unknown.
channels: The number of channels per image or `None` if unknown.
Returns:
Either a placeholder `Tensor` of dimension
[batch_size, height, width, channels] if any of the inputs are `None` or a
constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return tf.placeholder(tf.float32, (batch_size, height, width, channels))
else:
return tf.to_float(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) +
np.reshape(np.arange(width), [1, width]),
[1, height, width, 1]),
[batch_size, 1, 1, channels]))
class ResnetUtilsTest(tf.test.TestCase):
def testSubsampleThreeByThree(self):
x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testConv2DSameEven(self):
n, n2 = 4, 2
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.to_float([[14, 28, 43, 26],
[28, 48, 66, 37],
[43, 66, 84, 46],
[26, 37, 46, 22]])
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.to_float([[14, 43],
[43, 84]])
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = tf.to_float([[48, 37],
[37, 22]])
y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def testConv2DSameOdd(self):
n, n2 = 5, 3
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.to_float([[14, 28, 43, 58, 34],
[28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]])
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.to_float([[14, 43, 34],
[43, 84, 55],
[34, 55, 30]])
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = y2_expected
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with tf.variable_scope(scope, values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = slim.utils.convert_collection_to_dict('end_points')
return net, end_points
def testEndPointsV1(self):
"""Test the end points of a tiny v1 bottleneck network."""
blocks = [
resnet_v1.resnet_v1_block(
'block1', base_depth=1, num_units=2, stride=2),
resnet_v1.resnet_v1_block(
'block2', base_depth=2, num_units=2, stride=1),
]
inputs = create_test_input(2, 32, 16, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
'tiny/block1/unit_1/bottleneck_v1/shortcut',
'tiny/block1/unit_1/bottleneck_v1/conv1',
'tiny/block1/unit_1/bottleneck_v1/conv2',
'tiny/block1/unit_1/bottleneck_v1/conv3',
'tiny/block1/unit_2/bottleneck_v1/conv1',
'tiny/block1/unit_2/bottleneck_v1/conv2',
'tiny/block1/unit_2/bottleneck_v1/conv3',
'tiny/block2/unit_1/bottleneck_v1/shortcut',
'tiny/block2/unit_1/bottleneck_v1/conv1',
'tiny/block2/unit_1/bottleneck_v1/conv2',
'tiny/block2/unit_1/bottleneck_v1/conv3',
'tiny/block2/unit_2/bottleneck_v1/conv1',
'tiny/block2/unit_2/bottleneck_v1/conv2',
'tiny/block2/unit_2/bottleneck_v1/conv3']
self.assertItemsEqual(expected, end_points)
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net, rate=1, **unit)
return net
def testAtrousValuesBottleneck(self):
"""Verify the values of dense feature extraction by atrous convolution.
Make sure that dense feature extraction by stack_blocks_dense() followed by
subsampling gives identical results to feature extraction at the nominal
network output stride using the simple self._stack_blocks_nondense() above.
"""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=2, stride=2),
block('block2', base_depth=2, num_units=2, stride=2),
block('block3', base_depth=4, num_units=2, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
nominal_stride = 8
# Test both odd and even input dimensions.
height = 30
width = 31
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with slim.arg_scope([slim.batch_norm], is_training=False):
for output_stride in [1, 2, 4, 8, None]:
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Dense feature extraction followed by subsampling.
output = resnet_utils.stack_blocks_dense(inputs,
blocks,
output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
sess.run(tf.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
class ResnetCompleteNetworkTest(tf.test.TestCase):
"""Tests with complete small ResNet v1 networks."""
def _resnet_small(self,
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope='resnet_v1_small'):
"""A shallow and thin ResNet v1 for faster tests."""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=3, stride=2),
block('block2', base_depth=2, num_units=3, stride=2),
block('block3', base_depth=4, num_units=3, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
return resnet_v1.resnet_v1(inputs, blocks, num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
include_root_block=include_root_block,
reuse=reuse,
scope=scope)
def testClassificationEndPoints(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationShapes(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 28, 28, 4],
'resnet/block2': [2, 14, 14, 8],
'resnet/block3': [2, 7, 7, 16],
'resnet/block4': [2, 7, 7, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 21, 21, 8],
'resnet/block3': [2, 11, 11, 16],
'resnet/block4': [2, 11, 11, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testRootlessFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 128, 128, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
include_root_block=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 64, 64, 4],
'resnet/block2': [2, 32, 32, 8],
'resnet/block3': [2, 16, 16, 16],
'resnet/block4': [2, 16, 16, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 41, 41, 8],
'resnet/block3': [2, 41, 41, 16],
'resnet/block4': [2, 41, 41, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(inputs, None, is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(inputs, None, is_training=False,
global_pool=False)
sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs,
None,
global_pool=global_pool,
output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
if __name__ == '__main__':
tf.test.main()
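# --- Illustrative note (not part of the original test file) ---
# The atrous tests above rely on the identity that extracting dense features
# with output_stride=s and then subsampling by nominal_stride // s should
# reproduce the features computed at the nominal stride, roughly:
#
#   dense, _ = self._resnet_small(inputs, None, output_stride=8,
#                                 global_pool=False)
#   subsampled = resnet_utils.subsample(dense, 32 // 8)
#   # subsampled ~= self._resnet_small(inputs, None, global_pool=False)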
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/tensorflow/models/slim/nets/resnet_v1_test.py
|
Python
|
bsd-2-clause
| 18,554
|
"""Add FastCache for avoiding retrieving the app_url in every single update
Revision ID: 471e6f7722a7
Revises: 6d09f595667
Create Date: 2015-04-14 07:40:57.006143
"""
# revision identifiers, used by Alembic.
revision = '471e6f7722a7'
down_revision = '6d09f595667'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('TranslationFastCaches',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('app_url', sa.Unicode(length=255), nullable=True),
sa.Column('translation_url', sa.Unicode(length=255), nullable=True),
sa.Column('original_messages', sa.UnicodeText(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(u'ix_TranslationFastCaches_app_url', 'TranslationFastCaches', ['app_url'], unique=True)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(u'ix_TranslationFastCaches_app_url', table_name='TranslationFastCaches')
op.drop_table('TranslationFastCaches')
### end Alembic commands ###
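# --- Illustrative usage (not part of the original migration) ---
# Assuming a configured alembic.ini pointing at the target database, this
# revision would typically be applied or rolled back from the command line:
#
#   alembic upgrade 471e6f7722a7    # create TranslationFastCaches + its index
#   alembic downgrade 6d09f595667   # drop them again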
|
porduna/appcomposer
|
alembic/versions/471e6f7722a7_add_fastcache_for_avoiding_retrieving_.py
|
Python
|
bsd-2-clause
| 1,130
|
import jingo
import paypal
def handle_paypal_error(fn):
"""Wraps the view so that if a paypal error occurs, you show
    a more meaningful error message. May or may not make sense for
    all views, so it is provided as a decorator."""
def wrapper(request, *args, **kw):
try:
return fn(request, *args, **kw)
except paypal.PaypalError:
# This is specific handling for the submission step.
dest = request.GET.get('dest')
return jingo.render(request, 'site/500_paypal.html',
{'submission': dest == 'submission',
'addon': kw.get('addon', None)},
status=500)
return wrapper
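# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming a hypothetical view ``purchase``; any
# paypal.PaypalError raised inside it is rendered as the custom 500 page
# built above instead of propagating.
#
# @handle_paypal_error
# def purchase(request, addon=None):
#     raise paypal.PaypalError('illustrative failure')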
|
wagnerand/zamboni
|
apps/paypal/decorators.py
|
Python
|
bsd-3-clause
| 746
|
from django.conf.urls import url
from olympia.addons.urls import ADDON_ID
from olympia.versions.feeds import VersionsRss
from . import views
urlpatterns = [
url('^$',
views.version_list, name='addons.versions'),
url('^format:rss$',
VersionsRss(), name='addons.versions.rss'),
url('^(?P<version_num>[^/]+)$', views.version_detail,
name='addons.versions'),
url('^(?P<version_num>[^/]+)/updateinfo/$', views.update_info,
name='addons.versions.update_info'),
]
download_patterns = [
# .* at the end to match filenames.
# /file/:id/type:attachment
url('^file/(?P<file_id>\d+)(?:/type:(?P<type>\w+))?(?:/.*)?',
views.download_file, name='downloads.file'),
url('^source/(?P<version_id>\d+)',
views.download_source, name='downloads.source'),
# /latest/1865/type:xpi/platform:5
url('^latest/%s/(?:type:(?P<type>\w+)/)?'
'(?:platform:(?P<platform>\d+)/)?.*' % ADDON_ID,
views.download_latest, name='downloads.latest'),
]
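# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of reversing the named patterns above; the ids are made up
# and the resulting path depends on where these patterns are included.
#
# from django.urls import reverse  # django.core.urlresolvers on older Django
# reverse('downloads.file', kwargs={'file_id': 1234})
# reverse('downloads.source', kwargs={'version_id': 5678})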
|
lavish205/olympia
|
src/olympia/versions/urls.py
|
Python
|
bsd-3-clause
| 1,022
|
"""
S3 Storage object.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from io import BytesIO
from django.conf import settings
from tempfile import SpooledTemporaryFile
from .base import BaseStorage, StorageError
class Storage(BaseStorage):
""" S3 API Storage. """
S3_BUCKET = getattr(settings, 'DBBACKUP_S3_BUCKET', None)
S3_ACCESS_KEY = getattr(settings, 'DBBACKUP_S3_ACCESS_KEY', None)
S3_SECRET_KEY = getattr(settings, 'DBBACKUP_S3_SECRET_KEY', None)
S3_DOMAIN = getattr(settings, 'DBBACKUP_S3_DOMAIN', 's3.amazonaws.com')
S3_IS_SECURE = getattr(settings, 'DBBACKUP_S3_USE_SSL', True)
S3_DIRECTORY = getattr(settings, 'DBBACKUP_S3_DIRECTORY', "django-dbbackups/")
S3_SERVER_SIDE_ENCRYPTION = getattr(settings, 'DBBACKUP_S3_SERVER_SIDE_ENCRYPTION', False)
if S3_DIRECTORY:
S3_DIRECTORY = '%s/' % S3_DIRECTORY.strip('/')
def __init__(self, server_name=None):
self._check_filesystem_errors()
self.name = 'AmazonS3'
self.conn = S3Connection(aws_access_key_id=self.S3_ACCESS_KEY,
aws_secret_access_key=self.S3_SECRET_KEY, host=self.S3_DOMAIN,
is_secure=self.S3_IS_SECURE)
self.bucket = self.conn.get_bucket(self.S3_BUCKET)
BaseStorage.__init__(self)
def _check_filesystem_errors(self):
        if not self.S3_BUCKET:
            raise StorageError('S3 storage requires DBBACKUP_S3_BUCKET to be defined in settings.')
        if not self.S3_ACCESS_KEY:
            raise StorageError('S3 storage requires DBBACKUP_S3_ACCESS_KEY to be defined in settings.')
        if not self.S3_SECRET_KEY:
            raise StorageError('S3 storage requires DBBACKUP_S3_SECRET_KEY to be defined in settings.')
@property
def backup_dir(self):
return self.S3_DIRECTORY
def delete_file(self, filepath):
self.bucket.delete_key(filepath)
def list_directory(self):
return [k.name for k in self.bucket.list(prefix=self.S3_DIRECTORY)]
def write_file(self, filehandle, filename):
# Use multipart upload because normal upload maximum is 5 GB.
filepath = os.path.join(self.S3_DIRECTORY, filename)
filehandle.seek(0)
handle = self.bucket.initiate_multipart_upload(filepath,
encrypt_key=self.S3_SERVER_SIDE_ENCRYPTION)
try:
chunk = 1
while True:
chunkdata = filehandle.read(5 * 1024 * 1024)
if not chunkdata:
break
tmpfile = BytesIO(chunkdata)
tmpfile.seek(0)
handle.upload_part_from_file(tmpfile, chunk)
tmpfile.close()
chunk += 1
handle.complete_upload()
except Exception:
handle.cancel_upload()
raise
    def read_file(self, filepath):
        """ Read the specified file and return its handle. """
key = Key(self.bucket)
key.key = filepath
filehandle = SpooledTemporaryFile(max_size=10 * 1024 * 1024)
key.get_contents_to_file(filehandle)
return filehandle
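# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming the DBBACKUP_S3_* settings above are configured
# with valid credentials; the backup filename is made up for illustration.
#
# from io import BytesIO
# storage = Storage()
# storage.write_file(BytesIO(b'dump contents'), 'default-2015-04-14.backup')
# print(storage.list_directory())
# handle = storage.read_file(storage.backup_dir + 'default-2015-04-14.backup')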
|
gerhc/django-dbbackup
|
dbbackup/storage/s3_storage.py
|
Python
|
bsd-3-clause
| 3,264
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Marijn van Vliet <w.m.vanvliet@gmail.com>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
# Christian Brodbeck <christianbrodbeck@nyu.edu>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
# Joan Massich <mailsik@gmail.com>
#
# License: Simplified BSD
from collections import OrderedDict
from copy import deepcopy
import os.path as op
import re
import numpy as np
from ..defaults import HEAD_SIZE_DEFAULT
from .._freesurfer import get_mni_fiducials
from ..viz import plot_montage
from ..transforms import (apply_trans, get_ras_to_neuromag_trans, _sph_to_cart,
_topo_to_sph, _frame_to_str, Transform,
_verbose_frames, _fit_matched_points,
_quat_to_affine, _ensure_trans)
from ..io._digitization import (_count_points_by_type,
_get_dig_eeg, _make_dig_points, write_dig,
_read_dig_fif, _format_dig_points,
_get_fid_coords, _coord_frame_const,
_get_data_as_dict_from_dig)
from ..io.meas_info import create_info
from ..io.open import fiff_open
from ..io.pick import pick_types, _picks_to_idx, channel_type
from ..io.constants import FIFF, CHANNEL_LOC_ALIASES
from ..utils import (warn, copy_function_doc_to_method_doc, _pl, verbose,
_check_option, _validate_type, _check_fname, _on_missing,
fill_doc, _docdict)
from ._dig_montage_utils import _read_dig_montage_egi
from ._dig_montage_utils import _parse_brainvision_dig_montage
_BUILT_IN_MONTAGES = [
'EGI_256',
'GSN-HydroCel-128', 'GSN-HydroCel-129', 'GSN-HydroCel-256',
'GSN-HydroCel-257', 'GSN-HydroCel-32', 'GSN-HydroCel-64_1.0',
'GSN-HydroCel-65_1.0',
'biosemi128', 'biosemi16', 'biosemi160', 'biosemi256',
'biosemi32', 'biosemi64',
'easycap-M1', 'easycap-M10',
'mgh60', 'mgh70',
'standard_1005', 'standard_1020', 'standard_alphabetic',
'standard_postfixed', 'standard_prefixed', 'standard_primed',
'artinis-octamon', 'artinis-brite23'
]
def _check_get_coord_frame(dig):
dig_coord_frames = sorted(set(d['coord_frame'] for d in dig))
if len(dig_coord_frames) != 1:
raise RuntimeError(
'Only a single coordinate frame in dig is supported, got '
f'{dig_coord_frames}')
return _frame_to_str[dig_coord_frames.pop()] if dig_coord_frames else None
def get_builtin_montages():
"""Get a list of all builtin montages.
Returns
-------
montages : list
Names of all builtin montages that can be used by
:func:`make_standard_montage`.
"""
return _BUILT_IN_MONTAGES
def make_dig_montage(ch_pos=None, nasion=None, lpa=None, rpa=None,
hsp=None, hpi=None, coord_frame='unknown'):
r"""Make montage from arrays.
Parameters
----------
ch_pos : dict | None
Dictionary of channel positions. Keys are channel names and values
are 3D coordinates - array of shape (3,) - in native digitizer space
in m.
nasion : None | array, shape (3,)
The position of the nasion fiducial point.
This point is assumed to be in the native digitizer space in m.
lpa : None | array, shape (3,)
The position of the left periauricular fiducial point.
This point is assumed to be in the native digitizer space in m.
rpa : None | array, shape (3,)
The position of the right periauricular fiducial point.
This point is assumed to be in the native digitizer space in m.
hsp : None | array, shape (n_points, 3)
This corresponds to an array of positions of the headshape points in
3d. These points are assumed to be in the native digitizer space in m.
hpi : None | array, shape (n_hpi, 3)
This corresponds to an array of HPI points in the native digitizer
        space. They are only necessary if a device-to-head transform
        (``compute_dev_head_t``) is going to be computed.
coord_frame : str
The coordinate frame of the points. Usually this is "unknown"
for native digitizer space.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_egi
read_dig_fif
read_dig_localite
read_dig_polhemus_isotrak
Notes
-----
Valid ``coord_frame`` arguments are 'meg', 'mri', 'mri_voxel', 'head',
'mri_tal', 'ras', 'fs_tal', 'ctf_head', 'ctf_meg', 'unknown'. For custom
montages without fiducials this parameter has to be set to 'head'.
"""
_validate_type(ch_pos, (dict, None), 'ch_pos')
if ch_pos is None:
ch_names = None
else:
ch_names = list(ch_pos)
dig = _make_dig_points(
nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi, extra_points=hsp,
dig_ch_pos=ch_pos, coord_frame=coord_frame
)
return DigMontage(dig=dig, ch_names=ch_names)
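# --- Illustrative usage of make_dig_montage (not part of the original module)
# A minimal sketch; the channel names and coordinates below are made up and
# given in meters, in the native digitizer ('unknown') frame.
#
# ch_pos = {'EEG001': [0.01, 0.02, 0.05], 'EEG002': [-0.01, 0.02, 0.05]}
# montage = make_dig_montage(ch_pos=ch_pos,
#                            nasion=[0., 0.09, 0.],
#                            lpa=[-0.08, 0., 0.],
#                            rpa=[0.08, 0., 0.])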
class DigMontage(object):
"""Montage for digitized electrode and headshape position data.
.. warning:: Montages are typically created using one of the helper
functions in the ``See Also`` section below instead of
instantiating this class directly.
Parameters
----------
dig : list of dict
The object containing all the dig points.
ch_names : list of str
The names of the EEG channels.
See Also
--------
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_localite
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
.. versionadded:: 0.9.0
"""
def __init__(self, *, dig=None, ch_names=None):
dig = list() if dig is None else dig
_validate_type(item=dig, types=list, item_name='dig')
ch_names = list() if ch_names is None else ch_names
n_eeg = sum([1 for d in dig if d['kind'] == FIFF.FIFFV_POINT_EEG])
if n_eeg != len(ch_names):
raise ValueError(
'The number of EEG channels (%d) does not match the number'
' of channel names provided (%d)' % (n_eeg, len(ch_names))
)
self.dig = dig
self.ch_names = ch_names
def __repr__(self):
"""Return string representation."""
n_points = _count_points_by_type(self.dig)
return ('<DigMontage | {extra:d} extras (headshape), {hpi:d} HPIs,'
' {fid:d} fiducials, {eeg:d} channels>').format(**n_points)
@copy_function_doc_to_method_doc(plot_montage)
def plot(self, scale_factor=20, show_names=True, kind='topomap', show=True,
sphere=None, verbose=None):
return plot_montage(self, scale_factor=scale_factor,
show_names=show_names, kind=kind, show=show,
sphere=sphere)
@fill_doc
def rename_channels(self, mapping, allow_duplicates=False):
"""Rename the channels.
Parameters
----------
%(rename_channels_mapping_duplicates)s
Returns
-------
inst : instance of DigMontage
The instance. Operates in-place.
"""
from .channels import rename_channels
temp_info = create_info(list(self._get_ch_pos()), 1000., 'eeg')
rename_channels(temp_info, mapping, allow_duplicates)
self.ch_names = temp_info['ch_names']
def save(self, fname):
"""Save digitization points to FIF.
Parameters
----------
fname : str
The filename to use. Should end in .fif or .fif.gz.
"""
coord_frame = _check_get_coord_frame(self.dig)
write_dig(fname, self.dig, coord_frame)
def __iadd__(self, other):
"""Add two DigMontages in place.
Notes
-----
Two DigMontages can only be added if there are no duplicated ch_names
and if fiducials are present they should share the same coordinate
system and location values.
"""
def is_fid_defined(fid):
return not(
fid.nasion is None and fid.lpa is None and fid.rpa is None
)
        # Check that there are no duplicated ch_names
ch_names_intersection = set(self.ch_names).intersection(other.ch_names)
if ch_names_intersection:
raise RuntimeError((
"Cannot add two DigMontage objects if they contain duplicated"
" channel names. Duplicated channel(s) found: {}."
).format(
', '.join(['%r' % v for v in sorted(ch_names_intersection)])
))
# Check for unique matching fiducials
self_fid, self_coord = _get_fid_coords(self.dig)
other_fid, other_coord = _get_fid_coords(other.dig)
if is_fid_defined(self_fid) and is_fid_defined(other_fid):
if self_coord != other_coord:
raise RuntimeError('Cannot add two DigMontage objects if '
'fiducial locations are not in the same '
'coordinate system.')
for kk in self_fid:
if not np.array_equal(self_fid[kk], other_fid[kk]):
raise RuntimeError('Cannot add two DigMontage objects if '
'fiducial locations do not match '
'(%s)' % kk)
# keep self
self.dig = _format_dig_points(
self.dig + [d for d in other.dig
if d['kind'] != FIFF.FIFFV_POINT_CARDINAL]
)
else:
self.dig = _format_dig_points(self.dig + other.dig)
self.ch_names += other.ch_names
return self
def copy(self):
"""Copy the DigMontage object.
Returns
-------
dig : instance of DigMontage
The copied DigMontage instance.
"""
return deepcopy(self)
def __add__(self, other):
"""Add two DigMontages."""
out = self.copy()
out += other
return out
def _get_ch_pos(self):
pos = [d['r'] for d in _get_dig_eeg(self.dig)]
assert len(self.ch_names) == len(pos)
return OrderedDict(zip(self.ch_names, pos))
def _get_dig_names(self):
NAMED_KIND = (FIFF.FIFFV_POINT_EEG,)
is_eeg = np.array([d['kind'] in NAMED_KIND for d in self.dig])
assert len(self.ch_names) == is_eeg.sum()
dig_names = [None] * len(self.dig)
for ch_name_idx, dig_idx in enumerate(np.where(is_eeg)[0]):
dig_names[dig_idx] = self.ch_names[ch_name_idx]
return dig_names
def get_positions(self):
"""Get all channel and fiducial positions.
Returns
-------
positions : dict
A dictionary of the positions for channels (``ch_pos``),
coordinate frame (``coord_frame``), nasion (``nasion``),
left preauricular point (``lpa``),
right preauricular point (``rpa``),
Head Shape Polhemus (``hsp``), and
Head Position Indicator(``hpi``).
E.g.::
{
'ch_pos': {'EEG061': [0, 0, 0]},
'nasion': [0, 0, 1],
'coord_frame': 'mni_tal',
'lpa': [0, 1, 0],
'rpa': [1, 0, 0],
'hsp': None,
'hpi': None
}
"""
# get channel positions as dict
ch_pos = self._get_ch_pos()
# get coordframe and fiducial coordinates
montage_bunch = _get_data_as_dict_from_dig(self.dig)
coord_frame = _frame_to_str.get(montage_bunch.coord_frame)
# return dictionary
positions = dict(
ch_pos=ch_pos,
coord_frame=coord_frame,
nasion=montage_bunch.nasion,
lpa=montage_bunch.lpa,
rpa=montage_bunch.rpa,
hsp=montage_bunch.hsp,
hpi=montage_bunch.hpi,
)
return positions
@verbose
def apply_trans(self, trans, verbose=None):
"""Apply a transformation matrix to the montage.
Parameters
----------
trans : instance of mne.transforms.Transform
The transformation matrix to be applied.
%(verbose)s
"""
_validate_type(trans, Transform, 'trans')
coord_frame = self.get_positions()['coord_frame']
trans = _ensure_trans(trans, fro=coord_frame, to=trans['to'])
for d in self.dig:
d['r'] = apply_trans(trans, d['r'])
d['coord_frame'] = trans['to']
@verbose
def add_estimated_fiducials(self, subject, subjects_dir=None,
verbose=None):
"""Estimate fiducials based on FreeSurfer ``fsaverage`` subject.
This takes a montage with the ``mri`` coordinate frame,
corresponding to the FreeSurfer RAS (xyz in the volume) T1w
image of the specific subject. It will call
:func:`mne.coreg.get_mni_fiducials` to estimate LPA, RPA and
Nasion fiducial points.
Parameters
----------
%(subject)s
%(subjects_dir)s
%(verbose)s
Returns
-------
inst : instance of DigMontage
The instance, modified in-place.
See Also
--------
:ref:`tut-source-alignment`
Notes
-----
Since MNE uses the FIF data structure, it relies on the ``head``
coordinate frame. Any coordinate frame can be transformed
to ``head`` if the fiducials (i.e. LPA, RPA and Nasion) are
        defined. One can use this function to estimate those fiducials
        and then use :func:`mne.channels.compute_native_head_t` to get the
        head <-> MRI transform.
"""
# get coordframe and fiducial coordinates
montage_bunch = _get_data_as_dict_from_dig(self.dig)
# get the coordinate frame and check that it's MRI
if montage_bunch.coord_frame != FIFF.FIFFV_COORD_MRI:
raise RuntimeError(
f'Montage should be in the "mri" coordinate frame '
f'to use `add_estimated_fiducials`. The current coordinate '
f'frame is {montage_bunch.coord_frame}')
# estimate LPA, nasion, RPA from FreeSurfer fsaverage
fids_mri = list(get_mni_fiducials(subject, subjects_dir))
# add those digpoints to front of montage
self.dig = fids_mri + self.dig
return self
@verbose
def add_mni_fiducials(self, subjects_dir=None, verbose=None):
"""Add fiducials to a montage in MNI space.
Parameters
----------
%(subjects_dir)s
%(verbose)s
Returns
-------
inst : instance of DigMontage
The instance, modified in-place.
Notes
-----
``fsaverage`` is in MNI space and so its fiducials can be
        added to a montage in "mni_tal". MNI is an ACPC-aligned
        coordinate system (with the origin at the anterior commissure),
        and since BIDS requires channel locations for ECoG, sEEG and
        DBS to be in ACPC space, this function can be used to allow
        those coordinates to be transformed to "head" space (origin
        between LPA and RPA).
"""
montage_bunch = _get_data_as_dict_from_dig(self.dig)
# get the coordinate frame and check that it's MNI TAL
if montage_bunch.coord_frame != FIFF.FIFFV_MNE_COORD_MNI_TAL:
raise RuntimeError(
f'Montage should be in the "mni_tal" coordinate frame '
                f'to use `add_mni_fiducials`. The current coordinate '
f'frame is {montage_bunch.coord_frame}')
fids_mni = get_mni_fiducials('fsaverage', subjects_dir)
for fid in fids_mni:
# "mri" and "mni_tal" are equivalent for fsaverage
assert fid['coord_frame'] == FIFF.FIFFV_COORD_MRI
fid['coord_frame'] = FIFF.FIFFV_MNE_COORD_MNI_TAL
self.dig = fids_mni + self.dig
return self
@verbose
def remove_fiducials(self, verbose=None):
"""Remove the fiducial points from a montage.
Parameters
----------
%(verbose)s
Returns
-------
inst : instance of DigMontage
The instance, modified in-place.
Notes
-----
MNE will transform a montage to the internal "head" coordinate
frame if the fiducials are present. Under most circumstances, this
is ideal as it standardizes the coordinate frame for things like
plotting. However, in some circumstances, such as saving a ``raw``
with intracranial data to BIDS format, the coordinate frame
should not be changed by removing fiducials.
"""
for d in self.dig.copy():
if d['kind'] == FIFF.FIFFV_POINT_CARDINAL:
self.dig.remove(d)
return self
VALID_SCALES = dict(mm=1e-3, cm=1e-2, m=1)
def _check_unit_and_get_scaling(unit):
_check_option('unit', unit, sorted(VALID_SCALES.keys()))
return VALID_SCALES[unit]
def transform_to_head(montage):
"""Transform a DigMontage object into head coordinate.
    It requires that the LPA, RPA and Nasion fiducial
    points are available, and that all fiducial points are in the same
    coordinate frame (e.g. 'unknown'). All points in that frame are then
    converted to the Neuromag head coordinate system.
Parameters
----------
montage : instance of DigMontage
The montage.
Returns
-------
montage : instance of DigMontage
The montage after transforming the points to head
coordinate system.
"""
# Get fiducial points and their coord_frame
native_head_t = compute_native_head_t(montage)
montage = montage.copy() # to avoid inplace modification
if native_head_t['from'] != FIFF.FIFFV_COORD_HEAD:
for d in montage.dig:
if d['coord_frame'] == native_head_t['from']:
d['r'] = apply_trans(native_head_t, d['r'])
d['coord_frame'] = FIFF.FIFFV_COORD_HEAD
return montage
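# --- Illustrative usage of transform_to_head (not part of the original module)
# Continuing the make_dig_montage sketch above: once fiducials are present,
# a montage in the 'unknown' digitizer frame can be converted to head coords.
#
# montage_head = transform_to_head(montage)
# montage_head.get_positions()['coord_frame']  # expected: 'head'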
def read_dig_dat(fname):
r"""Read electrode positions from a ``*.dat`` file.
.. Warning::
This function was implemented based on ``*.dat`` files available from
`Compumedics <https://compumedicsneuroscan.com/scan-acquire-
configuration-files/>`__ and might not work as expected with novel
files. If it does not read your files correctly please contact the
mne-python developers.
Parameters
----------
fname : path-like
File from which to read electrode locations.
Returns
-------
montage : DigMontage
The montage.
See Also
--------
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_localite
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
``*.dat`` files are plain text files and can be inspected and amended with
a plain text editor.
"""
from ._standard_montage_utils import _check_dupes_odict
fname = _check_fname(fname, overwrite='read', must_exist=True)
with open(fname, 'r') as fid:
lines = fid.readlines()
ch_names, poss = list(), list()
nasion = lpa = rpa = None
for i, line in enumerate(lines):
items = line.split()
if not items:
continue
elif len(items) != 5:
raise ValueError(
"Error reading %s, line %s has unexpected number of entries:\n"
"%s" % (fname, i, line.rstrip()))
num = items[1]
if num == '67':
continue # centroid
pos = np.array([float(item) for item in items[2:]])
if num == '78':
nasion = pos
elif num == '76':
lpa = pos
elif num == '82':
rpa = pos
else:
ch_names.append(items[0])
poss.append(pos)
electrodes = _check_dupes_odict(ch_names, poss)
return make_dig_montage(electrodes, nasion, lpa, rpa)
def read_dig_fif(fname):
r"""Read digitized points from a .fif file.
Note that electrode names are not present in the .fif file so
they are here defined with the convention from VectorView
systems (EEG001, EEG002, etc.)
Parameters
----------
fname : path-like
FIF file from which to read digitization locations.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_dat
read_dig_egi
read_dig_captrak
read_dig_polhemus_isotrak
read_dig_hpts
read_dig_localite
make_dig_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
# Load the dig data
f, tree = fiff_open(fname)[:2]
with f as fid:
dig = _read_dig_fif(fid, tree)
ch_names = []
for d in dig:
if d['kind'] == FIFF.FIFFV_POINT_EEG:
ch_names.append('EEG%03d' % d['ident'])
montage = DigMontage(dig=dig, ch_names=ch_names)
return montage
def read_dig_hpts(fname, unit='mm'):
"""Read historical .hpts mne-c files.
Parameters
----------
fname : str
The filepath of .hpts file.
unit : 'm' | 'cm' | 'mm'
Unit of the positions. Defaults to 'mm'.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_localite
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
    The hpts format digitizer data file may contain comment lines starting
with the pound sign (#) and data lines of the form::
<*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*>
where:
``<*category*>``
defines the type of points. Allowed categories are: ``hpi``,
``cardinal`` (fiducial), ``eeg``, and ``extra`` corresponding to
head-position indicator coil locations, cardinal landmarks, EEG
electrode locations, and additional head surface points,
respectively.
``<*identifier*>``
identifies the point. The identifiers are usually sequential
numbers. For cardinal landmarks, 1 = left auricular point,
2 = nasion, and 3 = right auricular point. For EEG electrodes,
identifier = 0 signifies the reference electrode.
``<*x/mm*> , <*y/mm*> , <*z/mm*>``
Location of the point, usually in the head coordinate system
        in millimeters. If your points are in [m], the ``unit`` parameter can
        be changed accordingly.
For example::
cardinal 2 -5.6729 -12.3873 -30.3671
cardinal 1 -37.6782 -10.4957 91.5228
cardinal 3 -131.3127 9.3976 -22.2363
hpi 1 -30.4493 -11.8450 83.3601
hpi 2 -122.5353 9.2232 -28.6828
hpi 3 -6.8518 -47.0697 -37.0829
hpi 4 7.3744 -50.6297 -12.1376
hpi 5 -33.4264 -43.7352 -57.7756
eeg FP1 3.8676 -77.0439 -13.0212
eeg FP2 -31.9297 -70.6852 -57.4881
eeg F7 -6.1042 -68.2969 45.4939
...
"""
from ._standard_montage_utils import _str_names, _str
_scale = _check_unit_and_get_scaling(unit)
out = np.genfromtxt(fname, comments='#',
dtype=(_str, _str, 'f8', 'f8', 'f8'))
kind, label = _str_names(out['f0']), _str_names(out['f1'])
kind = [k.lower() for k in kind]
xyz = np.array([out['f%d' % ii] for ii in range(2, 5)]).T
xyz *= _scale
del _scale
fid_idx_to_label = {'1': 'lpa', '2': 'nasion', '3': 'rpa'}
fid = {fid_idx_to_label[label[ii]]: this_xyz
for ii, this_xyz in enumerate(xyz) if kind[ii] == 'cardinal'}
ch_pos = {label[ii]: this_xyz
for ii, this_xyz in enumerate(xyz) if kind[ii] == 'eeg'}
hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz)
if kind[ii] == 'hpi'])
hpi.shape = (-1, 3) # in case it's empty
hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz)
if kind[ii] == 'extra'])
hsp.shape = (-1, 3) # in case it's empty
return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp)
def read_dig_egi(fname):
"""Read electrode locations from EGI system.
Parameters
----------
fname : path-like
EGI MFF XML coordinates file from which to read digitization locations.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_dat
read_dig_fif
read_dig_hpts
read_dig_localite
read_dig_polhemus_isotrak
make_dig_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
data = _read_dig_montage_egi(
fname=fname,
_scaling=1.,
_all_data_kwargs_are_none=True
)
return make_dig_montage(**data)
def read_dig_captrak(fname):
"""Read electrode locations from CapTrak Brain Products system.
Parameters
----------
fname : path-like
BrainVision CapTrak coordinates file from which to read EEG electrode
locations. This is typically in XML format with the .bvct extension.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_localite
read_dig_polhemus_isotrak
make_dig_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
data = _parse_brainvision_dig_montage(fname, scale=1e-3)
return make_dig_montage(**data)
def read_dig_localite(fname, nasion=None, lpa=None, rpa=None):
"""Read Localite .csv file.
Parameters
----------
fname : path-like
File name.
nasion : str | None
Name of nasion fiducial point.
lpa : str | None
Name of left preauricular fiducial point.
rpa : str | None
Name of right preauricular fiducial point.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
"""
ch_pos = {}
with open(fname) as f:
f.readline() # skip first row
for row in f:
_, name, x, y, z = row.split(",")
ch_pos[name] = np.array((float(x), float(y), float(z))) / 1000
if nasion is not None:
nasion = ch_pos.pop(nasion)
if lpa is not None:
lpa = ch_pos.pop(lpa)
if rpa is not None:
rpa = ch_pos.pop(rpa)
return make_dig_montage(ch_pos, nasion, lpa, rpa)
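# --- Illustrative usage of read_dig_localite (not part of the original module)
# A hedged sketch assuming a file with one header row followed by
# ``index,name,x,y,z`` rows in millimeters; the names below are made up.
#
# localite.csv might look like:
#   id,name,x,y,z
#   1,Nasion,0.0,90.0,0.0
#   2,LeftEar,-80.0,0.0,0.0
#   3,RightEar,80.0,0.0,0.0
#   4,Cz,0.0,0.0,100.0
#
# montage = read_dig_localite('localite.csv', nasion='Nasion',
#                             lpa='LeftEar', rpa='RightEar')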
def _get_montage_in_head(montage):
coords = set([d['coord_frame'] for d in montage.dig])
if len(coords) == 1 and coords.pop() == FIFF.FIFFV_COORD_HEAD:
return montage
else:
return transform_to_head(montage.copy())
def _set_montage_fnirs(info, montage):
"""Set the montage for fNIRS data.
This needs to be different to electrodes as each channel has three
coordinates that need to be set. For each channel there is a source optode
location, a detector optode location, and a channel midpoint that must be
stored. This function modifies info['chs'][#]['loc'] and info['dig'] in
place.
"""
from ..preprocessing.nirs import _validate_nirs_info
# Validate that the fNIRS info is correctly formatted
picks = _validate_nirs_info(info)
# Modify info['chs'][#]['loc'] in place
    num_fiducials = len(montage.dig) - len(montage.ch_names)
    for ch_idx in picks:
        ch = info['chs'][ch_idx]['ch_name']
        source, detector = ch.split(' ')[0].split('_')
        source_pos = montage.dig[montage.ch_names.index(source)
                                 + num_fiducials]['r']
        detector_pos = montage.dig[montage.ch_names.index(detector)
                                   + num_fiducials]['r']
info['chs'][ch_idx]['loc'][3:6] = source_pos
info['chs'][ch_idx]['loc'][6:9] = detector_pos
midpoint = (source_pos + detector_pos) / 2
info['chs'][ch_idx]['loc'][:3] = midpoint
info['chs'][ch_idx]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
# Modify info['dig'] in place
with info._unlock():
info['dig'] = montage.dig
@fill_doc
def _set_montage(info, montage, match_case=True, match_alias=False,
on_missing='raise'):
"""Apply montage to data.
With a DigMontage, this function will replace the digitizer info with
the values specified for the particular montage.
Usually, a montage is expected to contain the positions of all EEG
electrodes and a warning is raised when this is not the case.
Parameters
----------
%(info_not_none)s
%(montage)s
%(match_case)s
%(match_alias)s
%(on_missing_montage)s
Notes
-----
This function will change the info variable in place.
"""
_validate_type(montage, (DigMontage, None, str), 'montage')
if montage is None:
# Next line modifies info['dig'] in place
with info._unlock():
info['dig'] = None
for ch in info['chs']:
# Next line modifies info['chs'][#]['loc'] in place
ch['loc'] = np.full(12, np.nan)
return
if isinstance(montage, str): # load builtin montage
_check_option('montage', montage, _BUILT_IN_MONTAGES)
montage = make_standard_montage(montage)
mnt_head = _get_montage_in_head(montage)
del montage
def _backcompat_value(pos, ref_pos):
if any(np.isnan(pos)):
return np.full(6, np.nan)
else:
return np.concatenate((pos, ref_pos))
# get the channels in the montage in head
ch_pos = mnt_head._get_ch_pos()
# only get the eeg, seeg, dbs, ecog channels
picks = pick_types(
info, meg=False, eeg=True, seeg=True, dbs=True, ecog=True,
exclude=())
non_picks = np.setdiff1d(np.arange(info['nchan']), picks)
# get the reference position from the loc[3:6]
chs = [info['chs'][ii] for ii in picks]
non_names = [info['chs'][ii]['ch_name'] for ii in non_picks]
del picks
ref_pos = [ch['loc'][3:6] for ch in chs]
# keep reference location from EEG-like channels if they
# already exist and are all the same.
custom_eeg_ref_dig = False
# Note: ref position is an empty list for fieldtrip data
if ref_pos:
if all([np.equal(ref_pos[0], pos).all() for pos in ref_pos]) \
and not np.equal(ref_pos[0], [0, 0, 0]).all():
eeg_ref_pos = ref_pos[0]
# since we have an EEG reference position, we have
# to add it into the info['dig'] as EEG000
custom_eeg_ref_dig = True
if not custom_eeg_ref_dig:
refs = set(ch_pos) & {'EEG000', 'REF'}
assert len(refs) <= 1
eeg_ref_pos = np.zeros(3) if not(refs) else ch_pos.pop(refs.pop())
# This raises based on info being subset/superset of montage
info_names = [ch['ch_name'] for ch in chs]
dig_names = mnt_head._get_dig_names()
ref_names = [None, 'EEG000', 'REF']
if match_case:
info_names_use = info_names
dig_names_use = dig_names
non_names_use = non_names
else:
ch_pos_use = OrderedDict(
(name.lower(), pos) for name, pos in ch_pos.items())
info_names_use = [name.lower() for name in info_names]
dig_names_use = [name.lower() if name is not None else name
for name in dig_names]
non_names_use = [name.lower() for name in non_names]
ref_names = [name.lower() if name is not None else name
for name in ref_names]
n_dup = len(ch_pos) - len(ch_pos_use)
if n_dup:
raise ValueError('Cannot use match_case=False as %s montage '
'name(s) require case sensitivity' % n_dup)
n_dup = len(info_names_use) - len(set(info_names_use))
if n_dup:
raise ValueError('Cannot use match_case=False as %s channel '
'name(s) require case sensitivity' % n_dup)
ch_pos = ch_pos_use
del ch_pos_use
del dig_names
# use lookup table to match unrecognized channel names to known aliases
if match_alias:
alias_dict = (match_alias if isinstance(match_alias, dict) else
CHANNEL_LOC_ALIASES)
if not match_case:
alias_dict = {
ch_name.lower(): ch_alias.lower()
for ch_name, ch_alias in alias_dict.items()
}
# excluded ch_alias not in info, to prevent unnecessary mapping and
# warning messages based on aliases.
alias_dict = {
ch_name: ch_alias
for ch_name, ch_alias in alias_dict.items()
}
info_names_use = [
alias_dict.get(ch_name, ch_name) for ch_name in info_names_use
]
non_names_use = [
alias_dict.get(ch_name, ch_name) for ch_name in non_names_use
]
# warn user if there is not a full overlap of montage with info_chs
missing = np.where([use not in ch_pos for use in info_names_use])[0]
if len(missing): # DigMontage is subset of info
missing_names = [info_names[ii] for ii in missing]
missing_coord_msg = (
'DigMontage is only a subset of info. There are '
f'{len(missing)} channel position{_pl(missing)} '
'not present in the DigMontage. The required channels are:\n\n'
f'{missing_names}.\n\nConsider using inst.set_channel_types '
'if these are not EEG channels, or use the on_missing '
'parameter if the channel positions are allowed to be unknown '
'in your analyses.'
)
_on_missing(on_missing, missing_coord_msg)
# set ch coordinates and names from digmontage or nan coords
for ii in missing:
ch_pos[info_names_use[ii]] = [np.nan] * 3
del info_names
assert len(non_names_use) == len(non_names)
# There are no issues here with fNIRS being in non_names_use because
# these names are like "D1_S1_760" and the ch_pos for a fNIRS montage
# will have entries "D1" and "S1".
extra = np.where([non in ch_pos for non in non_names_use])[0]
if len(extra):
types = '/'.join(sorted(set(
channel_type(info, non_picks[ii]) for ii in extra)))
names = [non_names[ii] for ii in extra]
warn(f'Not setting position{_pl(extra)} of {len(extra)} {types} '
f'channel{_pl(extra)} found in montage:\n{names}\n'
'Consider setting the channel types to be of '
f'{_docdict["montage_types"]} '
'using inst.set_channel_types before calling inst.set_montage, '
'or omit these channels when creating your montage.')
for ch, use in zip(chs, info_names_use):
# Next line modifies info['chs'][#]['loc'] in place
if use in ch_pos:
ch['loc'][:6] = _backcompat_value(ch_pos[use], eeg_ref_pos)
ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD
del ch_pos
# XXX this is probably wrong as it uses the order from the montage
# rather than the order of our info['ch_names'] ...
digpoints = [
mnt_head.dig[ii] for ii, name in enumerate(dig_names_use)
if name in (info_names_use + ref_names)]
# get a copy of the old dig
if info['dig'] is not None:
old_dig = info['dig'].copy()
else:
old_dig = []
# determine if needed to add an extra EEG REF DigPoint
if custom_eeg_ref_dig:
# ref_name = 'EEG000' if match_case else 'eeg000'
ref_dig_dict = {'kind': FIFF.FIFFV_POINT_EEG,
'r': eeg_ref_pos,
'ident': 0,
'coord_frame': info['dig'].pop()['coord_frame']}
ref_dig_point = _format_dig_points([ref_dig_dict])[0]
# only append the reference dig point if it was already
# in the old dig
if ref_dig_point in old_dig:
digpoints.append(ref_dig_point)
# Next line modifies info['dig'] in place
with info._unlock():
info['dig'] = _format_dig_points(digpoints, enforce_order=True)
# Handle fNIRS with source, detector and channel
fnirs_picks = _picks_to_idx(info, 'fnirs', allow_empty=True)
if len(fnirs_picks) > 0:
_set_montage_fnirs(info, mnt_head)
def _read_isotrak_elp_points(fname):
"""Read Polhemus Isotrak digitizer data from a ``.elp`` file.
Parameters
----------
fname : str
The filepath of .elp Polhemus Isotrak file.
Returns
-------
out : dict of arrays
The dictionary containing locations for 'nasion', 'lpa', 'rpa'
and 'points'.
"""
value_pattern = r"\-?\d+\.?\d*e?\-?\d*"
coord_pattern = r"({0})\s+({0})\s+({0})\s*$".format(value_pattern)
with open(fname) as fid:
file_str = fid.read()
points_str = [m.groups() for m in re.finditer(coord_pattern, file_str,
re.MULTILINE)]
points = np.array(points_str, dtype=float)
return {
'nasion': points[0], 'lpa': points[1], 'rpa': points[2],
'points': points[3:]
}
def _read_isotrak_hsp_points(fname):
"""Read Polhemus Isotrak digitizer data from a ``.hsp`` file.
Parameters
----------
fname : str
The filepath of .hsp Polhemus Isotrak file.
Returns
-------
out : dict of arrays
The dictionary containing locations for 'nasion', 'lpa', 'rpa'
and 'points'.
"""
def get_hsp_fiducial(line):
return np.fromstring(line.replace('%F', ''), dtype=float, sep='\t')
with open(fname) as ff:
for line in ff:
if 'position of fiducials' in line.lower():
break
nasion = get_hsp_fiducial(ff.readline())
lpa = get_hsp_fiducial(ff.readline())
rpa = get_hsp_fiducial(ff.readline())
_ = ff.readline()
line = ff.readline()
if line:
n_points, n_cols = np.fromstring(line, dtype=int, sep='\t')
points = np.fromstring(
string=ff.read(), dtype=float, sep='\t',
).reshape(-1, n_cols)
assert points.shape[0] == n_points
else:
points = np.empty((0, 3))
return {
'nasion': nasion, 'lpa': lpa, 'rpa': rpa, 'points': points
}
def read_dig_polhemus_isotrak(fname, ch_names=None, unit='m'):
"""Read Polhemus digitizer data from a file.
Parameters
----------
fname : str
The filepath of Polhemus ISOTrak formatted file.
File extension is expected to be '.hsp', '.elp' or '.eeg'.
ch_names : None | list of str
The names of the points. This will make the points
considered as EEG channels. If None, channels will be assumed
to be HPI if the extension is ``'.elp'``, and extra headshape
points otherwise.
unit : 'm' | 'cm' | 'mm'
Unit of the digitizer file. Polhemus ISOTrak systems data is usually
exported in meters. Defaults to 'm'.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
make_dig_montage
read_polhemus_fastscan
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_localite
"""
VALID_FILE_EXT = ('.hsp', '.elp', '.eeg')
_scale = _check_unit_and_get_scaling(unit)
_, ext = op.splitext(fname)
_check_option('fname', ext, VALID_FILE_EXT)
if ext == '.elp':
data = _read_isotrak_elp_points(fname)
else:
        # Default: read points as hsp since that is the most likely scenario
data = _read_isotrak_hsp_points(fname)
if _scale != 1:
data = {key: val * _scale for key, val in data.items()}
else:
pass # noqa
if ch_names is None:
keyword = 'hpi' if ext == '.elp' else 'hsp'
data[keyword] = data.pop('points')
else:
points = data.pop('points')
if points.shape[0] == len(ch_names):
data['ch_pos'] = OrderedDict(zip(ch_names, points))
else:
raise ValueError((
"Length of ``ch_names`` does not match the number of points"
" in {fname}. Expected ``ch_names`` length {n_points:d},"
" given {n_chnames:d}"
).format(
fname=fname, n_points=points.shape[0], n_chnames=len(ch_names)
))
return make_dig_montage(**data)
def _is_polhemus_fastscan(fname):
header = ''
with open(fname, 'r') as fid:
for line in fid:
if not line.startswith('%'):
break
header += line
return 'FastSCAN' in header
@verbose
def read_polhemus_fastscan(fname, unit='mm', on_header_missing='raise', *,
verbose=None):
"""Read Polhemus FastSCAN digitizer data from a ``.txt`` file.
Parameters
----------
fname : str
The filepath of .txt Polhemus FastSCAN file.
unit : 'm' | 'cm' | 'mm'
Unit of the digitizer file. Polhemus FastSCAN systems data is usually
exported in millimeters. Defaults to 'mm'.
%(on_header_missing)s
%(verbose)s
Returns
-------
points : array, shape (n_points, 3)
The digitization points in digitizer coordinates.
See Also
--------
read_dig_polhemus_isotrak
make_dig_montage
"""
VALID_FILE_EXT = ['.txt']
_scale = _check_unit_and_get_scaling(unit)
_, ext = op.splitext(fname)
_check_option('fname', ext, VALID_FILE_EXT)
if not _is_polhemus_fastscan(fname):
msg = "%s does not contain a valid Polhemus FastSCAN header" % fname
_on_missing(on_header_missing, msg)
points = _scale * np.loadtxt(fname, comments='%', ndmin=2)
_check_dig_shape(points)
return points
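# --- Illustrative usage of read_polhemus_fastscan (not part of the original
# module). A minimal sketch, assuming a FastSCAN export named 'headshape.txt';
# the returned array (in meters) is typically passed on as head-shape points.
#
# hsp = read_polhemus_fastscan('headshape.txt')        # shape (n_points, 3)
# montage = make_dig_montage(hsp=hsp, coord_frame='unknown')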
def _read_eeglab_locations(fname):
ch_names = np.genfromtxt(fname, dtype=str, usecols=3).tolist()
topo = np.loadtxt(fname, dtype=float, usecols=[1, 2])
sph = _topo_to_sph(topo)
pos = _sph_to_cart(sph)
pos[:, [0, 1]] = pos[:, [1, 0]] * [-1, 1]
return ch_names, pos
def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None):
"""Read a montage from a file.
Parameters
----------
fname : str
File extension is expected to be:
'.loc' or '.locs' or '.eloc' (for EEGLAB files),
'.sfp' (BESA/EGI files), '.csd',
'.elc', '.txt', '.csd', '.elp' (BESA spherical),
'.bvef' (BrainVision files),
'.csv', '.tsv', '.xyz' (XYZ coordinates).
head_size : float | None
The size of the head (radius, in [m]). If ``None``, returns the values
read from the montage file with no modification. Defaults to 0.095m.
coord_frame : str | None
The coordinate frame of the points. Usually this is "unknown"
for native digitizer space. Defaults to None, which is "unknown" for
most readers but "head" for EEGLAB.
.. versionadded:: 0.20
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
make_dig_montage
make_standard_montage
Notes
-----
The function is a helper to read electrode positions you may have
in various formats. Most of these format are weakly specified
in terms of units, coordinate systems. It implies that setting
a montage using a DigMontage produced by this function may
be problematic. If you use a standard/template (eg. 10/20,
10/10 or 10/05) we recommend you use :func:`make_standard_montage`.
If you can have positions in memory you can also use
:func:`make_dig_montage` that takes arrays as input.
"""
from ._standard_montage_utils import (
_read_theta_phi_in_degrees, _read_sfp, _read_csd, _read_elc,
_read_elp_besa, _read_brainvision, _read_xyz
)
SUPPORTED_FILE_EXT = {
'eeglab': ('.loc', '.locs', '.eloc', ),
'hydrocel': ('.sfp', ),
'matlab': ('.csd', ),
'asa electrode': ('.elc', ),
'generic (Theta-phi in degrees)': ('.txt', ),
'standard BESA spherical': ('.elp', ), # NB: not same as polhemus elp
'brainvision': ('.bvef', ),
'xyz': ('.csv', '.tsv', '.xyz'),
}
_, ext = op.splitext(fname)
_check_option('fname', ext, list(sum(SUPPORTED_FILE_EXT.values(), ())))
if ext in SUPPORTED_FILE_EXT['eeglab']:
if head_size is None:
raise ValueError(
"``head_size`` cannot be None for '{}'".format(ext))
ch_names, pos = _read_eeglab_locations(fname)
scale = head_size / np.median(np.linalg.norm(pos, axis=-1))
pos *= scale
montage = make_dig_montage(
ch_pos=OrderedDict(zip(ch_names, pos)),
coord_frame='head',
)
elif ext in SUPPORTED_FILE_EXT['hydrocel']:
montage = _read_sfp(fname, head_size=head_size)
elif ext in SUPPORTED_FILE_EXT['matlab']:
montage = _read_csd(fname, head_size=head_size)
elif ext in SUPPORTED_FILE_EXT['asa electrode']:
montage = _read_elc(fname, head_size=head_size)
elif ext in SUPPORTED_FILE_EXT['generic (Theta-phi in degrees)']:
if head_size is None:
raise ValueError(
"``head_size`` cannot be None for '{}'".format(ext))
montage = _read_theta_phi_in_degrees(fname, head_size=head_size,
fid_names=('Nz', 'LPA', 'RPA'))
elif ext in SUPPORTED_FILE_EXT['standard BESA spherical']:
montage = _read_elp_besa(fname, head_size)
elif ext in SUPPORTED_FILE_EXT['brainvision']:
montage = _read_brainvision(fname, head_size)
elif ext in SUPPORTED_FILE_EXT['xyz']:
montage = _read_xyz(fname)
if coord_frame is not None:
coord_frame = _coord_frame_const(coord_frame)
for d in montage.dig:
d['coord_frame'] = coord_frame
return montage
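# --- Illustrative usage of read_custom_montage (not part of the original
# module). A hedged sketch with made-up filenames; EEGLAB positions are
# rescaled to the default spherical head radius unless head_size is None.
#
# montage = read_custom_montage('chanlocs.locs')
# montage = read_custom_montage('electrodes.sfp', head_size=None)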
def compute_dev_head_t(montage):
"""Compute device to head transform from a DigMontage.
Parameters
----------
montage : instance of DigMontage
The DigMontage must contain the fiducials in head
coordinate system and hpi points in both head and
meg device coordinate system.
Returns
-------
dev_head_t : instance of Transform
A Device-to-Head transformation matrix.
"""
_, coord_frame = _get_fid_coords(montage.dig)
if coord_frame != FIFF.FIFFV_COORD_HEAD:
raise ValueError('montage should have been set to head coordinate '
'system with transform_to_head function.')
hpi_head = np.array(
[d['r'] for d in montage.dig
if (d['kind'] == FIFF.FIFFV_POINT_HPI and
d['coord_frame'] == FIFF.FIFFV_COORD_HEAD)], float)
hpi_dev = np.array(
[d['r'] for d in montage.dig
if (d['kind'] == FIFF.FIFFV_POINT_HPI and
d['coord_frame'] == FIFF.FIFFV_COORD_DEVICE)], float)
if not (len(hpi_head) == len(hpi_dev) and len(hpi_dev) > 0):
raise ValueError((
"To compute Device-to-Head transformation, the same number of HPI"
" points in device and head coordinates is required. (Got {dev}"
" points in device and {head} points in head coordinate systems)"
).format(dev=len(hpi_dev), head=len(hpi_head)))
trans = _quat_to_affine(_fit_matched_points(hpi_dev, hpi_head)[0])
return Transform(fro='meg', to='head', trans=trans)
def compute_native_head_t(montage):
"""Compute the native-to-head transformation for a montage.
    This uses the fiducials in the native space to compute the
    transform to the head coordinate frame.
Parameters
----------
montage : instance of DigMontage
The montage.
Returns
-------
native_head_t : instance of Transform
A native-to-head transformation matrix.
"""
# Get fiducial points and their coord_frame
fid_coords, coord_frame = _get_fid_coords(montage.dig, raise_error=False)
if coord_frame is None:
coord_frame = FIFF.FIFFV_COORD_UNKNOWN
if coord_frame == FIFF.FIFFV_COORD_HEAD:
native_head_t = np.eye(4)
else:
fid_keys = ('nasion', 'lpa', 'rpa')
for key in fid_keys:
if fid_coords[key] is None:
warn('Fiducial point %s not found, assuming identity %s to '
'head transformation'
% (key, _verbose_frames[coord_frame],))
native_head_t = np.eye(4)
break
else:
native_head_t = get_ras_to_neuromag_trans(
*[fid_coords[key] for key in fid_keys])
return Transform(coord_frame, 'head', native_head_t)
def make_standard_montage(kind, head_size=HEAD_SIZE_DEFAULT):
"""Read a generic (built-in) montage.
Parameters
----------
kind : str
The name of the montage to use. See notes for valid kinds.
head_size : float
The head size (radius, in meters) to use for spherical montages.
Defaults to 95mm.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
make_dig_montage
read_custom_montage
Notes
-----
Individualized (digitized) electrode positions should be read in using
:func:`read_dig_captrak`, :func:`read_dig_dat`, :func:`read_dig_egi`,
:func:`read_dig_fif`, :func:`read_dig_polhemus_isotrak`,
:func:`read_dig_hpts` or made with :func:`make_dig_montage`.
Valid ``kind`` arguments are:
=================== =====================================================
Kind Description
=================== =====================================================
standard_1005 Electrodes are named and positioned according to the
international 10-05 system (343+3 locations)
standard_1020 Electrodes are named and positioned according to the
international 10-20 system (94+3 locations)
standard_alphabetic Electrodes are named with LETTER-NUMBER combinations
(A1, B2, F4, ...) (65+3 locations)
standard_postfixed Electrodes are named according to the international
10-20 system using postfixes for intermediate
positions (100+3 locations)
standard_prefixed Electrodes are named according to the international
10-20 system using prefixes for intermediate
positions (74+3 locations)
standard_primed Electrodes are named according to the international
10-20 system using prime marks (' and '') for
intermediate positions (100+3 locations)
biosemi16 BioSemi cap with 16 electrodes (16+3 locations)
biosemi32 BioSemi cap with 32 electrodes (32+3 locations)
biosemi64 BioSemi cap with 64 electrodes (64+3 locations)
biosemi128 BioSemi cap with 128 electrodes (128+3 locations)
biosemi160 BioSemi cap with 160 electrodes (160+3 locations)
biosemi256 BioSemi cap with 256 electrodes (256+3 locations)
easycap-M1 EasyCap with 10-05 electrode names (74 locations)
easycap-M10 EasyCap with numbered electrodes (61 locations)
EGI_256 Geodesic Sensor Net (256 locations)
GSN-HydroCel-32 HydroCel Geodesic Sensor Net and Cz (33+3 locations)
GSN-HydroCel-64_1.0 HydroCel Geodesic Sensor Net (64+3 locations)
GSN-HydroCel-65_1.0 HydroCel Geodesic Sensor Net and Cz (65+3 locations)
GSN-HydroCel-128 HydroCel Geodesic Sensor Net (128+3 locations)
GSN-HydroCel-129 HydroCel Geodesic Sensor Net and Cz (129+3 locations)
GSN-HydroCel-256 HydroCel Geodesic Sensor Net (256+3 locations)
GSN-HydroCel-257 HydroCel Geodesic Sensor Net and Cz (257+3 locations)
mgh60 The (older) 60-channel cap used at
MGH (60+3 locations)
mgh70 The (newer) 70-channel BrainVision cap used at
MGH (70+3 locations)
artinis-octamon Artinis OctaMon fNIRS (8 sources, 2 detectors)
artinis-brite23 Artinis Brite23 fNIRS (11 sources, 7 detectors)
=================== =====================================================
.. versionadded:: 0.19.0
"""
from ._standard_montage_utils import standard_montage_look_up_table
_check_option('kind', kind, _BUILT_IN_MONTAGES)
return standard_montage_look_up_table[kind](head_size=head_size)
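# --- Illustrative usage of make_standard_montage (not part of the original
# module). A minimal sketch; ``raw`` is assumed to be an existing Raw
# instance whose channel names match the template.
#
# montage = make_standard_montage('standard_1020')
# raw.set_montage(montage)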
def _check_dig_shape(pts):
_validate_type(pts, np.ndarray, 'points')
if pts.ndim != 2 or pts.shape[-1] != 3:
raise ValueError(
f'Points must be of shape (n, 3) instead of {pts.shape}')
|
bloyl/mne-python
|
mne/channels/montage.py
|
Python
|
bsd-3-clause
| 53,823
|
import logging
import numpy as np
from ginga import AstroImage
from ginga.mockw.ImageViewMock import CanvasView
class TestImageView(object):
def setup_class(self):
self.logger = logging.getLogger("TestImageView")
self.viewer = CanvasView(logger=self.logger)
self.data = np.identity(2000)
self.image = AstroImage.AstroImage(logger=self.logger)
self.image.set_data(self.data)
def test_scale(self):
viewer = self.viewer
viewer.set_window_size(900, 1100)
viewer.set_image(self.image)
zoom = 0.0
scale_x = scale_y = 1.0
viewer.scale_to(scale_x, scale_y)
zoomlevel = viewer.get_zoom()
assert zoomlevel == zoom
def test_centering(self):
viewer = self.viewer
viewer.set_window_size(900, 1100)
viewer.set_image(self.image)
viewer.center_image()
ht, wd = self.data.shape[:2]
ctr_x, ctr_y = wd / 2. - viewer.data_off, ht / 2. - viewer.data_off
pan_x, pan_y = viewer.get_pan()
assert np.isclose(pan_x, ctr_x) and np.isclose(pan_y, ctr_y)
def test_pan(self):
viewer = self.viewer
viewer.set_window_size(900, 1100)
viewer.set_image(self.image)
viewer.set_pan(401.0, 501.0)
pan_x, pan_y = viewer.get_pan()
assert np.isclose(pan_x, 401.0) and np.isclose(pan_y, 501.0)
def test_pan2(self):
viewer = self.viewer
viewer.set_window_size(400, 300)
viewer.set_image(self.image)
viewer.set_pan(401.0, 501.0)
viewer.scale_to(8.0, 8.0)
x1, y1, x2, y2 = viewer.get_datarect()
result = np.array([(x1, y1), (x2, y2)])
expected = np.array([[376., 482.25], [426., 519.75]])
assert np.all(np.isclose(expected, result))
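# --- Illustrative usage (not part of the original test file) ---
# These tests are typically collected and run with pytest, e.g.:
#
#   pytest ginga/tests/test_ImageView.py -k test_pan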
|
pllim/ginga
|
ginga/tests/test_ImageView.py
|
Python
|
bsd-3-clause
| 1,805
|
from __future__ import print_function, division
from sympy.core import S, Add, Expr, Basic
from sympy.assumptions import Q, ask
from sympy.core.logic import fuzzy_not
def refine(expr, assumptions=True):
"""
Simplify an expression using assumptions.
Gives the form of expr that would be obtained if symbols
in it were replaced by explicit numerical expressions satisfying
the assumptions.
Examples
========
>>> from sympy import refine, sqrt, Q
>>> from sympy.abc import x
>>> refine(sqrt(x**2), Q.real(x))
Abs(x)
>>> refine(sqrt(x**2), Q.positive(x))
x
"""
if not isinstance(expr, Basic):
return expr
if not expr.is_Atom:
args = [refine(arg, assumptions) for arg in expr.args]
# TODO: this will probably not work with Integral or Polynomial
expr = expr.func(*args)
if hasattr(expr, '_eval_refine'):
ref_expr = expr._eval_refine()
if ref_expr is not None:
return ref_expr
name = expr.__class__.__name__
handler = handlers_dict.get(name, None)
if handler is None:
return expr
new_expr = handler(expr, assumptions)
if (new_expr is None) or (expr == new_expr):
return expr
if not isinstance(new_expr, Expr):
return new_expr
return refine(new_expr, assumptions)
def refine_abs(expr, assumptions):
"""
Handler for the absolute value.
Examples
========
>>> from sympy import Symbol, Q, refine, Abs
>>> from sympy.assumptions.refine import refine_abs
>>> from sympy.abc import x
>>> refine_abs(Abs(x), Q.real(x))
>>> refine_abs(Abs(x), Q.positive(x))
x
>>> refine_abs(Abs(x), Q.negative(x))
-x
"""
arg = expr.args[0]
if ask(Q.real(arg), assumptions) and \
fuzzy_not(ask(Q.negative(arg), assumptions)):
# if it's nonnegative
return arg
if ask(Q.negative(arg), assumptions):
return -arg
def refine_Pow(expr, assumptions):
"""
Handler for instances of Pow.
>>> from sympy import Symbol, Q
>>> from sympy.assumptions.refine import refine_Pow
>>> from sympy.abc import x,y,z
>>> refine_Pow((-1)**x, Q.real(x))
>>> refine_Pow((-1)**x, Q.even(x))
1
>>> refine_Pow((-1)**x, Q.odd(x))
-1
For powers of -1, even parts of the exponent can be simplified:
>>> refine_Pow((-1)**(x+y), Q.even(x))
(-1)**y
>>> refine_Pow((-1)**(x+y+z), Q.odd(x) & Q.odd(z))
(-1)**y
>>> refine_Pow((-1)**(x+y+2), Q.odd(x))
(-1)**(y + 1)
>>> refine_Pow((-1)**(x+3), True)
(-1)**(x + 1)
"""
from sympy.core import Pow, Rational
from sympy.functions.elementary.complexes import Abs
from sympy.functions import sign
if isinstance(expr.base, Abs):
if ask(Q.real(expr.base.args[0]), assumptions) and \
ask(Q.even(expr.exp), assumptions):
return expr.base.args[0] ** expr.exp
if ask(Q.real(expr.base), assumptions):
if expr.base.is_number:
if ask(Q.even(expr.exp), assumptions):
return abs(expr.base) ** expr.exp
if ask(Q.odd(expr.exp), assumptions):
return sign(expr.base) * abs(expr.base) ** expr.exp
if isinstance(expr.exp, Rational):
if type(expr.base) is Pow:
return abs(expr.base.base) ** (expr.base.exp * expr.exp)
if expr.base is S.NegativeOne:
if expr.exp.is_Add:
old = expr
# For powers of (-1) we can remove
# - even terms
# - pairs of odd terms
# - a single odd term + 1
# - A numerical constant N can be replaced with mod(N,2)
coeff, terms = expr.exp.as_coeff_add()
terms = set(terms)
even_terms = set([])
odd_terms = set([])
initial_number_of_terms = len(terms)
for t in terms:
if ask(Q.even(t), assumptions):
even_terms.add(t)
elif ask(Q.odd(t), assumptions):
odd_terms.add(t)
terms -= even_terms
if len(odd_terms) % 2:
terms -= odd_terms
new_coeff = (coeff + S.One) % 2
else:
terms -= odd_terms
new_coeff = coeff % 2
if new_coeff != coeff or len(terms) < initial_number_of_terms:
terms.add(new_coeff)
expr = expr.base**(Add(*terms))
# Handle (-1)**((-1)**n/2 + m/2)
e2 = 2*expr.exp
if ask(Q.even(e2), assumptions):
if e2.could_extract_minus_sign():
e2 *= expr.base
if e2.is_Add:
i, p = e2.as_two_terms()
if p.is_Pow and p.base is S.NegativeOne:
if ask(Q.integer(p.exp), assumptions):
i = (i + 1)/2
if ask(Q.even(i), assumptions):
return expr.base**p.exp
elif ask(Q.odd(i), assumptions):
return expr.base**(p.exp + 1)
else:
return expr.base**(p.exp + i)
if old != expr:
return expr
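# Worked example (illustrative only) of the (-1)** parity reduction performed
# inside refine_Pow above: even terms drop out, pairs of odd terms cancel, and
# a numeric constant N is replaced by N % 2.
#
#   refine_Pow((-1)**(x + y + 3), Q.odd(x)):
#       coeff = 3, terms = {x, y}; x is odd -> odd_terms = {x}
#       the odd count is 1, so x is dropped and folded into the constant:
#       new_coeff = (3 + 1) % 2 = 0, terms = {y}
#       result: (-1)**y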
def refine_exp(expr, assumptions):
"""
Handler for exponential function.
>>> from sympy import Symbol, Q, exp, I, pi
>>> from sympy.assumptions.refine import refine_exp
>>> from sympy.abc import x
>>> refine_exp(exp(pi*I*2*x), Q.real(x))
>>> refine_exp(exp(pi*I*2*x), Q.integer(x))
1
"""
arg = expr.args[0]
if arg.is_Mul:
coeff = arg.as_coefficient(S.Pi*S.ImaginaryUnit)
if coeff:
if ask(Q.integer(2*coeff), assumptions):
if ask(Q.even(coeff), assumptions):
return S.One
elif ask(Q.odd(coeff), assumptions):
return S.NegativeOne
elif ask(Q.even(coeff + S.Half), assumptions):
return -S.ImaginaryUnit
elif ask(Q.odd(coeff + S.Half), assumptions):
return S.ImaginaryUnit
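# Worked example (illustrative only) of the mapping applied by refine_exp
# above, which reads exp(I*pi*coeff) off the argument and reduces it by the
# parity of coeff:
#   exp(I*pi*x)  with Q.even(x)                      -> 1
#   exp(I*pi*x)  with Q.odd(x)                       -> -1
#   exp(I*pi/2):   coeff = 1/2, coeff + 1/2 = 1 odd  -> I
#   exp(3*I*pi/2): coeff = 3/2, coeff + 1/2 = 2 even -> -I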
def refine_atan2(expr, assumptions):
"""
Handler for the atan2 function
Examples
========
>>> from sympy import Symbol, Q, refine, atan2
>>> from sympy.assumptions.refine import refine_atan2
>>> from sympy.abc import x, y
>>> refine_atan2(atan2(y,x), Q.real(y) & Q.positive(x))
atan(y/x)
>>> refine_atan2(atan2(y,x), Q.negative(y) & Q.negative(x))
atan(y/x) - pi
>>> refine_atan2(atan2(y,x), Q.positive(y) & Q.negative(x))
atan(y/x) + pi
>>> refine_atan2(atan2(y,x), Q.zero(y) & Q.negative(x))
pi
>>> refine_atan2(atan2(y,x), Q.positive(y) & Q.zero(x))
pi/2
>>> refine_atan2(atan2(y,x), Q.negative(y) & Q.zero(x))
-pi/2
>>> refine_atan2(atan2(y,x), Q.zero(y) & Q.zero(x))
nan
"""
from sympy.functions.elementary.trigonometric import atan
from sympy.core import S
y, x = expr.args
if ask(Q.real(y) & Q.positive(x), assumptions):
return atan(y / x)
elif ask(Q.negative(y) & Q.negative(x), assumptions):
return atan(y / x) - S.Pi
elif ask(Q.positive(y) & Q.negative(x), assumptions):
return atan(y / x) + S.Pi
elif ask(Q.zero(y) & Q.negative(x), assumptions):
return S.Pi
elif ask(Q.positive(y) & Q.zero(x), assumptions):
return S.Pi/2
elif ask(Q.negative(y) & Q.zero(x), assumptions):
return -S.Pi/2
elif ask(Q.zero(y) & Q.zero(x), assumptions):
return S.NaN
else:
return expr
def refine_Relational(expr, assumptions):
"""
Handler for Relational
>>> from sympy.assumptions.refine import refine_Relational
>>> from sympy.assumptions.ask import Q
>>> from sympy.abc import x
>>> refine_Relational(x<0, ~Q.is_true(x<0))
False
"""
return ask(Q.is_true(expr), assumptions)
handlers_dict = {
'Abs': refine_abs,
'Pow': refine_Pow,
'exp': refine_exp,
'atan2': refine_atan2,
'Equality': refine_Relational,
'Unequality': refine_Relational,
'GreaterThan': refine_Relational,
'LessThan': refine_Relational,
'StrictGreaterThan': refine_Relational,
'StrictLessThan': refine_Relational
}
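
# Illustrative sketch (not part of the original module): refine() dispatches on
# the class name of the top-level node via handlers_dict, so a new rule can be
# plugged in by mapping a class name to a callable taking (expr, assumptions).
# The handler and its registration below are hypothetical.
def refine_sign_example(expr, assumptions):
    # Simplify sign(x) to 1 when x is known positive; returning None leaves
    # the expression untouched, mirroring the handlers above.
    if ask(Q.positive(expr.args[0]), assumptions):
        return S.One
    return None

# handlers_dict['sign'] = refine_sign_example  # hypothetical registration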
|
Designist/sympy
|
sympy/assumptions/refine.py
|
Python
|
bsd-3-clause
| 8,560
|
from pallets_sphinx_themes import get_version
from pallets_sphinx_themes import ProjectLink
# Project --------------------------------------------------------------
project = "Jinja"
copyright = "2007 Pallets"
author = "Pallets"
release, version = get_version("Jinja2")
# General --------------------------------------------------------------
master_doc = "index"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"pallets_sphinx_themes",
"sphinxcontrib.log_cabinet",
"sphinx_issues",
]
autodoc_typehints = "description"
intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
issues_github_path = "pallets/jinja"
# HTML -----------------------------------------------------------------
html_theme = "jinja"
html_theme_options = {"index_sidebar_logo": False}
html_context = {
"project_links": [
ProjectLink("Donate", "https://palletsprojects.com/donate"),
ProjectLink("PyPI Releases", "https://pypi.org/project/Jinja2/"),
ProjectLink("Source Code", "https://github.com/pallets/jinja/"),
ProjectLink("Issue Tracker", "https://github.com/pallets/jinja/issues/"),
ProjectLink("Website", "https://palletsprojects.com/p/jinja/"),
ProjectLink("Twitter", "https://twitter.com/PalletsTeam"),
ProjectLink("Chat", "https://discord.gg/pallets"),
]
}
html_sidebars = {
"index": ["project.html", "localtoc.html", "searchbox.html", "ethicalads.html"],
"**": ["localtoc.html", "relations.html", "searchbox.html", "ethicalads.html"],
}
singlehtml_sidebars = {"index": ["project.html", "localtoc.html", "ethicalads.html"]}
html_static_path = ["_static"]
html_favicon = "_static/jinja-logo-sidebar.png"
html_logo = "_static/jinja-logo-sidebar.png"
html_title = f"Jinja Documentation ({version})"
html_show_sourcelink = False
# LaTeX ----------------------------------------------------------------
latex_documents = [(master_doc, f"Jinja-{version}.tex", html_title, author, "manual")]
|
pallets/jinja
|
docs/conf.py
|
Python
|
bsd-3-clause
| 1,995
|
from __future__ import print_function
from getpass import getpass
from os.path import join, dirname
from re import match
from urllib import urlencode
from urlparse import urljoin
from datetime import datetime
from time import sleep
import json
from boto.ec2 import EC2Connection
from boto.route53 import Route53Connection
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from oauth2client.client import OAuth2WebServerFlow
from itsdangerous import Signer
import gspread
import requests
GITHUB_API_BASE = 'https://api.github.com/'
GDOCS_API_BASE = 'https://www.googleapis.com/drive/v2/files/'
CHIME_LOGIN_MASTER = '1P_X4B9aX7MTCln5ossJNNVkjxp4prnU5ny3SPeKg2qI'
CHIME_INSTANCES_LIST = '1ODc62B7clyNMzwRtpOeqDupsDdaomtfZK-Z_GX0CM90'
WEBHOOK_URL = 'https://chime-webhook.herokuapp.com'
def check_status(resp, task):
''' Raise a RuntimeError if response is not HTTP 2XX.
'''
    if resp.status_code not in range(200, 300):
raise RuntimeError('Got an HTTP error {} trying to {}'.format(resp.status_code, task))
def check_repo_state(reponame, token):
''' Return True if repository name exists.
'''
auth = token, 'x-oauth-basic'
path = '/repos/chimecms/{}'.format(reponame)
resp = requests.get(urljoin(GITHUB_API_BASE, path), auth=auth)
return bool(resp.status_code == 200)
def get_input(environ):
    ''' Read OAuth client details from the environment, prompt for Github
    credentials and a new repository name, and open AWS connections.
    '''
github_client_id = environ['GITHUB_CLIENT_ID']
github_client_secret = environ['GITHUB_CLIENT_SECRET']
gdocs_client_id = environ['GDOCS_CLIENT_ID']
gdocs_client_secret = environ['GDOCS_CLIENT_SECRET']
print('--> Enter Github details:')
username = raw_input(' Github username: ')
password = getpass(' Github password: ')
reponame = raw_input(' New Github repository name: ')
if not match(r'\w+(-\w+)*$', reponame):
raise RuntimeError('Repository "{}" does not match "\w+(-\w+)*$"'.format(reponame))
ec2 = EC2Connection()
route53 = Route53Connection()
return github_client_id, github_client_secret, \
gdocs_client_id, gdocs_client_secret, \
username, password, reponame, ec2, route53
def authenticate_google(gdocs_client_id, gdocs_client_secret):
    ''' Authenticate with Google Docs via the OAuth2 device flow and return credentials.
    '''
scopes = [
'https://spreadsheets.google.com/feeds/',
# http://stackoverflow.com/questions/24293523/im-trying-to-access-google-drive-through-the-cli-but-keep-getting-not-authori
'https://docs.google.com/feeds',
]
flow = OAuth2WebServerFlow(gdocs_client_id, gdocs_client_secret, scopes)
flow_info = flow.step1_get_device_and_user_codes()
user_code, verification_url = flow_info.user_code, flow_info.verification_url
print('--> Authenticate with Google Docs:')
print(' Visit {verification_url} with code "{user_code}"'.format(**locals()))
print(' (then come back here and press enter)')
raw_input()
credentials = flow.step2_exchange(device_flow_info=flow_info)
print('--> Google Docs authentication OK')
return credentials
def create_google_spreadsheet(credentials, reponame):
    ''' Copy the Chime login master spreadsheet, retitle and share it for the
    new repository, and return the new spreadsheet URL.
    '''
email = 'frances@codeforamerica.org'
headers = {'Content-Type': 'application/json'}
url = urljoin(GDOCS_API_BASE, '{}/copy'.format(CHIME_LOGIN_MASTER))
gc = gspread.authorize(credentials)
resp = gc.session.post(url, '{ }', headers=headers)
info = json.load(resp)
new_id = info['id']
print(' Created spreadsheet "{title}"'.format(**info))
url = urljoin(GDOCS_API_BASE, new_id)
new_title = 'Chime CMS logins for {reponame}'.format(**locals())
patch = dict(title=new_title)
gc = gspread.authorize(credentials)
gc.session.request('PATCH', url, json.dumps(patch), headers=headers)
print(' Updated title to "{new_title}"'.format(**locals()))
url = urljoin(GDOCS_API_BASE, '{new_id}/permissions'.format(**locals()))
permission = dict(type='anyone', role='reader', withLink=True)
gc = gspread.authorize(credentials)
gc.session.post(url, json.dumps(permission), headers=headers)
print(' Allowed anyone with the link to see "{new_title}"'.format(**locals()))
query = urlencode(dict(sendNotificationEmails='true', emailMessage='Yo.'))
url = urljoin(GDOCS_API_BASE, '{new_id}/permissions?{query}'.format(**locals()))
permission = dict(type='user', role='writer', emailAddress=email, value=email)
gc = gspread.authorize(credentials)
gc.session.post(url, json.dumps(permission), headers=headers)
print(' Invited {email} to "{new_title}"'.format(**locals()))
sheet_url = 'https://docs.google.com/spreadsheets/d/{}'.format(new_id)
print('--> Created spreadsheet {}'.format(sheet_url))
return sheet_url
def get_github_authorization(client_id, client_secret, auth):
''' Create a new authorization with Github.
https://developer.github.com/v3/oauth_authorizations/#create-a-new-authorization
'''
info = dict(
scopes='repo',
note='Chime setup script',
client_id=client_id,
client_secret=client_secret
)
url = urljoin(GITHUB_API_BASE, '/authorizations')
resp = requests.post(url, json.dumps(info), auth=auth)
check_status(resp, 'create a new authorization')
auth_id = resp.json().get('id')
temporary_token = resp.json().get('token')
print('--> Github authorization OK')
return auth_id, temporary_token
def verify_github_authorization(client_id, client_secret, temporary_token, auth_id):
''' Verify status of Github authorization.
https://developer.github.com/v3/oauth_authorizations/#check-an-authorization
'''
path = '/applications/{client_id}/tokens/{token}'
kwargs = dict(client_id=client_id, token=temporary_token)
url = urljoin(GITHUB_API_BASE, path.format(**kwargs))
resp = requests.get(url, auth=(client_id, client_secret))
check_status(resp, 'check authorization {}'.format(auth_id))
def create_ec2_instance(ec2, reponame, sheet_url, client_id, client_secret, token):
    ''' Launch a new EC2 instance configured from user-data.sh and wait for
    its public DNS name to become available.
    '''
with open(join(dirname(__file__), 'user-data.sh')) as file:
user_data = file.read().format(
branch_name='master',
ga_client_id=client_id,
ga_client_secret=client_secret,
github_temporary_token=token,
github_repo=reponame,
auth_data_href=sheet_url
)
device_sda1 = BlockDeviceType(size=16, delete_on_termination=True)
device_map = BlockDeviceMapping()
device_map['/dev/sda1'] = device_sda1
ec2_args = dict(instance_type='c3.large', user_data=user_data,
key_name='cfa-chime-keypair', block_device_map=device_map,
security_groups=['default'])
instance = ec2.run_instances('ami-f8763a90', **ec2_args).instances[0]
instance.add_tag('Name', 'Chime Test {}'.format(reponame))
print(' Prepared EC2 instance', instance.id)
while not instance.dns_name:
instance.update()
sleep(1)
print('--> Available at', instance.dns_name)
return instance
def add_github_webhook(reponame, auth):
''' Add a new repository webhook.
https://developer.github.com/v3/repos/hooks/#create-a-hook
'''
url = urljoin(GITHUB_API_BASE, '/repos/chimecms/{}/hooks'.format(reponame))
body = dict(name='web', config=dict(url=WEBHOOK_URL))
resp = requests.post(url, data=json.dumps(body), auth=auth)
code = resp.status_code
    if code not in range(200, 300):
raise RuntimeError('Github webhook creation failed, status {}'.format(code))
print('--> Webhook created')
def get_public_deploy_key(instance_dns_name, secret, salt):
''' Wait for and retrieve instance public key.
'''
signer = Signer(secret, salt)
path = '/.well-known/deploy-key.txt'
while True:
print(' Waiting for', path)
sleep(5)
resp = requests.get('http://{}{}'.format(instance_dns_name, path))
if resp.status_code == 200:
break
return signer.unsign(resp.content)
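def sign_deploy_key_example(public_key, secret, salt):
    ''' Illustrative sketch, not part of the setup flow: the instance side is
    assumed to publish its key signed with the same secret and salt, so the
    unsign() call above can verify the signature and strip it off again.
    '''
    signer = Signer(secret, salt)
    signed = signer.sign(public_key)  # public key with a signature appended
    return signer.unsign(signed)      # verifies and returns the original key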
def add_permanent_github_deploy_key(deploy_key, reponame, auth):
''' Add a new repository deploy key.
https://developer.github.com/v3/repos/keys/#create
'''
key_name = 'chimecms-key'
keys_url = urljoin(GITHUB_API_BASE, '/repos/chimecms/{}/keys'.format(reponame))
head = {'Content-Type': 'application/json'}
body = json.dumps(dict(title=key_name, key=deploy_key))
resp = requests.post(keys_url, body, headers=head, auth=auth)
code = resp.status_code
if code == 422:
# Github deploy key already exists, but likely to be tied to OAuth token.
# Delete it, and recreate with basic auth so it survives auth deletion.
resp = requests.get(keys_url, auth=auth)
key_url = [k['url'] for k in resp.json() if k['title'] == 'token-key'][0]
resp = requests.delete(key_url, auth=auth)
code = resp.status_code
        if code not in range(200, 300):
raise RuntimeError('Github deploy key deletion failed, status {}'.format(code))
print(' Deleted temporary token key')
resp = requests.post(keys_url, body, headers=head, auth=auth)
code = resp.status_code
        if code not in range(200, 300):
raise RuntimeError('Github deploy key recreation failed, status {}'.format(code))
    elif code not in range(200, 300):
raise RuntimeError('Github deploy key creation failed, status {}'.format(code))
print('--> Created permanent deploy key', key_name)
def delete_temporary_github_authorization(github_auth_id, auth):
''' Delete Github authorization.
https://developer.github.com/v3/oauth_authorizations/#delete-an-authorization
'''
url = urljoin(GITHUB_API_BASE, '/authorizations/{}'.format(github_auth_id))
resp = requests.delete(url, auth=auth)
check_status(resp, 'delete authorization {}'.format(github_auth_id))
print('--> Deleted temporary Github token')
def create_cname_record(route53, reponame, cname_value):
''' Write domain name to Route 53.
'''
cname = '{reponame}.chimecms.org'.format(**locals())
zone = route53.get_zone('chimecms.org')
zone.add_record('CNAME', cname, cname_value, 60)
print('--> Prepared DNS name', cname)
return cname
def save_details(credentials, name, cname, instance, reponame, sheet_url, deploy_key):
    ''' Append the new instance's details to the shared Chime instances spreadsheet.
    '''
print(' Preparing details for instances spreadsheet')
chimecms_url = 'http://{}'.format(cname)
instance_query = 'region={}#Instances:instanceId={}'.format(instance.region.name, instance.id)
instance_url = 'https://console.aws.amazon.com/ec2/v2/home?{}'.format(instance_query)
github_url = 'https://github.com/chimecms/{}'.format(reponame)
gc = gspread.authorize(credentials)
doc = gc.open_by_key(CHIME_INSTANCES_LIST)
sheet = doc.worksheet('Instances')
new_row = [str(datetime.utcnow()), name,
chimecms_url, instance_url, github_url, sheet_url, deploy_key]
sheet.append_row(new_row)
print('--> Wrote details to instances spreadsheet')
|
yudiutomo/chime
|
chime/instantiation/functions.py
|
Python
|
bsd-3-clause
| 11,112
|
"""
oauthlib.openid.connect.core.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import logging
from oauthlib.oauth2.rfc6749.grant_types.authorization_code import (
AuthorizationCodeGrant as OAuth2AuthorizationCodeGrant,
)
from .base import GrantTypeBase
log = logging.getLogger(__name__)
class AuthorizationCodeGrant(GrantTypeBase):
def __init__(self, request_validator=None, **kwargs):
self.proxy_target = OAuth2AuthorizationCodeGrant(
request_validator=request_validator, **kwargs)
self.custom_validators.post_auth.append(
self.openid_authorization_validator)
self.register_token_modifier(self.add_id_token)
def add_id_token(self, token, token_handler, request):
"""
Construct an initial version of id_token, and let the
request_validator sign or encrypt it.
The authorization_code version of this method is used to
retrieve the nonce accordingly to the code storage.
"""
# Treat it as normal OAuth 2 auth code request if openid is not present
if not request.scopes or 'openid' not in request.scopes:
return token
nonce = self.request_validator.get_authorization_code_nonce(
request.client_id,
request.code,
request.redirect_uri,
request
)
return super().add_id_token(token, token_handler, request, nonce=nonce)
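
# Minimal sketch (an assumption, not oauthlib code) of the validator hook used
# above: get_authorization_code_nonce() is expected to return whatever nonce
# was stored with the authorization code when it was issued.
class ExampleValidator:
    def __init__(self):
        self._nonces = {}  # hypothetical storage: (client_id, code) -> nonce

    def save_nonce(self, client_id, code, nonce):
        self._nonces[(client_id, code)] = nonce

    def get_authorization_code_nonce(self, client_id, code, redirect_uri, request):
        return self._nonces.get((client_id, code))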
|
idan/oauthlib
|
oauthlib/openid/connect/core/grant_types/authorization_code.py
|
Python
|
bsd-3-clause
| 1,441
|
# Generated by Django 1.10.7 on 2017-05-02 12:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("gallery", "0005_image_phash")]
operations = [
migrations.AddField(
model_name="image",
name="gallery_ready",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="image",
name="preview_ready",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="image",
name="thumbnail_ready",
field=models.BooleanField(default=False),
),
]
|
manti-by/M2-Blog-Engine
|
manti_by/apps/gallery/migrations/0006_auto_20170502_1246.py
|
Python
|
bsd-3-clause
| 694
|
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" Heading text. """
# Import the toolkit specific version.
from toolkit import toolkit_object
HeadingText = toolkit_object('heading_text:HeadingText')
#### EOF ######################################################################
|
pankajp/pyface
|
pyface/heading_text.py
|
Python
|
bsd-3-clause
| 876
|
from __future__ import unicode_literals
from collections import OrderedDict
import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from .models import TestObject, Order, RevisionableModel
class ExtraRegressTests(TestCase):
def setUp(self):
self.u = User.objects.create_user(
username="fred",
password="secret",
email="fred@example.com"
)
def test_regression_7314_7372(self):
"""
Regression tests for #7314 and #7372
"""
rm = RevisionableModel.objects.create(
title='First Revision',
when=datetime.datetime(2008, 9, 28, 10, 30, 0)
)
self.assertEqual(rm.pk, rm.base.pk)
rm2 = rm.new_revision()
rm2.title = "Second Revision"
rm.when = datetime.datetime(2008, 9, 28, 14, 25, 0)
rm2.save()
self.assertEqual(rm2.title, 'Second Revision')
self.assertEqual(rm2.base.title, 'First Revision')
self.assertNotEqual(rm2.pk, rm.pk)
self.assertEqual(rm2.base.pk, rm.pk)
# Queryset to match most recent revision:
qs = RevisionableModel.objects.extra(
where=["%(table)s.id IN (SELECT MAX(rev.id) FROM %(table)s rev GROUP BY rev.base_id)" % {
'table': RevisionableModel._meta.db_table,
}]
)
self.assertQuerysetEqual(qs,
[('Second Revision', 'First Revision')],
transform=lambda r: (r.title, r.base.title)
)
# Queryset to search for string in title:
qs2 = RevisionableModel.objects.filter(title__contains="Revision")
self.assertQuerysetEqual(qs2,
[
('First Revision', 'First Revision'),
('Second Revision', 'First Revision'),
],
transform=lambda r: (r.title, r.base.title),
ordered=False
)
# Following queryset should return the most recent revision:
self.assertQuerysetEqual(qs & qs2,
[('Second Revision', 'First Revision')],
transform=lambda r: (r.title, r.base.title),
ordered=False
)
def test_extra_stay_tied(self):
# Extra select parameters should stay tied to their corresponding
# select portions. Applies when portions are updated or otherwise
# moved around.
qs = User.objects.extra(
select=OrderedDict((("alpha", "%s"), ("beta", "2"), ("gamma", "%s"))),
select_params=(1, 3)
)
qs = qs.extra(select={"beta": 4})
qs = qs.extra(select={"alpha": "%s"}, select_params=[5])
self.assertEqual(
list(qs.filter(id=self.u.id).values('alpha', 'beta', 'gamma')),
[{'alpha': 5, 'beta': 4, 'gamma': 3}]
)
def test_regression_7957(self):
"""
Regression test for #7957: Combining extra() calls should leave the
corresponding parameters associated with the right extra() bit. I.e.
internal dictionary must remain sorted.
"""
self.assertEqual(
User.objects.extra(select={"alpha": "%s"}, select_params=(1,)
).extra(select={"beta": "%s"}, select_params=(2,))[0].alpha,
1)
self.assertEqual(
User.objects.extra(select={"beta": "%s"}, select_params=(1,)
).extra(select={"alpha": "%s"}, select_params=(2,))[0].alpha,
2)
def test_regression_7961(self):
"""
Regression test for #7961: When not using a portion of an
extra(...) in a query, remove any corresponding parameters from the
query as well.
"""
self.assertEqual(
list(User.objects.extra(select={"alpha": "%s"}, select_params=(-6,)
).filter(id=self.u.id).values_list('id', flat=True)),
[self.u.id]
)
def test_regression_8063(self):
"""
Regression test for #8063: limiting a query shouldn't discard any
extra() bits.
"""
qs = User.objects.all().extra(where=['id=%s'], params=[self.u.id])
self.assertQuerysetEqual(qs, ['<User: fred>'])
self.assertQuerysetEqual(qs[:1], ['<User: fred>'])
def test_regression_8039(self):
"""
Regression test for #8039: Ordering sometimes removed relevant tables
from extra(). This test is the critical case: ordering uses a table,
but then removes the reference because of an optimisation. The table
should still be present because of the extra() call.
"""
self.assertQuerysetEqual(
Order.objects.extra(where=["username=%s"],
params=["fred"],
tables=["auth_user"]
).order_by('created_by'),
[]
)
def test_regression_8819(self):
"""
Regression test for #8819: Fields in the extra(select=...) list
should be available to extra(order_by=...).
"""
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}).distinct(),
['<User: fred>']
)
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']),
['<User: fred>']
)
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']).distinct(),
['<User: fred>']
)
def test_dates_query(self):
"""
When calling the dates() method on a queryset with extra selection
columns, we can (and should) ignore those columns. They don't change
the result and cause incorrect SQL to be produced otherwise.
"""
RevisionableModel.objects.create(
title='First Revision',
when=datetime.datetime(2008, 9, 28, 10, 30, 0)
)
self.assertQuerysetEqual(
RevisionableModel.objects.extra(select={"the_answer": 'id'}).datetimes('when', 'month'),
[datetime.datetime(2008, 9, 1, 0, 0)],
transform=lambda d: d,
)
def test_values_with_extra(self):
"""
Regression test for #10256... If there is a values() clause, Extra
columns are only returned if they are explicitly mentioned.
"""
obj = TestObject(first='first', second='second', third='third')
obj.save()
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values()),
[{'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first'}]
)
# Extra clauses after an empty values clause are still included
self.assertEqual(
list(TestObject.objects.values().extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))),
[{'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first'}]
)
# Extra columns are ignored if not mentioned in the values() clause
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values('first', 'second')),
[{'second': 'second', 'first': 'first'}]
)
# Extra columns after a non-empty values() clause are ignored
self.assertEqual(
list(TestObject.objects.values('first', 'second').extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))),
[{'second': 'second', 'first': 'first'}]
)
# Extra columns can be partially returned
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values('first', 'second', 'foo')),
[{'second': 'second', 'foo': 'first', 'first': 'first'}]
)
# Also works if only extra columns are included
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values('foo', 'whiz')),
[{'foo': 'first', 'whiz': 'third'}]
)
# Values list works the same way
# All columns are returned for an empty values_list()
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list()),
[('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
)
# Extra columns after an empty values_list() are still included
self.assertEqual(
list(TestObject.objects.values_list().extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))),
[('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
)
# Extra columns ignored completely if not mentioned in values_list()
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('first', 'second')),
[('first', 'second')]
)
# Extra columns after a non-empty values_list() clause are ignored completely
self.assertEqual(
list(TestObject.objects.values_list('first', 'second').extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third'))))),
[('first', 'second')]
)
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('second', flat=True)),
['second']
)
# Only the extra columns specified in the values_list() are returned
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('first', 'second', 'whiz')),
[('first', 'second', 'third')]
)
# ...also works if only extra columns are included
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('foo', 'whiz')),
[('first', 'third')]
)
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('whiz', flat=True)),
['third']
)
# ... and values are returned in the order they are specified
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('whiz', 'foo')),
[('third', 'first')]
)
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('first', 'id')),
[('first', obj.pk)]
)
self.assertEqual(
list(TestObject.objects.extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))).values_list('whiz', 'first', 'bar', 'id')),
[('third', 'first', 'second', obj.pk)]
)
def test_regression_10847(self):
"""
Regression for #10847: the list of extra columns can always be
accurately evaluated. Using an inner query ensures that as_sql() is
producing correct output without requiring full evaluation and
execution of the inner query.
"""
obj = TestObject(first='first', second='second', third='third')
obj.save()
self.assertEqual(
list(TestObject.objects.extra(select={'extra': 1}).values('pk')),
[{'pk': obj.pk}]
)
self.assertQuerysetEqual(
TestObject.objects.filter(
pk__in=TestObject.objects.extra(select={'extra': 1}).values('pk')
),
['<TestObject: TestObject: first,second,third>']
)
self.assertEqual(
list(TestObject.objects.values('pk').extra(select={'extra': 1})),
[{'pk': obj.pk}]
)
self.assertQuerysetEqual(
TestObject.objects.filter(
pk__in=TestObject.objects.values('pk').extra(select={'extra': 1})
),
['<TestObject: TestObject: first,second,third>']
)
self.assertQuerysetEqual(
TestObject.objects.filter(pk=obj.pk) | TestObject.objects.extra(where=["id > %s"], params=[obj.pk]),
['<TestObject: TestObject: first,second,third>']
)
def test_regression_17877(self):
"""
Ensure that extra WHERE clauses get correctly ANDed, even when they
contain OR operations.
"""
# Test Case 1: should appear in queryset.
t = TestObject(first='a', second='a', third='a')
t.save()
# Test Case 2: should appear in queryset.
t = TestObject(first='b', second='a', third='a')
t.save()
# Test Case 3: should not appear in queryset, bug case.
t = TestObject(first='a', second='a', third='b')
t.save()
# Test Case 4: should not appear in queryset.
t = TestObject(first='b', second='a', third='b')
t.save()
# Test Case 5: should not appear in queryset.
t = TestObject(first='b', second='b', third='a')
t.save()
# Test Case 6: should not appear in queryset, bug case.
t = TestObject(first='a', second='b', third='b')
t.save()
self.assertQuerysetEqual(
TestObject.objects.extra(
where=["first = 'a' OR second = 'a'", "third = 'a'"],
),
['<TestObject: TestObject: a,a,a>', '<TestObject: TestObject: b,a,a>'],
ordered=False
)
def test_extra_values_distinct_ordering(self):
t1 = TestObject.objects.create(first='a', second='a', third='a')
t2 = TestObject.objects.create(first='a', second='b', third='b')
qs = TestObject.objects.extra(
select={'second_extra': 'second'}
).values_list('id', flat=True).distinct()
self.assertQuerysetEqual(
qs.order_by('second_extra'), [t1.pk, t2.pk], lambda x: x)
self.assertQuerysetEqual(
qs.order_by('-second_extra'), [t2.pk, t1.pk], lambda x: x)
# Note: the extra ordering must appear in select clause, so we get two
# non-distinct results here (this is on purpose, see #7070).
self.assertQuerysetEqual(
qs.order_by('-second_extra').values_list('first', flat=True),
['a', 'a'], lambda x: x)
|
rogerhu/django
|
tests/extra_regress/tests.py
|
Python
|
bsd-3-clause
| 14,987
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Simple utility functions and bug fixes for compatibility with all supported
versions of Python. This module should generally not be used directly, as
everything in `__all__` will be imported into `astropy.utils.compat` and can
be accessed from there.
"""
import sys
import functools
from contextlib import suppress
__all__ = ['override__dir__', 'suppress',
'possible_filename', 'namedtuple_asdict']
def possible_filename(filename):
"""
Determine if the ``filename`` argument is an allowable type for a filename.
In Python 3.3 use of non-unicode filenames on system calls such as
`os.stat` and others that accept a filename argument was deprecated (and
may be removed outright in the future).
Therefore this returns `True` in all cases except for `bytes` strings in
Windows.
"""
if isinstance(filename, str):
return True
elif isinstance(filename, bytes):
return not (sys.platform == 'win32')
return False
def override__dir__(f):
"""
When overriding a __dir__ method on an object, you often want to
include the "standard" members on the object as well. This
decorator takes care of that automatically, and all the wrapped
function needs to do is return a list of the "special" members
that wouldn't be found by the normal Python means.
Example
-------
@override__dir__
def __dir__(self):
return ['special_method1', 'special_method2']
"""
# http://bugs.python.org/issue12166
@functools.wraps(f)
def override__dir__wrapper(self):
members = set(object.__dir__(self))
members.update(f(self))
return sorted(members)
return override__dir__wrapper
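# Minimal usage sketch (illustrative, not astropy API): the decorated __dir__
# only returns the extra names; the decorator merges in the standard members.
class _DirExample:
    @override__dir__
    def __dir__(self):
        return ['special_method1', 'special_method2']

# dir(_DirExample()) now lists the usual object attributes plus the two
# special names, in sorted order.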
def namedtuple_asdict(namedtuple):
"""
    The same as ``namedtuple._asdict()``.
Parameters
----------
namedtuple : collections.namedtuple
The named tuple to get the dict of
"""
return namedtuple._asdict()
|
bsipocz/astropy
|
astropy/utils/compat/misc.py
|
Python
|
bsd-3-clause
| 2,023
|
import json
import unittest
import urllib.request
from unittest.mock import patch
from urllib.error import URLError
from django import template
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from django.urls import reverse
from wagtail.core import blocks
from wagtail.embeds import oembed_providers
from wagtail.embeds.blocks import EmbedBlock, EmbedValue
from wagtail.embeds.embeds import get_embed
from wagtail.embeds.exceptions import EmbedNotFoundException, EmbedUnsupportedProviderException
from wagtail.embeds.finders import get_finders
from wagtail.embeds.finders.embedly import EmbedlyFinder as EmbedlyFinder
from wagtail.embeds.finders.embedly import AccessDeniedEmbedlyException, EmbedlyException
from wagtail.embeds.finders.oembed import OEmbedFinder as OEmbedFinder
from wagtail.embeds.models import Embed
from wagtail.embeds.templatetags.wagtailembeds_tags import embed_tag
from wagtail.tests.utils import WagtailTestUtils
try:
import embedly # noqa
no_embedly = False
except ImportError:
no_embedly = True
class TestGetFinders(TestCase):
def test_defaults_to_oembed(self):
finders = get_finders()
self.assertEqual(len(finders), 1)
self.assertIsInstance(finders[0], OEmbedFinder)
# New WAGTAILEMBEDS_FINDERS setting
@override_settings(WAGTAILEMBEDS_FINDERS=[
{
'class': 'wagtail.embeds.finders.oembed'
}
])
def test_new_find_oembed(self):
finders = get_finders()
self.assertEqual(len(finders), 1)
self.assertIsInstance(finders[0], OEmbedFinder)
@override_settings(WAGTAILEMBEDS_FINDERS=[
{
'class': 'wagtail.embeds.finders.embedly',
'key': 'foo',
}
])
def test_new_find_embedly(self):
finders = get_finders()
self.assertEqual(len(finders), 1)
self.assertIsInstance(finders[0], EmbedlyFinder)
self.assertEqual(finders[0].get_key(), 'foo')
@override_settings(WAGTAILEMBEDS_FINDERS=[
{
'class': 'wagtail.embeds.finders.oembed',
'options': {'foo': 'bar'}
}
])
def test_new_find_oembed_with_options(self):
finders = get_finders()
self.assertEqual(len(finders), 1)
self.assertIsInstance(finders[0], OEmbedFinder)
self.assertEqual(finders[0].options, {'foo': 'bar'})
class TestEmbeds(TestCase):
def setUp(self):
self.hit_count = 0
def dummy_finder(self, url, max_width=None):
# Up hit count
self.hit_count += 1
# Return a pretend record
return {
'title': "Test: " + url,
'type': 'video',
'thumbnail_url': '',
'width': max_width if max_width else 640,
'height': 480,
'html': "<p>Blah blah blah</p>",
}
def test_get_embed(self):
embed = get_embed('www.test.com/1234', max_width=400, finder=self.dummy_finder)
# Check that the embed is correct
self.assertEqual(embed.title, "Test: www.test.com/1234")
self.assertEqual(embed.type, 'video')
self.assertEqual(embed.width, 400)
# Check ratio calculations
self.assertEqual(embed.ratio, 480 / 400)
self.assertEqual(embed.ratio_css, '120.0%')
self.assertTrue(embed.is_responsive)
# Check that there has only been one hit to the backend
self.assertEqual(self.hit_count, 1)
# Look for the same embed again and check the hit count hasn't increased
embed = get_embed('www.test.com/1234', max_width=400, finder=self.dummy_finder)
self.assertEqual(self.hit_count, 1)
# Look for a different embed, hit count should increase
embed = get_embed('www.test.com/4321', max_width=400, finder=self.dummy_finder)
self.assertEqual(self.hit_count, 2)
# Look for the same embed with a different width, this should also increase hit count
embed = get_embed('www.test.com/4321', finder=self.dummy_finder)
self.assertEqual(self.hit_count, 3)
def dummy_finder_invalid_width(self, url, max_width=None):
# Return a record with an invalid width
return {
'title': "Test: " + url,
'type': 'video',
'thumbnail_url': '',
'width': '100%',
'height': 480,
'html': "<p>Blah blah blah</p>",
}
def test_invalid_width(self):
embed = get_embed('www.test.com/1234', max_width=400, finder=self.dummy_finder_invalid_width)
# Width must be set to None
self.assertEqual(embed.width, None)
def test_no_html(self):
def no_html_finder(url, max_width=None):
"""
A finder which returns everything but HTML
"""
embed = self.dummy_finder(url, max_width)
embed['html'] = None
return embed
embed = get_embed('www.test.com/1234', max_width=400, finder=no_html_finder)
self.assertEqual(embed.html, '')
@override_settings(WAGTAILEMBEDS_FINDERS=[])
def test_no_finders_available(self):
with self.assertRaises(EmbedUnsupportedProviderException):
get_embed('www.test.com/1234', max_width=400)
class TestChooser(TestCase, WagtailTestUtils):
def setUp(self):
# login
self.login()
def test_chooser(self):
r = self.client.get('/admin/embeds/chooser/')
self.assertEqual(r.status_code, 200)
def test_chooser_with_edit_params(self):
r = self.client.get('/admin/embeds/chooser/?url=http://example2.com')
self.assertEqual(r.status_code, 200)
response_json = json.loads(r.content.decode())
self.assertEqual(response_json['step'], 'chooser')
self.assertIn('value="http://example2.com"', response_json['html'])
@patch('wagtail.embeds.embeds.get_embed')
def test_submit_valid_embed(self, get_embed):
get_embed.return_value = Embed(html='<img src="http://www.example.com" />', title="An example embed")
response = self.client.post(reverse('wagtailembeds:chooser_upload'), {
'embed-chooser-url': 'http://www.example.com/'
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content.decode())
self.assertEqual(response_json['step'], 'embed_chosen')
self.assertEqual(response_json['embed_data']['title'], "An example embed")
@patch('wagtail.embeds.embeds.get_embed')
def test_submit_unrecognised_embed(self, get_embed):
get_embed.side_effect = EmbedNotFoundException
response = self.client.post(reverse('wagtailembeds:chooser_upload'), {
'embed-chooser-url': 'http://www.example.com/'
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content.decode())
self.assertEqual(response_json['step'], 'chooser')
self.assertIn("Cannot find an embed for this URL.", response_json['html'])
class TestEmbedly(TestCase):
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_oembed_called_with_correct_arguments(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'photo',
'url': 'http://www.example.com'}
EmbedlyFinder(key='foo').find_embed('http://www.example.com')
oembed.assert_called_with('http://www.example.com', better=False)
EmbedlyFinder(key='foo').find_embed('http://www.example.com', max_width=100)
oembed.assert_called_with('http://www.example.com', maxwidth=100, better=False)
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_401(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'photo',
'url': 'http://www.example.com',
'error': True,
'error_code': 401}
self.assertRaises(AccessDeniedEmbedlyException,
EmbedlyFinder(key='foo').find_embed, 'http://www.example.com')
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_403(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'photo',
'url': 'http://www.example.com',
'error': True,
'error_code': 403}
self.assertRaises(AccessDeniedEmbedlyException,
EmbedlyFinder(key='foo').find_embed, 'http://www.example.com')
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_404(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'photo',
'url': 'http://www.example.com',
'error': True,
'error_code': 404}
self.assertRaises(EmbedNotFoundException,
EmbedlyFinder(key='foo').find_embed, 'http://www.example.com')
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_other_error(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'photo',
'url': 'http://www.example.com',
'error': True,
'error_code': 999}
self.assertRaises(EmbedlyException, EmbedlyFinder(key='foo').find_embed,
'http://www.example.com')
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_html_conversion(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'photo',
'url': 'http://www.example.com'}
result = EmbedlyFinder(key='foo').find_embed('http://www.example.com')
self.assertEqual(result['html'], '<img src="http://www.example.com" alt="">')
oembed.return_value = {'type': 'something else',
'html': '<foo>bar</foo>'}
result = EmbedlyFinder(key='foo').find_embed('http://www.example.com')
self.assertEqual(result['html'], '<foo>bar</foo>')
@unittest.skipIf(no_embedly, "Embedly is not installed")
def test_embedly_return_value(self):
with patch('embedly.Embedly.oembed') as oembed:
oembed.return_value = {'type': 'something else',
'html': '<foo>bar</foo>'}
result = EmbedlyFinder(key='foo').find_embed('http://www.example.com')
self.assertEqual(result, {
'title': '',
'author_name': '',
'provider_name': '',
'type': 'something else',
'thumbnail_url': None,
'width': None,
'height': None,
'html': '<foo>bar</foo>'})
oembed.return_value = {'type': 'something else',
'author_name': 'Alice',
'provider_name': 'Bob',
'title': 'foo',
'thumbnail_url': 'http://www.example.com',
'width': 100,
'height': 100,
'html': '<foo>bar</foo>'}
result = EmbedlyFinder(key='foo').find_embed('http://www.example.com')
self.assertEqual(result, {'type': 'something else',
'author_name': 'Alice',
'provider_name': 'Bob',
'title': 'foo',
'thumbnail_url': 'http://www.example.com',
'width': 100,
'height': 100,
'html': '<foo>bar</foo>'})
class TestOembed(TestCase):
def setUp(self):
class DummyResponse:
def read(self):
return b"foo"
self.dummy_response = DummyResponse()
def test_oembed_invalid_provider(self):
self.assertRaises(EmbedNotFoundException, OEmbedFinder().find_embed, "foo")
def test_oembed_invalid_request(self):
config = {'side_effect': URLError('foo')}
with patch.object(urllib.request, 'urlopen', **config):
self.assertRaises(EmbedNotFoundException, OEmbedFinder().find_embed,
"http://www.youtube.com/watch/")
@patch('urllib.request.urlopen')
@patch('json.loads')
def test_oembed_photo_request(self, loads, urlopen):
urlopen.return_value = self.dummy_response
loads.return_value = {'type': 'photo',
'url': 'http://www.example.com'}
result = OEmbedFinder().find_embed("http://www.youtube.com/watch/")
self.assertEqual(result['type'], 'photo')
self.assertEqual(result['html'], '<img src="http://www.example.com" alt="">')
loads.assert_called_with("foo")
@patch('urllib.request.urlopen')
@patch('json.loads')
def test_oembed_return_values(self, loads, urlopen):
urlopen.return_value = self.dummy_response
loads.return_value = {
'type': 'something',
'url': 'http://www.example.com',
'title': 'test_title',
'author_name': 'test_author',
'provider_name': 'test_provider_name',
'thumbnail_url': 'test_thumbail_url',
'width': 'test_width',
'height': 'test_height',
'html': 'test_html'
}
result = OEmbedFinder().find_embed("http://www.youtube.com/watch/")
self.assertEqual(result, {
'type': 'something',
'title': 'test_title',
'author_name': 'test_author',
'provider_name': 'test_provider_name',
'thumbnail_url': 'test_thumbail_url',
'width': 'test_width',
'height': 'test_height',
'html': 'test_html'
})
def test_oembed_accepts_known_provider(self):
finder = OEmbedFinder(providers=[oembed_providers.youtube])
self.assertTrue(finder.accept("http://www.youtube.com/watch/"))
def test_oembed_doesnt_accept_unknown_provider(self):
finder = OEmbedFinder(providers=[oembed_providers.twitter])
self.assertFalse(finder.accept("http://www.youtube.com/watch/"))
@patch('urllib.request.urlopen')
@patch('json.loads')
def test_endpoint_with_format_param(self, loads, urlopen):
urlopen.return_value = self.dummy_response
loads.return_value = {'type': 'video',
'url': 'http://www.example.com'}
result = OEmbedFinder().find_embed("https://vimeo.com/217403396")
self.assertEqual(result['type'], 'video')
request = urlopen.call_args[0][0]
self.assertEqual(request.get_full_url().split('?')[0], "http://www.vimeo.com/api/oembed.json")
class TestEmbedTag(TestCase):
@patch('wagtail.embeds.embeds.get_embed')
def test_direct_call(self, get_embed):
get_embed.return_value = Embed(html='<img src="http://www.example.com" />')
result = embed_tag('http://www.youtube.com/watch/')
self.assertEqual(result, '<img src="http://www.example.com" />')
@patch('wagtail.embeds.embeds.get_embed')
def test_call_from_template(self, get_embed):
get_embed.return_value = Embed(html='<img src="http://www.example.com" />')
temp = template.Template('{% load wagtailembeds_tags %}{% embed "http://www.youtube.com/watch/" %}')
result = temp.render(template.Context())
self.assertEqual(result, '<img src="http://www.example.com" />')
@patch('wagtail.embeds.embeds.get_embed')
def test_catches_embed_not_found(self, get_embed):
get_embed.side_effect = EmbedNotFoundException
temp = template.Template('{% load wagtailembeds_tags %}{% embed "http://www.youtube.com/watch/" %}')
result = temp.render(template.Context())
self.assertEqual(result, '')
class TestEmbedBlock(TestCase):
def test_deserialize(self):
"""
Deserialising the JSONish value of an EmbedBlock (a URL) should give us an EmbedValue
for that URL
"""
block = EmbedBlock(required=False)
block_val = block.to_python('http://www.example.com/foo')
self.assertIsInstance(block_val, EmbedValue)
self.assertEqual(block_val.url, 'http://www.example.com/foo')
# empty values should yield None
empty_block_val = block.to_python('')
self.assertEqual(empty_block_val, None)
def test_serialize(self):
block = EmbedBlock(required=False)
block_val = EmbedValue('http://www.example.com/foo')
serialized_val = block.get_prep_value(block_val)
self.assertEqual(serialized_val, 'http://www.example.com/foo')
serialized_empty_val = block.get_prep_value(None)
self.assertEqual(serialized_empty_val, '')
@patch('wagtail.embeds.embeds.get_embed')
def test_render(self, get_embed):
get_embed.return_value = Embed(html='<h1>Hello world!</h1>')
block = EmbedBlock()
block_val = block.to_python('http://www.example.com/foo')
temp = template.Template('embed: {{ embed }}')
context = template.Context({'embed': block_val})
result = temp.render(context)
# Check that the embed was in the returned HTML
self.assertIn('<h1>Hello world!</h1>', result)
# Check that get_embed was called correctly
get_embed.assert_any_call('http://www.example.com/foo')
@patch('wagtail.embeds.embeds.get_embed')
def test_render_within_structblock(self, get_embed):
"""
When rendering the value of an EmbedBlock directly in a template
(as happens when accessing it as a child of a StructBlock), the
proper embed output should be rendered, not the URL.
"""
get_embed.return_value = Embed(html='<h1>Hello world!</h1>')
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('embed', EmbedBlock()),
])
block_val = block.to_python({'title': 'A test', 'embed': 'http://www.example.com/foo'})
temp = template.Template('embed: {{ self.embed }}')
context = template.Context({'self': block_val})
result = temp.render(context)
self.assertIn('<h1>Hello world!</h1>', result)
# Check that get_embed was called correctly
get_embed.assert_any_call('http://www.example.com/foo')
def test_render_form(self):
"""
The form field for an EmbedBlock should be a text input containing
the URL
"""
block = EmbedBlock()
form_html = block.render_form(EmbedValue('http://www.example.com/foo'), prefix='myembed')
self.assertIn('<input ', form_html)
self.assertIn('value="http://www.example.com/foo"', form_html)
def test_value_from_form(self):
"""
EmbedBlock should be able to turn a URL submitted as part of a form
back into an EmbedValue
"""
block = EmbedBlock(required=False)
block_val = block.value_from_datadict({'myembed': 'http://www.example.com/foo'}, {}, prefix='myembed')
self.assertIsInstance(block_val, EmbedValue)
self.assertEqual(block_val.url, 'http://www.example.com/foo')
# empty value should result in None
empty_val = block.value_from_datadict({'myembed': ''}, {}, prefix='myembed')
self.assertEqual(empty_val, None)
def test_default(self):
block1 = EmbedBlock()
self.assertEqual(block1.get_default(), None)
block2 = EmbedBlock(default='')
self.assertEqual(block2.get_default(), None)
block3 = EmbedBlock(default=None)
self.assertEqual(block3.get_default(), None)
block4 = EmbedBlock(default='http://www.example.com/foo')
self.assertIsInstance(block4.get_default(), EmbedValue)
self.assertEqual(block4.get_default().url, 'http://www.example.com/foo')
block5 = EmbedBlock(default=EmbedValue('http://www.example.com/foo'))
self.assertIsInstance(block5.get_default(), EmbedValue)
self.assertEqual(block5.get_default().url, 'http://www.example.com/foo')
@patch('wagtail.embeds.embeds.get_embed')
def test_clean_required(self, get_embed):
get_embed.return_value = Embed(html='<h1>Hello world!</h1>')
block = EmbedBlock()
cleaned_value = block.clean(
EmbedValue('https://www.youtube.com/watch?v=_U79Wc965vw'))
self.assertIsInstance(cleaned_value, EmbedValue)
self.assertEqual(cleaned_value.url,
'https://www.youtube.com/watch?v=_U79Wc965vw')
with self.assertRaisesMessage(ValidationError, ''):
block.clean(None)
@patch('wagtail.embeds.embeds.get_embed')
def test_clean_non_required(self, get_embed):
get_embed.return_value = Embed(html='<h1>Hello world!</h1>')
block = EmbedBlock(required=False)
cleaned_value = block.clean(
EmbedValue('https://www.youtube.com/watch?v=_U79Wc965vw'))
self.assertIsInstance(cleaned_value, EmbedValue)
self.assertEqual(cleaned_value.url,
'https://www.youtube.com/watch?v=_U79Wc965vw')
cleaned_value = block.clean(None)
self.assertIsNone(cleaned_value)
@patch('wagtail.embeds.embeds.get_embed')
def test_clean_invalid_url(self, get_embed):
get_embed.side_effect = EmbedNotFoundException
non_required_block = EmbedBlock(required=False)
with self.assertRaises(ValidationError):
non_required_block.clean(
EmbedValue('http://no-oembed-here.com/something'))
required_block = EmbedBlock()
with self.assertRaises(ValidationError):
required_block.clean(
EmbedValue('http://no-oembed-here.com/something'))
|
mikedingjan/wagtail
|
wagtail/embeds/tests/test_embeds.py
|
Python
|
bsd-3-clause
| 22,486
|
# -*- coding: utf-8 -*-
from quixote.errors import PublishError
class CodeAPIError(PublishError):
title = "Internal Server Error"
status_code = 400
problem_type = ""
def __init__(self, detail=""):
super(CodeAPIError, self).__init__()
self.detail = detail
def __dict__(self):
error_data = {
"type": self.problem_type,
"title": self.title,
"code": self.status_code,
"message": self.detail,
}
return error_data
def to_dict(self):
return self.__dict__()
class NotJSONError(CodeAPIError):
title = "Request body can not be parsed into JSON"
status_code = 400
problem_type = "not_json"
class UnauthorizedError(CodeAPIError):
title = "Unauthorized Request"
status_code = 401
problem_type = "unauthorized"
class ForbiddenError(CodeAPIError):
title = "You have NO permissions to perform this request"
status_code = 403
problem_type = "forbidden"
class NoPushPermissionError(ForbiddenError):
title = "You have no permission to push to this repository"
problem_type = "no_push_permission"
class NotFoundError(CodeAPIError):
title = "The resource you request does NOT exist"
status_code = 404
problem_type = "not_found"
def __init__(self, resource_name="resource"):
detail = "The %s your requested can not be found, it might have been removed" % resource_name # noqa
super(NotFoundError, self).__init__(detail)
class MethodNotAllowedError(CodeAPIError):
title = "The request method is not allowed"
status_code = 405
problem_type = "method_not_allowed"
class NotAcceptableError(CodeAPIError):
title = "The request is not acceptable"
status_code = 406
problem_type = "not_accetable"
class UnprocessableEntityError(CodeAPIError):
title = "Validation Failed"
status_code = 422
problem_type = "validation_failed"
class InvalidFieldError(UnprocessableEntityError):
title = "The request's query data is invalid"
def __init__(self, field_name, format_desc="a right one"):
detail = "The field %s is invalid, you need supply %s." % (
field_name, format_desc)
super(InvalidFieldError, self).__init__(detail)
class MissingFieldError(UnprocessableEntityError):
title = "Missing field"
def __init__(self, field):
if isinstance(field, basestring):
detail = "This field is required: %s" % field
elif isinstance(field, list):
detail = "These fields are required: %s" % ", ".join(field)
super(MissingFieldError, self).__init__(detail)
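
# Illustrative sketch (not part of the module): every error serializes to the
# same problem-style dict via to_dict(), e.g.
#   NotFoundError("repository").to_dict() ->
#       {"type": "not_found",
#        "title": "The resource you request does NOT exist",
#        "code": 404,
#        "message": "The repository your requested can not be found, "
#                   "it might have been removed"}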
|
douban/code
|
vilya/views/api/errors.py
|
Python
|
bsd-3-clause
| 2,662
|
from __future__ import absolute_import, unicode_literals
import abc
from textwrap import dedent
from six import add_metaclass
from virtualenv.create.describe import Python3Supports
from virtualenv.create.via_global_ref.builtin.ref import PathRefToDest
from virtualenv.create.via_global_ref.store import is_store_python
from virtualenv.util.path import Path
from .common import CPython, CPythonPosix, CPythonWindows, is_mac_os_framework
@add_metaclass(abc.ABCMeta)
class CPython3(CPython, Python3Supports):
""" """
class CPython3Posix(CPythonPosix, CPython3):
@classmethod
def can_describe(cls, interpreter):
return is_mac_os_framework(interpreter) is False and super(CPython3Posix, cls).can_describe(interpreter)
def env_patch_text(self):
text = super(CPython3Posix, self).env_patch_text()
if self.pyvenv_launch_patch_active(self.interpreter):
text += dedent(
"""
# for https://github.com/python/cpython/pull/9516, see https://github.com/pypa/virtualenv/issues/1704
import os
if "__PYVENV_LAUNCHER__" in os.environ:
del os.environ["__PYVENV_LAUNCHER__"]
""",
)
return text
@classmethod
def pyvenv_launch_patch_active(cls, interpreter):
ver = interpreter.version_info
return interpreter.platform == "darwin" and ((3, 7, 8) > ver >= (3, 7) or (3, 8, 3) > ver >= (3, 8))
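
# Worked example (illustrative) of pyvenv_launch_patch_active above:
#   darwin, version_info (3, 7, 5): (3, 7, 8) > (3, 7, 5) >= (3, 7)  -> patched
#   darwin, version_info (3, 8, 3): neither version range matches    -> not patched
#   any non-darwin platform: never patched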
class CPython3Windows(CPythonWindows, CPython3):
""" """
@classmethod
def setup_meta(cls, interpreter):
if is_store_python(interpreter): # store python is not supported here
return None
return super(CPython3Windows, cls).setup_meta(interpreter)
@classmethod
def sources(cls, interpreter):
for src in super(CPython3Windows, cls).sources(interpreter):
yield src
if not cls.has_shim(interpreter):
for src in cls.include_dll_and_pyd(interpreter):
yield src
@classmethod
def has_shim(cls, interpreter):
return interpreter.version_info.minor >= 7 and cls.shim(interpreter) is not None
@classmethod
def shim(cls, interpreter):
shim = Path(interpreter.system_stdlib) / "venv" / "scripts" / "nt" / "python.exe"
if shim.exists():
return shim
return None
@classmethod
def host_python(cls, interpreter):
if cls.has_shim(interpreter):
# starting with CPython 3.7 Windows ships with a venvlauncher.exe that avoids the need for dll/pyd copies
# it also means the wrapper must be copied to avoid bugs such as https://bugs.python.org/issue42013
return cls.shim(interpreter)
return super(CPython3Windows, cls).host_python(interpreter)
@classmethod
def include_dll_and_pyd(cls, interpreter):
dll_folder = Path(interpreter.system_prefix) / "DLLs"
host_exe_folder = Path(interpreter.system_executable).parent
for folder in [host_exe_folder, dll_folder]:
for file in folder.iterdir():
if file.suffix in (".pyd", ".dll"):
yield PathRefToDest(file, dest=cls.to_dll_and_pyd)
def to_dll_and_pyd(self, src):
return self.bin_dir / src.name
|
pypa/virtualenv
|
src/virtualenv/create/via_global_ref/builtin/cpython/cpython3.py
|
Python
|
mit
| 3,312
|
import time
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function_hooks
from chainer import functions
from chainer.functions.math import basic_math
from chainer import testing
from chainer.testing import attr
def check_history(self, t, function_type, return_type):
func_name = t[0]
assert func_name == function_type.__name__
self.assertIsInstance(t[1], return_type)
class SimpleLink(chainer.Link):
def __init__(self):
super(SimpleLink, self).__init__()
with self.init_scope():
init_w = numpy.random.uniform(-1, 1, (3, 5)).astype(
numpy.float32)
self.w = chainer.Parameter(init_w)
def forward(self, x):
return self.w * x
class TestTimerHookToLink(unittest.TestCase):
def setUp(self):
self.h = function_hooks.TimerHook()
self.l = SimpleLink()
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def test_name(self):
self.assertEqual(self.h.name, 'TimerHook')
def check_forward(self, x):
with self.h:
self.l(chainer.Variable(x))
self.assertEqual(1, len(self.h.call_history))
check_history(self, self.h.call_history[0], basic_math.Mul, float)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.l.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x, gy):
x = chainer.Variable(x)
y = self.l(x)
y.grad = gy
with self.h:
y.backward()
        # It includes the forward of + that accumulates gradients into the parameter w
self.assertEqual(3, len(self.h.call_history), self.h.call_history)
for entry in self.h.call_history:
if entry[0] == 'Add':
continue
check_history(self, entry, basic_math.Mul, float)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.l.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestTimerHookToFunction(unittest.TestCase):
def setUp(self):
self.h = function_hooks.TimerHook()
self.f = functions.math.exponential.Exp()
self.f.add_hook(self.h)
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def check_forward(self, x):
self.f.apply((chainer.Variable(x),))
self.assertEqual(1, len(self.h.call_history))
check_history(self, self.h.call_history[0],
functions.math.exponential.Exp, float)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x, gy):
x = chainer.Variable(x)
y = self.f.apply((x,))[0]
y.grad = gy
y.backward()
self.assertEqual(2, len(self.h.call_history))
check_history(self, self.h.call_history[1],
functions.math.exponential.Exp, float)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def test_reentrant(self):
        # Input/grad data are random; they do not simulate cases that can
        # actually occur.
# any function other than Exp is ok
g = functions.math.identity.Identity()
self.h.backward_preprocess(self.f, (self.x,), (self.gy,))
t1 = time.time()
time.sleep(0.001) # longer than each hook call
self.h.forward_preprocess(g, (self.x,))
self.h.forward_postprocess(g, (self.x,))
t2 = time.time()
self.h.backward_postprocess(self.f, (self.x,), (self.gy,))
history = dict(self.h.call_history)
self.assertEqual(len(history), 2)
self.assertIn(self.f._impl_name, history)
self.assertIn(g._impl_name, history)
f_time = history[self.f._impl_name]
g_time = history[g._impl_name]
self.assertLessEqual(g_time, t2 - t1)
self.assertGreaterEqual(f_time, t2 - t1)
def test_reentrant_total_time(self):
g = functions.math.identity.Identity()
t0 = time.time()
self.h.backward_preprocess(self.f, (self.x,), (self.gy,))
t1 = time.time()
self.h.forward_preprocess(g, (self.x,))
time.sleep(0.001)
self.h.forward_postprocess(g, (self.x,))
t2 = time.time()
self.h.backward_postprocess(self.f, (self.x,), (self.gy,))
t3 = time.time()
self.assertLessEqual(self.h.total_time(), t3 - t0)
self.assertGreaterEqual(self.h.total_time(), t2 - t1)
class TestTimerPrintReport(unittest.TestCase):
def setUp(self):
self.h = function_hooks.TimerHook()
self.f = functions.math.exponential.Exp()
self.f.add_hook(self.h)
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def test_summary(self):
x = self.x
self.f.apply((chainer.Variable(x),))
self.f.apply((chainer.Variable(x),))
self.assertEqual(2, len(self.h.call_history))
self.assertEqual(1, len(self.h.summary()))
def test_print_report(self):
x = self.x
self.f.apply((chainer.Variable(x),))
self.f.apply((chainer.Variable(x),))
io = six.StringIO()
self.h.print_report(file=io)
expect = r'''\AFunctionName ElapsedTime Occurrence
+Exp +[0-9.\-e]+.s +[0-9]+$
'''
actual = io.getvalue()
six.assertRegex(self, actual, expect)
testing.run_module(__name__, __file__)
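# --- Illustrative sketch (not part of the original test module) ---
# A minimal, hypothetical usage example of TimerHook outside the unittest
# machinery above; it relies only on the imports at the top of this module and
# is never invoked by the test runner.
def _demo_timer_hook_report():
    data = numpy.random.uniform(-1, 1, (3, 5)).astype(numpy.float32)
    hook = function_hooks.TimerHook()
    with hook:
        functions.exp(chainer.Variable(data))  # each forward call is recorded in call_history
        functions.exp(chainer.Variable(data))
    hook.print_report()  # aggregates elapsed time per function name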
|
rezoo/chainer
|
tests/chainer_tests/function_hooks_tests/test_timer.py
|
Python
|
mit
| 5,956
|
from sqlalchemy.testing import assert_raises_message, eq_, \
AssertsCompiledSQL, is_
from sqlalchemy.testing import fixtures
from sqlalchemy.orm import relationships, foreign, remote
from sqlalchemy import MetaData, Table, Column, ForeignKey, Integer, \
select, ForeignKeyConstraint, exc, func, and_
from sqlalchemy.orm.interfaces import ONETOMANY, MANYTOONE, MANYTOMANY
class _JoinFixtures(object):
@classmethod
def setup_class(cls):
m = MetaData()
cls.left = Table('lft', m,
Column('id', Integer, primary_key=True),
Column('x', Integer),
Column('y', Integer),
)
cls.right = Table('rgt', m,
Column('id', Integer, primary_key=True),
Column('lid', Integer, ForeignKey('lft.id')),
Column('x', Integer),
Column('y', Integer),
)
cls.right_multi_fk = Table('rgt_multi_fk', m,
Column('id', Integer, primary_key=True),
Column('lid1', Integer, ForeignKey('lft.id')),
Column('lid2', Integer, ForeignKey('lft.id')),
)
cls.selfref = Table('selfref', m,
Column('id', Integer, primary_key=True),
Column('sid', Integer, ForeignKey('selfref.id'))
)
cls.composite_selfref = Table('composite_selfref', m,
Column('id', Integer, primary_key=True),
Column('group_id', Integer, primary_key=True),
Column('parent_id', Integer),
ForeignKeyConstraint(
['parent_id', 'group_id'],
['composite_selfref.id', 'composite_selfref.group_id']
)
)
cls.m2mleft = Table('m2mlft', m,
Column('id', Integer, primary_key=True),
)
cls.m2mright = Table('m2mrgt', m,
Column('id', Integer, primary_key=True),
)
cls.m2msecondary = Table('m2msecondary', m,
Column('lid', Integer, ForeignKey('m2mlft.id'), primary_key=True),
Column('rid', Integer, ForeignKey('m2mrgt.id'), primary_key=True),
)
cls.m2msecondary_no_fks = Table('m2msecondary_no_fks', m,
Column('lid', Integer, primary_key=True),
Column('rid', Integer, primary_key=True),
)
cls.m2msecondary_ambig_fks = Table('m2msecondary_ambig_fks', m,
Column('lid1', Integer, ForeignKey('m2mlft.id'), primary_key=True),
Column('rid1', Integer, ForeignKey('m2mrgt.id'), primary_key=True),
Column('lid2', Integer, ForeignKey('m2mlft.id'), primary_key=True),
Column('rid2', Integer, ForeignKey('m2mrgt.id'), primary_key=True),
)
cls.base_w_sub_rel = Table('base_w_sub_rel', m,
Column('id', Integer, primary_key=True),
Column('sub_id', Integer, ForeignKey('rel_sub.id'))
)
cls.rel_sub = Table('rel_sub', m,
Column('id', Integer, ForeignKey('base_w_sub_rel.id'),
primary_key=True)
)
cls.base = Table('base', m,
Column('id', Integer, primary_key=True),
)
cls.sub = Table('sub', m,
Column('id', Integer, ForeignKey('base.id'),
primary_key=True),
)
cls.sub_w_base_rel = Table('sub_w_base_rel', m,
Column('id', Integer, ForeignKey('base.id'),
primary_key=True),
Column('base_id', Integer, ForeignKey('base.id'))
)
cls.sub_w_sub_rel = Table('sub_w_sub_rel', m,
Column('id', Integer, ForeignKey('base.id'),
primary_key=True),
Column('sub_id', Integer, ForeignKey('sub.id'))
)
cls.right_w_base_rel = Table('right_w_base_rel', m,
Column('id', Integer, primary_key=True),
Column('base_id', Integer, ForeignKey('base.id'))
)
cls.three_tab_a = Table('three_tab_a', m,
Column('id', Integer, primary_key=True),
)
cls.three_tab_b = Table('three_tab_b', m,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('three_tab_a.id'))
)
cls.three_tab_c = Table('three_tab_c', m,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('three_tab_a.id')),
Column('bid', Integer, ForeignKey('three_tab_b.id'))
)
def _join_fixture_overlapping_three_tables(self, **kw):
def _can_sync(*cols):
for c in cols:
if self.three_tab_c.c.contains_column(c):
return False
else:
return True
return relationships.JoinCondition(
self.three_tab_a,
self.three_tab_b,
self.three_tab_a,
self.three_tab_b,
support_sync=False,
can_be_synced_fn=_can_sync,
primaryjoin=and_(
self.three_tab_a.c.id == self.three_tab_b.c.aid,
self.three_tab_c.c.bid == self.three_tab_b.c.id,
self.three_tab_c.c.aid == self.three_tab_a.c.id
)
)
def _join_fixture_m2m(self, **kw):
return relationships.JoinCondition(
self.m2mleft,
self.m2mright,
self.m2mleft,
self.m2mright,
secondary=self.m2msecondary,
**kw
)
def _join_fixture_m2m_backref(self, **kw):
"""return JoinCondition in the same way RelationshipProperty
calls it for a backref on an m2m.
"""
j1 = self._join_fixture_m2m()
return j1, relationships.JoinCondition(
self.m2mright,
self.m2mleft,
self.m2mright,
self.m2mleft,
secondary=self.m2msecondary,
primaryjoin=j1.secondaryjoin_minus_local,
secondaryjoin=j1.primaryjoin_minus_local
)
def _join_fixture_o2m(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
**kw
)
def _join_fixture_m2o(self, **kw):
return relationships.JoinCondition(
self.right,
self.left,
self.right,
self.left,
**kw
)
def _join_fixture_o2m_selfref(self, **kw):
return relationships.JoinCondition(
self.selfref,
self.selfref,
self.selfref,
self.selfref,
**kw
)
def _join_fixture_m2o_selfref(self, **kw):
return relationships.JoinCondition(
self.selfref,
self.selfref,
self.selfref,
self.selfref,
remote_side=set([self.selfref.c.id]),
**kw
)
def _join_fixture_o2m_composite_selfref(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
**kw
)
def _join_fixture_m2o_composite_selfref(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
remote_side=set([self.composite_selfref.c.id,
self.composite_selfref.c.group_id]),
**kw
)
def _join_fixture_o2m_composite_selfref_func(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
primaryjoin=and_(
self.composite_selfref.c.group_id ==
func.foo(self.composite_selfref.c.group_id),
self.composite_selfref.c.parent_id ==
self.composite_selfref.c.id
),
**kw
)
def _join_fixture_o2m_composite_selfref_func_annotated(self, **kw):
return relationships.JoinCondition(
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
self.composite_selfref,
primaryjoin=and_(
remote(self.composite_selfref.c.group_id) ==
func.foo(self.composite_selfref.c.group_id),
remote(self.composite_selfref.c.parent_id) ==
self.composite_selfref.c.id
),
**kw
)
def _join_fixture_compound_expression_1(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=(self.left.c.x + self.left.c.y) == \
relationships.remote(relationships.foreign(
self.right.c.x * self.right.c.y
)),
**kw
)
def _join_fixture_compound_expression_2(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=(self.left.c.x + self.left.c.y) == \
relationships.foreign(
self.right.c.x * self.right.c.y
),
**kw
)
def _join_fixture_compound_expression_1_non_annotated(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=(self.left.c.x + self.left.c.y) == \
(
self.right.c.x * self.right.c.y
),
**kw
)
def _join_fixture_base_to_joined_sub(self, **kw):
# see test/orm/inheritance/test_abc_inheritance:TestaTobM2O
# and others there
right = self.base_w_sub_rel.join(self.rel_sub,
self.base_w_sub_rel.c.id == self.rel_sub.c.id
)
return relationships.JoinCondition(
self.base_w_sub_rel,
right,
self.base_w_sub_rel,
self.rel_sub,
primaryjoin=self.base_w_sub_rel.c.sub_id == \
self.rel_sub.c.id,
**kw
)
def _join_fixture_o2m_joined_sub_to_base(self, **kw):
left = self.base.join(self.sub_w_base_rel,
self.base.c.id == self.sub_w_base_rel.c.id)
return relationships.JoinCondition(
left,
self.base,
self.sub_w_base_rel,
self.base,
primaryjoin=self.sub_w_base_rel.c.base_id == self.base.c.id
)
def _join_fixture_m2o_joined_sub_to_sub_on_base(self, **kw):
# this is a late add - a variant of the test case
# in #2491 where we join on the base cols instead. only
# m2o has a problem at the time of this test.
left = self.base.join(self.sub, self.base.c.id == self.sub.c.id)
right = self.base.join(self.sub_w_base_rel,
self.base.c.id == self.sub_w_base_rel.c.id)
return relationships.JoinCondition(
left,
right,
self.sub,
self.sub_w_base_rel,
primaryjoin=self.sub_w_base_rel.c.base_id == self.base.c.id,
)
def _join_fixture_o2m_joined_sub_to_sub(self, **kw):
left = self.base.join(self.sub, self.base.c.id == self.sub.c.id)
right = self.base.join(self.sub_w_sub_rel,
self.base.c.id == self.sub_w_sub_rel.c.id)
return relationships.JoinCondition(
left,
right,
self.sub,
self.sub_w_sub_rel,
primaryjoin=self.sub.c.id == self.sub_w_sub_rel.c.sub_id
)
def _join_fixture_m2o_sub_to_joined_sub(self, **kw):
# see test.orm.test_mapper:MapperTest.test_add_column_prop_deannotate,
right = self.base.join(self.right_w_base_rel,
self.base.c.id == self.right_w_base_rel.c.id)
return relationships.JoinCondition(
self.right_w_base_rel,
right,
self.right_w_base_rel,
self.right_w_base_rel,
)
def _join_fixture_m2o_sub_to_joined_sub_func(self, **kw):
# see test.orm.test_mapper:MapperTest.test_add_column_prop_deannotate,
right = self.base.join(self.right_w_base_rel,
self.base.c.id == self.right_w_base_rel.c.id)
return relationships.JoinCondition(
self.right_w_base_rel,
right,
self.right_w_base_rel,
self.right_w_base_rel,
primaryjoin=self.right_w_base_rel.c.base_id == \
func.foo(self.base.c.id)
)
def _join_fixture_o2o_joined_sub_to_base(self, **kw):
left = self.base.join(self.sub,
self.base.c.id == self.sub.c.id)
# see test_relationships->AmbiguousJoinInterpretedAsSelfRef
return relationships.JoinCondition(
left,
self.sub,
left,
self.sub,
)
def _join_fixture_o2m_to_annotated_func(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=self.left.c.id ==
foreign(func.foo(self.right.c.lid)),
**kw
)
def _join_fixture_o2m_to_oldstyle_func(self, **kw):
return relationships.JoinCondition(
self.left,
self.right,
self.left,
self.right,
primaryjoin=self.left.c.id ==
func.foo(self.right.c.lid),
consider_as_foreign_keys=[self.right.c.lid],
**kw
)
def _assert_non_simple_warning(self, fn):
assert_raises_message(
exc.SAWarning,
"Non-simple column elements in "
"primary join condition for property "
r"None - consider using remote\(\) "
"annotations to mark the remote side.",
fn
)
def _assert_raises_no_relevant_fks(self, fn, expr, relname,
primary, *arg, **kw):
assert_raises_message(
exc.ArgumentError,
r"Could not locate any relevant foreign key columns "
r"for %s join condition '%s' on relationship %s. "
r"Ensure that referencing columns are associated with "
r"a ForeignKey or ForeignKeyConstraint, or are annotated "
r"in the join condition with the foreign\(\) annotation."
% (
primary, expr, relname
),
fn, *arg, **kw
)
def _assert_raises_no_equality(self, fn, expr, relname,
primary, *arg, **kw):
assert_raises_message(
exc.ArgumentError,
"Could not locate any simple equality expressions "
"involving locally mapped foreign key columns for %s join "
"condition '%s' on relationship %s. "
"Ensure that referencing columns are associated with a "
"ForeignKey or ForeignKeyConstraint, or are annotated in "
r"the join condition with the foreign\(\) annotation. "
"To allow comparison operators other than '==', "
"the relationship can be marked as viewonly=True." % (
primary, expr, relname
),
fn, *arg, **kw
)
def _assert_raises_ambig_join(self, fn, relname, secondary_arg,
*arg, **kw):
if secondary_arg is not None:
assert_raises_message(
exc.AmbiguousForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are multiple foreign key paths linking the "
"tables via secondary table '%s'. "
"Specify the 'foreign_keys' argument, providing a list "
"of those columns which should be counted as "
"containing a foreign key reference from the "
"secondary table to each of the parent and child tables."
% (relname, secondary_arg),
fn, *arg, **kw)
else:
assert_raises_message(
exc.AmbiguousForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables. "
% (relname,),
fn, *arg, **kw)
def _assert_raises_no_join(self, fn, relname, secondary_arg,
*arg, **kw):
if secondary_arg is not None:
assert_raises_message(
exc.NoForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables "
"via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey "
"or ForeignKeyConstraint, or specify 'primaryjoin' and "
"'secondaryjoin' expressions"
% (relname, secondary_arg),
fn, *arg, **kw)
else:
assert_raises_message(
exc.NoForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey "
"or ForeignKeyConstraint, or specify a 'primaryjoin' "
"expression."
% (relname,),
fn, *arg, **kw)
class ColumnCollectionsTest(_JoinFixtures, fixtures.TestBase,
AssertsCompiledSQL):
def test_determine_local_remote_pairs_o2o_joined_sub_to_base(self):
joincond = self._join_fixture_o2o_joined_sub_to_base()
eq_(
joincond.local_remote_pairs,
[(self.base.c.id, self.sub.c.id)]
)
def test_determine_synchronize_pairs_o2m_to_annotated_func(self):
joincond = self._join_fixture_o2m_to_annotated_func()
eq_(
joincond.synchronize_pairs,
[(self.left.c.id, self.right.c.lid)]
)
def test_determine_synchronize_pairs_o2m_to_oldstyle_func(self):
joincond = self._join_fixture_o2m_to_oldstyle_func()
eq_(
joincond.synchronize_pairs,
[(self.left.c.id, self.right.c.lid)]
)
def test_determinelocal_remote_m2o_joined_sub_to_sub_on_base(self):
joincond = self._join_fixture_m2o_joined_sub_to_sub_on_base()
eq_(
joincond.local_remote_pairs,
[(self.base.c.id, self.sub_w_base_rel.c.base_id)]
)
def test_determine_local_remote_base_to_joined_sub(self):
joincond = self._join_fixture_base_to_joined_sub()
eq_(
joincond.local_remote_pairs,
[
(self.base_w_sub_rel.c.sub_id, self.rel_sub.c.id)
]
)
def test_determine_local_remote_o2m_joined_sub_to_base(self):
joincond = self._join_fixture_o2m_joined_sub_to_base()
eq_(
joincond.local_remote_pairs,
[
(self.sub_w_base_rel.c.base_id, self.base.c.id)
]
)
def test_determine_local_remote_m2o_sub_to_joined_sub(self):
joincond = self._join_fixture_m2o_sub_to_joined_sub()
eq_(
joincond.local_remote_pairs,
[
(self.right_w_base_rel.c.base_id, self.base.c.id)
]
)
def test_determine_remote_columns_o2m_joined_sub_to_sub(self):
joincond = self._join_fixture_o2m_joined_sub_to_sub()
eq_(
joincond.local_remote_pairs,
[
(self.sub.c.id, self.sub_w_sub_rel.c.sub_id)
]
)
def test_determine_remote_columns_compound_1(self):
joincond = self._join_fixture_compound_expression_1(
support_sync=False)
eq_(
joincond.remote_columns,
set([self.right.c.x, self.right.c.y])
)
def test_determine_local_remote_compound_1(self):
joincond = self._join_fixture_compound_expression_1(
support_sync=False)
eq_(
joincond.local_remote_pairs,
[
(self.left.c.x, self.right.c.x),
(self.left.c.x, self.right.c.y),
(self.left.c.y, self.right.c.x),
(self.left.c.y, self.right.c.y)
]
)
def test_determine_local_remote_compound_2(self):
joincond = self._join_fixture_compound_expression_2(
support_sync=False)
eq_(
joincond.local_remote_pairs,
[
(self.left.c.x, self.right.c.x),
(self.left.c.x, self.right.c.y),
(self.left.c.y, self.right.c.x),
(self.left.c.y, self.right.c.y)
]
)
def test_determine_local_remote_compound_3(self):
joincond = self._join_fixture_compound_expression_1()
eq_(
joincond.local_remote_pairs,
[
(self.left.c.x, self.right.c.x),
(self.left.c.x, self.right.c.y),
(self.left.c.y, self.right.c.x),
(self.left.c.y, self.right.c.y),
]
)
def test_err_local_remote_compound_1(self):
self._assert_raises_no_relevant_fks(
self._join_fixture_compound_expression_1_non_annotated,
r'lft.x \+ lft.y = rgt.x \* rgt.y',
"None", "primary"
)
def test_determine_remote_columns_compound_2(self):
joincond = self._join_fixture_compound_expression_2(
support_sync=False)
eq_(
joincond.remote_columns,
set([self.right.c.x, self.right.c.y])
)
def test_determine_remote_columns_o2m(self):
joincond = self._join_fixture_o2m()
eq_(
joincond.remote_columns,
set([self.right.c.lid])
)
def test_determine_remote_columns_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
eq_(
joincond.remote_columns,
set([self.selfref.c.sid])
)
def test_determine_local_remote_pairs_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
eq_(
joincond.local_remote_pairs,
[
(self.composite_selfref.c.group_id,
self.composite_selfref.c.group_id),
(self.composite_selfref.c.id,
self.composite_selfref.c.parent_id),
]
)
def test_determine_local_remote_pairs_o2m_composite_selfref_func_warning(self):
self._assert_non_simple_warning(
self._join_fixture_o2m_composite_selfref_func
)
def test_determine_local_remote_pairs_o2m_overlap_func_warning(self):
self._assert_non_simple_warning(
self._join_fixture_m2o_sub_to_joined_sub_func
)
def test_determine_local_remote_pairs_o2m_composite_selfref_func_annotated(self):
joincond = self._join_fixture_o2m_composite_selfref_func_annotated()
eq_(
joincond.local_remote_pairs,
[
(self.composite_selfref.c.group_id,
self.composite_selfref.c.group_id),
(self.composite_selfref.c.id,
self.composite_selfref.c.parent_id),
]
)
def test_determine_remote_columns_m2o_composite_selfref(self):
joincond = self._join_fixture_m2o_composite_selfref()
eq_(
joincond.remote_columns,
set([self.composite_selfref.c.id,
self.composite_selfref.c.group_id])
)
def test_determine_remote_columns_m2o(self):
joincond = self._join_fixture_m2o()
eq_(
joincond.remote_columns,
set([self.left.c.id])
)
def test_determine_local_remote_pairs_o2m(self):
joincond = self._join_fixture_o2m()
eq_(
joincond.local_remote_pairs,
[(self.left.c.id, self.right.c.lid)]
)
def test_determine_synchronize_pairs_m2m(self):
joincond = self._join_fixture_m2m()
eq_(
joincond.synchronize_pairs,
[(self.m2mleft.c.id, self.m2msecondary.c.lid)]
)
eq_(
joincond.secondary_synchronize_pairs,
[(self.m2mright.c.id, self.m2msecondary.c.rid)]
)
def test_determine_local_remote_pairs_o2m_backref(self):
joincond = self._join_fixture_o2m()
joincond2 = self._join_fixture_m2o(
primaryjoin=joincond.primaryjoin_reverse_remote,
)
eq_(
joincond2.local_remote_pairs,
[(self.right.c.lid, self.left.c.id)]
)
def test_determine_local_remote_pairs_m2m(self):
joincond = self._join_fixture_m2m()
eq_(
joincond.local_remote_pairs,
[(self.m2mleft.c.id, self.m2msecondary.c.lid),
(self.m2mright.c.id, self.m2msecondary.c.rid)]
)
def test_determine_local_remote_pairs_m2m_backref(self):
j1, j2 = self._join_fixture_m2m_backref()
eq_(
j1.local_remote_pairs,
[(self.m2mleft.c.id, self.m2msecondary.c.lid),
(self.m2mright.c.id, self.m2msecondary.c.rid)]
)
eq_(
j2.local_remote_pairs,
[
(self.m2mright.c.id, self.m2msecondary.c.rid),
(self.m2mleft.c.id, self.m2msecondary.c.lid),
]
)
def test_determine_local_columns_m2m_backref(self):
j1, j2 = self._join_fixture_m2m_backref()
eq_(
j1.local_columns,
set([self.m2mleft.c.id])
)
eq_(
j2.local_columns,
set([self.m2mright.c.id])
)
def test_determine_remote_columns_m2m_backref(self):
j1, j2 = self._join_fixture_m2m_backref()
eq_(
j1.remote_columns,
set([self.m2msecondary.c.lid, self.m2msecondary.c.rid])
)
eq_(
j2.remote_columns,
set([self.m2msecondary.c.lid, self.m2msecondary.c.rid])
)
def test_determine_remote_columns_m2o_selfref(self):
joincond = self._join_fixture_m2o_selfref()
eq_(
joincond.remote_columns,
set([self.selfref.c.id])
)
def test_determine_local_remote_cols_three_tab_viewonly(self):
joincond = self._join_fixture_overlapping_three_tables()
eq_(
joincond.local_remote_pairs,
[(self.three_tab_a.c.id, self.three_tab_b.c.aid)]
)
eq_(
joincond.remote_columns,
set([self.three_tab_b.c.id, self.three_tab_b.c.aid])
)
class DirectionTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
def test_determine_direction_compound_2(self):
joincond = self._join_fixture_compound_expression_2(
support_sync=False)
is_(
joincond.direction,
ONETOMANY
)
def test_determine_direction_o2m(self):
joincond = self._join_fixture_o2m()
is_(joincond.direction, ONETOMANY)
def test_determine_direction_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
is_(joincond.direction, ONETOMANY)
def test_determine_direction_m2o_selfref(self):
joincond = self._join_fixture_m2o_selfref()
is_(joincond.direction, MANYTOONE)
def test_determine_direction_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
is_(joincond.direction, ONETOMANY)
def test_determine_direction_m2o_composite_selfref(self):
joincond = self._join_fixture_m2o_composite_selfref()
is_(joincond.direction, MANYTOONE)
def test_determine_direction_m2o(self):
joincond = self._join_fixture_m2o()
is_(joincond.direction, MANYTOONE)
class DetermineJoinTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_determine_join_o2m(self):
joincond = self._join_fixture_o2m()
self.assert_compile(
joincond.primaryjoin,
"lft.id = rgt.lid"
)
def test_determine_join_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
self.assert_compile(
joincond.primaryjoin,
"selfref.id = selfref.sid"
)
def test_determine_join_m2o_selfref(self):
joincond = self._join_fixture_m2o_selfref()
self.assert_compile(
joincond.primaryjoin,
"selfref.id = selfref.sid"
)
def test_determine_join_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
self.assert_compile(
joincond.primaryjoin,
"composite_selfref.group_id = composite_selfref.group_id "
"AND composite_selfref.id = composite_selfref.parent_id"
)
def test_determine_join_m2o_composite_selfref(self):
joincond = self._join_fixture_m2o_composite_selfref()
self.assert_compile(
joincond.primaryjoin,
"composite_selfref.group_id = composite_selfref.group_id "
"AND composite_selfref.id = composite_selfref.parent_id"
)
def test_determine_join_m2o(self):
joincond = self._join_fixture_m2o()
self.assert_compile(
joincond.primaryjoin,
"lft.id = rgt.lid"
)
def test_determine_join_ambiguous_fks_o2m(self):
assert_raises_message(
exc.AmbiguousForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship None - "
"there are multiple foreign key paths linking "
"the tables. Specify the 'foreign_keys' argument, "
"providing a list of those columns which "
"should be counted as containing a foreign "
"key reference to the parent table.",
relationships.JoinCondition,
self.left,
self.right_multi_fk,
self.left,
self.right_multi_fk,
)
def test_determine_join_no_fks_o2m(self):
self._assert_raises_no_join(
relationships.JoinCondition,
"None", None,
self.left,
self.selfref,
self.left,
self.selfref,
)
def test_determine_join_ambiguous_fks_m2m(self):
self._assert_raises_ambig_join(
relationships.JoinCondition,
"None", self.m2msecondary_ambig_fks,
self.m2mleft,
self.m2mright,
self.m2mleft,
self.m2mright,
secondary=self.m2msecondary_ambig_fks
)
def test_determine_join_no_fks_m2m(self):
self._assert_raises_no_join(
relationships.JoinCondition,
"None", self.m2msecondary_no_fks,
self.m2mleft,
self.m2mright,
self.m2mleft,
self.m2mright,
secondary=self.m2msecondary_no_fks
)
def _join_fixture_fks_ambig_m2m(self):
return relationships.JoinCondition(
self.m2mleft,
self.m2mright,
self.m2mleft,
self.m2mright,
secondary=self.m2msecondary_ambig_fks,
consider_as_foreign_keys=[
self.m2msecondary_ambig_fks.c.lid1,
self.m2msecondary_ambig_fks.c.rid1]
)
def test_determine_join_w_fks_ambig_m2m(self):
joincond = self._join_fixture_fks_ambig_m2m()
self.assert_compile(
joincond.primaryjoin,
"m2mlft.id = m2msecondary_ambig_fks.lid1"
)
self.assert_compile(
joincond.secondaryjoin,
"m2mrgt.id = m2msecondary_ambig_fks.rid1"
)
class AdaptedJoinTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_join_targets_o2m_selfref(self):
joincond = self._join_fixture_o2m_selfref()
left = select([joincond.parent_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
left,
joincond.child_selectable,
True)
self.assert_compile(
pj, "pj.id = selfref.sid"
)
right = select([joincond.child_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
right,
True)
self.assert_compile(
pj, "selfref.id = pj.sid"
)
def test_join_targets_o2m_plain(self):
joincond = self._join_fixture_o2m()
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
joincond.child_selectable,
False)
self.assert_compile(
pj, "lft.id = rgt.lid"
)
def test_join_targets_o2m_left_aliased(self):
joincond = self._join_fixture_o2m()
left = select([joincond.parent_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
left,
joincond.child_selectable,
True)
self.assert_compile(
pj, "pj.id = rgt.lid"
)
def test_join_targets_o2m_right_aliased(self):
joincond = self._join_fixture_o2m()
right = select([joincond.child_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
right,
True)
self.assert_compile(
pj, "lft.id = pj.lid"
)
def test_join_targets_o2m_composite_selfref(self):
joincond = self._join_fixture_o2m_composite_selfref()
right = select([joincond.child_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
right,
True)
self.assert_compile(
pj,
"pj.group_id = composite_selfref.group_id "
"AND composite_selfref.id = pj.parent_id"
)
def test_join_targets_m2o_composite_selfref(self):
joincond = self._join_fixture_m2o_composite_selfref()
right = select([joincond.child_selectable]).alias('pj')
pj, sj, sec, adapter, ds = joincond.join_targets(
joincond.parent_selectable,
right,
True)
self.assert_compile(
pj,
"pj.group_id = composite_selfref.group_id "
"AND pj.id = composite_selfref.parent_id"
)
class LazyClauseTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_lazy_clause_o2m(self):
joincond = self._join_fixture_o2m()
lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause()
self.assert_compile(
lazywhere,
":param_1 = rgt.lid"
)
def test_lazy_clause_o2m_reverse(self):
joincond = self._join_fixture_o2m()
lazywhere, bind_to_col, equated_columns =\
joincond.create_lazy_clause(reverse_direction=True)
self.assert_compile(
lazywhere,
"lft.id = :param_1"
)
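# --- Illustrative sketch (not part of the original test module) ---
# A hypothetical, standalone example of the foreign()/remote() annotations the
# fixtures above exercise: when a column carries no ForeignKey, annotating the
# primaryjoin is what tells JoinCondition which side is foreign. It reuses the
# imports at the top of this module.
def _demo_annotated_join_condition():
    m = MetaData()
    parent = Table('parent', m, Column('id', Integer, primary_key=True))
    child = Table('child', m,
                  Column('id', Integer, primary_key=True),
                  Column('pid', Integer))  # deliberately no ForeignKey
    joincond = relationships.JoinCondition(
        parent, child, parent, child,
        primaryjoin=parent.c.id == foreign(child.c.pid),
    )
    # expected: local_remote_pairs == [(parent.c.id, child.c.pid)], direction ONETOMANY
    return joincond.local_remote_pairs, joincond.direction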
|
alex/sqlalchemy
|
test/orm/test_rel_fn.py
|
Python
|
mit
| 37,493
|
# test builtin range type
# print
print(range(4))
# len
print(len(range(0)))
print(len(range(4)))
print(len(range(1, 4)))
print(len(range(1, 4, 2)))
print(len(range(1, 4, -1)))
print(len(range(4, 1, -1)))
print(len(range(4, 1, -2)))
# subscr
print(range(4)[0])
print(range(4)[1])
print(range(4)[-1])
# slice
print(range(4)[0:])
print(range(4)[1:])
print(range(4)[1:2])
print(range(4)[1:3])
print(range(4)[1::2])
print(range(4)[1:-2:2])
|
Vogtinator/micropython
|
tests/basics/builtin_range.py
|
Python
|
mit
| 440
|
"""
Return dictionary of clausulae found in the prosody of Latin prose.
The clausulae analysis function returns a dictionary in which the key is the type of clausula and the value is the number
of times it occurs in the text. The list of clausulae used in the method is derived from the 2019 Journal of Roman Studies
paper "Auceps syllabarum: A Digital Analysis of Latin Prose Rhythm". The list of clausulae are mutually exclusive so no one
rhythm will be counted in multiple categories.
"""
from collections import namedtuple
from typing import Dict, List
__author__ = ["Tyler Kirby <tyler.kirby9398@gmail.com>"]
__license__ = "MIT License. See LICENSE"
Clausula = namedtuple("Clausula", "rhythm_name rhythm")
standard_clausulae = [
Clausula("cretic_trochee", "-u--x"),
Clausula("cretic_trochee_resolved_a", "uuu--x"),
Clausula("cretic_trochee_resolved_b", "-uuu-x"),
Clausula("cretic_trochee_resolved_c", "-u-uux"),
Clausula("double_cretic", "-u--ux"),
Clausula("molossus_cretic", "----ux"),
Clausula("double_molossus_cretic_resolved_a", "uuu--ux"),
Clausula("double_molossus_cretic_resolved_b", "-uuu-ux"),
Clausula("double_molossus_cretic_resolved_c", "-u-uux"),
Clausula("double_molossus_cretic_resolved_d", "uu---ux"),
Clausula("double_molossus_cretic_resolved_e", "-uu--ux"),
Clausula("double_molossus_cretic_resolved_f", "--uu-ux"),
Clausula("double_molossus_cretic_resolved_g", "---uuux"),
Clausula("double_molossus_cretic_resolved_h", "-u---ux"),
Clausula("double_trochee", "-u-x"),
Clausula("double_trochee_resolved_a", "uuu-x"),
Clausula("double_trochee_resolved_b", "-uuux"),
Clausula("hypodochmiac", "-u-ux"),
Clausula("hypodochmiac_resolved_a", "uuu-ux"),
Clausula("hypodochmiac_resolved_b", "-uuuux"),
Clausula("spondaic", "---x"),
Clausula("heroic", "-uu-x"),
]
class Clausulae:
def __init__(self, rhythms: List[Clausula] = standard_clausulae):
"""Initialize class."""
self.rhythms = rhythms
def clausulae_analysis(self, prosody: List) -> List[Dict[str, int]]:
"""
        Return a list of dictionaries, each mapping a clausula type to its frequency.
        :param prosody: the prosody of a prose text (must be in the format of the scansion produced by the scanner classes).
        :return: list of clausula frequency dictionaries
>>> Clausulae().clausulae_analysis(['-uuu-uuu-u--x', 'uu-uu-uu----x'])
[{'cretic_trochee': 1}, {'cretic_trochee_resolved_a': 0}, {'cretic_trochee_resolved_b': 0}, {'cretic_trochee_resolved_c': 0}, {'double_cretic': 0}, {'molossus_cretic': 0}, {'double_molossus_cretic_resolved_a': 0}, {'double_molossus_cretic_resolved_b': 0}, {'double_molossus_cretic_resolved_c': 0}, {'double_molossus_cretic_resolved_d': 0}, {'double_molossus_cretic_resolved_e': 0}, {'double_molossus_cretic_resolved_f': 0}, {'double_molossus_cretic_resolved_g': 0}, {'double_molossus_cretic_resolved_h': 0}, {'double_trochee': 0}, {'double_trochee_resolved_a': 0}, {'double_trochee_resolved_b': 0}, {'hypodochmiac': 0}, {'hypodochmiac_resolved_a': 0}, {'hypodochmiac_resolved_b': 0}, {'spondaic': 1}, {'heroic': 0}]
"""
prosody = " ".join(prosody)
return [{r.rhythm_name: prosody.count(r.rhythm)} for r in self.rhythms]
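# --- Illustrative sketch (not part of the original module) ---
# A hypothetical example of running the analysis against a narrowed, custom set
# of rhythms; the prosody strings are invented and only show the call shape.
def _demo_custom_clausulae():
    rhythms = [Clausula("spondaic", "---x"), Clausula("heroic", "-uu-x")]
    # expected: [{'spondaic': 1}, {'heroic': 1}]
    return Clausulae(rhythms).clausulae_analysis(["uu-uu-uu----x", "-uu-uu-uu-uu-x"])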
|
kylepjohnson/cltk
|
src/cltk/prosody/lat/clausulae_analysis.py
|
Python
|
mit
| 3,300
|
# coding: utf-8
# Abstract classes.
if False: # MYPY
from typing import Any, Dict, Optional, List # NOQA
def CommentCheck():
# type: () -> None
pass
class Event(object):
__slots__ = 'start_mark', 'end_mark', 'comment'
def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck):
# type: (Any, Any, Any) -> None
self.start_mark = start_mark
self.end_mark = end_mark
# assert comment is not CommentCheck
if comment is CommentCheck:
comment = None
self.comment = comment
def __repr__(self):
# type: () -> Any
attributes = [
key
for key in ['anchor', 'tag', 'implicit', 'value', 'flow_style', 'style']
if hasattr(self, key)
]
arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes])
if self.comment not in [None, CommentCheck]:
arguments += ', comment={!r}'.format(self.comment)
return '%s(%s)' % (self.__class__.__name__, arguments)
class NodeEvent(Event):
__slots__ = ('anchor',)
def __init__(self, anchor, start_mark=None, end_mark=None, comment=None):
# type: (Any, Any, Any, Any) -> None
Event.__init__(self, start_mark, end_mark, comment)
self.anchor = anchor
class CollectionStartEvent(NodeEvent):
__slots__ = 'tag', 'implicit', 'flow_style', 'nr_items'
def __init__(
self,
anchor,
tag,
implicit,
start_mark=None,
end_mark=None,
flow_style=None,
comment=None,
nr_items=None,
):
# type: (Any, Any, Any, Any, Any, Any, Any, Optional[int]) -> None
NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
self.tag = tag
self.implicit = implicit
self.flow_style = flow_style
self.nr_items = nr_items
class CollectionEndEvent(Event):
__slots__ = ()
# Implementations.
class StreamStartEvent(Event):
__slots__ = ('encoding',)
def __init__(self, start_mark=None, end_mark=None, encoding=None, comment=None):
# type: (Any, Any, Any, Any) -> None
Event.__init__(self, start_mark, end_mark, comment)
self.encoding = encoding
class StreamEndEvent(Event):
__slots__ = ()
class DocumentStartEvent(Event):
__slots__ = 'explicit', 'version', 'tags'
def __init__(
self,
start_mark=None,
end_mark=None,
explicit=None,
version=None,
tags=None,
comment=None,
):
# type: (Any, Any, Any, Any, Any, Any) -> None
Event.__init__(self, start_mark, end_mark, comment)
self.explicit = explicit
self.version = version
self.tags = tags
class DocumentEndEvent(Event):
__slots__ = ('explicit',)
def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None):
# type: (Any, Any, Any, Any) -> None
Event.__init__(self, start_mark, end_mark, comment)
self.explicit = explicit
class AliasEvent(NodeEvent):
__slots__ = ()
class ScalarEvent(NodeEvent):
__slots__ = 'tag', 'implicit', 'value', 'style'
def __init__(
self,
anchor,
tag,
implicit,
value,
start_mark=None,
end_mark=None,
style=None,
comment=None,
):
# type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None
NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
self.tag = tag
self.implicit = implicit
self.value = value
self.style = style
class SequenceStartEvent(CollectionStartEvent):
__slots__ = ()
class SequenceEndEvent(CollectionEndEvent):
__slots__ = ()
class MappingStartEvent(CollectionStartEvent):
__slots__ = ()
class MappingEndEvent(CollectionEndEvent):
__slots__ = ()
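# --- Illustrative sketch (not part of the original module) ---
# A hypothetical demonstration of how Event.__repr__ above reports only the
# slots a concrete event class actually defines.
def _demo_event_repr():
    ev = ScalarEvent(anchor=None, tag=None, implicit=(True, False), value='hello')
    # -> "ScalarEvent(anchor=None, tag=None, implicit=(True, False), value='hello', style=None)"
    return repr(ev)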
|
explosion/srsly
|
srsly/ruamel_yaml/events.py
|
Python
|
mit
| 3,902
|
# Data sources
database(
thermoLibraries = ['KlippensteinH2O2', 'primaryThermoLibrary','DFT_QCI_thermo','CH','CHN','CHO','CHON','CN','NISTThermoLibrary','thermo_DFT_CCSDTF12_BAC','GRI-Mech3.0-N'],
reactionLibraries = [('Nitrogen_Dean_and_Bozzelli',False)],
seedMechanisms = ['ERC-FoundationFuelv0.9'],
kineticsDepositories = ['training'],
kineticsFamilies = 'default',
kineticsEstimator = 'rate rules',
)
# Constraints on generated species
generatedSpeciesConstraints(
allowed = ['seed mechanisms', 'reaction libraries'],
#maximumCarbonAtoms = 7,
#maximumOxygenAtoms = 5,
maximumNitrogenAtoms = 2,
#maximumSiliconAtoms = 0,
#maximumSulfurAtoms = 0,
#maximumHeavyAtoms = 3,
maximumRadicalElectrons = 2,
allowSingletO2 = False,
)
# List of species
species(
label='CH3NO2',
reactive=True,
structure=adjacencyList(
"""
1 C u0 p0 {2,S} {3,S} {4,S} {5,S}
2 H u0 p0 {1,S}
3 H u0 p0 {1,S}
4 H u0 p0 {1,S}
5 N u0 p0 c+1 {1,S} {6,D} {7,S}
6 O u0 p2 {5,D}
7 O u0 p3 c-1 {5,S}
"""),
)
species(
label='O2',
reactive=True,
structure=adjacencyList(
"""
1 O u1 p2 {2,S}
2 O u1 p2 {1,S}
"""),
)
species(
label='N2',
reactive=True,
structure=adjacencyList(
"""
1 N u0 p1 {2,T}
2 N u0 p1 {1,T}
"""),
)
# Reaction systems
simpleReactor(
temperature=(1500,'K'),
pressure=(10.0,'bar'),
initialMoleFractions={
"CH3NO2": 0.1,
"O2": 0.21,
"N2": 0.69,
},
terminationConversion={
'CH3NO2': 0.1,
},
)
model(
toleranceKeepInEdge=1e-5,
toleranceMoveToCore=0.1,
toleranceInterruptSimulation=0.1,
maximumEdgeSpecies=10000
)
options(
units='si',
saveRestartPeriod=None,
generateOutputHTML=False,
generatePlots=False,
)
|
pierrelb/RMG-Py
|
examples/rmg/ch3no2/input.py
|
Python
|
mit
| 1,947
|
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class urlview(test.test):
"""
Autotest module for testing basic functionality
of urlview
    @author Kingsuk Deb <kingsdeb@linux.vnet.ibm.com>
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
logging.info('\n Test initialize successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.call(test_path + '/urlview' + '/urlview.sh', shell=True)
if ret_val != 0:
self.nfail += 1
        except error.CmdError as e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
|
PoornimaNayak/autotest-client-tests
|
linux-tools/urlview/urlview.py
|
Python
|
gpl-2.0
| 1,199
|
'''
Copyright (C) 2014 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from Arms.one_link.arm import Arm1Link as Arm1
from Arms.three_link.arm import Arm3Link as Arm3
import Controllers.dmp as dmp
import Controllers.osc as osc
import Controllers.trace as trace
import Controllers.trajectory as trajectory_class
import Tasks.Write.number_array as na
import numpy as np
# TODO: subclass trajectory tracing tasks
def Task(arm_class, control_type,
sequence=[1,2,3], writebox=np.array([-.35,.35,.25,.4])):
"""
This task sets up the arm to write numbers inside
a specified area (-x_bias, x_bias, -y_bias, y_bias).
"""
if not issubclass(control_type, trajectory_class.Shell):
        raise Exception('System must use trajectory control '\
                '(dmp | trace) for writing tasks.')
if issubclass(arm_class, Arm1):
        raise Exception('System cannot use a 1 link arm '\
                'for writing tasks')
if issubclass(arm_class, Arm3):
writebox=np.array([-2., 2., 1., 2.])
trajectory = na.get_sequence(sequence, writebox)
control_pars = {'gain':1000, # pd gain for trajectory following
'pen_down':False,
'trajectory':trajectory.T}
if issubclass(control_type, dmp.Shell):
# number of goals is the number of (NANs - 1) * number of DMPs
num_goals = (np.sum(trajectory[:,0] != trajectory[:,0]) - 1) * 2
# respecify goals for spatial scaling by changing add_to_goals
control_pars.update({'add_to_goals':[0]*num_goals,
                'bfs':1000, # how many basis functions per DMP
'tau':.01}) # tau is the time scaling term
else:
# trajectory based control
control_pars.update({'tau':.0001}) # how fast the trajectory rolls out
runner_pars = {'control_type':'write_numbers',
'infinite_trail':True,
'title':'Task: Writing numbers',
'trajectory':trajectory}
if issubclass(arm_class, Arm3):
control_pars.update({'threshold':.1})
runner_pars.update({'box':[-5,5,-5,5]})
kp = 50 # position error gain on the PD controller
controller = osc.Control(kp=kp, kv=np.sqrt(kp))
control_shell = control_type(controller=controller, **control_pars)
return (control_shell, runner_pars)
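# --- Illustrative sketch (not part of the original module) ---
# The num_goals computation above counts trajectory segment breaks using the
# NaN != NaN property; this hypothetical helper isolates that trick with only
# the numpy import already present in this module.
def _demo_count_segment_breaks():
    traj = np.array([[0., 0.], [1., 1.], [np.nan, np.nan], [2., 2.], [np.nan, np.nan]])
    nans = np.sum(traj[:, 0] != traj[:, 0])  # NaN is the only value not equal to itself
    return (nans - 1) * 2  # two DMPs (x and y), so this trajectory yields 2 goals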
|
msunardi/blog
|
Control/Tasks/write_numbers.py
|
Python
|
gpl-3.0
| 2,983
|
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re, string
from operator import attrgetter
from PyQt5.Qt import (Qt, QAbstractItemModel, QPixmap, QModelIndex, QSize,
pyqtSignal)
from calibre.gui2 import FunctionDispatcher
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.search.download_thread import DetailsThreadPool, \
CoverThreadPool
from calibre.utils.icu import sort_key
from calibre.utils.search_query_parser import SearchQueryParser
def comparable_price(text):
    # this keeps thousand and fraction separators
match = re.search(r'(?:\d|[,.](?=\d))(?:\d*(?:[,.\' ](?=\d))?)+', text)
if match:
# replace all separators with '.'
m = re.sub(r'[.,\' ]', '.', match.group())
        # remove all separators except the fraction separator,
        # leaving only 2 digits in the fraction
m = re.sub(r'\.(?!\d*$)', r'', m)
text = '{0:0>8.0f}'.format(float(m) * 100.)
return text
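# --- Illustrative sketch (not part of the original module) ---
# Hypothetical sanity checks showing the normalisation comparable_price
# performs so that the price column can be ordered by plain string comparison.
def _demo_comparable_price():
    assert comparable_price('$1,234.56') == '00123456'  # separators collapsed, cents kept
    assert comparable_price('9.99 EUR') == '00000999'   # zero-padded to a fixed width
    assert comparable_price('free') == 'free'           # no digits: returned unchanged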
class Matches(QAbstractItemModel):
total_changed = pyqtSignal(int)
HEADERS = [_('Cover'), _('Title'), _('Price'), _('DRM'), _('Store'), _('Download'), _('Affiliate')]
HTML_COLS = (1, 4)
def __init__(self, cover_thread_count=2, detail_thread_count=4):
QAbstractItemModel.__init__(self)
self.DRM_LOCKED_ICON = QPixmap(I('drm-locked.png')).scaledToHeight(64,
Qt.SmoothTransformation)
self.DRM_UNLOCKED_ICON = QPixmap(I('drm-unlocked.png')).scaledToHeight(64,
Qt.SmoothTransformation)
self.DRM_UNKNOWN_ICON = QPixmap(I('dialog_question.png')).scaledToHeight(64,
Qt.SmoothTransformation)
self.DONATE_ICON = QPixmap(I('donate.png')).scaledToHeight(16,
Qt.SmoothTransformation)
self.DOWNLOAD_ICON = QPixmap(I('arrow-down.png')).scaledToHeight(16,
Qt.SmoothTransformation)
# All matches. Used to determine the order to display
# self.matches because the SearchFilter returns
# matches unordered.
self.all_matches = []
# Only the showing matches.
self.matches = []
self.query = ''
self.filterable_query = False
self.search_filter = SearchFilter()
self.cover_pool = CoverThreadPool(cover_thread_count)
self.details_pool = DetailsThreadPool(detail_thread_count)
self.filter_results_dispatcher = FunctionDispatcher(self.filter_results)
self.got_result_details_dispatcher = FunctionDispatcher(self.got_result_details)
self.sort_col = 2
self.sort_order = Qt.AscendingOrder
def closing(self):
self.cover_pool.abort()
self.details_pool.abort()
def clear_results(self):
self.all_matches = []
self.matches = []
self.search_filter.clear_search_results()
self.query = ''
self.filterable_query = False
self.cover_pool.abort()
self.details_pool.abort()
self.total_changed.emit(self.rowCount())
self.beginResetModel(), self.endResetModel()
def add_result(self, result, store_plugin):
if result not in self.all_matches:
self.layoutAboutToBeChanged.emit()
self.all_matches.append(result)
self.search_filter.add_search_result(result)
if result.cover_url:
result.cover_queued = True
self.cover_pool.add_task(result, self.filter_results_dispatcher)
else:
result.cover_queued = False
self.details_pool.add_task(result, store_plugin, self.got_result_details_dispatcher)
self.filter_results()
self.layoutChanged.emit()
def get_result(self, index):
row = index.row()
if row < len(self.matches):
return self.matches[row]
else:
return None
def has_results(self):
return len(self.matches) > 0
def filter_results(self):
self.layoutAboutToBeChanged.emit()
# Only use the search filter's filtered results when there is a query
        # and it is a filterable query. This allows the stores' best-guess
        # matches to come through.
if self.query and self.filterable_query:
self.matches = list(self.search_filter.parse(self.query))
else:
self.matches = list(self.search_filter.universal_set())
self.total_changed.emit(self.rowCount())
self.sort(self.sort_col, self.sort_order, False)
self.layoutChanged.emit()
def got_result_details(self, result):
if not result.cover_queued and result.cover_url:
result.cover_queued = True
self.cover_pool.add_task(result, self.filter_results_dispatcher)
if result in self.matches:
row = self.matches.index(result)
self.dataChanged.emit(self.index(row, 0), self.index(row, self.columnCount() - 1))
if result.drm not in (SearchResult.DRM_LOCKED, SearchResult.DRM_UNLOCKED, SearchResult.DRM_UNKNOWN):
result.drm = SearchResult.DRM_UNKNOWN
self.filter_results()
def set_query(self, query):
self.query = query
self.filterable_query = self.is_filterable_query(query)
def is_filterable_query(self, query):
# Remove control modifiers.
query = query.replace('\\', '')
query = query.replace('!', '')
query = query.replace('=', '')
query = query.replace('~', '')
query = query.replace('>', '')
query = query.replace('<', '')
        # Store the query at this point for comparison later
mod_query = query
# Remove filter identifiers
# Remove the prefix.
for loc in ('all', 'author', 'author2', 'authors', 'title', 'title2'):
query = re.sub(r'%s:"(?P<a>[^\s"]+)"' % loc, '\g<a>', query)
query = query.replace('%s:' % loc, '')
# Remove the prefix and search text.
for loc in ('cover', 'download', 'downloads', 'drm', 'format', 'formats', 'price', 'store'):
query = re.sub(r'%s:"[^"]"' % loc, '', query)
query = re.sub(r'%s:[^\s]*' % loc, '', query)
# Remove whitespace
        query = re.sub(r'\s', '', query)
        mod_query = re.sub(r'\s', '', mod_query)
# If mod_query and query are the same then there were no filter modifiers
# so this isn't a filterable query.
if mod_query == query:
return False
return True
def index(self, row, column, parent=QModelIndex()):
return self.createIndex(row, column)
def parent(self, index):
if not index.isValid() or index.internalId() == 0:
return QModelIndex()
return self.createIndex(0, 0)
def rowCount(self, *args):
return len(self.matches)
def columnCount(self, *args):
return len(self.HEADERS)
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return None
text = ''
if orientation == Qt.Horizontal:
if section < len(self.HEADERS):
text = self.HEADERS[section]
return (text)
else:
return (section+1)
def data(self, index, role):
row, col = index.row(), index.column()
if row >= len(self.matches):
return None
result = self.matches[row]
if role == Qt.DisplayRole:
if col == 1:
t = result.title if result.title else _('Unknown')
a = result.author if result.author else ''
return ('<b>%s</b><br><i>%s</i>' % (t, a))
elif col == 2:
return (result.price)
elif col == 4:
return ('%s<br>%s' % (result.store_name, result.formats))
return None
elif role == Qt.DecorationRole:
if col == 0 and result.cover_data:
p = QPixmap()
p.loadFromData(result.cover_data)
return (p)
if col == 3:
if result.drm == SearchResult.DRM_LOCKED:
return (self.DRM_LOCKED_ICON)
elif result.drm == SearchResult.DRM_UNLOCKED:
return (self.DRM_UNLOCKED_ICON)
elif result.drm == SearchResult.DRM_UNKNOWN:
return (self.DRM_UNKNOWN_ICON)
if col == 5:
if result.downloads:
return (self.DOWNLOAD_ICON)
if col == 6:
if result.affiliate:
return (self.DONATE_ICON)
elif role == Qt.ToolTipRole:
if col == 1:
return ('<p>%s</p>' % result.title)
elif col == 2:
return ('<p>' + _('Detected price as: %s. Check with the store before making a purchase to verify this price is correct. This price often does not include promotions the store may be running.') % result.price + '</p>') # noqa
elif col == 3:
if result.drm == SearchResult.DRM_LOCKED:
                    return ('<p>' + _('This book has been detected as having DRM restrictions. This book may not work with your reader and you will have limitations placed upon you as to what you can do with this book. Check with the store before making any purchases to ensure you can actually read this book.') + '</p>') # noqa
elif result.drm == SearchResult.DRM_UNLOCKED:
return ('<p>' + _('This book has been detected as being DRM Free. You should be able to use this book on any device provided it is in a format calibre supports for conversion. However, before making a purchase double check the DRM status with the store. The store may not be disclosing the use of DRM.') + '</p>') # noqa
else:
return ('<p>' + _('The DRM status of this book could not be determined. There is a very high likelihood that this book is actually DRM restricted.') + '</p>') # noqa
elif col == 4:
return ('<p>%s</p>' % result.formats)
elif col == 5:
if result.downloads:
return ('<p>' + _('The following formats can be downloaded directly: %s.') % ', '.join(result.downloads.keys()) + '</p>')
elif col == 6:
if result.affiliate:
return ('<p>' + _('Buying from this store supports the calibre developer: %s.') % result.plugin_author + '</p>')
elif role == Qt.SizeHintRole:
return QSize(64, 64)
return None
def data_as_text(self, result, col):
text = ''
if col == 1:
text = result.title
elif col == 2:
text = comparable_price(result.price)
elif col == 3:
if result.drm == SearchResult.DRM_UNLOCKED:
text = 'a'
            elif result.drm == SearchResult.DRM_LOCKED:
text = 'b'
else:
text = 'c'
elif col == 4:
text = result.store_name
elif col == 5:
if result.downloads:
text = 'a'
else:
text = 'b'
elif col == 6:
if result.affiliate:
text = 'a'
else:
text = 'b'
return text
def sort(self, col, order, reset=True):
self.sort_col = col
self.sort_order = order
if not self.matches:
return
descending = order == Qt.DescendingOrder
self.all_matches.sort(None,
lambda x: sort_key(unicode(self.data_as_text(x, col))),
descending)
self.reorder_matches()
if reset:
self.beginResetModel(), self.endResetModel()
def reorder_matches(self):
def keygen(x):
try:
return self.all_matches.index(x)
except:
return 100000
self.matches = sorted(self.matches, key=keygen)
class SearchFilter(SearchQueryParser):
CONTAINS_MATCH = 0
EQUALS_MATCH = 1
REGEXP_MATCH = 2
IN_MATCH = 3
USABLE_LOCATIONS = [
'all',
'affiliate',
'author',
'author2',
'authors',
'cover',
'download',
'downloads',
'drm',
'format',
'formats',
'price',
'title',
'title2',
'store',
]
def __init__(self):
SearchQueryParser.__init__(self, locations=self.USABLE_LOCATIONS)
self.srs = set([])
# remove joiner words surrounded by space or at string boundaries
self.joiner_pat = re.compile(r'(^|\s)(and|not|or|a|the|is|of)(\s|$)', re.IGNORECASE)
self.punctuation_table = {ord(x):' ' for x in string.punctuation}
def add_search_result(self, search_result):
self.srs.add(search_result)
def clear_search_results(self):
self.srs = set([])
def universal_set(self):
return self.srs
def _match(self, query, value, matchkind):
for t in value:
try: # ignore regexp exceptions, required because search-ahead tries before typing is finished
t = icu_lower(t)
if matchkind == self.EQUALS_MATCH:
if query == t:
return True
elif matchkind == self.REGEXP_MATCH:
if re.search(query, t, re.I|re.UNICODE):
return True
elif matchkind == self.CONTAINS_MATCH:
if query in t:
return True
elif matchkind == self.IN_MATCH:
if t in query:
return True
except re.error:
pass
return False
def get_matches(self, location, query):
query = query.strip()
location = location.lower().strip()
if location == 'authors':
location = 'author'
elif location == 'downloads':
location = 'download'
elif location == 'formats':
location = 'format'
matchkind = self.CONTAINS_MATCH
if len(query) > 1:
if query.startswith('\\'):
query = query[1:]
elif query.startswith('='):
matchkind = self.EQUALS_MATCH
query = query[1:]
elif query.startswith('~'):
matchkind = self.REGEXP_MATCH
query = query[1:]
if matchkind != self.REGEXP_MATCH: # leave case in regexps because it can be significant e.g. \S \W \D
query = query.lower()
if location not in self.USABLE_LOCATIONS:
return set([])
matches = set([])
all_locs = set(self.USABLE_LOCATIONS) - set(['all'])
locations = all_locs if location == 'all' else [location]
q = {
'affiliate': attrgetter('affiliate'),
'author': lambda x: x.author.lower(),
'cover': attrgetter('cover_url'),
'drm': attrgetter('drm'),
'download': attrgetter('downloads'),
'format': attrgetter('formats'),
'price': lambda x: comparable_price(x.price),
'store': lambda x: x.store_name.lower(),
'title': lambda x: x.title.lower(),
}
for x in ('author', 'download', 'format'):
q[x+'s'] = q[x]
q['author2'] = q['author']
q['title2'] = q['title']
# make the price in query the same format as result
if location == 'price':
query = comparable_price(query)
for sr in self.srs:
for locvalue in locations:
final_query = query
accessor = q[locvalue]
if query == 'true':
# True/False.
if locvalue == 'affiliate':
if accessor(sr):
matches.add(sr)
# Special that are treated as True/False.
elif locvalue == 'drm':
if accessor(sr) == SearchResult.DRM_LOCKED:
matches.add(sr)
# Testing for something or nothing.
else:
if accessor(sr) is not None:
matches.add(sr)
continue
if query == 'false':
# True/False.
if locvalue == 'affiliate':
if not accessor(sr):
matches.add(sr)
# Special that are treated as True/False.
elif locvalue == 'drm':
if accessor(sr) == SearchResult.DRM_UNLOCKED:
matches.add(sr)
# Testing for something or nothing.
else:
if accessor(sr) is None:
matches.add(sr)
continue
# this is bool or treated as bool, so can't match below.
if locvalue in ('affiliate', 'drm', 'download', 'downloads'):
continue
try:
# Can't separate authors because comma is used for name sep and author sep
# Exact match might not get what you want. For that reason, turn author
# exactmatch searches into contains searches.
if locvalue == 'author' and matchkind == self.EQUALS_MATCH:
m = self.CONTAINS_MATCH
else:
m = matchkind
if locvalue == 'format':
vals = accessor(sr).split(',')
elif locvalue in {'author2', 'title2'}:
m = self.IN_MATCH
vals = [x for x in self.field_trimmer(accessor(sr)).split() if x]
final_query = ' '.join(self.field_trimmer(icu_lower(query)).split())
else:
vals = [accessor(sr)]
if self._match(final_query, vals, m):
matches.add(sr)
break
except ValueError: # Unicode errors
import traceback
traceback.print_exc()
return matches
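    # Illustrative summary of the query prefixes handled above (comment added for
    # clarity; behaviour is taken from get_matches itself):
    #   '=foo'  -> EQUALS_MATCH   exact, case-insensitive comparison
    #   '~foo'  -> REGEXP_MATCH   regular expression (case kept in the pattern, re.I applied)
    #   '\=foo' -> CONTAINS_MATCH literal substring search for '=foo' (leading backslash stripped)
    #   'foo'   -> CONTAINS_MATCH substring search
    # Price queries are first normalized with comparable_price() so the query and the
    # stored price are compared in the same format.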
def field_trimmer(self, field):
        ''' Remove common joiner words and punctuation to improve matching;
        punctuation is removed first, so that a.and.b becomes a b. '''
return self.joiner_pat.sub(' ', field.translate(self.punctuation_table))
|
drxaero/calibre
|
src/calibre/gui2/store/search/models.py
|
Python
|
gpl-3.0
| 18,913
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_nic
short_description: Manages NICs and secondary IPs of an instance on Apache CloudStack based clouds.
description:
- Add and remove secondary IPs to and from a NIC.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
vm:
description:
- Name of instance.
required: true
aliases: ['name']
network:
description:
- Name of the network.
      - Required to find the NIC if the instance has multiple networks assigned.
required: false
default: null
vm_guest_ip:
description:
- Secondary IP address to be added to the instance nic.
- If not set, the API always returns a new IP address and idempotency is not given.
required: false
default: null
aliases: ['secondary_ip']
vpc:
description:
- Name of the VPC the C(vm) is related to.
required: false
default: null
domain:
description:
- Domain the instance is related to.
required: false
default: null
account:
description:
- Account the instance is related to.
required: false
default: null
project:
description:
- Name of the project the instance is deployed in.
required: false
default: null
zone:
description:
      - Name of the zone in which the instance is deployed.
- If not set, default zone is used.
required: false
default: null
state:
description:
      - State of the IP address.
required: false
default: "present"
choices: [ 'present', 'absent' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Assign a specific IP to the default NIC of the VM
- local_action:
module: cs_nic
vm: customer_xy
vm_guest_ip: 10.10.10.10
# Assign an IP to the default NIC of the VM
# Note: If vm_guest_ip is not set, you will get a new IP address on every run.
- local_action:
module: cs_nic
vm: customer_xy
# Remove a specific IP from the default NIC
- local_action:
module: cs_nic
vm: customer_xy
vm_guest_ip: 10.10.10.10
state: absent
'''
RETURN = '''
---
id:
  description: UUID of the NIC.
returned: success
type: string
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
vm:
description: Name of the VM.
returned: success
type: string
sample: web-01
ip_address:
description: Primary IP of the NIC.
returned: success
type: string
sample: 10.10.10.10
netmask:
description: Netmask of the NIC.
returned: success
type: string
sample: 255.255.255.0
mac_address:
description: MAC address of the NIC.
returned: success
type: string
sample: 02:00:33:31:00:e4
vm_guest_ip:
description: Secondary IP of the NIC.
returned: success
type: string
sample: 10.10.10.10
network:
description: Name of the network if not default.
returned: success
type: string
sample: sync network
domain:
description: Domain the VM is related to.
returned: success
type: string
sample: example domain
account:
description: Account the VM is related to.
returned: success
type: string
sample: example account
project:
description: Name of project the VM is related to.
returned: success
type: string
sample: Production
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackNic(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackNic, self).__init__(module)
self.vm_guest_ip = self.module.params.get('vm_guest_ip')
self.nic = None
self.returns = {
'ipaddress': 'ip_address',
'macaddress': 'mac_address',
'netmask': 'netmask',
}
def get_nic(self):
if self.nic:
return self.nic
args = {
'virtualmachineid': self.get_vm(key='id'),
            'networkid': self.get_network(key='id'),
}
nics = self.cs.listNics(**args)
if nics:
self.nic = nics['nic'][0]
return self.nic
self.module.fail_json(msg="NIC for VM %s in network %s not found" %(self.get_vm(key='name'), self.get_network(key='name')))
def get_secondary_ip(self):
nic = self.get_nic()
if self.vm_guest_ip:
secondary_ips = nic.get('secondaryip') or []
for secondary_ip in secondary_ips:
if secondary_ip['ipaddress'] == self.vm_guest_ip:
return secondary_ip
return None
def present_nic(self):
nic = self.get_nic()
if not self.get_secondary_ip():
self.result['changed'] = True
args = {
'nicid': nic['id'],
'ipaddress': self.vm_guest_ip,
}
if not self.module.check_mode:
res = self.cs.addIpToNic(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
nic = self.poll_job(res, 'nicsecondaryip')
# Save result for RETURNS
self.vm_guest_ip = nic['ipaddress']
return nic
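    # Note on idempotency (comment added for clarity): present_nic() only calls
    # addIpToNic when get_secondary_ip() finds no existing secondary IP matching
    # vm_guest_ip. If vm_guest_ip is not set, get_secondary_ip() returns None and
    # CloudStack allocates a fresh secondary IP on every run, as warned in the
    # DOCUMENTATION above.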
def absent_nic(self):
nic = self.get_nic()
secondary_ip = self.get_secondary_ip()
if secondary_ip:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.removeIpFromNic(id=secondary_ip['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % nic['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'nicsecondaryip')
return nic
def get_result(self, nic):
super(AnsibleCloudStackNic, self).get_result(nic)
if nic and not self.module.params.get('network'):
self.module.params['network'] = nic.get('networkid')
self.result['network'] = self.get_network(key='name')
self.result['vm'] = self.get_vm(key='name')
self.result['vm_guest_ip'] = self.vm_guest_ip
self.result['domain'] = self.get_domain(key='path')
self.result['account'] = self.get_account(key='name')
self.result['project'] = self.get_project(key='name')
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
vm=dict(required=True, aliases=['name']),
vm_guest_ip=dict(default=None, aliases=['secondary_ip']),
network=dict(default=None),
vpc=dict(default=None),
state=dict(choices=['present', 'absent'], default='present'),
domain=dict(default=None),
account=dict(default=None),
project=dict(default=None),
zone=dict(default=None),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True,
required_if=([
('state', 'absent', ['vm_guest_ip'])
])
)
try:
acs_nic = AnsibleCloudStackNic(module)
state = module.params.get('state')
if state == 'absent':
nic = acs_nic.absent_nic()
else:
nic = acs_nic.present_nic()
result = acs_nic.get_result(nic)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
t0mk/ansible
|
lib/ansible/modules/cloud/cloudstack/cs_nic.py
|
Python
|
gpl-3.0
| 8,594
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
# Copyright: (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: openssl_privatekey_info
version_added: '2.8'
short_description: Provide information for OpenSSL private keys
description:
- This module allows one to query information on OpenSSL private keys.
- In case the key consistency checks fail, the module will fail as this indicates a faked
private key. In this case, all return variables are still returned. Note that key consistency
      checks are not available for all key types; if none is available, C(none) is returned for
C(key_is_consistent).
- It uses the pyOpenSSL or cryptography python library to interact with OpenSSL. If both the
cryptography and PyOpenSSL libraries are available (and meet the minimum version requirements)
cryptography will be preferred as a backend over PyOpenSSL (unless the backend is forced with
C(select_crypto_backend))
requirements:
- PyOpenSSL >= 0.15 or cryptography >= 1.2.3
author:
- Felix Fontein (@felixfontein)
- Yanis Guenane (@Spredzy)
options:
path:
description:
- Remote absolute path where the private key file is loaded from.
type: path
required: true
passphrase:
description:
- The passphrase for the private key.
type: str
return_private_key_data:
description:
- Whether to return private key data.
- Only set this to C(yes) when you want private information about this key to
leave the remote machine.
- "WARNING: you have to make sure that private key data isn't accidentally logged!"
type: bool
default: no
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
seealso:
- module: openssl_privatekey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
- name: Get information on generated key
openssl_privatekey_info:
path: /etc/ssl/private/ansible.com.pem
register: result
- name: Dump information
debug:
var: result
'''
RETURN = r'''
can_load_key:
description: Whether the module was able to load the private key from disk
returned: always
type: bool
can_parse_key:
description: Whether the module was able to parse the private key
returned: always
type: bool
key_is_consistent:
description:
- Whether the key is consistent. Can also return C(none) next to C(yes) and
C(no), to indicate that consistency couldn't be checked.
- In case the check returns C(no), the module will fail.
returned: always
type: bool
public_key:
description: Private key's public key in PEM format
returned: success
type: str
sample: "-----BEGIN PUBLIC KEY-----\nMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A..."
public_key_fingerprints:
description:
- Fingerprints of private key's public key.
- For every hash algorithm available, the fingerprint is computed.
returned: success
type: dict
sample: "{'sha256': 'd4:b3:aa:6d:c8:04:ce:4e:ba:f6:29:4d:92:a3:94:b0:c2:ff:bd:bf:33:63:11:43:34:0f:51:b0:95:09:2f:63',
'sha512': 'f7:07:4a:f0:b0:f0:e6:8b:95:5f:f9:e6:61:0a:32:68:f1..."
type:
description:
- The key's type.
- One of C(RSA), C(DSA), C(ECC), C(Ed25519), C(X25519), C(Ed448), or C(X448).
- Will start with C(unknown) if the key type cannot be determined.
returned: success
type: str
sample: RSA
public_data:
description:
- Public key data. Depends on key type.
returned: success
type: dict
private_data:
description:
- Private key data. Depends on key type.
returned: success and when I(return_private_key_data) is set to C(yes)
type: dict
'''
import abc
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
MINIMAL_PYOPENSSL_VERSION = '0.15'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
from cryptography.hazmat.primitives import serialization
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
try:
import cryptography.hazmat.primitives.asymmetric.x25519
CRYPTOGRAPHY_HAS_X25519 = True
except ImportError:
CRYPTOGRAPHY_HAS_X25519 = False
try:
import cryptography.hazmat.primitives.asymmetric.x448
CRYPTOGRAPHY_HAS_X448 = True
except ImportError:
CRYPTOGRAPHY_HAS_X448 = False
try:
import cryptography.hazmat.primitives.asymmetric.ed25519
CRYPTOGRAPHY_HAS_ED25519 = True
except ImportError:
CRYPTOGRAPHY_HAS_ED25519 = False
try:
import cryptography.hazmat.primitives.asymmetric.ed448
CRYPTOGRAPHY_HAS_ED448 = True
except ImportError:
CRYPTOGRAPHY_HAS_ED448 = False
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
SIGNATURE_TEST_DATA = b'1234'
def _get_cryptography_key_info(key):
key_public_data = dict()
key_private_data = dict()
if isinstance(key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
key_type = 'RSA'
key_public_data['size'] = key.key_size
key_public_data['modulus'] = key.public_key().public_numbers().n
key_public_data['exponent'] = key.public_key().public_numbers().e
key_private_data['p'] = key.private_numbers().p
key_private_data['q'] = key.private_numbers().q
key_private_data['exponent'] = key.private_numbers().d
elif isinstance(key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
key_type = 'DSA'
key_public_data['size'] = key.key_size
key_public_data['p'] = key.parameters().parameter_numbers().p
key_public_data['q'] = key.parameters().parameter_numbers().q
key_public_data['g'] = key.parameters().parameter_numbers().g
key_public_data['y'] = key.public_key().public_numbers().y
key_private_data['x'] = key.private_numbers().x
elif CRYPTOGRAPHY_HAS_X25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
key_type = 'X25519'
elif CRYPTOGRAPHY_HAS_X448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
key_type = 'X448'
elif CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
key_type = 'Ed25519'
elif CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
key_type = 'Ed448'
elif isinstance(key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
key_type = 'ECC'
key_public_data['curve'] = key.public_key().curve.name
key_public_data['x'] = key.public_key().public_numbers().x
key_public_data['y'] = key.public_key().public_numbers().y
key_public_data['exponent_size'] = key.public_key().curve.key_size
key_private_data['multiplier'] = key.private_numbers().private_value
else:
key_type = 'unknown ({0})'.format(type(key))
return key_type, key_public_data, key_private_data
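# Illustrative example of the return shape (comment added for clarity): for a
# 2048-bit RSA private key the function above returns roughly
#   ('RSA',
#    {'size': 2048, 'modulus': n, 'exponent': e},   # public data
#    {'p': p, 'q': q, 'exponent': d})               # private data
# whereas key types without extractable numbers (e.g. X25519) return empty dicts.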
def _check_dsa_consistency(key_public_data, key_private_data):
# Get parameters
p = key_public_data.get('p')
q = key_public_data.get('q')
g = key_public_data.get('g')
y = key_public_data.get('y')
x = key_private_data.get('x')
for v in (p, q, g, y, x):
if v is None:
return None
# Make sure that g is not 0, 1 or -1 in Z/pZ
if g < 2 or g >= p - 1:
return False
# Make sure that x is in range
if x < 1 or x >= q:
return False
# Check whether q divides p-1
if (p - 1) % q != 0:
return False
# Check that g**q mod p == 1
if crypto_utils.binary_exp_mod(g, q, p) != 1:
return False
# Check whether g**x mod p == y
if crypto_utils.binary_exp_mod(g, x, p) != y:
return False
# Check (quickly) whether p or q are not primes
if crypto_utils.quick_is_not_prime(q) or crypto_utils.quick_is_not_prime(p):
return False
return True
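# Worked toy example (comment added for clarity; textbook-sized numbers only):
# with p=23, q=11, g=2, y=8, x=3 every check above passes:
#   2 <= g < p-1, 1 <= x < q, (p-1) % q == 0,
#   g**q % p == 2**11 % 23 == 1, g**x % p == 2**3 % 23 == 8 == y,
# and both 23 and 11 are prime, so the function returns True for this parameter set.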
def _is_cryptography_key_consistent(key, key_public_data, key_private_data):
if isinstance(key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
return bool(key._backend._lib.RSA_check_key(key._rsa_cdata))
if isinstance(key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
result = _check_dsa_consistency(key_public_data, key_private_data)
if result is not None:
return result
try:
signature = key.sign(SIGNATURE_TEST_DATA, cryptography.hazmat.primitives.hashes.SHA256())
except AttributeError:
# sign() was added in cryptography 1.5, but we support older versions
return None
try:
key.public_key().verify(
signature,
SIGNATURE_TEST_DATA,
cryptography.hazmat.primitives.hashes.SHA256()
)
return True
except cryptography.exceptions.InvalidSignature:
return False
if isinstance(key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
try:
signature = key.sign(
SIGNATURE_TEST_DATA,
cryptography.hazmat.primitives.asymmetric.ec.ECDSA(cryptography.hazmat.primitives.hashes.SHA256())
)
except AttributeError:
# sign() was added in cryptography 1.5, but we support older versions
return None
try:
key.public_key().verify(
signature,
SIGNATURE_TEST_DATA,
cryptography.hazmat.primitives.asymmetric.ec.ECDSA(cryptography.hazmat.primitives.hashes.SHA256())
)
return True
except cryptography.exceptions.InvalidSignature:
return False
has_simple_sign_function = False
if CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
has_simple_sign_function = True
if CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
has_simple_sign_function = True
if has_simple_sign_function:
signature = key.sign(SIGNATURE_TEST_DATA)
try:
key.public_key().verify(signature, SIGNATURE_TEST_DATA)
return True
except cryptography.exceptions.InvalidSignature:
return False
# For X25519 and X448, there's no test yet.
return None
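# Summary of the strategy above (comment added for clarity): RSA keys are checked with
# OpenSSL's RSA_check_key, DSA keys with the arithmetic checks in _check_dsa_consistency
# (falling back to a sign/verify round trip), EC and Ed25519/Ed448 keys with a
# sign/verify round trip over SIGNATURE_TEST_DATA, and X25519/X448 keys return None
# because no consistency test is implemented for them.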
class PrivateKeyInfo(crypto_utils.OpenSSLObject):
def __init__(self, module, backend):
super(PrivateKeyInfo, self).__init__(
module.params['path'],
'present',
False,
module.check_mode,
)
self.backend = backend
self.module = module
self.passphrase = module.params['passphrase']
self.return_private_key_data = module.params['return_private_key_data']
def generate(self):
# Empty method because crypto_utils.OpenSSLObject wants this
pass
def dump(self):
# Empty method because crypto_utils.OpenSSLObject wants this
pass
@abc.abstractmethod
def _get_public_key(self, binary):
pass
@abc.abstractmethod
def _get_key_info(self):
pass
@abc.abstractmethod
def _is_key_consistent(self, key_public_data, key_private_data):
pass
def get_info(self):
result = dict(
can_load_key=False,
can_parse_key=False,
key_is_consistent=None,
)
try:
with open(self.path, 'rb') as b_priv_key_fh:
priv_key_detail = b_priv_key_fh.read()
result['can_load_key'] = True
except (IOError, OSError) as exc:
self.module.fail_json(msg=to_native(exc), **result)
try:
self.key = crypto_utils.load_privatekey(
path=None,
content=priv_key_detail,
passphrase=to_bytes(self.passphrase) if self.passphrase is not None else self.passphrase,
backend=self.backend
)
result['can_parse_key'] = True
except crypto_utils.OpenSSLObjectError as exc:
self.module.fail_json(msg=to_native(exc), **result)
result['public_key'] = self._get_public_key(binary=False)
pk = self._get_public_key(binary=True)
result['public_key_fingerprints'] = crypto_utils.get_fingerprint_of_bytes(pk) if pk is not None else dict()
key_type, key_public_data, key_private_data = self._get_key_info()
result['type'] = key_type
result['public_data'] = key_public_data
if self.return_private_key_data:
result['private_data'] = key_private_data
result['key_is_consistent'] = self._is_key_consistent(key_public_data, key_private_data)
if result['key_is_consistent'] is False:
            # Only fail when it is False, to avoid failing on None (which means "we don't know")
result['key_is_consistent'] = False
self.module.fail_json(
msg="Private key is not consistent! (See "
"https://blog.hboeck.de/archives/888-How-I-tricked-Symantec-with-a-Fake-Private-Key.html)",
**result
)
return result
class PrivateKeyInfoCryptography(PrivateKeyInfo):
"""Validate the supplied private key, using the cryptography backend"""
def __init__(self, module):
super(PrivateKeyInfoCryptography, self).__init__(module, 'cryptography')
def _get_public_key(self, binary):
return self.key.public_key().public_bytes(
serialization.Encoding.DER if binary else serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo
)
def _get_key_info(self):
return _get_cryptography_key_info(self.key)
def _is_key_consistent(self, key_public_data, key_private_data):
return _is_cryptography_key_consistent(self.key, key_public_data, key_private_data)
class PrivateKeyInfoPyOpenSSL(PrivateKeyInfo):
"""validate the supplied private key."""
def __init__(self, module):
super(PrivateKeyInfoPyOpenSSL, self).__init__(module, 'pyopenssl')
def _get_public_key(self, binary):
try:
return crypto.dump_publickey(
crypto.FILETYPE_ASN1 if binary else crypto.FILETYPE_PEM,
self.key
)
except AttributeError:
try:
# pyOpenSSL < 16.0:
bio = crypto._new_mem_buf()
if binary:
rc = crypto._lib.i2d_PUBKEY_bio(bio, self.key._pkey)
else:
rc = crypto._lib.PEM_write_bio_PUBKEY(bio, self.key._pkey)
if rc != 1:
crypto._raise_current_error()
return crypto._bio_to_string(bio)
except AttributeError:
self.module.warn('Your pyOpenSSL version does not support dumping public keys. '
'Please upgrade to version 16.0 or newer, or use the cryptography backend.')
def bigint_to_int(self, bn):
'''Convert OpenSSL BIGINT to Python integer'''
if bn == OpenSSL._util.ffi.NULL:
return None
try:
hex = OpenSSL._util.lib.BN_bn2hex(bn)
return int(OpenSSL._util.ffi.string(hex), 16)
finally:
OpenSSL._util.lib.OPENSSL_free(hex)
def _get_key_info(self):
key_public_data = dict()
key_private_data = dict()
openssl_key_type = self.key.type()
try_fallback = True
if crypto.TYPE_RSA == openssl_key_type:
key_type = 'RSA'
key_public_data['size'] = self.key.bits()
try:
# Use OpenSSL directly to extract key data
key = OpenSSL._util.lib.EVP_PKEY_get1_RSA(self.key._pkey)
key = OpenSSL._util.ffi.gc(key, OpenSSL._util.lib.RSA_free)
# OpenSSL 1.1 and newer have functions to extract the parameters
# from the EVP PKEY data structures. Older versions didn't have
# these getters, and it was common use to simply access the values
# directly. Since there's no guarantee that these data structures
# will still be accessible in the future, we use the getters for
# 1.1 and later, and directly access the values for 1.0.x and
# earlier.
if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
# Get modulus and exponents
n = OpenSSL._util.ffi.new("BIGNUM **")
e = OpenSSL._util.ffi.new("BIGNUM **")
d = OpenSSL._util.ffi.new("BIGNUM **")
OpenSSL._util.lib.RSA_get0_key(key, n, e, d)
key_public_data['modulus'] = self.bigint_to_int(n[0])
key_public_data['exponent'] = self.bigint_to_int(e[0])
key_private_data['exponent'] = self.bigint_to_int(d[0])
# Get factors
p = OpenSSL._util.ffi.new("BIGNUM **")
q = OpenSSL._util.ffi.new("BIGNUM **")
OpenSSL._util.lib.RSA_get0_factors(key, p, q)
key_private_data['p'] = self.bigint_to_int(p[0])
key_private_data['q'] = self.bigint_to_int(q[0])
else:
# Get modulus and exponents
key_public_data['modulus'] = self.bigint_to_int(key.n)
key_public_data['exponent'] = self.bigint_to_int(key.e)
key_private_data['exponent'] = self.bigint_to_int(key.d)
# Get factors
key_private_data['p'] = self.bigint_to_int(key.p)
key_private_data['q'] = self.bigint_to_int(key.q)
try_fallback = False
except AttributeError:
# Use fallback if available
pass
elif crypto.TYPE_DSA == openssl_key_type:
key_type = 'DSA'
key_public_data['size'] = self.key.bits()
try:
# Use OpenSSL directly to extract key data
key = OpenSSL._util.lib.EVP_PKEY_get1_DSA(self.key._pkey)
key = OpenSSL._util.ffi.gc(key, OpenSSL._util.lib.DSA_free)
# OpenSSL 1.1 and newer have functions to extract the parameters
# from the EVP PKEY data structures. Older versions didn't have
# these getters, and it was common use to simply access the values
# directly. Since there's no guarantee that these data structures
# will still be accessible in the future, we use the getters for
# 1.1 and later, and directly access the values for 1.0.x and
# earlier.
if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
# Get public parameters (primes and group element)
p = OpenSSL._util.ffi.new("BIGNUM **")
q = OpenSSL._util.ffi.new("BIGNUM **")
g = OpenSSL._util.ffi.new("BIGNUM **")
OpenSSL._util.lib.DSA_get0_pqg(key, p, q, g)
key_public_data['p'] = self.bigint_to_int(p[0])
key_public_data['q'] = self.bigint_to_int(q[0])
key_public_data['g'] = self.bigint_to_int(g[0])
# Get public and private key exponents
y = OpenSSL._util.ffi.new("BIGNUM **")
x = OpenSSL._util.ffi.new("BIGNUM **")
OpenSSL._util.lib.DSA_get0_key(key, y, x)
key_public_data['y'] = self.bigint_to_int(y[0])
key_private_data['x'] = self.bigint_to_int(x[0])
else:
# Get public parameters (primes and group element)
key_public_data['p'] = self.bigint_to_int(key.p)
key_public_data['q'] = self.bigint_to_int(key.q)
key_public_data['g'] = self.bigint_to_int(key.g)
# Get public and private key exponents
key_public_data['y'] = self.bigint_to_int(key.pub_key)
key_private_data['x'] = self.bigint_to_int(key.priv_key)
try_fallback = False
except AttributeError:
# Use fallback if available
pass
else:
# Return 'unknown'
key_type = 'unknown ({0})'.format(self.key.type())
# If needed and if possible, fall back to cryptography
if try_fallback and PYOPENSSL_VERSION >= LooseVersion('16.1.0') and CRYPTOGRAPHY_FOUND:
return _get_cryptography_key_info(self.key.to_cryptography_key())
return key_type, key_public_data, key_private_data
def _is_key_consistent(self, key_public_data, key_private_data):
openssl_key_type = self.key.type()
if crypto.TYPE_RSA == openssl_key_type:
try:
return self.key.check()
except crypto.Error:
# OpenSSL error means that key is not consistent
return False
if crypto.TYPE_DSA == openssl_key_type:
result = _check_dsa_consistency(key_public_data, key_private_data)
if result is not None:
return result
signature = crypto.sign(self.key, SIGNATURE_TEST_DATA, 'sha256')
# Verify wants a cert (where it can get the public key from)
cert = crypto.X509()
cert.set_pubkey(self.key)
try:
crypto.verify(cert, signature, SIGNATURE_TEST_DATA, 'sha256')
return True
except crypto.Error:
return False
# If needed and if possible, fall back to cryptography
if PYOPENSSL_VERSION >= LooseVersion('16.1.0') and CRYPTOGRAPHY_FOUND:
return _is_cryptography_key_consistent(self.key.to_cryptography_key(), key_public_data, key_private_data)
return None
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', required=True),
passphrase=dict(type='str', no_log=True),
return_private_key_data=dict(type='bool', default=False),
select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
),
supports_check_mode=True,
)
try:
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
                msg='The directory %s does not exist or is not a directory' % base_dir
)
backend = module.params['select_crypto_backend']
if backend == 'auto':
# Detect what backend we can use
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# If cryptography is available we'll use it
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
backend = 'pyopenssl'
# Fail if no backend has been found
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
MINIMAL_CRYPTOGRAPHY_VERSION,
MINIMAL_PYOPENSSL_VERSION))
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
privatekey = PrivateKeyInfoPyOpenSSL(module)
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
privatekey = PrivateKeyInfoCryptography(module)
result = privatekey.get_info()
module.exit_json(**result)
except crypto_utils.OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
main()
|
pgmillon/ansible
|
lib/ansible/modules/crypto/openssl_privatekey_info.py
|
Python
|
gpl-3.0
| 26,377
|
import os
# display
DISPTYPE = 'pygame'
DISPSIZE = (1920,1080)
# rumble on/off
RUMBLE = True
# we can only use the rumble function in Windows
if os.name != u'nt':
RUMBLE = False
|
smathot/PyGaze
|
examples/joystick_test/constants.py
|
Python
|
gpl-3.0
| 192
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigiq_device_facts
short_description: Collect facts from F5 BIG-IQ devices
description:
- Collect facts from F5 BIG-IQ devices.
version_added: 2.8
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts returned to a given subset.
- Can specify a list of values to include a larger subset.
- Values can also be used with an initial C(!) to specify that a specific subset
should not be collected.
required: True
choices:
- all
- applications
- managed-devices
- purchased-pool-licenses
- regkey-pools
- system-info
- vlans
- "!all"
- "!applications"
- "!managed-devices"
- "!purchased-pool-licenses"
- "!regkey-pools"
- "!system-info"
- "!vlans"
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Collect BIG-IQ facts
bigiq_device_facts:
gather_subset:
- system-info
- vlans
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Collect all BIG-IQ facts
bigiq_device_facts:
gather_subset:
- all
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Collect all BIG-IP facts except trunks
bigiq_device_facts:
gather_subset:
- all
- "!trunks"
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
applications:
description: Application related facts
  returned: When C(applications) is specified in C(gather_subset).
type: complex
contains:
protection_mode:
description:
- The type of F5 Web Application Security Service protection on the application.
returned: changed
type: string
sample: Not Protected
id:
description:
- ID of the application as known to the BIG-IQ.
returned: changed
type: string
sample: 996baae8-5d1d-3662-8a2d-3612fa2aceae
name:
description:
- Name of the application.
returned: changed
type: string
sample: site12http.example.com
status:
description:
- Current state of the application.
returned: changed
type: string
sample: DEPLOYED
transactions_per_second:
description:
- Current measurement of Transactions Per second being handled by the application.
returned: changed
type: float
sample: 0.87
connections:
description:
- Current number of connections established to the application.
returned: changed
type: float
sample: 3.06
new_connections:
description:
- Number of new connections being established per second.
returned: changed
type: float
sample: 0.35
response_time:
description:
- Measured response time of the application in milliseconds.
returned: changed
type: float
sample: 0.02
health:
description:
- Health of the application.
returned: changed
type: string
sample: Good
active_alerts:
description:
- Number of alerts active on the application.
returned: changed
type: int
sample: 0
bad_traffic:
description:
- Percent of traffic to application that is determined to be 'bad'.
- This value is dependent on C(protection_mode) being enabled.
returned: changed
type: float
sample: 1.7498
enhanced_analytics:
description:
- Whether enhanced analytics is enabled for the application or not.
returned: changed
type: bool
sample: yes
bad_traffic_growth:
description:
        - Whether Bad Traffic Growth alerts are configured to be triggered or not.
returned: changed
type: bool
sample: no
sample: hash/dictionary of values
managed_devices:
description: Managed device related facts.
returned: When C(managed-devices) is specified in C(gather_subset).
type: complex
contains:
address:
description:
- Address where the device was discovered.
returned: changed
type: string
sample: 10.10.10.10
build:
description:
- Build of the version.
returned: changed
type: string
sample: 0.0.4
device_uri:
description:
- URI to reach the management interface of the device.
returned: changed
type: string
sample: "https://10.10.10.10:443"
edition:
description:
- Edition string of the product version.
returned: changed
type: string
sample: Final
group_name:
description:
- BIG-IQ group that the device is a member of.
returned: changed
type: string
sample: cm-bigip-allBigIpDevices
hostname:
description:
- Discovered hostname of the device.
returned: changed
type: string
sample: tier2labB1.lab.fp.foo.com
https_port:
description:
- HTTPS port available on the management interface of the device.
returned: changed
type: int
sample: 443
is_clustered:
description:
- Whether the device is clustered or not.
returned: changed
type: bool
sample: no
is_license_expired:
description:
- Whether the license on the device is expired or not.
returned: changed
type: bool
sample: yes
is_virtual:
description:
- Whether the device is a virtual edition or not.
returned: changed
type: bool
sample: yes
machine_id:
description:
- Machine specific ID assigned to this device by BIG-IQ.
returned: changed
type: string
sample: c141bc88-f734-4434-be64-a3e9ea98356e
management_address:
description:
- IP address of the management interface on the device.
returned: changed
type: string
sample: 10.10.10.10
mcp_device_name:
description:
- Device name as known by MCPD on the BIG-IP.
returned: changed
type: string
sample: /Common/tier2labB1.lab.fp.foo.com
product:
description:
- Product that the managed device is identified as.
returned: changed
type: string
sample: BIG-IP
rest_framework_version:
description:
- REST framework version running on the device
returned: changed
type: string
sample: 13.1.1-0.0.4
self_link:
description:
- Internal reference to the managed device in BIG-IQ.
returned: changed
type: string
sample: "https://localhost/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/c141bc88-f734-4434-be64-a3e9ea98356e"
slots:
description:
- Volumes on the device and versions of software installed in those volumes.
returned: changed
type: complex
sample: {"volume": "HD1.1", "product": "BIG-IP", "version": "13.1.1", "build": "0.0.4", "isActive": "yes"}
state:
description:
- State of the device.
returned: changed
type: string
sample: ACTIVE
tags:
description:
- Misc tags that are assigned to the device.
returned: changed
type: complex
sample: {'BIGIQ_tier_2_device': '2018-08-22T13:30:47.693-07:00', 'BIGIQ_SSG_name': 'tim-ssg'}
trust_domain_guid:
description:
- GUID of the trust domain the device is part of.
returned: changed
type: string
sample: 40ddf541-e604-4905-bde3005056813e36
uuid:
description:
- UUID of the device in BIG-IQ.
returned: changed
type: string
sample: c141bc88-f734-4434-be64-a3e9ea98356e
version:
description:
- Version of TMOS installed on the device.
returned: changed
type: string
sample: 13.1.1
sample: hash/dictionary of values
purchased_pool_licenses:
description: Purchased Pool License related facts.
returned: When C(purchased-pool-licenses) is specified in C(gather_subset).
type: complex
contains:
base_reg_key:
description:
- Base registration key of the purchased pool
returned: changed
type: string
sample: XXXXX-XXXXX-XXXXX-XXXXX-XXXXXXX
dossier:
description:
- Dossier of the purchased pool license
returned: changed
type: string
sample: d6bd4b8ba5...e9a1a1199b73af9932948a
free_device_licenses:
description:
- Number of free licenses remaining.
returned: changed
type: int
sample: 34
name:
description:
- Name of the purchased pool
returned: changed
type: string
sample: my-pool1
state:
description:
- State of the purchased pool license
returned: changed
type: string
sample: LICENSED
total_device_licenses:
description:
- Total number of licenses in the pool.
returned: changed
type: int
sample: 40
uuid:
description:
- UUID of the purchased pool license
returned: changed
type: string
sample: b2112329-cba7-4f1f-9a26-fab9be416d60
vendor:
description:
- Vendor who provided the license
returned: changed
type: string
sample: F5 Networks, Inc
licensed_date_time:
description:
- Timestamp that the pool was licensed.
returned: changed
type: string
sample: "2018-09-10T00:00:00-07:00"
licensed_version:
description:
- Version of BIG-IQ that is licensed.
returned: changed
type: string
sample: 6.0.1
evaluation_start_date_time:
description:
- Date that evaluation license starts.
returned: changed
type: string
sample: "2018-09-09T00:00:00-07:00"
evaluation_end_date_time:
description:
- Date that evaluation license ends.
returned: changed
type: string
sample: "2018-10-11T00:00:00-07:00"
license_end_date_time:
description:
- Date that the license expires.
returned: changed
type: string
sample: "2018-10-11T00:00:00-07:00"
license_start_date_time:
description:
- Date that the license starts.
returned: changed
type: string
sample: "2018-09-09T00:00:00-07:00"
registration_key:
description:
- Purchased pool license key.
returned: changed
type: string
sample: XXXXX-XXXXX-XXXXX-XXXXX-XXXXXXX
sample: hash/dictionary of values
regkey_pools:
description: Regkey Pool related facts.
returned: When C(regkey-pools) is specified in C(gather_subset).
type: complex
contains:
name:
description:
- Name of the regkey pool.
returned: changed
type: string
sample: pool1
id:
description:
- ID of the regkey pool.
returned: changed
type: string
sample: 4f9b565c-0831-4657-b6c2-6dde6182a502
total_offerings:
description:
- Total number of offerings in the pool
returned: changed
type: int
sample: 10
offerings:
description: List of the offerings in the pool.
type: complex
contains:
dossier:
description:
- Dossier of the license.
returned: changed
type: string
sample: d6bd4b8ba5...e9a1a1199b73af9932948a
name:
description:
- Name of the regkey.
returned: changed
type: string
sample: regkey1
state:
description:
- State of the regkey license
returned: changed
type: string
sample: LICENSED
licensed_date_time:
description:
- Timestamp that the regkey was licensed.
returned: changed
type: string
sample: "2018-09-10T00:00:00-07:00"
licensed_version:
description:
- Version of BIG-IQ that is licensed.
returned: changed
type: string
sample: 6.0.1
evaluation_start_date_time:
description:
- Date that evaluation license starts.
returned: changed
type: string
sample: "2018-09-09T00:00:00-07:00"
evaluation_end_date_time:
description:
- Date that evaluation license ends.
returned: changed
type: string
sample: "2018-10-11T00:00:00-07:00"
license_end_date_time:
description:
- Date that the license expires.
returned: changed
type: string
sample: "2018-10-11T00:00:00-07:00"
license_start_date_time:
description:
- Date that the license starts.
returned: changed
type: string
sample: "2018-09-09T00:00:00-07:00"
registration_key:
description:
- Registration license key.
returned: changed
type: string
sample: XXXXX-XXXXX-XXXXX-XXXXX-XXXXXXX
sample: hash/dictionary of values
sample: hash/dictionary of values
system_info:
description: System info related facts.
returned: When C(system-info) is specified in C(gather_subset).
type: complex
contains:
base_mac_address:
description:
- Media Access Control address (MAC address) of the device.
returned: changed
type: string
sample: "fa:16:3e:c3:42:6f"
marketing_name:
description:
- Marketing name of the device platform.
returned: changed
type: string
sample: BIG-IQ Virtual Edition
time:
description:
- Mapping of the current time information to specific time-named keys.
returned: changed
type: complex
contains:
day:
description:
- The current day of the month, in numeric form.
returned: changed
type: int
sample: 7
hour:
description:
- The current hour of the day in 24-hour form.
returned: changed
type: int
sample: 18
minute:
description:
- The current minute of the hour.
returned: changed
type: int
sample: 16
month:
description:
- The current month, in numeric form.
returned: changed
type: int
sample: 6
second:
description:
- The current second of the minute.
returned: changed
type: int
sample: 51
year:
description:
- The current year in 4-digit form.
returned: changed
type: int
sample: 2018
hardware_information:
description:
- Information related to the hardware (drives and CPUs) of the system.
type: complex
returned: changed
contains:
model:
description:
- The model of the hardware.
type: string
sample: Virtual Disk
name:
description:
- The name of the hardware.
type: string
sample: HD1
type:
description:
- The type of hardware.
type: string
sample: physical-disk
versions:
description:
- Hardware specific properties
type: complex
contains:
name:
description:
- Name of the property
type: string
sample: Size
version:
description:
- Value of the property
type: string
sample: 154.00G
is_admin_password_changed:
description:
- Whether the admin password was changed from its default or not.
returned: changed
type: bool
sample: yes
is_root_password_changed:
description:
- Whether the root password was changed from its default or not.
returned: changed
type: bool
sample: no
is_system_setup:
description:
- Whether the system has been setup or not.
returned: changed
type: bool
sample: yes
package_edition:
description:
- Displays the software edition.
returned: changed
type: string
sample: Point Release 7
package_version:
description:
- A string combining the C(product_build) and C(product_build_date).
type: string
sample: "Build 0.0.1 - Tue May 15 15:26:30 PDT 2018"
product_code:
description:
- Code identifying the product.
type: string
sample: BIG-IQ
product_build:
description:
- Build version of the release version.
type: string
sample: 0.0.1
product_version:
description:
- Major product version of the running software.
type: string
sample: 6.0.0
product_built:
description:
- Unix timestamp of when the product was built.
type: int
sample: 180515152630
product_build_date:
description:
- Human readable build date.
type: string
sample: "Tue May 15 15:26:30 PDT 2018"
product_changelist:
description:
- Changelist that product branches from.
type: int
sample: 2557198
product_jobid:
description:
- ID of the job that built the product version.
type: int
sample: 1012030
chassis_serial:
description:
- Serial of the chassis
type: string
sample: 11111111-2222-3333-444444444444
host_board_part_revision:
description:
- Revision of the host board.
type: string
host_board_serial:
description:
- Serial of the host board.
type: string
platform:
description:
- Platform identifier.
type: string
sample: Z100
switch_board_part_revision:
description:
- Switch board revision.
type: string
switch_board_serial:
description:
- Serial of the switch board.
type: string
uptime:
description:
- Time, in seconds, since the system booted.
type: int
sample: 603202
sample: hash/dictionary of values
vlans:
description: List of VLAN facts.
returned: When C(vlans) is specified in C(gather_subset).
type: complex
contains:
auto_lasthop:
description:
- Allows the system to send return traffic to the MAC address that transmitted the
request, even if the routing table points to a different network or interface.
returned: changed
type: string
sample: enabled
cmp_hash_algorithm:
description:
- Specifies how the traffic on the VLAN will be disaggregated.
returned: changed
type: string
sample: default
description:
description:
- Description of the VLAN.
returned: changed
type: string
sample: My vlan
failsafe_action:
description:
- Action for the system to take when the fail-safe mechanism is triggered.
returned: changed
type: string
sample: reboot
failsafe_enabled:
description:
- Whether failsafe is enabled or not.
returned: changed
type: bool
sample: yes
failsafe_timeout:
description:
- Number of seconds that an active unit can run without detecting network traffic
on this VLAN before it starts a failover.
returned: changed
type: int
sample: 90
if_index:
description:
- Index assigned to this VLAN. It is a unique identifier assigned for all objects
displayed in the SNMP IF-MIB.
returned: changed
type: int
sample: 176
learning_mode:
description:
- Whether switch ports placed in the VLAN are configured for switch learning,
forwarding only, or dropped.
returned: changed
type: string
sample: enable-forward
interfaces:
description:
- List of tagged or untagged interfaces and trunks that you want to configure for the VLAN.
returned: changed
type: complex
contains:
full_path:
description:
- Full name of the resource as known to BIG-IP.
returned: changed
type: string
sample: 1.3
name:
description:
- Relative name of the resource in BIG-IP.
returned: changed
type: string
sample: 1.3
tagged:
description:
- Whether the interface is tagged or not.
returned: changed
type: bool
sample: no
mtu:
description:
- Specific maximum transition unit (MTU) for the VLAN.
returned: changed
type: int
sample: 1500
sflow_poll_interval:
description:
- Maximum interval in seconds between two pollings.
returned: changed
type: int
sample: 0
sflow_poll_interval_global:
description:
        - Whether the global VLAN poll-interval setting overrides the object-level
          poll-interval setting.
returned: changed
type: bool
sample: no
sflow_sampling_rate:
description:
- Ratio of packets observed to the samples generated.
returned: changed
type: int
sample: 0
sflow_sampling_rate_global:
description:
        - Whether the global VLAN sampling-rate setting overrides the object-level
          sampling-rate setting.
returned: changed
type: bool
sample: yes
source_check_enabled:
description:
- Specifies that only connections that have a return route in the routing table are accepted.
returned: changed
type: bool
sample: yes
true_mac_address:
description:
- Media access control (MAC) address for the lowest-numbered interface assigned to this VLAN.
returned: changed
type: string
sample: "fa:16:3e:10:da:ff"
tag:
description:
- Tag number for the VLAN.
returned: changed
type: int
sample: 30
sample: hash/dictionary of values
'''
import datetime
import math
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six import string_types
try:
from library.module_utils.network.f5.bigiq import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.ipaddress import is_valid_ip
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigiq import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
from ansible.module_utils.network.f5.common import transform_name
def parseStats(entry):
if 'description' in entry:
return entry['description']
elif 'value' in entry:
return entry['value']
    elif 'entries' in entry or ('nestedStats' in entry and 'entries' in entry['nestedStats']):
if 'entries' in entry:
entries = entry['entries']
else:
entries = entry['nestedStats']['entries']
result = None
for name in entries:
entry = entries[name]
if 'https://localhost' in name:
name = name.split('/')
name = name[-1]
if result and isinstance(result, list):
result.append(parseStats(entry))
elif result and isinstance(result, dict):
result[name] = parseStats(entry)
else:
try:
int(name)
result = list()
result.append(parseStats(entry))
except ValueError:
result = dict()
result[name] = parseStats(entry)
else:
if '.' in name:
names = name.split('.')
key = names[0]
value = names[1]
if not result[key]:
result[key] = {}
result[key][value] = parseStats(entry)
else:
if result and isinstance(result, list):
result.append(parseStats(entry))
elif result and isinstance(result, dict):
result[name] = parseStats(entry)
else:
try:
int(name)
result = list()
result.append(parseStats(entry))
except ValueError:
result = dict()
result[name] = parseStats(entry)
return result
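# Illustrative example (comment added for clarity): parseStats() collapses the nested
# 'entries'/'nestedStats' structure returned by the F5 REST API into plain values, e.g.
#   parseStats({'entries': {'uptime': {'value': 603202},
#                           'platform': {'description': 'Z100'}}})
#   == {'uptime': 603202, 'platform': 'Z100'}
# If the first entry name is numeric, the result is built as a list instead of a dict.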
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.kwargs = kwargs
def exec_module(self):
results = []
facts = self.read_facts()
for item in facts:
attrs = item.to_return()
results.append(attrs)
return results
class Parameters(AnsibleF5Parameters):
@property
def gather_subset(self):
if isinstance(self._values['gather_subset'], string_types):
self._values['gather_subset'] = [self._values['gather_subset']]
elif not isinstance(self._values['gather_subset'], list):
raise F5ModuleError(
"The specified gather_subset must be a list."
)
tmp = list(set(self._values['gather_subset']))
tmp.sort()
self._values['gather_subset'] = tmp
return self._values['gather_subset']
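    # Illustrative example (comment added for clarity): the property above normalizes
    # the user-supplied value, so
    #   gather_subset: "system-info"                      -> ['system-info']
    #   gather_subset: ['vlans', 'system-info', 'vlans']  -> ['system-info', 'vlans']
    # i.e. a bare string is wrapped in a list, duplicates are removed, and the result
    # is sorted.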
class BaseParameters(Parameters):
@property
def enabled(self):
return flatten_boolean(self._values['enabled'])
@property
def disabled(self):
return flatten_boolean(self._values['disabled'])
def _remove_internal_keywords(self, resource):
resource.pop('kind', None)
resource.pop('generation', None)
resource.pop('selfLink', None)
resource.pop('isSubcollection', None)
resource.pop('fullPath', None)
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
class ApplicationsParameters(BaseParameters):
api_map = {
'protectionMode': 'protection_mode',
'transactionsPerSecond': 'transactions_per_second',
'newConnections': 'new_connections',
'responseTime': 'response_time',
'activeAlerts': 'active_alerts',
'badTraffic': 'bad_traffic',
'enhancedAnalytics': 'enhanced_analytics',
'badTrafficGrowth': 'bad_traffic_growth'
}
returnables = [
'protection_mode',
'id',
'name',
'status',
'transactions_per_second',
'connections',
'new_connections',
'response_time',
'health',
'active_alerts',
'bad_traffic',
'enhanced_analytics',
'bad_traffic_growth',
]
@property
def enhanced_analytics(self):
return flatten_boolean(self._values['enhanced_analytics'])
@property
def bad_traffic_growth(self):
return flatten_boolean(self._values['bad_traffic_growth'])
class ApplicationsFactManager(BaseManager):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
super(ApplicationsFactManager, self).__init__(**kwargs)
self.want = ApplicationsParameters(params=self.module.params)
def exec_module(self):
facts = self._exec_module()
result = dict(applications=facts)
return result
def _exec_module(self):
results = []
facts = self.read_facts()
for item in facts:
attrs = item.to_return()
results.append(attrs)
results = sorted(results, key=lambda k: k['name'])
return results
def read_facts(self):
results = []
collection = self.read_collection_from_device()
for resource in collection:
params = ApplicationsParameters(params=resource)
results.append(params)
return results
def read_collection_from_device(self):
uri = "https://{0}:{1}/mgmt/ap/query/v1/tenants/default/reports/AllApplicationsList".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
try:
return response['result']['items']
except KeyError:
return []
class ManagedDevicesParameters(BaseParameters):
api_map = {
'deviceUri': 'device_uri',
'groupName': 'group_name',
'httpsPort': 'https_port',
'isClustered': 'is_clustered',
'isLicenseExpired': 'is_license_expired',
'isVirtual': 'is_virtual',
'machineId': 'machine_id',
'managementAddress': 'management_address',
'mcpDeviceName': 'mcp_device_name',
'restFrameworkVersion': 'rest_framework_version',
'selfLink': 'self_link',
'trustDomainGuid': 'trust_domain_guid',
}
returnables = [
'address',
'build',
'device_uri',
'edition',
'group_name',
'hostname',
'https_port',
'is_clustered',
'is_license_expired',
'is_virtual',
'machine_id',
'management_address',
'mcp_device_name',
'product',
'rest_framework_version',
'self_link',
'slots',
'state',
'tags',
'trust_domain_guid',
'uuid',
'version',
]
@property
def slots(self):
result = []
if self._values['slots'] is None:
return None
for x in self._values['slots']:
x['is_active'] = flatten_boolean(x.pop('isActive', False))
result.append(x)
return result
@property
def tags(self):
if self._values['tags'] is None:
return None
result = dict((x['name'], x['value']) for x in self._values['tags'])
return result
@property
def https_port(self):
return int(self._values['https_port'])
@property
def is_clustered(self):
return flatten_boolean(self._values['is_clustered'])
@property
def is_license_expired(self):
return flatten_boolean(self._values['is_license_expired'])
@property
def is_virtual(self):
return flatten_boolean(self._values['is_virtual'])
class ManagedDevicesFactManager(BaseManager):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
super(ManagedDevicesFactManager, self).__init__(**kwargs)
self.want = ManagedDevicesParameters(params=self.module.params)
def exec_module(self):
facts = self._exec_module()
result = dict(managed_devices=facts)
return result
def _exec_module(self):
results = []
facts = self.read_facts()
for item in facts:
attrs = item.to_return()
results.append(attrs)
results = sorted(results, key=lambda k: k['hostname'])
return results
def read_facts(self):
results = []
collection = self.read_collection_from_device()
for resource in collection:
params = ManagedDevicesParameters(params=resource)
results.append(params)
return results
def read_collection_from_device(self):
uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if 'items' not in response:
return []
result = response['items']
return result
class PurchasedPoolLicensesParameters(BaseParameters):
api_map = {
'baseRegKey': 'base_reg_key',
'freeDeviceLicenses': 'free_device_licenses',
'licenseState': 'license_state',
'totalDeviceLicenses': 'total_device_licenses',
}
returnables = [
'base_reg_key',
'dossier',
'free_device_licenses',
'name',
'state',
'total_device_licenses',
'uuid',
# license_state facts
'vendor',
'licensed_date_time',
'licensed_version',
'evaluation_start_date_time',
'evaluation_end_date_time',
'license_end_date_time',
'license_start_date_time',
'registration_key',
]
@property
def registration_key(self):
try:
return self._values['license_state']['registrationKey']
except KeyError:
return None
@property
def license_start_date_time(self):
try:
return self._values['license_state']['licenseStartDateTime']
except KeyError:
return None
@property
def license_end_date_time(self):
try:
return self._values['license_state']['licenseEndDateTime']
except KeyError:
return None
@property
def evaluation_end_date_time(self):
try:
return self._values['license_state']['evaluationEndDateTime']
except KeyError:
return None
@property
def evaluation_start_date_time(self):
try:
return self._values['license_state']['evaluationStartDateTime']
except KeyError:
return None
@property
def licensed_version(self):
try:
return self._values['license_state']['licensedVersion']
except KeyError:
return None
@property
def licensed_date_time(self):
try:
return self._values['license_state']['licensedDateTime']
except KeyError:
return None
@property
def vendor(self):
try:
return self._values['license_state']['vendor']
except KeyError:
return None
class PurchasedPoolLicensesFactManager(BaseManager):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
super(PurchasedPoolLicensesFactManager, self).__init__(**kwargs)
self.want = PurchasedPoolLicensesParameters(params=self.module.params)
def exec_module(self):
facts = self._exec_module()
result = dict(purchased_pool_licenses=facts)
return result
def _exec_module(self):
results = []
facts = self.read_facts()
for item in facts:
attrs = item.to_return()
results.append(attrs)
results = sorted(results, key=lambda k: k['name'])
return results
def read_facts(self):
results = []
collection = self.read_collection_from_device()
for resource in collection:
params = PurchasedPoolLicensesParameters(params=resource)
results.append(params)
return results
def read_collection_from_device(self):
uri = "https://{0}:{1}/mgmt/cm/device/licensing/pool/purchased-pool/licenses".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
try:
return response['items']
except KeyError:
return []
class RegkeyPoolsParameters(BaseParameters):
api_map = {
}
returnables = [
'name',
'id',
'offerings',
'total_offerings',
]
class RegkeyPoolsOfferingParameters(BaseParameters):
api_map = {
'regKey': 'registration_key',
'licenseState': 'license_state',
'status': 'state',
}
returnables = [
'name',
'dossier',
'state',
# license_state facts
'licensed_date_time',
'licensed_version',
'evaluation_start_date_time',
'evaluation_end_date_time',
'license_end_date_time',
'license_start_date_time',
'registration_key',
]
@property
def registration_key(self):
try:
return self._values['license_state']['registrationKey']
except KeyError:
return None
@property
def license_start_date_time(self):
try:
return self._values['license_state']['licenseStartDateTime']
except KeyError:
return None
@property
def license_end_date_time(self):
try:
return self._values['license_state']['licenseEndDateTime']
except KeyError:
return None
@property
def evaluation_end_date_time(self):
try:
return self._values['license_state']['evaluationEndDateTime']
except KeyError:
return None
@property
def evaluation_start_date_time(self):
try:
return self._values['license_state']['evaluationStartDateTime']
except KeyError:
return None
@property
def licensed_version(self):
try:
return self._values['license_state']['licensedVersion']
except KeyError:
return None
@property
def licensed_date_time(self):
try:
return self._values['license_state']['licensedDateTime']
except KeyError:
return None
@property
def vendor(self):
try:
return self._values['license_state']['vendor']
except KeyError:
return None
class RegkeyPoolsFactManager(BaseManager):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
super(RegkeyPoolsFactManager, self).__init__(**kwargs)
self.want = RegkeyPoolsParameters(params=self.module.params)
def exec_module(self):
facts = self._exec_module()
result = dict(regkey_pools=facts)
return result
def _exec_module(self):
results = []
facts = self.read_facts()
for item in facts:
attrs = item.to_return()
results.append(attrs)
results = sorted(results, key=lambda k: k['name'])
return results
def read_facts(self):
results = []
collection = self.read_collection_from_device()
for resource in collection:
params = RegkeyPoolsParameters(params=resource)
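            # Fetch the pool's offerings subcollection and record how many
            # offerings the pool contains.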
offerings = self.read_offerings_from_device(resource['id'])
params.update({'total_offerings': len(offerings)})
for offering in offerings:
params2 = RegkeyPoolsOfferingParameters(params=offering)
params.update({'offerings': params2.to_return()})
results.append(params)
return results
def read_collection_from_device(self):
uri = "https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
try:
return response['items']
except KeyError:
return []
def read_offerings_from_device(self, license):
uri = "https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings".format(
self.client.provider['server'],
self.client.provider['server_port'],
license,
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
try:
return response['items']
except KeyError:
return []
class SystemInfoParameters(BaseParameters):
api_map = {
'isSystemSetup': 'is_system_setup',
'isAdminPasswordChanged': 'is_admin_password_changed',
'isRootPasswordChanged': 'is_root_password_changed'
}
returnables = [
'base_mac_address',
'chassis_serial',
'hardware_information',
'host_board_part_revision',
'host_board_serial',
'is_admin_password_changed',
'is_root_password_changed',
'is_system_setup',
'marketing_name',
'package_edition',
'package_version',
'platform',
'product_build',
'product_build_date',
'product_built',
'product_changelist',
'product_code',
'product_information',
'product_jobid',
'product_version',
'switch_board_part_revision',
'switch_board_serial',
'time',
'uptime',
]
@property
def is_admin_password_changed(self):
return flatten_boolean(self._values['is_admin_password_changed'])
@property
def is_root_password_changed(self):
return flatten_boolean(self._values['is_root_password_changed'])
@property
def is_system_setup(self):
if self._values['is_system_setup'] is None:
return 'no'
return flatten_boolean(self._values['is_system_setup'])
@property
def chassis_serial(self):
if self._values['system-info'] is None:
return None
# Yes, this is still called "bigip" even though this is querying the BIG-IQ
# product. This is likely due to BIG-IQ inheriting TMOS.
if 'bigipChassisSerialNum' not in self._values['system-info'][0]:
return None
return self._values['system-info'][0]['bigipChassisSerialNum']
@property
def switch_board_serial(self):
if self._values['system-info'] is None:
return None
if 'switchBoardSerialNum' not in self._values['system-info'][0]:
return None
if self._values['system-info'][0]['switchBoardSerialNum'].strip() == '':
return None
return self._values['system-info'][0]['switchBoardSerialNum']
@property
def switch_board_part_revision(self):
if self._values['system-info'] is None:
return None
if 'switchBoardPartRevNum' not in self._values['system-info'][0]:
return None
if self._values['system-info'][0]['switchBoardPartRevNum'].strip() == '':
return None
return self._values['system-info'][0]['switchBoardPartRevNum']
@property
def platform(self):
if self._values['system-info'] is None:
return None
return self._values['system-info'][0]['platform']
@property
def host_board_serial(self):
if self._values['system-info'] is None:
return None
if 'hostBoardSerialNum' not in self._values['system-info'][0]:
return None
if self._values['system-info'][0]['hostBoardSerialNum'].strip() == '':
return None
return self._values['system-info'][0]['hostBoardSerialNum']
@property
def host_board_part_revision(self):
if self._values['system-info'] is None:
return None
if 'hostBoardPartRevNum' not in self._values['system-info'][0]:
return None
if self._values['system-info'][0]['hostBoardPartRevNum'].strip() == '':
return None
return self._values['system-info'][0]['hostBoardPartRevNum']
@property
def package_edition(self):
return self._values['Edition']
@property
def package_version(self):
return 'Build {0} - {1}'.format(self._values['Build'], self._values['Date'])
@property
def product_build(self):
return self._values['Build']
@property
def product_build_date(self):
return self._values['Date']
@property
def product_built(self):
if 'version_info' not in self._values:
return None
if 'Built' in self._values['version_info']:
return int(self._values['version_info']['Built'])
@property
def product_changelist(self):
if 'version_info' not in self._values:
return None
if 'Changelist' in self._values['version_info']:
return int(self._values['version_info']['Changelist'])
@property
def product_jobid(self):
if 'version_info' not in self._values:
return None
if 'JobID' in self._values['version_info']:
return int(self._values['version_info']['JobID'])
@property
def product_code(self):
return self._values['Product']
@property
def product_version(self):
return self._values['Version']
@property
def hardware_information(self):
if self._values['hardware-version'] is None:
return None
self._transform_name_attribute(self._values['hardware-version'])
result = [v for k, v in iteritems(self._values['hardware-version'])]
return result
def _transform_name_attribute(self, entry):
if isinstance(entry, dict):
for k, v in iteritems(entry):
if k == 'tmName':
entry['name'] = entry.pop('tmName')
self._transform_name_attribute(v)
elif isinstance(entry, list):
for k in entry:
if k == 'tmName':
entry['name'] = entry.pop('tmName')
self._transform_name_attribute(k)
else:
return
@property
def time(self):
if self._values['fullDate'] is None:
return None
date = datetime.datetime.strptime(self._values['fullDate'], "%Y-%m-%dT%H:%M:%SZ")
result = dict(
day=date.day,
hour=date.hour,
minute=date.minute,
month=date.month,
second=date.second,
year=date.year
)
return result
@property
def marketing_name(self):
if self._values['platform'] is None:
return None
return self._values['platform'][0]['marketingName']
@property
def base_mac_address(self):
if self._values['platform'] is None:
return None
return self._values['platform'][0]['baseMac']
class SystemInfoFactManager(BaseManager):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
super(SystemInfoFactManager, self).__init__(**kwargs)
self.want = SystemInfoParameters(params=self.module.params)
def exec_module(self):
facts = self._exec_module()
result = dict(system_info=facts)
return result
def _exec_module(self):
facts = self.read_facts()
results = facts.to_return()
return results
def read_facts(self):
collection = self.read_collection_from_device()
params = SystemInfoParameters(params=collection)
return params
def read_collection_from_device(self):
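        # System info is not available from a single endpoint; merge the results
        # of several REST and remote bash queries into one dict of facts.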
result = dict()
tmp = self.read_hardware_info_from_device()
if tmp:
result.update(tmp)
tmp = self.read_system_setup_from_device()
if tmp:
result.update(tmp)
tmp = self.read_clock_info_from_device()
if tmp:
result.update(tmp)
tmp = self.read_version_info_from_device()
if tmp:
result.update(tmp)
tmp = self.read_uptime_info_from_device()
if tmp:
result.update(tmp)
tmp = self.read_version_file_info_from_device()
if tmp:
result.update(tmp)
return result
def read_system_setup_from_device(self):
uri = "https://{0}:{1}/mgmt/shared/system/setup".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response
def read_version_file_info_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "cat /VERSION"'
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
try:
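            # /VERSION consists of 'Key: value' lines; capture the known keys and
            # their values so they can be returned as version_info facts.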
pattern = r'^(?P<key>(Product|Build|Sequence|BaseBuild|Edition|Date|Built|Changelist|JobID))\:(?P<value>.*)'
result = response['commandResult'].strip()
except KeyError:
return None
if 'No such file or directory' in result:
return None
lines = response['commandResult'].split("\n")
result = dict()
for line in lines:
if not line:
continue
matches = re.match(pattern, line)
if matches:
result[matches.group('key')] = matches.group('value').strip()
if result:
return dict(
version_info=result
)
def read_uptime_info_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "cat /proc/uptime"'
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
try:
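            # The first field of /proc/uptime is the uptime in seconds; return it
            # rounded down to a whole number of seconds.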
parts = response['commandResult'].strip().split(' ')
return dict(
uptime=math.floor(float(parts[0]))
)
except KeyError:
pass
def read_hardware_info_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/hardware".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result = parseStats(response)
return result
def read_clock_info_from_device(self):
"""Parses clock info from the REST API
The clock stat returned from the REST API (at the time of 13.1.0.7)
is similar to the following.
{
"kind": "tm:sys:clock:clockstats",
"selfLink": "https://localhost/mgmt/tm/sys/clock?ver=13.1.0.4",
"entries": {
"https://localhost/mgmt/tm/sys/clock/0": {
"nestedStats": {
"entries": {
"fullDate": {
"description": "2018-06-05T13:38:33Z"
}
}
}
}
}
}
        Parsing this data using the ``parseStats`` method yields a list of
the clock stats in a format resembling that below.
[{'fullDate': '2018-06-05T13:41:05Z'}]
Therefore, this method cherry-picks the first entry from this list
and returns it. There can be no other items in this list.
Returns:
A dict mapping keys to the corresponding clock stats. For
example:
{'fullDate': '2018-06-05T13:41:05Z'}
            There should always be a clock stat, unless by chance it
            is removed from the API in the future or moved to a different
            API endpoint.
Raises:
F5ModuleError: A non-successful HTTP code was returned or a JSON
response was not found.
"""
uri = "https://{0}:{1}/mgmt/tm/sys/clock".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result = parseStats(response)
if result is None:
return None
return result[0]
def read_version_info_from_device(self):
"""Parses version info from the REST API
The version stat returned from the REST API (at the time of 13.1.0.7)
is similar to the following.
{
"kind": "tm:sys:version:versionstats",
"selfLink": "https://localhost/mgmt/tm/sys/version?ver=13.1.0.4",
"entries": {
"https://localhost/mgmt/tm/sys/version/0": {
"nestedStats": {
"entries": {
"Build": {
"description": "0.0.6"
},
"Date": {
"description": "Tue Mar 13 20:10:42 PDT 2018"
},
"Edition": {
"description": "Point Release 4"
},
"Product": {
"description": "BIG-IP"
},
"Title": {
"description": "Main Package"
},
"Version": {
"description": "13.1.0.4"
}
}
}
}
}
}
        Parsing this data using the ``parseStats`` method yields a list of
        the version stats in a format resembling that below.
[{'Build': '0.0.6', 'Date': 'Tue Mar 13 20:10:42 PDT 2018',
'Edition': 'Point Release 4', 'Product': 'BIG-IP', 'Title': 'Main Package',
'Version': '13.1.0.4'}]
Therefore, this method cherry-picks the first entry from this list
and returns it. There can be no other items in this list.
Returns:
            A dict mapping keys to the corresponding version stats. For
example:
{'Build': '0.0.6', 'Date': 'Tue Mar 13 20:10:42 PDT 2018',
'Edition': 'Point Release 4', 'Product': 'BIG-IP', 'Title': 'Main Package',
'Version': '13.1.0.4'}
            There should always be a version stat, unless by chance it
            is removed from the API in the future or moved to a different
            API endpoint.
Raises:
F5ModuleError: A non-successful HTTP code was returned or a JSON
response was not found.
"""
uri = "https://{0}:{1}/mgmt/tm/sys/version".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result = parseStats(response)
if result is None:
return None
return result[0]
class VlansParameters(BaseParameters):
api_map = {
'autoLasthop': 'auto_lasthop',
'cmpHash': 'cmp_hash_algorithm',
'failsafeAction': 'failsafe_action',
'failsafe': 'failsafe_enabled',
'failsafeTimeout': 'failsafe_timeout',
'ifIndex': 'if_index',
'learning': 'learning_mode',
'interfacesReference': 'interfaces',
'sourceChecking': 'source_check_enabled',
'fullPath': 'full_path'
}
returnables = [
'full_path',
'name',
'auto_lasthop',
'cmp_hash_algorithm',
'description',
'failsafe_action',
'failsafe_enabled',
'failsafe_timeout',
'if_index',
'learning_mode',
'interfaces',
'mtu',
'sflow_poll_interval',
'sflow_poll_interval_global',
'sflow_sampling_rate',
'sflow_sampling_rate_global',
'source_check_enabled',
'true_mac_address',
'tag',
]
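    # interfacesReference is an expanded subcollection; reduce each interface to
    # its name, full path, and whether it is a tagged member of the VLAN.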
@property
def interfaces(self):
if self._values['interfaces'] is None:
return None
if 'items' not in self._values['interfaces']:
return None
result = []
for item in self._values['interfaces']['items']:
tmp = dict(
name=item['name'],
full_path=item['fullPath']
)
if 'tagged' in item:
tmp['tagged'] = 'yes'
else:
tmp['tagged'] = 'no'
result.append(tmp)
return result
@property
def sflow_poll_interval(self):
return int(self._values['sflow']['pollInterval'])
@property
def sflow_poll_interval_global(self):
return flatten_boolean(self._values['sflow']['pollIntervalGlobal'])
@property
def sflow_sampling_rate(self):
return int(self._values['sflow']['samplingRate'])
@property
def sflow_sampling_rate_global(self):
return flatten_boolean(self._values['sflow']['samplingRateGlobal'])
@property
def source_check_state(self):
return flatten_boolean(self._values['source_check_state'])
@property
def true_mac_address(self):
if self._values['stats']['macTrue'] in [None, 'none']:
return None
return self._values['stats']['macTrue']
@property
def tag(self):
return self._values['stats']['id']
@property
def failsafe_enabled(self):
return flatten_boolean(self._values['failsafe_enabled'])
class VlansFactManager(BaseManager):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
super(VlansFactManager, self).__init__(**kwargs)
self.want = VlansParameters(params=self.module.params)
def exec_module(self):
facts = self._exec_module()
result = dict(vlans=facts)
return result
def _exec_module(self):
results = []
facts = self.read_facts()
for item in facts:
attrs = item.to_return()
results.append(attrs)
results = sorted(results, key=lambda k: k['full_path'])
return results
def read_facts(self):
results = []
collection = self.read_collection_from_device()
for resource in collection:
resource.update(self.read_stats(resource['fullPath']))
params = VlansParameters(params=resource)
results.append(params)
return results
def read_stats(self, resource):
uri = "https://{0}:{1}/mgmt/tm/net/vlan/{2}/stats".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(name=resource)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result = parseStats(response)
return result
def read_collection_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/net/vlan/?expandSubcollections=true".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if 'items' not in response:
return []
result = response['items']
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.kwargs = kwargs
self.want = Parameters(params=self.module.params)
self.managers = {
'applications': dict(
manager=ApplicationsFactManager,
client=F5RestClient,
),
'managed-devices': dict(
manager=ManagedDevicesFactManager,
client=F5RestClient,
),
'purchased-pool-licenses': dict(
manager=PurchasedPoolLicensesFactManager,
client=F5RestClient,
),
'regkey-pools': dict(
manager=RegkeyPoolsFactManager,
client=F5RestClient,
),
'system-info': dict(
manager=SystemInfoFactManager,
client=F5RestClient,
),
'vlans': dict(
manager=VlansFactManager,
client=F5RestClient,
),
}
def exec_module(self):
self.handle_all_keyword()
res = self.check_valid_gather_subset(self.want.gather_subset)
if res:
invalid = ','.join(res)
raise F5ModuleError(
"The specified 'gather_subset' options are invalid: {0}".format(invalid)
)
result = self.filter_excluded_facts()
managers = []
for name in result:
manager = self.get_manager(name)
if manager:
managers.append(manager)
if not managers:
result = dict(
changed=False
)
return result
result = self.execute_managers(managers)
if result:
result['changed'] = True
else:
result['changed'] = False
return result
def filter_excluded_facts(self):
# Remove the excluded entries from the list of possible facts
exclude = [x[1:] for x in self.want.gather_subset if x[0] == '!']
include = [x for x in self.want.gather_subset if x[0] != '!']
result = [x for x in include if x not in exclude]
return result
def handle_all_keyword(self):
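        # Expand the 'all' keyword into the full list of fact managers while
        # keeping any other (possibly negated) subsets the user specified.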
if 'all' not in self.want.gather_subset:
return
managers = list(self.managers.keys()) + self.want.gather_subset
managers.remove('all')
self.want.update({'gather_subset': managers})
def check_valid_gather_subset(self, includes):
"""Check that the specified subset is valid
The ``gather_subset`` parameter is specified as a "raw" field which means that
any Python type could technically be provided
:param includes:
:return:
"""
keys = self.managers.keys()
result = []
for x in includes:
if x not in keys:
if x[0] == '!':
if x[1:] not in keys:
result.append(x)
else:
result.append(x)
return result
def execute_managers(self, managers):
results = dict()
for manager in managers:
result = manager.exec_module()
results.update(result)
return results
def get_manager(self, which):
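        # Look up the requested fact manager and instantiate it with a fresh REST
        # client built from the module parameters.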
result = {}
info = self.managers.get(which, None)
if not info:
return result
kwargs = dict()
kwargs.update(self.kwargs)
manager = info.get('manager', None)
client = info.get('client', None)
kwargs['client'] = client(**self.module.params)
result = manager(**kwargs)
return result
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = False
argument_spec = dict(
gather_subset=dict(
type='list',
required=True,
choices=[
# Meta choices
'all',
# Non-meta choices
'applications',
'managed-devices',
'purchased-pool-licenses',
'regkey-pools',
'system-info',
'vlans',
# Negations of meta choices
'!all',
# Negations of non-meta-choices
'!applications',
'!managed-devices',
'!purchased-pool-licenses',
'!regkey-pools',
'!system-info',
'!vlans',
]
),
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
client = F5RestClient(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
alexlo03/ansible
|
lib/ansible/modules/network/f5/bigiq_device_facts.py
|
Python
|
gpl-3.0
| 70,468
|
import imp
import os
import sys
from marshal import Unmarshaller
__debugging__ = False
# Todo: This should be stored in a central place.
supported_magic = 62211 # CPython 2.7
def __readPycHeader(file):
def read():
return ord(file.read(1))
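    # A CPython 2 .pyc header is a 2-byte little-endian magic number, the bytes
    # '\r\n', and a 4-byte little-endian source mtime.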
magic = read() | (read()<<8)
if not ( file.read(1) == '\r' and file.read(1) == '\n' ):
raise TypeError("not valid pyc-file")
mtime = read() | (read()<<8) | (read()<<16) | (read()<<24)
return magic, mtime
def __makeModule(name, code, path):
module = sys.modules.get(name)
if not module:
module = sys.modules[name] = imp.new_module(name)
module.__file__ = path
exec code in module.__dict__
return module
class __Importer(object):
def __init__(self, path):
if __debugging__: print "Importer invoked"
self.__path = path
def find_module(self, fullname, path=None):
if __debugging__:
print "Importer.find_module(fullname=%s, path=%s)" % (
repr(fullname), repr(path))
path = fullname.split('.')
filename = path[-1]
path = path[:-1]
pycfile = os.path.join(self.__path, *(path + [filename + '.pyc']))
pyfile = os.path.join(self.__path, *(path + [filename + '.py']))
if os.path.exists(pycfile):
f = open(pycfile, 'rb')
try:
magic, mtime = __readPycHeader(f)
except:
return None # abort! not a valid pyc-file
f.close()
# Todo: This check should also be in Unmarshaller
if magic != supported_magic:
return None # bytecode version mismatch
if os.path.exists(pyfile):
pytime = os.stat(pyfile).st_mtime
if pytime > mtime:
return None # abort! py-file was newer
return self
else:
return None # abort! pyc-file does not exist
def load_module(self, fullname):
path = fullname.split('.')
path[-1] += '.pyc'
filename = os.path.join(self.__path, *path)
f = open(filename, 'rb')
magic, mtime = __readPycHeader(f)
if magic != supported_magic:
if __debugging__: print "Unsupported bytecode version:", fullname
return None
#code = Unmarshaller(f, magic=magic).load()
code = Unmarshaller(f).load()
if __debugging__: print "Successfully loaded:", fullname
return __makeModule( fullname, code, filename )
class __MetaImporter(object):
def __init__(self):
self.__importers = {}
def find_module(self, fullname, path):
if __debugging__: print "MetaImporter.find_module(%s, %s)" % (
repr(fullname), repr(path))
for _path in sys.path:
if _path not in self.__importers:
try:
self.__importers[_path] = __Importer(_path)
except:
self.__importers[_path] = None
importer = self.__importers[_path]
if importer is not None:
loader = importer.find_module(fullname, path)
if loader is not None:
return loader
else:
return None
sys.meta_path.insert(0, __MetaImporter())
|
bikashgupta11/javarobot
|
src/main/resources/jython/Lib/pycimport.py
|
Python
|
gpl-3.0
| 3,296
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 NUMA Extreme Systems (www.numaes.com) for Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class ple_3_4 (osv.Model):
_name = "l10n_pe.ple_3_4"
_inherit = "l10n_pe.ple"
_columns= {
'lines_ids': fields.one2many ('l10n_pe.ple_line_3_4', 'ple_3_4_id', 'Lines', readonly=True, states={'draft':[('readonly',False)],}),
}
def action_reload (self, cr, uid, ids, context=None):
assert ids and len(ids)==1
ple_line_obj = self.pool.get('l10n_pe.ple_line_3_4')
#Remove existing lines
ple = self.browse(cr, uid, ids[0], context=context)
cmds = []
for line in ple.lines_ids:
cmds.append((2, line.id))
if cmds:
ple.write({'lines_ids': cmds}, context=context)
ple.refresh()
# Get the list of involved movements
period_ids = self.get_all_periods_up_to (cr, uid, ids, ple.period_id.id)
move_lines = self.get_move_lines (cr, uid, period_ids, '3_4', ple.company_id.id, context=context)
#Get the list of involved partners
partner_ids = list(set([aml.partner_id.id for aml in move_lines]))
for partner_id in partner_ids:
vals = {
'ple_3_4_id': ple.id,
'partner_id': partner_id,
}
vals.update(ple_line_obj.onchange_partner_id(cr, uid, [], partner_id, ple.period_id.id, context=context)['value'])
ple_line_obj.create(cr, uid, vals, context=context)
return True
def action_renumber (self, cr, uid, ids, context=None):
assert ids and len(ids)==1
ple = self.browse (cr, uid, ids[0], context=context)
next_seq = 1
for line in sorted(ple.lines_ids, key=lambda l: l.operation_date_3):
line.write ({'sequence': next_seq}, context=context)
next_seq += 1
return True
def action_report (self, cr, uid, ids, context=None):
assert ids and len(ids)==1
return {
'type': 'ir.actions.report.xml',
'report_name': 'l10n_pe.sunat_3_4',
'datas': {},
}
def get_output_lines (self, cr, uid, ids, context=None):
assert ids and len(ids)==1
ple = self.browse (cr, uid, ids[0], context=context)
res = []
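        # Each PLE record is a pipe-separated line: period (YYYYMM00), sequence,
        # partner document type/number/name, operation date, amount and state.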
for r in ple.lines_ids:
elements = [
"%s%s00" % (ple.period_id.date_start[0:4], ple.period_id.date_start[5:7]),
self.convert_str(r.sequence),
self.convert_str(r.partner_doc_type_3),
self.convert_str(r.partner_doc_number_4),
self.convert_str(r.partner_name_5),
self.convert_date (r.operation_date_6),
self.convert_amount(r.amount_7),
self.convert_str(r.operation_state_8),
]
res.append('|'.join(elements))
return res
def get_output_filename (self, cr, uid, ids, context=None):
return "sunat_3_4.txt"
class ple_line_3_4 (osv.Model):
_name = 'l10n_pe.ple_line_3_4'
_inherit = 'l10n_pe.ple_line'
def _get_doc_type_selection (self, cr, uid, context=None):
bt_obj = self.pool.get('base.element')
return bt_obj.get_as_selection(cr, uid, 'PE.SUNAT.TABLA_02', context=context)
_columns = {
'ple_3_4_id': fields.many2one('l10n_pe.ple_3_4', 'PLE', on_delete='cascade'),
'state': fields.related ('ple_3_4_id', 'state', string='State', type="char"),
'company_id': fields.related ('ple_3_4_id', 'company_id', string='Company', type="many2one", relation="res.company"),
'partner_id': fields.many2one('res.partner', 'Partner', on_delete="restrict"),
'partner_doc_type_3': fields.selection(_get_doc_type_selection, "Doc. type", required=True, size=3, help="Tipo de Documento de Identidad del accionista, socio o personal"),
'partner_doc_number_4': fields.char("Partner's doc. number", size=15, help="Número de Documento de Identidad del accionista, socio o personal"),
'partner_name_5': fields.char("Partner's name", size=60, required=True, help="Apellidos y Nombres, Denominación o Razón Social del accionista, socio o personal"),
'operation_date_6': fields.date ('Operation date', required=True, help="Fecha de inicio de la operación"),
'amount_7': fields.float('Amount', digits=(12,2), help="Monto de cada cuenta por cobrar del accionista, socio o personal"),
'operation_state_8': fields.selection ([
('1', '1'),
('8', '8'),
('9', '9'),
], 'Operation state', required=True, help="""
Registrar '1' cuando la operación corresponde al periodo,
'8' cuando la operación corresponde a un periodo anterior y NO ha sido anotada en dicho periodo o
'9' cuando la operación corresponde a un periodo anterior y SI ha sido anotada en dicho periodo."""),
}
_order = 'sequence'
_defaults = {
'operation_date_6': fields.date.context_today,
'operation_state_8': '1',
}
def onchange_partner_id (self, cr, uid, ids, partner_id, period_id, context=None):
vals = {}
if partner_id:
ple_3_4_obj = self.pool.get('l10n_pe.ple_3_4')
period_obj = self.pool.get('account.period')
partner_obj = self.pool.get('res.partner')
lines = ple_3_4_obj.get_move_lines(cr, uid,
ple_3_4_obj.get_all_periods_up_to(period_id, context=context),
'3_4',
partner_id=partner_id,
context=context)
lines.sort(key=lambda l: (l.period_id.date_start))
initial_balance = 0.0
final_balance = 0.0
first_move_date = None
period = period_obj.browse(cr, uid, period_id, context=context)
if lines:
for l in lines:
if l.date_start >= period.date_start:
final_balance += (l.credit - l.debit)
else:
initial_balance += (l.credit - l.debit)
if (not first_move_date) or first_move_date > l.date:
first_move_date = l.date
final_balance += initial_balance
partner = partner_obj.browse(cr, uid, partner_id, context=context)
vals['partner_doc_type_3'] = partner.doc_type
vals['partner_doc_number_4'] = partner.doc_number
vals['partner_name_5'] = partner.name
vals['operation_date_6'] = first_move_date
vals['amount_7'] = final_balance
return {'value': vals}
else:
return False
def onchange_doc (self, cr, uid, ids, partner_doc_type, partner_doc_number, context=None):
res = {}
if partner_doc_type == '0':
if (not partner_doc_number) or len (partner_doc_number) > 15:
raise osv.except_osv (_('Value error'),
                _('Document number should be alphanumeric, not longer than 15 characters! Please check!'))
elif partner_doc_type == '1':
if (not partner_doc_number) or len (partner_doc_number) != 8 or not partner_doc_number.isdigit():
raise osv.except_osv (_('Value error'),
_('Libreta electoral or DNI should be numeric, exactly 8 numbers long! Please check!'))
elif partner_doc_type == '4':
if (not partner_doc_number) or len (partner_doc_number) > 12:
raise osv.except_osv (_('Value error'),
                _('Carnet de extranjeria should be alphanumeric, not longer than 12 characters! Please check!'))
elif partner_doc_type == '6':
if (not partner_doc_number) or (len (partner_doc_number) < 8 or len(partner_doc_number) > 11) or not partner_doc_number.isdigit():
raise osv.except_osv (_('Value error'),
_('RUC should be numeric, 8-11 numbers long! Please check!'))
elif partner_doc_type == '7':
if (not partner_doc_number) or len (partner_doc_number) > 12:
raise osv.except_osv (_('Value error'),
_('Pasaporte should be alfanumeric, not longer than 12 characters! Please check!'))
elif partner_doc_type == 'A':
if (not partner_doc_number) or len (partner_doc_number) != 15 or not partner_doc_number.isdigit():
raise osv.except_osv (_('Value error'),
_('Cedula diplomatica should be numeric, exactly 15 numbers long! Please check!'))
class ple_configuration (osv.osv):
_inherit = "l10n_pe.ple_configuration"
def get_report_type (self, cr, uid, context=None):
rep_types = super(ple_configuration, self).get_report_type(cr, uid, context=context)
rep_types.append (('3_4', '3.4 Accionistas'))
return sorted(rep_types, key=lambda e: e[0])
|
Jgarcia-IAS/SAT
|
openerp/addons-extra/odoo-pruebas/odoo-server/addons-extra/l10n_pe_ple03/sunat_3_4.py
|
Python
|
agpl-3.0
| 10,529
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Customization by Andrea Cometa
# Compatible with OpenERP release 6.0.0
# Copyright (C) 2012 Andrea Cometa. All Rights Reserved.
# Email: info@andreacometa.it
# Web site: http://www.andreacometa.it
#
##############################################################################
import stock
|
john-wang-metro/metro-openerp
|
third_modules/stock_cancel/__init__.py
|
Python
|
agpl-3.0
| 431
|
# -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Savoir-faire Linux. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from itertools import permutations
class hr_hourly_rate_class(orm.Model):
_name = 'hr.hourly.rate.class'
_description = 'Hourly rate class'
_columns = {
'name': fields.char(
'Class Name',
required=True,
),
'line_ids': fields.one2many(
'hr.hourly.rate',
'class_id',
'Hourly Rates'
),
'contract_job_ids': fields.one2many(
'hr.contract.job',
'hourly_rate_class_id',
'Contract Jobs'
),
}
def _check_overlapping_rates(self, cr, uid, ids, context=None):
"""
Checks if a class has two rates that overlap in time.
"""
for hourly_rate_class in self.browse(cr, uid, ids, context):
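            # permutations() yields every ordered pair of rates, so each rate is
            # compared against every other rate in both directions.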
for r1, r2 in permutations(hourly_rate_class.line_ids, 2):
if r1.date_end and (
r1.date_start <= r2.date_start <= r1.date_end):
return False
elif not r1.date_end and (r1.date_start <= r2.date_start):
return False
return True
_constraints = [(
_check_overlapping_rates,
'Error! You cannot have overlapping rates',
['line_ids']
)]
|
macopedia/hr
|
__unported__/hr_contract_hourly_rate/hr_hourly_rate_class.py
|
Python
|
agpl-3.0
| 2,229
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
from odoo.addons.stock_landed_costs.models.stock_landed_cost import SPLIT_METHOD
class ProductTemplate(models.Model):
_inherit = "product.template"
landed_cost_ok = fields.Boolean('Is a Landed Cost', help='Indicates whether the product is a landed cost.')
split_method_landed_cost = fields.Selection(SPLIT_METHOD, string="Default Split Method",
help="Default Split Method when used for Landed Cost")
|
ygol/odoo
|
addons/stock_landed_costs/models/product.py
|
Python
|
agpl-3.0
| 595
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import _
from frappe.model.document import Document
class CForm(Document):
def validate(self):
"""Validate invoice that c-form is applicable
and no other c-form is received for that"""
for d in self.get('invoices'):
if d.invoice_no:
inv = frappe.db.sql("""select c_form_applicable, c_form_no from
`tabSales Invoice` where name = %s and docstatus = 1""", d.invoice_no)
if inv and inv[0][0] != 'Yes':
frappe.throw(_("C-form is not applicable for Invoice: {0}").format(d.invoice_no))
elif inv and inv[0][1] and inv[0][1] != self.name:
frappe.throw(_("""Invoice {0} is tagged in another C-form: {1}.
If you want to change C-form no for this invoice,
please remove invoice no from the previous c-form and then try again"""\
.format(d.invoice_no, inv[0][1])))
elif not inv:
frappe.throw(_("Row {0}: Invoice {1} is invalid, it might be cancelled / does not exist. \
Please enter a valid Invoice".format(d.idx, d.invoice_no)))
def on_update(self):
""" Update C-Form No on invoices"""
self.set_total_invoiced_amount()
def on_submit(self):
self.set_cform_in_sales_invoices()
def before_cancel(self):
# remove cform reference
frappe.db.sql("""update `tabSales Invoice` set c_form_no=null where c_form_no=%s""", self.name)
def set_cform_in_sales_invoices(self):
inv = [d.invoice_no for d in self.get('invoices')]
if inv:
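			# Tag every listed invoice with this C-Form, then clear the reference
			# from any invoice that is no longer listed against it.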
frappe.db.sql("""update `tabSales Invoice` set c_form_no=%s, modified=%s where name in (%s)""" %
('%s', '%s', ', '.join(['%s'] * len(inv))), tuple([self.name, self.modified] + inv))
frappe.db.sql("""update `tabSales Invoice` set c_form_no = null, modified = %s
where name not in (%s) and ifnull(c_form_no, '') = %s""" %
('%s', ', '.join(['%s']*len(inv)), '%s'), tuple([self.modified] + inv + [self.name]))
else:
			frappe.throw(_("Please enter at least 1 invoice in the table"))
def set_total_invoiced_amount(self):
total = sum([flt(d.grand_total) for d in self.get('invoices')])
frappe.db.set(self, 'total_invoiced_amount', total)
	def get_invoice_details(self, invoice_no):
		""" Pull details from invoices for reference """
if invoice_no:
inv = frappe.db.get_value("Sales Invoice", invoice_no,
["posting_date", "territory", "base_net_total", "base_grand_total"], as_dict=True)
return {
'invoice_date' : inv.posting_date,
'territory' : inv.territory,
'net_total' : inv.base_net_total,
'grand_total' : inv.base_grand_total
}
|
gsnbng/erpnext
|
erpnext/accounts/doctype/c_form/c_form.py
|
Python
|
agpl-3.0
| 2,722
|
#!/usr/bin/env python
from PyQt5 import QtCore, QtWidgets
from peacock import PeacockApp
from peacock.utils import Testing
import os
class TestPostprocessorState(Testing.PeacockImageTestCase):
"""
Test for ExodusViewer state when executable is re-run.
"""
qapp = QtWidgets.QApplication([])
def setUp(self):
"""
Creates the peacock application.
"""
args = ["-size", "1024", "768", "-i", "../../common/time_data.i", "-e", Testing.find_moose_test_exe(), "-w", os.getcwd()]
self._app = PeacockApp.PeacockApp(args, self.qapp)
self._window = self._app.main_widget.tab_plugin.VectorPostprocessorViewer.currentWidget().FigurePlugin
Testing.set_window_size(self._window)
Testing.remove_file('peacock_run_exe_tmp_out.e')
def selectTab(self, tab):
"""
Helper function for toggling tabs.
"""
self._app.main_widget.tab_plugin.setCurrentWidget(tab)
self._app.main_widget.tab_plugin.currentChanged.emit(self._app.main_widget.tab_plugin.currentIndex())
Testing.process_events(t=1)
def execute(self):
"""
Helper for running executable.
"""
execute = self._app.main_widget.tab_plugin.ExecuteTabPlugin
execute.ExecuteRunnerPlugin.runClicked()
execute.ExecuteRunnerPlugin.runner.process.waitForFinished()
Testing.process_events(t=1)
def testState(self):
"""
Tests that re-executing doesn't change the state of the exodus viewer.
"""
# The tabs to switch between
vpp = self._app.main_widget.tab_plugin.VectorPostprocessorViewer
execute = self._app.main_widget.tab_plugin.ExecuteTabPlugin
execute.ExecuteOptionsPlugin.csv_checkbox.setCheckState(QtCore.Qt.Unchecked)
# Run and check that basic results show up
self.execute()
# Plot something on VPP tab
self.selectTab(vpp)
toggle = vpp.currentWidget().PostprocessorSelectPlugin._groups[0]._toggles['u']
toggle.CheckBox.setCheckState(QtCore.Qt.Checked)
toggle.CheckBox.clicked.emit(True)
self.assertImage("testDefault.png")
# Re-run and check results again
self.selectTab(execute)
self.execute()
self.selectTab(vpp)
self.assertImage("testDefault.png")
def testTabChange(self):
"""
Tests that changing tabs disables data update
"""
# The tabs to switch between
vpp = self._app.main_widget.tab_plugin.VectorPostprocessorViewer
execute = self._app.main_widget.tab_plugin.ExecuteTabPlugin
execute.ExecuteOptionsPlugin.csv_checkbox.setCheckState(QtCore.Qt.Unchecked)
self.execute()
# Execute tab active, VPP timer off
def get_active():
return any([group._data._timer.isActive() for group in vpp.currentWidget().PostprocessorSelectPlugin._groups])
self.assertFalse(get_active())
# VPP tab active, VPP timer on
self.selectTab(vpp)
self.assertTrue(get_active())
if __name__ == '__main__':
Testing.run_tests()
|
Chuban/moose
|
python/peacock/tests/peacock_app/check_postprocessor_state/test_postprocessor_state.py
|
Python
|
lgpl-2.1
| 3,134
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Yoda(AutotoolsPackage):
"""YODA - Yet more Objects for Data Analysis"""
homepage = "https://yoda.hepforge.org/"
url = "https://yoda.hepforge.org/downloads/?f=YODA-1.8.3.tar.bz2"
tags = ['hep']
version('1.9.0', sha256='9a55de12ffebbe41d1704459c5c9289eeaf0f0eb6a4d0104ea222d7ab889fdf4')
version('1.8.5', sha256='4c2e6b8571fc176271515a309b45687a2981af1b07ff3f00d0b035a597aa32fd')
version('1.8.4', sha256='9d24a41c9b7cc6eb14cab0a48f65d2fca7ec9d794afe0922ceb158d0153c150e')
version('1.8.3', sha256='d9dd0ea5e0f630cdf4893c09a40c78bd44455777c2125385ecc26fa9a2acba8a')
version('1.8.2', sha256='89558c11cf9b88b0899713e5b4bf8781fdcecc480ff155985ebbf148c6d80bdb')
version('1.8.1', sha256='51472e12065b9469f13906f0dc609e036d0c1dbd2a8e445e7d654aba73660112')
version('1.8.0', sha256='82c62bbaedb4b6b7d50cd42ce5409d453d46c1cc6724047db5efa74d34dd6dc5')
version('1.7.7', sha256='cfb64b099a79ec4d138792f0b464a8fbb04c4345143f77bbdca07acb744628ce')
version('1.7.6', sha256='864a1459c82676c991fcaed931263a415e815e3c9dc2cad2f94bda6fa4d112e5')
version('1.7.5', sha256='7b1dc7bb380d0fbadce12072f5cc21912c115e826182a3922d864e7edea131db')
version('1.7.4', sha256='3df316b89e9c0052104f8956e4f7d26c0b0b05cdace7d908be35c383361e3a71')
version('1.7.3', sha256='ebf6094733823e9cc2d1586aff06db2d8999c74a47e666baf305322f62c48058')
version('1.7.2', sha256='7f093cf947824ec118767c7c1999a50ea9343c173cf8c5062e3800ba54c2943e')
version('1.7.1', sha256='edd7971ecd272314309c800395200b07cf68547cbac3378a02d0b8c9ac03027b')
version('1.7.0', sha256='b3d6bfb0c52ed87cd240cee5e93e09102832d9ef32505d7275f4d3191a35ce3b')
version('1.6.7', sha256='2abf378573832c201bc6a9fecfff5b2006fc98c7a272540326cda8eb5bd95e16')
version('1.6.6', sha256='cf172a496d9108b93420530ea91055d07ecd514d2894d78db46b806530e91d21')
version('1.6.5', sha256='1477fe754cfe2e4e06aa363a773accf18aab960a8b899968b77834368cac14c5')
version('1.6.4', sha256='4c01f43c18b7b2e71f61dea0bb8c6fdc099c8e1a66256c510652884c4ffffbca')
version('1.6.3', sha256='1dd7e334fe54a05ff911d9e227d395abc5efd29e29d60187a036b2201f97da19')
version('1.6.2', sha256='5793cd1320694118423888801ca520f2719565fde04699ee69e1751f47cb57a8')
version('1.6.1', sha256='ec3f4cc4eb57f94fb431cc37db10eb831f025df95ffd9e516b8009199253c62b')
version('1.6.0', sha256='2920ef2588268484b650dc08438664a3539b79c65a9e80d58e3771bb699e2a6b')
version('1.5.9', sha256='1a19cc8c34c08f1797a93d355250e682eb85d62d4ab277b6714d7873b4bdde75')
version('1.5.8', sha256='011c5be5cc565f8baf02e7ebbe57a57b4d70dc6a528d5b0102700020bbf5a973')
version('1.5.7', sha256='f775df11b034154b8f5d43f12007692c3314672e60d3e554b3928fe5b0f00c29')
version('1.5.6', sha256='050e17b1b80658213281a2e4112dfecc0096f01f269cd739d601b2fd0e790a0c')
version('1.5.5', sha256='ce45df6248c6c50633953048240513dc52ca5c9144ef69ea72ada2df23bc4918')
version('1.5.4', sha256='c41853a1f3aa0794875ae09c1ba4348942eb890e798ac7cee6e3505a9b68b678')
version('1.5.3', sha256='1220ac0ae204c3ed6b22a6a35c30d9b5c1ded35a1054cff131861b4a919d4904')
version('1.5.2', sha256='ec113c53a6174b174aaea8f45802cc419184ce056123b93ab8d3f80fc1bd4986')
version('1.5.1', sha256='a8b088b3ede67d560e40f91f4f99be313f21841c46ce2f657af7692a7bbe3276')
version('1.5.0', sha256='2c2b77344854fac937a8ef07c0928c50829ff4c69bcad6e0afb92da611b7dd18')
version('1.4.0', sha256='e76a129f7c2b72b53525fe0b712606eeeab0dc145daa070ebf0728f0384eaf48')
version('1.3.1', sha256='274e196d009e3aac6dd1f2db876de9613ca1a3c21ec3364bc3662f5493bc9747')
version('1.3.0', sha256='d63197d5940b481ecb06cf4703d9c0b49388f32cad61ccae580d1b80312bd215')
version('1.2.1', sha256='e86964e91e4fbbba443d2848f55c028001de4713dcc64c40339389de053e7d8b')
version('1.2.0', sha256='143fa86cd7965d26d3897a5752307bfe08f4866c2f9a9f226a393127d19ee353')
version('1.1.0', sha256='5d2e8f3c1cddfb59fe651931c7c605fe0ed067864fa86047aed312c6a7938e01')
version('1.0.7', sha256='145c27d922c27a4e1d6d50030f4ddece5f03d6c309a5e392a5fcbb5e83e747ab')
version('1.0.6', sha256='357732448d67a593e5ff004418f2a2a263a1401ffe84e021f8a714aa183eaa21')
version('1.0.5', sha256='ba72bc3943a1b39fa63900570948199cf5ed5c7523f2c4af4740e51b098f1794')
version('1.0.4', sha256='697fe397c69689feecb2a731e19b2ff85e19343b8198c4f18a7064c4f7123950')
version('1.0.3', sha256='6a1d1d75d9d74da457726ea9463c1b0b6ba38d4b43ef54e1c33f885e70fdae4b')
variant("root", default=False, description="Enable ROOT interface")
depends_on('python', type=('build', 'run'))
depends_on('py-future', type=('build', 'run'))
depends_on('zlib')
depends_on('boost', when='@:1.6.0', type=('build', 'run'))
depends_on('py-cython@0.18:', type='build', when='@:1.4.0')
depends_on('py-cython@0.20:', type='build', when='@1.4.0:1.6.5')
depends_on('py-cython@0.23.5:', type='build', when='@1.6.5:1.8.0')
depends_on('py-cython@0.24:', type='build', when='@1.8.0:')
depends_on('py-matplotlib', when='@1.3.0:', type=('build', 'run'))
depends_on('root', type=('build', 'link', 'run'), when='+root')
patch('yoda-1.5.5.patch', level=0, when='@1.5.5')
patch('yoda-1.5.9.patch', level=0, when='@1.5.9')
patch('yoda-1.6.1.patch', level=0, when='@1.6.1')
patch('yoda-1.6.2.patch', level=0, when='@1.6.2')
patch('yoda-1.6.3.patch', level=0, when='@1.6.3')
patch('yoda-1.6.4.patch', level=0, when='@1.6.4')
patch('yoda-1.6.5.patch', level=0, when='@1.6.5')
patch('yoda-1.6.6.patch', level=0, when='@1.6.6')
patch('yoda-1.6.7.patch', level=0, when='@1.6.7')
    conflicts("%gcc@10:", when="@:1.8.5",
              msg="yoda up to 1.8.5 is missing a <limits> include in AnalysisObject.h."
                  " Use version 1.9.0 or later, or patch earlier versions if needed.")
def configure_args(self):
args = []
if self.spec.satisfies('@:1.6.0'):
args.append('--with-boost=' + self.spec['boost'].prefix)
args.extend(self.enable_or_disable('root'))
return args
|
LLNL/spack
|
var/spack/repos/builtin/packages/yoda/package.py
|
Python
|
lgpl-2.1
| 6,268
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xsm(AutotoolsPackage, XorgPackage):
"""X Session Manager."""
homepage = "https://cgit.freedesktop.org/xorg/app/xsm"
xorg_mirror_path = "app/xsm-1.0.3.tar.gz"
version('1.0.3', sha256='f70815139d62416dbec5915ec37db66f325932a69f6350bb1a74c0940cdc796a')
depends_on('libx11')
depends_on('libxt@1.1.0:')
depends_on('libice')
depends_on('libsm')
depends_on('libxaw')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
LLNL/spack
|
var/spack/repos/builtin/packages/xsm/package.py
|
Python
|
lgpl-2.1
| 712
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class FailingBuild(Package):
"""This package has a trivial install method that fails."""
homepage = "http://www.example.com/trivial_install"
url = "http://www.unit-test-should-replace-this-url/trivial_install-1.0.tar.gz"
version('1.0', '0123456789abcdef0123456789abcdef')
def install(self, spec, prefix):
raise InstallError("Expected failure.")
|
LLNL/spack
|
var/spack/repos/builtin.mock/packages/failing-build/package.py
|
Python
|
lgpl-2.1
| 601
|
# Copyright (c) 2010-2013 by Yaco Sistemas <goinnn@gmail.com> or <pmartin@yaco.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
try:
from django.conf.urls import include, patterns, url
except ImportError: # Django < 1.4
from django.conf.urls.defaults import include, patterns, url
urlpatterns = patterns('testing.unusual_fields.views',
url(r'^$', 'unusual_index', name='unusual_index'),
url(r'^(?P<unusual_id>\w+)/$', 'unusual_edit', name="unusual_edit"),
)
|
tlevine/django-inplaceedit
|
testing/testing/unusual_fields/urls.py
|
Python
|
lgpl-3.0
| 1,091
|
"""
REQUIREMENTS:
- install pip with distribute (http://packages.python.org/distribute/)
- sudo pip install Fabric
"""
from fabric.api import local
def lang(mode="extract"):
"""
REQUIREMENTS:
- Install before pip with distribute_setup.py (Read the environment setup document)
- sudo pip install babel
- sudo pip install jinja2
HOW TO RUN:
option 1) fab lang
option 2) fab lang:compile
"""
if mode == "compile":
local("pybabel compile -f -d ./locale")
else:
local("pybabel extract -F ./locale/babel.cfg -o ./locale/messages.pot ./ --sort-output --no-location --omit-header")
local("pybabel update -l cs_CZ -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l de_DE -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l en_US -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l es_ES -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l fr_FR -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l id_ID -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l it_IT -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l nl_NL -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l pt_BR -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l ru_RU -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l vi_VN -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l zh_CN -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
def start(mode="normal"):
"""
HOW TO RUN:
option 1) fab start
option 2) fab start:clear
"""
if mode == "clear":
local("dev_appserver.py ./ --host 0.0.0.0 --port 8002 --clear_datastore=yes")
else:
local("dev_appserver.py ./ --host 0.0.0.0 --port 8002")
def deploy():
"""
app.yaml never has to be version:default
"""
local("appcfg.py --oauth2 update .")
def test(os="mac"):
"""
REQUIREMENTS:
- install pip with distribute (http://packages.python.org/distribute/)
- sudo pip install mock
- sudo pip install webtest
- sudo pip install pyquery
HOW TO RUN:
option 1) fab test
option 2) fab test:mac
option 3) fab test:linux
"""
path = {
"mac": "/usr/local/google_appengine",
}[os]
local("python testrunner.py {0} ./".format(path))
|
shupelneker/gae_new_structure
|
fabfile.py
|
Python
|
lgpl-3.0
| 2,916
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vocabulary related functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow import gfile
SpecialVocab = collections.namedtuple("SpecialVocab",
["UNK", "SEQUENCE_START", "SEQUENCE_END"])
class VocabInfo(
collections.namedtuple("VocbabInfo",
["path", "vocab_size", "special_vocab"])):
"""Convenience structure for vocabulary information.
"""
@property
def total_size(self):
"""Returns size the the base vocabulary plus the size of extra vocabulary"""
return self.vocab_size + len(self.special_vocab)
def get_vocab_info(vocab_path):
"""Creates a `VocabInfo` instance that contains the vocabulary size and
the special vocabulary for the given file.
Args:
vocab_path: Path to a vocabulary file with one word per line.
Returns:
A VocabInfo tuple.
"""
with gfile.GFile(vocab_path) as file:
vocab_size = sum(1 for _ in file)
special_vocab = get_special_vocab(vocab_size)
return VocabInfo(vocab_path, vocab_size, special_vocab)
def get_special_vocab(vocabulary_size):
"""Returns the `SpecialVocab` instance for a given vocabulary size.
"""
return SpecialVocab(*range(vocabulary_size, vocabulary_size + 3))
def create_vocabulary_lookup_table(filename, default_value=None):
"""Creates a lookup table for a vocabulary file.
Args:
    filename: Path to a vocabulary file containing one word per line.
Each word is mapped to its line number.
default_value: UNK tokens will be mapped to this id.
If None, UNK tokens will be mapped to [vocab_size]
Returns:
A tuple (vocab_to_id_table, id_to_vocab_table,
word_to_count_table, vocab_size). The vocab size does not include
the UNK token.
"""
if not gfile.Exists(filename):
raise ValueError("File does not exist: {}".format(filename))
# Load vocabulary into memory
with gfile.GFile(filename) as file:
vocab = list(line.strip("\n") for line in file)
vocab_size = len(vocab)
has_counts = len(vocab[0].split("\t")) == 2
if has_counts:
vocab, counts = zip(*[_.split("\t") for _ in vocab])
counts = [float(_) for _ in counts]
vocab = list(vocab)
else:
counts = [-1. for _ in vocab]
# Add special vocabulary items
special_vocab = get_special_vocab(vocab_size)
vocab += list(special_vocab._fields)
vocab_size += len(special_vocab)
counts += [-1. for _ in list(special_vocab._fields)]
if default_value is None:
default_value = special_vocab.UNK
tf.logging.info("Creating vocabulary lookup table of size %d", vocab_size)
vocab_tensor = tf.constant(vocab)
count_tensor = tf.constant(counts, dtype=tf.float32)
vocab_idx_tensor = tf.range(vocab_size, dtype=tf.int64)
# Create ID -> word mapping
id_to_vocab_init = tf.contrib.lookup.KeyValueTensorInitializer(
vocab_idx_tensor, vocab_tensor, tf.int64, tf.string)
id_to_vocab_table = tf.contrib.lookup.HashTable(id_to_vocab_init, "UNK")
# Create word -> id mapping
vocab_to_id_init = tf.contrib.lookup.KeyValueTensorInitializer(
vocab_tensor, vocab_idx_tensor, tf.string, tf.int64)
vocab_to_id_table = tf.contrib.lookup.HashTable(vocab_to_id_init,
default_value)
# Create word -> count mapping
word_to_count_init = tf.contrib.lookup.KeyValueTensorInitializer(
vocab_tensor, count_tensor, tf.string, tf.float32)
word_to_count_table = tf.contrib.lookup.HashTable(word_to_count_init, -1)
return vocab_to_id_table, id_to_vocab_table, word_to_count_table, vocab_size
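# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# It assumes TF 1.x-style sessions, matching the tf.contrib.lookup calls
# above, and a hypothetical one-word-per-line vocabulary file at
# "data/vocab.txt".
if __name__ == "__main__":
  vocab_to_id, id_to_vocab, _, size = create_vocabulary_lookup_table(
      "data/vocab.txt")
  with tf.Session() as sess:
    # Lookup tables must be initialized before they can be queried.
    sess.run(tf.tables_initializer())
    ids = sess.run(vocab_to_id.lookup(tf.constant(["the", "never_seen_word"])))
    words = sess.run(id_to_vocab.lookup(tf.constant(ids, dtype=tf.int64)))
    print("vocab size (without special tokens):", size)
    print("ids:", ids, "words:", words)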
|
shashankrajput/seq2seq
|
seq2seq/data/vocab.py
|
Python
|
apache-2.0
| 4,274
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from recommonmark.parser import CommonMarkParser
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
# NOTE: this re-assignment overrides the ['.rst', '.md'] list defined above,
# so Markdown sources are effectively ignored by this configuration.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kubernetes-python-client'
copyright = u'2017, Kubernetes'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'Kubernetes', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
|
sebgoa/client-python
|
doc/source/conf.py
|
Python
|
apache-2.0
| 2,569
|
# Copyright 2013-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Mongo Connector's behavior when its source MongoDB system is
experiencing a rollback.
"""
import os
import sys
import time
from pymongo.read_preferences import ReadPreference
from pymongo.write_concern import WriteConcern
sys.path[0:0] = [""] # noqa
from mongo_connector.doc_managers.doc_manager_simulator import DocManager
from mongo_connector.locking_dict import LockingDict
from mongo_connector.namespace_config import NamespaceConfig
from mongo_connector.oplog_manager import OplogThread
from mongo_connector.test_utils import (
assert_soon,
close_client,
ReplicaSet,
STRESS_COUNT,
)
from mongo_connector.util import retry_until_ok
from tests import unittest
class TestRollbacks(unittest.TestCase):
def tearDown(self):
close_client(self.primary_conn)
close_client(self.secondary_conn)
try:
self.opman.join()
except RuntimeError:
# OplogThread may not have been started
pass
self.repl_set.stop()
def setUp(self):
# Create a new oplog progress file
try:
os.unlink("oplog.timestamp")
except OSError:
pass
open("oplog.timestamp", "w").close()
# Start a replica set
self.repl_set = ReplicaSet().start()
# Connection to the replica set as a whole
self.main_conn = self.repl_set.client()
# Connection to the primary specifically
self.primary_conn = self.repl_set.primary.client()
# Connection to the secondary specifically
self.secondary_conn = self.repl_set.secondary.client(
read_preference=ReadPreference.SECONDARY_PREFERRED
)
# Wipe any test data
self.main_conn.drop_database("test")
# Oplog thread
doc_manager = DocManager()
oplog_progress = LockingDict()
self.opman = OplogThread(
primary_client=self.main_conn,
doc_managers=(doc_manager,),
oplog_progress_dict=oplog_progress,
namespace_config=NamespaceConfig(namespace_set=["test.mc"]),
)
def test_single_target(self):
"""Test with a single replication target"""
self.opman.start()
# Insert first document with primary up
self.main_conn["test"]["mc"].insert_one({"i": 0})
self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)
# Make sure the insert is replicated
secondary = self.secondary_conn
assert_soon(
lambda: secondary["test"]["mc"].count() == 1,
"first write didn't replicate to secondary",
)
# Kill the primary
self.repl_set.primary.stop(destroy=False)
# Wait for the secondary to be promoted
assert_soon(lambda: secondary["admin"].command("isMaster")["ismaster"])
# Insert another document. This will be rolled back later
retry_until_ok(self.main_conn["test"]["mc"].insert_one, {"i": 1})
self.assertEqual(secondary["test"]["mc"].count(), 2)
# Wait for replication to doc manager
assert_soon(
lambda: len(self.opman.doc_managers[0]._search()) == 2,
"not all writes were replicated to doc manager",
)
# Kill the new primary
self.repl_set.secondary.stop(destroy=False)
# Start both servers back up
self.repl_set.primary.start()
primary_admin = self.primary_conn["admin"]
assert_soon(
lambda: primary_admin.command("isMaster")["ismaster"],
"restarted primary never resumed primary status",
)
self.repl_set.secondary.start()
assert_soon(
lambda: retry_until_ok(secondary.admin.command, "replSetGetStatus")[
"myState"
]
== 2,
"restarted secondary never resumed secondary status",
)
assert_soon(
lambda: retry_until_ok(self.main_conn.test.mc.find().count) > 0,
"documents not found after primary/secondary restarted",
)
# Only first document should exist in MongoDB
self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)
# Same case should hold for the doc manager
doc_manager = self.opman.doc_managers[0]
assert_soon(
lambda: len(doc_manager._search()) == 1,
"documents never rolled back in doc manager.",
)
self.assertEqual(doc_manager._search()[0]["i"], 0)
# cleanup
self.opman.join()
def test_many_targets(self):
"""Test with several replication targets"""
# OplogThread has multiple doc managers
doc_managers = [DocManager(), DocManager(), DocManager()]
self.opman.doc_managers = doc_managers
self.opman.start()
# Insert a document into each namespace
self.main_conn["test"]["mc"].insert_one({"i": 0})
self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
# Make sure the insert is replicated
secondary = self.secondary_conn
assert_soon(
lambda: secondary["test"]["mc"].count() == 1,
"first write didn't replicate to secondary",
)
# Kill the primary
self.repl_set.primary.stop(destroy=False)
# Wait for the secondary to be promoted
assert_soon(
lambda: secondary.admin.command("isMaster")["ismaster"],
"secondary was never promoted",
)
# Insert more documents. This will be rolled back later
# Some of these documents will be manually removed from
# certain doc managers, to emulate the effect of certain
# target systems being ahead/behind others
secondary_ids = []
for i in range(1, 10):
secondary_ids.append(
retry_until_ok(
self.main_conn["test"]["mc"].insert_one, {"i": i}
).inserted_id
)
self.assertEqual(self.secondary_conn["test"]["mc"].count(), 10)
# Wait for replication to the doc managers
def docmans_done():
for dm in self.opman.doc_managers:
if len(dm._search()) != 10:
return False
return True
assert_soon(docmans_done, "not all writes were replicated to doc managers")
# Remove some documents from the doc managers to simulate
# uneven replication
ts = self.opman.doc_managers[0].get_last_doc()["_ts"]
for id in secondary_ids[8:]:
self.opman.doc_managers[1].remove(id, "test.mc", ts)
for id in secondary_ids[2:]:
self.opman.doc_managers[2].remove(id, "test.mc", ts)
# Kill the new primary
self.repl_set.secondary.stop(destroy=False)
# Start both servers back up
self.repl_set.primary.start()
primary_admin = self.primary_conn["admin"]
assert_soon(
lambda: primary_admin.command("isMaster")["ismaster"],
"restarted primary never resumed primary status",
)
self.repl_set.secondary.start()
assert_soon(
lambda: retry_until_ok(secondary.admin.command, "replSetGetStatus")[
"myState"
]
== 2,
"restarted secondary never resumed secondary status",
)
assert_soon(
lambda: retry_until_ok(self.primary_conn.test.mc.find().count) > 0,
"documents not found after primary/secondary restarted",
)
# Only first document should exist in MongoDB
self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
self.assertEqual(self.primary_conn["test"]["mc"].find_one()["i"], 0)
# Give OplogThread some time to catch up
time.sleep(10)
# Same case should hold for the doc managers
for dm in self.opman.doc_managers:
self.assertEqual(len(dm._search()), 1)
self.assertEqual(dm._search()[0]["i"], 0)
self.opman.join()
def test_deletions(self):
"""Test rolling back 'd' operations"""
self.opman.start()
# Insert a document, wait till it replicates to secondary
self.main_conn["test"]["mc"].insert_one({"i": 0})
self.main_conn["test"]["mc"].insert_one({"i": 1})
self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 2)
assert_soon(
lambda: self.secondary_conn["test"]["mc"].count() == 2,
"first write didn't replicate to secondary",
)
# Kill the primary, wait for secondary to be promoted
self.repl_set.primary.stop(destroy=False)
assert_soon(
lambda: self.secondary_conn["admin"].command("isMaster")["ismaster"]
)
# Delete first document
retry_until_ok(self.main_conn["test"]["mc"].delete_one, {"i": 0})
self.assertEqual(self.secondary_conn["test"]["mc"].count(), 1)
# Wait for replication to doc manager
assert_soon(
lambda: len(self.opman.doc_managers[0]._search()) == 1,
"delete was not replicated to doc manager",
)
# Kill the new primary
self.repl_set.secondary.stop(destroy=False)
# Start both servers back up
self.repl_set.primary.start()
primary_admin = self.primary_conn["admin"]
assert_soon(
lambda: primary_admin.command("isMaster")["ismaster"],
"restarted primary never resumed primary status",
)
self.repl_set.secondary.start()
assert_soon(
lambda: retry_until_ok(
self.secondary_conn.admin.command, "replSetGetStatus"
)["myState"]
== 2,
"restarted secondary never resumed secondary status",
)
# Both documents should exist in mongo
assert_soon(lambda: retry_until_ok(self.main_conn["test"]["mc"].count) == 2)
        # Both documents should exist in the doc manager
doc_manager = self.opman.doc_managers[0]
assert_soon(
lambda: len(list(doc_manager._search())) == 2,
("Expected two documents, but got: %r" % list(doc_manager._search())),
)
self.opman.join()
def test_stressed_rollback(self):
"""Stress test for a rollback with many documents."""
self.opman.start()
c = self.main_conn.test.mc
docman = self.opman.doc_managers[0]
c2 = c.with_options(write_concern=WriteConcern(w=2))
c2.insert_many([{"i": i} for i in range(STRESS_COUNT)])
assert_soon(lambda: c2.count() == STRESS_COUNT)
def condition():
return len(docman._search()) == STRESS_COUNT
assert_soon(
condition,
(
"Was expecting %d documents in DocManager, "
"but %d found instead." % (STRESS_COUNT, len(docman._search()))
),
)
primary_conn = self.repl_set.primary.client()
self.repl_set.primary.stop(destroy=False)
new_primary_conn = self.repl_set.secondary.client()
admin = new_primary_conn.admin
assert_soon(lambda: retry_until_ok(admin.command, "isMaster")["ismaster"])
retry_until_ok(
c.insert_many, [{"i": str(STRESS_COUNT + i)} for i in range(STRESS_COUNT)]
)
self.repl_set.secondary.stop(destroy=False)
self.repl_set.primary.start()
admin = primary_conn.admin
assert_soon(lambda: retry_until_ok(admin.command, "isMaster")["ismaster"])
self.repl_set.secondary.start()
assert_soon(lambda: retry_until_ok(c.count) == STRESS_COUNT)
assert_soon(
condition,
(
"Was expecting %d documents in DocManager, "
"but %d found instead." % (STRESS_COUNT, len(docman._search()))
),
)
self.opman.join()
if __name__ == "__main__":
unittest.main()
|
ShaneHarvey/mongo-connector
|
tests/test_rollbacks.py
|
Python
|
apache-2.0
| 12,660
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import image_metadata as image_metadata_v21
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import image_fixtures
IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
CHK_QUOTA_STR = 'nova.api.openstack.common.check_img_metadata_properties_quota'
def get_image_123():
return copy.deepcopy(IMAGE_FIXTURES)[0]
class ImageMetaDataTestV21(test.NoDBTestCase):
controller_class = image_metadata_v21.ImageMetadataController
invalid_request = exception.ValidationError
def setUp(self):
super(ImageMetaDataTestV21, self).setUp()
self.controller = self.controller_class()
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_index(self, get_all_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
res_dict = self.controller.index(req, '123')
expected = {'metadata': {'key1': 'value1'}}
self.assertEqual(res_dict, expected)
get_all_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_show(self, get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
res_dict = self.controller.show(req, '123', 'key1')
self.assertIn('meta', res_dict)
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
get_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_show_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key9')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '123', 'key9')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_show_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '100', 'key9')
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_create(self, get_mocked, update_mocked, quota_mocked):
mock_result = copy.deepcopy(get_image_123())
mock_result['properties']['key7'] = 'value7'
update_mocked.return_value = mock_result
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, '123', body=body)
get_mocked.assert_called_once_with(mock.ANY, '123')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'value1', # existing meta
'key7': 'value7' # new meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
self.assertEqual(expected_output, res)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_create_image_not_found(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, '100', body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_all(self, get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.update_all(req, '123', body=body)
get_mocked.assert_called_once_with(mock.ANY, '123')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key9': 'value9' # replace meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'metadata': {'key9': 'value9'}}
self.assertEqual(expected_output, res)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_all_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item(self, _get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.update(req, '123', 'key1', body=body)
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'zz' # changed meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'meta': {'key1': 'zz'}}
self.assertEqual(res, expected_output)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_item_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, '100', 'key1',
body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_bad_body(self, get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"key1": "zz"}
req.body = b''
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPBadRequest())
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_too_many_keys(self, get_mocked, update_mocked,
_quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"foo": "bar"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item_body_uri_mismatch(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'bad',
body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete(self, _get_mocked, update_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'DELETE'
res = self.controller.delete(req, '123', 'key1')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {}
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
self.assertIsNone(res)
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '123', 'blah')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_delete_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '100', 'key1')
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(explanation=''))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_create(self, _get_mocked,
update_mocked, _quota_mocked):
body = {"metadata": {"foo": "bar"}}
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, '123', body=body)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(explanation=''))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_put(self, _get_mocked,
update_mocked, _quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
req.method = 'PUT'
body = {"meta": {"blah": "blah", "blah1": "blah1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'blah',
body=body)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update, req, '123', 'key1',
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update_all(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
% image_id)
req.method = 'PUT'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update_all, req, image_id,
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_create(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
% image_id)
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, image_id,
body=body)
|
bigswitch/nova
|
nova/tests/unit/api/openstack/compute/test_image_metadata.py
|
Python
|
apache-2.0
| 15,727
|
from __future__ import print_function
import numpy as np
from openmdao.api import IndepVarComp, Component, Group, Problem
class AngularVelocity321(Component):
"""
Notes
------
Evaluates the body frame angular velocity from 321 Euler angles and their derivatives
Units are in radians and radians/s
Params
------
Yaw : float
Yaw angle (3-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
Pitch : float
        Pitch angle (2-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
Roll : float
        Roll angle (1-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
Yaw rate : float
Yaw rate of pod body frame. Default value is .01 rad/s
Pitch rate : float
Pitch rate of pod body frame. Default value is .01 rad/s
Roll rate : float
Roll rate of pod body frame. Default value is 0.0 rad/s
Returns
-------
Angular velocity : float
        Returns the body frame angular velocity of the pod in rad/s
"""
def __init__(self):
super(AngularVelocity321, self).__init__()
self.add_param('psi', val = 0.0, units = 'rad', desc = 'Pod yaw angle')
self.add_param('theta', val = 0.0, units = 'rad', desc = 'Pod pitch angle')
self.add_param('phi', val = 0.0, units = 'rad', desc = 'Pod roll angle')
self.add_param('psi_dot', val = 0.0, units = 'rad', desc = 'Pod yaw rate')
self.add_param('theta_dot', val = 0.0, units = 'rad', desc = 'Pod pitch rate')
self.add_param('phi_dot', val = 0.0, units = 'rad', desc = 'Pod roll rate')
self.add_output('omega_b', val = np.matrix('0.0; 0.0; 0.0'), units = 'rad/s', desc = 'Angular velocity vector')
def solve_nonlinear(self, p, u, r):
"""
Notes
------
        omega_b = [[-s(theta), 0, 1], [s(phi)*c(theta), c(phi), 0], [c(phi)*c(theta), -s(phi), 0]] * [[phi_dot], [theta_dot], [psi_dot]]
Params
------
Yaw : float
Yaw angle (3-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
Pitch : float
            Pitch angle (2-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
Roll : float
            Roll angle (1-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
Yaw rate : float
Yaw rate of pod body frame. Default value is .01 rad/s
Pitch rate : float
Pitch rate of pod body frame. Default value is .01 rad/s
Roll rate : float
Roll rate of pod body frame. Default value is 0.0 rad/s
Returns
-------
Angular velocity : float
            Returns the body frame angular velocity of the pod in rad/s
"""
psi = p['psi']
theta = p['theta']
phi = p['phi']
psi_dot = p['psi_dot']
theta_dot = p['theta_dot']
phi_dot = p['phi_dot']
B = np.matrix([[-np.sin(theta), 0.0, 1.0], [np.sin(phi)*np.cos(theta), np.cos(phi), 0.0], [np.cos(phi)*np.cos(theta), -np.sin(phi), 0]])
u['omega_b'] = B * np.matrix([[phi_dot], [theta_dot], [psi_dot]])
if __name__ == '__main__':
top = Problem()
root = top.root = Group()
params = (
('psi', 0.0, {'units' : 'rad'}),
('theta', 0.0, {'units' : 'rad'}),
('phi', 0.0, {'units' : 'rad'}),
('psi_dot', 0.1, {'units' : 'rad'}),
('theta_dot', 0.1, {'units' : 'rad'}),
('phi_dot', 0.0, {'units' : 'rad'})
)
    root.add('input_vars', IndepVarComp(params), promotes = ['psi', 'theta', 'phi', 'psi_dot', 'theta_dot', 'phi_dot'])
    root.add('p', AngularVelocity321(), promotes = ['psi', 'theta', 'phi', 'psi_dot', 'theta_dot', 'phi_dot', 'omega_b'])
top.setup()
top.run()
    print('Body frame angular velocity vector = ')
print(top['omega_b'])
|
jcchin/Hyperloop_v2
|
src/hyperloop/Python/angular_velocity321.py
|
Python
|
apache-2.0
| 3,785
|
"""Validate dependencies."""
import pathlib
from typing import Dict
import re
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.util.yaml import load_yaml
from .model import Integration
def exists(value):
"""Check if value exists."""
if value is None:
raise vol.Invalid("Value cannot be None")
return value
FIELD_SCHEMA = vol.Schema(
{
vol.Required("description"): str,
vol.Optional("example"): exists,
vol.Optional("default"): exists,
vol.Optional("values"): exists,
vol.Optional("required"): bool,
}
)
SERVICE_SCHEMA = vol.Schema(
{
vol.Required("description"): str,
vol.Optional("fields"): vol.Schema({str: FIELD_SCHEMA}),
}
)
SERVICES_SCHEMA = vol.Schema({cv.slug: SERVICE_SCHEMA})
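# Illustrative sketch (not part of the original module): the kind of parsed
# services.yaml content that SERVICES_SCHEMA above accepts. The service and
# field names here are hypothetical.
_EXAMPLE_SERVICES = {
    "set_speed": {
        "description": "Set the fan speed.",
        "fields": {
            "speed": {
                "description": "Target speed.",
                "example": "high",
                "required": True,
            }
        },
    }
}
# SERVICES_SCHEMA(_EXAMPLE_SERVICES) would validate without raising.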
def grep_dir(path: pathlib.Path, glob_pattern: str, search_pattern: str) -> bool:
"""Recursively go through a dir and it's children and find the regex."""
pattern = re.compile(search_pattern)
for fil in path.glob(glob_pattern):
if not fil.is_file():
continue
if pattern.search(fil.read_text()):
return True
return False
def validate_services(integration: Integration):
"""Validate services."""
# Find if integration uses services
has_services = grep_dir(
integration.path, "**/*.py", r"hass\.services\.(register|async_register)"
)
if not has_services:
return
try:
data = load_yaml(str(integration.path / "services.yaml"))
except FileNotFoundError:
integration.add_error("services", "Registers services but has no services.yaml")
return
except HomeAssistantError:
integration.add_error(
"services", "Registers services but unable to load services.yaml"
)
return
try:
SERVICES_SCHEMA(data)
except vol.Invalid as err:
integration.add_error(
"services", "Invalid services.yaml: {}".format(humanize_error(data, err))
)
def validate(integrations: Dict[str, Integration], config):
"""Handle dependencies for integrations."""
# check services.yaml is cool
for integration in integrations.values():
if not integration.manifest:
continue
validate_services(integration)
|
fbradyirl/home-assistant
|
script/hassfest/services.py
|
Python
|
apache-2.0
| 2,462
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for make_zip tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import operator
import os
import re
import string
import traceback
import zipfile
import numpy as np
from six import StringIO
# pylint: disable=g-import-not-at-top
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from tensorflow.lite.testing import _pywrap_string_util
from tensorflow.lite.testing import generate_examples_report as report_lib
from tensorflow.python.framework import graph_util as tf_graph_util
# A map from names to functions which make test cases.
_MAKE_TEST_FUNCTIONS_MAP = {}
# A decorator to register the make test functions.
# Usage:
# All the make_*_test should be registered. Example:
# @register_make_test_function()
# def make_conv_tests(options):
# # ...
# If a function is decorated by other decorators, it's required to specify the
# name explicitly. Example:
# @register_make_test_function(name="make_unidirectional_sequence_lstm_tests")
# @test_util.enable_control_flow_v2
# def make_unidirectional_sequence_lstm_tests(options):
# # ...
def register_make_test_function(name=None):
def decorate(function, name=name):
if name is None:
name = function.__name__
    _MAKE_TEST_FUNCTIONS_MAP[name] = function
    # Return the function so the decorated name still refers to it.
    return function
  return decorate
def get_test_function(test_function_name):
"""Get the test function according to the test function name."""
if test_function_name not in _MAKE_TEST_FUNCTIONS_MAP:
return None
return _MAKE_TEST_FUNCTIONS_MAP[test_function_name]
RANDOM_SEED = 342
TF_TYPE_INFO = {
tf.float32: (np.float32, "FLOAT"),
tf.float16: (np.float16, "FLOAT"),
tf.float64: (np.double, "FLOAT64"),
tf.int32: (np.int32, "INT32"),
tf.uint8: (np.uint8, "QUANTIZED_UINT8"),
tf.int16: (np.int16, "QUANTIZED_INT16"),
tf.int64: (np.int64, "INT64"),
tf.bool: (np.bool, "BOOL"),
tf.string: (np.string_, "STRING"),
}
class ExtraTocoOptions(object):
"""Additional toco options besides input, output, shape."""
def __init__(self):
# Whether to ignore control dependency nodes.
self.drop_control_dependency = False
# Allow custom ops in the toco conversion.
self.allow_custom_ops = False
# Rnn states that are used to support rnn / lstm cells.
self.rnn_states = None
# Split the LSTM inputs from 5 inputs to 18 inputs for TFLite.
self.split_tflite_lstm_inputs = None
# The inference input type passed to TFLiteConvert.
self.inference_input_type = None
# The inference output type passed to TFLiteConvert.
self.inference_output_type = None
def create_tensor_data(dtype, shape, min_value=-100, max_value=100):
"""Build tensor data spreading the range [min_value, max_value)."""
if dtype in TF_TYPE_INFO:
dtype = TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16, tf.float64):
value = (max_value - min_value) * np.random.random_sample(shape) + min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value + 1, shape)
elif dtype == tf.bool:
value = np.random.choice([True, False], size=shape)
elif dtype == np.string_:
# Not the best strings, but they will do for some basic testing.
letters = list(string.ascii_uppercase)
return np.random.choice(letters, size=shape).astype(dtype)
return np.dtype(dtype).type(value) if np.isscalar(value) else value.astype(
dtype)
def create_scalar_data(dtype, min_value=-100, max_value=100):
"""Build scalar tensor data range from min_value to max_value exclusively."""
if dtype in TF_TYPE_INFO:
dtype = TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16, tf.float64):
value = (max_value - min_value) * np.random.random() + min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value + 1)
elif dtype == tf.bool:
value = np.random.choice([True, False])
elif dtype == np.string_:
l = np.random.randint(1, 6)
value = "".join(np.random.choice(list(string.ascii_uppercase), size=l))
return np.array(value, dtype=dtype)
def freeze_graph(session, outputs):
"""Freeze the current graph.
Args:
session: Tensorflow sessions containing the graph
outputs: List of output tensors
Returns:
The frozen graph_def.
"""
return tf_graph_util.convert_variables_to_constants(
session, session.graph.as_graph_def(), [x.op.name for x in outputs])
def format_result(t):
"""Convert a tensor to a format that can be used in test specs."""
if t.dtype.kind not in [np.dtype(np.string_).kind, np.dtype(np.object_).kind]:
# Output 9 digits after the point to ensure the precision is good enough.
values = ["{:.9f}".format(value) for value in list(t.flatten())]
return ",".join(values)
else:
# SerializeAsHexString returns bytes in PY3, so decode if appropriate.
return _pywrap_string_util.SerializeAsHexString(t.flatten()).decode("utf-8")
def write_examples(fp, examples):
"""Given a list `examples`, write a text format representation.
  The file format is CSV-like with a simple repeated pattern. We would like
to use proto here, but we can't yet due to interfacing with the Android
team using this format.
Args:
fp: File-like object to write to.
examples: Example dictionary consisting of keys "inputs" and "outputs"
"""
def write_tensor(fp, x):
"""Write tensor in file format supported by TFLITE example."""
fp.write("dtype,%s\n" % x.dtype)
fp.write("shape," + ",".join(map(str, x.shape)) + "\n")
fp.write("values," + format_result(x) + "\n")
fp.write("test_cases,%d\n" % len(examples))
for example in examples:
fp.write("inputs,%d\n" % len(example["inputs"]))
for i in example["inputs"]:
write_tensor(fp, i)
fp.write("outputs,%d\n" % len(example["outputs"]))
for i in example["outputs"]:
write_tensor(fp, i)
def write_test_cases(fp, model_name, examples):
"""Given a dictionary of `examples`, write a text format representation.
The file format is protocol-buffer-like, even though we don't use proto due
to the needs of the Android team.
Args:
fp: File-like object to write to.
model_name: Filename where the model was written to, relative to filename.
examples: Example dictionary consisting of keys "inputs" and "outputs"
"""
fp.write("load_model: %s\n" % os.path.basename(model_name))
for example in examples:
fp.write("reshape {\n")
for t in example["inputs"]:
fp.write(" input: \"" + ",".join(map(str, t.shape)) + "\"\n")
fp.write("}\n")
fp.write("invoke {\n")
for t in example["inputs"]:
fp.write(" input: \"" + format_result(t) + "\"\n")
for t in example["outputs"]:
fp.write(" output: \"" + format_result(t) + "\"\n")
fp.write(" output_shape: \"" + ",".join([str(dim) for dim in t.shape]) +
"\"\n")
fp.write("}\n")
def get_input_shapes_map(input_tensors):
"""Gets a map of input names to shapes.
Args:
input_tensors: List of input tensor tuples `(name, shape, type)`.
Returns:
{string : list of integers}.
"""
input_arrays = [tensor[0] for tensor in input_tensors]
input_shapes_list = []
for _, shape, _ in input_tensors:
dims = None
if shape:
dims = [dim.value for dim in shape.dims]
input_shapes_list.append(dims)
input_shapes = {
name: shape
for name, shape in zip(input_arrays, input_shapes_list)
if shape
}
return input_shapes
def _normalize_output_name(output_name):
"""Remove :0 suffix from tensor names."""
return output_name.split(":")[0] if output_name.endswith(
":0") else output_name
# How many test cases we may have in a zip file. Too many test cases will
# slow down the test data generation process.
_MAX_TESTS_PER_ZIP = 500
def make_zip_of_tests(options,
test_parameters,
make_graph,
make_test_inputs,
extra_toco_options=ExtraTocoOptions(),
use_frozen_graph=False,
expected_tf_failures=0):
"""Helper to make a zip file of a bunch of TensorFlow models.
This does a cartesian product of the dictionary of test_parameters and
calls make_graph() for each item in the cartesian product set.
If the graph is built successfully, then make_test_inputs() is called to
build expected input/output value pairs. The model is then converted to tflite
with toco, and the examples are serialized with the tflite model into a zip
file (2 files per item in the cartesian product set).
Args:
options: An Options instance.
test_parameters: Dictionary mapping to lists for each parameter.
e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}`
make_graph: function that takes current parameters and returns tuple
`[input1, input2, ...], [output1, output2, ...]`
make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,
`output_tensors` and returns tuple `(input_values, output_values)`.
extra_toco_options: Additional toco options.
use_frozen_graph: Whether or not freeze graph before toco converter.
expected_tf_failures: Number of times tensorflow is expected to fail in
executing the input graphs. In some cases it is OK for TensorFlow to fail
      because one or more combinations of parameters are invalid.
Raises:
RuntimeError: if there are converter errors that can't be ignored.
"""
zip_path = os.path.join(options.output_path, options.zip_to_output)
parameter_count = 0
for parameters in test_parameters:
parameter_count += functools.reduce(
operator.mul, [len(values) for values in parameters.values()])
all_parameter_count = parameter_count
if options.multi_gen_state:
all_parameter_count += options.multi_gen_state.parameter_count
if not options.no_tests_limit and all_parameter_count > _MAX_TESTS_PER_ZIP:
raise RuntimeError(
"Too many parameter combinations for generating '%s'.\n"
"There are at least %d combinations while the upper limit is %d.\n"
"Having too many combinations will slow down the tests.\n"
"Please consider splitting the test into multiple functions.\n" %
(zip_path, all_parameter_count, _MAX_TESTS_PER_ZIP))
if options.multi_gen_state:
options.multi_gen_state.parameter_count = all_parameter_count
# TODO(aselle): Make this allow multiple inputs outputs.
if options.multi_gen_state:
archive = options.multi_gen_state.archive
else:
archive = zipfile.PyZipFile(zip_path, "w")
zip_manifest = []
convert_report = []
toco_errors = 0
processed_labels = set()
if options.make_edgetpu_tests:
extra_toco_options.inference_input_type = tf.uint8
extra_toco_options.inference_output_type = tf.uint8
# Only count parameters when fully_quantize is True.
parameter_count = 0
for parameters in test_parameters:
if True in parameters.get("fully_quantize",
[]) and False in parameters.get(
"quant_16x8", [False]):
parameter_count += functools.reduce(operator.mul, [
len(values)
for key, values in parameters.items()
if key != "fully_quantize" and key != "quant_16x8"
])
label_base_path = zip_path
if options.multi_gen_state:
label_base_path = options.multi_gen_state.label_base_path
for parameters in test_parameters:
keys = parameters.keys()
for curr in itertools.product(*parameters.values()):
label = label_base_path.replace(".zip", "_") + (",".join(
"%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", ""))
if label[0] == "/":
label = label[1:]
if label in processed_labels:
# Do not populate data for the same label more than once. It will cause
# errors when unzipping.
continue
processed_labels.add(label)
param_dict = dict(zip(keys, curr))
if options.make_edgetpu_tests and (not param_dict.get(
"fully_quantize", False) or param_dict.get("quant_16x8", False)):
continue
def generate_inputs_outputs(tflite_model_binary,
min_value=0,
max_value=255):
"""Generate input values and output values of the given tflite model.
Args:
tflite_model_binary: A serialized flatbuffer as a string.
min_value: min value for the input tensor.
max_value: max value for the input tensor.
Returns:
(input_values, output_values): input values and output values built.
"""
interpreter = tf.lite.Interpreter(model_content=tflite_model_binary)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
input_values = []
for input_detail in input_details:
input_value = create_tensor_data(
input_detail["dtype"],
input_detail["shape"],
min_value=min_value,
max_value=max_value)
interpreter.set_tensor(input_detail["index"], input_value)
input_values.append(input_value)
interpreter.invoke()
output_details = interpreter.get_output_details()
output_values = []
for output_detail in output_details:
output_values.append(interpreter.get_tensor(output_detail["index"]))
return input_values, output_values
def build_example(label, param_dict_real):
"""Build the model with parameter values set in param_dict_real.
Args:
label: Label of the model (i.e. the filename in the zip).
param_dict_real: Parameter dictionary (arguments to the factories
make_graph and make_test_inputs)
Returns:
(tflite_model_binary, report) where tflite_model_binary is the
serialized flatbuffer as a string and report is a dictionary with
keys `toco_log` (log of toco conversion), `tf_log` (log of tf
conversion), `toco` (a string of success status of the conversion),
`tf` (a string success status of the conversion).
"""
np.random.seed(RANDOM_SEED)
report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED}
# Build graph
report["tf_log"] = ""
report["toco_log"] = ""
tf.reset_default_graph()
with tf.Graph().as_default():
with tf.device("/cpu:0"):
try:
inputs, outputs = make_graph(param_dict_real)
except (tf.errors.UnimplementedError,
tf.errors.InvalidArgumentError, ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
sess = tf.Session()
try:
baseline_inputs, baseline_outputs = (
make_test_inputs(param_dict_real, sess, inputs, outputs))
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
report["toco"] = report_lib.FAILED
report["tf"] = report_lib.SUCCESS
# Convert graph to toco
input_tensors = [(input_tensor.name.split(":")[0], input_tensor.shape,
input_tensor.dtype) for input_tensor in inputs]
output_tensors = [_normalize_output_name(out.name) for out in outputs]
# pylint: disable=g-long-ternary
graph_def = freeze_graph(
sess,
tf.global_variables() + inputs +
outputs) if use_frozen_graph else sess.graph_def
if "split_tflite_lstm_inputs" in param_dict_real:
extra_toco_options.split_tflite_lstm_inputs = param_dict_real[
"split_tflite_lstm_inputs"]
tflite_model_binary, toco_log = options.tflite_convert_function(
options,
graph_def,
input_tensors,
output_tensors,
extra_toco_options=extra_toco_options,
test_params=param_dict_real)
report["toco"] = (
report_lib.SUCCESS
if tflite_model_binary is not None else report_lib.FAILED)
report["toco_log"] = toco_log
if options.save_graphdefs:
archive.writestr(label + ".pbtxt",
text_format.MessageToString(graph_def),
zipfile.ZIP_DEFLATED)
if tflite_model_binary:
if options.make_edgetpu_tests:
# Set proper min max values according to input dtype.
baseline_inputs, baseline_outputs = generate_inputs_outputs(
tflite_model_binary, min_value=0, max_value=255)
archive.writestr(label + ".bin", tflite_model_binary,
zipfile.ZIP_DEFLATED)
example = {"inputs": baseline_inputs, "outputs": baseline_outputs}
example_fp = StringIO()
write_examples(example_fp, [example])
archive.writestr(label + ".inputs", example_fp.getvalue(),
zipfile.ZIP_DEFLATED)
example_fp2 = StringIO()
write_test_cases(example_fp2, label + ".bin", [example])
archive.writestr(label + "_tests.txt", example_fp2.getvalue(),
zipfile.ZIP_DEFLATED)
zip_manifest.append(label + "\n")
return tflite_model_binary, report
_, report = build_example(label, param_dict)
if report["toco"] == report_lib.FAILED:
ignore_error = False
if not options.known_bugs_are_errors:
for pattern, bug_number in options.known_bugs.items():
if re.search(pattern, label):
print("Ignored converter error due to bug %s" % bug_number)
ignore_error = True
if not ignore_error:
toco_errors += 1
print("-----------------\nconverter error!\n%s\n-----------------\n" %
report["toco_log"])
convert_report.append((param_dict, report))
if not options.no_conversion_report:
report_io = StringIO()
report_lib.make_report_table(report_io, zip_path, convert_report)
if options.multi_gen_state:
archive.writestr("report_" + options.multi_gen_state.test_name + ".html",
report_io.getvalue())
else:
archive.writestr("report.html", report_io.getvalue())
if options.multi_gen_state:
options.multi_gen_state.zip_manifest.extend(zip_manifest)
else:
archive.writestr("manifest.txt", "".join(zip_manifest),
zipfile.ZIP_DEFLATED)
# Log statistics of what succeeded
total_conversions = len(convert_report)
tf_success = sum(
1 for x in convert_report if x[1]["tf"] == report_lib.SUCCESS)
toco_success = sum(
1 for x in convert_report if x[1]["toco"] == report_lib.SUCCESS)
percent = 0
if tf_success > 0:
percent = float(toco_success) / float(tf_success) * 100.
tf.logging.info(("Archive %s Considered %d graphs, %d TF evaluated graphs "
" and %d TOCO converted graphs (%.1f%%"), zip_path,
total_conversions, tf_success, toco_success, percent)
tf_failures = parameter_count - tf_success
if tf_failures / parameter_count > 0.8:
raise RuntimeError(("Test for '%s' is not very useful. "
"TensorFlow fails in %d percent of the cases.") %
(zip_path, int(100 * tf_failures / parameter_count)))
if not options.make_edgetpu_tests and tf_failures != expected_tf_failures:
raise RuntimeError(("Expected TF to fail %d times while generating '%s', "
"but that happened %d times") %
(expected_tf_failures, zip_path, tf_failures))
if not options.ignore_converter_errors and toco_errors > 0:
raise RuntimeError("Found %d errors while generating toco models" %
toco_errors)
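# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal test
# factory written in the style that `register_make_test_function` and
# `make_zip_of_tests` above expect. The op choice (tf.identity) and the
# parameter names are assumptions; real factories live in the make_*_test
# modules and are registered the same way.
@register_make_test_function(name="make_identity_tests_example")
def make_identity_tests_example(options):
  """Make a trivial zip of tests for tf.identity."""
  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[1, 3], [2, 2]],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    out = tf.identity(input_tensor)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)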
|
davidzchen/tensorflow
|
tensorflow/lite/testing/zip_test_utils.py
|
Python
|
apache-2.0
| 20,899
|
from __future__ import unicode_literals
from future.builtins import str
from datetime import datetime
import re
try:
from urllib.parse import quote
except ImportError:
# Python 2
from urllib import quote
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import urlize
from django.utils.timezone import make_aware, utc
from django.utils.translation import ugettext_lazy as _
from requests_oauthlib import OAuth1
import requests
from mezzanine.conf import settings
from mezzanine.twitter import QUERY_TYPE_CHOICES, QUERY_TYPE_USER, \
QUERY_TYPE_LIST, QUERY_TYPE_SEARCH
from mezzanine.twitter import get_auth_settings
from mezzanine.twitter.managers import TweetManager
re_usernames = re.compile(r"(^|\W)@([0-9a-zA-Z+_]+)", re.IGNORECASE)
re_hashtags = re.compile(r"#([0-9a-zA-Z+_]+)", re.IGNORECASE)
replace_hashtags = "<a href=\"http://twitter.com/search?q=%23\\1\">#\\1</a>"
replace_usernames = "\\1<a href=\"http://twitter.com/\\2\">@\\2</a>"
class TwitterQueryException(Exception):
pass
@python_2_unicode_compatible
class Query(models.Model):
type = models.CharField(_("Type"), choices=QUERY_TYPE_CHOICES,
max_length=10)
value = models.CharField(_("Value"), max_length=140)
interested = models.BooleanField("Interested", default=True)
class Meta:
verbose_name = _("Twitter query")
verbose_name_plural = _("Twitter queries")
ordering = ("-id",)
def __str__(self):
return "%s: %s" % (self.get_type_display(), self.value)
def run(self):
"""
Request new tweets from the Twitter API.
"""
try:
value = quote(self.value)
except KeyError:
value = self.value
urls = {
QUERY_TYPE_USER: ("https://api.twitter.com/1.1/statuses/"
"user_timeline.json?screen_name=%s"
"&include_rts=true" % value.lstrip("@")),
QUERY_TYPE_LIST: ("https://api.twitter.com/1.1/lists/statuses.json"
"?list_id=%s&include_rts=true" % value),
QUERY_TYPE_SEARCH: "https://api.twitter.com/1.1/search/tweets.json"
"?q=%s" % value,
}
try:
url = urls[self.type]
except KeyError:
raise TwitterQueryException("Invalid query type: %s" % self.type)
auth_settings = get_auth_settings()
if not auth_settings:
from mezzanine.conf import registry
if self.value == registry["TWITTER_DEFAULT_QUERY"]["default"]:
# These are some read-only keys and secrets we use
# for the default query (eg nothing has been configured)
auth_settings = (
"KxZTRD3OBft4PP0iQW0aNQ",
"sXpQRSDUVJ2AVPZTfh6MrJjHfOGcdK4wRb1WTGQ",
"1368725588-ldWCsd54AJpG2xcB5nyTHyCeIC3RJcNVUAkB1OI",
"r9u7qS18t8ad4Hu9XVqmCGxlIpzoCN3e1vx6LOSVgyw3R",
)
else:
raise TwitterQueryException("Twitter OAuth settings missing")
try:
tweets = requests.get(url, auth=OAuth1(*auth_settings)).json()
except Exception as e:
raise TwitterQueryException("Error retrieving: %s" % e)
try:
raise TwitterQueryException(tweets["errors"][0]["message"])
except (IndexError, KeyError, TypeError):
pass
if self.type == "search":
tweets = tweets["statuses"]
for tweet_json in tweets:
remote_id = str(tweet_json["id"])
tweet, created = self.tweets.get_or_create(remote_id=remote_id)
if not created:
continue
if "retweeted_status" in tweet_json:
user = tweet_json['user']
tweet.retweeter_user_name = user["screen_name"]
tweet.retweeter_full_name = user["name"]
tweet.retweeter_profile_image_url = user["profile_image_url"]
tweet_json = tweet_json["retweeted_status"]
if self.type == QUERY_TYPE_SEARCH:
tweet.user_name = tweet_json['user']['screen_name']
tweet.full_name = tweet_json['user']['name']
tweet.profile_image_url = \
tweet_json['user']["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
else:
user = tweet_json["user"]
tweet.user_name = user["screen_name"]
tweet.full_name = user["name"]
tweet.profile_image_url = user["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
tweet.text = urlize(tweet_json["text"])
tweet.text = re_usernames.sub(replace_usernames, tweet.text)
tweet.text = re_hashtags.sub(replace_hashtags, tweet.text)
if getattr(settings, 'TWITTER_STRIP_HIGH_MULTIBYTE', False):
chars = [ch for ch in tweet.text if ord(ch) < 0x800]
tweet.text = ''.join(chars)
d = datetime.strptime(tweet_json["created_at"], date_format)
tweet.created_at = make_aware(d, utc)
try:
tweet.save()
except Warning:
pass
tweet.save()
self.interested = False
self.save()
class Tweet(models.Model):
remote_id = models.CharField(_("Twitter ID"), max_length=50)
created_at = models.DateTimeField(_("Date/time"), null=True)
text = models.TextField(_("Message"), null=True)
profile_image_url = models.URLField(_("Profile image URL"), null=True)
user_name = models.CharField(_("User name"), max_length=100, null=True)
full_name = models.CharField(_("Full name"), max_length=100, null=True)
retweeter_profile_image_url = models.URLField(
_("Profile image URL (Retweeted by)"), null=True)
retweeter_user_name = models.CharField(
_("User name (Retweeted by)"), max_length=100, null=True)
retweeter_full_name = models.CharField(
_("Full name (Retweeted by)"), max_length=100, null=True)
query = models.ForeignKey("Query", on_delete=models.CASCADE,
related_name="tweets")
objects = TweetManager()
class Meta:
verbose_name = _("Tweet")
verbose_name_plural = _("Tweets")
ordering = ("-created_at",)
def __str__(self):
return "%s: %s" % (self.user_name, self.text)
def is_retweet(self):
return self.retweeter_user_name is not None
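# A minimal usage sketch (not part of the original file), assuming Twitter
# OAuth settings are configured and using a hypothetical "#django" search:
#
#   from mezzanine.twitter import QUERY_TYPE_SEARCH
#   from mezzanine.twitter.models import Query
#
#   query, created = Query.objects.get_or_create(type=QUERY_TYPE_SEARCH,
#                                                 value="#django")
#   query.run()                       # fetch and store new tweets
#   for tweet in query.tweets.all():  # related_name="tweets" on Tweet.query
#       print(tweet)                  # "<user_name>: <linkified text>"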
|
molokov/mezzanine
|
mezzanine/twitter/models.py
|
Python
|
bsd-2-clause
| 6,660
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.management.base import BaseCommand
from olympia.files.models import WebextPermissionDescription
from olympia.files.tasks import update_webext_descriptions_all
class Command(BaseCommand):
help = ('Download and update webextension permission descriptions from '
'mozilla-central.')
def add_arguments(self, parser):
"""Handle command arguments."""
parser.add_argument(
'--clear',
action='store_true',
dest='clear',
default=False,
help='Clear existing descriptions in the database first.')
def handle(self, *args, **options):
if options['clear']:
WebextPermissionDescription.objects.all().delete()
central_url = settings.WEBEXT_PERM_DESCRIPTIONS_URL
locales_url = settings.WEBEXT_PERM_DESCRIPTIONS_LOCALISED_URL
amo_locales = [l for l in settings.AMO_LANGUAGES
if l not in ('en-US', 'dbg', 'dbr', 'dbl')]
# Fetch canonical en-US descriptions first; then l10n after.
update_webext_descriptions_all.apply_async(
args=[(central_url, 'en-US'),
[(locales_url.format(locale=locale), locale)
for locale in amo_locales]])
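# Usage sketch (assumption, not part of the original file): being a Django
# management command named after this module, it would typically be run as
#
#   ./manage.py update_permissions_from_mc --clear
#
# or invoked programmatically:
#
#   from django.core.management import call_command
#   call_command('update_permissions_from_mc', clear=True)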
|
tsl143/addons-server
|
src/olympia/files/management/commands/update_permissions_from_mc.py
|
Python
|
bsd-3-clause
| 1,322
|
"""
Using VTK to assemble a pipeline for segmenting MRI images. This example
shows how to insert well-controlled custom VTK filters in Mayavi.
This example downloads an MRI scan, turns it into a 3D numpy array, and
applies a segmentation procedure made of VTK filters to extract the
gray-matter/white-matter boundary.
The segmentation algorithm used here is very naive and should, of course,
not be used as an example of segmentation.
"""
### Download the data, if not already on disk ##################################
import os
if not os.path.exists('MRbrain.tar.gz'):
# Download the data
import urllib
print "Downloading data, Please Wait (7.8MB)"
opener = urllib.urlopen(
'http://graphics.stanford.edu/data/voldata/MRbrain.tar.gz')
open('MRbrain.tar.gz', 'wb').write(opener.read())
# Extract the data
import tarfile
tar_file = tarfile.open('MRbrain.tar.gz')
try:
    os.mkdir('mri_data')
except OSError:
    # The directory may already exist
    pass
tar_file.extractall('mri_data')
tar_file.close()
### Read the data in a numpy 3D array ##########################################
import numpy as np
data = np.array([np.fromfile(os.path.join('mri_data', 'MRbrain.%i' % i),
dtype='>u2') for i in range(1, 110)])
data.shape = (109, 256, 256)
data = data.T
################################################################################
# Heuristic for finding the threshold for the brain
# Extract the 20th and 80th percentiles (without using
# scipy.stats.scoreatpercentile)
sorted_data = np.sort(data.ravel())
l = len(sorted_data)
lower_thr = sorted_data[int(0.2 * l)]
upper_thr = sorted_data[int(0.8 * l)]
# The white matter boundary: find the densest part of the upper half
# of histogram, and take a value 10% higher, to cut _in_ the white matter
hist, bins = np.histogram(data[data > np.mean(data)], bins=50)
brain_thr_idx = np.argmax(hist)
brain_thr = bins[brain_thr_idx + 4]
del hist, bins, brain_thr_idx
# Display the data #############################################################
from mayavi import mlab
from tvtk.api import tvtk
fig = mlab.figure(bgcolor=(0, 0, 0), size=(400, 500))
# to speed things up
fig.scene.disable_render = True
src = mlab.pipeline.scalar_field(data)
# Our data is not equally spaced in all directions:
src.spacing = [1, 1, 1.5]
src.update_image_data = True
#----------------------------------------------------------------------
# Brain extraction pipeline
# In the following, we create a Mayavi pipeline that strongly
# relies on VTK filters. For this, we make heavy use of the
# mlab.pipeline.user_defined function, to include VTK filters in
# the Mayavi pipeline.
# Apply image-based filters to clean up noise
thresh_filter = tvtk.ImageThreshold()
thresh_filter.threshold_between(lower_thr, upper_thr)
thresh = mlab.pipeline.user_defined(src, filter=thresh_filter)
median_filter = tvtk.ImageMedian3D()
median_filter.set_kernel_size(3, 3, 3)
median = mlab.pipeline.user_defined(thresh, filter=median_filter)
diffuse_filter = tvtk.ImageAnisotropicDiffusion3D(
diffusion_factor=1.0,
diffusion_threshold=100.0,
number_of_iterations=5, )
diffuse = mlab.pipeline.user_defined(median, filter=diffuse_filter)
# Extract brain surface
contour = mlab.pipeline.contour(diffuse, )
contour.filter.contours = [brain_thr, ]
# Apply mesh filter to clean up the mesh (decimation and smoothing)
dec = mlab.pipeline.decimate_pro(contour)
dec.filter.feature_angle = 60.
dec.filter.target_reduction = 0.7
smooth_ = tvtk.SmoothPolyDataFilter(
number_of_iterations=10,
relaxation_factor=0.1,
feature_angle=60,
feature_edge_smoothing=False,
boundary_smoothing=False,
convergence=0.,
)
smooth = mlab.pipeline.user_defined(dec, filter=smooth_)
# Get the largest connected region
connect_ = tvtk.PolyDataConnectivityFilter(extraction_mode=4)
connect = mlab.pipeline.user_defined(smooth, filter=connect_)
# Compute normals for shading the surface
compute_normals = mlab.pipeline.poly_data_normals(connect)
compute_normals.filter.feature_angle = 80
surf = mlab.pipeline.surface(compute_normals,
color=(0.9, 0.72, 0.62))
#----------------------------------------------------------------------
# Display a cut plane of the raw data
ipw = mlab.pipeline.image_plane_widget(src, colormap='bone',
plane_orientation='z_axes',
slice_index=55)
mlab.view(-165, 32, 350, [143, 133, 73])
mlab.roll(180)
fig.scene.disable_render = False
#----------------------------------------------------------------------
# To make the link between the Mayavi pipeline and the much more
# complex VTK pipeline, we display both:
mlab.show_pipeline(rich_view=False)
from tvtk.pipeline.browser import PipelineBrowser
browser = PipelineBrowser(fig.scene)
browser.show()
mlab.show()
|
liulion/mayavi
|
examples/mayavi/advanced_visualization/tvtk_segmentation.py
|
Python
|
bsd-3-clause
| 5,023
|
"""
Plot MEG inverse solution
=========================
Data were computed using mne-python (http://martinos.org/mne)
"""
import os
import numpy as np
from surfer import Brain
from surfer.io import read_stc
print(__doc__)
"""
define subject, surface and hemisphere(s) to plot
"""
subject_id, surface = 'fsaverage', 'inflated'
hemi = 'split'
"""
create Brain object for visualization
"""
brain = Brain(subject_id, hemi, surface, size=(800, 400))
"""
read MNE dSPM inverse solution
"""
for hemi in ['lh', 'rh']:
stc_fname = os.path.join('example_data/meg_source_estimate-' +
hemi + '.stc')
stc = read_stc(stc_fname)
"""
data and vertices for which the data is defined
"""
data = stc['data']
vertices = stc['vertices']
"""
time points (in seconds)
"""
time = np.linspace(stc['tmin'], stc['tmin'] + data.shape[1] * stc['tstep'],
data.shape[1])
"""
colormap to use
"""
colormap = 'hot'
"""
label for time annotation in milliseconds
"""
time_label = lambda t: 'time=%0.2f ms' % (t * 1e3)
brain.add_data(data, colormap=colormap, vertices=vertices,
smoothing_steps=10, time=time, time_label=time_label,
hemi=hemi)
"""
scale colormap and set time (index) to display
"""
brain.set_data_time_index(2)
brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
"""
uncomment these lines to use the interactive TimeViewer GUI
"""
# from surfer import TimeViewer
# viewer = TimeViewer(brain)
|
diego0020/PySurfer
|
examples/plot_meg_inverse_solution.py
|
Python
|
bsd-3-clause
| 1,574
|
"""
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.domains.std import Cmdoption
from sphinx.util.compat import Directive
from sphinx.util.console import bold
from sphinx.util.nodes import set_source_info
from sphinx.writers.html import SmartyPantsHTMLTranslator
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_crossref_type(
directivename="templatetag",
rolename="ttag",
indextemplate="pair: %s; template tag"
)
app.add_crossref_type(
directivename="templatefilter",
rolename="tfilter",
indextemplate="pair: %s; template filter"
)
app.add_crossref_type(
directivename="fieldlookup",
rolename="lookup",
indextemplate="pair: %s; field lookup type",
)
app.add_description_unit(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; django-admin command",
parse_node=parse_django_admin_node,
)
app.add_directive('django-admin-option', Cmdoption)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
# register the snippet directive
app.add_directive('snippet', SnippetWithFilename)
# register a node for snippet directive so that the xml parser
# knows how to handle the enter/exit parsing event
app.add_node(snippet_with_filename,
html=(visit_snippet, depart_snippet_literal),
latex=(visit_snippet_latex, depart_snippet_latex),
man=(visit_snippet_literal, depart_snippet_literal),
text=(visit_snippet_literal, depart_snippet_literal),
texinfo=(visit_snippet_literal, depart_snippet_literal))
return {'parallel_read_safe': True}
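# For reference (not part of the original file): each add_crossref_type()
# call above registers a paired directive and role, so the documentation can
# declare and reference targets along these lines:
#
#   .. setting:: DEBUG
#
#   ... which is then linked elsewhere with :setting:`DEBUG`; similarly
#   :ttag:`for`, :tfilter:`date` and :lookup:`exact`.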
class snippet_with_filename(nodes.literal_block):
"""
Subclass the literal_block to override the visit/depart event handlers
"""
pass
def visit_snippet_literal(self, node):
"""
default literal block handler
"""
self.visit_literal_block(node)
def depart_snippet_literal(self, node):
"""
default literal block handler
"""
self.depart_literal_block(node)
def visit_snippet(self, node):
"""
HTML document generator visit handler
"""
lang = self.highlightlang
linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(node.rawsource, lang,
warn=warner,
linenos=linenos,
**highlight_args)
starttag = self.starttag(node, 'div', suffix='',
CLASS='highlight-%s snippet' % lang)
self.body.append(starttag)
    self.body.append('<div class="snippet-filename">%s</div>\n' % (fname,))
self.body.append(highlighted)
self.body.append('</div>\n')
raise nodes.SkipNode
def visit_snippet_latex(self, node):
"""
Latex document generator visit handler
"""
code = node.rawsource.rstrip('\n')
lang = self.hlsettingstack[-1][0]
linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.curfilestack[-1], node.line))
hlcode = self.highlighter.highlight_block(code, lang, warn=warner,
linenos=linenos,
**highlight_args)
self.body.append(
'\n{\\colorbox[rgb]{0.9,0.9,0.9}'
'{\\makebox[\\textwidth][l]'
'{\\small\\texttt{%s}}}}\n' % (
# Some filenames have '_', which is special in latex.
fname.replace('_', r'\_'),
)
)
if self.table:
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{OriginalVerbatim}')
self.table.has_problematic = True
self.table.has_verbatim = True
hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim}
hlcode = hlcode.rstrip() + '\n'
self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' %
(self.table and 'Original' or ''))
# Prevent rawsource from appearing in output a second time.
raise nodes.SkipNode
def depart_snippet_latex(self, node):
"""
Latex document generator depart handler.
"""
pass
class SnippetWithFilename(Directive):
"""
    The 'snippet' directive, which allows adding the (optional) filename
    of a code snippet to the document. This is modeled after CodeBlock.
"""
has_content = True
optional_arguments = 1
option_spec = {'filename': directives.unchanged_required}
def run(self):
code = '\n'.join(self.content)
literal = snippet_with_filename(code, code)
if self.arguments:
literal['language'] = self.arguments[0]
literal['filename'] = self.options['filename']
set_source_info(self, literal)
return [literal]
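# Usage sketch (assumption, not in the original file): in a reST source the
# directive defined above would be written roughly as
#
#   .. snippet:: python
#       :filename: myapp/views.py
#
#       def my_view(request):
#           ...
#
# where the optional argument picks the highlight language and the
# ``filename`` option is rendered above the highlighted block.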
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
if self.arguments[0] == env.config.django_next_version:
node['version'] = "Development version"
else:
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
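# Usage sketch (assumption, not in the original file): the directive takes a
# single version argument, with any explanation supplied as content, e.g.
#
#   .. versionadded:: 1.9
#
#       The hypothetical ``foo`` argument was added.
#
# If the argument matches the configured ``django_next_version``, the callout
# is rendered as "Development version" instead of the number.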
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
def visit_desc_parameterlist(self, node):
self.body.append('(') # by default sphinx puts <big> around the "("
self.first_param = 1
self.optional_param_level = 0
self.param_separator = node.child_text_separator
self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
for c in node.children])
def depart_desc_parameterlist(self, node):
self.body.append(')')
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
# which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'versionchanged': 'Changed in Django %s',
'versionadded': 'New in Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
version_text = self.version_text.get(node['type'])
if version_text:
title = "%s%s" % (
version_text % node['version'],
":" if len(node) else "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
SmartyPantsHTMLTranslator.visit_section(self, node)
node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env.ref_context['std:program'] = command
title = "django-admin %s" % sig
signode += addnodes.desc_name(title, title)
return command
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super(DjangoStandaloneHTMLBuilder, self).finish()
self.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [
n for ((t, n), (k, a)) in xrefs.items()
if t == "templatetag" and k == "ref/templates/builtins"
],
"tfilters": [
n for ((t, n), (k, a)) in xrefs.items()
if t == "templatefilter" and k == "ref/templates/builtins"
],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
with open(outfilename, 'w') as fp:
fp.write('var django_template_builtins = ')
json.dump(templatebuiltins, fp)
fp.write(';\n')
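# For reference (an assumption, not part of the original file): the generated
# templatebuiltins.js ends up holding a single assignment of the form
#
#   var django_template_builtins = {"ttags": ["for", ...],
#                                   "tfilters": ["date", ...]};
#
# presumably consumed by the docs' JavaScript to cross-link built-in template
# tags and filters.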
|
frishberg/django
|
docs/_ext/djangodocs.py
|
Python
|
bsd-3-clause
| 10,734
|