| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
| sda2b/youtube-dl | refs/heads/master | youtube_dl/extractor/helsinki.py | 165 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import js_to_json
class HelsinkiIE(InfoExtractor):
IE_DESC = 'helsinki.fi'
_VALID_URL = r'https?://video\.helsinki\.fi/Arkisto/flash\.php\?id=(?P<id>\d+)'
_TEST = {
'url': 'http://video.helsinki.fi/Arkisto/flash.php?id=20258',
'info_dict': {
'id': '20258',
'ext': 'mp4',
'title': 'Tietotekniikkafoorumi-iltapäivä',
'description': 'md5:f5c904224d43c133225130fe156a5ee0',
},
'params': {
'skip_download': True, # RTMP
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
params = self._parse_json(self._html_search_regex(
r'(?s)jwplayer\("player"\).setup\((\{.*?\})\);',
webpage, 'player code'), video_id, transform_source=js_to_json)
formats = [{
'url': s['file'],
'ext': 'mp4',
} for s in params['sources']]
self._sort_formats(formats)
return {
'id': video_id,
'title': self._og_search_title(webpage).replace('Video: ', ''),
'description': self._og_search_description(webpage),
'formats': formats,
}
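# Note on the extraction above (a sketch of the matched markup, not taken from
# the site itself): the regex targets an embedded JWPlayer call of the form
#   jwplayer("player").setup({sources: [{file: "rtmp://..."}], ...});
# js_to_json converts that JavaScript object literal into strict JSON so that
# _parse_json can read the 'sources' list.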
|
| gpatonay/popy | refs/heads/master | popy.py | 1 |
#!/usr/bin/env python2
import json
from utils import UnixLogin
from smtp_server import *
def loadCfg( cfgFile ):
try:
with open( cfgFile ) as f:
return json.loads( f.read() )
except Exception :
raise Exception( "Config file missing or invalid" )
cfg=loadCfg("popy.json")
# configure served domains
postoffices=[ str(key) for key in cfg["postoffices"].iterkeys() ]
PopPolicy.postoffices=postoffices
PopMailHandler.postoffices=postoffices
# configure handlers for delivery
PopMailHandler.local=LocalDelivery()
PopMailHandler.forward=Forward()
# configure authentication
if cfg["server"]["auth"] == "unix" :
PopAuth.setAuth( UnixLogin.auth )
else :
PopAuth.setAuth( lambda u,p : True ) # just for testing!!!
sslparams={}
if cfg["server"]["keyfile"] :
sslparams["key"]=cfg["server"]["keyfile"]
if cfg["server"]["certfile"] :
sslparams["cert"]=cfg["server"]["certfile"]
server=SmtpServer( cfg["server"]["host"], cfg["server"]["port"], sslparams )
try:
server.serve_forever()
except KeyboardInterrupt:
pass
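# A minimal popy.json sketch, inferred from the keys read above (all values
# are illustrative, not taken from the project's documentation):
# {
#     "postoffices": {"example.com": {}},
#     "server": {"host": "0.0.0.0", "port": 2525, "auth": "unix",
#                "keyfile": "", "certfile": ""}
# }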
|
| shishaochen/TensorFlow-0.8-Win | refs/heads/master | third_party/eigen-eigen-50812b426b7c/debug/gdb/__init__.py | 377 |
# Intentionally empty
|
| 40223119/2015w13 | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/SDL.py | 603 |
from browser import document
SDL_INIT_VIDEO=0
SDL_GL_DOUBLEBUFFER=1
SDL_GL_DEPTH_SIZE=2
SDL_DOUBLEBUF=3
SDL_ANYFORMAT=4
SDL_ACTIVEEVENT=5
SDL_ALLEVENTS=5
SDL_KEYDOWN=6
SDL_KEYUP=7
SDL_MOUSEMOTION=8
SDL_MOUSEBUTTONDOWN=9
SDL_MOUSEBUTTONUP=10
SDL_JOYAXISMOTION=11
SDL_JOYBALLMOTION=12
SDL_JOYHATMOTION=13
SDL_JOYBUTTONUP=14
SDL_JOYBUTTONDOWN=15
SDL_QUIT=16
SDL_SYSWMEVENT=17
SDL_VIDEORESIZE=18
SDL_VIDEOEXPOSE=19
SDL_NOEVENT=20
SDL_GETEVENT=21
SDL_OPENGL=False
def SDL_WasInit(var):
return True
_attrs={}
_wm={}
def SDL_PeepEvents(num, event, mask):
pass
def SDL_GL_SetAttribute(variable, value):
_attrs[variable]=value
def SDL_GL_GetAttribute(variable):
    return _attrs.get(variable, None)
def SDL_GL_SetVideoMode(width, height, depth, flags):
pass
def SDL_WM_SetCaption(title, icontitle):
_wm['title']=title
_wm['icontitle']=icontitle
def SDL_PumpEvents():
pass
def SDL_SetVideoMode(width, height, depth, flags):
pass
def SDL_SetColorKey(surface, key, value):
pass
def SDL_WM_GetCaption():
return _wm.get('title', ''), _wm.get('icontitle', '')
def SDL_UpdateRect(screen, x1, y1, x2, y2):
screen.canvas.style.width=screen.canvas.style.width
def SDL_UpdateRects(screen, rects):
    for _rect in rects:
        # SDL_UpdateRect takes corner coordinates, not a rect object
        SDL_UpdateRect(screen, _rect.x, _rect.y, _rect.x + _rect.w, _rect.y + _rect.h)
def SDL_GetVideoSurface():
return _Screen
def SDL_GetVideoInfo():
return
def SDL_VideoModeOK(width, height, depth, flags):
pass
def SDL_SetPalette(surface, sdl_var, colors, flag):
pass
class Screen:
def __init__(self):
self.flags=0
@property
def canvas(self):
return document.get(selector='canvas')[0]
_Screen=Screen()
class SDL_Rect:
def __init__(self, x, y, w, h):
self.x=x
self.y=y
self.w=w
self.h=h
def SDL_Flip(screen):
pass
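# Usage sketch for this browser-side stub (runs under Brython, where
# `browser.document` provides the canvas; attribute values are illustrative):
#   SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 16)
#   assert SDL_GL_GetAttribute(SDL_GL_DEPTH_SIZE) == 16
#   SDL_WM_SetCaption('My game', 'game')
#   assert SDL_WM_GetCaption() == ('My game', 'game')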
|
| ezeteze/android_kernel_huawei_u8815_slim | refs/heads/master | tools/perf/python/twatch.py | 3213 |
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
    evsel.open(cpus = cpus, threads = threads)
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
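# Running this requires the perf python bindings built from the kernel tree
# (tools/perf/python); assuming those are on the module path, a typical
# invocation is simply `python twatch.py`, which then prints one line per
# task/comm event as it arrives on each mmap'd CPU buffer.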
|
| kingvuplus/PKT-gui2 | refs/heads/master | lib/python/Plugins/Extensions/DVDBurn/DVDTitle.py | 34 |
from Components.config import ConfigSubsection, ConfigSubList, ConfigInteger, ConfigText, ConfigSelection
import TitleCutter
class ConfigFixedText(ConfigText):
def __init__(self, text, visible_width=60):
ConfigText.__init__(self, default = text, fixed_size = True, visible_width = visible_width)
def handleKey(self, key):
pass
class DVDTitle:
def __init__(self, project):
self.properties = ConfigSubsection()
self.properties.menutitle = ConfigText(fixed_size = False, visible_width = 80)
self.properties.menusubtitle = ConfigText(fixed_size = False, visible_width = 80)
self.properties.aspect = ConfigSelection(choices = [("4:3", _("4:3")), ("16:9", _("16:9"))])
self.properties.widescreen = ConfigSelection(choices = [("nopanscan", "nopanscan"), ("noletterbox", "noletterbox")])
self.properties.autochapter = ConfigInteger(default = 0, limits = (0, 60))
self.properties.audiotracks = ConfigSubList()
self.DVBname = _("Title")
self.DVBdescr = _("Description")
self.DVBchannel = _("Channel")
self.cuesheet = [ ]
self.source = None
self.filesize = 0
self.estimatedDiskspace = 0
self.inputfile = ""
self.cutlist = [ ]
self.chaptermarks = [ ]
self.timeCreate = None
self.VideoType = -1
self.project = project
self.length = 0
def addService(self, service):
from os import path
from enigma import eServiceCenter, iServiceInformation
from ServiceReference import ServiceReference
from time import localtime
self.source = service
serviceHandler = eServiceCenter.getInstance()
info = serviceHandler.info(service)
sDescr = info and info.getInfoString(service, iServiceInformation.sDescription) or ""
self.DVBdescr = sDescr
sTimeCreate = info.getInfo(service, iServiceInformation.sTimeCreate)
if sTimeCreate > 1:
self.timeCreate = localtime(sTimeCreate)
serviceref = ServiceReference(info.getInfoString(service, iServiceInformation.sServiceref))
name = info and info.getName(service) or "Title" + sDescr
self.DVBname = name
self.DVBchannel = serviceref.getServiceName()
self.inputfile = service.getPath()
self.filesize = path.getsize(self.inputfile)
self.estimatedDiskspace = self.filesize
self.length = info.getLength(service)
def addFile(self, filename):
from enigma import eServiceReference
ref = eServiceReference(1, 0, filename)
self.addService(ref)
self.project.session.openWithCallback(self.titleEditDone, TitleCutter.CutlistReader, self)
def titleEditDone(self, cutlist):
self.initDVDmenuText(len(self.project.titles))
self.cuesheet = cutlist
self.produceFinalCuesheet()
def initDVDmenuText(self, track):
s = self.project.menutemplate.settings
self.properties.menutitle.setValue(self.formatDVDmenuText(s.titleformat.value, track))
self.properties.menusubtitle.setValue(self.formatDVDmenuText(s.subtitleformat.value, track))
def formatDVDmenuText(self, template, track):
template = template.replace("$i", str(track))
template = template.replace("$t", self.DVBname)
template = template.replace("$d", self.DVBdescr)
template = template.replace("$c", str(len(self.chaptermarks)+1))
template = template.replace("$f", self.inputfile)
template = template.replace("$C", self.DVBchannel)
#if template.find("$A") >= 0:
from TitleProperties import languageChoices
audiolist = [ ]
for audiotrack in self.properties.audiotracks:
active = audiotrack.active.value
if active:
trackstring = audiotrack.format.value
language = audiotrack.language.value
if languageChoices.langdict.has_key(language):
trackstring += ' (' + languageChoices.langdict[language] + ')'
audiolist.append(trackstring)
audiostring = ', '.join(audiolist)
template = template.replace("$A", audiostring)
if template.find("$l") >= 0:
l = self.length
lengthstring = "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
template = template.replace("$l", lengthstring)
if self.timeCreate:
template = template.replace("$Y", str(self.timeCreate[0]))
template = template.replace("$M", str(self.timeCreate[1]))
template = template.replace("$D", str(self.timeCreate[2]))
timestring = "%d:%02d" % (self.timeCreate[3], self.timeCreate[4])
template = template.replace("$T", timestring)
else:
template = template.replace("$Y", "").replace("$M", "").replace("$D", "").replace("$T", "")
return template
def produceFinalCuesheet(self):
CUT_TYPE_IN = 0
CUT_TYPE_OUT = 1
CUT_TYPE_MARK = 2
CUT_TYPE_LAST = 3
accumulated_in = 0
accumulated_at = 0
last_in = 0
self.cutlist = [ ]
self.chaptermarks = [ ]
# our demuxer expects *strictly* IN,OUT lists.
currently_in = not any(type == CUT_TYPE_IN for pts, type in self.cuesheet)
if currently_in:
self.cutlist.append(0) # emulate "in" at first
for (pts, type) in self.cuesheet:
#print "pts=", pts, "type=", type, "accumulated_in=", accumulated_in, "accumulated_at=", accumulated_at, "last_in=", last_in
if type == CUT_TYPE_IN and not currently_in:
self.cutlist.append(pts)
last_in = pts
currently_in = True
if type == CUT_TYPE_OUT and currently_in:
self.cutlist.append(pts)
# accumulate the segment
accumulated_in += pts - last_in
accumulated_at = pts
currently_in = False
if type == CUT_TYPE_MARK and currently_in:
# relocate chaptermark against "in" time. This is not 100% accurate,
# as the in/out points are not.
reloc_pts = pts - last_in + accumulated_in
self.chaptermarks.append(reloc_pts)
if len(self.cutlist) > 1:
part = accumulated_in / (self.length*90000.0)
usedsize = int ( part * self.filesize )
self.estimatedDiskspace = usedsize
self.length = accumulated_in / 90000
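    # Worked example (PTS values are illustrative): a cuesheet of
    # [(100, IN), (200, MARK), (300, OUT)] yields cutlist [100, 300], one
    # chaptermark relocated to 200 - 100 + 0 = 100, and accumulated_in = 200
    # PTS of kept material.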
def getChapterMarks(self, template="$h:$m:$s.$t"):
timestamps = [ ]
chapters = [ ]
minutes = self.properties.autochapter.value
if len(self.chaptermarks) < 1 and minutes > 0:
chapterpts = 0
while chapterpts < (self.length-60*minutes)*90000:
chapterpts += 90000 * 60 * minutes
chapters.append(chapterpts)
else:
chapters = self.chaptermarks
for p in chapters:
timestring = template.replace("$h", str(p / (90000 * 3600)))
timestring = timestring.replace("$m", ("%02d" % (p % (90000 * 3600) / (90000 * 60))))
timestring = timestring.replace("$s", ("%02d" % (p % (90000 * 60) / 90000)))
timestring = timestring.replace("$t", ("%03d" % ((p % 90000) / 90)))
timestamps.append(timestring)
return timestamps
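    # Worked example with the default template "$h:$m:$s.$t": a chaptermark at
    # p = 90000 * 3723 + 450 (1 h, 2 min, 3 s and 5/1000 s at the 90 kHz PTS
    # clock) renders as '1:02:03.005'.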
|
| olivetree123/memory_profiler | refs/heads/master | examples/reporting_file.py | 5 |
from memory_profiler import profile
f=open('hi.txt','w+')
@profile(stream=f)
def my_func():
a = [1] * (10 ** 6)
b = [2] * (2 * 10 ** 7)
del b
return a
@profile(stream=f)
def my_func1():
a = [2] * (10 ** 6)
b = [3] * (2 * 10 ** 7)
del b
return a
if __name__ == '__main__':
my_func()
my_func1()
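# Usage sketch (assumes memory_profiler is installed, e.g. via
# `pip install memory_profiler`): running this script appends the
# line-by-line memory reports for my_func and my_func1 to hi.txt instead of
# stdout, because of the `stream=f` argument passed to @profile.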
|
| ricardogsilva/QGIS | refs/heads/master | tests/src/python/test_qgshashlinesymbollayer.py | 23 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgshashlinesymbollayer.py
---------------------
Date : March 2019
Copyright : (C) 2019 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'March 2019'
__copyright__ = '(C) 2019, Nyall Dawson'
import qgis # NOQA
import os
from utilities import unitTestDataPath
from qgis.PyQt.QtCore import QDir, Qt, QSize
from qgis.PyQt.QtGui import QImage, QColor, QPainter
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsGeometry,
QgsFillSymbol,
QgsRenderContext,
QgsFeature,
QgsMapSettings,
QgsRenderChecker,
QgsReadWriteContext,
QgsSymbolLayerUtils,
QgsSimpleMarkerSymbolLayer,
QgsLineSymbolLayer,
QgsMarkerLineSymbolLayer,
QgsMarkerSymbol,
QgsGeometryGeneratorSymbolLayer,
QgsSymbol,
QgsFontMarkerSymbolLayer,
QgsMultiRenderChecker,
QgsLineSymbol,
QgsSymbolLayer,
QgsProperty,
QgsRectangle,
QgsUnitTypes,
QgsSimpleLineSymbolLayer,
QgsTemplatedLineSymbolLayerBase,
QgsHashedLineSymbolLayer,
QgsVectorLayer,
QgsSingleSymbolRenderer
)
from qgis.testing import unittest, start_app
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsHashedLineSymbolLayer(unittest.TestCase):
def setUp(self):
self.report = "<h1>Python QgsHashedLineSymbolLayer Tests</h1>\n"
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def testWidth(self):
ms = QgsMapSettings()
extent = QgsRectangle(100, 200, 100, 200)
ms.setExtent(extent)
ms.setOutputSize(QSize(400, 400))
context = QgsRenderContext.fromMapSettings(ms)
context.setScaleFactor(96 / 25.4) # 96 DPI
ms.setExtent(QgsRectangle(100, 150, 100, 150))
ms.setOutputDpi(ms.outputDpi() * 2)
context2 = QgsRenderContext.fromMapSettings(ms)
context2.setScaleFactor(300 / 25.4)
s = QgsFillSymbol()
s.deleteSymbolLayer(0)
hash_line = QgsHashedLineSymbolLayer(True)
hash_line.setPlacement(QgsTemplatedLineSymbolLayerBase.FirstVertex)
simple_line = QgsSimpleLineSymbolLayer()
line_symbol = QgsLineSymbol()
line_symbol.changeSymbolLayer(0, simple_line)
hash_line.setSubSymbol(line_symbol)
hash_line.setHashLength(10)
self.assertEqual(hash_line.width(), 10)
self.assertAlmostEqual(hash_line.width(context), 37.795275590551185, 3)
self.assertAlmostEqual(hash_line.width(context2), 118.11023622047244, 3)
hash_line.setHashLengthUnit(QgsUnitTypes.RenderPixels)
self.assertAlmostEqual(hash_line.width(context), 10.0, 3)
self.assertAlmostEqual(hash_line.width(context2), 10.0, 3)
def testHashAngle(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
hash_line = QgsHashedLineSymbolLayer(True)
hash_line.setPlacement(QgsTemplatedLineSymbolLayerBase.Interval)
hash_line.setInterval(6)
simple_line = QgsSimpleLineSymbolLayer()
simple_line.setColor(QColor(0, 255, 0))
simple_line.setWidth(1)
line_symbol = QgsLineSymbol()
line_symbol.changeSymbolLayer(0, simple_line)
hash_line.setSubSymbol(line_symbol)
hash_line.setHashLength(7)
hash_line.setHashAngle(45)
hash_line.setAverageAngleLength(0)
s.appendSymbolLayer(hash_line.clone())
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
assert self.imageCheck('line_hash_angle', 'line_hash_angle', rendered_image)
s.symbolLayer(0).setRotateSymbols(False)
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
assert self.imageCheck('line_hash_no_rotate', 'line_hash_no_rotate', rendered_image)
def testHashAverageAngle(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
hash_line = QgsHashedLineSymbolLayer(True)
hash_line.setPlacement(QgsTemplatedLineSymbolLayerBase.Interval)
hash_line.setInterval(6)
simple_line = QgsSimpleLineSymbolLayer()
simple_line.setColor(QColor(0, 255, 0))
simple_line.setWidth(1)
line_symbol = QgsLineSymbol()
line_symbol.changeSymbolLayer(0, simple_line)
hash_line.setSubSymbol(line_symbol)
hash_line.setHashLength(7)
hash_line.setHashAngle(45)
hash_line.setAverageAngleLength(30)
s.appendSymbolLayer(hash_line.clone())
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
assert self.imageCheck('line_hash_average_angle', 'line_hash_average_angle', rendered_image)
    def testHashCenterAverageAngle(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
hash_line = QgsHashedLineSymbolLayer(True)
hash_line.setPlacement(QgsTemplatedLineSymbolLayerBase.CentralPoint)
simple_line = QgsSimpleLineSymbolLayer()
simple_line.setColor(QColor(0, 255, 0))
simple_line.setWidth(1)
line_symbol = QgsLineSymbol()
line_symbol.changeSymbolLayer(0, simple_line)
hash_line.setSubSymbol(line_symbol)
hash_line.setHashLength(7)
hash_line.setHashAngle(45)
hash_line.setAverageAngleLength(30)
s.appendSymbolLayer(hash_line.clone())
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
assert self.imageCheck('line_hash_center_average_angle', 'line_hash_center_average_angle', rendered_image)
def testHashAverageAngleClosedRing(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
hash_line = QgsHashedLineSymbolLayer(True)
hash_line.setPlacement(QgsTemplatedLineSymbolLayerBase.Interval)
hash_line.setInterval(6)
simple_line = QgsSimpleLineSymbolLayer()
simple_line.setColor(QColor(0, 255, 0))
simple_line.setWidth(1)
line_symbol = QgsLineSymbol()
line_symbol.changeSymbolLayer(0, simple_line)
hash_line.setSubSymbol(line_symbol)
hash_line.setHashLength(7)
hash_line.setHashAngle(0)
hash_line.setAverageAngleLength(30)
s.appendSymbolLayer(hash_line.clone())
g = QgsGeometry.fromWkt('LineString(0 0, 0 10, 10 10, 10 0, 0 0)')
rendered_image = self.renderGeometry(s, g)
assert self.imageCheck('line_hash_ring_average_angle', 'line_hash_ring_average_angle', rendered_image)
def testHashPlacement(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
hash_line = QgsHashedLineSymbolLayer(True)
hash_line.setPlacement(QgsTemplatedLineSymbolLayerBase.Vertex)
hash_line.setInterval(6)
simple_line = QgsSimpleLineSymbolLayer()
simple_line.setColor(QColor(0, 255, 0))
simple_line.setWidth(1)
line_symbol = QgsLineSymbol()
line_symbol.changeSymbolLayer(0, simple_line)
hash_line.setSubSymbol(line_symbol)
hash_line.setHashLength(7)
hash_line.setAverageAngleLength(0)
s.appendSymbolLayer(hash_line.clone())
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
assert self.imageCheck('line_hash_vertex', 'line_hash_vertex', rendered_image)
s.symbolLayer(0).setPlacement(QgsTemplatedLineSymbolLayerBase.FirstVertex)
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
assert self.imageCheck('line_hash_first', 'line_hash_first', rendered_image)
s.symbolLayer(0).setPlacement(QgsTemplatedLineSymbolLayerBase.LastVertex)
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
assert self.imageCheck('line_hash_last', 'line_hash_last', rendered_image)
def testRingFilter(self):
# test filtering rings during rendering
s = QgsFillSymbol()
s.deleteSymbolLayer(0)
hash_line = QgsHashedLineSymbolLayer(True)
hash_line.setPlacement(QgsTemplatedLineSymbolLayerBase.Interval)
hash_line.setInterval(6)
simple_line = QgsSimpleLineSymbolLayer()
simple_line.setColor(QColor(0, 255, 0))
simple_line.setWidth(1)
line_symbol = QgsLineSymbol()
line_symbol.changeSymbolLayer(0, simple_line)
hash_line.setSubSymbol(line_symbol)
hash_line.setHashLength(10)
hash_line.setAverageAngleLength(0)
s.appendSymbolLayer(hash_line.clone())
self.assertEqual(s.symbolLayer(0).ringFilter(), QgsLineSymbolLayer.AllRings)
s.symbolLayer(0).setRingFilter(QgsLineSymbolLayer.ExteriorRingOnly)
self.assertEqual(s.symbolLayer(0).ringFilter(), QgsLineSymbolLayer.ExteriorRingOnly)
s2 = s.clone()
self.assertEqual(s2.symbolLayer(0).ringFilter(), QgsLineSymbolLayer.ExteriorRingOnly)
doc = QDomDocument()
context = QgsReadWriteContext()
element = QgsSymbolLayerUtils.saveSymbol('test', s, doc, context)
s2 = QgsSymbolLayerUtils.loadSymbol(element, context)
self.assertEqual(s2.symbolLayer(0).ringFilter(), QgsLineSymbolLayer.ExteriorRingOnly)
# rendering test
s3 = QgsFillSymbol()
s3.deleteSymbolLayer(0)
s3.appendSymbolLayer(
hash_line.clone())
s3.symbolLayer(0).setRingFilter(QgsLineSymbolLayer.ExteriorRingOnly)
g = QgsGeometry.fromWkt('Polygon((0 0, 10 0, 10 10, 0 10, 0 0),(1 1, 1 2, 2 2, 2 1, 1 1),(8 8, 9 8, 9 9, 8 9, 8 8))')
rendered_image = self.renderGeometry(s3, g)
assert self.imageCheck('hashline_exterioronly', 'hashline_exterioronly', rendered_image)
s3.symbolLayer(0).setRingFilter(QgsLineSymbolLayer.InteriorRingsOnly)
g = QgsGeometry.fromWkt('Polygon((0 0, 10 0, 10 10, 0 10, 0 0),(1 1, 1 2, 2 2, 2 1, 1 1),(8 8, 9 8, 9 9, 8 9, 8 8))')
rendered_image = self.renderGeometry(s3, g)
assert self.imageCheck('hashline_interioronly', 'hashline_interioronly', rendered_image)
def testLineOffset(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
hash_line = QgsHashedLineSymbolLayer(True)
hash_line.setPlacement(QgsTemplatedLineSymbolLayerBase.Interval)
hash_line.setInterval(6)
simple_line = QgsSimpleLineSymbolLayer()
simple_line.setColor(QColor(0, 255, 0))
simple_line.setWidth(1)
line_symbol = QgsLineSymbol()
line_symbol.changeSymbolLayer(0, simple_line)
hash_line.setSubSymbol(line_symbol)
hash_line.setHashLength(10)
hash_line.setAverageAngleLength(0)
s.appendSymbolLayer(hash_line.clone())
s.symbolLayer(0).setOffset(3)
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
assert self.imageCheck('line_offset_positive', 'line_offset_positive', rendered_image)
s.symbolLayer(0).setOffset(-3)
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
assert self.imageCheck('line_offset_negative', 'line_offset_negative', rendered_image)
def testPointNumInterval(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
hash_line = QgsHashedLineSymbolLayer(True)
hash_line.setPlacement(QgsTemplatedLineSymbolLayerBase.Interval)
hash_line.setInterval(6)
simple_line = QgsSimpleLineSymbolLayer()
simple_line.setColor(QColor(0, 255, 0))
simple_line.setWidth(1)
line_symbol = QgsLineSymbol()
line_symbol.changeSymbolLayer(0, simple_line)
hash_line.setSubSymbol(line_symbol)
hash_line.setHashLength(10)
hash_line.setAverageAngleLength(0)
s.appendSymbolLayer(hash_line.clone())
s.symbolLayer(0).setDataDefinedProperty(QgsSymbolLayer.PropertyLineDistance, QgsProperty.fromExpression(
"@geometry_point_num * 2"))
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
assert self.imageCheck('line_dd_size', 'line_dd_size', rendered_image)
def testSegmentCenter(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
hash_line = QgsHashedLineSymbolLayer(True)
hash_line.setPlacement(QgsTemplatedLineSymbolLayerBase.SegmentCenter)
simple_line = QgsSimpleLineSymbolLayer()
simple_line.setColor(QColor(0, 255, 0))
simple_line.setWidth(1)
line_symbol = QgsLineSymbol()
line_symbol.changeSymbolLayer(0, simple_line)
hash_line.setSubSymbol(line_symbol)
hash_line.setHashLength(10)
hash_line.setAverageAngleLength(0)
s.appendSymbolLayer(hash_line.clone())
g = QgsGeometry.fromWkt('LineString(0 0, 10 0, 0 10)')
rendered_image = self.renderGeometry(s, g)
assert self.imageCheck('line_hash_segmentcenter', 'line_hash_segmentcenter', rendered_image)
def testOpacityWithDataDefinedColor(self):
line_shp = os.path.join(TEST_DATA_DIR, 'lines.shp')
line_layer = QgsVectorLayer(line_shp, 'Lines', 'ogr')
self.assertTrue(line_layer.isValid())
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
hash_line = QgsHashedLineSymbolLayer(True)
simple_line = QgsSimpleLineSymbolLayer()
simple_line.setColor(QColor(0, 255, 0))
simple_line.setDataDefinedProperty(QgsSymbolLayer.PropertyStrokeColor, QgsProperty.fromExpression(
"if(Name='Arterial', 'red', 'green')"))
simple_line.setWidth(1)
line_symbol = QgsLineSymbol()
line_symbol.changeSymbolLayer(0, simple_line)
line_symbol.setOpacity(0.5)
hash_line.setSubSymbol(line_symbol)
hash_line.setHashLength(10)
hash_line.setAverageAngleLength(0)
s.appendSymbolLayer(hash_line.clone())
# set opacity on both the symbol and subsymbol, to test that they get combined
s.setOpacity(0.5)
line_layer.setRenderer(QgsSingleSymbolRenderer(s))
ms = QgsMapSettings()
ms.setOutputSize(QSize(400, 400))
ms.setOutputDpi(96)
ms.setExtent(QgsRectangle(-118.5, 19.0, -81.4, 50.4))
ms.setLayers([line_layer])
# Test rendering
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(ms)
renderchecker.setControlPathPrefix('symbol_hashline')
renderchecker.setControlName('expected_hashline_opacityddcolor')
res = renderchecker.runTest('expected_hashline_opacityddcolor')
self.report += renderchecker.report()
self.assertTrue(res)
def testDataDefinedOpacity(self):
line_shp = os.path.join(TEST_DATA_DIR, 'lines.shp')
line_layer = QgsVectorLayer(line_shp, 'Lines', 'ogr')
self.assertTrue(line_layer.isValid())
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
hash_line = QgsHashedLineSymbolLayer(True)
simple_line = QgsSimpleLineSymbolLayer()
simple_line.setColor(QColor(0, 255, 0))
simple_line.setDataDefinedProperty(QgsSymbolLayer.PropertyStrokeColor, QgsProperty.fromExpression(
"if(Name='Arterial', 'red', 'green')"))
simple_line.setWidth(1)
line_symbol = QgsLineSymbol()
line_symbol.changeSymbolLayer(0, simple_line)
line_symbol.setOpacity(0.5)
hash_line.setSubSymbol(line_symbol)
hash_line.setHashLength(10)
hash_line.setAverageAngleLength(0)
s.appendSymbolLayer(hash_line.clone())
s.setDataDefinedProperty(QgsSymbol.PropertyOpacity, QgsProperty.fromExpression("if(\"Value\" = 1, 25, 50)"))
line_layer.setRenderer(QgsSingleSymbolRenderer(s))
ms = QgsMapSettings()
ms.setOutputSize(QSize(400, 400))
ms.setOutputDpi(96)
ms.setExtent(QgsRectangle(-118.5, 19.0, -81.4, 50.4))
ms.setLayers([line_layer])
# Test rendering
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(ms)
renderchecker.setControlPathPrefix('symbol_hashline')
renderchecker.setControlName('expected_hashline_ddopacity')
res = renderchecker.runTest('expected_hashline_ddopacity')
self.report += renderchecker.report()
self.assertTrue(res)
def renderGeometry(self, symbol, geom, buffer=20):
f = QgsFeature()
f.setGeometry(geom)
image = QImage(200, 200, QImage.Format_RGB32)
painter = QPainter()
ms = QgsMapSettings()
extent = geom.get().boundingBox()
# buffer extent by 10%
if extent.width() > 0:
extent = extent.buffered((extent.height() + extent.width()) / buffer)
else:
extent = extent.buffered(buffer / 2)
ms.setExtent(extent)
ms.setOutputSize(image.size())
context = QgsRenderContext.fromMapSettings(ms)
context.setPainter(painter)
context.setScaleFactor(96 / 25.4) # 96 DPI
context.expressionContext().setFeature(f)
painter.begin(image)
try:
image.fill(QColor(0, 0, 0))
symbol.startRender(context)
symbol.renderFeature(f, context)
symbol.stopRender(context)
finally:
painter.end()
return image
def imageCheck(self, name, reference_image, image):
self.report += "<h2>Render {}</h2>\n".format(name)
temp_dir = QDir.tempPath() + '/'
file_name = temp_dir + 'symbol_' + name + ".png"
image.save(file_name, "PNG")
checker = QgsRenderChecker()
checker.setControlPathPrefix("symbol_hashline")
checker.setControlName("expected_" + reference_image)
checker.setRenderedImage(file_name)
checker.setColorTolerance(2)
result = checker.compareImages(name, 20)
self.report += checker.report()
        print(self.report)
return result
if __name__ == '__main__':
unittest.main()
|
| C1994/learn-python3 | refs/heads/master | samples/context/do_with.py | 21 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from contextlib import contextmanager
@contextmanager
def log(name):
print('[%s] start...' % name)
yield
print('[%s] end.' % name)
with log('DEBUG'):
print('Hello, world!')
print('Hello, Python!')
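# Expected output when run (traced from the code above):
# [DEBUG] start...
# Hello, world!
# Hello, Python!
# [DEBUG] end.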
|
| tdyas/pants | refs/heads/master | contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_thrift_linter_integration.py | 2 |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from functools import wraps
from typing import Any, Callable, TypeVar
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
FuncType = Callable[..., Any]
F = TypeVar("F", bound=FuncType)
class ThriftLinterTest(PantsRunIntegrationTest):
lint_warn_token = "LINT-WARN"
lint_error_token = "LINT-ERROR"
thrift_folder_root = "contrib/scrooge/tests/thrift/org/pantsbuild/contrib/scrooge/thrift_linter"
@classmethod
def hermetic(cls):
return True
@classmethod
def thrift_test_target(cls, name):
return f"{cls.thrift_folder_root}:{name}"
def rename_build_file(func: F) -> F:
"""This decorator implements the TEST_BUILD pattern.
Because these tests use files that intentionally should fail linting, the goal `./pants lint ::`
we use in CI would complain about these files. To avoid this, we rename BUILD to TEST_BUILD.
However, these tests require us to temporarily rename TEST_BUILD back to BUILD.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
with self.file_renamed(
self.thrift_folder_root, test_name="TEST_BUILD", real_name="BUILD"
):
func(self, *args, **kwargs)
return wrapper # type: ignore[return-value]
def run_pants(self, command, config=None, stdin_data=None, extra_env=None, **kwargs):
full_config = {
"GLOBAL": {
"pythonpath": ["%(buildroot)s/contrib/scrooge/src/python"],
"backend_packages": [
"pants.backend.codegen",
"pants.backend.jvm",
"pants.contrib.scrooge",
],
},
}
if config:
for scope, scoped_cfgs in config.items():
updated = full_config.get(scope, {})
updated.update(scoped_cfgs)
full_config[scope] = updated
return super().run_pants(command, full_config, stdin_data, extra_env, **kwargs)
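    # For example, passing config={"scrooge-linter": {"strict": True}} (as
    # test_bad_pants_ini_strict does below) merges that scope on top of the
    # GLOBAL defaults defined above before the run.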
@rename_build_file
def test_good(self):
# thrift-linter should pass without warnings with correct thrift files.
cmd = ["lint.thrift", self.thrift_test_target("good-thrift")]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
self.assertNotIn(self.lint_error_token, pants_run.stdout_data)
@rename_build_file
def test_bad_default(self):
        # By default the linter is non-strict: lint errors only warn, so the run succeeds.
cmd = ["lint.thrift", self.thrift_test_target("bad-thrift-default")]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
self.assertIn(self.lint_warn_token, pants_run.stdout_data)
@rename_build_file
def test_bad_strict(self):
# thrift-linter fails on linter errors (BUILD target defines thrift_linter_strict=True)
cmd = ["lint.thrift", self.thrift_test_target("bad-thrift-strict")]
pants_run = self.run_pants(cmd)
self.assert_failure(pants_run)
self.assertIn(self.lint_error_token, pants_run.stdout_data)
@rename_build_file
def test_bad_non_strict(self):
        # With thrift_linter_strict=False on the BUILD target, lint errors only warn and the run succeeds.
cmd = ["lint.thrift", self.thrift_test_target("bad-thrift-non-strict")]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
self.assertIn(self.lint_warn_token, pants_run.stdout_data)
@rename_build_file
def test_bad_default_override(self):
# thrift-linter fails with command line flag overriding the BUILD section.
cmd = [
"--scrooge-linter-strict",
"lint.thrift",
self.thrift_test_target("bad-thrift-default"),
]
pants_run = self.run_pants(cmd)
self.assert_failure(pants_run)
self.assertIn(self.lint_error_token, pants_run.stdout_data)
@rename_build_file
def test_multiple_bad_strict_override(self):
# Using -q to make sure bad thrift files are in the final exception messages.
target_a = self.thrift_test_target("bad-thrift-strict")
target_b = self.thrift_test_target("bad-thrift-strict2")
cmd = [
"-q",
"--scrooge-linter-strict",
"lint.thrift",
target_a,
target_b,
]
pants_run = self.run_pants(cmd)
self.assert_failure(pants_run)
self.assertIn("bad-strict2.thrift", pants_run.stdout_data)
self.assertIn("bad-strict.thrift", pants_run.stdout_data)
self.assertIn(target_a, pants_run.stdout_data)
self.assertIn(target_b, pants_run.stdout_data)
@rename_build_file
def test_bad_strict_override(self):
# thrift-linter passes with non-strict command line flag overriding the BUILD section.
cmd = [
"--no-scrooge-linter-strict",
"lint.thrift",
self.thrift_test_target("bad-thrift-strict"),
]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
self.assertIn(self.lint_warn_token, pants_run.stdout_data)
@rename_build_file
def test_bad_non_strict_override(self):
# thrift-linter fails with command line flag overriding the BUILD section.
cmd = [
"--scrooge-linter-strict",
"lint.thrift",
self.thrift_test_target("bad-thrift-non-strict"),
]
pants_run = self.run_pants(cmd)
self.assert_failure(pants_run)
self.assertIn(self.lint_error_token, pants_run.stdout_data)
@rename_build_file
def test_bad_pants_ini_strict(self):
        # thrift-linter fails if the config sets scrooge-linter strict=True.
cmd = ["lint.thrift", self.thrift_test_target("bad-thrift-default")]
pants_ini_config = {"scrooge-linter": {"strict": True}}
pants_run = self.run_pants(cmd, config=pants_ini_config)
self.assert_failure(pants_run)
self.assertIn(self.lint_error_token, pants_run.stdout_data)
@rename_build_file
def test_bad_pants_ini_strict_overridden(self):
        # thrift-linter passes if the config sets scrooge-linter strict=True and
        # a non-strict command line flag is passed.
cmd = [
"--no-scrooge-linter-strict",
"lint.thrift",
self.thrift_test_target("bad-thrift-default"),
]
pants_ini_config = {"scrooge-linter": {"strict": True}}
pants_run = self.run_pants(cmd, config=pants_ini_config)
self.assert_success(pants_run)
self.assertIn(self.lint_warn_token, pants_run.stdout_data)
|
| charlescearl/VirtualMesos | refs/heads/mesos-vm | frameworks/hadoop-0.20.2/src/contrib/hod/hodlib/Hod/hod.py | 182 |
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# -*- python -*-
import sys, os, getpass, pprint, re, cPickle, random, shutil, time, errno
import hodlib.Common.logger
from hodlib.ServiceRegistry.serviceRegistry import svcrgy
from hodlib.Common.xmlrpc import hodXRClient
from hodlib.Common.util import to_http_url, get_exception_string
from hodlib.Common.util import get_exception_error_string
from hodlib.Common.util import hodInterrupt, HodInterruptException
from hodlib.Common.util import HOD_INTERRUPTED_CODE
from hodlib.Common.nodepoolutil import NodePoolUtil
from hodlib.Hod.hadoop import hadoopCluster, hadoopScript
CLUSTER_DATA_FILE = 'clusters'
INVALID_STATE_FILE_MSGS = \
[
"Requested operation cannot be performed. Cannot read %s: " + \
"Permission denied.",
"Requested operation cannot be performed. " + \
"Cannot write to %s: Permission denied.",
"Requested operation cannot be performed. " + \
"Cannot read/write to %s: Permission denied.",
"Cannot update %s: Permission denied. " + \
"Cluster is deallocated, but info and list " + \
"operations might show incorrect information.",
]
class hodState:
def __init__(self, store):
self.__store = store
self.__stateFile = None
self.__init_store()
self.__STORE_EXT = ".state"
def __init_store(self):
if not os.path.exists(self.__store):
os.mkdir(self.__store)
def __set_state_file(self, id=None):
if id:
self.__stateFile = os.path.join(self.__store, "%s%s" % (id,
self.__STORE_EXT))
else:
for item in os.listdir(self.__store):
if item.endswith(self.__STORE_EXT):
self.__stateFile = os.path.join(self.__store, item)
def get_state_file(self):
return self.__stateFile
def checkStateFile(self, id=None, modes=(os.R_OK,)):
    # does the state file exist, and is it readable and/or writable as requested?
self.__set_state_file(id)
# return true if file doesn't exist, because HOD CAN create
# state file and so WILL have permissions to read and/or write
try:
os.stat(self.__stateFile)
except OSError, err:
if err.errno == errno.ENOENT: # error 2 (no such file)
return True
# file exists
ret = True
for mode in modes:
ret = ret and os.access(self.__stateFile, mode)
return ret
def read(self, id=None):
info = {}
self.__set_state_file(id)
if self.__stateFile:
if os.path.isfile(self.__stateFile):
stateFile = open(self.__stateFile, 'r')
try:
info = cPickle.load(stateFile)
except EOFError:
pass
stateFile.close()
return info
def write(self, id, info):
self.__set_state_file(id)
if not os.path.exists(self.__stateFile):
self.clear(id)
stateFile = open(self.__stateFile, 'w')
cPickle.dump(info, stateFile)
stateFile.close()
def clear(self, id=None):
self.__set_state_file(id)
if self.__stateFile and os.path.exists(self.__stateFile):
os.remove(self.__stateFile)
else:
for item in os.listdir(self.__store):
if item.endswith(self.__STORE_EXT):
          os.remove(os.path.join(self.__store, item))
class hodRunner:
def __init__(self, cfg, log=None, cluster=None):
self.__hodhelp = hodHelp()
self.__ops = self.__hodhelp.ops
self.__cfg = cfg
self.__npd = self.__cfg['nodepooldesc']
self.__opCode = 0
self.__user = getpass.getuser()
self.__registry = None
self.__baseLogger = None
# Allowing to pass in log object to help testing - a stub can be passed in
if log is None:
self.__setup_logger()
else:
self.__log = log
self.__userState = hodState(self.__cfg['hod']['user_state'])
self.__clusterState = None
self.__clusterStateInfo = { 'env' : None, 'hdfs' : None, 'mapred' : None }
    # Allowing to pass in log object to help testing - a stub can be passed in
if cluster is None:
self.__cluster = hadoopCluster(self.__cfg, self.__log)
else:
self.__cluster = cluster
def __setup_logger(self):
self.__baseLogger = hodlib.Common.logger.hodLog('hod')
self.__log = self.__baseLogger.add_logger(self.__user )
if self.__cfg['hod']['stream']:
self.__baseLogger.add_stream(level=self.__cfg['hod']['debug'],
addToLoggerNames=(self.__user ,))
if self.__cfg['hod'].has_key('syslog-address'):
self.__baseLogger.add_syslog(self.__cfg['hod']['syslog-address'],
level=self.__cfg['hod']['debug'],
addToLoggerNames=(self.__user ,))
def get_logger(self):
return self.__log
def __setup_cluster_logger(self, directory):
self.__baseLogger.add_file(logDirectory=directory, level=4,
backupCount=self.__cfg['hod']['log-rollover-count'],
addToLoggerNames=(self.__user ,))
def __setup_cluster_state(self, directory):
self.__clusterState = hodState(directory)
def __norm_cluster_dir(self, directory):
directory = os.path.expanduser(directory)
if not os.path.isabs(directory):
directory = os.path.join(self.__cfg['hod']['original-dir'], directory)
directory = os.path.abspath(directory)
return directory
def __setup_service_registry(self):
cfg = self.__cfg['hod'].copy()
cfg['debug'] = 0
self.__registry = svcrgy(cfg, self.__log)
self.__registry.start()
self.__log.debug(self.__registry.getXMLRPCAddr())
self.__cfg['hod']['xrs-address'] = self.__registry.getXMLRPCAddr()
self.__cfg['ringmaster']['svcrgy-addr'] = self.__cfg['hod']['xrs-address']
def __set_cluster_state_info(self, env, hdfs, mapred, ring, jobid, min, max):
self.__clusterStateInfo['env'] = env
self.__clusterStateInfo['hdfs'] = "http://%s" % hdfs
self.__clusterStateInfo['mapred'] = "http://%s" % mapred
self.__clusterStateInfo['ring'] = ring
self.__clusterStateInfo['jobid'] = jobid
self.__clusterStateInfo['min'] = min
self.__clusterStateInfo['max'] = max
def __set_user_state_info(self, info):
userState = self.__userState.read(CLUSTER_DATA_FILE)
for key in info.keys():
userState[key] = info[key]
self.__userState.write(CLUSTER_DATA_FILE, userState)
def __remove_cluster(self, clusterDir):
clusterInfo = self.__userState.read(CLUSTER_DATA_FILE)
if clusterDir in clusterInfo:
del(clusterInfo[clusterDir])
self.__userState.write(CLUSTER_DATA_FILE, clusterInfo)
def __cleanup(self):
if self.__registry: self.__registry.stop()
def __check_operation(self, operation):
opList = operation.split()
if not opList[0] in self.__ops:
self.__log.critical("Invalid hod operation specified: %s" % operation)
self._op_help(None)
self.__opCode = 2
return opList
def __adjustMasterFailureCountConfig(self, nodeCount):
# This method adjusts the ringmaster.max-master-failures variable
    # to a value that is bounded by a function of the number of
# nodes.
maxFailures = self.__cfg['ringmaster']['max-master-failures']
# Count number of masters required - depends on which services
# are external
masters = 0
if not self.__cfg['gridservice-hdfs']['external']:
masters += 1
if not self.__cfg['gridservice-mapred']['external']:
masters += 1
    # So, if there are n nodes and m masters, we need at least
    # all masters to come up. Therefore, at least m nodes should be
    # good, which means a maximum of n-m master nodes can fail.
maxFailedNodes = nodeCount - masters
# The configured max number of failures is now bounded by this
# number.
self.__cfg['ringmaster']['max-master-failures'] = \
min(maxFailures, maxFailedNodes)
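    # Worked example: with nodeCount = 5 and neither HDFS nor Map/Reduce
    # external (masters = 2), at most 5 - 2 = 3 nodes may fail, so a
    # configured max-master-failures above 3 is capped to 3 here.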
def _op_allocate(self, args):
operation = "allocate"
argLength = len(args)
min = 0
max = 0
errorFlag = False
errorMsgs = []
if argLength == 3:
nodes = args[2]
clusterDir = self.__norm_cluster_dir(args[1])
if not os.path.exists(clusterDir):
try:
os.makedirs(clusterDir)
except OSError, err:
errorFlag = True
errorMsgs.append("Could not create cluster directory. %s" \
% (str(err)))
elif not os.path.isdir(clusterDir):
errorFlag = True
errorMsgs.append( \
"Invalid cluster directory (--hod.clusterdir or -d) : " + \
clusterDir + " : Not a directory")
if int(nodes) < 3 :
errorFlag = True
errorMsgs.append("Invalid nodecount (--hod.nodecount or -n) : " + \
"Must be >= 3. Given nodes: %s" % nodes)
if errorFlag:
for msg in errorMsgs:
self.__log.critical(msg)
self.__opCode = 3
return
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, \
(os.R_OK, os.W_OK)):
self.__log.critical(INVALID_STATE_FILE_MSGS[2] % \
self.__userState.get_state_file())
self.__opCode = 1
return
clusterList = self.__userState.read(CLUSTER_DATA_FILE)
if clusterDir in clusterList.keys():
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
# Check if the job is not running. Only then can we safely
# allocate another cluster. Otherwise the user would need
# to deallocate and free up resources himself.
if clusterInfo.has_key('jobid') and \
self.__cluster.is_cluster_deallocated(clusterInfo['jobid']):
self.__log.warn("Found a dead cluster at cluster directory '%s'. Deallocating it to allocate a new one." % (clusterDir))
self.__remove_cluster(clusterDir)
self.__clusterState.clear()
else:
self.__log.critical("Found a previously allocated cluster at cluster directory '%s'. HOD cannot determine if this cluster can be automatically deallocated. Deallocate the cluster if it is unused." % (clusterDir))
self.__opCode = 12
return
self.__setup_cluster_logger(clusterDir)
(status, message) = self.__cluster.is_valid_account()
    if status != 0:
if message:
for line in message:
self.__log.critical("verify-account output: %s" % line)
self.__log.critical("Cluster cannot be allocated because account verification failed. " \
+ "verify-account returned exit code: %s." % status)
self.__opCode = 4
return
else:
self.__log.debug("verify-account returned zero exit code.")
if message:
self.__log.debug("verify-account output: %s" % message)
    if re.match(r'\d+-\d+', nodes):
(min, max) = nodes.split("-")
min = int(min)
max = int(max)
else:
try:
nodes = int(nodes)
min = nodes
max = nodes
except ValueError:
print self.__hodhelp.help(operation)
self.__log.critical(
"%s operation requires a pos_int value for n(nodecount)." %
operation)
self.__opCode = 3
else:
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
self.__opCode = self.__cluster.check_cluster(clusterInfo)
if self.__opCode == 0 or self.__opCode == 15:
self.__setup_service_registry()
if hodInterrupt.isSet():
self.__cleanup()
raise HodInterruptException()
self.__log.debug("Service Registry started.")
self.__adjustMasterFailureCountConfig(nodes)
try:
allocateStatus = self.__cluster.allocate(clusterDir, min, max)
except HodInterruptException, h:
self.__cleanup()
raise h
# Allocation has gone through.
# Don't care about interrupts any more
try:
if allocateStatus == 0:
self.__set_cluster_state_info(os.environ,
self.__cluster.hdfsInfo,
self.__cluster.mapredInfo,
self.__cluster.ringmasterXRS,
self.__cluster.jobId,
min, max)
self.__setup_cluster_state(clusterDir)
self.__clusterState.write(self.__cluster.jobId,
self.__clusterStateInfo)
# Do we need to check for interrupts here ??
self.__set_user_state_info(
{ clusterDir : self.__cluster.jobId, } )
self.__opCode = allocateStatus
except Exception, e:
# Some unknown problem.
self.__cleanup()
self.__cluster.deallocate(clusterDir, self.__clusterStateInfo)
self.__opCode = 1
raise Exception(e)
elif self.__opCode == 12:
self.__log.critical("Cluster %s already allocated." % clusterDir)
elif self.__opCode == 10:
self.__log.critical("dead\t%s\t%s" % (clusterInfo['jobid'],
clusterDir))
elif self.__opCode == 13:
self.__log.warn("hdfs dead\t%s\t%s" % (clusterInfo['jobid'],
clusterDir))
elif self.__opCode == 14:
self.__log.warn("mapred dead\t%s\t%s" % (clusterInfo['jobid'],
clusterDir))
if self.__opCode > 0 and self.__opCode != 15:
self.__log.critical("Cannot allocate cluster %s" % clusterDir)
else:
print self.__hodhelp.help(operation)
self.__log.critical("%s operation requires two arguments. " % operation
+ "A cluster directory and a nodecount.")
self.__opCode = 3
def _is_cluster_allocated(self, clusterDir):
if os.path.isdir(clusterDir):
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
if clusterInfo != {}:
return True
return False
def _op_deallocate(self, args):
operation = "deallocate"
argLength = len(args)
if argLength == 2:
clusterDir = self.__norm_cluster_dir(args[1])
if os.path.isdir(clusterDir):
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
if clusterInfo == {}:
self.__handle_invalid_cluster_directory(clusterDir, cleanUp=True)
else:
self.__opCode = \
self.__cluster.deallocate(clusterDir, clusterInfo)
          # irrespective of whether deallocate failed or not,
          # remove the cluster state.
self.__clusterState.clear()
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.W_OK,)):
self.__log.critical(INVALID_STATE_FILE_MSGS[3] % \
self.__userState.get_state_file())
self.__opCode = 1
return
self.__remove_cluster(clusterDir)
else:
self.__handle_invalid_cluster_directory(clusterDir, cleanUp=True)
else:
print self.__hodhelp.help(operation)
self.__log.critical("%s operation requires one argument. " % operation
+ "A cluster path.")
self.__opCode = 3
def _op_list(self, args):
operation = 'list'
clusterList = self.__userState.read(CLUSTER_DATA_FILE)
for path in clusterList.keys():
if not os.path.isdir(path):
self.__log.info("cluster state unknown\t%s\t%s" % (clusterList[path], path))
continue
self.__setup_cluster_state(path)
clusterInfo = self.__clusterState.read()
if clusterInfo == {}:
# something wrong with the cluster directory.
self.__log.info("cluster state unknown\t%s\t%s" % (clusterList[path], path))
continue
clusterStatus = self.__cluster.check_cluster(clusterInfo)
if clusterStatus == 12:
self.__log.info("alive\t%s\t%s" % (clusterList[path], path))
elif clusterStatus == 10:
self.__log.info("dead\t%s\t%s" % (clusterList[path], path))
elif clusterStatus == 13:
self.__log.info("hdfs dead\t%s\t%s" % (clusterList[path], path))
elif clusterStatus == 14:
self.__log.info("mapred dead\t%s\t%s" % (clusterList[path], path))
def _op_info(self, args):
operation = 'info'
argLength = len(args)
if argLength == 2:
clusterDir = self.__norm_cluster_dir(args[1])
if os.path.isdir(clusterDir):
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
if clusterInfo == {}:
# something wrong with the cluster directory.
self.__handle_invalid_cluster_directory(clusterDir)
else:
clusterStatus = self.__cluster.check_cluster(clusterInfo)
if clusterStatus == 12:
self.__print_cluster_info(clusterInfo)
self.__log.info("hadoop-site.xml at %s" % clusterDir)
elif clusterStatus == 10:
self.__log.critical("%s cluster is dead" % clusterDir)
elif clusterStatus == 13:
self.__log.warn("%s cluster hdfs is dead" % clusterDir)
elif clusterStatus == 14:
self.__log.warn("%s cluster mapred is dead" % clusterDir)
if clusterStatus != 12:
if clusterStatus == 15:
self.__log.critical("Cluster %s not allocated." % clusterDir)
else:
self.__print_cluster_info(clusterInfo)
self.__log.info("hadoop-site.xml at %s" % clusterDir)
self.__opCode = clusterStatus
else:
self.__handle_invalid_cluster_directory(clusterDir)
else:
print self.__hodhelp.help(operation)
self.__log.critical("%s operation requires one argument. " % operation
+ "A cluster path.")
self.__opCode = 3
def __handle_invalid_cluster_directory(self, clusterDir, cleanUp=False):
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.R_OK,)):
self.__log.critical(INVALID_STATE_FILE_MSGS[0] % \
self.__userState.get_state_file())
self.__opCode = 1
return
clusterList = self.__userState.read(CLUSTER_DATA_FILE)
if clusterDir in clusterList.keys():
# previously allocated cluster.
self.__log.critical("Cannot find information for cluster with id '%s' in previously allocated cluster directory '%s'." % (clusterList[clusterDir], clusterDir))
if cleanUp:
self.__cluster.delete_job(clusterList[clusterDir])
self.__log.critical("Freeing resources allocated to the cluster.")
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.W_OK,)):
self.__log.critical(INVALID_STATE_FILE_MSGS[1] % \
self.__userState.get_state_file())
self.__opCode = 1
return
self.__remove_cluster(clusterDir)
self.__opCode = 3
else:
if not os.path.exists(clusterDir):
self.__log.critical( \
"Invalid hod.clusterdir(--hod.clusterdir or -d). " + \
clusterDir + " : No such directory")
elif not os.path.isdir(clusterDir):
self.__log.critical( \
"Invalid hod.clusterdir(--hod.clusterdir or -d). " + \
clusterDir + " : Not a directory")
else:
self.__log.critical( \
"Invalid hod.clusterdir(--hod.clusterdir or -d). " + \
clusterDir + " : Not tied to any allocated cluster.")
self.__opCode = 15
def __print_cluster_info(self, clusterInfo):
keys = clusterInfo.keys()
_dict = {
'jobid' : 'Cluster Id', 'min' : 'Nodecount',
'hdfs' : 'HDFS UI at' , 'mapred' : 'Mapred UI at'
}
for key in _dict.keys():
if clusterInfo.has_key(key):
self.__log.info("%s %s" % (_dict[key], clusterInfo[key]))
if clusterInfo.has_key('ring'):
self.__log.debug("%s\t%s" % ('Ringmaster at ', clusterInfo['ring']))
if self.__cfg['hod']['debug'] == 4:
for var in clusterInfo['env'].keys():
self.__log.debug("%s = %s" % (var, clusterInfo['env'][var]))
def _op_help(self, arg):
    if arg is None or len(arg) != 2:
print "hod commands:\n"
for op in self.__ops:
print self.__hodhelp.help(op)
else:
if arg[1] not in self.__ops:
print self.__hodhelp.help('help')
self.__log.critical("Help requested for invalid operation : %s"%arg[1])
self.__opCode = 3
else: print self.__hodhelp.help(arg[1])
def operation(self):
operation = self.__cfg['hod']['operation']
try:
opList = self.__check_operation(operation)
if self.__opCode == 0:
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.R_OK,)):
self.__log.critical(INVALID_STATE_FILE_MSGS[0] % \
self.__userState.get_state_file())
self.__opCode = 1
return self.__opCode
getattr(self, "_op_%s" % opList[0])(opList)
except HodInterruptException, h:
self.__log.critical("op: %s failed because of a process interrupt." \
% operation)
self.__opCode = HOD_INTERRUPTED_CODE
except:
self.__log.critical("op: %s failed: %s" % (operation,
get_exception_error_string()))
self.__log.debug(get_exception_string())
self.__cleanup()
self.__log.debug("return code: %s" % self.__opCode)
return self.__opCode
def script(self):
errorFlag = False
errorMsgs = []
scriptRet = 0 # return from the script, if run
script = self.__cfg['hod']['script']
nodes = self.__cfg['hod']['nodecount']
clusterDir = self.__cfg['hod']['clusterdir']
if not os.path.exists(script):
errorFlag = True
errorMsgs.append("Invalid script file (--hod.script or -s) : " + \
script + " : No such file")
elif not os.path.isfile(script):
errorFlag = True
errorMsgs.append("Invalid script file (--hod.script or -s) : " + \
script + " : Not a file.")
else:
isExecutable = os.access(script, os.X_OK)
if not isExecutable:
errorFlag = True
errorMsgs.append("Invalid script file (--hod.script or -s) : " + \
script + " : Not an executable.")
if not os.path.exists(clusterDir):
try:
os.makedirs(clusterDir)
except OSError, err:
errorFlag = True
errorMsgs.append("Could not create cluster directory. %s" % (str(err)))
elif not os.path.isdir(clusterDir):
errorFlag = True
errorMsgs.append( \
"Invalid cluster directory (--hod.clusterdir or -d) : " + \
clusterDir + " : Not a directory")
if int(self.__cfg['hod']['nodecount']) < 3 :
errorFlag = True
errorMsgs.append("Invalid nodecount (--hod.nodecount or -n) : " + \
"Must be >= 3. Given nodes: %s" % nodes)
if errorFlag:
for msg in errorMsgs:
self.__log.critical(msg)
self.handle_script_exit_code(scriptRet, clusterDir)
sys.exit(3)
try:
self._op_allocate(('allocate', clusterDir, str(nodes)))
if self.__opCode == 0:
if self.__cfg['hod'].has_key('script-wait-time'):
time.sleep(self.__cfg['hod']['script-wait-time'])
          self.__log.debug('Slept for %d seconds. Now going to run the script' % self.__cfg['hod']['script-wait-time'])
if hodInterrupt.isSet():
self.__log.debug('Hod interrupted - not executing script')
else:
scriptRunner = hadoopScript(clusterDir,
self.__cfg['hod']['original-dir'])
self.__opCode = scriptRunner.run(script)
scriptRet = self.__opCode
self.__log.info("Exit code from running the script: %d" % self.__opCode)
else:
self.__log.critical("Error %d in allocating the cluster. Cannot run the script." % self.__opCode)
if hodInterrupt.isSet():
# Got interrupt while executing script. Unsetting it for deallocating
hodInterrupt.setFlag(False)
if self._is_cluster_allocated(clusterDir):
self._op_deallocate(('deallocate', clusterDir))
except HodInterruptException, h:
self.__log.critical("Script failed because of a process interrupt.")
self.__opCode = HOD_INTERRUPTED_CODE
except:
self.__log.critical("script: %s failed: %s" % (script,
get_exception_error_string()))
self.__log.debug(get_exception_string())
self.__cleanup()
self.handle_script_exit_code(scriptRet, clusterDir)
return self.__opCode
def handle_script_exit_code(self, scriptRet, clusterDir):
# We want to give importance to a failed script's exit code, and write out exit code to a file separately
# so users can easily get it if required. This way they can differentiate between the script's exit code
# and hod's exit code.
if os.path.exists(clusterDir):
exit_code_file_name = (os.path.join(clusterDir, 'script.exitcode'))
if scriptRet != 0:
exit_code_file = open(exit_code_file_name, 'w')
print >>exit_code_file, scriptRet
exit_code_file.close()
self.__opCode = scriptRet
else:
#ensure script exit code file is not there:
if (os.path.exists(exit_code_file_name)):
os.remove(exit_code_file_name)
class hodHelp:
def __init__(self):
self.ops = ['allocate', 'deallocate', 'info', 'list','script', 'help']
self.usage_strings = \
{
'allocate' : 'hod allocate -d <clusterdir> -n <nodecount> [OPTIONS]',
'deallocate' : 'hod deallocate -d <clusterdir> [OPTIONS]',
'list' : 'hod list [OPTIONS]',
'info' : 'hod info -d <clusterdir> [OPTIONS]',
'script' :
'hod script -d <clusterdir> -n <nodecount> -s <script> [OPTIONS]',
'help' : 'hod help <OPERATION>',
}
self.description_strings = \
{
'allocate' : "Allocates a cluster of n nodes using the specified \n" + \
" cluster directory to store cluster state \n" + \
" information. The Hadoop site XML is also stored \n" + \
" in this location.\n",
'deallocate' : "Deallocates a cluster using the specified \n" + \
" cluster directory. This operation is also \n" + \
" required to clean up a dead cluster.\n",
'list' : "List all clusters currently allocated by a user, \n" + \
" along with limited status information and the \n" + \
" cluster ID.\n",
'info' : "Provide detailed information on an allocated cluster.\n",
'script' : "Allocates a cluster of n nodes with the given \n" +\
" cluster directory, runs the specified script \n" + \
" using the allocated cluster, and then \n" + \
" deallocates the cluster.\n",
'help' : "Print help for the operation and exit.\n" + \
"Available operations : %s.\n" % self.ops,
}
def usage(self, op):
return "Usage : " + self.usage_strings[op] + "\n" + \
"For full description: hod help " + op + ".\n"
def help(self, op=None):
if op is None:
return "hod <operation> [ARGS] [OPTIONS]\n" + \
"Available operations : %s\n" % self.ops + \
"For help on a particular operation : hod help <operation>.\n" + \
"For all options : hod help options."
else:
return "Usage : " + self.usage_strings[op] + "\n" + \
"Description : " + self.description_strings[op] + \
"For all options : hod help options.\n"
|
bhavesh37/sp17-i524
|
refs/heads/master
|
project/all.py
|
15
|
from __future__ import print_function
import glob
import os
makes = glob.glob("S*/report/Makefile")
os.system("echo > ~/all.log ")
for make in makes:
d = make.replace("/Makefile", "")
print (70 * "=")
print (d)
os.system("cd " + d + "; rm report.pdf >> ~/all.log ")
os.system("cd " + d + "; make clean >> ~/all.log")
os.system("cd " + d + "; make >> ~/all.log")
print (70 * "=")
os.system("make all >> ~/all.log")
|
lightfaith/locasploit
|
refs/heads/master
|
source/modules/iot_binwalk_scan.py
|
1
|
#!/usr/bin/env python3
"""
This module uses binwalk to analyze structure of a binary file.
"""
from source.modules._generic_module import *
class Module(GenericModule):
def __init__(self):
self.authors = [
Author(name='Vitezslav Grygar', email='vitezslav.grygar@gmail.com', web='https://badsulog.blogspot.com'),
]
self.name = 'iot.binwalk.scan'
self.short_description = 'Performs a binwalk on a file.'
self.references = [
'',
]
self.date = '2016-07-22'
self.license = 'GNU GPLv2'
self.version = '1.0'
self.tags = [
'IoT',
'Internet of Things',
'binwalk',
'firmware'
]
self.description = """
Performs a binwalk on a file.
"""
self.dependencies = {
}
self.changelog = """
"""
self.reset_parameters()
def reset_parameters(self):
self.parameters = {
'ACTIVEROOT': Parameter(mandatory=True, description='System to work with'),
'SILENT': Parameter(value='no', mandatory=True, description='Suppress the output'),
'BINFILE': Parameter(mandatory=True, description='File to analyze'),
}
def check(self, silent=None):
if silent is None:
silent = positive(self.parameters['SILENT'].value)
result = CHECK_SUCCESS
# binwalk available?
try:
import binwalk
        except ImportError:
if not silent:
log.err('Binwalk is not available.')
result = CHECK_FAILURE
return result
def run(self):
silent = positive(self.parameters['SILENT'].value)
import binwalk
try:
            # Perform a signature scan against the specified file and suppress the usual binwalk output.
path = io.get_fullpath(self.parameters['ACTIVEROOT'].value, self.parameters['BINFILE'].value)
for module in binwalk.scan(path, signature=True, quiet=True):
if not silent:
for result in module.results:
log.writeline('0x%.8X %s [%s]' % (result.offset, result.description, str(result.valid)))
except binwalk.ModuleException as e:
log.err(str(e))
return None
"""
class Thread(threading.Thread):
def __init__(self, silent, timeout):
threading.Thread.__init__(self)
self.silent = silent
self.timeout = timeout
self.terminate = False
# starts the thread
def run(self):
if not self.silent:
log.info('You have %d seconds.' % (self.timeout))
while self.timeout > 0:
self.timeout -= 1
time.sleep(1)
if not self.silent:
log.ok('Time\'s up!')
# terminates the thread
def stop(self):
self.terminate = True
"""
lib.module_objects.append(Module())
|
deepfire/partus
|
refs/heads/master
|
llvm-bitcode.py
|
1
|
import struct
def error(fmt, *args):
    raise Exception(fmt % args)
def the(type, x):
    if not isinstance(x, type):
        error("%s is not of type %s.", x, type)
    return x
def ceil(x, mask):
    # Round x up to the next multiple of (mask + 1).
    return ((x | mask) + 1) if x & mask else x
def encode(x):
    # Encode a single object exposing encode(), or a sequence of such objects.
    if hasattr(x, 'encode'):
        return x.encode()
    return b''.join(encode(item) for item in x)
##
##
class llvm_bytecode_object():
    def __len__(self):
        return self.size
class block(llvm_bytecode_object):
    ## expectations: __len__ and encode for all content
    def __init__(self, type, content):
        self.type = the(int, type)
        self.content_size = len(content)
        self.size = 8 + ceil(self.content_size, 0b11)
        self.content = content
    def encode(self):
        if self.type & ~0xffffffff: error("Invalid block type: 0x%x", self.type)
        if self.size & ~0xffffffff: error("Invalid block size: 0x%x", self.size)
        # Little-endian header and zero padding to a 4-byte boundary (assumed).
        return (struct.pack("<II", self.type, self.size)
                + encode(self.content)
                + bytes(ceil(self.content_size, 0b11) - self.content_size))
class llist(llvm_bytecode_object):
    # Assumed layout: a 4-byte size prefix followed by the raw content.
    def __init__(self, content):
        self.content_size = len(content)
        self.size = 4 + self.content_size
        self.content = content
    def encode(self):
        if self.size & ~0xffffffff: error("Invalid llist size: 0x%x", self.size)
        return (struct.pack("<I", self.size)
                + encode(self.content))
class zlist(llvm_bytecode_object):
    # Assumed layout: raw content with no header, so size equals content size.
    def __init__(self, content):
        self.content_size = len(content)
        self.size = self.content_size
        self.content = content
    def encode(self):
        return encode(self.content)
class vbr(llvm_bytecode_object):
    # Variable bit-rate integer: 7 payload bits per byte, low bits first, the
    # high bit set on every byte except the last, and the sign folded into
    # bit 0 (sign-magnitude). This is a common varint scheme, assumed here.
    def __init__(self, x):
        self.content = the(int, x)
        bits = (abs(x) << 1) | (1 if x < 0 else 0)
        self.size = max((bits.bit_length() + 6) // 7, 1)
    def encode(self):
        bits = (abs(self.content) << 1) | (1 if self.content < 0 else 0)
        out = []
        while True:
            byte = bits & 0x7f
            bits >>= 7
            out.append(byte | (0x80 if bits else 0))
            if not bits:
                break
        return bytes(out)
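# A worked example for the vbr encoding above (assuming the little-endian,
# 7-bits-per-byte scheme used in encode(); not verified against a real
# LLVM bytecode reader):
#
#   vbr(300).encode()
#   # sign-folded bits = 600; low 7 bits 0x58 plus continuation bit -> 0xd8,
#   # remaining bits -> 0x04, giving b'\xd8\x04'
#   vbr(-5).encode()
#   # sign-folded bits = (5 << 1) | 1 = 0x0b, giving b'\x0b'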
|
kizniche/Mycodo
|
refs/heads/master
|
mycodo/utils/__init__.py
|
1054
|
# coding=utf-8
|
ProfessionalIT/maxigenios-website
|
refs/heads/master
|
sdk/google_appengine/lib/cherrypy/cherrypy/test/test_http.py
|
36
|
"""Tests for managing HTTP issues (malformed requests, etc)."""
import errno
import mimetypes
import socket
import sys
import cherrypy
from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob, py3k
def encode_multipart_formdata(files):
"""Return (content_type, body) ready for httplib.HTTP instance.
files: a sequence of (name, filename, value) tuples for multipart uploads.
"""
BOUNDARY = '________ThIs_Is_tHe_bouNdaRY_$'
L = []
for key, filename, value in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
ct = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
L.append('Content-Type: %s' % ct)
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = '\r\n'.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
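# A small usage sketch for the helper above (the file values are illustrative):
#
#   ct, body = encode_multipart_formdata([('file', 'hello.txt', 'hi there')])
#   # ct is 'multipart/form-data; boundary=________ThIs_Is_tHe_bouNdaRY_$'
#   # body holds a single part whose Content-Type (text/plain) is guessed
#   # from the .txt extension, followed by the closing boundary.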
from cherrypy.test import helper
class HTTPTests(helper.CPWebCase):
def setup_server():
class Root:
def index(self, *args, **kwargs):
return "Hello world!"
index.exposed = True
def no_body(self, *args, **kwargs):
return "Hello world!"
no_body.exposed = True
no_body._cp_config = {'request.process_request_body': False}
def post_multipart(self, file):
"""Return a summary ("a * 65536\nb * 65536") of the uploaded file."""
contents = file.file.read()
summary = []
curchar = None
count = 0
for c in contents:
if c == curchar:
count += 1
else:
if count:
if py3k: curchar = chr(curchar)
summary.append("%s * %d" % (curchar, count))
count = 1
curchar = c
if count:
if py3k: curchar = chr(curchar)
summary.append("%s * %d" % (curchar, count))
return ", ".join(summary)
post_multipart.exposed = True
cherrypy.tree.mount(Root())
cherrypy.config.update({'server.max_request_body_size': 30000000})
setup_server = staticmethod(setup_server)
def test_no_content_length(self):
# "The presence of a message-body in a request is signaled by the
# inclusion of a Content-Length or Transfer-Encoding header field in
# the request's message-headers."
#
# Send a message with neither header and no body. Even though
# the request is of method POST, this should be OK because we set
# request.process_request_body to False for our handler.
if self.scheme == "https":
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
c.request("POST", "/no_body")
response = c.getresponse()
self.body = response.fp.read()
self.status = str(response.status)
self.assertStatus(200)
self.assertBody(ntob('Hello world!'))
# Now send a message that has no Content-Length, but does send a body.
# Verify that CP times out the socket and responds
# with 411 Length Required.
if self.scheme == "https":
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
c.request("POST", "/")
response = c.getresponse()
self.body = response.fp.read()
self.status = str(response.status)
self.assertStatus(411)
def test_post_multipart(self):
alphabet = "abcdefghijklmnopqrstuvwxyz"
# generate file contents for a large post
contents = "".join([c * 65536 for c in alphabet])
# encode as multipart form data
files=[('file', 'file.txt', contents)]
content_type, body = encode_multipart_formdata(files)
body = body.encode('Latin-1')
# post file
if self.scheme == 'https':
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
c.putrequest('POST', '/post_multipart')
c.putheader('Content-Type', content_type)
c.putheader('Content-Length', str(len(body)))
c.endheaders()
c.send(body)
response = c.getresponse()
self.body = response.fp.read()
self.status = str(response.status)
self.assertStatus(200)
self.assertBody(", ".join(["%s * 65536" % c for c in alphabet]))
def test_malformed_request_line(self):
if getattr(cherrypy.server, "using_apache", False):
return self.skip("skipped due to known Apache differences...")
# Test missing version in Request-Line
if self.scheme == 'https':
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
c._output(ntob('GET /'))
c._send_output()
if hasattr(c, 'strict'):
response = c.response_class(c.sock, strict=c.strict, method='GET')
else:
# Python 3.2 removed the 'strict' feature, saying:
# "http.client now always assumes HTTP/1.x compliant servers."
response = c.response_class(c.sock, method='GET')
response.begin()
self.assertEqual(response.status, 400)
self.assertEqual(response.fp.read(22), ntob("Malformed Request-Line"))
c.close()
def test_malformed_header(self):
if self.scheme == 'https':
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
c.putrequest('GET', '/')
c.putheader('Content-Type', 'text/plain')
# See http://www.cherrypy.org/ticket/941
c._output(ntob('Re, 1.2.3.4#015#012'))
c.endheaders()
response = c.getresponse()
self.status = str(response.status)
self.assertStatus(400)
self.body = response.fp.read(20)
self.assertBody("Illegal header line.")
def test_http_over_https(self):
if self.scheme != 'https':
return self.skip("skipped (not running HTTPS)... ")
# Try connecting without SSL.
conn = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
conn.putrequest("GET", "/", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.response_class(conn.sock, method="GET")
try:
response.begin()
self.assertEqual(response.status, 400)
self.body = response.read()
self.assertBody("The client sent a plain HTTP request, but this "
"server only speaks HTTPS on this port.")
except socket.error:
e = sys.exc_info()[1]
# "Connection reset by peer" is also acceptable.
if e.errno != errno.ECONNRESET:
raise
def test_garbage_in(self):
# Connect without SSL regardless of server.scheme
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
c._output(ntob('gjkgjklsgjklsgjkljklsg'))
c._send_output()
response = c.response_class(c.sock, method="GET")
try:
response.begin()
self.assertEqual(response.status, 400)
self.assertEqual(response.fp.read(22), ntob("Malformed Request-Line"))
c.close()
except socket.error:
e = sys.exc_info()[1]
# "Connection reset by peer" is also acceptable.
if e.errno != errno.ECONNRESET:
raise
|
amousset/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/cartesian.py
|
133
|
# (c) 2013, Bradley Young <young.bradley@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from itertools import product
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
"""
Create the cartesian product of lists
[1, 2, 3], [a, b] -> [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]
"""
def _lookup_variables(self, terms):
"""
Turn this:
terms == ["1,2,3", "a,b"]
into this:
terms == [[1,2,3], [a, b]]
"""
results = []
for x in terms:
intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
results.append(intermediate)
return results
def run(self, terms, variables=None, **kwargs):
terms = self._lookup_variables(terms)
my_list = terms[:]
if len(my_list) == 0:
raise AnsibleError("with_cartesian requires at least one element in each list")
        return [self._flatten(x) for x in product(*my_list)]
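# A sketch of what run() computes, using itertools directly (the literal lists
# stand in for already-templated terms):
#
#   from itertools import product
#   list(product([1, 2, 3], ['a', 'b']))
#   # [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b'), (3, 'a'), (3, 'b')]
#
# run() additionally flattens each tuple via self._flatten().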
|
andim/scipy
|
refs/heads/master
|
scipy/optimize/tests/test_trustregion.py
|
105
|
"""
Unit tests for trust-region optimization routines.
To run it in its simplest form::
nosetests test_trustregion.py
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import (minimize, rosen, rosen_der, rosen_hess,
rosen_hess_prod)
from numpy.testing import (TestCase, assert_, assert_equal, assert_allclose,
run_module_suite)
class Accumulator:
""" This is for testing callbacks."""
def __init__(self):
self.count = 0
self.accum = None
def __call__(self, x):
self.count += 1
if self.accum is None:
self.accum = np.array(x)
else:
self.accum += x
class TestTrustRegionSolvers(TestCase):
def setUp(self):
self.x_opt = [1.0, 1.0]
self.easy_guess = [2.0, 2.0]
self.hard_guess = [-1.2, 1.0]
def test_dogleg_accuracy(self):
# test the accuracy and the return_all option
x0 = self.hard_guess
r = minimize(rosen, x0, jac=rosen_der, hess=rosen_hess, tol=1e-8,
method='dogleg', options={'return_all': True},)
assert_allclose(x0, r['allvecs'][0])
assert_allclose(r['x'], r['allvecs'][-1])
assert_allclose(r['x'], self.x_opt)
def test_dogleg_callback(self):
# test the callback mechanism and the maxiter and return_all options
accumulator = Accumulator()
maxiter = 5
r = minimize(rosen, self.hard_guess, jac=rosen_der, hess=rosen_hess,
callback=accumulator, method='dogleg',
options={'return_all': True, 'maxiter': maxiter},)
assert_equal(accumulator.count, maxiter)
assert_equal(len(r['allvecs']), maxiter+1)
assert_allclose(r['x'], r['allvecs'][-1])
assert_allclose(sum(r['allvecs'][1:]), accumulator.accum)
def test_solver_concordance(self):
# Assert that dogleg uses fewer iterations than ncg on the Rosenbrock
# test function, although this does not necessarily mean
# that dogleg is faster or better than ncg even for this function
# and especially not for other test functions.
f = rosen
g = rosen_der
h = rosen_hess
for x0 in (self.easy_guess, self.hard_guess):
r_dogleg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
method='dogleg', options={'return_all': True})
r_trust_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
method='trust-ncg',
options={'return_all': True})
r_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
method='newton-cg', options={'return_all': True})
assert_allclose(self.x_opt, r_dogleg['x'])
assert_allclose(self.x_opt, r_trust_ncg['x'])
assert_allclose(self.x_opt, r_ncg['x'])
assert_(len(r_dogleg['allvecs']) < len(r_ncg['allvecs']))
def test_trust_ncg_hessp(self):
for x0 in (self.easy_guess, self.hard_guess):
r = minimize(rosen, x0, jac=rosen_der, hessp=rosen_hess_prod,
tol=1e-8, method='trust-ncg')
assert_allclose(self.x_opt, r['x'])
if __name__ == '__main__':
run_module_suite()
|
nathanial/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/contrib/localflavor/fr/fr_department.py
|
314
|
# -*- coding: utf-8 -*-
DEPARTMENT_ASCII_CHOICES = (
('01', '01 - Ain'),
('02', '02 - Aisne'),
('03', '03 - Allier'),
('04', '04 - Alpes-de-Haute-Provence'),
('05', '05 - Hautes-Alpes'),
('06', '06 - Alpes-Maritimes'),
('07', '07 - Ardeche'),
('08', '08 - Ardennes'),
('09', '09 - Ariege'),
('10', '10 - Aube'),
('11', '11 - Aude'),
('12', '12 - Aveyron'),
('13', '13 - Bouches-du-Rhone'),
('14', '14 - Calvados'),
('15', '15 - Cantal'),
('16', '16 - Charente'),
('17', '17 - Charente-Maritime'),
('18', '18 - Cher'),
('19', '19 - Correze'),
('21', '21 - Cote-d\'Or'),
('22', '22 - Cotes-d\'Armor'),
('23', '23 - Creuse'),
('24', '24 - Dordogne'),
('25', '25 - Doubs'),
('26', '26 - Drome'),
('27', '27 - Eure'),
    ('28', '28 - Eure-et-Loir'),
('29', '29 - Finistere'),
('2A', '2A - Corse-du-Sud'),
('2B', '2B - Haute-Corse'),
('30', '30 - Gard'),
('31', '31 - Haute-Garonne'),
('32', '32 - Gers'),
('33', '33 - Gironde'),
('34', '34 - Herault'),
('35', '35 - Ille-et-Vilaine'),
('36', '36 - Indre'),
('37', '37 - Indre-et-Loire'),
('38', '38 - Isere'),
('39', '39 - Jura'),
('40', '40 - Landes'),
('41', '41 - Loir-et-Cher'),
('42', '42 - Loire'),
('43', '43 - Haute-Loire'),
('44', '44 - Loire-Atlantique'),
('45', '45 - Loiret'),
('46', '46 - Lot'),
('47', '47 - Lot-et-Garonne'),
('48', '48 - Lozere'),
('49', '49 - Maine-et-Loire'),
('50', '50 - Manche'),
('51', '51 - Marne'),
('52', '52 - Haute-Marne'),
('53', '53 - Mayenne'),
('54', '54 - Meurthe-et-Moselle'),
('55', '55 - Meuse'),
('56', '56 - Morbihan'),
('57', '57 - Moselle'),
('58', '58 - Nievre'),
('59', '59 - Nord'),
('60', '60 - Oise'),
('61', '61 - Orne'),
('62', '62 - Pas-de-Calais'),
('63', '63 - Puy-de-Dome'),
('64', '64 - Pyrenees-Atlantiques'),
('65', '65 - Hautes-Pyrenees'),
('66', '66 - Pyrenees-Orientales'),
('67', '67 - Bas-Rhin'),
('68', '68 - Haut-Rhin'),
('69', '69 - Rhone'),
('70', '70 - Haute-Saone'),
('71', '71 - Saone-et-Loire'),
('72', '72 - Sarthe'),
('73', '73 - Savoie'),
('74', '74 - Haute-Savoie'),
('75', '75 - Paris'),
('76', '76 - Seine-Maritime'),
('77', '77 - Seine-et-Marne'),
('78', '78 - Yvelines'),
('79', '79 - Deux-Sevres'),
('80', '80 - Somme'),
('81', '81 - Tarn'),
('82', '82 - Tarn-et-Garonne'),
('83', '83 - Var'),
('84', '84 - Vaucluse'),
('85', '85 - Vendee'),
('86', '86 - Vienne'),
('87', '87 - Haute-Vienne'),
('88', '88 - Vosges'),
('89', '89 - Yonne'),
('90', '90 - Territoire de Belfort'),
('91', '91 - Essonne'),
('92', '92 - Hauts-de-Seine'),
('93', '93 - Seine-Saint-Denis'),
('94', '94 - Val-de-Marne'),
('95', '95 - Val-d\'Oise'),
('971', '971 - Guadeloupe'),
('972', '972 - Martinique'),
('973', '973 - Guyane'),
('974', '974 - La Reunion'),
('975', '975 - Saint-Pierre-et-Miquelon'),
('976', '976 - Mayotte'),
('984', '984 - Terres Australes et Antarctiques'),
('986', '986 - Wallis et Futuna'),
('987', '987 - Polynesie Francaise'),
('988', '988 - Nouvelle-Caledonie'),
)
|
t0mk/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ec2_ami.py
|
25
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ec2_ami
version_added: "1.3"
short_description: create or destroy an image in ec2
description:
- Creates or deletes ec2 images.
options:
instance_id:
description:
- Instance ID to create the AMI from.
required: false
default: null
name:
description:
- The name of the new AMI.
required: false
default: null
architecture:
version_added: "2.3"
description:
- The target architecture of the image to register
required: false
default: null
kernel_id:
version_added: "2.3"
description:
- The target kernel id of the image to register
required: false
default: null
virtualization_type:
version_added: "2.3"
description:
- The virtualization type of the image to register
required: false
default: null
root_device_name:
version_added: "2.3"
description:
- The root device name of the image to register
required: false
default: null
wait:
description:
- Wait for the AMI to be in state 'available' before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
state:
description:
- Create or deregister/delete AMI.
required: false
default: 'present'
choices: [ "absent", "present" ]
description:
description:
- Human-readable string describing the contents and purpose of the AMI.
required: false
default: null
no_reboot:
description:
      - Flag indicating that the bundling process should not attempt to shut down the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance.
required: false
default: no
choices: [ "yes", "no" ]
image_id:
description:
- Image ID to be deregistered.
required: false
default: null
device_mapping:
version_added: "2.0"
description:
- List of device hashes/dictionaries with custom configurations (same block-device-mapping parameters)
- "Valid properties include: device_name, volume_type, size (in GB), delete_on_termination (boolean), no_device (boolean), snapshot_id, iops (for io1 volume_type)"
required: false
default: null
delete_snapshot:
description:
- Delete snapshots when deregistering the AMI.
required: false
default: "no"
choices: [ "yes", "no" ]
tags:
description:
- A dictionary of tags to add to the new image; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
version_added: "2.0"
launch_permissions:
description:
- Users and groups that should be able to launch the AMI. Expects
dictionary with a key of user_ids and/or group_names. user_ids should
be a list of account ids. group_name should be a list of groups, "all"
is the only acceptable value currently.
required: false
default: null
version_added: "2.0"
author:
- "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>"
- "Constantin Bugneac (@Constantin07) <constantin.bugneac@endava.com>"
- "Ross Williams (@gunzy83) <gunzy83au@gmail.com>"
extends_documentation_fragment:
- aws
- ec2
'''
# Thank you to iAcquire for sponsoring development of this module.
EXAMPLES = '''
# Basic AMI Creation
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
wait: yes
name: newtest
tags:
Name: newtest
Service: TestService
register: image
# Basic AMI Creation, without waiting
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
instance_id: i-xxxxxx
wait: no
name: newtest
register: image
# AMI Registration from EBS Snapshot
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
name: newtest
state: present
architecture: x86_64
virtualization_type: hvm
root_device_name: /dev/xvda
device_mapping:
- device_name: /dev/xvda
size: 8
snapshot_id: snap-xxxxxxxx
delete_on_termination: true
volume_type: gp2
register: image
# AMI Creation, with a custom root-device size and another EBS attached
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
name: newtest
device_mapping:
- device_name: /dev/sda1
size: XXX
delete_on_termination: true
volume_type: gp2
- device_name: /dev/sdb
size: YYY
delete_on_termination: false
volume_type: gp2
register: image
# AMI Creation, excluding a volume attached at /dev/sdb
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
name: newtest
device_mapping:
- device_name: /dev/sda1
size: XXX
delete_on_termination: true
volume_type: gp2
- device_name: /dev/sdb
no_device: yes
register: image
# Deregister/Delete AMI (keep associated snapshots)
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
delete_snapshot: False
state: absent
# Deregister AMI (delete associated snapshots too)
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
delete_snapshot: True
state: absent
# Update AMI Launch Permissions, making it public
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
group_names: ['all']
# Allow AMI to be launched by another account
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
user_ids: ['123456789012']
'''
RETURN = '''
architecture:
description: architecture of image
returned: when AMI is created or already exists
type: string
sample: "x86_64"
block_device_mapping:
description: block device mapping associated with image
returned: when AMI is created or already exists
type: a dictionary of block devices
sample: {
"/dev/sda1": {
"delete_on_termination": true,
"encrypted": false,
"size": 10,
"snapshot_id": "snap-1a03b80e7",
"volume_type": "standard"
}
}
creationDate:
description: creation date of image
returned: when AMI is created or already exists
type: string
sample: "2015-10-15T22:43:44.000Z"
description:
description: description of image
returned: when AMI is created or already exists
type: string
sample: "nat-server"
hypervisor:
description: type of hypervisor
returned: when AMI is created or already exists
type: string
sample: "xen"
image_id:
description: id of the image
returned: when AMI is created or already exists
type: string
sample: "ami-1234abcd"
is_public:
description: whether image is public
returned: when AMI is created or already exists
type: bool
sample: false
location:
description: location of image
returned: when AMI is created or already exists
type: string
sample: "315210894379/nat-server"
name:
description: ami name of image
returned: when AMI is created or already exists
type: string
sample: "nat-server"
ownerId:
description: owner of image
returned: when AMI is created or already exists
type: string
sample: "435210894375"
platform:
description: platform of image
returned: when AMI is created or already exists
type: string
sample: null
root_device_name:
description: root device name of image
returned: when AMI is created or already exists
type: string
sample: "/dev/sda1"
root_device_type:
description: root device type of image
returned: when AMI is created or already exists
type: string
sample: "ebs"
state:
description: state of image
returned: when AMI is created or already exists
type: string
sample: "available"
tags:
description: a dictionary of tags assigned to image
returned: when AMI is created or already exists
type: dictionary of tags
sample: {
"Env": "devel",
"Name": "nat-server"
}
virtualization_type:
description: image virtualization type
returned: when AMI is created or already exists
type: string
sample: "hvm"
snapshots_deleted:
description: a list of snapshot ids deleted after deregistering image
returned: after AMI is deregistered, if 'delete_snapshot' is set to 'yes'
type: list
sample: [
"snap-fbcccb8f",
"snap-cfe7cdb4"
]
'''
import sys
import time
try:
import boto
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_block_device_mapping(image):
"""
Retrieves block device mapping from AMI
"""
bdm_dict = dict()
if image is not None and hasattr(image, 'block_device_mapping'):
bdm = getattr(image,'block_device_mapping')
for device_name in bdm.keys():
bdm_dict[device_name] = {
'size': bdm[device_name].size,
'snapshot_id': bdm[device_name].snapshot_id,
'volume_type': bdm[device_name].volume_type,
'encrypted': bdm[device_name].encrypted,
'delete_on_termination': bdm[device_name].delete_on_termination
}
return bdm_dict
def get_ami_info(image):
return dict(
image_id=image.id,
state=image.state,
architecture=image.architecture,
block_device_mapping=get_block_device_mapping(image),
creationDate=image.creationDate,
description=image.description,
hypervisor=image.hypervisor,
is_public=image.is_public,
location=image.location,
ownerId=image.ownerId,
root_device_name=image.root_device_name,
root_device_type=image.root_device_type,
tags=image.tags,
virtualization_type = image.virtualization_type
)
def create_image(module, ec2):
"""
Creates new AMI
module : AnsibleModule object
ec2: authenticated ec2 connection object
"""
instance_id = module.params.get('instance_id')
name = module.params.get('name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
description = module.params.get('description')
architecture = module.params.get('architecture')
kernel_id = module.params.get('kernel_id')
root_device_name = module.params.get('root_device_name')
virtualization_type = module.params.get('virtualization_type')
no_reboot = module.params.get('no_reboot')
device_mapping = module.params.get('device_mapping')
tags = module.params.get('tags')
launch_permissions = module.params.get('launch_permissions')
try:
params = {'name': name,
'description': description}
images = ec2.get_all_images(filters={'name': name})
if images and images[0]:
# ensure that launch_permissions are up to date
update_image(module, ec2, images[0].id)
bdm = None
if device_mapping:
bdm = BlockDeviceMapping()
for device in device_mapping:
if 'device_name' not in device:
module.fail_json(msg = 'Device name must be set for volume')
device_name = device['device_name']
del device['device_name']
bd = BlockDeviceType(**device)
bdm[device_name] = bd
if instance_id:
params['instance_id'] = instance_id
params['no_reboot'] = no_reboot
if bdm:
params['block_device_mapping'] = bdm
image_id = ec2.create_image(**params)
else:
params['architecture'] = architecture
params['virtualization_type'] = virtualization_type
if kernel_id:
params['kernel_id'] = kernel_id
if root_device_name:
params['root_device_name'] = root_device_name
if bdm:
params['block_device_map'] = bdm
image_id = ec2.register_image(**params)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# Wait until the image is recognized. EC2 API has eventual consistency,
# such that a successful CreateImage API call doesn't guarantee the success
# of subsequent DescribeImages API call using the new image id returned.
for i in range(wait_timeout):
try:
img = ec2.get_image(image_id)
if img.state == 'available':
break
elif img.state == 'failed':
module.fail_json(msg="AMI creation failed, please see the AWS console for more details")
except boto.exception.EC2ResponseError as e:
if ('InvalidAMIID.NotFound' not in e.error_code and 'InvalidAMIID.Unavailable' not in e.error_code) and wait and i == wait_timeout - 1:
module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help. %s: %s" % (e.error_code, e.error_message))
finally:
time.sleep(1)
if img.state != 'available':
module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help.")
if tags:
try:
ec2.create_tags(image_id, tags)
except boto.exception.EC2ResponseError as e:
module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message))
if launch_permissions:
try:
img = ec2.get_image(image_id)
img.set_launch_permissions(**launch_permissions)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id)
module.exit_json(msg="AMI creation operation complete", changed=True, **get_ami_info(img))
def deregister_image(module, ec2):
"""
Deregisters AMI
"""
image_id = module.params.get('image_id')
delete_snapshot = module.params.get('delete_snapshot')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
img = ec2.get_image(image_id)
if img is None:
module.fail_json(msg = "Image %s does not exist" % image_id, changed=False)
# Get all associated snapshot ids before deregistering image otherwise this information becomes unavailable
snapshots = []
if hasattr(img, 'block_device_mapping'):
for key in img.block_device_mapping:
snapshots.append(img.block_device_mapping[key].snapshot_id)
    # Trying to re-delete an already deleted image does not raise an exception;
    # it just returns an object without image attributes
if hasattr(img, 'id'):
try:
params = {'image_id': image_id,
'delete_snapshot': delete_snapshot}
res = ec2.deregister_image(**params)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
else:
module.exit_json(msg = "Image %s has already been deleted" % image_id, changed=False)
# wait here until the image is gone
img = ec2.get_image(image_id)
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time() and img is not None:
img = ec2.get_image(image_id)
time.sleep(3)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "timed out waiting for image to be deregistered/deleted")
    # The boto library hardcodes snapshot deletion for the root volume mounted as '/dev/sda1' only.
    # Delete all snapshots which belong to the image, including a root block device mapped as '/dev/xvda'
if delete_snapshot:
try:
for snapshot_id in snapshots:
ec2.delete_snapshot(snapshot_id)
except boto.exception.BotoServerError as e:
if e.error_code == 'InvalidSnapshot.NotFound':
# Don't error out if root volume snapshot was already deleted as part of deregister_image
pass
module.exit_json(msg="AMI deregister/delete operation complete", changed=True, snapshots_deleted=snapshots)
else:
module.exit_json(msg="AMI deregister/delete operation complete", changed=True)
def update_image(module, ec2, image_id):
"""
Updates AMI
"""
launch_permissions = module.params.get('launch_permissions') or []
if 'user_ids' in launch_permissions:
launch_permissions['user_ids'] = [str(user_id) for user_id in launch_permissions['user_ids']]
img = ec2.get_image(image_id)
if img is None:
module.fail_json(msg = "Image %s does not exist" % image_id, changed=False)
try:
set_permissions = img.get_launch_permissions()
if set_permissions != launch_permissions:
if ('user_ids' in launch_permissions and launch_permissions['user_ids']) or ('group_names' in launch_permissions and launch_permissions['group_names']):
res = img.set_launch_permissions(**launch_permissions)
elif ('user_ids' in set_permissions and set_permissions['user_ids']) or ('group_names' in set_permissions and set_permissions['group_names']):
res = img.remove_launch_permissions(**set_permissions)
else:
module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False)
module.exit_json(msg="AMI launch permissions updated", launch_permissions=launch_permissions, set_perms=set_permissions, changed=True)
else:
module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id = dict(),
image_id = dict(),
architecture = dict(default="x86_64"),
kernel_id = dict(),
virtualization_type = dict(default="hvm"),
root_device_name = dict(),
delete_snapshot = dict(default=False, type='bool'),
name = dict(),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=900),
description = dict(default=""),
no_reboot = dict(default=False, type='bool'),
state = dict(default='present'),
device_mapping = dict(type='list'),
tags = dict(type='dict'),
launch_permissions = dict(type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
try:
ec2 = ec2_connect(module)
except Exception as e:
module.fail_json(msg="Error while connecting to aws: %s" % str(e))
if module.params.get('state') == 'absent':
if not module.params.get('image_id'):
            module.fail_json(msg='image_id is required to deregister/delete an AMI')
deregister_image(module, ec2)
elif module.params.get('state') == 'present':
if module.params.get('image_id') and module.params.get('launch_permissions'):
# Update image's launch permissions
update_image(module, ec2,module.params.get('image_id'))
# Changed is always set to true when provisioning new AMI
if not module.params.get('instance_id') and not module.params.get('device_mapping'):
module.fail_json(msg='instance_id or device_mapping (register from ebs snapshot) parameter is required for new image')
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for new image')
create_image(module, ec2)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
m-lab/ndt-debian
|
refs/heads/master
|
ndt-3.6.5.2/tfw/scenarios.py
|
4
|
#!/usr/bin/python
import wx
class Scenario:
def __init__(self, ctrl, name):
self._ctrl = ctrl
self._name = name
def setName(self, name):
self._name = name
def getName(self):
return self._name
def addHost(self, event):
self._ctrl.addHost()
self._parent.Dismiss()
def startScenario(self, event):
self._ctrl.startScenario()
def stopScenario(self, event):
self._ctrl.stopScenario()
def getPanel(self, parent):
panel = wx.Panel(parent, -1)
a = wx.Button(panel, -1, "Add host", (1, 1))
self._parent = parent
wx.EVT_BUTTON(panel, a.GetId(), self.addHost)
sz = a.GetBestSize()
panel.SetSize( (sz.width, sz.height) )
return panel
def getInfoPanel(self, parent):
wx.StaticText(parent, -1, "Scenario", wx.Point(5,5))
wx.StaticText(parent, -1, " Name: " + self.getName(), wx.Point(5,20))
if self._ctrl.getNotAssignedHostsAmount() > 0:
wx.StaticText(parent, -1, " Hosts: " + str(self._ctrl.getHostsAmount()) +
" (" + str(self._ctrl.getNotAssignedHostsAmount()) + " not assigned)", wx.Point(5,35))
else:
wx.StaticText(parent, -1, " Hosts: " + str(self._ctrl.getHostsAmount()), wx.Point(5,35))
a = wx.Button(parent, -1, "Start", (5, 100))
wx.EVT_BUTTON(parent, a.GetId(), self.startScenario)
b = wx.Button(parent, -1, "Stop", (130, 100))
wx.EVT_BUTTON(parent, b.GetId(), self.stopScenario)
|
simone-campagna/invoice
|
refs/heads/master
|
invoice/database/upgrade/upgrader_v2_6_x__v2_7_0.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
__all__ = [
'Upgrader_v2_6_x__v2_7_0',
]
import collections
import sqlite3
from ..db_types import Bool, Str, StrTuple, Int, Float
from ..db_table import DbTable
from .upgrader import MajorMinorUpgrader
from ...version import Version
from ...validation_result import ValidationResult
from ... import conf
class Upgrader_v2_6_x__v2_7_0(MajorMinorUpgrader):
VERSION_FROM_MAJOR_MINOR = Version(2, 6, None)
VERSION_TO_MAJOR_MINOR = Version(2, 7, 0)
CONFIGURATION_TABLE_v2_6_x = DbTable(
fields=(
('warning_mode', Str()),
('error_mode', Str()),
('remove_orphaned', Bool()),
('partial_update', Bool()),
('header', Bool()),
('total', Bool()),
('stats_group', Str()),
('list_field_names', StrTuple()),
('show_scan_report', Bool()),
('table_mode', Str()),
('max_interruption_days', Int()),
),
)
CONFIGURATION_TABLE_v2_7_0 = DbTable(
fields=(
('warning_mode', Str()),
('error_mode', Str()),
('remove_orphaned', Bool()),
('partial_update', Bool()),
('header', Bool()),
('total', Bool()),
('stats_group', Str()),
('list_field_names', StrTuple()),
('show_scan_report', Bool()),
('table_mode', Str()),
('max_interruption_days', Int()),
('spy_notify_level', Str()),
('spy_delay', Float()),
),
)
INTERNAL_OPTIONS_TABLE_v2_7_0 = DbTable(
fields=(
('needs_refresh', Bool()),
),
)
def impl_downgrade(self, db, version_from, version_to, connection=None):
def new_to_old(new_data):
return {
'spy_notify_level': conf.DEFAULT_SPY_NOTIFY_LEVEL,
'spy_delay': conf.DEFAULT_SPY_DELAY,
}
        self.do_downgrade(
table_name="configuration",
old_table=self.CONFIGURATION_TABLE_v2_6_x,
new_table=self.CONFIGURATION_TABLE_v2_7_0,
new_to_old=new_to_old,
db=db,
version_from=version_from,
version_to=version_to,
connection=connection
)
with db.connect() as connection:
cursor = connection.cursor()
db.execute(cursor, "DROP TABLE internal_options;")
db.execute(cursor, "DROP TRIGGER insert_on_validators;")
db.execute(cursor, "DROP TRIGGER update_on_validators;")
db.execute(cursor, "DROP TRIGGER delete_on_validators;")
def impl_upgrade(self, db, version_from, version_to, connection=None):
def old_to_new(old_data):
return {
'spy_notify_level': conf.DEFAULT_SPY_NOTIFY_LEVEL,
'spy_delay': conf.DEFAULT_SPY_DELAY,
}
self.do_upgrade(
table_name="configuration",
old_table=self.CONFIGURATION_TABLE_v2_6_x,
new_table=self.CONFIGURATION_TABLE_v2_7_0,
old_to_new=old_to_new,
db=db,
version_from=version_from,
version_to=version_to,
connection=connection
)
with db.connect() as connection:
cursor = connection.cursor()
# internal options
try:
db.create_table('internal_options', self.INTERNAL_OPTIONS_TABLE_v2_7_0.fields, connection=connection)
except sqlite3.OperationalError:
pass
# validators triggers
sql = """CREATE TRIGGER insert_on_validators BEFORE INSERT ON validators
BEGIN
UPDATE internal_options SET needs_refresh = 1 WHERE NOT needs_refresh;
END"""
db.execute(cursor, sql)
sql = """CREATE TRIGGER update_on_validators BEFORE UPDATE ON validators
BEGIN
UPDATE internal_options SET needs_refresh = 1 WHERE NOT needs_refresh;
END"""
db.execute(cursor, sql)
sql = """CREATE TRIGGER delete_on_validators BEFORE DELETE ON validators
BEGIN
UPDATE internal_options SET needs_refresh = 1 WHERE NOT needs_refresh;
END"""
db.execute(cursor, sql)
|
stephanehenry27/Sickbeard-anime
|
refs/heads/master
|
autoProcessTV/sabToSickBeard.py
|
51
|
#!/usr/bin/env python
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sys
import autoProcessTV
if len(sys.argv) < 2:
print "No folder supplied - is this being called from SABnzbd?"
sys.exit()
elif len(sys.argv) >= 3:
autoProcessTV.processEpisode(sys.argv[1], sys.argv[2])
else:
autoProcessTV.processEpisode(sys.argv[1])
|
elcolie/fight
|
refs/heads/master
|
vanilla/model_views.py
|
1
|
#coding: utf-8
from django.core.exceptions import ImproperlyConfigured
from django.core.paginator import Paginator, InvalidPage
from django.forms import models as model_forms
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _
from django.views.generic import View
import warnings
class GenericModelView(View):
"""
Base class for all model generic views.
"""
model = None
fields = None
# Object lookup parameters. These are used in the URL kwargs, and when
# performing the model instance lookup.
# Note that if unset then `lookup_url_kwarg` defaults to using the same
# value as `lookup_field`.
lookup_field = 'pk'
lookup_url_kwarg = None
# All the following are optional, and fall back to default values
# based on the 'model' shortcut.
# Each of these has a corresponding `.get_<attribute>()` method.
queryset = None
form_class = None
template_name = None
context_object_name = None
# Pagination parameters.
# Set `paginate_by` to an integer value to turn pagination on.
paginate_by = None
page_kwarg = 'page'
# Suffix that should be appended to automatically generated template names.
template_name_suffix = None
# Queryset and object lookup
def get_object(self):
"""
Returns the object the view is displaying.
"""
queryset = self.get_queryset()
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
try:
lookup = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
except KeyError:
msg = "Lookup field '%s' was not provided in view kwargs to '%s'"
raise ImproperlyConfigured(msg % (lookup_url_kwarg, self.__class__.__name__))
return get_object_or_404(queryset, **lookup)
def get_queryset(self):
"""
Returns the base queryset for the view.
Either used as a list of objects to display, or as the queryset
from which to perform the individual object lookup.
"""
if self.queryset is not None:
return self.queryset._clone()
if self.model is not None:
return self.model._default_manager.all()
msg = "'%s' must either define 'queryset' or 'model', or override 'get_queryset()'"
raise ImproperlyConfigured(msg % self.__class__.__name__)
# Form instantiation
def get_form_class(self):
"""
Returns the form class to use in this view.
"""
if self.form_class is not None:
return self.form_class
if self.model is not None and self.fields is not None:
return model_forms.modelform_factory(self.model, fields=self.fields)
msg = "'%s' must either define 'form_class' or both 'model' and " \
"'fields', or override 'get_form_class()'"
raise ImproperlyConfigured(msg % self.__class__.__name__)
def get_form(self, data=None, files=None, **kwargs):
"""
Returns a form instance.
"""
cls = self.get_form_class()
return cls(data=data, files=files, **kwargs)
# Pagination
def get_paginate_by(self):
"""
Returns the size of pages to use with pagination.
"""
return self.paginate_by
def get_paginator(self, queryset, page_size):
"""
Returns a paginator instance.
"""
return Paginator(queryset, page_size)
def paginate_queryset(self, queryset, page_size):
"""
Paginates a queryset, and returns a page object.
"""
paginator = self.get_paginator(queryset, page_size)
page_kwarg = self.kwargs.get(self.page_kwarg)
page_query_param = self.request.GET.get(self.page_kwarg)
page_number = page_kwarg or page_query_param or 1
try:
page_number = int(page_number)
except ValueError:
if page_number == 'last':
page_number = paginator.num_pages
else:
msg = "Page is not 'last', nor can it be converted to an int."
raise Http404(_(msg))
try:
return paginator.page(page_number)
except InvalidPage as exc:
msg = 'Invalid page (%s): %s'
raise Http404(_(msg % (page_number, str(exc))))
# Response rendering
def get_context_object_name(self, is_list=False):
"""
Returns a descriptive name to use in the context in addition to the
default 'object'/'object_list'.
"""
if self.context_object_name is not None:
return self.context_object_name
elif self.model is not None:
fmt = '%s_list' if is_list else '%s'
return fmt % self.model._meta.object_name.lower()
return None
def get_context_data(self, **kwargs):
"""
Returns a dictionary to use as the context of the response.
Takes a set of keyword arguments to use as the base context,
and adds the following keys:
* 'view'
* Optionally, 'object' or 'object_list'
* Optionally, '{context_object_name}' or '{context_object_name}_list'
"""
kwargs['view'] = self
if getattr(self, 'object', None) is not None:
kwargs['object'] = self.object
context_object_name = self.get_context_object_name()
if context_object_name:
kwargs[context_object_name] = self.object
if getattr(self, 'object_list', None) is not None:
kwargs['object_list'] = self.object_list
context_object_name = self.get_context_object_name(is_list=True)
if context_object_name:
kwargs[context_object_name] = self.object_list
return kwargs
def get_template_names(self):
"""
Returns a list of template names to use when rendering the response.
If `.template_name` is not specified, then defaults to the following
pattern: "{app_label}/{model_name}{template_name_suffix}.html"
"""
if self.template_name is not None:
return [self.template_name]
if self.model is not None and self.template_name_suffix is not None:
return ["%s/%s%s.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
self.template_name_suffix
)]
msg = "'%s' must either define 'template_name' or 'model' and " \
"'template_name_suffix', or override 'get_template_names()'"
raise ImproperlyConfigured(msg % self.__class__.__name__)
def render_to_response(self, context):
"""
Given a context dictionary, returns an HTTP response.
"""
return TemplateResponse(
request=self.request,
template=self.get_template_names(),
context=context
)
## The concrete model views
class ListView(GenericModelView):
template_name_suffix = '_list'
allow_empty = True
def get(self, request, *args, **kwargs):
queryset = self.get_queryset()
paginate_by = self.get_paginate_by()
if not self.allow_empty and not queryset.exists():
raise Http404
if paginate_by is None:
# Unpaginated response
self.object_list = queryset
context = self.get_context_data(
page_obj=None,
is_paginated=False,
paginator=None,
)
else:
# Paginated response
page = self.paginate_queryset(queryset, paginate_by)
self.object_list = page.object_list
context = self.get_context_data(
page_obj=page,
is_paginated=page.has_other_pages(),
paginator=page.paginator,
)
return self.render_to_response(context)
class DetailView(GenericModelView):
template_name_suffix = '_detail'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data()
return self.render_to_response(context)
class CreateView(GenericModelView):
success_url = None
template_name_suffix = '_form'
def get(self, request, *args, **kwargs):
form = self.get_form()
context = self.get_context_data(form=form)
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
form = self.get_form(data=request.POST, files=request.FILES)
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
def form_valid(self, form):
self.object = form.save()
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
context = self.get_context_data(form=form)
return self.render_to_response(context)
def get_success_url(self):
try:
return self.success_url or self.object.get_absolute_url()
except AttributeError:
msg = "No URL to redirect to. '%s' must provide 'success_url' " \
"or define a 'get_absolute_url()' method on the Model."
raise ImproperlyConfigured(msg % self.__class__.__name__)
class UpdateView(GenericModelView):
success_url = None
template_name_suffix = '_form'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form(instance=self.object)
context = self.get_context_data(form=form)
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form(data=request.POST, files=request.FILES, instance=self.object)
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
def form_valid(self, form):
self.object = form.save()
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
context = self.get_context_data(form=form)
return self.render_to_response(context)
def get_success_url(self):
try:
return self.success_url or self.object.get_absolute_url()
except AttributeError:
msg = "No URL to redirect to. '%s' must provide 'success_url' " \
"or define a 'get_absolute_url()' method on the Model."
raise ImproperlyConfigured(msg % self.__class__.__name__)
class DeleteView(GenericModelView):
success_url = None
template_name_suffix = '_confirm_delete'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data()
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
if self.success_url is None:
msg = "No URL to redirect to. '%s' must define 'success_url'"
raise ImproperlyConfigured(msg % self.__class__.__name__)
return self.success_url
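# A minimal usage sketch for the views above (Author and the success URL are
# hypothetical, illustrative names, not part of this module):
#
#   class AuthorList(ListView):
#       model = Author             # hypothetical model
#       paginate_by = 25           # turns pagination on
#
#   class AuthorCreate(CreateView):
#       model = Author
#       fields = ['name']          # a ModelForm is generated from these
#       success_url = '/authors/'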
|
czbiohub/singlecell-dash
|
refs/heads/master
|
singlecell_dash/app.py
|
1
|
# -*- coding: utf-8 -*-
import locale
import dash
import dash_html_components as html
import dash_core_components as dcc
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from .apps.diff_expr import DifferentialExpression
from .apps.color_by import ColorByGeneExpression, ColorByMetadata
from .apps.gene_vs_gene import GeneVsGene
from .apps.smushed_plot import SmushedPlot
from .apps.dropdown_subset import SubsetGroup
from .apps.umis_vs_genes import UMIsVsGenesGate
# Use commas to separate thousands
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
def run_singlecell_dash(cell_metadata, counts, dropdown_col, smushed,
top_genes=None, javascript=None):
# def run_singlecell_dash(javascript=None):
app = dash.Dash()
if javascript is not None:
app.scripts.append_script({"external_url": javascript})
# Necessary with a modular layout since we add callbacks without adding a
# layout
app.config.supress_callback_exceptions = True
# Add nice looking css
app.css.append_css({"external_url":
"https://codepen.io/chriddyp/pen/bWLwgP.css"})
    # instantiating each of these components registers its callbacks
# gene_vs_gene = GeneVsGene(app, cell_metadata, counts, group_col)
subset = SubsetGroup(app, cell_metadata[dropdown_col].unique(),
name=dropdown_col)
color_by_gene_expression = ColorByGeneExpression(app, counts,
cell_metadata,
dropdown_col)
color_by_metadata = ColorByMetadata(app, cell_metadata, dropdown_col)
smushed = SmushedPlot(app, cell_metadata, dropdown_col, smushed, counts,
top_genes=top_genes)
diff_expr = DifferentialExpression(app, cell_metadata, dropdown_col,
counts)
gate = UMIsVsGenesGate(app, cell_metadata, dropdown_col)
gene_vs_gene = GeneVsGene(app, cell_metadata, counts, dropdown_col)
# now insert this component into the app's layout
app.layout = html.Div([html.H1('Single Cell Dashboard App'),
subset.layout,
html.Div([html.Div([color_by_gene_expression.layout,
color_by_metadata.layout,
smushed.layout],
className='six columns'),
diff_expr.layout
],
className='row'),
html.Div([gate.layout, gene_vs_gene.layout
],
className='row')],
className='ten columns offset-by-one')
return app
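# A hedged sketch of wiring this up (the argument values are placeholders;
# Dash's run_server() starts the development server):
#
#   app = run_singlecell_dash(cell_metadata, counts, dropdown_col='tissue',
#                             smushed=smushed_coords, top_genes=top_genes)
#   app.run_server(debug=True)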
|
dga4654dan/UTM-Demo
|
refs/heads/master
|
V_1_0_1/UtmDemo_Sfs_2.9.0/UtmDemo_Sfs_2.9.0_Server/lib/Lib/sre.py
|
8
|
#
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9].
\D Matches any non-digit character; equivalent to the set [^0-9].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v].
\S Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module take flags as optional parameters:
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines as well as the string.
"$" matches the end of lines as well as the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE Make \w, \W, \b, \B, dependent on the Unicode locale.
This module also defines an exception 'error'.
"""
import sys
import sre_compile
import sre_parse
# public symbols
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
"compile", "purge", "template", "escape", "I", "L", "M", "S", "X",
"U", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
"UNICODE", "error" ]
__version__ = "2.2.1"
# this module works under 1.5.2 and later. don't use string methods
import string
# flags
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode locale
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments
# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation
# sre exception
error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
"""Try to apply the pattern at the start of the string, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).match(string)
def search(pattern, string, flags=0):
"""Scan through string looking for a match to the pattern, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).search(string)
def sub(pattern, repl, string, count=0):
"""Return the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl"""
return _compile(pattern, 0).sub(repl, string, count)
def subn(pattern, repl, string, count=0):
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made."""
return _compile(pattern, 0).subn(repl, string, count)
def split(pattern, string, maxsplit=0):
"""Split the source string by the occurrences of the pattern,
returning a list containing the resulting substrings."""
return _compile(pattern, 0).split(string, maxsplit)
def findall(pattern, string):
"""Return a list of all non-overlapping matches in the string.
If one or more groups are present in the pattern, return a
list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result."""
return _compile(pattern, 0).findall(string)
if sys.hexversion >= 0x02020000:
__all__.append("finditer")
def finditer(pattern, string):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _compile(pattern, 0).finditer(string)
def compile(pattern, flags=0):
"Compile a regular expression pattern, returning a pattern object."
return _compile(pattern, flags)
def purge():
"Clear the regular expression cache"
_cache.clear()
_cache_repl.clear()
def template(pattern, flags=0):
"Compile a template pattern, returning a pattern object"
return _compile(pattern, flags|T)
def escape(pattern):
"Escape all non-alphanumeric characters in pattern."
s = list(pattern)
for i in range(len(pattern)):
c = pattern[i]
if not ("a" <= c <= "z" or "A" <= c <= "Z" or "0" <= c <= "9"):
if c == "\000":
s[i] = "\\000"
else:
s[i] = "\\" + c
return _join(s, pattern)
# --------------------------------------------------------------------
# internals
_cache = {}
_cache_repl = {}
_pattern_type = type(sre_compile.compile("", 0))
_MAXCACHE = 100
def _join(seq, sep):
# internal: join into string having the same type as sep
return string.join(seq, sep[:0])
def _compile(*key):
# internal: compile pattern
p = _cache.get(key)
if p is not None:
return p
pattern, flags = key
if type(pattern) is _pattern_type:
return pattern
if type(pattern) not in sre_compile.STRING_TYPES:
raise TypeError, "first argument must be string or compiled pattern"
try:
p = sre_compile.compile(pattern, flags)
except error, v:
raise error, v # invalid expression
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache[key] = p
return p
def _compile_repl(*key):
# internal: compile replacement pattern
p = _cache_repl.get(key)
if p is not None:
return p
repl, pattern = key
try:
p = sre_parse.parse_template(repl, pattern)
except error, v:
raise error, v # invalid expression
if len(_cache_repl) >= _MAXCACHE:
_cache_repl.clear()
_cache_repl[key] = p
return p
def _expand(pattern, match, template):
# internal: match.expand implementation hook
template = sre_parse.parse_template(template, pattern)
return sre_parse.expand_template(template, match)
def _subx(pattern, template):
# internal: pattern.sub/subn implementation helper
template = _compile_repl(template, pattern)
if not template[0] and len(template[1]) == 1:
# literal replacement
return template[1][0]
def filter(match, template=template):
return sre_parse.expand_template(template, match)
return filter
# register myself for pickling
import copy_reg
def _pickle(p):
return _compile, (p.pattern, p.flags)
copy_reg.pickle(_pattern_type, _pickle, _compile)
# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)
class Scanner:
def __init__(self, lexicon, flags=0):
from sre_constants import BRANCH, SUBPATTERN
self.lexicon = lexicon
# combine phrases into a compound pattern
p = []
s = sre_parse.Pattern()
s.flags = flags
for phrase, action in lexicon:
p.append(sre_parse.SubPattern(s, [
(SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
]))
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
s.groups = len(p)
self.scanner = sre_compile.compile(p)
def scan(self, string):
result = []
append = result.append
match = self.scanner.scanner(string).match
i = 0
while 1:
m = match()
if not m:
break
j = m.end()
if i == j:
break
action = self.lexicon[m.lastindex-1][1]
if callable(action):
self.match = m
action = action(self, m.group())
if action is not None:
append(action)
i = j
return result, string[i:]
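# Illustrative use of the experimental Scanner above -- a minimal sketch;
# the token names are hypothetical:
#
#     scanner = Scanner([
#         (r"[0-9]+", lambda s, tok: ("NUM", int(tok))),
#         (r"[a-z]+", lambda s, tok: ("NAME", tok)),
#         (r"[ \t]+", None),                 # skip whitespace
#     ])
#     tokens, rest = scanner.scan("abc 123")
#     # tokens == [('NAME', 'abc'), ('NUM', 123)], rest == ''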
|
drewp/tahoe-lafs
|
refs/heads/master
|
setuptools-0.6c16dev3.egg/setuptools/command/scriptsetup.py
|
2
|
from distutils.errors import DistutilsSetupError
from setuptools import Command
import sys
class scriptsetup(Command):
action = (sys.platform == "win32"
and "set up .pyscript association and PATHEXT variable to run scripts"
or "this does nothing on non-Windows platforms")
user_options = [
('allusers', 'a',
'make changes for all users of this Windows installation (requires Administrator privileges)'),
]
boolean_options = ['allusers']
def initialize_options(self):
self.allusers = False
def finalize_options(self):
pass
def run(self):
if sys.platform != "win32":
print "\n'scriptsetup' isn't needed on non-Windows platforms."
else:
do_scriptsetup(self.allusers)
def do_scriptsetup(allusers=False):
print "\nSetting up environment to run scripts for %s..." % (allusers and "all users" or "the current user")
from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, HKEY_CLASSES_ROOT, \
REG_SZ, REG_EXPAND_SZ, KEY_QUERY_VALUE, KEY_SET_VALUE, \
OpenKey, CreateKey, QueryValueEx, SetValueEx, FlushKey, CloseKey
USER_ENV = "Environment"
try:
user_env = OpenKey(HKEY_CURRENT_USER, USER_ENV, 0, KEY_QUERY_VALUE)
except WindowsError, e:
raise DistutilsSetupError("I could not read the user environment from the registry.\n%r" % (e,))
SYSTEM_ENV = "SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment"
try:
system_env = OpenKey(HKEY_LOCAL_MACHINE, SYSTEM_ENV, 0, KEY_QUERY_VALUE)
except WindowsError, e:
raise DistutilsSetupError("I could not read the system environment from the registry.\n%r" % (e,))
# HKEY_CLASSES_ROOT is a merged view that would only confuse us.
# <http://technet.microsoft.com/en-us/library/cc739822(WS.10).aspx>
USER_CLASSES = "SOFTWARE\\Classes"
try:
user_classes = OpenKey(HKEY_CURRENT_USER, USER_CLASSES, 0, KEY_QUERY_VALUE)
except WindowsError, e:
raise DistutilsSetupError("I could not read the user filetype associations from the registry.\n%r" % (e,))
SYSTEM_CLASSES = "SOFTWARE\\Classes"
try:
system_classes = OpenKey(HKEY_LOCAL_MACHINE, SYSTEM_CLASSES, 0, KEY_QUERY_VALUE)
except WindowsError, e:
raise DistutilsSetupError("I could not read the system filetype associations from the registry.\n%r" % (e,))
def query(key, subkey, what):
try:
(value, type) = QueryValueEx(key, subkey)
except WindowsError, e:
if e.winerror == 2: # not found
return None
raise DistutilsSetupError("I could not read %s from the registry.\n%r" % (what, e))
# It does not matter that we don't expand environment strings, in fact it's better not to.
if type != REG_SZ and type != REG_EXPAND_SZ:
raise DistutilsSetupError("I expected the registry entry for %s to have a string type (REG_SZ or REG_EXPAND_SZ), "
"and was flummoxed by it having type code %r." % (what, type))
return (value, type)
def open_and_query(key, path, subkey, what):
try:
read_key = OpenKey(key, path, 0, KEY_QUERY_VALUE)
except WindowsError, e:
if e.winerror == 2: # not found
return None
raise DistutilsSetupError("I could not read %s from the registry because I could not open "
"the parent key.\n%r" % (what, e))
try:
return query(read_key, subkey, what)
finally:
CloseKey(read_key)
def update(key_name_path, subkey, desired_value, desired_type, goal, what):
(key, name, path) = key_name_path
(old_value, old_type) = open_and_query(key, path, subkey, what) or (None, None)
if (old_value, old_type) == (desired_value, desired_type):
print "Already done: %s." % (goal,)
return False
try:
update_key = OpenKey(key, path, 0, KEY_SET_VALUE|KEY_QUERY_VALUE)
except WindowsError, e:
if e.winerror != 2:
raise DistutilsSetupError("I tried to %s, but was not successful because I could not open "
"the registry key %s\\%s for writing.\n%r"
% (goal, name, path, e))
try:
update_key = CreateKey(key, path)
except WindowsError, e:
raise DistutilsSetupError("I tried to %s, but was not successful because the registry key %s\\%s "
"did not exist, and I was unable to create it.\n%r"
% (goal, name, path, e))
(new_value, new_type) = (None, None)
try:
SetValueEx(update_key, subkey, 0, desired_type, desired_value)
except WindowsError, e:
raise DistutilsSetupError("I tried to %s, but was not able to set the subkey %r under %s\\%s to be %r.\n%r"
% (goal, subkey, name, path, desired_value))
else:
(new_value, new_type) = query(update_key, subkey, what) or (None, None)
finally:
FlushKey(update_key)
CloseKey(update_key)
if (new_value, new_type) != (desired_value, desired_type):
raise DistutilsSetupError("I tried to %s by setting the subkey %r under %s\\%s to be %r, "
"and the call to SetValueEx succeeded, but the value ended up as "
"%r instead (it was previously %r). Maybe the update was unexpectedly virtualized?"
% (goal, subkey, name, path, desired_value, new_value, old_value))
print "Done: %s." % (goal,)
return True
# Maintenance hazard: 'add_to_environment' and 'associate' use very similar, but not identical logic.
def add_to_environment(varname, addition, change_allusers):
changed = False
what = "the %s environment variable %s" % (change_allusers and "system" or "user", varname)
goal = "add %s to %s" % (addition, what)
system_valueandtype = query(system_env, varname, "the system environment variable %s" % (varname,))
user_valueandtype = query(user_env, varname, "the user environment variable %s" % (varname,))
if change_allusers:
(value, type) = system_valueandtype or (u'', REG_SZ)
key_name_path = (HKEY_LOCAL_MACHINE, "HKEY_LOCAL_MACHINE", SYSTEM_ENV)
else:
(value, type) = user_valueandtype or system_valueandtype or (u'', REG_SZ)
key_name_path = (HKEY_CURRENT_USER, "HKEY_CURRENT_USER", USER_ENV)
if addition.lower() in value.lower().split(u';'):
print "Already done: %s." % (goal,)
else:
changed |= update(key_name_path, varname, value + u';' + addition, type, goal, what)
if change_allusers:
# Also change any overriding environment entry for the current user.
(user_value, user_type) = user_valueandtype or (u'', REG_SZ)
split_value = user_value.lower().split(u';')
if not (addition.lower() in split_value or u'%'+varname.lower()+u'%' in split_value):
now_what = "the overriding user environment variable %s" % (varname,)
changed |= update((HKEY_CURRENT_USER, "HKEY_CURRENT_USER", USER_ENV),
varname, user_value + u';' + addition, user_type,
"add %s to %s" % (addition, now_what), now_what)
return changed
def associate(ext, target, change_allusers):
changed = False
what = "the %s association for %s" % (change_allusers and "system" or "user", ext)
goal = "associate the filetype %s with %s for %s" % (ext, target, change_allusers and "all users" or "the current user")
try:
if change_allusers:
target_key = OpenKey(HKEY_LOCAL_MACHINE, "%s\\%s" % (SYSTEM_CLASSES, target), 0, KEY_QUERY_VALUE)
else:
target_key = OpenKey(HKEY_CLASSES_ROOT, target, 0, KEY_QUERY_VALUE)
except WindowsError, e:
raise DistutilsSetupError("I was going to %s, but that won't work because the %s class does not exist in the registry, "
"as far as I can tell.\n%r" % (goal, target, e))
CloseKey(target_key)
system_key_name_path = (HKEY_LOCAL_MACHINE, "HKEY_LOCAL_MACHINE", "%s\\%s" % (SYSTEM_CLASSES, ext))
user_key_name_path = (HKEY_CURRENT_USER, "HKEY_CURRENT_USER", "%s\\%s" % (USER_CLASSES, ext))
system_valueandtype = open_and_query(system_classes, ext, "", "the system association for %s" % (ext,))
user_valueandtype = open_and_query(user_classes, ext, "", "the user association for %s" % (ext,))
if change_allusers:
(value, type) = system_valueandtype or (u'', REG_SZ)
key_name_path = system_key_name_path
else:
(value, type) = user_valueandtype or system_valueandtype or (u'', REG_SZ)
key_name_path = user_key_name_path
if value == target:
print "Already done: %s." % (goal,)
else:
changed |= update(key_name_path, "", unicode(target), REG_SZ, goal, what)
if change_allusers:
# Also change any overriding association for the current user.
(user_value, user_type) = user_valueandtype or (u'', REG_SZ)
if user_value != target:
changed |= update(user_key_name_path, "", unicode(target), REG_SZ,
"associate the filetype %s with %s for the current user " \
"(because the system association is overridden)" % (ext, target),
"the overriding user association for %s" % (ext,))
return changed
def broadcast_settingchange(change_allusers):
print "Broadcasting that the environment has changed, please wait..."
# <http://support.microsoft.com/kb/104011/en-us>
# <http://msdn.microsoft.com/en-us/library/ms644952(VS.85).aspx>
# LRESULT WINAPI SendMessageTimeoutW(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam,
# UINT fuFlags, UINT uTimeout, PDWORD_PTR lpdwResult);
try:
from ctypes import WINFUNCTYPE, POINTER, windll, addressof, c_wchar_p
from ctypes.wintypes import LONG, HWND, UINT, WPARAM, LPARAM, DWORD
SendMessageTimeout = WINFUNCTYPE(POINTER(LONG), HWND, UINT, WPARAM, LPARAM, UINT, UINT, POINTER(POINTER(DWORD))) \
(("SendMessageTimeoutW", windll.user32))
HWND_BROADCAST = 0xFFFF
WM_SETTINGCHANGE = 0x001A
SMTO_ABORTIFHUNG = 0x0002
SendMessageTimeout(HWND_BROADCAST, WM_SETTINGCHANGE, change_allusers and 1 or 0,
addressof(c_wchar_p(u"Environment")), SMTO_ABORTIFHUNG, 5000, None);
except Exception, e:
print "Warning: %r" % (e,)
changed_assoc = associate(".pyscript", "Python.File", allusers)
changed_env = False
try:
changed_env |= add_to_environment("PATHEXT", ".pyscript", allusers)
changed_env |= add_to_environment("PATHEXT", ".pyw", allusers)
finally:
CloseKey(user_env)
CloseKey(system_env)
if changed_assoc or changed_env:
broadcast_settingchange(allusers)
if changed_env:
# whether logout is needed seems to randomly differ between installations
# of XP, but it is not needed in Vista or later.
try:
import platform, re
need_logout = not re.search(r'^[6-9]|([1-9][0-9]+)\.', platform.version())
except Exception, e:
e # hush pyflakes
need_logout = True
if need_logout:
print """
***********************************************************************
Changes have been made to the persistent environment, but they may not
take effect in this Windows session. Running installed Python scripts
from a Command Prompt may only work after you have logged out and back
in again, or rebooted.
***********************************************************************
"""
else:
print """
***********************************************************************
Changes have been made to the persistent environment, but not in this
Command Prompt. Running installed Python scripts will only work from
new Command Prompts opened from now on.
***********************************************************************
"""
|
mareuter/lct-python
|
refs/heads/master
|
tests/utils/test_moon_info.py
|
1
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Copyright (c) 2014, Michael Reuter
# Distributed under the MIT License. See LICENSE.txt for more information.
#------------------------------------------------------------------------------
'''
Tests for the MoonInfo class.
'''
import unittest
import ephem
from lct.utils.moon_info import MoonInfo
class MoonInfoTestCase(unittest.TestCase):
def setUp(self):
obs = ephem.Observer()
# This is October 18, 2013 6 PM EDT
obs.date = ephem.Date('2013/10/18 22:00:00')
self.moon = MoonInfo()
self.moon.compute(obs)
def test_age(self):
age = self.moon.age()
self.assertEqual(age, 13.892695861570246)
def test_colongitude(self):
colong = self.moon.colong()
self.assertEqual(colong, 1.4655861265848968)
def test_illumination(self):
illum = self.moon.illumination()
self.assertEqual(illum, 0.9998519924481626)
def test_libration(self):
lon_lib = self.moon.libration("long")
self.assertEqual(lon_lib, 0.09129949120754838)
lat_lib = self.moon.libration("lat")
self.assertEqual(lat_lib, -0.025810296625959822)
def test_phase(self):
phase = self.moon.getPhase()
self.assertEqual(phase, 3)
def test_phase_name(self):
phase_name = self.moon.getPhaseAsString()
self.assertEqual(phase_name, "Waxing Gibbous")
def test_next_four_phases(self):
next_phases = self.moon.findNextFourPhases()
real_phases = [("full", 41564.48448662116),
("tq", 41572.486443861955),
("new", 41580.03469748699),
("fq", 41586.74803717344)]
self.assertEqual(next_phases, real_phases)
def test_time_from_new_moon(self):
        truth_time_from_new_moon = 333.4247006776859  # hours
self.assertEqual(self.moon.timeFromNewMoon(), truth_time_from_new_moon)
def test_time_to_new_moon(self):
        truth_time_to_new_moon = 374.8327396878158  # hours
self.assertEqual(self.moon.timeToNewMoon(), truth_time_to_new_moon)
def test_time_to_full_moon(self):
        truth_time_to_full_moon = 0.06781995449273381  # days
self.assertEqual(self.moon.timeToFullMoon(), truth_time_to_full_moon)
def suite():
"""
Return a test suite consisting of all the test cases in the module.
"""
theSuite = unittest.TestSuite()
theSuite.addTest(unittest.makeSuite(MoonInfoTestCase))
return theSuite
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
3dfxsoftware/cbss-addons
|
refs/heads/master
|
lct_hr/models/hr_salary_rule.py
|
2
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class hr_salary_rule(osv.osv):
_inherit = 'hr.salary.rule'
_columns = {
# Override for translations
        'name': fields.char('Name', size=256, required=True, readonly=False, translate=True),
}
|
a2211009/leetcode-1
|
refs/heads/master
|
solutions/098.Validate_Binary_Search_Tree/AC_dfs_n.py
|
7
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <iilluzen[at]gmail.com>
# File: AC_dfs_n.py
# Create Date: 2015-03-03 14:53:53
# Usage: AC_dfs_n.py
# Description:
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param root, a tree node
# @return a boolean
def isValidBST(self, root):
def dfs(root, minval, maxval):
if root is None:
return True
return minval < root.val < maxval and dfs(root.left, minval, root.val) and dfs(root.right, root.val, maxval)
return dfs(root, -1<<32, 1<<32)
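# Illustrative check -- a minimal sketch (the tree below is hypothetical):
#
#     root = TreeNode(2)            #     2
#     root.left = TreeNode(1)       #    / \
#     root.right = TreeNode(3)      #   1   3
#     print Solution().isValidBST(root)   # -> True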
|
dpassante/ansible
|
refs/heads/devel
|
lib/ansible/utils/cmd_functions.py
|
233
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import select
import shlex
import subprocess
import sys
from ansible.module_utils.six import PY2, PY3
from ansible.module_utils._text import to_bytes
def run_cmd(cmd, live=False, readsize=10):
# readsize = 10
# On python2, shlex needs byte strings
if PY2:
cmd = to_bytes(cmd, errors='surrogate_or_strict')
cmdargs = shlex.split(cmd)
    # subprocess should be passed byte strings. (on python2.6 it must be
    # passed byte strings)
cmdargs = [to_bytes(a, errors='surrogate_or_strict') for a in cmdargs]
p = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = b''
stderr = b''
rpipes = [p.stdout, p.stderr]
while True:
rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
if p.stdout in rfd:
dat = os.read(p.stdout.fileno(), readsize)
if live:
# On python3, stdout has a codec to go from text type to bytes
if PY3:
sys.stdout.buffer.write(dat)
else:
sys.stdout.write(dat)
stdout += dat
if dat == b'':
rpipes.remove(p.stdout)
if p.stderr in rfd:
dat = os.read(p.stderr.fileno(), readsize)
stderr += dat
if live:
# On python3, stdout has a codec to go from text type to bytes
if PY3:
sys.stdout.buffer.write(dat)
else:
sys.stdout.write(dat)
if dat == b'':
rpipes.remove(p.stderr)
# only break out if we've emptied the pipes, or there is nothing to
# read from and the process has finished.
if (not rpipes or not rfd) and p.poll() is not None:
break
# Calling wait while there are still pipes to read can cause a lock
elif not rpipes and p.poll() is None:
p.wait()
return p.returncode, stdout, stderr
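# Illustrative usage -- a minimal sketch; the output shown assumes a POSIX
# system with `echo` on PATH:
#
#     rc, out, err = run_cmd('echo hello', live=False)
#     # rc == 0, out == b'hello\n', err == b''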
|
Delphine-L/tools-iuc
|
refs/heads/master
|
tools/anndata/loompy_to_tsv.py
|
16
|
#!/usr/bin/env python
"""Converts a loompy file to tsv file(s). Each layer becomes a new file."""
import argparse
import loompy
parser = argparse.ArgumentParser(description="Loompy file converter flags")
parser.add_argument('--version', action='version', version='%(prog)s 0.1.0',
help="Displays tool version")
parser.add_argument("-f", "--file", help="loom file to import")
args = parser.parse_args()
file = args.file
matrices = []
allcols = []
colstrings = []
allrows = []
# Build background info for all attributes and layers
loompyfile = loompy.connect(file)
row_attributes = loompyfile.ra.keys() # List of row attributes
for row in row_attributes: # Each list represents rownames for row_attributes
c_row = loompyfile.ra[row]
c_row = [str(r) for r in c_row]
allrows.append(c_row)
col_attributes = loompyfile.ca.keys() # List of column attributes
for col in col_attributes: # each list represents colnames for col_attributes
c_col = loompyfile.ca[col]
c_col = [str(c) for c in c_col]
allcols.append(c_col)
layers = loompyfile.layers.keys() # List of layers
for layer in layers: # List with each element being a loompy layer
c_layer = loompyfile[layer]
c_layer = c_layer[:, :]
c_layer = c_layer.astype(str)
matrices.append(c_layer)
# Create column attribute output
with open("attributes/col_attr.tsv", "w") as colout:
col_attributes = "\t".join(col_attributes) + "\n"
colout.write(col_attributes)
for length in range(0, len(c_col)):
attributestring = ""
for col in allcols:
attributestring = attributestring + col[length] + "\t"
while attributestring[-1] == "\t":
attributestring = attributestring[:-1]
colout.write(attributestring)
colout.write("\n")
# Create row attribute output
with open("attributes/row_attr.tsv", "w") as rowout:
row_attributes = "\t".join(row_attributes) + "\n"
rowout.write(row_attributes)
for length in range(0, len(c_row)):
attributestring = ""
for row in allrows:
attributestring = attributestring + row[length] + "\t"
while attributestring[-1] == "\t":
attributestring = attributestring[:-1]
rowout.write(attributestring)
rowout.write("\n")
# Build output files for each layer
for x in range(0, len(layers)):
# Output file name generation
if layers[x] in layers[0: x]: # Different output names if layers have same names somehow
        repeats = layers[0:x].count(layers[x])
        outputname = "output/" + layers[x] + str(repeats) + ".tsv"
elif layers[x] == "": # Empty layer name
outputname = "output/mainmatrix.tsv"
else:
outputname = "output/" + str(layers[x]) + ".tsv" # Usual case
# Matrix output
with open(outputname, "w") as outputmatrix:
for line in matrices[x]:
line = "\t".join(line)
line += "\n"
line = line
outputmatrix.write(line)
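# Illustrative invocation -- a sketch; the 'attributes/' and 'output/'
# directories are written to but never created here, so the caller
# (presumably the Galaxy tool wrapper) must create them first:
#
#     mkdir -p attributes output
#     python loompy_to_tsv.py -f input.loom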
|
nugget/home-assistant
|
refs/heads/dev
|
homeassistant/components/media_player/pioneer.py
|
4
|
"""
Support for Pioneer Network Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.pioneer/
"""
import logging
import telnetlib
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, CONF_TIMEOUT, STATE_OFF, STATE_ON)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Pioneer AVR'
DEFAULT_PORT = 23 # telnet default. Some Pioneer AVRs use 8102
DEFAULT_TIMEOUT = None
SUPPORT_PIONEER = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
MAX_VOLUME = 185
MAX_SOURCE_NUMBERS = 60
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.socket_timeout,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Pioneer platform."""
pioneer = PioneerDevice(
config.get(CONF_NAME), config.get(CONF_HOST), config.get(CONF_PORT),
config.get(CONF_TIMEOUT))
if pioneer.update():
add_entities([pioneer])
class PioneerDevice(MediaPlayerDevice):
"""Representation of a Pioneer device."""
def __init__(self, name, host, port, timeout):
"""Initialize the Pioneer device."""
self._name = name
self._host = host
self._port = port
self._timeout = timeout
self._pwstate = 'PWR1'
self._volume = 0
self._muted = False
self._selected_source = ''
self._source_name_to_number = {}
self._source_number_to_name = {}
@classmethod
def telnet_request(cls, telnet, command, expected_prefix):
"""Execute `command` and return the response."""
try:
telnet.write(command.encode("ASCII") + b"\r")
except telnetlib.socket.timeout:
_LOGGER.debug("Pioneer command %s timed out", command)
return None
        # The receiver will randomly send state-change updates; make sure
        # we get the response we are looking for.
for _ in range(3):
result = telnet.read_until(b"\r\n", timeout=0.2).decode("ASCII") \
.strip()
if result.startswith(expected_prefix):
return result
return None
def telnet_command(self, command):
"""Establish a telnet connection and sends command."""
try:
try:
telnet = telnetlib.Telnet(
self._host, self._port, self._timeout)
except (ConnectionRefusedError, OSError):
_LOGGER.warning("Pioneer %s refused connection", self._name)
return
telnet.write(command.encode("ASCII") + b"\r")
telnet.read_very_eager() # skip response
telnet.close()
except telnetlib.socket.timeout:
_LOGGER.debug(
"Pioneer %s command %s timed out", self._name, command)
def update(self):
"""Get the latest details from the device."""
try:
telnet = telnetlib.Telnet(self._host, self._port, self._timeout)
except (ConnectionRefusedError, OSError):
_LOGGER.warning("Pioneer %s refused connection", self._name)
return False
pwstate = self.telnet_request(telnet, "?P", "PWR")
if pwstate:
self._pwstate = pwstate
volume_str = self.telnet_request(telnet, "?V", "VOL")
self._volume = int(volume_str[3:]) / MAX_VOLUME if volume_str else None
muted_value = self.telnet_request(telnet, "?M", "MUT")
self._muted = (muted_value == "MUT0") if muted_value else None
# Build the source name dictionaries if necessary
if not self._source_name_to_number:
for i in range(MAX_SOURCE_NUMBERS):
result = self.telnet_request(
telnet, "?RGB" + str(i).zfill(2), "RGB")
if not result:
continue
source_name = result[6:]
source_number = str(i).zfill(2)
self._source_name_to_number[source_name] = source_number
self._source_number_to_name[source_number] = source_name
source_number = self.telnet_request(telnet, "?F", "FN")
if source_number:
self._selected_source = self._source_number_to_name \
.get(source_number[2:])
else:
self._selected_source = None
telnet.close()
return True
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._pwstate == "PWR1":
return STATE_OFF
if self._pwstate == "PWR0":
return STATE_ON
return None
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_PIONEER
@property
def source(self):
"""Return the current input source."""
return self._selected_source
@property
def source_list(self):
"""List of available input sources."""
return list(self._source_name_to_number.keys())
@property
def media_title(self):
"""Title of current playing media."""
return self._selected_source
def turn_off(self):
"""Turn off media player."""
self.telnet_command("PF")
def volume_up(self):
"""Volume up media player."""
self.telnet_command("VU")
def volume_down(self):
"""Volume down media player."""
self.telnet_command("VD")
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
# 60dB max
self.telnet_command(str(round(volume * MAX_VOLUME)).zfill(3) + "VL")
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self.telnet_command("MO" if mute else "MF")
def turn_on(self):
"""Turn the media player on."""
self.telnet_command("PO")
def select_source(self, source):
"""Select input source."""
self.telnet_command(self._source_name_to_number.get(source) + "FN")
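# Illustrative configuration.yaml entry for this platform -- a minimal
# sketch; the host value is a placeholder:
#
#     media_player:
#       - platform: pioneer
#         host: 192.168.0.10
#         port: 8102              # optional; defaults to 23
#         name: Living Room AVR   # optional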
|
ted-gould/nova
|
refs/heads/master
|
nova/tests/unit/test_policy.py
|
20
|
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Policy Engine For Nova."""
import os.path
from six.moves import StringIO
import mock
from oslo_serialization import jsonutils
import six.moves.urllib.request as urlrequest
from nova import context
from nova import exception
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests.unit import fake_policy
from nova.tests.unit import policy_fixture
from nova import utils
class PolicyFileTestCase(test.NoDBTestCase):
def setUp(self):
super(PolicyFileTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.target = {}
def test_modified_policy_reloads(self):
with utils.tempdir() as tmpdir:
tmpfilename = os.path.join(tmpdir, 'policy')
self.flags(policy_file=tmpfilename)
            # NOTE(uni): context construction invokes policy check to determine
# is_admin or not. As a side-effect, policy reset is needed here
# to flush existing policy cache.
policy.reset()
action = "example:test"
with open(tmpfilename, "w") as policyfile:
policyfile.write('{"example:test": ""}')
policy.enforce(self.context, action, self.target)
with open(tmpfilename, "w") as policyfile:
policyfile.write('{"example:test": "!"}')
policy._ENFORCER.load_rules(True)
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
class PolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
rules = {
"true": '@',
"example:allowed": '@',
"example:denied": "!",
"example:get_http": "http://www.example.com",
"example:my_file": "role:compute_admin or "
"project_id:%(project_id)s",
"example:early_and_fail": "! and @",
"example:early_or_success": "@ or !",
"example:lowercase_admin": "role:admin or role:sysadmin",
"example:uppercase_admin": "role:ADMIN or role:sysadmin",
}
policy.reset()
policy.init()
policy.set_rules({k: common_policy.parse_rule(v)
for k, v in rules.items()})
self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.target = {}
def test_enforce_nonexistent_action_throws(self):
action = "example:noexist"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_enforce_bad_action_throws(self):
action = "example:denied"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_enforce_bad_action_noraise(self):
action = "example:denied"
result = policy.enforce(self.context, action, self.target, False)
self.assertEqual(result, False)
def test_enforce_good_action(self):
action = "example:allowed"
result = policy.enforce(self.context, action, self.target)
self.assertEqual(result, True)
@mock.patch.object(urlrequest, 'urlopen')
def test_enforce_http_true(self, mock_urlrequest):
mock_urlrequest.return_value = StringIO("True")
action = "example:get_http"
target = {}
result = policy.enforce(self.context, action, target)
self.assertEqual(result, True)
@mock.patch.object(urlrequest, 'urlopen')
def test_enforce_http_false(self, mock_urlrequest):
mock_urlrequest.return_value = StringIO("False")
action = "example:get_http"
target = {}
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, target)
def test_templatized_enforcement(self):
target_mine = {'project_id': 'fake'}
target_not_mine = {'project_id': 'another'}
action = "example:my_file"
policy.enforce(self.context, action, target_mine)
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, target_not_mine)
def test_early_AND_enforcement(self):
action = "example:early_and_fail"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_early_OR_enforcement(self):
action = "example:early_or_success"
policy.enforce(self.context, action, self.target)
def test_ignore_case_role_check(self):
lowercase_action = "example:lowercase_admin"
uppercase_action = "example:uppercase_admin"
# NOTE(dprince) we mix case in the Admin role here to ensure
# case is ignored
admin_context = context.RequestContext('admin',
'fake',
roles=['AdMiN'])
policy.enforce(admin_context, lowercase_action, self.target)
policy.enforce(admin_context, uppercase_action, self.target)
class DefaultPolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(DefaultPolicyTestCase, self).setUp()
self.rules = {
"default": '',
"example:exist": "!",
}
self._set_rules('default')
self.context = context.RequestContext('fake', 'fake')
def _set_rules(self, default_rule):
policy.reset()
rules = {k: common_policy.parse_rule(v)
for k, v in self.rules.items()}
policy.init(rules=rules, default_rule=default_rule, use_conf=False)
def test_policy_called(self):
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, "example:exist", {})
def test_not_found_policy_calls_default(self):
policy.enforce(self.context, "example:noexist", {})
def test_default_not_found(self):
self._set_rules("default_noexist")
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, "example:noexist", {})
class IsAdminCheckTestCase(test.NoDBTestCase):
def setUp(self):
super(IsAdminCheckTestCase, self).setUp()
policy.init()
def test_init_true(self):
check = policy.IsAdminCheck('is_admin', 'True')
self.assertEqual(check.kind, 'is_admin')
self.assertEqual(check.match, 'True')
self.assertEqual(check.expected, True)
def test_init_false(self):
check = policy.IsAdminCheck('is_admin', 'nottrue')
self.assertEqual(check.kind, 'is_admin')
self.assertEqual(check.match, 'False')
self.assertEqual(check.expected, False)
def test_call_true(self):
check = policy.IsAdminCheck('is_admin', 'True')
self.assertEqual(check('target', dict(is_admin=True),
policy._ENFORCER), True)
self.assertEqual(check('target', dict(is_admin=False),
policy._ENFORCER), False)
def test_call_false(self):
check = policy.IsAdminCheck('is_admin', 'False')
self.assertEqual(check('target', dict(is_admin=True),
policy._ENFORCER), False)
self.assertEqual(check('target', dict(is_admin=False),
policy._ENFORCER), True)
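# Illustrative policy rule exercising the check tested above -- a sketch;
# the rule name is hypothetical:
#
#     "example:admin_only": "is_admin:True"
#
# i.e. an 'is_admin' check compares the credential flag against the literal
# written after the colon, as test_call_true/test_call_false verify.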
class AdminRolePolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(AdminRolePolicyTestCase, self).setUp()
self.policy = self.useFixture(policy_fixture.RoleBasedPolicyFixture())
self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.actions = policy.get_rules().keys()
self.target = {}
def test_enforce_admin_actions_with_nonadmin_context_throws(self):
"""Check if non-admin context passed to admin actions throws
Policy not authorized exception
"""
for action in self.actions:
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
class RealRolePolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(RealRolePolicyTestCase, self).setUp()
self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
self.non_admin_context = context.RequestContext('fake', 'fake',
roles=['member'])
self.admin_context = context.RequestContext('fake', 'fake', True,
roles=['member'])
self.target = {}
self.fake_policy = jsonutils.loads(fake_policy.policy_data)
self.admin_only_rules = (
"cells_scheduler_filter:TargetCellFilter",
"compute:unlock_override",
"compute:get_all_tenants",
"compute:create:forced_host",
"compute_extension:accounts",
"compute_extension:admin_actions",
"compute_extension:admin_actions:resetNetwork",
"compute_extension:admin_actions:injectNetworkInfo",
"compute_extension:admin_actions:migrateLive",
"compute_extension:admin_actions:resetState",
"compute_extension:admin_actions:migrate",
"compute_extension:aggregates",
"compute_extension:agents",
"compute_extension:baremetal_nodes",
"compute_extension:cells",
"compute_extension:cells:create",
"compute_extension:cells:delete",
"compute_extension:cells:update",
"compute_extension:cells:sync_instances",
"compute_extension:cloudpipe",
"compute_extension:cloudpipe_update",
"compute_extension:evacuate",
"compute_extension:extended_server_attributes",
"compute_extension:fixed_ips",
"compute_extension:flavor_access:addTenantAccess",
"compute_extension:flavor_access:removeTenantAccess",
"compute_extension:flavorextraspecs:create",
"compute_extension:flavorextraspecs:update",
"compute_extension:flavorextraspecs:delete",
"compute_extension:flavormanage",
"compute_extension:floating_ips_bulk",
"compute_extension:fping:all_tenants",
"compute_extension:hosts",
"compute_extension:hypervisors",
"compute_extension:instance_actions:events",
"compute_extension:instance_usage_audit_log",
"compute_extension:networks",
"compute_extension:networks_associate",
"compute_extension:quotas:update",
"compute_extension:quotas:delete",
"compute_extension:security_group_default_rules",
"compute_extension:server_diagnostics",
"compute_extension:services",
"compute_extension:shelveOffload",
"compute_extension:simple_tenant_usage:list",
"compute_extension:users",
"compute_extension:availability_zone:detail",
"compute_extension:used_limits_for_admin",
"compute_extension:migrations:index",
"compute_extension:os-assisted-volume-snapshots:create",
"compute_extension:os-assisted-volume-snapshots:delete",
"compute_extension:console_auth_tokens",
"compute_extension:os-server-external-events:create",
"os_compute_api:servers:create:forced_host",
"os_compute_api:servers:detail:get_all_tenants",
"os_compute_api:servers:index:get_all_tenants",
"network:attach_external_network",
"os_compute_api:os-admin-actions",
"os_compute_api:os-admin-actions:reset_network",
"os_compute_api:os-admin-actions:inject_network_info",
"os_compute_api:os-admin-actions:reset_state",
"os_compute_api:os-aggregates:index",
"os_compute_api:os-aggregates:create",
"os_compute_api:os-aggregates:show",
"os_compute_api:os-aggregates:update",
"os_compute_api:os-aggregates:delete",
"os_compute_api:os-aggregates:add_host",
"os_compute_api:os-aggregates:remove_host",
"os_compute_api:os-aggregates:set_metadata",
"os_compute_api:os-agents",
"os_compute_api:os-baremetal-nodes",
"os_compute_api:os-cells",
"os_compute_api:os-cells:create",
"os_compute_api:os-cells:delete",
"os_compute_api:os-cells:update",
"os_compute_api:os-cells:sync_instances",
"os_compute_api:os-cloudpipe",
"os_compute_api:os-evacuate",
"os_compute_api:os-extended-server-attributes",
"os_compute_api:os-fixed-ips",
"os_compute_api:os-flavor-access:remove_tenant_access",
"os_compute_api:os-flavor-access:add_tenant_access",
"os_compute_api:os-flavor-extra-specs:create",
"os_compute_api:os-flavor-extra-specs:update",
"os_compute_api:os-flavor-extra-specs:delete",
"os_compute_api:os-flavor-manage",
"os_compute_api:os-floating-ips-bulk",
"os_compute_api:os-floating-ip-dns:domain:delete",
"os_compute_api:os-floating-ip-dns:domain:update",
"os_compute_api:os-fping:all_tenants",
"os_compute_api:os-hosts",
"os_compute_api:os-hypervisors",
"os_compute_api:os-instance-actions:events",
"os_compute_api:os-instance-usage-audit-log",
"os_compute_api:os-lock-server:unlock:unlock_override",
"os_compute_api:os-migrate-server:migrate",
"os_compute_api:os-migrate-server:migrate_live",
"os_compute_api:os-networks",
"os_compute_api:os-networks-associate",
"os_compute_api:os-pci:index",
"os_compute_api:os-pci:detail",
"os_compute_api:os-pci:show",
"os_compute_api:os-quota-sets:update",
"os_compute_api:os-quota-sets:delete",
"os_compute_api:os-quota-sets:detail",
"os_compute_api:os-security-group-default-rules",
"os_compute_api:os-server-diagnostics",
"os_compute_api:os-services",
"os_compute_api:os-shelve:shelve_offload",
"os_compute_api:os-simple-tenant-usage:list",
"os_compute_api:os-availability-zone:detail",
"os_compute_api:os-used-limits",
"os_compute_api:os-migrations:index",
"os_compute_api:os-assisted-volume-snapshots:create",
"os_compute_api:os-assisted-volume-snapshots:delete",
"os_compute_api:os-console-auth-tokens",
"os_compute_api:os-quota-class-sets:update",
"os_compute_api:os-server-external-events:create")
self.admin_or_owner_rules = (
"default",
"compute:start",
"compute:stop",
"compute_extension:admin_actions:pause",
"compute_extension:admin_actions:unpause",
"compute_extension:admin_actions:suspend",
"compute_extension:admin_actions:resume",
"compute_extension:admin_actions:lock",
"compute_extension:admin_actions:unlock",
"compute_extension:admin_actions:createBackup",
"compute_extension:simple_tenant_usage:show",
"os_compute_api:servers:start",
"os_compute_api:servers:stop",
"os_compute_api:os-create-backup",
"os_compute_api:ips:index",
"os_compute_api:ips:show",
"os_compute_api:os-keypairs:create",
"os_compute_api:os-keypairs:delete",
"os_compute_api:os-keypairs:index",
"os_compute_api:os-keypairs:show",
"os_compute_api:os-lock-server:lock",
"os_compute_api:os-lock-server:unlock",
"os_compute_api:os-pause-server:pause",
"os_compute_api:os-pause-server:unpause",
"os_compute_api:os-quota-sets:show",
"os_compute_api:server-metadata:index",
"os_compute_api:server-metadata:show",
"os_compute_api:server-metadata:delete",
"os_compute_api:server-metadata:create",
"os_compute_api:server-metadata:update",
"os_compute_api:server-metadata:update_all",
"os_compute_api:os-simple-tenant-usage:show",
"os_compute_api:os-suspend-server:suspend",
"os_compute_api:os-suspend-server:resume",
"os_compute_api:os-tenant-networks")
self.empty_rules = (
"compute:create",
"compute:create:attach_network",
"compute:create:attach_volume",
"compute:delete",
"compute:force_delete",
"compute:get_all_instance_metadata",
"compute:get_all_instance_system_metadata",
"compute:get_console_output",
"compute:get_diagnostics",
"compute:delete_instance_metadata",
"compute:get",
"compute:get_all",
"compute:shelve",
"compute:shelve_offload",
"compute:snapshot_volume_backed",
"compute:unshelve",
"compute:resize",
"compute:confirm_resize",
"compute:revert_resize",
"compute:rebuild",
"compute:reboot",
"compute:volume_snapshot_create",
"compute:volume_snapshot_delete",
"compute:add_fixed_ip",
"compute:attach_interface",
"compute:detach_interface",
"compute:attach_volume",
"compute:detach_volume",
"compute:backup",
"compute:get_instance_diagnostics",
"compute:get_instance_faults",
"compute:get_instance_metadata",
"compute:get_lock",
"compute:get_mks_console",
"compute:get_rdp_console",
"compute:get_serial_console",
"compute:get_spice_console",
"compute:get_vnc_console",
"compute:inject_network_info",
"compute:lock",
"compute:pause",
"compute:remove_fixed_ip",
"compute:rescue",
"compute:reset_network",
"compute:restore",
"compute:resume",
"compute:security_groups:add_to_instance",
"compute:security_groups:remove_from_instance",
"compute:set_admin_password",
"compute:snapshot",
"compute:soft_delete",
"compute:suspend",
"compute:swap_volume",
"compute:unlock",
"compute:unpause",
"compute:unrescue",
"compute:update",
"compute:update_instance_metadata",
"compute_extension:config_drive",
"compute_extension:os-tenant-networks",
"network:get_vif_by_mac_address",
"os_compute_api:extensions",
"os_compute_api:os-config-drive",
"os_compute_api:os-quota-sets:defaults",
"os_compute_api:servers:confirm_resize",
"os_compute_api:servers:create",
"os_compute_api:servers:create:attach_network",
"os_compute_api:servers:create:attach_volume",
"os_compute_api:servers:create_image",
"os_compute_api:servers:delete",
"os_compute_api:servers:detail",
"os_compute_api:servers:index",
"os_compute_api:servers:reboot",
"os_compute_api:servers:rebuild",
"os_compute_api:servers:resize",
"os_compute_api:servers:revert_resize",
"os_compute_api:servers:show",
"os_compute_api:servers:update",
"compute_extension:attach_interfaces",
"compute_extension:certificates",
"compute_extension:console_output",
"compute_extension:consoles",
"compute_extension:createserverext",
"compute_extension:deferred_delete",
"compute_extension:disk_config",
"compute_extension:extended_status",
"compute_extension:extended_availability_zone",
"compute_extension:extended_ips",
"compute_extension:extended_ips_mac",
"compute_extension:extended_vif_net",
"compute_extension:extended_volumes",
"compute_extension:flavor_access",
"compute_extension:flavor_disabled",
"compute_extension:flavor_rxtx",
"compute_extension:flavor_swap",
"compute_extension:flavorextradata",
"compute_extension:flavorextraspecs:index",
"compute_extension:flavorextraspecs:show",
"compute_extension:floating_ip_dns",
"compute_extension:floating_ip_pools",
"compute_extension:floating_ips",
"compute_extension:fping",
"compute_extension:image_size",
"compute_extension:instance_actions",
"compute_extension:keypairs",
"compute_extension:keypairs:index",
"compute_extension:keypairs:show",
"compute_extension:keypairs:create",
"compute_extension:keypairs:delete",
"compute_extension:multinic",
"compute_extension:networks:view",
"compute_extension:quotas:show",
"compute_extension:quota_classes",
"compute_extension:rescue",
"compute_extension:security_groups",
"compute_extension:server_groups",
"compute_extension:server_password",
"compute_extension:server_usage",
"compute_extension:shelve",
"compute_extension:unshelve",
"compute_extension:virtual_interfaces",
"compute_extension:virtual_storage_arrays",
"compute_extension:volumes",
"compute_extension:volume_attachments:index",
"compute_extension:volume_attachments:show",
"compute_extension:volume_attachments:create",
"compute_extension:volume_attachments:update",
"compute_extension:volume_attachments:delete",
"compute_extension:volumetypes",
"compute_extension:availability_zone:list",
"network:get_all",
"network:get",
"network:create",
"network:delete",
"network:associate",
"network:disassociate",
"network:get_vifs_by_instance",
"network:allocate_for_instance",
"network:deallocate_for_instance",
"network:validate_networks",
"network:get_instance_uuids_by_ip_filter",
"network:get_instance_id_by_floating_address",
"network:setup_networks_on_host",
"network:get_backdoor_port",
"network:get_floating_ip",
"network:get_floating_ip_pools",
"network:get_floating_ip_by_address",
"network:get_floating_ips_by_project",
"network:get_floating_ips_by_fixed_address",
"network:allocate_floating_ip",
"network:associate_floating_ip",
"network:disassociate_floating_ip",
"network:release_floating_ip",
"network:migrate_instance_start",
"network:migrate_instance_finish",
"network:get_fixed_ip",
"network:get_fixed_ip_by_address",
"network:add_fixed_ip_to_instance",
"network:remove_fixed_ip_from_instance",
"network:add_network_to_project",
"network:get_instance_nw_info",
"network:get_dns_domains",
"network:add_dns_entry",
"network:modify_dns_entry",
"network:delete_dns_entry",
"network:get_dns_entries_by_address",
"network:get_dns_entries_by_name",
"network:create_private_dns_domain",
"network:create_public_dns_domain",
"network:delete_dns_domain",
"os_compute_api:servers:create_image:allow_volume_backed",
"os_compute_api:os-access-ips:discoverable",
"os_compute_api:os-access-ips",
"os_compute_api:os-admin-actions:discoverable",
"os_compute_api:os-admin-password",
"os_compute_api:os-admin-password:discoverable",
"os_compute_api:os-aggregates:discoverable",
"os_compute_api:os-agents:discoverable",
"os_compute_api:os-attach-interfaces",
"os_compute_api:os-attach-interfaces:discoverable",
"os_compute_api:os-baremetal-nodes:discoverable",
"os_compute_api:os-block-device-mapping-v1:discoverable",
"os_compute_api:os-cells:discoverable",
"os_compute_api:os-certificates:create",
"os_compute_api:os-certificates:show",
"os_compute_api:os-certificates:discoverable",
"os_compute_api:os-cloudpipe:discoverable",
"os_compute_api:os-consoles:discoverable",
"os_compute_api:os-consoles:create",
"os_compute_api:os-consoles:delete",
"os_compute_api:os-consoles:index",
"os_compute_api:os-consoles:show",
"os_compute_api:os-console-output:discoverable",
"os_compute_api:os-console-output",
"os_compute_api:os-remote-consoles",
"os_compute_api:os-remote-consoles:discoverable",
"os_compute_api:os-create-backup:discoverable",
"os_compute_api:os-deferred-delete",
"os_compute_api:os-deferred-delete:discoverable",
"os_compute_api:os-disk-config",
"os_compute_api:os-disk-config:discoverable",
"os_compute_api:os-evacuate:discoverable",
"os_compute_api:os-extended-server-attributes:discoverable",
"os_compute_api:os-extended-status",
"os_compute_api:os-extended-status:discoverable",
"os_compute_api:os-extended-availability-zone",
"os_compute_api:os-extended-availability-zone:discoverable",
"os_compute_api:extension_info:discoverable",
"os_compute_api:os-extended-volumes",
"os_compute_api:os-extended-volumes:discoverable",
"os_compute_api:os-fixed-ips:discoverable",
"os_compute_api:os-flavor-access",
"os_compute_api:os-flavor-access:discoverable",
"os_compute_api:os-flavor-rxtx",
"os_compute_api:os-flavor-rxtx:discoverable",
"os_compute_api:flavors:discoverable",
"os_compute_api:os-flavor-extra-specs:discoverable",
"os_compute_api:os-flavor-extra-specs:index",
"os_compute_api:os-flavor-extra-specs:show",
"os_compute_api:os-flavor-manage:discoverable",
"os_compute_api:os-floating-ip-dns",
"os_compute_api:os-floating-ip-dns:discoverable",
"os_compute_api:os-floating-ip-pools",
"os_compute_api:os-floating-ip-pools:discoverable",
"os_compute_api:os-floating-ips",
"os_compute_api:os-floating-ips:discoverable",
"os_compute_api:os-floating-ips-bulk:discoverable",
"os_compute_api:os-fping",
"os_compute_api:os-fping:discoverable",
"os_compute_api:os-hide-server-addresses:discoverable",
"os_compute_api:os-hosts:discoverable",
"os_compute_api:os-hypervisors:discoverable",
"os_compute_api:images:discoverable",
"os_compute_api:image-size",
"os_compute_api:image-size:discoverable",
"os_compute_api:os-instance-actions",
"os_compute_api:os-instance-actions:discoverable",
"os_compute_api:os-instance-usage-audit-log:discoverable",
"os_compute_api:ips:discoverable",
"os_compute_api:os-keypairs:discoverable",
"os_compute_api:os-keypairs",
"os_compute_api:limits",
"os_compute_api:limits:discoverable",
"os_compute_api:os-lock-server:discoverable",
"os_compute_api:os-migrate-server:discoverable",
"os_compute_api:os-multinic",
"os_compute_api:os-multinic:discoverable",
"os_compute_api:os-networks:view",
"os_compute_api:os-networks:discoverable",
"os_compute_api:os-networks-associate:discoverable",
"os_compute_api:os-pause-server:discoverable",
"os_compute_api:os-pci:pci_servers",
"os_compute_api:os-pci:discoverable",
"os_compute_api:os-personality:discoverable",
"os_compute_api:os-preserve-ephemeral-rebuild:discoverable",
"os_compute_api:os-quota-sets:discoverable",
"os_compute_api:os-quota-class-sets:discoverable",
"os_compute_api:os-rescue",
"os_compute_api:os-rescue:discoverable",
"os_compute_api:os-scheduler-hints:discoverable",
"os_compute_api:os-security-group-default-rules:discoverable",
"os_compute_api:os-security-groups",
"os_compute_api:os-security-groups:discoverable",
"os_compute_api:os-server-diagnostics:discoverable",
"os_compute_api:os-server-password",
"os_compute_api:os-server-password:discoverable",
"os_compute_api:os-server-usage",
"os_compute_api:os-server-usage:discoverable",
"os_compute_api:os-server-groups",
"os_compute_api:os-server-groups:discoverable",
"os_compute_api:os-services:discoverable",
"os_compute_api:server-metadata:discoverable",
"os_compute_api:servers:discoverable",
"os_compute_api:os-shelve:shelve",
"os_compute_api:os-shelve:shelve:discoverable",
"os_compute_api:os-simple-tenant-usage:discoverable",
"os_compute_api:os-suspend-server:discoverable",
"os_compute_api:os-tenant-networks:discoverable",
"os_compute_api:os-shelve:unshelve",
"os_compute_api:os-user-data:discoverable",
"os_compute_api:os-virtual-interfaces",
"os_compute_api:os-virtual-interfaces:discoverable",
"os_compute_api:os-volumes",
"os_compute_api:os-volumes:discoverable",
"os_compute_api:os-volumes-attachments:index",
"os_compute_api:os-volumes-attachments:show",
"os_compute_api:os-volumes-attachments:create",
"os_compute_api:os-volumes-attachments:update",
"os_compute_api:os-volumes-attachments:delete",
"os_compute_api:os-volumes-attachments:discoverable",
"os_compute_api:os-availability-zone:list",
"os_compute_api:os-availability-zone:discoverable",
"os_compute_api:os-used-limits:discoverable",
"os_compute_api:os-migrations:discoverable",
"os_compute_api:os-assisted-volume-snapshots:discoverable")
self.non_admin_only_rules = (
"compute_extension:hide_server_addresses",
"os_compute_api:os-hide-server-addresses")
def test_all_rules_in_sample_file(self):
special_rules = ["context_is_admin", "admin_or_owner", "default"]
for (name, rule) in self.fake_policy.items():
if name in special_rules:
continue
self.assertIn(name, policy.get_rules())
def test_admin_only_rules(self):
for rule in self.admin_only_rules:
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.non_admin_context, rule, self.target)
policy.enforce(self.admin_context, rule, self.target)
def test_non_admin_only_rules(self):
for rule in self.non_admin_only_rules:
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.admin_context, rule, self.target)
policy.enforce(self.non_admin_context, rule, self.target)
def test_admin_or_owner_rules(self):
for rule in self.admin_or_owner_rules:
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.non_admin_context, rule, self.target)
policy.enforce(self.non_admin_context, rule,
{'project_id': 'fake', 'user_id': 'fake'})
def test_empty_rules(self):
rules = policy.get_rules()
for rule in self.empty_rules:
            self.assertEqual('@', str(rules[rule]),
                             "%s isn't an empty rule" % rule)
def test_rule_missing(self):
rules = policy.get_rules()
        # eliqiao: os_compute_api:os-quota-class-sets:show requires
        # admin=True or a quota_class match, so this rule won't belong to
        # admin_only, non_admin, admin_or_owner, or empty_rules
special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin',
'os_compute_api:os-quota-class-sets:show')
result = set(rules.keys()) - set(self.admin_only_rules +
self.admin_or_owner_rules + self.empty_rules +
self.non_admin_only_rules + special_rules)
self.assertEqual(set([]), result)
|
mareuter/pylunar
|
refs/heads/master
|
pylunar/moon_info.py
|
1
|
# This file is part of pylunar.
#
# Developed by Michael Reuter.
#
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
from datetime import datetime
from enum import Enum
import math
from operator import itemgetter
import ephem
import pytz
from pylunar import mjd_to_date_tuple, tuple_to_string
__all__ = ["MoonInfo"]
class PhaseName(Enum):
NEW_MOON = 0
WAXING_CRESCENT = 1
FIRST_QUARTER = 2
WAXING_GIBBOUS = 3
FULL_MOON = 4
WANING_GIBBOUS = 5
LAST_QUARTER = 6
WANING_CRESCENT = 7
class TimeOfDay(Enum):
MORNING = 0
EVENING = 1
class MoonInfo(object):
"""Handle all moon information.
Attributes
----------
observer : ephem.Observer instance.
The instance containing the observer's location information.
moon : ephem.Moon instance
The instance of the moon object.
"""
DAYS_TO_HOURS = 24.0
MAIN_PHASE_CUTOFF = 2.0
# Time cutoff (hours) around the NM, FQ, FM, and LQ phases
FEATURE_CUTOFF = 15.0
# The offset (degrees) from the colongitude used for visibility check
NO_CUTOFF_TYPE = ("Landing Site", "Mare", "Oceanus")
# Feature types that are not subject to longitude cutoffs
LIBRATION_ZONE = 80.0
# Latitude and/or longitude where librations have a big effect
MAXIMUM_LIBRATION_PHASE_ANGLE_CUTOFF = 65.0
# The maximum value of the libration phase angle difference for a feature
reverse_phase_lookup = {
"new_moon": (ephem.previous_last_quarter_moon, "last_quarter"),
"first_quarter": (ephem.previous_new_moon, "new_moon"),
"full_moon": (ephem.previous_first_quarter_moon, "first_quarter"),
"last_quarter": (ephem.previous_full_moon, "full_moon")
}
def __init__(self, latitude, longitude, name=None):
"""Initialize the class.
Parameters
----------
latitude : tuple of 3 ints
The latitude of the observer.
longitude : tuple of 3 ints
The longitude of the observer.
name : str, optional
A name for the observer's location.
"""
self.observer = ephem.Observer()
self.observer.lat = tuple_to_string(latitude)
self.observer.long = tuple_to_string(longitude)
self.moon = ephem.Moon()
def age(self):
"""The moon's age in days.
Returns
-------
float
"""
prev_new = ephem.previous_new_moon(self.observer.date)
return self.observer.date - prev_new
def altitude(self):
"""The moon's altitude in degrees.
Returns
-------
float
"""
return math.degrees(self.moon.alt)
def angular_size(self):
"""The moon's current angular size in degrees.
Returns
-------
float
"""
return self.moon.size / 3600.0
def azimuth(self):
"""The moon's azimuth in degrees.
Returns
-------
float
"""
return math.degrees(self.moon.az)
def colong(self):
"""The moon's selenographic colongitude in degrees.
Returns
-------
float
"""
return math.degrees(self.moon.colong)
def dec(self):
"""The moon's current declination in degrees.
Returns
-------
float
"""
return math.degrees(self.moon.dec)
def earth_distance(self):
"""The moon's current distance from the earth in km.
Returns
-------
float
"""
return self.moon.earth_distance * ephem.meters_per_au / 1000.0
def elongation(self):
"""The moon's elongation from the sun in degrees.
Returns
-------
float
"""
elongation = math.degrees(self.moon.elong)
if elongation < 0:
elongation += 360.0
return elongation
def fractional_phase(self):
"""The moon's fractional illumination. Always less than 1.0.
Returns
-------
float
"""
return self.moon.moon_phase
def libration_lat(self):
"""The moon's current latitudinal libration in degrees.
Returns
-------
float
"""
return math.degrees(self.moon.libration_lat)
def libration_lon(self):
"""The moon's current longitudinal libration in degrees.
Returns
-------
float
"""
return math.degrees(self.moon.libration_long)
def libration_phase_angle(self):
"""The phase angle of the moon's current libration in degrees.
Returns
-------
float
"""
phase_angle = math.atan2(self.moon.libration_long, self.moon.libration_lat)
phase_angle += 2.0 * math.pi if phase_angle < 0 else 0.0
return math.degrees(phase_angle)
def magnitude(self):
""" The moon's current magnitude.
Returns
-------
float
"""
return self.moon.mag
def colong_to_long(self):
"""The selenographic longitude in degrees based on the terminator.
Returns
-------
float
"""
colong = self.colong()
if 90.0 <= colong < 270.0:
longitude = 180.0 - colong
elif 270.0 <= colong < 360.0:
longitude = 360.0 - colong
else:
longitude = -colong
return longitude
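    # Worked values for the mapping above (an illustrative sketch added for
    # clarity, not part of the original file): colong 0 -> longitude 0,
    # colong 45 -> -45, colong 90 -> +90, colong 180 -> 0, colong 300 -> +60.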
def is_libration_ok(self, feature):
"""Determine if lunar feature is visible due to libration effect.
Parameters
----------
feature : :class:`.LunarFeature`
The Lunar feature instance to check.
Returns
-------
bool
True if visible, False if not.
"""
is_lon_in_zone = math.fabs(feature.longitude) > self.LIBRATION_ZONE
is_lat_in_zone = math.fabs(feature.latitude) > self.LIBRATION_ZONE
if is_lat_in_zone or is_lon_in_zone:
feature_angle = feature.feature_angle()
libration_phase_angle = self.libration_phase_angle()
delta_phase_angle = libration_phase_angle - feature_angle
delta_phase_angle -= 360.0 if delta_phase_angle > 180.0 else 0.0
if math.fabs(delta_phase_angle) <= self.MAXIMUM_LIBRATION_PHASE_ANGLE_CUTOFF:
return True
else:
return False
return True
def is_visible(self, feature):
"""Determine if lunar feature is visible.
Parameters
----------
feature : :class:`.LunarFeature`
The Lunar feature instance to check.
Returns
-------
bool
True if visible, False if not.
"""
selco_lon = self.colong_to_long()
current_tod = self.time_of_day()
min_lon = feature.longitude - feature.delta_longitude / 2
max_lon = feature.longitude + feature.delta_longitude / 2
if min_lon > max_lon:
min_lon, max_lon = max_lon, min_lon
is_visible = False
latitude_scaling = math.cos(math.radians(feature.latitude))
if feature.feature_type not in MoonInfo.NO_CUTOFF_TYPE:
cutoff = MoonInfo.FEATURE_CUTOFF / latitude_scaling
else:
cutoff = MoonInfo.FEATURE_CUTOFF
if current_tod == TimeOfDay.MORNING.name:
# Minimum longitude for morning visibility
lon_cutoff = min_lon - cutoff
if feature.feature_type in MoonInfo.NO_CUTOFF_TYPE:
is_visible = selco_lon <= min_lon
else:
is_visible = lon_cutoff <= selco_lon <= min_lon
else:
# Maximum longitude for evening visibility
lon_cutoff = max_lon + cutoff
if feature.feature_type in MoonInfo.NO_CUTOFF_TYPE:
is_visible = max_lon <= selco_lon
else:
is_visible = max_lon <= selco_lon <= lon_cutoff
return is_visible and self.is_libration_ok(feature)
def next_four_phases(self):
"""The next for phases in date sorted order (closest phase first).
Returns
-------
        list[(str, tuple)]
            Set of moon phases specified by an abbreviated phase name and a UTC date/time tuple.
"""
phases = {}
phases["new_moon"] = ephem.next_new_moon(self.observer.date)
phases["first_quarter"] = ephem.next_first_quarter_moon(self.observer.date)
phases["full_moon"] = ephem.next_full_moon(self.observer.date)
phases["last_quarter"] = ephem.next_last_quarter_moon(self.observer.date)
sorted_phases = sorted(phases.items(), key=itemgetter(1))
sorted_phases = [(phase[0], mjd_to_date_tuple(phase[1])) for phase in sorted_phases]
return sorted_phases
def phase_name(self):
"""The standard name of the moon's phase, i.e. Waxing Cresent
This function returns a standard name for the moon's phase based on the current selenographic
colongitude.
Returns
-------
str
"""
next_phase_name = self.next_four_phases()[0][0]
try:
next_phase_time = getattr(ephem, "next_{}".format(next_phase_name))(self.observer.date)
except AttributeError:
next_phase_time = getattr(ephem, "next_{}_moon".format(next_phase_name))(self.observer.date)
previous_phase = self.reverse_phase_lookup[next_phase_name]
time_to_next_phase = math.fabs(next_phase_time - self.observer.date) * self.DAYS_TO_HOURS
time_to_previous_phase = math.fabs(self.observer.date -
previous_phase[0](self.observer.date)) * self.DAYS_TO_HOURS
previous_phase_name = previous_phase[1]
if time_to_previous_phase < self.MAIN_PHASE_CUTOFF:
return getattr(PhaseName, previous_phase_name.upper()).name
elif time_to_next_phase < self.MAIN_PHASE_CUTOFF:
return getattr(PhaseName, next_phase_name.upper()).name
else:
if previous_phase_name == "new_moon" and next_phase_name == "first_quarter":
return PhaseName.WAXING_CRESCENT.name
elif previous_phase_name == "first_quarter" and next_phase_name == "full_moon":
return PhaseName.WAXING_GIBBOUS.name
elif previous_phase_name == "full_moon" and next_phase_name == "last_quarter":
return PhaseName.WANING_GIBBOUS.name
elif previous_phase_name == "last_quarter" and next_phase_name == "new_moon":
return PhaseName.WANING_CRESCENT.name
def ra(self):
"""The moon's current right ascension in degrees.
Returns
-------
float
"""
return math.degrees(self.moon.ra)
def rise_set_times(self, timezone):
"""Calculate the rise, set and transit times in the local time system.
Parameters
----------
timezone : str
The timezone identifier for the calculations.
Returns
-------
list[(str, tuple)]
            Set of rise, set, and transit times in the local time system. If an event
            does not occur on the current day, its tuple value is 'Does not <event>'.
"""
utc = pytz.utc
try:
tz = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
tz = utc
func_map = {"rise": "rising", "transit": "transit", "set": "setting"}
# Need to set observer's horizon and pressure to get times
old_pressure = self.observer.pressure
old_horizon = self.observer.horizon
self.observer.pressure = 0
self.observer.horizon = "-0:34"
current_date_utc = datetime(*mjd_to_date_tuple(self.observer.date,
round_off=True), tzinfo=utc)
current_date = current_date_utc.astimezone(tz)
current_day = current_date.day
times = {}
does_not = None
for time_type in ("rise", "transit", "set"):
mjd_time = getattr(self.observer,
"{}_{}".format("next",
func_map[time_type]))(self.moon)
utc_time = datetime(*mjd_to_date_tuple(mjd_time, round_off=True),
tzinfo=utc)
local_date = utc_time.astimezone(tz)
if local_date.day == current_day:
times[time_type] = local_date
else:
mjd_time = getattr(self.observer,
"{}_{}".format("previous",
func_map[time_type]))(self.moon)
utc_time = datetime(*mjd_to_date_tuple(mjd_time, round_off=True),
tzinfo=utc)
local_date = utc_time.astimezone(tz)
if local_date.day == current_day:
times[time_type] = local_date
else:
does_not = (time_type, "Does not {}".format(time_type))
# Return observer and moon to previous state
self.observer.pressure = old_pressure
self.observer.horizon = old_horizon
self.moon.compute(self.observer)
sorted_times = sorted(times.items(), key=itemgetter(1))
sorted_times = [(xtime[0], xtime[1].timetuple()[:6]) for xtime in sorted_times]
if does_not is not None:
sorted_times.insert(0, does_not)
return sorted_times
def subsolar_lat(self):
"""The latitude in degress on the moon where the sun is overhead.
Returns
-------
float
"""
return math.degrees(self.moon.subsolar_lat)
def time_of_day(self):
"""Determine if the terminator is sunrise (morning) or sunset (evening).
Returns
-------
        str
"""
colong = self.colong()
if 90.0 <= colong < 270.0:
return TimeOfDay.EVENING.name
else:
return TimeOfDay.MORNING.name
def time_from_new_moon(self):
"""The time (hours) from the previous new moon.
This function calculates the time from the previous new moon.
Returns
-------
float
"""
previous_new_moon = ephem.previous_new_moon(self.observer.date)
return MoonInfo.DAYS_TO_HOURS * (self.observer.date - previous_new_moon)
def time_to_full_moon(self):
"""The time (days) to the next full moon.
This function calculates the time to the next full moon.
Returns
-------
float
"""
next_full_moon = ephem.next_full_moon(self.observer.date)
return next_full_moon - self.observer.date
def time_to_new_moon(self):
"""The time (hours) to the next new moon.
This function calculates the time to the next new moon.
Returns
-------
float
"""
next_new_moon = ephem.next_new_moon(self.observer.date)
return MoonInfo.DAYS_TO_HOURS * (next_new_moon - self.observer.date)
def update(self, datetime):
"""Update the moon information based on time.
        This function updates the Observer instance's datetime setting. The incoming datetime tuple should be
in UTC with the following placement of values: (YYYY, m, d, H, M, S) as defined below::
YYYY
Four digit year
m
month (1-12)
d
day (1-31)
H
hours (0-23)
M
minutes (0-59)
S
seconds (0-59)
Parameters
----------
datetime : tuple
The current UTC time in a tuple of numbers.
"""
self.observer.date = datetime
self.moon.compute(self.observer)
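# A minimal usage sketch (hypothetical observer location and date, not part of
# the original module):
#
#     mi = MoonInfo((35, 58, 10), (-84, 19, 0))
#     mi.update((2018, 4, 1, 22, 30, 0))   # UTC tuple: (YYYY, m, d, H, M, S)
#     phase = mi.phase_name()              # e.g. 'WAXING_GIBBOUS'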
|
RichDijk/eXe
|
refs/heads/master
|
nevow/rend.py
|
14
|
# Copyright (c) 2004 Divmod.
# See LICENSE for details.
"""Page, Fragment and other standard renderers.
This module contains classes and function responsible for rendering
dynamic content and a few useful mixin classes for inheriting common
functionality.
Mostly, you'll use the renderers:
- B{Page} - Nevow's main resource type for rendering web pages and
locating child resource.
- B{Fragment} - useful for rendering more complex parts of a document
that require a set of data_* and render_* methods.
- B{sequence} - render each item in a sequence.
- B{mapping} - publish a dictionary by filling slots
"""
from cStringIO import StringIO
import os.path
import urllib
import warnings
import traceback
from nevow.context import WovenContext, NodeNotFound, PageContext, RequestContext
from nevow import compy
from nevow import inevow
from nevow import tags
from nevow import flat
from nevow.util import log
from nevow import util
import formless
from formless import iformless
from time import time as now
try:
import random
except ImportError:
import whrandom as random
class RenderFactory(object):
__implements__ = inevow.IRendererFactory,
def renderer(self, context, name):
"""Return a renderer with the given name.
"""
# The named renderer can be parameterised, i.e. 'renderIt one,two,three'
args = []
if name.find(' ') != -1:
name, args = name.split(None, 1)
args = [arg.strip() for arg in args.split(',')]
callable = getattr(self, 'render_%s' % name, None)
if callable is None:
callable = lambda context, data: context.tag[
"The renderer named '%s' was not found in %r." % (name, self)]
if args:
return callable(*args)
return callable
render_sequence = lambda self, context, data: sequence(context, data)
render_mapping = lambda self, context, data: mapping(context, data)
render_string = lambda self, context, data: string(context, data)
render_xml = lambda self, context, data: context.tag.clear()[tags.xml(data)]
render_data = lambda self, context, data_: data(context, data_)
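# Illustrative lookup for the parameterised renderers above (a sketch, not
# part of the original module): a template attribute such as
# nevow:render="renderIt one,two,three" resolves to
# self.render_renderIt('one', 'two', 'three'), whose return value is then
# used as the renderer for the tag.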
class MacroFactory(object):
__implements__ = inevow.IMacroFactory,
def macro(self, ctx, name):
"""Return a macro with the given name.
"""
# The named macro can be parameterized, i.e. 'macroFoo foo,bar,baz'
args = []
if name.find(' ') != -1:
name, args = name.split(None, 1)
args = [arg.strip() for arg in args.split(',')]
callable = getattr(self, 'macro_%s' % name, None)
if callable is None:
callable = lambda ctx, *args: ctx.tag[
"The macro named '%s' was not found in %r." % (name, self)]
if args:
## Macros are expanded in TagSerializer by calling them with a single arg, the context
return lambda ctx: callable(ctx, *args)
return callable
class DataFactory(object):
__implements__ = inevow.IContainer,
def child(self, context, n):
args = []
if n.find(' ') != -1:
name, args = n.split(None, 1)
args = [arg.strip() for arg in args.split(',')]
else:
name = n
callable = getattr(self, 'data_%s' % name, None)
## If this page doesn't have an appropriate data_* method...
if callable is None:
## See if our self.original has an IContainer...
container = inevow.IContainer(self.original, None)
if container is None:
util.log.msg("ERROR: The data named '%s' was not found in %r." % (name, self))
callable = lambda context, data: context.tag["The data named '%s' was not found in %r." % (name, self)]
else:
## And delegate to it if so.
return container.child(context, n)
if args:
return callable(*args)
return callable
class LiveEvilChildMixin:
"""Mixin that provides the LiveEvil child resources."""
def child_nevow_liveOutput(self, ctx):
from nevow import liveevil
self.child_nevow_liveOutput = liveevil.liveOutput
return liveevil.liveOutput
def child_nevow_liveInput(self, ctx):
from nevow import liveevil
self.child_nevow_liveInput = liveevil.liveInput
return liveevil.liveInput
class FreeformChildMixin:
"""Mixin that handles locateChild for freeform segments."""
def locateChild(self, ctx, segments):
request = inevow.IRequest(ctx)
## The method or property name we are going to validate against/affect
bindingName = None
name = segments[0]
if name.startswith('freeform_post!'):
configurableName, bindingName = name.split('!')[1:3]
elif name.startswith('freeform-action-post!'):
configurableName, request.args['freeform-actee'] = name.split('!')[1:3]
bindingName = request.args['freeform-action'][0]
if bindingName:
ctx.remember(self, inevow.IResource)
ctx.remember(request, inevow.IRequest)
cf = iformless.IConfigurableFactory(self)
def checkC(c):
if c is not None:
return self.webFormPost(request, self, c, ctx, bindingName, request.args)
return util.maybeDeferred(cf.locateConfigurable, ctx, configurableName).addCallback(checkC)
return NotFound
class ConfigurableFactory:
"""Locates configurables by looking for methods that start with
configurable_ and end with the name of the configurable. The method
should take a single arg (other than self) - the current context.
"""
__implements__ = iformless.IConfigurableFactory
def locateConfigurable(self, context, name):
"""formless.webform.renderForms calls locateConfigurable on the IConfigurableFactory
instance it retrieves from the context. It passes the "name" that was passed to it,
so if renderForms() was placed in the DOM, locateConfigurable will be called with
name = ''; if renderForms('foo') was placed in the DOM, locateConfigurable will
be called with name = 'foo'.
This default implementation of locateConfigurable looks for a configurable_* method
corresponding to the name which was passed.
"""
return util.maybeDeferred(getattr(self, 'configurable_%s'%name),
context).addCallback(iformless.IConfigurable)
def configurable_(self, context):
"""Configurable factory for use when self is a configurable;
aka it implements IConfigurable or one or more TypedInterface
subclasses. Usage:
>>> class IFoo(TypedInterface):
... def bar(self): pass
... bar = autocallable(bar)
...
>>> class Foo(Page):
... __implements__ = IFoo,
...
... def bar(self):
... print "bar called through the web!"
...
... def render_forms(self, ctx, data):
... return renderForms() # or renderForms('')
...
        ...     docFactory = stan(render_forms)
        """
"""
return self
def configurable_original(self, ctx):
"""Configurable factory for use when self.original is a configurable;
aka it implements IConfigurable or one or more TypedInterface
subclasses. Usage:
>>> class Foo(Page):
... def __init__(self):
... self.original = SomeConfigurable()
... def render_forms(self, ctx, data):
... return renderForms('original')
... docFactory = stan(render_forms)
"""
return self.original
_CARRYOVER = {}
def defaultsFactory(ctx):
co = _CARRYOVER.get(
ctx.tag.args.get('_nevow_carryover_', [None])[0], None)
from formless import webform
defaults = webform.FormDefaults()
if co is not None:
e = iformless.IFormErrors(co, {})
for k, v in e.items():
defaults.getAllDefaults(k).update(v.partialForm)
return defaults
def errorsFactory(ctx):
co = _CARRYOVER.get(
ctx.tag.args.get('_nevow_carryover_', [None])[0], None)
from formless import webform
errs = webform.FormErrors()
if co is not None:
e = iformless.IFormErrors(co, {})
for k, v in e.items():
errs.updateErrors(k, v.errors)
errs.setError(k, v.formErrorMessage)
return errs
def handFactory(ctx):
co = _CARRYOVER.get(
ctx.tag.args.get('_nevow_carryover_', [None])[0], None)
return inevow.IHand(co, None)
def statusFactory(ctx):
co = _CARRYOVER.get(
ctx.tag.args.get('_nevow_carryover_', [None])[0], None)
return inevow.IStatusMessage(co, None)
def originalFactory(ctx):
return ctx.tag
class Fragment(DataFactory, RenderFactory, MacroFactory):
"""A fragment is a renderer that can be embedded in a stan document and
hooks its template (from the docFactory) up to its data_ and render_
methods, i.e. it remembers itself as the IRendererFactory and IContainer.
Fragment primarily serves as the base for Page, Nevow's web resource, but
it can be used for more complex rendering. For instance, a fragment might
be used to encapsulate the rendering of a complex piece of data where the
template is read from disk and contains standard renderers (sequence,
mapping etc) and/or custom render methods.
"""
__implements__ = (
inevow.IRenderer,
inevow.IGettable,
RenderFactory.__implements__,
DataFactory.__implements__,
MacroFactory.__implements__)
docFactory = None
original = None
def __init__(self, original=None, docFactory=None):
if original is not None:
self.original = original
self.toremember = []
self._context = None
if docFactory is not None:
self.docFactory = docFactory
def get(self, context):
return self.original
def rend(self, context, data):
self.rememberStuff(context)
# This tidbit is to enable us to include Page objects inside
# stan expressions and render_* methods and the like. But
# because of the way objects can get intertwined, we shouldn't
# leave the pattern changed.
old = self.docFactory.pattern
self.docFactory.pattern = 'content'
self.docFactory.precompiledDoc = None
try:
doc = self.docFactory.load(context)
self.docFactory.pattern = old
self.docFactory.precompiledDoc = None
except NodeNotFound:
self.docFactory.pattern = old
self.docFactory.precompiledDoc = None
doc = self.docFactory.load(context)
return doc
def remember(self, obj, inter=None):
"""Remember an object for an interface on new PageContexts which are
constructed around this Page. Whenever this Page is involved in object
traversal in the future, all objects will be visible to .locate() calls
at the level of a PageContext wrapped around this Page and all contexts
below it.
This does not affect existing Context instances.
"""
self.toremember.append((obj, inter))
def rememberStuff(self, ctx):
ctx.remember(self, inevow.IRenderer)
ctx.remember(self, inevow.IRendererFactory)
ctx.remember(self, inevow.IMacroFactory)
ctx.remember(self, inevow.IData)
class ChildLookupMixin(FreeformChildMixin, LiveEvilChildMixin):
##
# IResource methods
##
children = None
def locateChild(self, ctx, segments):
"""Locate a child page of this one. ctx is a
nevow.context.PageContext representing the parent Page, and segments
        is a tuple of each element in the URI. A tuple (page, segments) should be
        returned, where page is an instance of nevow.rend.Page and segments is a tuple
representing the remaining segments of the URI. If the child is not found, return
NotFound instead.
locateChild is designed to be easily overridden to perform fancy lookup tricks.
However, the default locateChild is useful, and looks for children in three places,
in this order:
- in a dictionary, self.children
- a member of self named child_<childname>. This can be either an
attribute or a method. If an attribute, it should be an object which
can be adapted to IResource. If a method, it should take the context
and return an object which can be adapted to IResource.
- by calling self.childFactory(ctx, name). Name is a single string instead
of a tuple of strings. This should return an object that can be adapted
to IResource.
"""
if self.children is not None:
r = self.children.get(segments[0], None)
if r is not None:
return r, segments[1:]
w = getattr(self, 'child_%s'%segments[0], None)
if w is not None:
if inevow.IResource(w, default=None) is not None:
return w, segments[1:]
r = w(ctx)
if r is not None:
return r, segments[1:]
r = self.childFactory(ctx, segments[0])
if r is not None:
return r, segments[1:]
return FreeformChildMixin.locateChild(self, ctx, segments)
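    # A minimal illustration of the lookup order above (hypothetical resource,
    # not part of the original module): a child at URL segment 'about' can be
    # provided via self.putChild('about', AboutPage()), via an attribute or
    # method named child_about, or by returning it from childFactory(ctx, 'about').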
def childFactory(self, ctx, name):
"""Used by locateChild to return children which are generated
dynamically. Note that higher level interfaces use only locateChild,
and only nevow.rend.Page.locateChild uses this.
        name is a single string representing one element of the URI, and ctx is
        the current context for the request.
The default implementation of this always returns None; it is intended
to be overridden."""
rv = self.getDynamicChild(name, ctx)
if rv is not None:
warnings.warn("getDynamicChild is deprecated; use childFactory instead.", stacklevel=1)
return rv
def getDynamicChild(self, segment, request):
"""Deprecated, use childFactory instead. The name is different and the
order of the arguments is reversed."""
return None
def putChild(self, name, child):
if self.children is None:
self.children = {}
self.children[name] = child
class Page(Fragment, ConfigurableFactory, ChildLookupMixin):
"""A page is the main Nevow resource and renders a document loaded
via the document factory (docFactory).
"""
__implements__ = Fragment.__implements__, inevow.IResource, ConfigurableFactory.__implements__
buffered = False
beforeRender = None
afterRender = None
addSlash = None
flattenFactory = lambda self, *args: flat.flattenFactory(*args)
def renderHTTP(self, ctx):
if self.beforeRender is not None:
return util.maybeDeferred(self.beforeRender,ctx).addCallback(
lambda result,ctx: self._renderHTTP(ctx),ctx)
return self._renderHTTP(ctx)
def _renderHTTP(self, ctx):
request = inevow.IRequest(ctx)
## XXX request is really ctx now, change the name here
if self.addSlash and inevow.ICurrentSegments(ctx)[-1] != '':
request.redirect(request.URLPath().child(''))
return ''
log.msg(http_render=None, uri=request.uri)
self.rememberStuff(ctx)
def finishRequest():
carryover = request.args.get('_nevow_carryover_', [None])[0]
if carryover is not None and _CARRYOVER.has_key(carryover):
del _CARRYOVER[carryover]
if self.afterRender is not None:
return util.maybeDeferred(self.afterRender,ctx)
if self.buffered:
io = StringIO()
writer = io.write
def finisher(result):
request.write(io.getvalue())
return util.maybeDeferred(finishRequest).addCallback(lambda r: result)
else:
writer = request.write
def finisher(result):
return util.maybeDeferred(finishRequest).addCallback(lambda r: result)
doc = self.docFactory.load(ctx)
ctx = WovenContext(ctx, tags.invisible[doc])
return self.flattenFactory(doc, ctx, writer, finisher)
def rememberStuff(self, ctx):
Fragment.rememberStuff(self, ctx)
ctx.remember(self, inevow.IResource)
def renderString(self):
"""Render this page outside of the context of a web request, returning
a Deferred which will result in a string.
If twisted is not installed, this method will return a string result immediately,
and this method is equivalent to renderSynchronously.
"""
io = StringIO()
writer = io.write
def finisher(result):
return io.getvalue()
ctx = PageContext(tag=self)
self.rememberStuff(ctx)
doc = self.docFactory.load(ctx)
ctx = WovenContext(ctx, tags.invisible[doc])
return self.flattenFactory(doc, ctx, writer, finisher)
def renderSynchronously(self):
"""Render this page synchronously, returning a string result immediately.
Raise an exception if a Deferred is required to complete the rendering
process.
"""
io = StringIO()
ctx = PageContext(tag=self)
self.rememberStuff(ctx)
doc = self.docFactory.load(ctx)
ctx = WovenContext(ctx, tags.invisible[doc])
def raiseAlways(item):
raise NotImplementedError("renderSynchronously can not support"
" rendering: %s" % (item, ))
list(flat.iterflatten(doc, ctx, io.write, raiseAlways))
return io.getvalue()
def child_(self, ctx):
"""When addSlash is True, a page rendered at a url with no
trailing slash and a page rendered at a url with a trailing
slash will be identical. addSlash is useful for the root
resource of a site or directory-like resources.
"""
# Only allow an empty child, by default, if it's on the end
# and we're a directoryish resource (addSlash = True)
if self.addSlash and len(inevow.IRemainingSegments(ctx)) == 1:
return self
# After deprecation is removed, this should return None
warnings.warn(
"Allowing an empty child ('/') of resources automatically is "
"deprecated. If the class '%s' is a directory-index-like resource, "
"please add addSlash=True to the class definition." %
(self.__class__), DeprecationWarning, 2)
return self
def webFormPost(self, request, res, configurable, ctx, bindingName, args):
def redirectAfterPost(aspects):
redirectAfterPost = request.getComponent(iformless.IRedirectAfterPost, None)
if redirectAfterPost is None:
ref = request.getHeader('referer') or ''
else:
## Use the redirectAfterPost url
ref = str(redirectAfterPost)
from nevow import url
refpath = url.URL.fromString(ref)
magicCookie = '%s%s%s' % (now(),request.getClientIP(),random.random())
refpath = refpath.replace('_nevow_carryover_', magicCookie)
_CARRYOVER[magicCookie] = C = compy.Componentized(aspects)
request.redirect(str(refpath))
from nevow import static
return static.Data('You posted a form to %s' % bindingName, 'text/plain'), ()
return util.maybeDeferred(
configurable.postForm, ctx, bindingName, args
).addCallback(
self.onPostSuccess, request, ctx, bindingName, redirectAfterPost
).addErrback(
self.onPostFailure, request, ctx, bindingName, redirectAfterPost
)
def onPostSuccess(self, result, request, ctx, bindingName, redirectAfterPost):
if result is None:
message = "%s success." % formless.nameToLabel(bindingName)
else:
message = result
return redirectAfterPost({inevow.IHand: result, inevow.IStatusMessage: message})
def onPostFailure(self, reason, request, ctx, bindingName, redirectAfterPost):
reason.trap(formless.ValidateError)
return redirectAfterPost({iformless.IFormErrors: {bindingName: reason.value}})
def sequence(context, data):
"""Renders each item in the sequence using patterns found in the
children of the element.
Sequence recognises the following patterns:
- header: Rendered at the start, before the first item. If multiple
header patterns are provided they are rendered together in the
order they were defined.
     - footer: Just like the header, only rendered at the end, after the
       last item.
- item: Rendered once for each item in the sequence. If multiple
item patterns are provided then the pattern is cycled in the
order defined.
- divider: Rendered once between each item in the sequence. Multiple
divider patterns are cycled.
- empty: Rendered instead of item and divider patterns when the
sequence contains no items.
Example::
<table nevow:render="sequence" nevow:data="peopleSeq">
<tr nevow:pattern="header">
<th>name</th>
<th>email</th>
</tr>
<tr nevow:pattern="item" class="odd">
<td>name goes here</td>
<td>email goes here</td>
</tr>
<tr nevow:pattern="item" class="even">
<td>name goes here</td>
<td>email goes here</td>
</tr>
<tr nevow:pattern="empty">
<td colspan="2"><em>they've all gone!</em></td>
</tr>
</table>
"""
tag = context.tag
headers = tag.allPatterns('header')
pattern = tag.patternGenerator('item')
divider = tag.patternGenerator('divider', default=tags.invisible)
content = [(pattern(data=element), divider(data=element)) for element in data]
if not content:
content = tag.allPatterns('empty')
else:
## No divider after the last thing.
content[-1] = content[-1][0]
footers = tag.allPatterns('footer')
return tag.clear()[ headers, content, footers ]
def mapping(context, data):
"""Fills any slots in the element's children with data from a
dictionary. The dict keys are used as the slot names, the dict
values are used as filling.
Example::
<tr nevow:render="mapping" nevow:data="personDict">
<td><nevow:slot name="name"/></td>
<td><nevow:slot name="email"/></td>
</tr>
"""
for k, v in data.items():
context.fillSlots(k, v)
return context.tag
def string(context, data):
return context.tag.clear()[str(data)]
def data(context, data):
"""Replace the tag's content with the current data.
"""
return context.tag.clear()[data]
class FourOhFour:
"""A simple 404 (not found) page.
"""
__implements__ = inevow.IResource,
    notFound = "<html><head><title>Page Not Found</title></head><body>Sorry, but I couldn't find the object you requested.</body></html>"
original = None
def locateChild(self, ctx, segments):
return NotFound
def renderHTTP(self, ctx):
from twisted.protocols import http
inevow.IRequest(ctx).setResponseCode(404)
try:
notFoundHandler = ctx.locate(inevow.ICanHandleNotFound)
return notFoundHandler.renderHTTP_notFound(PageContext(parent=ctx, tag=notFoundHandler))
except KeyError, e:
return self.notFound
except:
log.err()
return self.notFound
def __nonzero__(self):
return False
# Not found singleton
NotFound = None, ()
|
live-clones/dolfin-adjoint
|
refs/heads/master
|
timestepping/tests/long/mantle_convection/composition.py
|
3
|
#!/usr/bin/env python2
# Copyright (C) 2011 Simula Research Laboratory and Lyudmyla Vynnytska and Marie
# E. Rognes
# Copyright (C) 2011-2012 by Imperial College London
# Copyright (C) 2013 University of Oxford
# Copyright (C) 2014 University of Edinburgh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 of the License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Copyright (C) 2011 Simula Research Laboratory and Lyudmyla Vynnytska and Marie
# E. Rognes from dolfin-adjoint file tests/mantle_convection/composition.py, bzr
# trunk 573
# Code first added: 2013-02-26
# Modified version of mantle_convection test from dolfin-adjoint bzr trunk 513
__license__ = "GNU LGPL Version 3"
from dolfin import *
from timestepping import *
from numerics import advection, diffusion, backward_euler
def transport(Q, dt, u, phi_):
# Define test and trial functions
phi = TrialFunction(Q)
v = TestFunction(Q)
# Constants associated with DG scheme
alpha = StaticConstant(10.0)
mesh = Q.mesh()
h = CellSize(mesh)
n = FacetNormal(mesh)
# Diffusivity constant
kappa = StaticConstant(0.0001)
# Define discrete time derivative operator
Dt = lambda phi: backward_euler(phi, phi_, dt)
a_A = lambda phi, v: advection(phi, v, u, n)
a_D = lambda phi, v: diffusion(phi, v, kappa, alpha, n, h)
# Define form
F = Dt(phi)*v*dx + a_A(phi, v) + a_D(phi, v)
return (lhs(F), rhs(F))
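# A minimal usage sketch (hypothetical mesh, velocity field and previous
# solution, not part of the original file):
#
#     Q = FunctionSpace(mesh, "DG", 1)
#     a, L = transport(Q, StaticConstant(0.01), u, phi_prev)
#     phi = Function(Q)
#     solve(a == L, phi)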
|
codesy/codesy
|
refs/heads/master
|
auctions/tests/utils_tests.py
|
1
|
import fudge
from django.test import TestCase
from github import UnknownObjectException
from ..utils import issue_state
class IssueStateTest(TestCase):
def test_issue_state(self):
url = 'https://github.com/codesy/codesy/issues/158'
fake_gh_client = fudge.Fake()
(fake_gh_client.expects('get_repo').with_args('codesy/codesy')
.returns_fake().expects('get_issue').with_args(158)
.returns_fake().has_attr(state='open'))
self.assertEqual('open', issue_state(url, fake_gh_client))
def test_issue_state_catches_UnknownObjectException(self):
url = 'https://github.com/codesy/codesy/issues/158'
fake_gh_client = fudge.Fake()
(fake_gh_client.expects('get_repo').with_args('codesy/codesy')
.raises(UnknownObjectException(404,
"Cannot find repo.")))
self.assertEqual(None, issue_state(url, fake_gh_client))
|
aiven/kafkajournalpump
|
refs/heads/master
|
journalpump/statsd.py
|
3
|
"""
StatsD client
Supports telegraf's statsd protocol extension for 'key=value' tags:
https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd
"""
import socket
class StatsClient:
def __init__(self, host="127.0.0.1", port=8125, tags=None):
self._dest_addr = (host, port)
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._tags = tags or {}
def gauge(self, metric, value, tags=None):
self._send(metric, b"g", value, tags)
def increase(self, metric, inc_value=1, tags=None):
self._send(metric, b"c", inc_value, tags)
def timing(self, metric, value, tags=None):
self._send(metric, b"ms", value, tags)
def unexpected_exception(self, ex, where, tags=None):
all_tags = {
"exception": ex.__class__.__name__,
"where": where,
}
all_tags.update(tags or {})
self.increase("exception", tags=all_tags)
def _send(self, metric, metric_type, value, tags):
if None in self._dest_addr:
# stats sending is disabled
return
# format: "user.logins,service=payroll,region=us-west:1|c"
parts = [metric.encode("utf-8"), b":", str(value).encode("utf-8"), b"|", metric_type]
send_tags = self._tags.copy()
send_tags.update(tags or {})
for tag, tag_value in send_tags.items():
parts.insert(1, ",{}={}".format(tag, tag_value).encode("utf-8"))
self._socket.sendto(b"".join(parts), self._dest_addr)
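# A minimal usage sketch (hypothetical metric and tags, not part of the
# original module):
#
#     client = StatsClient(tags={"service": "journalpump"})
#     client.increase("messages.sent", tags={"topic": "logs"})
#     # sends a datagram such as b"messages.sent,topic=logs,service=journalpump:1|c"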
|
Micronaet/micronaet-mx8
|
refs/heads/master
|
mx_fiscalposition/__init__.py
|
18
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import model
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
gencer/sentry
|
refs/heads/master
|
src/sentry/api/serializers/models/organization_access_request.py
|
3
|
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register, serialize
from sentry.models import OrganizationAccessRequest
@register(OrganizationAccessRequest)
class OrganizationAccessRequestSerializer(Serializer):
def serialize(self, obj, attrs, user):
d = {
'id': six.text_type(obj.id),
'member': serialize(obj.member),
'team': serialize(obj.team),
}
return d
|
fnordahl/nova
|
refs/heads/master
|
nova/tests/unit/api/openstack/compute/test_pci.py
|
21
|
# Copyright 2013 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack.compute import pci
from nova.api.openstack import wsgi
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import fields
from nova.objects import pci_device_pool
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_pci_device
pci_stats = [{"count": 3,
"vendor_id": "8086",
"product_id": "1520",
"numa_node": 1}]
fake_compute_node = objects.ComputeNode(
pci_device_pools=pci_device_pool.from_pci_stats(pci_stats))
class FakeResponse(wsgi.ResponseObject):
pass
class PciServerControllerTestV21(test.NoDBTestCase):
def setUp(self):
super(PciServerControllerTestV21, self).setUp()
self.controller = pci.PciServerController()
self.fake_obj = {'server': {'addresses': {},
'id': 'fb08',
'name': 'a3',
'status': 'ACTIVE',
'tenant_id': '9a3af784c',
'user_id': 'e992080ac0',
}}
self.fake_list = {'servers': [{'addresses': {},
'id': 'fb08',
'name': 'a3',
'status': 'ACTIVE',
'tenant_id': '9a3af784c',
'user_id': 'e992080ac',
}]}
self._create_fake_instance()
self._create_fake_pci_device()
self.pci_device.claim(self.inst)
self.pci_device.allocate(self.inst)
def _create_fake_instance(self):
self.inst = objects.Instance()
self.inst.uuid = 'fake-inst-uuid'
self.inst.pci_devices = objects.PciDeviceList()
def _create_fake_pci_device(self):
def fake_pci_device_get_by_addr(ctxt, id, addr):
return test_pci_device.fake_db_dev
ctxt = context.get_admin_context()
self.stubs.Set(db, 'pci_device_get_by_addr',
fake_pci_device_get_by_addr)
self.pci_device = objects.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
def test_show(self):
def fake_get_db_instance(id):
return self.inst
resp = FakeResponse(self.fake_obj, '')
req = fakes.HTTPRequest.blank('/os-pci/1', use_admin_context=True)
self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
self.controller.show(req, resp, '1')
self.assertEqual([{'id': 1}],
resp.obj['server']['os-pci:pci_devices'])
def test_detail(self):
def fake_get_db_instance(id):
return self.inst
resp = FakeResponse(self.fake_list, '')
req = fakes.HTTPRequest.blank('/os-pci/detail',
use_admin_context=True)
self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
self.controller.detail(req, resp)
self.assertEqual([{'id': 1}],
resp.obj['servers'][0]['os-pci:pci_devices'])
class PciHypervisorControllerTestV21(test.NoDBTestCase):
def setUp(self):
super(PciHypervisorControllerTestV21, self).setUp()
self.controller = pci.PciHypervisorController()
self.fake_objs = dict(hypervisors=[
dict(id=1,
service=dict(id=1, host="compute1"),
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1")])
self.fake_obj = dict(hypervisor=dict(
id=1,
service=dict(id=1, host="compute1"),
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1"))
def test_show(self):
def fake_get_db_compute_node(id):
return fake_compute_node
req = fakes.HTTPRequest.blank('/os-hypervisors/1',
use_admin_context=True)
resp = FakeResponse(self.fake_obj, '')
self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
self.controller.show(req, resp, '1')
self.assertIn('os-pci:pci_stats', resp.obj['hypervisor'])
self.assertEqual(pci_stats[0],
resp.obj['hypervisor']['os-pci:pci_stats'][0])
def test_detail(self):
def fake_get_db_compute_node(id):
return fake_compute_node
req = fakes.HTTPRequest.blank('/os-hypervisors/detail',
use_admin_context=True)
resp = FakeResponse(self.fake_objs, '')
self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
self.controller.detail(req, resp)
self.assertIn('os-pci:pci_stats', resp.obj['hypervisors'][0])
self.assertEqual(pci_stats[0],
resp.obj['hypervisors'][0]['os-pci:pci_stats'][0])
class PciControllerTestV21(test.NoDBTestCase):
    def setUp(self):
        super(PciControllerTestV21, self).setUp()
self.controller = pci.PciController()
def test_show(self):
def fake_pci_device_get_by_id(context, id):
return test_pci_device.fake_db_dev
self.stubs.Set(db, 'pci_device_get_by_id', fake_pci_device_get_by_id)
req = fakes.HTTPRequest.blank('/os-pci/1', use_admin_context=True)
result = self.controller.show(req, '1')
dist = {'pci_device': {'address': 'a',
'compute_node_id': 1,
'dev_id': 'i',
'extra_info': {},
'dev_type': fields.PciDeviceType.STANDARD,
'id': 1,
'server_uuid': None,
'label': 'l',
'product_id': 'p',
'status': 'available',
'vendor_id': 'v'}}
self.assertEqual(dist, result)
def test_show_error_id(self):
def fake_pci_device_get_by_id(context, id):
raise exception.PciDeviceNotFoundById(id=id)
self.stubs.Set(db, 'pci_device_get_by_id', fake_pci_device_get_by_id)
req = fakes.HTTPRequest.blank('/os-pci/0', use_admin_context=True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '0')
def _fake_compute_node_get_all(self, context):
return [objects.ComputeNode(id=1,
service_id=1,
host='fake',
cpu_info='cpu_info',
disk_available_least=100)]
def _fake_pci_device_get_all_by_node(self, context, node):
return [test_pci_device.fake_db_dev, test_pci_device.fake_db_dev_1]
def test_index(self):
self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
self._fake_compute_node_get_all)
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
req = fakes.HTTPRequest.blank('/os-pci', use_admin_context=True)
result = self.controller.index(req)
dist = {'pci_devices': [test_pci_device.fake_db_dev,
test_pci_device.fake_db_dev_1]}
for i in range(len(result['pci_devices'])):
self.assertEqual(dist['pci_devices'][i]['vendor_id'],
result['pci_devices'][i]['vendor_id'])
self.assertEqual(dist['pci_devices'][i]['id'],
result['pci_devices'][i]['id'])
self.assertEqual(dist['pci_devices'][i]['status'],
result['pci_devices'][i]['status'])
self.assertEqual(dist['pci_devices'][i]['address'],
result['pci_devices'][i]['address'])
def test_detail(self):
self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
self._fake_compute_node_get_all)
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
req = fakes.HTTPRequest.blank('/os-pci/detail',
use_admin_context=True)
result = self.controller.detail(req)
dist = {'pci_devices': [test_pci_device.fake_db_dev,
test_pci_device.fake_db_dev_1]}
for i in range(len(result['pci_devices'])):
self.assertEqual(dist['pci_devices'][i]['vendor_id'],
result['pci_devices'][i]['vendor_id'])
self.assertEqual(dist['pci_devices'][i]['id'],
result['pci_devices'][i]['id'])
self.assertEqual(dist['pci_devices'][i]['label'],
result['pci_devices'][i]['label'])
self.assertEqual(dist['pci_devices'][i]['dev_id'],
result['pci_devices'][i]['dev_id'])
class PciControllerPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(PciControllerPolicyEnforcementV21, self).setUp()
self.controller = pci.PciController()
self.req = fakes.HTTPRequest.blank('')
def _test_policy_failed(self, action, *args):
rule_name = "os_compute_api:os-pci:%s" % action
rule = {rule_name: "project:non_fake"}
self.policy.set_rules(rule)
exc = self.assertRaises(
exception.PolicyNotAuthorized, getattr(self.controller, action),
self.req, *args)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_policy_failed(self):
self._test_policy_failed('index')
def test_detail_policy_failed(self):
self._test_policy_failed('detail')
def test_show_policy_failed(self):
self._test_policy_failed('show', 1)
|
devlin85/p2pool
|
refs/heads/master
|
p2pool/networks/catcoin.py
|
3
|
from p2pool.bitcoin import networks
PARENT = networks.nets['catcoin']
SHARE_PERIOD = 15 # seconds target spacing
CHAIN_LENGTH = 12*60*60//15 # shares
REAL_CHAIN_LENGTH = 12*60*60//15 # shares
TARGET_LOOKBEHIND = 20 # shares coinbase maturity
SPREAD = 10 # blocks
IDENTIFIER = 'c1c2cacfe0e1eae6'.decode('hex')
PREFIX = 'f1facfff9e0a3414'.decode('hex')
P2P_PORT = 8331
MIN_TARGET = 0
MAX_TARGET = 2**256//2**20 - 1
PERSIST = False
WORKER_PORT = 9331
BOOTSTRAP_ADDRS = 'rav3n.dtdns.net pool.hostv.pl p2pool.org solidpool.org'.split(' ')
ANNOUNCE_CHANNEL = '#p2pool-alt'
VERSION_CHECK = lambda v: True
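# For reference (arithmetic sketch, not part of the original file):
# CHAIN_LENGTH = 12*60*60//15 = 2880 shares, i.e. 12 hours of share history
# at the 15-second SHARE_PERIOD above.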
|
jroyal/plexpy
|
refs/heads/master
|
lib/cherrypy/lib/locking.py
|
68
|
import datetime
class NeverExpires(object):
def expired(self):
return False
class Timer(object):
"""
A simple timer that will indicate when an expiration time has passed.
"""
def __init__(self, expiration):
"Create a timer that expires at `expiration` (UTC datetime)"
self.expiration = expiration
@classmethod
def after(cls, elapsed):
"""
Return a timer that will expire after `elapsed` passes.
"""
return cls(datetime.datetime.utcnow() + elapsed)
def expired(self):
return datetime.datetime.utcnow() >= self.expiration
class LockTimeout(Exception):
"An exception when a lock could not be acquired before a timeout period"
class LockChecker(object):
"""
Keep track of the time and detect if a timeout has expired
"""
def __init__(self, session_id, timeout):
self.session_id = session_id
if timeout:
self.timer = Timer.after(timeout)
else:
self.timer = NeverExpires()
def expired(self):
if self.timer.expired():
raise LockTimeout(
"Timeout acquiring lock for %(session_id)s" % vars(self))
return False
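# A minimal usage sketch (hypothetical session id and lock object, not part of
# the original module):
#
#     import datetime
#     checker = LockChecker('session-1234', datetime.timedelta(seconds=30))
#     while not some_lock.acquire():    # hypothetical non-blocking acquire
#         checker.expired()             # raises LockTimeout after 30 seconds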
|
dmsimard/ansible
|
refs/heads/devel
|
test/units/mock/vault_helper.py
|
206
|
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils._text import to_bytes
from ansible.parsing.vault import VaultSecret
class TextVaultSecret(VaultSecret):
'''A secret piece of text. ie, a password. Tracks text encoding.
The text encoding of the text may not be the default text encoding so
we keep track of the encoding so we encode it to the same bytes.'''
def __init__(self, text, encoding=None, errors=None, _bytes=None):
super(TextVaultSecret, self).__init__()
self.text = text
self.encoding = encoding or 'utf-8'
self._bytes = _bytes
self.errors = errors or 'strict'
@property
def bytes(self):
'''The text encoded with encoding, unless we specifically set _bytes.'''
return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors)
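# A minimal usage sketch (hypothetical secret text, not part of the original
# module):
#
#     secret = TextVaultSecret(u'some-vault-password', encoding='utf-8')
#     assert secret.bytes == u'some-vault-password'.encode('utf-8')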
|
krisaju95/NewsArticleClustering
|
refs/heads/master
|
IncrementalClustering/module3_calculateTfIdf.py
|
1
|
import pickle
import math
import os
import pandas as pd
import numpy as np
newsPaperName = "NewsPaper A"
path = "C:/Users/hp/Desktop/FINAL YEAR PROJECT/S8/"
words = pickle.load(open(os.path.join(path, 'Word Set','wordSet.p'), 'rb'))
dataFrame1 = pickle.load( open(os.path.join(path , 'Crawled Articles' , newsPaperName , 'Word Count','dataFrame1.p'), "rb" ))
wordSetSize = len(dataFrame1.columns)
numberOfDocuments = len(dataFrame1.index)
oldDataFrame2 = pickle.load(open(os.path.join(path, 'Feature Set', 'dataFrame2.p'), "rb" ))
originalNumberOfDocuments = len(oldDataFrame2.index)
# Calculates Tf-Idf score for each word using data from the data frame
def calculateTfIdf():
dataFrame2=dataFrame1.copy()
dataFrame2Matrix = dataFrame2.as_matrix()
oldDataFrame2Matrix = oldDataFrame2.as_matrix()
print "Normalising Term Frequencies"
    # Normalise term frequency: compute the total number of terms in each document
    # and divide each term count by that document's total (normalising)
for row in dataFrame2.index:
fileWordCount = dataFrame2Matrix[row].sum()
dataFrame2Matrix[row] = np.divide(dataFrame2Matrix[row] , float(fileWordCount))
dataFrame2 = pd.DataFrame(dataFrame2Matrix)
print "Normalisation completed"
print "Calculating IDF and corresponding TF-IDF values"
    # Calculate the document frequency for each word and multiply it with the TF component
for word in range(len(dataFrame2.columns)):
wordDocumentCount = 0
idfValue = 0
wordDocumentCount = np.count_nonzero(oldDataFrame2Matrix[:,word])
if wordDocumentCount != 0:
            # float() avoids Python 2 floor division skewing the IDF ratio
            idfValue = math.log(float(originalNumberOfDocuments) / (wordDocumentCount + 1))
dataFrame2Matrix[:,word] = np.multiply(dataFrame2Matrix[:,word] , idfValue)
dataFrame2 = pd.DataFrame(dataFrame2Matrix)
dataFrame2.columns = list(words)
print "TF-IDF Calculation completed"
print "Saving data in dataFrame2 as pickle package and CSV"
dataFrame2.to_pickle(os.path.join(path , 'Crawled Articles' , newsPaperName , 'Feature Set','dataFrame2.p'))
dataFrame2.to_csv(os.path.join(path , 'Crawled Articles' , newsPaperName , 'Feature Set','dataFrame2.csv'))
print "Dataframe2 has been saved"
calculateTfIdf()
|
heyfaraday/CMB_test
|
refs/heads/master
|
outdated/0.2.0/mapP.py
|
1
|
from numpy import *
from pylab import *
#a,b = genfromtxt('plot.dat').T
N=254
M=254
xlim([0,1])
ylim([0,1])
#axes().set_aspect('equal', 'datalim')
z,x,y = genfromtxt('P.dat').T
x = x.reshape(N,M)
y = y.reshape(N,M)
z = z.reshape(N,M)
pcolormesh(x,y,z)
#a,b = genfromtxt('levP.dat').T
#plot(a,b,'ko',ms=1)
xmax,ymax,fmax = genfromtxt('maxP.dat').T
plot(xmax,ymax,'ko',ms=1)
xmin,ymin,fmin = genfromtxt('minP.dat').T
plot(xmin,ymin,'ko',ms=4)
xsad,ysad = genfromtxt('sadP.dat').T
plot(xsad,ysad,'kx',ms=4)
#print 'Enter filename or nothing to show:'
#f = raw_input()
#if raw_input() != '':
# savefig(f)
#else:
show()
#savefig('fig.png', dpi=1500)
|
xiandiancloud/edx-platform
|
refs/heads/master
|
common/test/acceptance/pages/studio/textbooks.py
|
103
|
"""
Course Textbooks page.
"""
from .course_page import CoursePage
class TextbooksPage(CoursePage):
"""
Course Textbooks page.
"""
url_path = "textbooks"
def is_browser_on_page(self):
return self.q(css='body.view-textbooks').present
|
hobinyoon/apache-cassandra-2.2.3-src
|
refs/heads/master
|
mtdb/process-log/calc-cost-latency-plot-tablet-timeline/TabletAccessesForTabletSizeTimelinePlotDataGenerator.py
|
1
|
import os
import sys
sys.path.insert(0, "../../util/python")
import Cons
import Util
import CassLogReader
import Desc
import Event
import SimTime
import TabletSizeTimelinePlotDataGenerator
_fn_plot_data = None
_id_events = {}
_max_num_needto_read_datafile_per_day = 0
def Gen():
with Cons.MeasureTime("Generating tablet accesses timeline plot data ..."):
for l in CassLogReader._logs:
_BuildIdEventsMap(l)
_WriteToFile()
def MaxNumNeedtoReadDatafilePerDay():
return _max_num_needto_read_datafile_per_day
class Events:
def __init__(self):
self.time_cnts = {}
def AddAccStat(self, simulated_time, tablet_acc_stat):
# tablet_acc_stat is of type AccessStat.AccStat
self.time_cnts[simulated_time] = tablet_acc_stat
def __str__(self):
return "Events: " + ", ".join("%s: %s" % item for item in vars(self).items())
def _BuildIdEventsMap(e):
if type(e.event) is Event.AccessStat:
for e1 in e.event.entries:
if type(e1) is Event.AccessStat.MemtAccStat:
# We don't plot memtables for now
pass
elif type(e1) is Event.AccessStat.SstAccStat:
sst_gen = e1.id_
if TabletSizeTimelinePlotDataGenerator.SstExist(sst_gen):
if sst_gen not in _id_events:
_id_events[sst_gen] = Events()
_id_events[sst_gen].AddAccStat(e.simulated_time, e1)
def _WriteToFile():
global _fn_plot_data
_fn_plot_data = os.path.dirname(__file__) \
+ "/plot-data/" + Desc.ExpDatetime() + "-tablet-accesses-for-tablet-size-plot-by-time"
with open(_fn_plot_data, "w") as fo:
fmt = "%2s %10d %20s %20s" \
" %7.0f %7.0f %7.0f" \
" %7.0f %7.0f"
fo.write("%s\n" % Util.BuildHeader(fmt,
"id(sst_gen_memt_id_may_be_added_later) y_cord_base_tablet_size_plot time_begin time_end"
" num_reads_per_day num_needto_read_datafile_per_day num_bf_negatives_per_day"
" num_true_positives_per_day(not_complete) num_false_positives_per_day(not_complete)"
))
for id_, v in sorted(_id_events.iteritems()):
time_prev = None
num_reads_prev = 0
num_needto_read_datafile_prev = 0
num_negatives_prev = 0
# These are not complete numbers.
num_tp_prev = 0
num_fp_prev = 0
for time_, cnts in sorted(v.time_cnts.iteritems()):
if time_ > SimTime.SimulatedTimeEnd():
continue
num_negatives = cnts.num_reads - cnts.num_needto_read_datafile
if time_prev is None:
# We ignore the first time window, i.e., we don't print anything for
# it. There is a very small time window between the first access and
# when it is logged.
pass
else:
if time_ == time_prev:
# Duplicate timestamps are not expected; fail loudly if one appears.
raise RuntimeError("Unexpected: time_(%s) == time_prev" % time_)
time_dur_days = (time_ - time_prev).total_seconds() / (24.0 * 3600)
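# The per-day rates below are deltas of the cumulative counters divided by
# the elapsed window length in days.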
num_needto_read_datafile_per_day = (cnts.num_needto_read_datafile - num_needto_read_datafile_prev) / time_dur_days
global _max_num_needto_read_datafile_per_day
_max_num_needto_read_datafile_per_day = max(_max_num_needto_read_datafile_per_day, num_needto_read_datafile_per_day)
fo.write((fmt + "\n") % (id_
, TabletSizeTimelinePlotDataGenerator.GetBaseYCord(id_)
, time_prev.strftime("%y%m%d-%H%M%S.%f")
, time_.strftime("%y%m%d-%H%M%S.%f")
, (cnts.num_reads - num_reads_prev) / time_dur_days
, num_needto_read_datafile_per_day
, (num_negatives - num_negatives_prev) / time_dur_days
, (cnts.num_tp - num_tp_prev) / time_dur_days
, (cnts.num_fp - num_fp_prev) / time_dur_days
))
time_prev = time_
num_reads_prev = cnts.num_reads
num_needto_read_datafile_prev = cnts.num_needto_read_datafile
num_negatives_prev = num_negatives
num_tp_prev = cnts.num_tp
num_fp_prev = cnts.num_fp
fo.write("\n")
Cons.P("Created file %s %d" % (_fn_plot_data, os.path.getsize(_fn_plot_data)))
|
rresol/coala
|
refs/heads/master
|
tests/output/dbus/DbusTest.py
|
4
|
import os
import subprocess
import sys
import time
import unittest
from unittest.case import SkipTest
from coalib.misc import Constants
try:
import dbus
# Needed to determine if test needs skipping
from gi.repository import GLib
except ImportError as err:
raise SkipTest('python-dbus or python-gi is not installed')
def make_test_server():
# Make a dbus service in a new process. It cannot be in this process
# as that gives SegmentationFaults because the same bus is being used.
# For some reason this also fails on some systems if moved to another file
return subprocess.Popen([
sys.executable,
'-c',
"""
import sys
import dbus
import dbus.mainloop.glib
from gi.repository import GLib
from coalib.output.dbus.DbusServer import DbusServer
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
print('Creating session bus ...')
session_bus = dbus.SessionBus()
dbus_name = dbus.service.BusName("org.coala_analyzer.v1.test", session_bus)
print('Creating DbbusServer object ...')
dbus_server = DbusServer(session_bus, "/org/coala_analyzer/v1/test",
on_disconnected=lambda: GLib.idle_add(sys.exit))
mainloop = GLib.MainLoop()
print('Starting GLib mainloop ...')
mainloop.run()
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
class DbusTest(unittest.TestCase):
def setUp(self):
self.config_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"dbus_test_files",
".coafile"))
self.testcode_c_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"dbus_test_files",
"testcode.c"))
self.subprocess = make_test_server()
trials_left = 50
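# Poll for up to ~5 seconds (50 tries x 0.1 s) until the test server has
# claimed its bus name; if it never does, dump the server's output and fail.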
while trials_left > 0:
time.sleep(0.1)
trials_left = trials_left - 1
try:
self.connect_to_test_server()
break
except dbus.exceptions.DBusException as exception:
if trials_left == 0:
print("Stdout:")
print(self.subprocess.stdout.read().decode("utf-8"))
print("Stderr:")
print(self.subprocess.stderr.read().decode("utf-8"))
raise exception
def connect_to_test_server(self):
self.bus = dbus.SessionBus()
self.remote_object = self.bus.get_object("org.coala_analyzer.v1.test",
"/org/coala_analyzer/v1/test")
def test_dbus(self):
self.document_object_path = self.remote_object.CreateDocument(
self.testcode_c_path,
dbus_interface="org.coala_analyzer.v1")
self.assertRegex(str(self.document_object_path),
r"^/org/coala_analyzer/v1/test/\d+/documents/\d+$")
self.document_object = self.bus.get_object(
"org.coala_analyzer.v1.test",
self.document_object_path)
config_file = self.document_object.SetConfigFile(
"dummy_config",
dbus_interface="org.coala_analyzer.v1")
self.assertEqual(config_file, "dummy_config")
config_file = self.document_object.GetConfigFile(
dbus_interface="org.coala_analyzer.v1")
self.assertEqual(config_file, "dummy_config")
config_file = self.document_object.FindConfigFile(
dbus_interface="org.coala_analyzer.v1")
self.assertEqual(config_file, self.config_path)
analysis = self.document_object.Analyze(
dbus_interface="org.coala_analyzer.v1")
self.maxDiff = None
print(analysis)
# Run some basic analysis with good debug messages.
self.assertEqual(analysis[0], 1, "Exit code was not 1.")
self.assertEqual(len(analysis[1]), 0, "Unexpected log messages found.")
sections = analysis[2]
self.assertEqual(len(sections), 1, "Expected only 1 section to run.")
section = sections[0]
self.assertEqual(section[0], "default",
"Expected section to be named 'default'.")
self.assertTrue(section[1], "Section did not execute successfully.")
self.assertEqual(len(section[2]), 2, "Expected 2 results in section.")
# Remove the ids as they are hashes and cannot be asserted.
for result in section[2]:
result['id'] = 0
# We also test as a dictionary as dbus should be able to convert
# it into the correct python types.
self.assertEqual(analysis,
(1,
[],
[('default',
True,
[{'debug_msg': '',
'file': '',
'id': 0,
'line_nr': "",
'message': 'test msg',
'origin': 'LocalTestBear',
'severity': 'NORMAL'},
{'debug_msg': '',
'file': self.testcode_c_path,
'id': 0,
'line_nr': "",
'message': 'test msg',
'origin': 'GlobalTestBear',
'severity': 'NORMAL'}])]))
config_file = self.document_object.SetConfigFile(
self.config_path + "2",
dbus_interface="org.coala_analyzer.v1")
analysis = self.document_object.Analyze(
dbus_interface="org.coala_analyzer.v1")
self.assertEqual(analysis[0], 255)
self.assertEqual(analysis[1][1]["log_level"], "ERROR")
self.assertEqual(analysis[1][1]["message"], Constants.CRASH_MESSAGE)
# Skip file if file pattern doesn't match
# Also test if 2 documents can be opened simultaneously
self.document_object_path = self.remote_object.CreateDocument(
"test.unknown_ext",
dbus_interface="org.coala_analyzer.v1")
self.document_object = self.bus.get_object(
"org.coala_analyzer.v1.test",
self.document_object_path)
config_file = self.document_object.SetConfigFile(
self.config_path,
dbus_interface="org.coala_analyzer.v1")
analysis = self.document_object.Analyze(
dbus_interface="org.coala_analyzer.v1")
self.assertEqual(analysis, (0, [], []))
self.remote_object.DisposeDocument(
self.testcode_c_path,
dbus_interface="org.coala_analyzer.v1")
self.remote_object.DisposeDocument(
"test.unknown_ext",
dbus_interface="org.coala_analyzer.v1")
def tearDown(self):
if self.subprocess:
self.subprocess.kill()
|
ericMayer/tekton-master
|
refs/heads/master
|
backend/venv/lib/python2.7/site-packages/unidecode/x096.py
|
252
|
data = (
'Fa ', # 0x00
'Ge ', # 0x01
'He ', # 0x02
'Kun ', # 0x03
'Jiu ', # 0x04
'Yue ', # 0x05
'Lang ', # 0x06
'Du ', # 0x07
'Yu ', # 0x08
'Yan ', # 0x09
'Chang ', # 0x0a
'Xi ', # 0x0b
'Wen ', # 0x0c
'Hun ', # 0x0d
'Yan ', # 0x0e
'E ', # 0x0f
'Chan ', # 0x10
'Lan ', # 0x11
'Qu ', # 0x12
'Hui ', # 0x13
'Kuo ', # 0x14
'Que ', # 0x15
'Ge ', # 0x16
'Tian ', # 0x17
'Ta ', # 0x18
'Que ', # 0x19
'Kan ', # 0x1a
'Huan ', # 0x1b
'Fu ', # 0x1c
'Fu ', # 0x1d
'Le ', # 0x1e
'Dui ', # 0x1f
'Xin ', # 0x20
'Qian ', # 0x21
'Wu ', # 0x22
'Yi ', # 0x23
'Tuo ', # 0x24
'Yin ', # 0x25
'Yang ', # 0x26
'Dou ', # 0x27
'E ', # 0x28
'Sheng ', # 0x29
'Ban ', # 0x2a
'Pei ', # 0x2b
'Keng ', # 0x2c
'Yun ', # 0x2d
'Ruan ', # 0x2e
'Zhi ', # 0x2f
'Pi ', # 0x30
'Jing ', # 0x31
'Fang ', # 0x32
'Yang ', # 0x33
'Yin ', # 0x34
'Zhen ', # 0x35
'Jie ', # 0x36
'Cheng ', # 0x37
'E ', # 0x38
'Qu ', # 0x39
'Di ', # 0x3a
'Zu ', # 0x3b
'Zuo ', # 0x3c
'Dian ', # 0x3d
'Ling ', # 0x3e
'A ', # 0x3f
'Tuo ', # 0x40
'Tuo ', # 0x41
'Po ', # 0x42
'Bing ', # 0x43
'Fu ', # 0x44
'Ji ', # 0x45
'Lu ', # 0x46
'Long ', # 0x47
'Chen ', # 0x48
'Xing ', # 0x49
'Duo ', # 0x4a
'Lou ', # 0x4b
'Mo ', # 0x4c
'Jiang ', # 0x4d
'Shu ', # 0x4e
'Duo ', # 0x4f
'Xian ', # 0x50
'Er ', # 0x51
'Gui ', # 0x52
'Yu ', # 0x53
'Gai ', # 0x54
'Shan ', # 0x55
'Xun ', # 0x56
'Qiao ', # 0x57
'Xing ', # 0x58
'Chun ', # 0x59
'Fu ', # 0x5a
'Bi ', # 0x5b
'Xia ', # 0x5c
'Shan ', # 0x5d
'Sheng ', # 0x5e
'Zhi ', # 0x5f
'Pu ', # 0x60
'Dou ', # 0x61
'Yuan ', # 0x62
'Zhen ', # 0x63
'Chu ', # 0x64
'Xian ', # 0x65
'Tou ', # 0x66
'Nie ', # 0x67
'Yun ', # 0x68
'Xian ', # 0x69
'Pei ', # 0x6a
'Pei ', # 0x6b
'Zou ', # 0x6c
'Yi ', # 0x6d
'Dui ', # 0x6e
'Lun ', # 0x6f
'Yin ', # 0x70
'Ju ', # 0x71
'Chui ', # 0x72
'Chen ', # 0x73
'Pi ', # 0x74
'Ling ', # 0x75
'Tao ', # 0x76
'Xian ', # 0x77
'Lu ', # 0x78
'Sheng ', # 0x79
'Xian ', # 0x7a
'Yin ', # 0x7b
'Zhu ', # 0x7c
'Yang ', # 0x7d
'Reng ', # 0x7e
'Shan ', # 0x7f
'Chong ', # 0x80
'Yan ', # 0x81
'Yin ', # 0x82
'Yu ', # 0x83
'Ti ', # 0x84
'Yu ', # 0x85
'Long ', # 0x86
'Wei ', # 0x87
'Wei ', # 0x88
'Nie ', # 0x89
'Dui ', # 0x8a
'Sui ', # 0x8b
'An ', # 0x8c
'Huang ', # 0x8d
'Jie ', # 0x8e
'Sui ', # 0x8f
'Yin ', # 0x90
'Gai ', # 0x91
'Yan ', # 0x92
'Hui ', # 0x93
'Ge ', # 0x94
'Yun ', # 0x95
'Wu ', # 0x96
'Wei ', # 0x97
'Ai ', # 0x98
'Xi ', # 0x99
'Tang ', # 0x9a
'Ji ', # 0x9b
'Zhang ', # 0x9c
'Dao ', # 0x9d
'Ao ', # 0x9e
'Xi ', # 0x9f
'Yin ', # 0xa0
'[?] ', # 0xa1
'Rao ', # 0xa2
'Lin ', # 0xa3
'Tui ', # 0xa4
'Deng ', # 0xa5
'Pi ', # 0xa6
'Sui ', # 0xa7
'Sui ', # 0xa8
'Yu ', # 0xa9
'Xian ', # 0xaa
'Fen ', # 0xab
'Ni ', # 0xac
'Er ', # 0xad
'Ji ', # 0xae
'Dao ', # 0xaf
'Xi ', # 0xb0
'Yin ', # 0xb1
'E ', # 0xb2
'Hui ', # 0xb3
'Long ', # 0xb4
'Xi ', # 0xb5
'Li ', # 0xb6
'Li ', # 0xb7
'Li ', # 0xb8
'Zhui ', # 0xb9
'He ', # 0xba
'Zhi ', # 0xbb
'Zhun ', # 0xbc
'Jun ', # 0xbd
'Nan ', # 0xbe
'Yi ', # 0xbf
'Que ', # 0xc0
'Yan ', # 0xc1
'Qian ', # 0xc2
'Ya ', # 0xc3
'Xiong ', # 0xc4
'Ya ', # 0xc5
'Ji ', # 0xc6
'Gu ', # 0xc7
'Huan ', # 0xc8
'Zhi ', # 0xc9
'Gou ', # 0xca
'Jun ', # 0xcb
'Ci ', # 0xcc
'Yong ', # 0xcd
'Ju ', # 0xce
'Chu ', # 0xcf
'Hu ', # 0xd0
'Za ', # 0xd1
'Luo ', # 0xd2
'Yu ', # 0xd3
'Chou ', # 0xd4
'Diao ', # 0xd5
'Sui ', # 0xd6
'Han ', # 0xd7
'Huo ', # 0xd8
'Shuang ', # 0xd9
'Guan ', # 0xda
'Chu ', # 0xdb
'Za ', # 0xdc
'Yong ', # 0xdd
'Ji ', # 0xde
'Xi ', # 0xdf
'Chou ', # 0xe0
'Liu ', # 0xe1
'Li ', # 0xe2
'Nan ', # 0xe3
'Xue ', # 0xe4
'Za ', # 0xe5
'Ji ', # 0xe6
'Ji ', # 0xe7
'Yu ', # 0xe8
'Yu ', # 0xe9
'Xue ', # 0xea
'Na ', # 0xeb
'Fou ', # 0xec
'Se ', # 0xed
'Mu ', # 0xee
'Wen ', # 0xef
'Fen ', # 0xf0
'Pang ', # 0xf1
'Yun ', # 0xf2
'Li ', # 0xf3
'Li ', # 0xf4
'Ang ', # 0xf5
'Ling ', # 0xf6
'Lei ', # 0xf7
'An ', # 0xf8
'Bao ', # 0xf9
'Meng ', # 0xfa
'Dian ', # 0xfb
'Dang ', # 0xfc
'Xing ', # 0xfd
'Wu ', # 0xfe
'Zhao ', # 0xff
)
|
whitehorse-io/encarnia
|
refs/heads/master
|
pyenv/lib/python2.7/site-packages/twisted/python/test/test_setup.py
|
1
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for parts of our release automation system.
"""
import os
from setuptools.dist import Distribution
import twisted
from twisted.trial.unittest import TestCase
from twisted.python import _setup, filepath
from twisted.python.compat import _PY3
from twisted.python._setup import (
BuildPy3,
getSetupArgs,
ConditionalExtension,
_EXTRAS_REQUIRE,
)
class SetupTests(TestCase):
"""
Tests for L{getSetupArgs}.
"""
def test_conditionalExtensions(self):
"""
Will return the arguments with a custom build_ext which knows how to
check whether they should be built.
"""
good_ext = ConditionalExtension("whatever", ["whatever.c"],
condition=lambda b: True)
bad_ext = ConditionalExtension("whatever", ["whatever.c"],
condition=lambda b: False)
args = getSetupArgs(extensions=[good_ext, bad_ext])
# ext_modules should be set even though it's not used. See comment
# in getSetupArgs
self.assertEqual(args["ext_modules"], [good_ext, bad_ext])
cmdclass = args["cmdclass"]
build_ext = cmdclass["build_ext"]
builder = build_ext(Distribution())
builder.prepare_extensions()
self.assertEqual(builder.extensions, [good_ext])
def test_win32Definition(self):
"""
When building on Windows NT, the WIN32 macro will be defined as 1 on
the extensions.
"""
ext = ConditionalExtension("whatever", ["whatever.c"],
define_macros=[("whatever", 2)])
args = getSetupArgs(extensions=[ext])
builder = args["cmdclass"]["build_ext"](Distribution())
self.patch(os, "name", "nt")
builder.prepare_extensions()
self.assertEqual(ext.define_macros, [("whatever", 2), ("WIN32", 1)])
class OptionalDependenciesTests(TestCase):
"""
Tests for L{_EXTRAS_REQUIRE}
"""
def test_distributeTakesExtrasRequire(self):
"""
Setuptools' Distribution object parses and stores its C{extras_require}
argument as an attribute.
"""
extras = dict(im_an_extra_dependency="thing")
attrs = dict(extras_require=extras)
distribution = Distribution(attrs)
self.assertEqual(
extras,
distribution.extras_require
)
def test_extrasRequireDictContainsKeys(self):
"""
L{_EXTRAS_REQUIRE} contains options for all documented extras: C{dev},
C{tls}, C{conch}, C{soap}, C{serial}, C{all_non_platform},
C{osx_platform}, and C{windows_platform}.
"""
self.assertIn('dev', _EXTRAS_REQUIRE)
self.assertIn('tls', _EXTRAS_REQUIRE)
self.assertIn('conch', _EXTRAS_REQUIRE)
self.assertIn('soap', _EXTRAS_REQUIRE)
self.assertIn('serial', _EXTRAS_REQUIRE)
self.assertIn('all_non_platform', _EXTRAS_REQUIRE)
self.assertIn('osx_platform', _EXTRAS_REQUIRE)
self.assertIn('windows_platform', _EXTRAS_REQUIRE)
self.assertIn('http2', _EXTRAS_REQUIRE)
def test_extrasRequiresDevDeps(self):
"""
L{_EXTRAS_REQUIRE}'s C{dev} extra contains setuptools requirements for
the tools required for Twisted development.
"""
deps = _EXTRAS_REQUIRE['dev']
self.assertIn('pyflakes >= 1.0.0', deps)
self.assertIn('twisted-dev-tools >= 0.0.2', deps)
self.assertIn('python-subunit', deps)
self.assertIn('sphinx >= 1.3.1', deps)
if not _PY3:
self.assertIn('twistedchecker >= 0.4.0', deps)
self.assertIn('pydoctor >= 16.2.0', deps)
def test_extrasRequiresTlsDeps(self):
"""
L{_EXTRAS_REQUIRE}'s C{tls} extra contains setuptools requirements for
the packages required to make Twisted's transport layer security fully
work for both clients and servers.
"""
deps = _EXTRAS_REQUIRE['tls']
self.assertIn('pyopenssl >= 16.0.0', deps)
self.assertIn('service_identity', deps)
self.assertIn('idna >= 0.6, != 2.3', deps)
def test_extrasRequiresConchDeps(self):
"""
L{_EXTRAS_REQUIRE}'s C{conch} extra contains setuptools requirements
for the packages required to make Twisted Conch's secure shell server
work.
"""
deps = _EXTRAS_REQUIRE['conch']
self.assertIn('pyasn1', deps)
self.assertIn('cryptography >= 0.9.1', deps)
self.assertIn('appdirs >= 1.4.0', deps)
def test_extrasRequiresSoapDeps(self):
"""
L{_EXTRAS_REQUIRE}' C{soap} extra contains setuptools requirements for
the packages required to make the C{twisted.web.soap} module function.
"""
self.assertIn(
'soappy',
_EXTRAS_REQUIRE['soap']
)
def test_extrasRequiresSerialDeps(self):
"""
L{_EXTRAS_REQUIRE}'s C{serial} extra contains setuptools requirements
for the packages required to make Twisted's serial support work.
"""
self.assertIn(
'pyserial',
_EXTRAS_REQUIRE['serial']
)
def test_extrasRequiresHttp2Deps(self):
"""
L{_EXTRAS_REQUIRE}'s C{http2} extra contains setuptools requirements
for the packages required to make Twisted HTTP/2 support work.
"""
deps = _EXTRAS_REQUIRE['http2']
self.assertIn('h2 >= 3.0, < 4.0', deps)
self.assertIn('priority >= 1.1.0, < 2.0', deps)
def test_extrasRequiresAllNonPlatformDeps(self):
"""
L{_EXTRAS_REQUIRE}'s C{all_non_platform} extra contains setuptools
requirements for all of Twisted's optional dependencies which work on
all supported operating systems.
"""
deps = _EXTRAS_REQUIRE['all_non_platform']
self.assertIn('pyopenssl >= 16.0.0', deps)
self.assertIn('service_identity', deps)
self.assertIn('idna >= 0.6, != 2.3', deps)
self.assertIn('pyasn1', deps)
self.assertIn('cryptography >= 0.9.1', deps)
self.assertIn('soappy', deps)
self.assertIn('pyserial', deps)
self.assertIn('appdirs >= 1.4.0', deps)
self.assertIn('h2 >= 3.0, < 4.0', deps)
self.assertIn('priority >= 1.1.0, < 2.0', deps)
def test_extrasRequiresOsxPlatformDeps(self):
"""
L{_EXTRAS_REQUIRE}'s C{osx_platform} extra contains setuptools
requirements for all of Twisted's optional dependencies usable on the
Mac OS X platform.
"""
deps = _EXTRAS_REQUIRE['osx_platform']
self.assertIn('pyopenssl >= 16.0.0', deps)
self.assertIn('service_identity', deps)
self.assertIn('idna >= 0.6, != 2.3', deps)
self.assertIn('pyasn1', deps)
self.assertIn('cryptography >= 0.9.1', deps)
self.assertIn('soappy', deps)
self.assertIn('pyserial', deps)
self.assertIn('h2 >= 3.0, < 4.0', deps)
self.assertIn('priority >= 1.1.0, < 2.0', deps)
self.assertIn('pyobjc-core', deps)
def test_extrasRequiresWindowsPlatformDeps(self):
"""
L{_EXTRAS_REQUIRE}'s C{windows_platform} extra contains setuptools
requirements for all of Twisted's optional dependencies usable on the
Microsoft Windows platform.
"""
deps = _EXTRAS_REQUIRE['windows_platform']
self.assertIn('pyopenssl >= 16.0.0', deps)
self.assertIn('service_identity', deps)
self.assertIn('idna >= 0.6, != 2.3', deps)
self.assertIn('pyasn1', deps)
self.assertIn('cryptography >= 0.9.1', deps)
self.assertIn('soappy', deps)
self.assertIn('pyserial', deps)
self.assertIn('h2 >= 3.0, < 4.0', deps)
self.assertIn('priority >= 1.1.0, < 2.0', deps)
self.assertIn('pypiwin32', deps)
class FakeModule(object):
"""
A fake module, suitable for dependency injection in testing.
"""
def __init__(self, attrs):
"""
Initializes a fake module.
@param attrs: The attrs that will be accessible on the module.
@type attrs: C{dict} of C{str} (Python names) to objects
"""
self._attrs = attrs
def __getattr__(self, name):
"""
Gets an attribute of this fake module from its attrs.
@raise AttributeError: When the requested attribute is missing.
"""
try:
return self._attrs[name]
except KeyError:
raise AttributeError()
fakeCPythonPlatform = FakeModule({"python_implementation": lambda: "CPython"})
fakeOtherPlatform = FakeModule({"python_implementation": lambda: "lvhpy"})
class WithPlatformTests(TestCase):
"""
Tests for L{_checkCPython} when used with a (fake) C{platform} module.
"""
def test_cpython(self):
"""
L{_checkCPython} returns C{True} when C{platform.python_implementation}
says we're running on CPython.
"""
self.assertTrue(_setup._checkCPython(platform=fakeCPythonPlatform))
def test_other(self):
"""
L{_checkCPython} returns C{False} when C{platform.python_implementation}
says we're not running on CPython.
"""
self.assertFalse(_setup._checkCPython(platform=fakeOtherPlatform))
class BuildPy3Tests(TestCase):
"""
Tests for L{BuildPy3}.
"""
maxDiff = None
if not _PY3:
skip = "BuildPy3 setuptools command used with Python 3 only."
def test_find_package_modules(self):
"""
Will filter the found modules excluding the modules listed in
L{twisted.python.dist3}.
"""
distribution = Distribution()
distribution.script_name = 'setup.py'
distribution.script_args = 'build_py'
builder = BuildPy3(distribution)
# Rig the dist3 data so that we can reduce the scope of this test and
# reduce the risk of getting false failures, while doing a minimum
# level of patching.
self.patch(
_setup,
'notPortedModules',
[
"twisted.spread.test.test_pbfailure",
],
)
twistedPackageDir = filepath.FilePath(twisted.__file__).parent()
packageDir = twistedPackageDir.child("spread").child("test")
result = builder.find_package_modules('twisted.spread.test',
packageDir.path)
self.assertEqual(sorted([
('twisted.spread.test', '__init__',
packageDir.child('__init__.py').path),
('twisted.spread.test', 'test_banana',
packageDir.child('test_banana.py').path),
('twisted.spread.test', 'test_jelly',
packageDir.child('test_jelly.py').path),
('twisted.spread.test', 'test_pb',
packageDir.child('test_pb.py').path),
]),
sorted(result),
)
|
TNick/pylearn2
|
refs/heads/master
|
pylearn2/datasets/tests/test_utlc.py
|
44
|
from __future__ import print_function
import unittest
import numpy
import scipy.sparse
from pylearn2.testing.skip import skip_if_no_data
import pylearn2.datasets.utlc as utlc
def test_ule():
skip_if_no_data()
# Test loading of transfer data
train, valid, test, transfer = utlc.load_ndarray_dataset("ule",
normalize=True,
transfer=True)
assert train.shape[0] == transfer.shape[0]
# @unittest.skip("Slow and needs >8 GB of RAM")
def test_all_utlc():
skip_if_no_data()
# not testing rita, because it requires a lot of memory and is slow
for name in ['avicenna', 'harry', 'ule']:
print("Loading ", name)
train, valid, test = utlc.load_ndarray_dataset(name, normalize=True)
print("dtype, max, min, mean, std")
print(train.dtype, train.max(), train.min(), train.mean(), train.std())
assert isinstance(train, numpy.ndarray)
assert isinstance(valid, numpy.ndarray)
assert isinstance(test, numpy.ndarray)
assert train.shape[1] == test.shape[1] == valid.shape[1]
def test_sparse_ule():
skip_if_no_data()
# Test loading of transfer data
train, valid, test, transfer = utlc.load_sparse_dataset("ule",
normalize=True,
transfer=True)
assert train.shape[0] == transfer.shape[0]
def test_all_sparse_utlc():
skip_if_no_data()
for name in ['harry', 'terry', 'ule']:
print("Loading sparse ", name)
train, valid, test = utlc.load_sparse_dataset(name, normalize=True)
nb_elem = numpy.prod(train.shape)
mi = train.data.min()
ma = train.data.max()
mi = min(0, mi)
ma = max(0, ma)
su = train.data.sum()
mean = float(su) / nb_elem
print(name, "dtype, max, min, mean, nb non-zero, nb element, %sparse")
print(train.dtype, ma, mi, mean, train.nnz, end='')
print(nb_elem, (nb_elem - float(train.nnz)) / nb_elem)
print(name, "max, min, mean, std (all stats on non-zero element)")
print(train.data.max(), train.data.min(), end='')
print(train.data.mean(), train.data.std())
assert scipy.sparse.issparse(train)
assert scipy.sparse.issparse(valid)
assert scipy.sparse.issparse(test)
assert train.shape[1] == test.shape[1] == valid.shape[1]
|
sudheesh001/mediadrop
|
refs/heads/master
|
mediacore/lib/decorators.py
|
14
|
from mediadrop.lib.decorators import *
|
akretion/connector-magento
|
refs/heads/7.0_catalog_export
|
magentoerpconnect_pricing/__init__.py
|
3
|
# -*- coding: utf-8 -*-
import connector
import magento_model
import product
import sale
|
TangHao1987/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyMoveAttributeToInitQuickFixTest/createInit_after.py
|
83
|
__author__ = 'ktisha'
class A:
def __init__(self):
self.b = 1
def foo(self):
c = 1
|
diku-kmc/kleenexlang
|
refs/heads/master
|
bench/python/src/as.py
|
2
|
#!/usr/bin/env python
# Python version of as
import sys
import re
import datetime
regex = "(a*)"
pre_compile = datetime.datetime.now()
pattern = re.compile(regex)
lno = 0
# Start timing
start = datetime.datetime.now()
for line in sys.stdin:
lno += 1
m = pattern.match(line)
if m:
sys.stdout.write("%s\n" % m.group(1))
else:
sys.stderr.write("match error on line %s\n" % str(lno))
exit(1)
# End timing
end = datetime.datetime.now()
# Elapsed time
elaps = end - start
elaps_compile = start - pre_compile
elaps_ms = elaps.seconds * 1000 + elaps.microseconds / 1000
elaps_compile_ms = elaps_compile.seconds * 1000 + elaps_compile.microseconds / 1000
sys.stderr.write("\ncompilation (ms): %s\n" % str(elaps_compile_ms))
sys.stderr.write("matching (ms): %s\n" % str(elaps_ms))
|
yordan-desta/QgisIns
|
refs/heads/master
|
python/ext-libs/owslib/ows.py
|
28
|
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2008 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
"""
API for OGC Web Services Common (OWS) constructs and metadata.
OWS Common: http://www.opengeospatial.org/standards/common
Currently supports version 1.1.0 (06-121r3).
"""
from owslib.etree import etree
from owslib import crs, util
from owslib.namespaces import Namespaces
n = Namespaces()
OWS_NAMESPACE_1_0_0 = n.get_namespace("ows")
OWS_NAMESPACE_1_1_0 = n.get_namespace("ows110")
OWS_NAMESPACE_2_0 = n.get_namespace("ows200")
XSI_NAMESPACE = n.get_namespace("xsi")
XLINK_NAMESPACE = n.get_namespace("xlink")
DEFAULT_OWS_NAMESPACE = OWS_NAMESPACE_1_1_0  # use this as the default for OWSCommon objects
class OwsCommon(object):
"""Initialize OWS Common object"""
def __init__(self,version):
self.version = version
if version == '1.0.0':
self.namespace = OWS_NAMESPACE_1_0_0
else:
self.namespace = OWS_NAMESPACE_1_1_0
class ServiceIdentification(object):
"""Initialize an OWS Common ServiceIdentification construct"""
def __init__(self,infoset,namespace=DEFAULT_OWS_NAMESPACE):
self._root = infoset
val = self._root.find(util.nspath('Title', namespace))
self.title = util.testXMLValue(val)
val = self._root.find(util.nspath('Abstract', namespace))
self.abstract = util.testXMLValue(val)
self.keywords = []
for f in self._root.findall(util.nspath('Keywords/Keyword', namespace)):
if f.text is not None:
self.keywords.append(f.text)
val = self._root.find(util.nspath('AccessConstraints', namespace))
self.accessconstraints = util.testXMLValue(val)
val = self._root.find(util.nspath('Fees', namespace))
self.fees = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceType', namespace))
self.type = util.testXMLValue(val)
self.service = self.type  # alternative? keep both? discuss
val = self._root.find(util.nspath('ServiceTypeVersion', namespace))
self.version = util.testXMLValue(val)
self.profiles = []
for p in self._root.findall(util.nspath('Profile', namespace)):
self.profiles.append(util.testXMLValue(p))
class ServiceProvider(object):
"""Initialize an OWS Common ServiceProvider construct"""
def __init__(self, infoset,namespace=DEFAULT_OWS_NAMESPACE):
self._root = infoset
val = self._root.find(util.nspath('ProviderName', namespace))
self.name = util.testXMLValue(val)
self.contact = ServiceContact(infoset, namespace)
val = self._root.find(util.nspath('ProviderSite', namespace))
if val is not None:
urlattrib=val.attrib[util.nspath('href', XLINK_NAMESPACE)]
self.url = util.testXMLValue(urlattrib, True)
else:
self.url =None
class ServiceContact(object):
"""Initialize an OWS Common ServiceContact construct"""
def __init__(self, infoset,namespace=DEFAULT_OWS_NAMESPACE):
self._root = infoset
val = self._root.find(util.nspath('ProviderName', namespace))
self.name = util.testXMLValue(val)
self.organization=util.testXMLValue(self._root.find(util.nspath('ContactPersonPrimary/ContactOrganization', namespace)))
val = self._root.find(util.nspath('ProviderSite', namespace))
if val is not None:
self.site = util.testXMLValue(val.attrib.get(util.nspath('href', XLINK_NAMESPACE)), True)
else:
self.site = None
val = self._root.find(util.nspath('ServiceContact/Role', namespace))
self.role = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/IndividualName', namespace))
self.name = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/PositionName', namespace))
self.position = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Phone/Voice', namespace))
self.phone = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Phone/Facsimile', namespace))
self.fax = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/DeliveryPoint', namespace))
self.address = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/City', namespace))
self.city = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/AdministrativeArea', namespace))
self.region = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/PostalCode', namespace))
self.postcode = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/Country', namespace))
self.country = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/ElectronicMailAddress', namespace))
self.email = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/OnlineResource', namespace))
if val is not None:
self.url = util.testXMLValue(val.attrib.get(util.nspath('href', XLINK_NAMESPACE)), True)
else:
self.url = None
val = self._root.find(util.nspath('ServiceContact/ContactInfo/HoursOfService', namespace))
self.hours = util.testXMLValue(val)
val = self._root.find(util.nspath('ServiceContact/ContactInfo/ContactInstructions', namespace))
self.instructions = util.testXMLValue(val)
class Constraint(object):
def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
self.name = elem.attrib.get('name')
self.values = [i.text for i in elem.findall(util.nspath('Value', namespace))]
self.values += [i.text for i in elem.findall(util.nspath('AllowedValues/Value', namespace))]
def __repr__(self):
if self.values:
return "Constraint: %s - %s" % (self.name, self.values)
else:
return "Constraint: %s" % self.name
class OperationsMetadata(object):
"""Initialize an OWS OperationMetadata construct"""
def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
self.name = elem.attrib['name']
self.formatOptions = ['text/xml']
parameters = []
self.methods = []
self.constraints = []
for verb in elem.findall(util.nspath('DCP/HTTP/*', namespace)):
url = util.testXMLAttribute(verb, util.nspath('href', XLINK_NAMESPACE))
if url is not None:
verb_constraints = [Constraint(conts, namespace) for conts in verb.findall(util.nspath('Constraint', namespace))]
self.methods.append({'constraints' : verb_constraints, 'type' : util.xmltag_split(verb.tag), 'url': url})
for parameter in elem.findall(util.nspath('Parameter', namespace)):
if namespace == OWS_NAMESPACE_1_1_0:
parameters.append((parameter.attrib['name'], {'values': [i.text for i in parameter.findall(util.nspath('AllowedValues/Value', namespace))]}))
else:
parameters.append((parameter.attrib['name'], {'values': [i.text for i in parameter.findall(util.nspath('Value', namespace))]}))
self.parameters = dict(parameters)
for constraint in elem.findall(util.nspath('Constraint', namespace)):
self.constraints.append(Constraint(constraint, namespace))
class BoundingBox(object):
"""Initialize an OWS BoundingBox construct"""
def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
self.minx = None
self.miny = None
self.maxx = None
self.maxy = None
val = elem.attrib.get('crs')
if val is not None:
self.crs = crs.Crs(val)
else:
self.crs = None
val = elem.attrib.get('dimensions')
if val is not None:
self.dimensions = int(util.testXMLValue(val, True))
else: # assume 2
self.dimensions = 2
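# The corners below are space-separated coordinate strings. When the CRS
# declares a yx axis order (e.g. some EPSG geographic CRSs), the parsed
# values are swapped so minx/miny and maxx/maxy stay x-first.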
val = elem.find(util.nspath('LowerCorner', namespace))
tmp = util.testXMLValue(val)
if tmp is not None:
xy = tmp.split()
if len(xy) > 1:
if self.crs is not None and self.crs.axisorder == 'yx':
self.minx, self.miny = xy[1], xy[0]
else:
self.minx, self.miny = xy[0], xy[1]
val = elem.find(util.nspath('UpperCorner', namespace))
tmp = util.testXMLValue(val)
if tmp is not None:
xy = tmp.split()
if len(xy) > 1:
if self.crs is not None and self.crs.axisorder == 'yx':
self.maxx, self.maxy = xy[1], xy[0]
else:
self.maxx, self.maxy = xy[0], xy[1]
class WGS84BoundingBox(BoundingBox):
"""WGS84 bbox, axis order xy"""
def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
BoundingBox.__init__(self, elem, namespace)
self.dimensions = 2
self.crs = crs.Crs('urn:ogc:def:crs:OGC:2:84')
class ExceptionReport(Exception):
"""OWS ExceptionReport"""
def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
self.exceptions = []
if hasattr(elem, 'getroot'):
elem = elem.getroot()
for i in elem.findall(util.nspath('Exception', namespace)):
tmp = {}
val = i.attrib.get('exceptionCode')
tmp['exceptionCode'] = util.testXMLValue(val, True)
val = i.attrib.get('locator')
tmp['locator'] = util.testXMLValue(val, True)
val = i.find(util.nspath('ExceptionText', namespace))
tmp['ExceptionText'] = util.testXMLValue(val)
self.exceptions.append(tmp)
# set topmost stacktrace as return message
self.code = self.exceptions[0]['exceptionCode']
self.locator = self.exceptions[0]['locator']
self.msg = self.exceptions[0]['ExceptionText']
self.xml = etree.tostring(elem)
def __str__(self):
return repr(self.msg)
|
Tejal011089/Medsyn2_app
|
refs/heads/master
|
utilities/doctype/sms_control/sms_control.py
|
19
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import load_json, nowdate, cstr
from webnotes.model.code import get_obj
from webnotes.model.doc import Document
from webnotes import msgprint
from webnotes.model.bean import getlist, copy_doclist
class DocType:
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
def validate_receiver_nos(self,receiver_list):
validated_receiver_list = []
for d in receiver_list:
# remove invalid characters
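# e.g. (illustrative) '+91-98765 43210' becomes '919876543210'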
invalid_char_list = [' ', '+', '-', '(', ')']
for x in invalid_char_list:
d = d.replace(x, '')
validated_receiver_list.append(d)
if not validated_receiver_list:
msgprint("Please enter valid mobile nos", raise_exception=1)
return validated_receiver_list
def get_sender_name(self):
"returns name as SMS sender"
sender_name = webnotes.conn.get_value('Global Defaults', None, 'sms_sender_name') or \
'ERPNXT'
if len(sender_name) > 6 and \
webnotes.conn.get_value("Control Panel", None, "country") == "India":
msgprint("""
As per TRAI rule, sender name must be exactly 6 characters.
Kindly change sender name in Setup --> Global Defaults.
Note: Hyphen, space, numeric digit, special characters are not allowed.
""", raise_exception=1)
return sender_name
def get_contact_number(self, arg):
"returns mobile number of the contact"
args = load_json(arg)
number = webnotes.conn.sql("""select mobile_no, phone from tabContact where name=%s and %s=%s""" %
('%s', args['key'], '%s'), (args['contact_name'], args['value']))
return number and (number[0][0] or number[0][1]) or ''
def send_form_sms(self, arg):
"called from client side"
args = load_json(arg)
self.send_sms([str(args['number'])], str(args['message']))
def send_sms(self, receiver_list, msg, sender_name = ''):
receiver_list = self.validate_receiver_nos(receiver_list)
arg = {
'receiver_list' : receiver_list,
'message' : msg,
'sender_name' : sender_name or self.get_sender_name()
}
if webnotes.conn.get_value('SMS Settings', None, 'sms_gateway_url'):
ret = self.send_via_gateway(arg)
msgprint(ret)
def send_via_gateway(self, arg):
ss = get_obj('SMS Settings', 'SMS Settings', with_children=1)
args = {ss.doc.message_parameter : arg.get('message')}
for d in getlist(ss.doclist, 'static_parameter_details'):
args[d.parameter] = d.value
resp = []
for d in arg.get('receiver_list'):
args[ss.doc.receiver_parameter] = d
resp.append(self.send_request(ss.doc.sms_gateway_url, args))
return resp
# Send Request
# =========================================================
def send_request(self, gateway_url, args):
import httplib, urllib
server, api_url = self.scrub_gateway_url(gateway_url)
conn = httplib.HTTPConnection(server) # open connection
headers = {}
headers['Accept'] = "text/plain, text/html, */*"
conn.request('GET', api_url + urllib.urlencode(args), headers = headers) # send request
resp = conn.getresponse() # get response
resp = resp.read()
return resp
# Split gateway url to server and api url
# =========================================================
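# e.g. (illustrative) 'http://sms.example.com/api/send' splits into
# ('sms.example.com', '/api/send?'); the trailing '?' lets the urlencoded
# parameters in send_request be appended directly.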
def scrub_gateway_url(self, url):
url = url.replace('http://', '').strip().split('/')
server = url.pop(0)
api_url = '/' + '/'.join(url)
if not api_url.endswith('?'):
api_url += '?'
return server, api_url
# Create SMS Log
# =========================================================
def create_sms_log(self, arg, sent_sms):
sl = Document('SMS Log')
sl.sender_name = arg['sender_name']
sl.sent_on = nowdate()
sl.receiver_list = cstr(arg['receiver_list'])
sl.message = arg['message']
sl.no_of_requested_sms = len(arg['receiver_list'])
sl.no_of_sent_sms = sent_sms
sl.save(new=1)
|
recognai/spaCy
|
refs/heads/master
|
spacy/lang/hi/lex_attrs.py
|
1
|
# coding: utf8
from __future__ import unicode_literals
from ..norm_exceptions import BASE_NORMS
from ...attrs import NORM
from ...attrs import LIKE_NUM
from ...util import add_lookups
_stem_suffixes = [
["ो","े","ू","ु","ी","ि","ा"],
["कर","ाओ","िए","ाई","ाए","ने","नी","ना","ते","ीं","ती","ता","ाँ","ां","ों","ें"],
["ाकर","ाइए","ाईं","ाया","ेगी","ेगा","ोगी","ोगे","ाने","ाना","ाते","ाती","ाता","तीं","ाओं","ाएं","ुओं","ुएं","ुआं"],
["ाएगी","ाएगा","ाओगी","ाओगे","एंगी","ेंगी","एंगे","ेंगे","ूंगी","ूंगा","ातीं","नाओं","नाएं","ताओं","ताएं","ियाँ","ियों","ियां"],
["ाएंगी","ाएंगे","ाऊंगी","ाऊंगा","ाइयाँ","ाइयों","ाइयां"]
]
# reference 1: https://en.wikipedia.org/wiki/Indian_numbering_system
# reference 2: https://blogs.transparent.com/hindi/hindi-numbers-1-100/
_num_words = ['शून्य', 'एक', 'दो', 'तीन', 'चार', 'पांच', 'छह', 'सात', 'आठ', 'नौ', 'दस',
'ग्यारह', 'बारह', 'तेरह', 'चौदह', 'पंद्रह', 'सोलह', 'सत्रह', 'अठारह', 'उन्नीस',
'बीस', 'तीस', 'चालीस', 'पचास', 'साठ', 'सत्तर', 'अस्सी', 'नब्बे', 'सौ', 'हज़ार',
'लाख', 'करोड़', 'अरब', 'खरब']
def norm(string):
# normalise base exceptions, e.g. punctuation or currency symbols
if string in BASE_NORMS:
return BASE_NORMS[string]
# set stem word as norm, if available, adapted from:
# http://computing.open.ac.uk/Sites/EACLSouthAsia/Papers/p6-Ramanathan.pdf
# http://research.variancia.com/hindi_stemmer/
# https://github.com/taranjeet/hindi-tokenizer/blob/master/HindiTokenizer.py#L142
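# _stem_suffixes groups suffixes by length (1-5 characters); scanning the
# groups in reverse strips the longest matching suffix first, and
# len(suffix_group[0]) works because all suffixes in a group share one length.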
for suffix_group in reversed(_stem_suffixes):
length = len(suffix_group[0])
if len(string) <= length:
break
for suffix in suffix_group:
if string.endswith(suffix):
return string[:-length]
return string
def like_num(text):
text = text.replace(',', '').replace('.', '')
if text.isdigit():
return True
if text.count('/') == 1:
num, denom = text.split('/')
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
return False
LEX_ATTRS = {
NORM: norm,
LIKE_NUM: like_num
}
|
nvoron23/socialite
|
refs/heads/master
|
jython/Lib/distutils/text_file.py
|
7
|
"""text_file
provides the TextFile class, which gives an interface to text files
that (optionally) takes care of stripping comments, ignoring blank
lines, and joining lines with backslashes."""
__revision__ = "$Id$"
from types import *
import sys, os, string
class TextFile:
"""Provides a file-like object that takes care of all the things you
commonly want to do when processing a text file that has some
line-by-line syntax: strip comments (as long as "#" is your
comment character), skip blank lines, join adjacent lines by
escaping the newline (ie. backslash at end of line), strip
leading and/or trailing whitespace. All of these are optional
and independently controllable.
Provides a 'warn()' method so you can generate warning messages that
report physical line number, even if the logical line in question
spans multiple physical lines. Also provides 'unreadline()' for
implementing line-at-a-time lookahead.
Constructor is called as:
TextFile (filename=None, file=None, **options)
It bombs (RuntimeError) if both 'filename' and 'file' are None;
'filename' should be a string, and 'file' a file object (or
something that provides 'readline()' and 'close()' methods). It is
recommended that you supply at least 'filename', so that TextFile
can include it in warning messages. If 'file' is not supplied,
TextFile creates its own using the 'open()' builtin.
The options are all boolean, and affect the value returned by
'readline()':
strip_comments [default: true]
strip from "#" to end-of-line, as well as any whitespace
leading up to the "#" -- unless it is escaped by a backslash
lstrip_ws [default: false]
strip leading whitespace from each line before returning it
rstrip_ws [default: true]
strip trailing whitespace (including line terminator!) from
each line before returning it
skip_blanks [default: true]
skip lines that are empty *after* stripping comments and
whitespace. (If both lstrip_ws and rstrip_ws are false,
then some lines may consist of solely whitespace: these will
*not* be skipped, even if 'skip_blanks' is true.)
join_lines [default: false]
if a backslash is the last non-newline character on a line
after stripping comments and whitespace, join the following line
to it to form one "logical line"; if N consecutive lines end
with a backslash, then N+1 physical lines will be joined to
form one logical line.
collapse_join [default: false]
strip leading whitespace from lines that are joined to their
predecessor; only matters if (join_lines and not lstrip_ws)
Note that since 'rstrip_ws' can strip the trailing newline, the
semantics of 'readline()' must differ from those of the builtin file
object's 'readline()' method! In particular, 'readline()' returns
None for end-of-file: an empty string might just be a blank line (or
an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
not."""
default_options = { 'strip_comments': 1,
'skip_blanks': 1,
'lstrip_ws': 0,
'rstrip_ws': 1,
'join_lines': 0,
'collapse_join': 0,
}
def __init__ (self, filename=None, file=None, **options):
"""Construct a new TextFile object. At least one of 'filename'
(a string) and 'file' (a file-like object) must be supplied.
The keyword argument options are described above and affect
the values returned by 'readline()'."""
if filename is None and file is None:
raise RuntimeError, \
"you must supply either or both of 'filename' and 'file'"
# set values for all options -- either from client option hash
# or fallback to default_options
for opt in self.default_options.keys():
if options.has_key (opt):
setattr (self, opt, options[opt])
else:
setattr (self, opt, self.default_options[opt])
# sanity check client option hash
for opt in options.keys():
if not self.default_options.has_key (opt):
raise KeyError, "invalid TextFile option '%s'" % opt
if file is None:
self.open (filename)
else:
self.filename = filename
self.file = file
self.current_line = 0 # assuming that file is at BOF!
# 'linebuf' is a stack of lines that will be emptied before we
# actually read from the file; it's only populated by an
# 'unreadline()' operation
self.linebuf = []
def open (self, filename):
"""Open a new file named 'filename'. This overrides both the
'filename' and 'file' arguments to the constructor."""
self.filename = filename
self.file = open (self.filename, 'r')
self.current_line = 0
def close (self):
"""Close the current file and forget everything we know about it
(filename, current line number)."""
self.file.close ()
self.file = None
self.filename = None
self.current_line = None
def gen_error (self, msg, line=None):
outmsg = []
if line is None:
line = self.current_line
outmsg.append(self.filename + ", ")
if type (line) in (ListType, TupleType):
outmsg.append("lines %d-%d: " % tuple (line))
else:
outmsg.append("line %d: " % line)
outmsg.append(str(msg))
return string.join(outmsg, "")
def error (self, msg, line=None):
raise ValueError, "error: " + self.gen_error(msg, line)
def warn (self, msg, line=None):
"""Print (to stderr) a warning message tied to the current logical
line in the current file. If the current logical line in the
file spans multiple physical lines, the warning refers to the
whole range, eg. "lines 3-5". If 'line' supplied, it overrides
the current line number; it may be a list or tuple to indicate a
range of physical lines, or an integer for a single physical
line."""
sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")
def readline (self):
"""Read and return a single logical line from the current file (or
from an internal buffer if lines have previously been "unread"
with 'unreadline()'). If the 'join_lines' option is true, this
may involve reading multiple physical lines concatenated into a
single string. Updates the current line number, so calling
'warn()' after 'readline()' emits a warning about the physical
line(s) just read. Returns None on end-of-file, since the empty
string can occur if 'rstrip_ws' is true but 'skip_blanks' is
not."""
# If any "unread" lines waiting in 'linebuf', return the top
# one. (We don't actually buffer read-ahead data -- lines only
# get put in 'linebuf' if the client explicitly does an
# 'unreadline()'.)
if self.linebuf:
line = self.linebuf[-1]
del self.linebuf[-1]
return line
buildup_line = ''
while 1:
# read the line, make it None if EOF
line = self.file.readline()
if line == '': line = None
if self.strip_comments and line:
# Look for the first "#" in the line. If none, never
# mind. If we find one and it's the first character, or
# is not preceded by "\", then it starts a comment --
# strip the comment, strip whitespace before it, and
# carry on. Otherwise, it's just an escaped "#", so
# unescape it (and any other escaped "#"'s that might be
# lurking in there) and otherwise leave the line alone.
pos = string.find (line, "#")
if pos == -1: # no "#" -- no comments
pass
# It's definitely a comment -- either "#" is the first
# character, or it's elsewhere and unescaped.
elif pos == 0 or line[pos-1] != "\\":
# Have to preserve the trailing newline, because it's
# the job of a later step (rstrip_ws) to remove it --
# and if rstrip_ws is false, we'd better preserve it!
# (NB. this means that if the final line is all comment
# and has no trailing newline, we will think that it's
# EOF; I think that's OK.)
eol = (line[-1] == '\n') and '\n' or ''
line = line[0:pos] + eol
# If all that's left is whitespace, then skip line
# *now*, before we try to join it to 'buildup_line' --
# that way constructs like
# hello \\
# # comment that should be ignored
# there
# result in "hello there".
if string.strip(line) == "":
continue
else: # it's an escaped "#"
line = string.replace (line, "\\#", "#")
# did previous line end with a backslash? then accumulate
if self.join_lines and buildup_line:
# oops: end of file
if line is None:
self.warn ("continuation line immediately precedes "
"end-of-file")
return buildup_line
if self.collapse_join:
line = string.lstrip (line)
line = buildup_line + line
# careful: pay attention to line number when incrementing it
if type (self.current_line) is ListType:
self.current_line[1] = self.current_line[1] + 1
else:
self.current_line = [self.current_line,
self.current_line+1]
# just an ordinary line, read it as usual
else:
if line is None: # eof
return None
# still have to be careful about incrementing the line number!
if type (self.current_line) is ListType:
self.current_line = self.current_line[1] + 1
else:
self.current_line = self.current_line + 1
# strip whitespace however the client wants (leading and
# trailing, or one or the other, or neither)
if self.lstrip_ws and self.rstrip_ws:
line = string.strip (line)
elif self.lstrip_ws:
line = string.lstrip (line)
elif self.rstrip_ws:
line = string.rstrip (line)
# blank line (whether we rstrip'ed or not)? skip to next line
# if appropriate
if (line == '' or line == '\n') and self.skip_blanks:
continue
if self.join_lines:
if line[-1] == '\\':
buildup_line = line[:-1]
continue
if line[-2:] == '\\\n':
buildup_line = line[0:-2] + '\n'
continue
# well, I guess there's some actual content there: return it
return line
# readline ()
def readlines (self):
"""Read and return the list of all logical lines remaining in the
current file."""
lines = []
while 1:
line = self.readline()
if line is None:
return lines
lines.append (line)
def unreadline (self, line):
"""Push 'line' (a string) onto an internal buffer that will be
checked by future 'readline()' calls. Handy for implementing
a parser with line-at-a-time lookahead."""
self.linebuf.append (line)
if __name__ == "__main__":
test_data = """# test file
line 3 \\
# intervening comment
continues on next line
"""
# result 1: no fancy options
result1 = map (lambda x: x + "\n", string.split (test_data, "\n")[0:-1])
# result 2: just strip comments
result2 = ["\n",
"line 3 \\\n",
" continues on next line\n"]
# result 3: just strip blank lines
result3 = ["# test file\n",
"line 3 \\\n",
"# intervening comment\n",
" continues on next line\n"]
# result 4: default, strip comments, blank lines, and trailing whitespace
result4 = ["line 3 \\",
" continues on next line"]
# result 5: strip comments and blanks, plus join lines (but don't
# "collapse" joined lines)
result5 = ["line 3 continues on next line"]
# result 6: strip comments and blanks, plus join lines (and
# "collapse" joined lines)
result6 = ["line 3 continues on next line"]
def test_input (count, description, file, expected_result):
result = file.readlines ()
# result = string.join (result, '')
if result == expected_result:
print "ok %d (%s)" % (count, description)
else:
print "not ok %d (%s):" % (count, description)
print "** expected:"
print expected_result
print "** received:"
print result
filename = "test.txt"
out_file = open (filename, "w")
out_file.write (test_data)
out_file.close ()
in_file = TextFile (filename, strip_comments=0, skip_blanks=0,
lstrip_ws=0, rstrip_ws=0)
test_input (1, "no processing", in_file, result1)
in_file = TextFile (filename, strip_comments=1, skip_blanks=0,
lstrip_ws=0, rstrip_ws=0)
test_input (2, "strip comments", in_file, result2)
in_file = TextFile (filename, strip_comments=0, skip_blanks=1,
lstrip_ws=0, rstrip_ws=0)
test_input (3, "strip blanks", in_file, result3)
in_file = TextFile (filename)
test_input (4, "default processing", in_file, result4)
in_file = TextFile (filename, strip_comments=1, skip_blanks=1,
join_lines=1, rstrip_ws=1)
test_input (5, "join lines without collapsing", in_file, result5)
in_file = TextFile (filename, strip_comments=1, skip_blanks=1,
join_lines=1, rstrip_ws=1, collapse_join=1)
test_input (6, "join lines with collapsing", in_file, result6)
os.remove (filename)
|
andrewsmedina/horizon
|
refs/heads/master
|
horizon/horizon/models.py
|
27
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stub file to work around django bug: https://code.djangoproject.com/ticket/7198
"""
|
sgallagher/anaconda
|
refs/heads/master
|
pyanaconda/modules/payloads/source/closest_mirror/closest_mirror.py
|
5
|
#
# Source module for the closest mirror.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.core.i18n import _
from pyanaconda.modules.payloads.constants import SourceType
from pyanaconda.modules.payloads.source.closest_mirror.closest_mirror_interface import \
ClosestMirrorSourceInterface
from pyanaconda.modules.payloads.source.repo_files.repo_files import RepoFilesSourceModule
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
class ClosestMirrorSourceModule(RepoFilesSourceModule):
"""The source payload module for the closest mirror."""
def for_publication(self):
"""Get the interface used to publish this source."""
return ClosestMirrorSourceInterface(self)
@property
def type(self):
"""Get type of this source."""
return SourceType.CLOSEST_MIRROR
@property
def description(self):
"""Get description of this source."""
return _("Closest mirror")
|
bottompawn/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test_pulldom.py
|
118
|
import io
import unittest
import sys
import xml.sax
from xml.sax.xmlreader import AttributesImpl
from xml.dom import pulldom
from test.support import run_unittest, findfile
tstfile = findfile("test.xml", subdir="xmltestdata")
# A handy XML snippet, containing attributes, a namespace prefix, and a
# self-closing tag:
SMALL_SAMPLE = """<?xml version="1.0"?>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:xdc="http://www.xml.com/books">
<!-- A comment -->
<title>Introduction to XSL</title>
<hr/>
<p><xdc:author xdc:attrib="prefixed attribute" attrib="other attrib">A. Namespace</xdc:author></p>
</html>"""
class PullDOMTestCase(unittest.TestCase):
def test_parse(self):
"""Minimal test of DOMEventStream.parse()"""
# This just tests that parsing from a stream works. Actual parser
# semantics are tested using parseString with a more focused XML
# fragment.
# Test with a filename:
handler = pulldom.parse(tstfile)
self.addCleanup(handler.stream.close)
list(handler)
# Test with a file object:
with open(tstfile, "rb") as fin:
list(pulldom.parse(fin))
def test_parse_semantics(self):
"""Test DOMEventStream parsing semantics."""
items = pulldom.parseString(SMALL_SAMPLE)
evt, node = next(items)
# Just check the node is a Document:
self.assertTrue(hasattr(node, "createElement"))
self.assertEqual(pulldom.START_DOCUMENT, evt)
evt, node = next(items)
self.assertEqual(pulldom.START_ELEMENT, evt)
self.assertEqual("html", node.tagName)
self.assertEqual(2, len(node.attributes))
self.assertEqual(node.attributes.getNamedItem("xmlns:xdc").value,
"http://www.xml.com/books")
evt, node = next(items)
self.assertEqual(pulldom.CHARACTERS, evt) # Line break
evt, node = next(items)
# XXX - A comment should be reported here!
# self.assertEqual(pulldom.COMMENT, evt)
# Line break after swallowed comment:
self.assertEqual(pulldom.CHARACTERS, evt)
evt, node = next(items)
self.assertEqual("title", node.tagName)
title_node = node
evt, node = next(items)
self.assertEqual(pulldom.CHARACTERS, evt)
self.assertEqual("Introduction to XSL", node.data)
evt, node = next(items)
self.assertEqual(pulldom.END_ELEMENT, evt)
self.assertEqual("title", node.tagName)
self.assertTrue(title_node is node)
evt, node = next(items)
self.assertEqual(pulldom.CHARACTERS, evt)
evt, node = next(items)
self.assertEqual(pulldom.START_ELEMENT, evt)
self.assertEqual("hr", node.tagName)
evt, node = next(items)
self.assertEqual(pulldom.END_ELEMENT, evt)
self.assertEqual("hr", node.tagName)
evt, node = next(items)
self.assertEqual(pulldom.CHARACTERS, evt)
evt, node = next(items)
self.assertEqual(pulldom.START_ELEMENT, evt)
self.assertEqual("p", node.tagName)
evt, node = next(items)
self.assertEqual(pulldom.START_ELEMENT, evt)
self.assertEqual("xdc:author", node.tagName)
evt, node = next(items)
self.assertEqual(pulldom.CHARACTERS, evt)
evt, node = next(items)
self.assertEqual(pulldom.END_ELEMENT, evt)
self.assertEqual("xdc:author", node.tagName)
evt, node = next(items)
self.assertEqual(pulldom.END_ELEMENT, evt)
evt, node = next(items)
self.assertEqual(pulldom.CHARACTERS, evt)
evt, node = next(items)
self.assertEqual(pulldom.END_ELEMENT, evt)
# XXX No END_DOCUMENT item is ever obtained:
#evt, node = next(items)
#self.assertEqual(pulldom.END_DOCUMENT, evt)
def test_expandItem(self):
"""Ensure expandItem works as expected."""
items = pulldom.parseString(SMALL_SAMPLE)
# Loop through the nodes until we get to a "title" start tag:
for evt, item in items:
if evt == pulldom.START_ELEMENT and item.tagName == "title":
items.expandNode(item)
self.assertEqual(1, len(item.childNodes))
break
else:
self.fail("No \"title\" element detected in SMALL_SAMPLE!")
# Loop until we get to the next start-element:
for evt, node in items:
if evt == pulldom.START_ELEMENT:
break
self.assertEqual("hr", node.tagName,
"expandNode did not leave DOMEventStream in the correct state.")
# Attempt to expand a standalone element:
items.expandNode(node)
self.assertEqual(next(items)[0], pulldom.CHARACTERS)
evt, node = next(items)
self.assertEqual(node.tagName, "p")
items.expandNode(node)
next(items) # Skip character data
evt, node = next(items)
self.assertEqual(node.tagName, "html")
with self.assertRaises(StopIteration):
next(items)
items.clear()
self.assertIsNone(items.parser)
self.assertIsNone(items.stream)
@unittest.expectedFailure
def test_comment(self):
"""PullDOM does not receive "comment" events."""
items = pulldom.parseString(SMALL_SAMPLE)
for evt, _ in items:
if evt == pulldom.COMMENT:
break
else:
self.fail("No comment was encountered")
@unittest.expectedFailure
def test_end_document(self):
"""PullDOM does not receive "end-document" events."""
items = pulldom.parseString(SMALL_SAMPLE)
# Read all of the nodes up to and including </html>:
for evt, node in items:
if evt == pulldom.END_ELEMENT and node.tagName == "html":
break
try:
# Assert that the next node is END_DOCUMENT:
evt, node = next(items)
self.assertEqual(pulldom.END_DOCUMENT, evt)
except StopIteration:
self.fail(
"Ran out of events, but should have received END_DOCUMENT")
class ThoroughTestCase(unittest.TestCase):
"""Test the hard-to-reach parts of pulldom."""
def test_thorough_parse(self):
"""Test some of the hard-to-reach parts of PullDOM."""
self._test_thorough(pulldom.parse(None, parser=SAXExerciser()))
@unittest.expectedFailure
def test_sax2dom_fail(self):
"""SAX2DOM can"t handle a PI before the root element."""
pd = SAX2DOMTestHelper(None, SAXExerciser(), 12)
self._test_thorough(pd)
def test_thorough_sax2dom(self):
"""Test some of the hard-to-reach parts of SAX2DOM."""
pd = SAX2DOMTestHelper(None, SAX2DOMExerciser(), 12)
self._test_thorough(pd, False)
def _test_thorough(self, pd, before_root=True):
"""Test some of the hard-to-reach parts of the parser, using a mock
parser."""
evt, node = next(pd)
self.assertEqual(pulldom.START_DOCUMENT, evt)
# Just check the node is a Document:
self.assertTrue(hasattr(node, "createElement"))
if before_root:
evt, node = next(pd)
self.assertEqual(pulldom.COMMENT, evt)
self.assertEqual("a comment", node.data)
evt, node = next(pd)
self.assertEqual(pulldom.PROCESSING_INSTRUCTION, evt)
self.assertEqual("target", node.target)
self.assertEqual("data", node.data)
evt, node = next(pd)
self.assertEqual(pulldom.START_ELEMENT, evt)
self.assertEqual("html", node.tagName)
evt, node = next(pd)
self.assertEqual(pulldom.COMMENT, evt)
self.assertEqual("a comment", node.data)
evt, node = next(pd)
self.assertEqual(pulldom.PROCESSING_INSTRUCTION, evt)
self.assertEqual("target", node.target)
self.assertEqual("data", node.data)
evt, node = next(pd)
self.assertEqual(pulldom.START_ELEMENT, evt)
self.assertEqual("p", node.tagName)
evt, node = next(pd)
self.assertEqual(pulldom.CHARACTERS, evt)
self.assertEqual("text", node.data)
evt, node = next(pd)
self.assertEqual(pulldom.END_ELEMENT, evt)
self.assertEqual("p", node.tagName)
evt, node = next(pd)
self.assertEqual(pulldom.END_ELEMENT, evt)
self.assertEqual("html", node.tagName)
evt, node = next(pd)
self.assertEqual(pulldom.END_DOCUMENT, evt)
class SAXExerciser(object):
"""A fake sax parser that calls some of the harder-to-reach sax methods to
ensure it emits the correct events"""
def setContentHandler(self, handler):
self._handler = handler
def parse(self, _):
h = self._handler
h.startDocument()
# The next two items ensure that items preceding the first
# start_element are properly stored and emitted:
h.comment("a comment")
h.processingInstruction("target", "data")
h.startElement("html", AttributesImpl({}))
h.comment("a comment")
h.processingInstruction("target", "data")
h.startElement("p", AttributesImpl({"class": "paraclass"}))
h.characters("text")
h.endElement("p")
h.endElement("html")
h.endDocument()
def stub(self, *args, **kwargs):
"""Stub method. Does nothing."""
pass
setProperty = stub
setFeature = stub
class SAX2DOMExerciser(SAXExerciser):
"""The same as SAXExerciser, but without the processing instruction and
comment before the root element, because S2D can't handle it"""
def parse(self, _):
h = self._handler
h.startDocument()
h.startElement("html", AttributesImpl({}))
h.comment("a comment")
h.processingInstruction("target", "data")
h.startElement("p", AttributesImpl({"class": "paraclass"}))
h.characters("text")
h.endElement("p")
h.endElement("html")
h.endDocument()
class SAX2DOMTestHelper(pulldom.DOMEventStream):
"""Allows us to drive SAX2DOM from a DOMEventStream."""
def reset(self):
self.pulldom = pulldom.SAX2DOM()
# This content handler relies on namespace support
self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
self.parser.setContentHandler(self.pulldom)
class SAX2DOMTestCase(unittest.TestCase):
def confirm(self, test, testname="Test"):
self.assertTrue(test, testname)
def test_basic(self):
"""Ensure SAX2DOM can parse from a stream."""
with io.StringIO(SMALL_SAMPLE) as fin:
sd = SAX2DOMTestHelper(fin, xml.sax.make_parser(),
len(SMALL_SAMPLE))
for evt, node in sd:
if evt == pulldom.START_ELEMENT and node.tagName == "html":
break
# Because the buffer is the same length as the XML, all the
# nodes should have been parsed and added:
self.assertGreater(len(node.childNodes), 0)
def testSAX2DOM(self):
"""Ensure SAX2DOM expands nodes as expected."""
sax2dom = pulldom.SAX2DOM()
sax2dom.startDocument()
sax2dom.startElement("doc", {})
sax2dom.characters("text")
sax2dom.startElement("subelm", {})
sax2dom.characters("text")
sax2dom.endElement("subelm")
sax2dom.characters("text")
sax2dom.endElement("doc")
sax2dom.endDocument()
doc = sax2dom.document
root = doc.documentElement
(text1, elm1, text2) = root.childNodes
text3 = elm1.childNodes[0]
self.assertIsNone(text1.previousSibling)
self.assertIs(text1.nextSibling, elm1)
self.assertIs(elm1.previousSibling, text1)
self.assertIs(elm1.nextSibling, text2)
self.assertIs(text2.previousSibling, elm1)
self.assertIsNone(text2.nextSibling)
self.assertIsNone(text3.previousSibling)
self.assertIsNone(text3.nextSibling)
self.assertIs(root.parentNode, doc)
self.assertIs(text1.parentNode, root)
self.assertIs(elm1.parentNode, root)
self.assertIs(text2.parentNode, root)
self.assertIs(text3.parentNode, elm1)
doc.unlink()
def test_main():
run_unittest(PullDOMTestCase, ThoroughTestCase, SAX2DOMTestCase)
if __name__ == "__main__":
test_main()
|
zhangpf/vbox
|
refs/heads/master
|
src/VBox/ValidationKit/testmanager/core/testresults.py
|
3
|
# -*- coding: utf-8 -*-
# $Id$
# pylint: disable=C0302
## @todo Rename this file to testresult.py!
"""
Test Manager - Fetch test results.
"""
__copyright__ = \
"""
Copyright (C) 2012-2014 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision$"
# Standard python imports.
import unittest;
# Validation Kit imports.
from common import constants;
from testmanager import config;
from testmanager.core.base import ModelDataBase, ModelLogicBase, ModelDataBaseTestCase, TMExceptionBase, TMTooManyRows;
from testmanager.core.testgroup import TestGroupData
from testmanager.core.build import BuildDataEx
from testmanager.core.testbox import TestBoxData
from testmanager.core.testcase import TestCaseData
from testmanager.core.schedgroup import SchedGroupData
from testmanager.core.systemlog import SystemLogData, SystemLogLogic;
class TestResultData(ModelDataBase):
"""
Test case execution result data
"""
## @name TestStatus_T
# @{
ksTestStatus_Running = 'running';
ksTestStatus_Success = 'success';
ksTestStatus_Skipped = 'skipped';
ksTestStatus_BadTestBox = 'bad-testbox';
ksTestStatus_Aborted = 'aborted';
ksTestStatus_Failure = 'failure';
ksTestStatus_TimedOut = 'timed-out';
ksTestStatus_Rebooted = 'rebooted';
## @}
## List of relatively harmless (to testgroup/case) statuses.
kasHarmlessTestStatuses = [ ksTestStatus_Skipped, ksTestStatus_BadTestBox, ksTestStatus_Aborted, ];
## List of bad statuses.
kasBadTestStatuses = [ ksTestStatus_Failure, ksTestStatus_TimedOut, ksTestStatus_Rebooted, ];
ksIdAttr = 'idTestResult';
ksParam_idTestResult = 'TestResultData_idTestResult';
ksParam_idTestResultParent = 'TestResultData_idTestResultParent';
ksParam_idTestSet = 'TestResultData_idTestSet';
ksParam_tsCreated = 'TestResultData_tsCreated';
ksParam_tsElapsed = 'TestResultData_tsElapsed';
ksParam_idStrName = 'TestResultData_idStrName';
ksParam_cErrors = 'TestResultData_cErrors';
ksParam_enmStatus = 'TestResultData_enmStatus';
ksParam_iNestingDepth = 'TestResultData_iNestingDepth';
kasValidValues_enmStatus = [
ksTestStatus_Running,
ksTestStatus_Success,
ksTestStatus_Skipped,
ksTestStatus_BadTestBox,
ksTestStatus_Aborted,
ksTestStatus_Failure,
ksTestStatus_TimedOut,
ksTestStatus_Rebooted
];
def __init__(self):
ModelDataBase.__init__(self)
self.idTestResult = None
self.idTestResultParent = None
self.idTestSet = None
self.tsCreated = None
self.tsElapsed = None
self.idStrName = None
self.cErrors = 0;
self.enmStatus = None
self.iNestingDepth = None
def initFromDbRow(self, aoRow):
"""
Reinitialize from a SELECT * FROM TestResults.
Return self. Raises exception if no row.
"""
if aoRow is None:
raise TMExceptionBase('Test result record not found.')
self.idTestResult = aoRow[0]
self.idTestResultParent = aoRow[1]
self.idTestSet = aoRow[2]
self.tsCreated = aoRow[3]
self.tsElapsed = aoRow[4]
self.idStrName = aoRow[5]
self.cErrors = aoRow[6]
self.enmStatus = aoRow[7]
self.iNestingDepth = aoRow[8]
return self;
def isFailure(self):
""" Check if it's a real failure. """
return self.enmStatus in self.kasBadTestStatuses;
class TestResultDataEx(TestResultData):
"""
Extended test result data class.
This is intended for use as a node in a result tree. This is not intended
for serialization to parameters or vice versa. Use TestResultLogic to
construct the tree.
"""
def __init__(self):
TestResultData.__init__(self)
self.sName = None; # idStrName resolved.
self.oParent = None; # idTestResultParent within the tree.
self.aoChildren = []; # TestResultDataEx;
self.aoValues = []; # TestResultValue;
self.aoMsgs = []; # TestResultMsg;
self.aoFiles = []; # TestResultFile;
def initFromDbRow(self, aoRow):
"""
Initialize from a query like this:
SELECT TestResults.*, TestResultStrTab.sValue
FROM TestResults, TestResultStrTab
WHERE TestResultStrTab.idStr = TestResults.idStrName
Note! The caller is expected to fetch children, values, failure
details, and files.
"""
self.sName = None;
self.oParent = None;
self.aoChildren = [];
self.aoValues = [];
self.aoMsgs = [];
self.aoFiles = [];
TestResultData.initFromDbRow(self, aoRow);
self.sName = aoRow[9];
return self;
class TestResultValueData(ModelDataBase):
"""
Test result value data.
"""
ksIdAttr = 'idTestResultValue';
ksParam_idTestResultValue = 'TestResultValue_idTestResultValue';
ksParam_idTestResult = 'TestResultValue_idTestResult';
ksParam_idTestSet = 'TestResultValue_idTestSet';
ksParam_tsCreated = 'TestResultValue_tsCreated';
ksParam_idStrName = 'TestResultValue_idStrName';
ksParam_lValue = 'TestResultValue_lValue';
ksParam_iUnit = 'TestResultValue_iUnit';
def __init__(self):
ModelDataBase.__init__(self)
self.idTestResultValue = None;
self.idTestResult = None;
self.idTestSet = None;
self.tsCreated = None;
self.idStrName = None;
self.lValue = None;
self.iUnit = 0;
def initFromDbRow(self, aoRow):
"""
Reinitialize from a SELECT * FROM TestResultValues.
Return self. Raises exception if no row.
"""
if aoRow is None:
raise TMExceptionBase('Test result value record not found.')
self.idTestResultValue = aoRow[0];
self.idTestResult = aoRow[1];
self.idTestSet = aoRow[2];
self.tsCreated = aoRow[3];
self.idStrName = aoRow[4];
self.lValue = aoRow[5];
self.iUnit = aoRow[6];
return self;
class TestResultValueDataEx(TestResultValueData):
"""
Extends TestResultValue by resolving the value name and unit string.
"""
def __init__(self):
TestResultValueData.__init__(self)
self.sName = None;
self.sUnit = '';
def initFromDbRow(self, aoRow):
"""
Reinitialize from a query like this:
SELECT TestResultValues.*, TestResultStrTab.sValue
FROM TestResultValues, TestResultStrTab
WHERE TestResultStrTab.idStr = TestResultValues.idStrName
Return self. Raises exception if no row.
"""
TestResultValueData.initFromDbRow(self, aoRow);
self.sName = aoRow[7];
if self.iUnit < len(constants.valueunit.g_asNames):
self.sUnit = constants.valueunit.g_asNames[self.iUnit];
else:
self.sUnit = '<%d>' % (self.iUnit,);
return self;
class TestResultMsgData(ModelDataBase):
"""
Test result message data.
"""
ksIdAttr = 'idTestResultMsg';
ksParam_idTestResultMsg = 'TestResultValue_idTestResultMsg';
ksParam_idTestResult = 'TestResultValue_idTestResult';
ksParam_tsCreated = 'TestResultValue_tsCreated';
ksParam_idStrMsg = 'TestResultValue_idStrMsg';
ksParam_enmLevel = 'TestResultValue_enmLevel';
def __init__(self):
ModelDataBase.__init__(self)
self.idTestResultMsg = None;
self.idTestResult = None;
self.tsCreated = None;
self.idStrMsg = None;
self.enmLevel = None;
def initFromDbRow(self, aoRow):
"""
Reinitialize from a SELECT * FROM TestResultMsgs.
Return self. Raises exception if no row.
"""
if aoRow is None:
raise TMExceptionBase('Test result message record not found.')
self.idTestResultMsg = aoRow[0];
self.idTestResult = aoRow[1];
self.tsCreated = aoRow[2];
self.idStrMsg = aoRow[3];
self.enmLevel = aoRow[4];
return self;
class TestResultMsgDataEx(TestResultMsgData):
"""
Extends TestResultMsg by resolving the message string.
"""
def __init__(self):
TestResultMsgData.__init__(self)
self.sMsg = None;
def initFromDbRow(self, aoRow):
"""
Reinitialize from a query like this:
SELECT TestResultMsgs.*, TestResultStrTab.sValue
FROM TestResultMsgs, TestResultStrTab
WHERE TestResultStrTab.idStr = TestResultMsgs.idStrMsg
Return self. Raises exception if no row.
"""
TestResultMsgData.initFromDbRow(self, aoRow);
self.sMsg = aoRow[5];
return self;
class TestResultFileData(ModelDataBase):
"""
Test result file data.
"""
ksIdAttr = 'idTestResultFile';
ksParam_idTestResultFile = 'TestResultFile_idTestResultFile';
ksParam_idTestResult = 'TestResultFile_idTestResult';
ksParam_tsCreated = 'TestResultFile_tsCreated';
ksParam_idStrFile = 'TestResultFile_idStrFile';
ksParam_idStrDescription = 'TestResultFile_idStrDescription';
ksParam_idStrKind = 'TestResultFile_idStrKind';
ksParam_idStrMime = 'TestResultFile_idStrMime';
def __init__(self):
ModelDataBase.__init__(self)
self.idTestResultFile = None;
self.idTestResult = None;
self.tsCreated = None;
self.idStrFile = None;
self.idStrDescription = None;
self.idStrKind = None;
self.idStrMime = None;
def initFromDbRow(self, aoRow):
"""
Reinitialize from a SELECT * FROM TestResultFiles.
Return self. Raises exception if no row.
"""
if aoRow is None:
raise TMExceptionBase('Test result file record not found.')
self.idTestResultFile = aoRow[0];
self.idTestResult = aoRow[1];
self.tsCreated = aoRow[2];
self.idStrFile = aoRow[3];
self.idStrDescription = aoRow[4];
self.idStrKind = aoRow[5];
self.idStrMime = aoRow[6];
return self;
class TestResultFileDataEx(TestResultFileData):
"""
Extends TestResultFile by resolving the strings.
"""
def __init__(self):
TestResultFileData.__init__(self)
self.sFile = None;
self.sDescription = None;
self.sKind = None;
self.sMime = None;
def initFromDbRow(self, aoRow):
"""
Reinitialize from a query like this:
SELECT TestResultFiles.*,
StrTabFile.sValue AS sFile,
StrTabDesc.sValue AS sDescription,
StrTabKind.sValue AS sKind,
StrTabMime.sValue AS sMime
FROM ...
Return self. Raises exception if no row.
"""
TestResultFileData.initFromDbRow(self, aoRow);
self.sFile = aoRow[7];
self.sDescription = aoRow[8];
self.sKind = aoRow[9];
self.sMime = aoRow[10];
return self;
def initFakeMainLog(self, oTestSet):
"""
Reinitializes to represent the main.log object (not in DB).
Returns self.
"""
self.idTestResultFile = 0;
self.idTestResult = oTestSet.idTestResult;
self.tsCreated = oTestSet.tsCreated;
self.idStrFile = None;
self.idStrDescription = None;
self.idStrKind = None;
self.idStrMime = None;
self.sFile = 'main.log';
self.sDescription = '';
self.sKind = 'log/main';
self.sMime = 'text/plain';
return self;
def isProbablyUtf8Encoded(self):
"""
Checks if the file is likely to be UTF-8 encoded.
"""
if self.sMime in [ 'text/plain', 'text/html' ]:
return True;
return False;
def getMimeWithEncoding(self):
"""
Gets the MIME type with encoding if likely to be UTF-8.
"""
if self.isProbablyUtf8Encoded():
return '%s; charset=utf-8' % (self.sMime,);
return self.sMime;
class TestResultListingData(ModelDataBase): # pylint: disable=R0902
"""
Test case result data representation for table listing
"""
def __init__(self):
"""Initialize"""
ModelDataBase.__init__(self)
self.idTestSet = None
self.idBuildCategory = None;
self.sProduct = None
self.sRepository = None;
self.sBranch = None
self.sType = None
self.idBuild = None;
self.sVersion = None;
self.iRevision = None
self.sOs = None;
self.sOsVersion = None;
self.sArch = None;
self.sCpuVendor = None;
self.sCpuName = None;
self.cCpus = None;
self.fCpuHwVirt = None;
self.fCpuNestedPaging = None;
self.fCpu64BitGuest = None;
self.idTestBox = None
self.sTestBoxName = None
self.tsCreated = None
self.tsElapsed = None
self.enmStatus = None
self.cErrors = None;
self.idTestCase = None
self.sTestCaseName = None
self.sBaseCmd = None
self.sArgs = None
self.idBuildTestSuite = None;
self.iRevisionTestSuite = None;
def initFromDbRow(self, aoRow):
"""
Reinitialize from a database query.
Return self. Raises exception if no row.
"""
if aoRow is None:
raise TMExceptionBase('Test result record not found.')
self.idTestSet = aoRow[0];
self.idBuildCategory = aoRow[1];
self.sProduct = aoRow[2];
self.sRepository = aoRow[3];
self.sBranch = aoRow[4];
self.sType = aoRow[5];
self.idBuild = aoRow[6];
self.sVersion = aoRow[7];
self.iRevision = aoRow[8];
self.sOs = aoRow[9];
self.sOsVersion = aoRow[10];
self.sArch = aoRow[11];
self.sCpuVendor = aoRow[12];
self.sCpuName = aoRow[13];
self.cCpus = aoRow[14];
self.fCpuHwVirt = aoRow[15];
self.fCpuNestedPaging = aoRow[16];
self.fCpu64BitGuest = aoRow[17];
self.idTestBox = aoRow[18];
self.sTestBoxName = aoRow[19];
self.tsCreated = aoRow[20];
self.tsElapsed = aoRow[21];
self.enmStatus = aoRow[22];
self.cErrors = aoRow[23];
self.idTestCase = aoRow[24];
self.sTestCaseName = aoRow[25];
self.sBaseCmd = aoRow[26];
self.sArgs = aoRow[27];
self.idBuildTestSuite = aoRow[28];
self.iRevisionTestSuite = aoRow[29];
return self
class TestResultHangingOffence(TMExceptionBase):
"""Hanging offence committed by test case."""
pass;
class TestResultLogic(ModelLogicBase): # pylint: disable=R0903
"""
Results grouped by scheduling group.
"""
#
# Result grinding for displaying in the WUI.
#
ksResultsGroupingTypeNone = 'ResultsGroupingTypeNone'
ksResultsGroupingTypeTestGroup = 'ResultsGroupingTypeTestGroup'
ksResultsGroupingTypeBuildRev = 'ResultsGroupingTypeBuild'
ksResultsGroupingTypeTestBox = 'ResultsGroupingTypeTestBox'
ksResultsGroupingTypeTestCase = 'ResultsGroupingTypeTestCase'
ksResultsGroupingTypeSchedGroup = 'ResultsGroupingTypeSchedGroup'
ksBaseTables = 'BuildCategories, Builds, TestBoxes, TestResults, TestCases, TestCaseArgs,\n' \
+ ' TestSets LEFT OUTER JOIN Builds AS TestSuiteBits\n' \
' ON TestSets.idBuildTestSuite = TestSuiteBits.idBuild\n';
ksBasePreCondition = 'TestSets.idTestSet = TestResults.idTestSet\n' \
+ ' AND TestResults.idTestResultParent is NULL\n' \
+ ' AND TestSets.idBuild = Builds.idBuild\n' \
+ ' AND Builds.tsExpire > TestSets.tsCreated\n' \
+ ' AND Builds.tsEffective <= TestSets.tsCreated\n' \
+ ' AND Builds.idBuildCategory = BuildCategories.idBuildCategory\n' \
+ ' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox\n' \
+ ' AND TestSets.idGenTestCase = TestCases.idGenTestCase\n' \
+ ' AND TestSets.idGenTestCaseArgs = TestCaseArgs.idGenTestCaseArgs\n'
kdResultGroupingMap = {
ksResultsGroupingTypeNone: (ksBaseTables,
ksBasePreCondition,),
ksResultsGroupingTypeTestGroup: (ksBaseTables,
ksBasePreCondition + ' AND TestSets.idTestGroup',),
ksResultsGroupingTypeBuildRev: (ksBaseTables,
ksBasePreCondition + ' AND Builds.iRevision',),
ksResultsGroupingTypeTestBox: (ksBaseTables,
ksBasePreCondition + ' AND TestSets.idTestBox',),
ksResultsGroupingTypeTestCase: (ksBaseTables,
ksBasePreCondition + ' AND TestSets.idTestCase',),
ksResultsGroupingTypeSchedGroup: (ksBaseTables,
ksBasePreCondition + ' AND TestBoxes.idSchedGroup',),
}
def _getTimePeriodQueryPart(self, tsNow, sInterval):
"""
Get the part of the SQL query responsible for selecting data within
the specified period of time.
"""
assert sInterval is not None; # too many rows.
cMonthsMourningPeriod = 2; # Stop reminding everyone about testboxes after 2 months. (May also speed up the query.)
if tsNow is None:
sRet = '(TestSets.tsDone IS NULL OR TestSets.tsDone >= (CURRENT_TIMESTAMP - \'%s\'::interval))\n' \
' AND TestSets.tsCreated >= (CURRENT_TIMESTAMP - \'%s\'::interval - \'%u months\'::interval)\n' \
% (sInterval, sInterval, cMonthsMourningPeriod);
else:
sTsNow = '\'%s\'::TIMESTAMP' % (tsNow,); # It's actually a string already. duh.
sRet = 'TestSets.tsCreated <= %s\n' \
' AND TestSets.tsCreated >= (%s - \'%s\'::interval - \'%u months\'::interval)\n' \
' AND (TestSets.tsDone IS NULL OR TestSets.tsDone >= (%s - \'%s\'::interval))\n' \
% ( sTsNow,
sTsNow, sInterval, cMonthsMourningPeriod,
sTsNow, sInterval );
return sRet
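# Illustrative output sketch (hedged): _getTimePeriodQueryPart(None, '3 hours')
# produces roughly the following condition:
#   (TestSets.tsDone IS NULL OR TestSets.tsDone >= (CURRENT_TIMESTAMP - '3 hours'::interval))
#    AND TestSets.tsCreated >= (CURRENT_TIMESTAMP - '3 hours'::interval - '2 months'::interval)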
def _getSqlQueryForGroupSearch(self, sWhat, tsNow, sInterval, enmResultsGroupingType, iResultsGroupingValue, fOnlyFailures):
"""
Returns an SQL query that limits the SELECT result
in order to satisfy @param enmResultsGroupingType.
"""
if enmResultsGroupingType is None:
raise TMExceptionBase('Unknown grouping type')
if enmResultsGroupingType not in self.kdResultGroupingMap:
raise TMExceptionBase('Unknown grouping type')
# Get SQL query parameters
sTables, sCondition = self.kdResultGroupingMap[enmResultsGroupingType]
# Extend SQL query with time period limitation
sTimePeriodQuery = self._getTimePeriodQueryPart(tsNow, sInterval)
if iResultsGroupingValue is not None:
sCondition += ' = %d' % iResultsGroupingValue + '\n';
sCondition += ' AND ' + sTimePeriodQuery
# Extend the condition with test status limitations if requested.
if fOnlyFailures:
sCondition += '\n AND TestSets.enmStatus != \'success\'::TestStatus_T' \
'\n AND TestSets.enmStatus != \'running\'::TestStatus_T';
# Assemble the query.
sQuery = 'SELECT DISTINCT %s\n' % sWhat
sQuery += 'FROM %s\n' % sTables
sQuery += 'WHERE %s\n' % sCondition
return sQuery
def fetchResultsForListing(self, iStart, cMaxRows, tsNow, sInterval, enmResultsGroupingType, iResultsGroupingValue,
fOnlyFailures):
"""
Fetches TestResults table content.
If @param enmResultsGroupingType and @param iResultsGroupingValue
are not None, then the resulting (returned) list contains only records
that match the specified @param enmResultsGroupingType.
If @param enmResultsGroupingType is None, then
@param iResultsGroupingValue is ignored.
Returns an array (list) of TestResultData items, empty list if none.
Raises exception on error.
"""
sWhat = 'TestSets.idTestSet,\n' \
' BuildCategories.idBuildCategory,\n' \
' BuildCategories.sProduct,\n' \
' BuildCategories.sRepository,\n' \
' BuildCategories.sBranch,\n' \
' BuildCategories.sType,\n' \
' Builds.idBuild,\n' \
' Builds.sVersion,\n' \
' Builds.iRevision,\n' \
' TestBoxes.sOs,\n' \
' TestBoxes.sOsVersion,\n' \
' TestBoxes.sCpuArch,\n' \
' TestBoxes.sCpuVendor,\n' \
' TestBoxes.sCpuName,\n' \
' TestBoxes.cCpus,\n' \
' TestBoxes.fCpuHwVirt,\n' \
' TestBoxes.fCpuNestedPaging,\n' \
' TestBoxes.fCpu64BitGuest,\n' \
' TestBoxes.idTestBox,\n' \
' TestBoxes.sName,\n' \
' TestResults.tsCreated,\n' \
' COALESCE(TestResults.tsElapsed, CURRENT_TIMESTAMP - TestResults.tsCreated),\n' \
' TestSets.enmStatus,\n' \
' TestResults.cErrors,\n' \
' TestCases.idTestCase,\n' \
' TestCases.sName,\n' \
' TestCases.sBaseCmd,\n' \
' TestCaseArgs.sArgs,\n' \
' TestSuiteBits.idBuild AS idBuildTestSuite,\n' \
' TestSuiteBits.iRevision AS iRevisionTestSuite,\n' \
' (TestSets.tsDone IS NULL) SortRunningFirst' \
;
sSqlQuery = self._getSqlQueryForGroupSearch(sWhat, tsNow, sInterval, enmResultsGroupingType, iResultsGroupingValue,
fOnlyFailures);
sSqlQuery += 'ORDER BY SortRunningFirst DESC, TestSets.idTestSet DESC\n';
sSqlQuery += 'LIMIT %s OFFSET %s\n' % (cMaxRows, iStart,);
self._oDb.execute(sSqlQuery);
aoRows = [];
for aoRow in self._oDb.fetchAll():
aoRows.append(TestResultListingData().initFromDbRow(aoRow))
return aoRows
def getEntriesCount(self, tsNow, sInterval, enmResultsGroupingType, iResultsGroupingValue, fOnlyFailures):
"""
Get number of table records.
If @param enmResultsGroupingType and @param iResultsGroupingValue
are not None, then we count only those records
that match the specified @param enmResultsGroupingType.
If @param enmResultsGroupingType is None, then
@param iResultsGroupingValue is ignored.
"""
sSqlQuery = self._getSqlQueryForGroupSearch('COUNT(TestSets.idTestSet)', tsNow, sInterval,
enmResultsGroupingType, iResultsGroupingValue, fOnlyFailures)
self._oDb.execute(sSqlQuery)
return self._oDb.fetchOne()[0]
def getTestGroups(self, tsNow, sPeriod):
"""
Get a list of unique TestGroupData objects which are
found in the test results.
"""
self._oDb.execute('SELECT DISTINCT TestGroups.*\n'
'FROM TestGroups, TestSets\n'
'WHERE TestSets.idTestGroup = TestGroups.idTestGroup\n'
' AND TestGroups.tsExpire > TestSets.tsCreated\n'
' AND TestGroups.tsEffective <= TestSets.tsCreated'
' AND ' + self._getTimePeriodQueryPart(tsNow, sPeriod))
aaoRows = self._oDb.fetchAll()
aoRet = []
for aoRow in aaoRows:
## @todo Need to take time into consideration. Will go belly up if we delete a testgroup.
aoRet.append(TestGroupData().initFromDbRow(aoRow))
return aoRet
def getBuilds(self, tsNow, sPeriod):
"""
Get a list of unique BuildDataEx objects which are
found in the test results.
"""
self._oDb.execute('SELECT DISTINCT Builds.*, BuildCategories.*\n'
'FROM Builds, BuildCategories, TestSets\n'
'WHERE TestSets.idBuild = Builds.idBuild\n'
' AND Builds.idBuildCategory = BuildCategories.idBuildCategory\n'
' AND Builds.tsExpire > TestSets.tsCreated\n'
' AND Builds.tsEffective <= TestSets.tsCreated'
' AND ' + self._getTimePeriodQueryPart(tsNow, sPeriod))
aaoRows = self._oDb.fetchAll()
aoRet = []
for aoRow in aaoRows:
aoRet.append(BuildDataEx().initFromDbRow(aoRow))
return aoRet
def getTestBoxes(self, tsNow, sPeriod):
"""
Get a list of unique TestBoxData objects which are
found in the test results.
"""
## @todo do all in one query.
self._oDb.execute('SELECT DISTINCT TestBoxes.idTestBox, TestBoxes.idGenTestBox\n'
'FROM TestBoxes, TestSets\n'
'WHERE TestSets.idGenTestBox = TestBoxes.idGenTestBox\n'
' AND ' + self._getTimePeriodQueryPart(tsNow, sPeriod) +
'ORDER BY TestBoxes.idTestBox, TestBoxes.idGenTestBox DESC' );
idPrevTestBox = -1;
asIdGenTestBoxes = [];
for aoRow in self._oDb.fetchAll():
if aoRow[0] != idPrevTestBox:
idPrevTestBox = aoRow[0];
asIdGenTestBoxes.append(str(aoRow[1]));
aoRet = []
if len(asIdGenTestBoxes) > 0:
self._oDb.execute('SELECT *\n'
'FROM TestBoxes\n'
'WHERE idGenTestBox IN (' + ','.join(asIdGenTestBoxes) + ')\n'
'ORDER BY sName');
for aoRow in self._oDb.fetchAll():
aoRet.append(TestBoxData().initFromDbRow(aoRow));
return aoRet
def getTestCases(self, tsNow, sPeriod):
"""
Get a list of unique TestCaseData objects which appear in the
specified test result period.
"""
self._oDb.execute('SELECT DISTINCT TestCases.idTestCase, TestCases.idGenTestCase, TestSets.tsConfig\n'
'FROM TestCases, TestSets\n'
'WHERE TestSets.idTestCase = TestCases.idTestCase\n'
' AND TestCases.tsExpire > TestSets.tsCreated\n'
' AND TestCases.tsEffective <= TestSets.tsCreated\n'
' AND ' + self._getTimePeriodQueryPart(tsNow, sPeriod) +
'ORDER BY TestCases.idTestCase, TestCases.idGenTestCase DESC\n');
aaoRows = self._oDb.fetchAll()
aoRet = []
idPrevTestCase = -1;
for aoRow in aaoRows:
## @todo reduce subqueries
if aoRow[0] != idPrevTestCase:
idPrevTestCase = aoRow[0];
aoRet.append(TestCaseData().initFromDbWithGenId(self._oDb, aoRow[1], aoRow[2]))
return aoRet
def getSchedGroups(self, tsNow, sPeriod):
"""
Get a list of unique SchedGroupData objects which are
found in the test results.
"""
self._oDb.execute('SELECT DISTINCT TestBoxes.idSchedGroup\n'
'FROM TestBoxes, TestSets\n'
'WHERE TestSets.idGenTestBox = TestBoxes.idGenTestBox\n'
' AND TestBoxes.tsExpire > TestSets.tsCreated\n'
' AND TestBoxes.tsEffective <= TestSets.tsCreated'
' AND ' + self._getTimePeriodQueryPart(tsNow, sPeriod))
aiRows = self._oDb.fetchAll()
aoRet = []
for iRow in aiRows:
## @todo reduce subqueries
aoRet.append(SchedGroupData().initFromDbWithId(self._oDb, iRow))
return aoRet
def getById(self, idTestResult):
"""
Get a test result record by its id.
"""
self._oDb.execute('SELECT *\n'
'FROM TestResults\n'
'WHERE idTestResult = %s\n',
(idTestResult,))
aRows = self._oDb.fetchAll()
if len(aRows) not in (0, 1):
raise TMExceptionBase('Found more than one test result with the same credentials. Database structure is corrupted.')
try:
return TestResultData().initFromDbRow(aRows[0])
except IndexError:
return None
#
# Details view and interface.
#
def fetchResultTree(self, idTestSet, cMaxDepth = None):
"""
Fetches the result tree for the given test set.
Returns a (root, lookup) pair: the root of a tree of TestResultDataEx nodes plus a dictionary mapping idTestResult to nodes.
Raises exception on invalid input and database issues.
"""
# Depth first, i.e. just like the XML added them.
## @todo this still isn't performing extremely well, consider optimizations.
sQuery = self._oDb.formatBindArgs(
'SELECT TestResults.*,\n'
' TestResultStrTab.sValue,\n'
' EXISTS ( SELECT idTestResultValue\n'
' FROM TestResultValues\n'
' WHERE TestResultValues.idTestResult = TestResults.idTestResult ) AS fHasValues,\n'
' EXISTS ( SELECT idTestResultMsg\n'
' FROM TestResultMsgs\n'
' WHERE TestResultMsgs.idTestResult = TestResults.idTestResult ) AS fHasMsgs,\n'
' EXISTS ( SELECT idTestResultFile\n'
' FROM TestResultFiles\n'
' WHERE TestResultFiles.idTestResult = TestResults.idTestResult ) AS fHasFiles\n'
'FROM TestResults, TestResultStrTab\n'
'WHERE TestResults.idTestSet = %s\n'
' AND TestResults.idStrName = TestResultStrTab.idStr\n'
, ( idTestSet, ));
if cMaxDepth is not None:
sQuery += self._oDb.formatBindArgs(' AND TestResults.iNestingDepth <= %s\n', (cMaxDepth,));
sQuery += 'ORDER BY idTestResult ASC\n'
self._oDb.execute(sQuery);
cRows = self._oDb.getRowCount();
if cRows > 65536:
raise TMTooManyRows('Too many rows returned for idTestSet=%d: %d' % (idTestSet, cRows,));
aaoRows = self._oDb.fetchAll();
if len(aaoRows) == 0:
raise TMExceptionBase('No test results for idTestSet=%d.' % (idTestSet,));
# Set up the root node first.
aoRow = aaoRows[0];
oRoot = TestResultDataEx().initFromDbRow(aoRow);
if oRoot.idTestResultParent is not None:
raise self._oDb.integrityException('The root TestResult (#%s) has a parent (#%s)!'
% (oRoot.idTestResult, oRoot.idTestResultParent));
self._fetchResultTreeNodeExtras(oRoot, aoRow[-3], aoRow[-2], aoRow[-1]);
# The children (if any).
dLookup = { oRoot.idTestResult: oRoot };
oParent = oRoot;
for iRow in range(1, len(aaoRows)):
aoRow = aaoRows[iRow];
oCur = TestResultDataEx().initFromDbRow(aoRow);
self._fetchResultTreeNodeExtras(oCur, aoRow[-3], aoRow[-2], aoRow[-1]);
# Figure out and vet the parent.
if oParent.idTestResult != oCur.idTestResultParent:
oParent = dLookup.get(oCur.idTestResultParent, None);
if oParent is None:
raise self._oDb.integrityException('TestResult #%d is orphaned from its parent #%s.'
% (oCur.idTestResult, oCur.idTestResultParent,));
if oParent.iNestingDepth + 1 != oCur.iNestingDepth:
raise self._oDb.integrityException('TestResult #%d has incorrect nesting depth (%d instead of %d)'
% (oCur.idTestResult, oCur.iNestingDepth, oParent.iNestingDepth + 1,));
# Link it up.
oCur.oParent = oParent;
oParent.aoChildren.append(oCur);
dLookup[oCur.idTestResult] = oCur;
return (oRoot, dLookup);
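# Illustrative traversal sketch (hedged; oTestResultLogic names a hypothetical
# TestResultLogic instance): the returned pair can be walked depth-first via
# the aoChildren links, e.g.:
#   (oRoot, dLookup) = oTestResultLogic.fetchResultTree(idTestSet);
#   for oChild in oRoot.aoChildren:
#       print(oChild.sName, oChild.enmStatus);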
def _fetchResultTreeNodeExtras(self, oCurNode, fHasValues, fHasMsgs, fHasFiles):
"""
fetchResultTree worker that fetches values, message and files for the
specified node.
"""
assert(oCurNode.aoValues == []);
assert(oCurNode.aoMsgs == []);
assert(oCurNode.aoFiles == []);
if fHasValues:
self._oDb.execute('SELECT TestResultValues.*,\n'
' TestResultStrTab.sValue\n'
'FROM TestResultValues, TestResultStrTab\n'
'WHERE TestResultValues.idTestResult = %s\n'
' AND TestResultValues.idStrName = TestResultStrTab.idStr\n'
'ORDER BY idTestResultValue ASC\n'
, ( oCurNode.idTestResult, ));
for aoRow in self._oDb.fetchAll():
oCurNode.aoValues.append(TestResultValueDataEx().initFromDbRow(aoRow));
if fHasMsgs:
self._oDb.execute('SELECT TestResultMsgs.*,\n'
' TestResultStrTab.sValue\n'
'FROM TestResultMsgs, TestResultStrTab\n'
'WHERE TestResultMsgs.idTestResult = %s\n'
' AND TestResultMsgs.idStrMsg = TestResultStrTab.idStr\n'
'ORDER BY idTestResultMsg ASC\n'
, ( oCurNode.idTestResult, ));
for aoRow in self._oDb.fetchAll():
oCurNode.aoMsgs.append(TestResultMsgDataEx().initFromDbRow(aoRow));
if fHasFiles:
self._oDb.execute('SELECT TestResultFiles.*,\n'
' StrTabFile.sValue AS sFile,\n'
' StrTabDesc.sValue AS sDescription,\n'
' StrTabKind.sValue AS sKind,\n'
' StrTabMime.sValue AS sMime\n'
'FROM TestResultFiles,\n'
' TestResultStrTab AS StrTabFile,\n'
' TestResultStrTab AS StrTabDesc,\n'
' TestResultStrTab AS StrTabKind,\n'
' TestResultStrTab AS StrTabMime\n'
'WHERE TestResultFiles.idTestResult = %s\n'
' AND TestResultFiles.idStrFile = StrTabFile.idStr\n'
' AND TestResultFiles.idStrDescription = StrTabDesc.idStr\n'
' AND TestResultFiles.idStrKind = StrTabKind.idStr\n'
' AND TestResultFiles.idStrMime = StrTabMime.idStr\n'
'ORDER BY idTestResultFile ASC\n'
, ( oCurNode.idTestResult, ));
for aoRow in self._oDb.fetchAll():
oCurNode.aoFiles.append(TestResultFileDataEx().initFromDbRow(aoRow));
return True;
#
# TestBoxController interface(s).
#
def _inhumeTestResults(self, aoStack, idTestSet, sError):
"""
The test produces too much output, kill and bury it.
Note! We leave the test set open, only the test result records are
completed. Thus, _getResultStack will return an empty stack and
cause XML processing to fail immediately, while we can still
record when it actually completed in the test set the normal way.
"""
self._oDb.dprint('** _inhumeTestResults: idTestSet=%d\n%s' % (idTestSet, self._stringifyStack(aoStack),));
#
# First add a message.
#
self._newFailureDetails(aoStack[0].idTestResult, sError, None);
#
# Then complete all open test results.
#
for oTestResult in aoStack:
oTestResult.cErrors += 1;
self._completeTestResults(oTestResult, None, TestResultData.ksTestStatus_Failure, oTestResult.cErrors);
# A bit of paranoia.
self._oDb.execute('UPDATE TestResults\n'
'SET cErrors = cErrors + 1,\n'
' enmStatus = \'failure\'::TestStatus_T,\n'
' tsElapsed = CURRENT_TIMESTAMP - tsCreated\n'
'WHERE idTestSet = %s\n'
' AND enmStatus = \'running\'::TestStatus_T\n'
, ( idTestSet, ));
self._oDb.commit();
return None;
def strTabString(self, sString, fCommit = False):
"""
Gets the string table id for the given string, adding it if new.
Note! A copy of this code is also in TestSetLogic.
"""
## @todo move this and make a stored procedure for it.
self._oDb.execute('SELECT idStr\n'
'FROM TestResultStrTab\n'
'WHERE sValue = %s'
, (sString,));
if self._oDb.getRowCount() == 0:
self._oDb.execute('INSERT INTO TestResultStrTab (sValue)\n'
'VALUES (%s)\n'
'RETURNING idStr\n'
, (sString,));
if fCommit:
self._oDb.commit();
return self._oDb.fetchOne()[0];
@staticmethod
def _stringifyStack(aoStack):
"""Returns a string rep of the stack."""
sRet = '';
for i in range(len(aoStack)):
sRet += 'aoStack[%d]=%s\n' % (i, aoStack[i]);
return sRet;
def _getResultStack(self, idTestSet):
"""
Gets the current stack of result sets.
"""
self._oDb.execute('SELECT *\n'
'FROM TestResults\n'
'WHERE idTestSet = %s\n'
' AND enmStatus = \'running\'::TestStatus_T\n'
'ORDER BY idTestResult DESC'
, ( idTestSet, ));
aoStack = [];
for aoRow in self._oDb.fetchAll():
aoStack.append(TestResultData().initFromDbRow(aoRow));
for i in range(len(aoStack)):
assert aoStack[i].iNestingDepth == len(aoStack) - i - 1, self._stringifyStack(aoStack);
return aoStack;
def _newTestResult(self, idTestResultParent, idTestSet, iNestingDepth, tsCreated, sName, dCounts, fCommit = False):
"""
Creates a new test result.
Returns the TestResultData object for the new record.
May raise exception on database error.
"""
assert idTestResultParent is not None;
assert idTestResultParent > 1;
#
# This isn't necessarily very efficient, but it's necessary to prevent
# a wild test or testbox from filling up the database.
#
sCountName = 'cTestResults';
if sCountName not in dCounts:
self._oDb.execute('SELECT COUNT(idTestResult)\n'
'FROM TestResults\n'
'WHERE idTestSet = %s\n'
, ( idTestSet,));
dCounts[sCountName] = self._oDb.fetchOne()[0];
dCounts[sCountName] += 1;
if dCounts[sCountName] > config.g_kcMaxTestResultsPerTS:
raise TestResultHangingOffence('Too many sub-tests in total!');
sCountName = 'cTestResultsIn%d' % (idTestResultParent,);
if sCountName not in dCounts:
self._oDb.execute('SELECT COUNT(idTestResult)\n'
'FROM TestResults\n'
'WHERE idTestResultParent = %s\n'
, ( idTestResultParent,));
dCounts[sCountName] = self._oDb.fetchOne()[0];
dCounts[sCountName] += 1;
if dCounts[sCountName] > config.g_kcMaxTestResultsPerTR:
raise TestResultHangingOffence('Too many immediate sub-tests!');
# This is also a hanging offence.
if iNestingDepth > config.g_kcMaxTestResultDepth:
raise TestResultHangingOffence('Too deep sub-test nesting!');
# Ditto.
if len(sName) > config.g_kcchMaxTestResultName:
raise TestResultHangingOffence('Test name is too long: %d chars - "%s"' % (len(sName), sName));
#
# Within bounds, do the job.
#
idStrName = self.strTabString(sName, fCommit);
self._oDb.execute('INSERT INTO TestResults (\n'
' idTestResultParent,\n'
' idTestSet,\n'
' tsCreated,\n'
' idStrName,\n'
' iNestingDepth )\n'
'VALUES (%s, %s, TIMESTAMP WITH TIME ZONE %s, %s, %s)\n'
'RETURNING *\n'
, ( idTestResultParent, idTestSet, tsCreated, idStrName, iNestingDepth) )
oData = TestResultData().initFromDbRow(self._oDb.fetchOne());
self._oDb.maybeCommit(fCommit);
return oData;
def _newTestValue(self, idTestResult, idTestSet, sName, lValue, sUnit, dCounts, tsCreated = None, fCommit = False):
"""
Creates a test value.
May raise exception on database error.
"""
#
# Bounds checking.
#
sCountName = 'cTestValues';
if sCountName not in dCounts:
self._oDb.execute('SELECT COUNT(idTestResultValue)\n'
'FROM TestResultValues, TestResults\n'
'WHERE TestResultValues.idTestResult = TestResults.idTestResult\n'
' AND TestResults.idTestSet = %s\n'
, ( idTestSet,));
dCounts[sCountName] = self._oDb.fetchOne()[0];
dCounts[sCountName] += 1;
if dCounts[sCountName] > config.g_kcMaxTestValuesPerTS:
raise TestResultHangingOffence('Too many values in total!');
sCountName = 'cTestValuesIn%d' % (idTestResult,);
if sCountName not in dCounts:
self._oDb.execute('SELECT COUNT(idTestResultValue)\n'
'FROM TestResultValues\n'
'WHERE idTestResult = %s\n'
, ( idTestResult,));
dCounts[sCountName] = self._oDb.fetchOne()[0];
dCounts[sCountName] += 1;
if dCounts[sCountName] > config.g_kcMaxTestValuesPerTR:
raise TestResultHangingOffence('Too many immediate values for one test result!');
if len(sName) > config.g_kcchMaxTestValueName:
raise TestResultHangingOffence('Value name is too long: %d chars - "%s"' % (len(sName), sName));
#
# Do the job.
#
iUnit = constants.valueunit.g_kdNameToConst.get(sUnit, constants.valueunit.NONE);
idStrName = self.strTabString(sName, fCommit);
if tsCreated is None:
self._oDb.execute('INSERT INTO TestResultValues (\n'
' idTestResult,\n'
' idTestSet,\n'
' idStrName,\n'
' lValue,\n'
' iUnit)\n'
'VALUES ( %s, %s, %s, %s, %s )\n'
, ( idTestResult, idTestSet, idStrName, lValue, iUnit,) );
else:
self._oDb.execute('INSERT INTO TestResultValues (\n'
' idTestResult,\n'
' idTestSet,\n'
' tsCreated,\n'
' idStrName,\n'
' lValue,\n'
' iUnit)\n'
'VALUES ( %s, %s, TIMESTAMP WITH TIME ZONE %s, %s, %s, %s )\n'
, ( idTestResult, idTestSet, tsCreated, idStrName, lValue, iUnit,) );
self._oDb.maybeCommit(fCommit);
return True;
def _newFailureDetails(self, idTestResult, sText, dCounts, tsCreated = None, fCommit = False):
"""
Creates a record detailing cause of failure.
May raise exception on database error.
"""
#
# Overflow protection.
#
if dCounts is not None:
sCountName = 'cTestMsgsIn%d' % (idTestResult,);
if sCountName not in dCounts:
self._oDb.execute('SELECT COUNT(idTestResultMsg)\n'
'FROM TestResultMsgs\n'
'WHERE idTestResult = %s\n'
, ( idTestResult,));
dCounts[sCountName] = self._oDb.fetchOne()[0];
dCounts[sCountName] += 1;
if dCounts[sCountName] > config.g_kcMaxTestMsgsPerTR:
raise TestResultHangingOffence('Too many messages for one test result!');
if len(sText) > config.g_kcchMaxTestMsg:
raise TestResultHangingOffence('Failure details message is too long: %d chars - "%s"' % (len(sText), sText));
#
# Do the job.
#
idStrMsg = self.strTabString(sText, fCommit);
if tsCreated is None:
self._oDb.execute('INSERT INTO TestResultMsgs (\n'
' idTestResult,\n'
' idStrMsg,\n'
' enmLevel)\n'
'VALUES ( %s, %s, %s)\n'
, ( idTestResult, idStrMsg, 'failure',) );
else:
self._oDb.execute('INSERT INTO TestResultMsgs (\n'
' idTestResult,\n'
' tsCreated,\n'
' idStrMsg,\n'
' enmLevel)\n'
'VALUES ( %s, TIMESTAMP WITH TIME ZONE %s, %s, %s)\n'
, ( idTestResult, tsCreated, idStrMsg, 'failure',) );
self._oDb.maybeCommit(fCommit);
return True;
def _completeTestResults(self, oTestResult, tsDone, enmStatus, cErrors = 0, fCommit = False):
"""
Completes a test result. Updates the oTestResult object.
May raise exception on database error.
"""
self._oDb.dprint('** _completeTestResults: cErrors=%s tsDone=%s enmStatus=%s oTestResults=\n%s'
% (cErrors, tsDone, enmStatus, oTestResult,));
#
# Sanity check: No open sub tests (aoStack should make sure about this!).
#
self._oDb.execute('SELECT COUNT(idTestResult)\n'
'FROM TestResults\n'
'WHERE idTestResultParent = %s\n'
' AND enmStatus = %s\n'
, ( oTestResult.idTestResult, TestResultData.ksTestStatus_Running,));
cOpenSubTest = self._oDb.fetchOne()[0];
assert cOpenSubTest == 0, 'cOpenSubTest=%d - %s' % (cOpenSubTest, oTestResult,);
assert oTestResult.enmStatus == TestResultData.ksTestStatus_Running;
#
# Make sure the reporter isn't lying about successes or error counts.
#
self._oDb.execute('SELECT COALESCE(SUM(cErrors), 0)\n'
'FROM TestResults\n'
'WHERE idTestResultParent = %s\n'
, ( oTestResult.idTestResult, ));
cMinErrors = self._oDb.fetchOne()[0] + oTestResult.cErrors;
if cErrors < cMinErrors:
cErrors = cMinErrors;
if cErrors > 0 and enmStatus == TestResultData.ksTestStatus_Success:
enmStatus = TestResultData.ksTestStatus_Failure
#
# Do the update.
#
if tsDone is None:
self._oDb.execute('UPDATE TestResults\n'
'SET cErrors = %s,\n'
' enmStatus = %s,\n'
' tsElapsed = CURRENT_TIMESTAMP - tsCreated\n'
'WHERE idTestResult = %s\n'
'RETURNING tsElapsed'
, ( cErrors, enmStatus, oTestResult.idTestResult,) );
else:
self._oDb.execute('UPDATE TestResults\n'
'SET cErrors = %s,\n'
' enmStatus = %s,\n'
' tsElapsed = TIMESTAMP WITH TIME ZONE %s - tsCreated\n'
'WHERE idTestResult = %s\n'
'RETURNING tsElapsed'
, ( cErrors, enmStatus, tsDone, oTestResult.idTestResult,) );
oTestResult.tsElapsed = self._oDb.fetchOne()[0];
oTestResult.enmStatus = enmStatus;
oTestResult.cErrors = cErrors;
self._oDb.maybeCommit(fCommit);
return None;
def _doPopHint(self, aoStack, cStackEntries, dCounts):
""" Executes a PopHint. """
assert cStackEntries >= 0;
while len(aoStack) > cStackEntries:
if aoStack[0].enmStatus == TestResultData.ksTestStatus_Running:
self._newFailureDetails(aoStack[0].idTestResult, 'XML error: Missing </Test>', dCounts);
self._completeTestResults(aoStack[0], tsDone = None, cErrors = 1,
enmStatus = TestResultData.ksTestStatus_Failure, fCommit = True);
aoStack.pop(0);
return True;
@staticmethod
def _validateElement(sName, dAttribs, fClosed):
"""
Validates an element and its attributes.
"""
#
# Validate attributes by name.
#
# Validate integer attributes.
for sAttr in [ 'errors', 'testdepth' ]:
if sAttr in dAttribs:
try:
_ = int(dAttribs[sAttr]);
except:
return 'Element %s has an invalid %s attribute value: %s.' % (sName, sAttr, dAttribs[sAttr],);
# Validate long attributes.
for sAttr in [ 'value', ]:
if sAttr in dAttribs:
try:
_ = long(dAttribs[sAttr]);
except:
return 'Element %s has an invalid %s attribute value: %s.' % (sName, sAttr, dAttribs[sAttr],);
# Validate string attributes.
for sAttr in [ 'name', 'unit', 'text' ]:
if sAttr in dAttribs and len(dAttribs[sAttr]) == 0:
return 'Element %s has an empty %s attribute value.' % (sName, sAttr,);
# Validate the timestamp attribute.
if 'timestamp' in dAttribs:
(dAttribs['timestamp'], sError) = ModelDataBase.validateTs(dAttribs['timestamp'], fAllowNull = False);
if sError is not None:
return 'Element %s has an invalid timestamp ("%s"): %s' % (sName, dAttribs['timestamp'], sError,);
#
# Check that attributes that are required are present.
# We ignore extra attributes.
#
dElementAttribs = \
{
'Test': [ 'timestamp', 'name', ],
'Value': [ 'timestamp', 'name', 'unit', 'value', ],
'FailureDetails': [ 'timestamp', 'text', ],
'Passed': [ 'timestamp', ],
'Skipped': [ 'timestamp', ],
'Failed': [ 'timestamp', 'errors', ],
'TimedOut': [ 'timestamp', 'errors', ],
'End': [ 'timestamp', ],
'PushHint': [ 'testdepth', ],
'PopHint': [ 'testdepth', ],
};
if sName not in dElementAttribs:
return 'Unknown element "%s".' % (sName,);
for sAttr in dElementAttribs[sName]:
if sAttr not in dAttribs:
return 'Element %s requires attribute "%s".' % (sName, sAttr);
#
# Only the Test element can (and must) remain open.
#
if sName == 'Test' and fClosed:
return '<Test/> is not allowed.';
if sName != 'Test' and not fClosed:
return 'All elements except <Test> must be closed.';
return None;
@staticmethod
def _parseElement(sElement):
"""
Parses an element.
"""
#
# Element level bits.
#
sName = sElement.split()[0];
sElement = sElement[len(sName):];
fClosed = sElement[-1] == '/';
if fClosed:
sElement = sElement[:-1];
#
# Attributes.
#
sError = None;
dAttribs = {};
sElement = sElement.strip();
while len(sElement) > 0:
# Extract attribute name.
off = sElement.find('=');
if off < 0 or not sElement[:off].isalnum():
sError = 'Attributes shall have alphanumerical names and values.';
break;
sAttr = sElement[:off];
# Extract attribute value.
if off + 2 >= len(sElement) or sElement[off + 1] != '"':
sError = 'Attribute (%s) value is missing or not in double quotes.' % (sAttr,);
break;
off += 2;
offEndQuote = sElement.find('"', off);
if offEndQuote < 0:
sError = 'Attribute (%s) value is missing end quotation mark.' % (sAttr,);
break;
sValue = sElement[off:offEndQuote];
# Check for duplicates.
if sAttr in dAttribs:
sError = 'Attribute "%s" appears more than once.' % (sAttr,);
break;
# Unescape the value.
sValue = sValue.replace('&lt;', '<');
sValue = sValue.replace('&gt;', '>');
sValue = sValue.replace('&apos;', '\'');
sValue = sValue.replace('&quot;', '"');
sValue = sValue.replace('&#xA;', '\n');
sValue = sValue.replace('&#xD;', '\r');
sValue = sValue.replace('&amp;', '&'); # last
# Done.
dAttribs[sAttr] = sValue;
# advance
sElement = sElement[offEndQuote + 1:];
sElement = sElement.lstrip();
#
# Validate the element before we return.
#
if sError is None:
sError = TestResultLogic._validateElement(sName, dAttribs, fClosed);
return (sName, dAttribs, sError)
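# Illustrative sketch (hedged): for the tag
#   <Value timestamp="..." name="t" unit="ms" value="42"/>
# the caller passes the inner text (no angle brackets), and _parseElement
# returns roughly:
#   ('Value', {'timestamp': <validated ts>, 'name': 't', 'unit': 'ms', 'value': '42'}, None)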
def _handleElement(self, sName, dAttribs, idTestSet, aoStack, aaiHints, dCounts):
"""
Worker for processXmlStream that handles one element.
Returns None on success, error string on bad XML or similar.
Raises exception on hanging offence and on database error.
"""
if sName == 'Test':
iNestingDepth = aoStack[0].iNestingDepth + 1 if len(aoStack) > 0 else 0;
aoStack.insert(0, self._newTestResult(idTestResultParent = aoStack[0].idTestResult, idTestSet = idTestSet,
tsCreated = dAttribs['timestamp'], sName = dAttribs['name'],
iNestingDepth = iNestingDepth, dCounts = dCounts, fCommit = True) );
elif sName == 'Value':
self._newTestValue(idTestResult = aoStack[0].idTestResult, idTestSet = idTestSet, tsCreated = dAttribs['timestamp'],
sName = dAttribs['name'], sUnit = dAttribs['unit'], lValue = long(dAttribs['value']),
dCounts = dCounts, fCommit = True);
elif sName == 'FailureDetails':
self._newFailureDetails(idTestResult = aoStack[0].idTestResult, tsCreated = dAttribs['timestamp'],
sText = dAttribs['text'], dCounts = dCounts, fCommit = True);
elif sName == 'Passed':
self._completeTestResults(aoStack[0], tsDone = dAttribs['timestamp'],
enmStatus = TestResultData.ksTestStatus_Success, fCommit = True);
elif sName == 'Skipped':
self._completeTestResults(aoStack[0], tsDone = dAttribs['timestamp'],
enmStatus = TestResultData.ksTestStatus_Skipped, fCommit = True);
elif sName == 'Failed':
self._completeTestResults(aoStack[0], tsDone = dAttribs['timestamp'], cErrors = int(dAttribs['errors']),
enmStatus = TestResultData.ksTestStatus_Failure, fCommit = True);
elif sName == 'TimedOut':
self._completeTestResults(aoStack[0], tsDone = dAttribs['timestamp'], cErrors = int(dAttribs['errors']),
enmStatus = TestResultData.ksTestStatus_TimedOut, fCommit = True);
elif sName == 'End':
self._completeTestResults(aoStack[0], tsDone = dAttribs['timestamp'],
cErrors = int(dAttribs.get('errors', '1')),
enmStatus = TestResultData.ksTestStatus_Success, fCommit = True);
elif sName == 'PushHint':
if len(aaiHints) > 1:
return 'PushHint cannot be nested.'
aaiHints.insert(0, [len(aoStack), int(dAttribs['testdepth'])]);
elif sName == 'PopHint':
if len(aaiHints) < 1:
return 'No hint to pop.'
iDesiredTestDepth = int(dAttribs['testdepth']);
cStackEntries, iTestDepth = aaiHints.pop(0);
self._doPopHint(aoStack, cStackEntries, dCounts); # Fake the necessary '<End/></Test>' tags.
if iDesiredTestDepth != iTestDepth:
return 'PopHint tag has different testdepth: %d, on stack %d.' % (iDesiredTestDepth, iTestDepth);
else:
return 'Unexpected element "%s".' % (sName,);
return None;
def processXmlStream(self, sXml, idTestSet):
"""
Processes the "XML" stream section given in sXml.
The sXml isn't a complete XML document; even if we saved up all the sXml
for a given set, it might not form a complete and well-formed XML
document, since the test may be aborted, abend, or simply be buggy. We
therefore do our own parsing and treat the XML tags as commands more
than anything else.
Returns (sError, fUnforgivable), where sError is None on success.
May raise database exception.
"""
aoStack = self._getResultStack(idTestSet); # [0] == top; [-1] == bottom.
if len(aoStack) == 0:
return ('No open results', True);
self._oDb.dprint('** processXmlStream len(aoStack)=%s' % (len(aoStack),));
#self._oDb.dprint('processXmlStream: %s' % (self._stringifyStack(aoStack),));
#self._oDb.dprint('processXmlStream: sXml=%s' % (sXml,));
dCounts = {};
aaiHints = [];
sError = None;
fExpectCloseTest = False;
sXml = sXml.strip();
while len(sXml) > 0:
if sXml.startswith('</Test>'): # Only closing tag.
offNext = len('</Test>');
if len(aoStack) <= 1:
sError = 'Trying to close the top test results.'
break;
# ASSUMES that we've just seen an <End/>, <Passed/>, <Failed/>,
# <TimedOut/> or <Skipped/> tag earlier in this call!
if aoStack[0].enmStatus == TestResultData.ksTestStatus_Running or not fExpectCloseTest:
sError = 'Missing <End/>, <Passed/>, <Failed/>, <TimedOut/> or <Skipped/> tag.';
break;
aoStack.pop(0);
fExpectCloseTest = False;
elif fExpectCloseTest:
sError = 'Expected </Test>.'
break;
elif sXml.startswith('<?xml '): # Ignore (included files).
offNext = sXml.find('?>');
if offNext < 0:
sError = 'Unterminated <?xml ?> element.';
break;
offNext += 2;
elif sXml[0] == '<':
# Parse and check the tag.
if not sXml[1].isalpha():
sError = 'Malformed element.';
break;
offNext = sXml.find('>')
if offNext < 0:
sError = 'Unterminated element.';
break;
(sName, dAttribs, sError) = self._parseElement(sXml[1:offNext]);
offNext += 1;
if sError is not None:
break;
# Handle it.
try:
sError = self._handleElement(sName, dAttribs, idTestSet, aoStack, aaiHints, dCounts);
except TestResultHangingOffence as oXcpt:
self._inhumeTestResults(aoStack, idTestSet, str(oXcpt));
return (str(oXcpt), True);
fExpectCloseTest = sName in [ 'End', 'Passed', 'Failed', 'TimedOut', 'Skipped', ];
else:
sError = 'Unexpected content.';
break;
# Advance.
sXml = sXml[offNext:];
sXml = sXml.lstrip();
#
# Post processing checks.
#
if sError is None and fExpectCloseTest:
sError = 'Expected </Test> before the end of the XML section.'
elif sError is None and len(aaiHints) > 0:
sError = 'Expected </PopHint> before the end of the XML section.'
if len(aaiHints) > 0:
self._doPopHint(aoStack, aaiHints[-1][0], dCounts);
#
# Log the error.
#
if sError is not None:
SystemLogLogic(self._oDb).addEntry(SystemLogData.ksEvent_XmlResultMalformed,
'idTestSet=%s idTestResult=%s XML="%s" %s'
% ( idTestSet,
aoStack[0].idTestResult if len(aoStack) > 0 else -1,
sXml[:30 if len(sXml) >= 30 else len(sXml)],
sError, ),
cHoursRepeat = 6, fCommit = True);
return (sError, False);
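# Illustrative input sketch (hedged example of the pseudo-XML command stream
# this method accepts; attribute values abbreviated):
#   <Test timestamp="..." name="tstExample">
#   <Value timestamp="..." name="time" unit="ms" value="42"/>
#   <Passed timestamp="..."/>
#   </Test>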
#
# Unit testing.
#
# pylint: disable=C0111
class TestResultDataTestCase(ModelDataBaseTestCase):
def setUp(self):
self.aoSamples = [TestResultData(),];
class TestResultValueDataTestCase(ModelDataBaseTestCase):
def setUp(self):
self.aoSamples = [TestResultValueData(),];
if __name__ == '__main__':
unittest.main();
# not reached.
|
grpc/grpc
|
refs/heads/master
|
src/python/grpcio_tests/tests/unit/test_common.py
|
13
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code used throughout tests of gRPC."""
import collections
import threading
from concurrent import futures
import grpc
import six
INVOCATION_INITIAL_METADATA = (
('0', 'abc'),
('1', 'def'),
('2', 'ghi'),
)
SERVICE_INITIAL_METADATA = (
('3', 'jkl'),
('4', 'mno'),
('5', 'pqr'),
)
SERVICE_TERMINAL_METADATA = (
('6', 'stu'),
('7', 'vwx'),
('8', 'yza'),
)
DETAILS = 'test details'
def metadata_transmitted(original_metadata, transmitted_metadata):
"""Judges whether or not metadata was acceptably transmitted.
gRPC is allowed to insert key-value pairs into the metadata values given by
applications and to reorder key-value pairs with different keys, but it is not
allowed to alter existing key-value pairs or to reorder key-value pairs with
the same key.
Args:
original_metadata: A metadata value used in a test of gRPC. An iterable over
iterables of length 2.
transmitted_metadata: A metadata value corresponding to original_metadata
after having been transmitted via gRPC. An iterable over iterables of
length 2.
Returns:
A boolean indicating whether transmitted_metadata accurately reflects
original_metadata after having been transmitted via gRPC.
"""
original = collections.defaultdict(list)
for key, value in original_metadata:
original[key].append(value)
transmitted = collections.defaultdict(list)
for key, value in transmitted_metadata:
transmitted[key].append(value)
for key, values in six.iteritems(original):
transmitted_values = transmitted[key]
transmitted_iterator = iter(transmitted_values)
try:
for value in values:
while True:
transmitted_value = next(transmitted_iterator)
if value == transmitted_value:
break
except StopIteration:
return False
else:
return True
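# Hedged illustration (not used by the tests themselves) of the rules
# documented above: inserted pairs and cross-key reordering are fine, but
# per-key value order must survive transmission.
def _example_metadata_transmission():
    original = (('a', '1'), ('a', '2'))
    # Inserting a new key while keeping per-key order is acceptable:
    assert metadata_transmitted(original, (('x', 'new'), ('a', '1'), ('a', '2')))
    # Reordering values under the *same* key is not:
    assert not metadata_transmitted(original, (('a', '2'), ('a', '1')))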
def test_secure_channel(target, channel_credentials, server_host_override):
"""Creates an insecure Channel to a remote host.
Args:
host: The name of the remote host to which to connect.
port: The port of the remote host to which to connect.
channel_credentials: The implementations.ChannelCredentials with which to
connect.
server_host_override: The target name used for SSL host name checking.
Returns:
An implementations.Channel to the remote host through which RPCs may be
conducted.
"""
channel = grpc.secure_channel(target, channel_credentials, ((
'grpc.ssl_target_name_override',
server_host_override,
),))
return channel
def test_server(max_workers=10, reuse_port=False):
"""Creates an insecure grpc server.
These servers have SO_REUSEPORT disabled to prevent cross-talk.
"""
return grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers),
options=(('grpc.so_reuseport', int(reuse_port)),))
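# Hedged wiring sketch (not taken from the tests): start a throwaway server
# on an ephemeral port and connect an insecure channel to it.
def _example_server_round_trip():
    server = test_server()
    port = server.add_insecure_port('[::]:0')  # 0 -> pick any free port
    server.start()
    channel = grpc.insecure_channel('localhost:%d' % port)
    channel.close()
    server.stop(None)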
class WaitGroup(object):
    """Go-style synchronization aid: wait() blocks until the count is zero."""

    def __init__(self, n=0):
        self.count = n
        self.cv = threading.Condition()

    def add(self, n):
        """Increments the counter by n; call before spawning workers."""
        with self.cv:
            self.count += n

    def done(self):
        """Decrements the counter; wakes all waiters when it reaches zero."""
        with self.cv:
            self.count -= 1
            if self.count == 0:
                self.cv.notify_all()

    def wait(self):
        """Blocks until the counter has dropped to zero."""
        with self.cv:
            while self.count > 0:
                self.cv.wait()
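# Hedged sketch of the intended WaitGroup protocol (mirrors Go's sync.WaitGroup):
def _example_wait_group():
    wg = WaitGroup()
    wg.add(2)  # add() before spawning; each worker calls done() exactly once
    def _worker():
        try:
            pass  # ... exercise the system under test ...
        finally:
            wg.done()
    for _ in range(2):
        threading.Thread(target=_worker).start()
    wg.wait()  # blocks until the count returns to zero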
|
googleapis/python-automl
|
refs/heads/master
|
google/cloud/automl_v1beta1/services/auto_ml/async_client.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.automl_v1beta1.services.auto_ml import pagers
from google.cloud.automl_v1beta1.types import annotation_spec
from google.cloud.automl_v1beta1.types import classification
from google.cloud.automl_v1beta1.types import column_spec
from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec
from google.cloud.automl_v1beta1.types import data_stats
from google.cloud.automl_v1beta1.types import data_types
from google.cloud.automl_v1beta1.types import dataset
from google.cloud.automl_v1beta1.types import dataset as gca_dataset
from google.cloud.automl_v1beta1.types import detection
from google.cloud.automl_v1beta1.types import image
from google.cloud.automl_v1beta1.types import io
from google.cloud.automl_v1beta1.types import model
from google.cloud.automl_v1beta1.types import model as gca_model
from google.cloud.automl_v1beta1.types import model_evaluation
from google.cloud.automl_v1beta1.types import operations
from google.cloud.automl_v1beta1.types import regression
from google.cloud.automl_v1beta1.types import service
from google.cloud.automl_v1beta1.types import table_spec
from google.cloud.automl_v1beta1.types import table_spec as gca_table_spec
from google.cloud.automl_v1beta1.types import tables
from google.cloud.automl_v1beta1.types import text
from google.cloud.automl_v1beta1.types import text_extraction
from google.cloud.automl_v1beta1.types import text_sentiment
from google.cloud.automl_v1beta1.types import translation
from google.cloud.automl_v1beta1.types import video
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import AutoMlTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import AutoMlGrpcAsyncIOTransport
from .client import AutoMlClient
class AutoMlAsyncClient:
"""AutoML Server API.
The resource names are assigned by the server. The server never
reuses names that it has created after the resources with those
names are deleted.
An ID of a resource is the last element of the item's resource name.
For
``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``,
the id for the item is ``{dataset_id}``.
Currently the only supported ``location_id`` is "us-central1".
On any input that is documented to expect a string parameter in
snake_case or kebab-case, either of those cases is accepted.
"""
_client: AutoMlClient
DEFAULT_ENDPOINT = AutoMlClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = AutoMlClient.DEFAULT_MTLS_ENDPOINT
annotation_spec_path = staticmethod(AutoMlClient.annotation_spec_path)
parse_annotation_spec_path = staticmethod(AutoMlClient.parse_annotation_spec_path)
column_spec_path = staticmethod(AutoMlClient.column_spec_path)
parse_column_spec_path = staticmethod(AutoMlClient.parse_column_spec_path)
dataset_path = staticmethod(AutoMlClient.dataset_path)
parse_dataset_path = staticmethod(AutoMlClient.parse_dataset_path)
model_path = staticmethod(AutoMlClient.model_path)
parse_model_path = staticmethod(AutoMlClient.parse_model_path)
model_evaluation_path = staticmethod(AutoMlClient.model_evaluation_path)
parse_model_evaluation_path = staticmethod(AutoMlClient.parse_model_evaluation_path)
table_spec_path = staticmethod(AutoMlClient.table_spec_path)
parse_table_spec_path = staticmethod(AutoMlClient.parse_table_spec_path)
common_billing_account_path = staticmethod(AutoMlClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(
AutoMlClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(AutoMlClient.common_folder_path)
parse_common_folder_path = staticmethod(AutoMlClient.parse_common_folder_path)
common_organization_path = staticmethod(AutoMlClient.common_organization_path)
parse_common_organization_path = staticmethod(
AutoMlClient.parse_common_organization_path
)
common_project_path = staticmethod(AutoMlClient.common_project_path)
parse_common_project_path = staticmethod(AutoMlClient.parse_common_project_path)
common_location_path = staticmethod(AutoMlClient.common_location_path)
parse_common_location_path = staticmethod(AutoMlClient.parse_common_location_path)
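# Hedged illustration of the path helpers above; the format strings are
# assumptions based on the conventional AutoML resource-name scheme:
#
#   AutoMlAsyncClient.dataset_path("my-project", "us-central1", "ds123")
#     -> "projects/my-project/locations/us-central1/datasets/ds123"
#   AutoMlAsyncClient.parse_dataset_path(
#       "projects/my-project/locations/us-central1/datasets/ds123")
#     -> {"project": "my-project", "location": "us-central1", "dataset": "ds123"}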
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AutoMlAsyncClient: The constructed client.
"""
return AutoMlClient.from_service_account_info.__func__(AutoMlAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AutoMlAsyncClient: The constructed client.
"""
return AutoMlClient.from_service_account_file.__func__(AutoMlAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> AutoMlTransport:
"""Returns the transport used by the client instance.
Returns:
AutoMlTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(AutoMlClient).get_transport_class, type(AutoMlClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, AutoMlTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the auto ml client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AutoMlTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. The GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide a client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = AutoMlClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
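# Hedged construction sketch (credential and endpoint resolution as described
# above; the regional endpoint below is hypothetical):
#
#   client = AutoMlAsyncClient()  # ADC + default endpoint
#   client = AutoMlAsyncClient(
#       client_options=ClientOptions.ClientOptions(
#           api_endpoint="eu-automl.googleapis.com"))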
async def create_dataset(
self,
request: service.CreateDatasetRequest = None,
*,
parent: str = None,
dataset: gca_dataset.Dataset = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_dataset.Dataset:
r"""Creates a dataset.
Args:
request (:class:`google.cloud.automl_v1beta1.types.CreateDatasetRequest`):
The request object. Request message for
[AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset].
parent (:class:`str`):
Required. The resource name of the
project to create the dataset for.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset (:class:`google.cloud.automl_v1beta1.types.Dataset`):
Required. The dataset to create.
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.types.Dataset:
A workspace for solving a single,
particular machine learning (ML)
problem. A workspace contains examples
that may be annotated.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, dataset])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.CreateDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if dataset is not None:
request.dataset = dataset
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_dataset,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
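# Hedged usage sketch; the Dataset fields shown are assumptions for a
# translation dataset:
#
#   ds = await client.create_dataset(
#       parent="projects/my-project/locations/us-central1",
#       dataset=gca_dataset.Dataset(
#           display_name="my_dataset",
#           translation_dataset_metadata=translation.TranslationDatasetMetadata(
#               source_language_code="en", target_language_code="es")))
#   print(ds.name)  # server-assigned resource name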
async def get_dataset(
self,
request: service.GetDatasetRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dataset.Dataset:
r"""Gets a dataset.
Args:
request (:class:`google.cloud.automl_v1beta1.types.GetDatasetRequest`):
The request object. Request message for
[AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset].
name (:class:`str`):
Required. The resource name of the
dataset to retrieve.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.types.Dataset:
A workspace for solving a single,
particular machine learning (ML)
problem. A workspace contains examples
that may be annotated.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.GetDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_dataset,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
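# Note on the default retry above: delays grow geometrically,
# delay_n ~= min(60.0, 0.1 * 1.3**n) (jittered), i.e. roughly
# 0.10, 0.13, 0.17, 0.22, ... seconds, and retrying stops once the
# 5.0 second deadline has elapsed.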
async def list_datasets(
self,
request: service.ListDatasetsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDatasetsAsyncPager:
r"""Lists datasets in a project.
Args:
request (:class:`google.cloud.automl_v1beta1.types.ListDatasetsRequest`):
The request object. Request message for
[AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets].
parent (:class:`str`):
Required. The resource name of the
project from which to list datasets.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsAsyncPager:
Response message for
[AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.ListDatasetsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_datasets,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListDatasetsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
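# Hedged pager sketch: awaiting the call yields the pager, which then
# resolves further pages lazily:
#
#   pager = await client.list_datasets(
#       parent="projects/my-project/locations/us-central1")
#   async for ds in pager:  # additional pages are fetched on demand
#       print(ds.name)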
async def update_dataset(
self,
request: service.UpdateDatasetRequest = None,
*,
dataset: gca_dataset.Dataset = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_dataset.Dataset:
r"""Updates a dataset.
Args:
request (:class:`google.cloud.automl_v1beta1.types.UpdateDatasetRequest`):
The request object. Request message for
[AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset]
dataset (:class:`google.cloud.automl_v1beta1.types.Dataset`):
Required. The dataset which replaces
the resource on the server.
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.types.Dataset:
A workspace for solving a single,
particular machine learning (ML)
problem. A workspace contains examples
that may be annotated.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([dataset])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.UpdateDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if dataset is not None:
request.dataset = dataset
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_dataset,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("dataset.name", request.dataset.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
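# Hedged read-modify-write sketch:
#
#   ds = await client.get_dataset(name=ds.name)
#   ds.display_name = "my_dataset_v2"  # hypothetical rename
#   ds = await client.update_dataset(dataset=ds)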
async def delete_dataset(
self,
request: service.DeleteDatasetRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a dataset and all of its contents. Returns empty
response in the
[response][google.longrunning.Operation.response] field when it
completes, and ``delete_details`` in the
[metadata][google.longrunning.Operation.metadata] field.
Args:
request (:class:`google.cloud.automl_v1beta1.types.DeleteDatasetRequest`):
The request object. Request message for
[AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset].
name (:class:`str`):
Required. The resource name of the
dataset to delete.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`. A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is an empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.DeleteDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_dataset,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
# Done; return the response.
return response
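# Hedged long-running-operation sketch: the returned AsyncOperation is a
# future; awaiting result() blocks until the server-side deletion finishes:
#
#   op = await client.delete_dataset(name=ds.name)
#   await op.result()  # resolves to empty_pb2.Empty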
async def import_data(
self,
request: service.ImportDataRequest = None,
*,
name: str = None,
input_config: io.InputConfig = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Imports data into a dataset. For Tables this method can only be
called on an empty Dataset.
For Tables:
-  A
[schema_inference_version][google.cloud.automl.v1beta1.InputConfig.params]
parameter must be explicitly set.
Returns an empty response in the
[response][google.longrunning.Operation.response] field when it
completes.
Args:
request (:class:`google.cloud.automl_v1beta1.types.ImportDataRequest`):
The request object. Request message for
[AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData].
name (:class:`str`):
Required. Dataset name. Dataset must
already exist. All imported annotations
and examples will be added.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
input_config (:class:`google.cloud.automl_v1beta1.types.InputConfig`):
Required. The desired input location
and its domain specific semantics, if
any.
This corresponds to the ``input_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`. A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is an empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, input_config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.ImportDataRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if input_config is not None:
request.input_config = input_config
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.import_data,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
# Done; return the response.
return response
async def export_data(
self,
request: service.ExportDataRequest = None,
*,
name: str = None,
output_config: io.OutputConfig = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Exports dataset's data to the provided output location. Returns
an empty response in the
[response][google.longrunning.Operation.response] field when it
completes.
Args:
request (:class:`google.cloud.automl_v1beta1.types.ExportDataRequest`):
The request object. Request message for
[AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData].
name (:class:`str`):
Required. The resource name of the
dataset.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
output_config (:class:`google.cloud.automl_v1beta1.types.OutputConfig`):
Required. The desired output
location.
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`. A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is an empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, output_config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.ExportDataRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if output_config is not None:
request.output_config = output_config
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.export_data,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
# Done; return the response.
return response
async def get_annotation_spec(
self,
request: service.GetAnnotationSpecRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> annotation_spec.AnnotationSpec:
r"""Gets an annotation spec.
Args:
request (:class:`google.cloud.automl_v1beta1.types.GetAnnotationSpecRequest`):
The request object. Request message for
[AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec].
name (:class:`str`):
Required. The resource name of the
annotation spec to retrieve.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.types.AnnotationSpec:
A definition of an annotation spec.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.GetAnnotationSpecRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_annotation_spec,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def get_table_spec(
self,
request: service.GetTableSpecRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> table_spec.TableSpec:
r"""Gets a table spec.
Args:
request (:class:`google.cloud.automl_v1beta1.types.GetTableSpecRequest`):
The request object. Request message for
[AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec].
name (:class:`str`):
Required. The resource name of the
table spec to retrieve.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.types.TableSpec:
A specification of a relational table.
The table's schema is represented via its child
column specs. It is pre-populated as part of
ImportData by the schema inference algorithm, the version
of which is a required parameter of ImportData
InputConfig. Note: While working with a table, at
times the schema may be inconsistent with the data in
the table (e.g. string in a FLOAT64 column). The
consistency validation is done upon creation of a
model. Used by: \* Tables
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.GetTableSpecRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_table_spec,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_table_specs(
self,
request: service.ListTableSpecsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTableSpecsAsyncPager:
r"""Lists table specs in a dataset.
Args:
request (:class:`google.cloud.automl_v1beta1.types.ListTableSpecsRequest`):
The request object. Request message for
[AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs].
parent (:class:`str`):
Required. The resource name of the
dataset to list table specs from.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsAsyncPager:
Response message for
[AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.ListTableSpecsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_table_specs,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListTableSpecsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def update_table_spec(
self,
request: service.UpdateTableSpecRequest = None,
*,
table_spec: gca_table_spec.TableSpec = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_table_spec.TableSpec:
r"""Updates a table spec.
Args:
request (:class:`google.cloud.automl_v1beta1.types.UpdateTableSpecRequest`):
The request object. Request message for
[AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec]
table_spec (:class:`google.cloud.automl_v1beta1.types.TableSpec`):
Required. The table spec which
replaces the resource on the server.
This corresponds to the ``table_spec`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.types.TableSpec:
A specification of a relational table.
The table's schema is represented via its child
column specs. It is pre-populated as part of
ImportData by the schema inference algorithm, the version
of which is a required parameter of ImportData
InputConfig. Note: While working with a table, at
times the schema may be inconsistent with the data in
the table (e.g. string in a FLOAT64 column). The
consistency validation is done upon creation of a
model. Used by: \* Tables
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([table_spec])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.UpdateTableSpecRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if table_spec is not None:
request.table_spec = table_spec
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_table_spec,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("table_spec.name", request.table_spec.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def get_column_spec(
self,
request: service.GetColumnSpecRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> column_spec.ColumnSpec:
r"""Gets a column spec.
Args:
request (:class:`google.cloud.automl_v1beta1.types.GetColumnSpecRequest`):
The request object. Request message for
[AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec].
name (:class:`str`):
Required. The resource name of the
column spec to retrieve.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.types.ColumnSpec:
A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were
given on import. Used by: \* Tables
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.GetColumnSpecRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_column_spec,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_column_specs(
self,
request: service.ListColumnSpecsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListColumnSpecsAsyncPager:
r"""Lists column specs in a table spec.
Args:
request (:class:`google.cloud.automl_v1beta1.types.ListColumnSpecsRequest`):
The request object. Request message for
[AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs].
parent (:class:`str`):
Required. The resource name of the
table spec to list column specs from.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsAsyncPager:
Response message for
[AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.ListColumnSpecsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_column_specs,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListColumnSpecsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def update_column_spec(
self,
request: service.UpdateColumnSpecRequest = None,
*,
column_spec: gca_column_spec.ColumnSpec = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_column_spec.ColumnSpec:
r"""Updates a column spec.
Args:
request (:class:`google.cloud.automl_v1beta1.types.UpdateColumnSpecRequest`):
The request object. Request message for
[AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec]
column_spec (:class:`google.cloud.automl_v1beta1.types.ColumnSpec`):
Required. The column spec which
replaces the resource on the server.
This corresponds to the ``column_spec`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.types.ColumnSpec:
A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were
given on import. Used by: \* Tables
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([column_spec])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.UpdateColumnSpecRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if column_spec is not None:
request.column_spec = column_spec
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_column_spec,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("column_spec.name", request.column_spec.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_model(
self,
request: service.CreateModelRequest = None,
*,
parent: str = None,
model: gca_model.Model = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a model. Returns a Model in the
[response][google.longrunning.Operation.response] field when it
completes. When you create a model, several model evaluations
are created for it: a global evaluation, and one evaluation for
each annotation spec.
Args:
request (:class:`google.cloud.automl_v1beta1.types.CreateModelRequest`):
The request object. Request message for
[AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel].
parent (:class:`str`):
Required. Resource name of the parent
project where the model is being
created.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model (:class:`google.cloud.automl_v1beta1.types.Model`):
Required. The model to create.
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.automl_v1beta1.types.Model`. API
proto representing a trained machine learning model.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, model])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.CreateModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if model is not None:
request.model = model
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_model,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_model.Model,
metadata_type=operations.OperationMetadata,
)
# Done; return the response.
return response
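# Hedged training sketch (Model fields shown are assumptions for a
# translation model):
#
#   op = await client.create_model(
#       parent="projects/my-project/locations/us-central1",
#       model=gca_model.Model(
#           display_name="my_model",
#           dataset_id="ds123",
#           translation_model_metadata=translation.TranslationModelMetadata()))
#   model = await op.result()  # resolves to the trained Model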
async def get_model(
self,
request: service.GetModelRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> model.Model:
r"""Gets a model.
Args:
request (:class:`google.cloud.automl_v1beta1.types.GetModelRequest`):
The request object. Request message for
[AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel].
name (:class:`str`):
Required. Resource name of the model.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.types.Model:
API proto representing a trained
machine learning model.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.GetModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_model,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_models(
self,
request: service.ListModelsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListModelsAsyncPager:
r"""Lists models.
Args:
request (:class:`google.cloud.automl_v1beta1.types.ListModelsRequest`):
The request object. Request message for
[AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels].
parent (:class:`str`):
Required. Resource name of the
project, from which to list the models.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsAsyncPager:
Response message for
[AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.ListModelsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_models,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListModelsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def delete_model(
self,
request: service.DeleteModelRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a model. Returns ``google.protobuf.Empty`` in the
[response][google.longrunning.Operation.response] field when it
completes, and ``delete_details`` in the
[metadata][google.longrunning.Operation.metadata] field.
Args:
request (:class:`google.cloud.automl_v1beta1.types.DeleteModelRequest`):
The request object. Request message for
[AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel].
name (:class:`str`):
Required. Resource name of the model
being deleted.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to
                use it as the request or the response type of an API
                method. For instance:
                    service Foo {
                        rpc Bar(google.protobuf.Empty) returns
                        (google.protobuf.Empty);
                    }
                The JSON representation for Empty is an empty JSON
                object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.DeleteModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_model,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
# Done; return the response.
return response
async def deploy_model(
self,
request: service.DeployModelRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deploys a model. If a model is already deployed, deploying it
with the same parameters has no effect. Deploying with different
parametrs (as e.g. changing
[node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number])
will reset the deployment state without pausing the model's
availability.
Only applicable for Text Classification, Image Object Detection
, Tables, and Image Segmentation; all other domains manage
deployment automatically.
Returns an empty response in the
[response][google.longrunning.Operation.response] field when it
completes.
Args:
request (:class:`google.cloud.automl_v1beta1.types.DeployModelRequest`):
The request object. Request message for
[AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel].
name (:class:`str`):
Required. Resource name of the model
to deploy.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to
                use it as the request or the response type of an API
                method. For instance:
                    service Foo {
                        rpc Bar(google.protobuf.Empty) returns
                        (google.protobuf.Empty);
                    }
                The JSON representation for Empty is an empty JSON
                object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.DeployModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.deploy_model,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
# Done; return the response.
return response
async def undeploy_model(
self,
request: service.UndeployModelRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Undeploys a model. If the model is not deployed this method has
no effect.
Only applicable for Text Classification, Image Object Detection
and Tables; all other domains manage deployment automatically.
Returns an empty response in the
[response][google.longrunning.Operation.response] field when it
completes.
Args:
request (:class:`google.cloud.automl_v1beta1.types.UndeployModelRequest`):
The request object. Request message for
[AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel].
name (:class:`str`):
Required. Resource name of the model
to undeploy.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to
                use it as the request or the response type of an API
                method. For instance:
                    service Foo {
                        rpc Bar(google.protobuf.Empty) returns
                        (google.protobuf.Empty);
                    }
                The JSON representation for Empty is an empty JSON
                object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.UndeployModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.undeploy_model,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
# Done; return the response.
return response
async def export_model(
self,
request: service.ExportModelRequest = None,
*,
name: str = None,
output_config: io.ModelExportOutputConfig = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Exports a trained, "export-able", model to a user specified
Google Cloud Storage location. A model is considered export-able
if and only if it has an export format defined for it in
[ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig].
Returns an empty response in the
[response][google.longrunning.Operation.response] field when it
completes.
Args:
request (:class:`google.cloud.automl_v1beta1.types.ExportModelRequest`):
The request object. Request message for
[AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel].
Models need to be enabled for exporting, otherwise an
error code will be returned.
name (:class:`str`):
Required. The resource name of the
model to export.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
output_config (:class:`google.cloud.automl_v1beta1.types.ModelExportOutputConfig`):
Required. The desired output location
and configuration.
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to
                use it as the request or the response type of an API
                method. For instance:
                    service Foo {
                        rpc Bar(google.protobuf.Empty) returns
                        (google.protobuf.Empty);
                    }
                The JSON representation for Empty is an empty JSON
                object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, output_config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.ExportModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if output_config is not None:
request.output_config = output_config
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.export_model,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
# Done; return the response.
return response
async def export_evaluated_examples(
self,
request: service.ExportEvaluatedExamplesRequest = None,
*,
name: str = None,
output_config: io.ExportEvaluatedExamplesOutputConfig = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Exports examples on which the model was evaluated (i.e. which
were in the TEST set of the dataset the model was created from),
together with their ground truth annotations and the annotations
created (predicted) by the model. The examples, ground truth and
        predictions are exported in the state they were in at the moment
        the model was evaluated.
        This export is available only for 30 days after the model
        evaluation is created.
Currently only available for Tables.
Returns an empty response in the
[response][google.longrunning.Operation.response] field when it
completes.
Args:
request (:class:`google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesRequest`):
The request object. Request message for
[AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples].
name (:class:`str`):
Required. The resource name of the
model whose evaluated examples are to be
exported.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
output_config (:class:`google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig`):
Required. The desired output location
and configuration.
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to
                use it as the request or the response type of an API
                method. For instance:
                    service Foo {
                        rpc Bar(google.protobuf.Empty) returns
                        (google.protobuf.Empty);
                    }
                The JSON representation for Empty is an empty JSON
                object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, output_config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.ExportEvaluatedExamplesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if output_config is not None:
request.output_config = output_config
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.export_evaluated_examples,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=operations.OperationMetadata,
)
# Done; return the response.
return response
async def get_model_evaluation(
self,
request: service.GetModelEvaluationRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> model_evaluation.ModelEvaluation:
r"""Gets a model evaluation.
Args:
request (:class:`google.cloud.automl_v1beta1.types.GetModelEvaluationRequest`):
The request object. Request message for
[AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation].
name (:class:`str`):
Required. Resource name for the model
evaluation.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.types.ModelEvaluation:
Evaluation results of a model.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.GetModelEvaluationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_model_evaluation,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=5.0,
),
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_model_evaluations(
self,
request: service.ListModelEvaluationsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListModelEvaluationsAsyncPager:
r"""Lists model evaluations.
Args:
request (:class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest`):
The request object. Request message for
[AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations].
parent (:class:`str`):
Required. Resource name of the model
to list the model evaluations for. If
modelId is set as "-", this will list
model evaluations from across all models
of the parent location.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager:
Response message for
[AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = service.ListModelEvaluationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_model_evaluations,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListModelEvaluationsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-automl",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("AutoMlAsyncClient",)
|
mahak/neutron
|
refs/heads/master
|
neutron/db/rbac_db_models.py
|
2
|
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron_lib.db import constants as db_const
from neutron_lib.db import model_base
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
import sqlalchemy as sa
from sqlalchemy.ext import declarative
from sqlalchemy.orm import validates
from neutron._i18n import _
ACCESS_SHARED = 'access_as_shared'
ACCESS_EXTERNAL = 'access_as_external'
class InvalidActionForType(n_exc.InvalidInput):
message = _("Invalid action '%(action)s' for object type "
"'%(object_type)s'. Valid actions: %(valid_actions)s")
class RBACColumns(model_base.HasId, model_base.HasProject):
"""Mixin that object-specific RBAC tables should inherit.
All RBAC tables should inherit directly from this one because
the RBAC code uses the __subclasses__() method to discover the
RBAC types.
"""
# the target_tenant is the subject that the policy will affect. this may
# also be a wildcard '*' to indicate all tenants or it may be a role if
# neutron gets better integration with keystone
target_tenant = sa.Column(sa.String(db_const.PROJECT_ID_FIELD_SIZE),
nullable=False)
action = sa.Column(sa.String(255), nullable=False)
@property
@abc.abstractmethod
def object_type(self):
# this determines the name that users will use in the API
# to reference the type. sub-classes should set their own
pass
@declarative.declared_attr
def __table_args__(cls):
return (
sa.UniqueConstraint('target_tenant', 'object_id', 'action'),
model_base.BASEV2.__table_args__
)
@validates('action')
def _validate_action(self, key, action):
if action not in self.get_valid_actions():
raise InvalidActionForType(
action=action, object_type=self.object_type,
valid_actions=self.get_valid_actions())
return action
@staticmethod
@abc.abstractmethod
def get_valid_actions():
        # each object table needs to override this to return an iterable
        # of the valid actions for its RBAC entries
pass
def get_type_model_map():
return {table.object_type: table for table in RBACColumns.__subclasses__()}
def _object_id_column(foreign_key):
return sa.Column(sa.String(36),
sa.ForeignKey(foreign_key, ondelete="CASCADE"),
nullable=False)
class NetworkRBAC(RBACColumns, model_base.BASEV2):
"""RBAC table for networks."""
object_id = _object_id_column('networks.id')
object_type = 'network'
revises_on_change = ('network', )
@staticmethod
def get_valid_actions():
actions = (ACCESS_SHARED,)
pl = directory.get_plugin()
if 'external-net' in pl.supported_extension_aliases:
actions += (ACCESS_EXTERNAL,)
return actions
class QosPolicyRBAC(RBACColumns, model_base.BASEV2):
"""RBAC table for qos policies."""
object_id = _object_id_column('qos_policies.id')
object_type = 'qos_policy'
@staticmethod
def get_valid_actions():
return (ACCESS_SHARED,)
class SecurityGroupRBAC(RBACColumns, model_base.BASEV2):
"""RBAC table for security groups."""
object_id = _object_id_column('securitygroups.id')
object_type = 'security_group'
@staticmethod
def get_valid_actions():
return (ACCESS_SHARED,)
class AddressScopeRBAC(RBACColumns, model_base.BASEV2):
"""RBAC table for address_scope."""
object_id = _object_id_column('address_scopes.id')
object_type = 'address_scope'
@staticmethod
def get_valid_actions():
return (ACCESS_SHARED,)
class SubnetPoolRBAC(RBACColumns, model_base.BASEV2):
"""RBAC table for subnetpool."""
object_id = _object_id_column('subnetpools.id')
object_type = 'subnetpool'
@staticmethod
def get_valid_actions():
return (ACCESS_SHARED,)
class AddressGroupRBAC(RBACColumns, model_base.BASEV2):
"""RBAC table for address_group."""
object_id = _object_id_column('address_groups.id')
object_type = 'address_group'
@staticmethod
def get_valid_actions():
return (ACCESS_SHARED,)
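# A hedged sketch (not part of Neutron): a new RBAC-aware resource only
# needs to subclass RBACColumns; get_type_model_map() then discovers it
# automatically via __subclasses__(). The 'widget' resource and its
# 'widgets.id' foreign key below are hypothetical.
class ExampleWidgetRBAC(RBACColumns, model_base.BASEV2):
    """RBAC table for a hypothetical 'widget' resource."""
    object_id = _object_id_column('widgets.id')
    object_type = 'widget'
    @staticmethod
    def get_valid_actions():
        return (ACCESS_SHARED,)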
|
dragon788/plover
|
refs/heads/master
|
plover/gui/keyboard_config.py
|
5
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
import wx
from wx.lib.utils import AdjustRectToScreen
import wx.lib.mixins.listctrl as listmix
from plover.machine.keymap import Keymap
DIALOG_TITLE = 'Keyboard Configuration'
ARPEGGIATE_LABEL = "Arpeggiate"
ARPEGGIATE_INSTRUCTIONS = """Arpeggiate allows using non-NKRO keyboards.
Each key can be pressed separately and the space bar
is pressed to send the stroke."""
UI_BORDER = 4
class EditableListCtrl(wx.ListCtrl, listmix.TextEditMixin, listmix.ListCtrlAutoWidthMixin):
"""Editable list with automatically sized columns."""
def __init__(self, parent, ID=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
listmix.ListCtrlAutoWidthMixin.__init__(self)
listmix.TextEditMixin.__init__(self)
self.Bind(wx.EVT_LIST_BEGIN_LABEL_EDIT, self.restrict_editing)
def restrict_editing(self, event):
"""Disallow editing of first column."""
if event.m_col == 0:
event.Veto()
else:
event.Skip()
def get_all_rows(self):
"""Return all items as a list of lists of strings."""
rowCount = self.GetItemCount()
colCount = self.GetColumnCount()
rows = []
for rowId in range(rowCount):
row = []
for colId in range(colCount):
item = self.GetItem(itemId=rowId, col=colId)
row.append(item.GetText())
rows.append(row)
return rows
class KeyboardConfigDialog(wx.Dialog):
"""Keyboard configuration dialog."""
def __init__(self, options, parent, config):
self.config = config
self.options = options
pos = (config.get_keyboard_config_frame_x(),
config.get_keyboard_config_frame_y())
wx.Dialog.__init__(self, parent, title=DIALOG_TITLE, pos=pos)
sizer = wx.BoxSizer(wx.VERTICAL)
instructions = wx.StaticText(self, label=ARPEGGIATE_INSTRUCTIONS)
sizer.Add(instructions, border=UI_BORDER, flag=wx.ALL)
self.arpeggiate_option = wx.CheckBox(self, label=ARPEGGIATE_LABEL)
self.arpeggiate_option.SetValue(options.arpeggiate)
sizer.Add(self.arpeggiate_option, border=UI_BORDER,
flag=wx.LEFT | wx.RIGHT | wx.BOTTOM)
# editable list for keymap bindings
self.keymap_list_ctrl = EditableListCtrl(self, style=wx.LC_REPORT, size=(300,200))
self.keymap_list_ctrl.InsertColumn(0, 'Steno Key')
self.keymap_list_ctrl.InsertColumn(1, 'Keys')
keymap = options.keymap.get()
stenoKeys = keymap.keys()
rows = map(lambda x: (x, ' '.join(keymap[x])), stenoKeys)
for index, row in enumerate(rows):
self.keymap_list_ctrl.InsertStringItem(index, row[0])
self.keymap_list_ctrl.SetStringItem(index, 1, row[1])
sizer.Add(self.keymap_list_ctrl, flag=wx.EXPAND)
ok_button = wx.Button(self, id=wx.ID_OK)
ok_button.SetDefault()
cancel_button = wx.Button(self, id=wx.ID_CANCEL)
button_sizer = wx.BoxSizer(wx.HORIZONTAL)
button_sizer.Add(ok_button, border=UI_BORDER, flag=wx.ALL)
button_sizer.Add(cancel_button, border=UI_BORDER, flag=wx.ALL)
sizer.Add(button_sizer, flag=wx.ALL | wx.ALIGN_RIGHT, border=UI_BORDER)
self.SetSizer(sizer)
sizer.Fit(self)
self.SetRect(AdjustRectToScreen(self.GetRect()))
self.Bind(wx.EVT_MOVE, self.on_move)
ok_button.Bind(wx.EVT_BUTTON, self.on_ok)
cancel_button.Bind(wx.EVT_BUTTON, self.on_cancel)
def on_move(self, event):
pos = self.GetScreenPositionTuple()
self.config.set_keyboard_config_frame_x(pos[0])
self.config.set_keyboard_config_frame_y(pos[1])
event.Skip()
def on_ok(self, event):
self.options.arpeggiate = self.arpeggiate_option.GetValue()
self.options.keymap = Keymap.from_rows(self.keymap_list_ctrl.get_all_rows())
self.EndModal(wx.ID_OK)
def on_cancel(self, event):
self.EndModal(wx.ID_CANCEL)
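# A hedged usage sketch: the typical modal flow for the dialog above.
# `options`, `parent_frame` and `config` are assumed to come from the
# surrounding Plover GUI code; this helper function is hypothetical.
def show_keyboard_config(options, parent_frame, config):
    dialog = KeyboardConfigDialog(options, parent_frame, config)
    dialog.ShowModal()  # on_ok/on_cancel update options and call EndModal
    dialog.Destroy()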
|
recognai/spaCy
|
refs/heads/master
|
spacy/lang/hu/lemmatizer.py
|
3
| null |
conejoninja/pelisalacarta
|
refs/heads/master
|
python/main-classic/lib/gdata/tlslite/TLSRecordLayer.py
|
270
|
"""Helper class for TLSConnection."""
from __future__ import generators
from utils.compat import *
from utils.cryptomath import *
from utils.cipherfactory import createAES, createRC4, createTripleDES
from utils.codec import *
from errors import *
from messages import *
from mathtls import *
from constants import *
from utils.cryptomath import getRandomBytes
from utils import hmac
from FileObject import FileObject
import sha
import md5
import socket
import errno
import traceback
class _ConnectionState:
def __init__(self):
self.macContext = None
self.encContext = None
self.seqnum = 0
def getSeqNumStr(self):
w = Writer(8)
w.add(self.seqnum, 8)
seqnumStr = bytesToString(w.bytes)
self.seqnum += 1
return seqnumStr
class TLSRecordLayer:
"""
This class handles data transmission for a TLS connection.
Its only subclass is L{tlslite.TLSConnection.TLSConnection}. We've
separated the code in this class from TLSConnection to make things
more readable.
@type sock: socket.socket
@ivar sock: The underlying socket object.
@type session: L{tlslite.Session.Session}
@ivar session: The session corresponding to this connection.
Due to TLS session resumption, multiple connections can correspond
to the same underlying session.
@type version: tuple
@ivar version: The TLS version being used for this connection.
(3,0) means SSL 3.0, and (3,1) means TLS 1.0.
@type closed: bool
@ivar closed: If this connection is closed.
@type resumed: bool
@ivar resumed: If this connection is based on a resumed session.
@type allegedSharedKeyUsername: str or None
@ivar allegedSharedKeyUsername: This is set to the shared-key
username asserted by the client, whether the handshake succeeded or
not. If the handshake fails, this can be inspected to
determine if a guessing attack is in progress against a particular
user account.
@type allegedSrpUsername: str or None
@ivar allegedSrpUsername: This is set to the SRP username
asserted by the client, whether the handshake succeeded or not.
If the handshake fails, this can be inspected to determine
if a guessing attack is in progress against a particular user
account.
@type closeSocket: bool
@ivar closeSocket: If the socket should be closed when the
connection is closed (writable).
If you set this to True, TLS Lite will assume the responsibility of
closing the socket when the TLS Connection is shutdown (either
through an error or through the user calling close()). The default
is False.
@type ignoreAbruptClose: bool
@ivar ignoreAbruptClose: If an abrupt close of the socket should
raise an error (writable).
If you set this to True, TLS Lite will not raise a
L{tlslite.errors.TLSAbruptCloseError} exception if the underlying
socket is unexpectedly closed. Such an unexpected closure could be
caused by an attacker. However, it also occurs with some incorrect
TLS implementations.
You should set this to True only if you're not worried about an
attacker truncating the connection, and only if necessary to avoid
spurious errors. The default is False.
@sort: __init__, read, readAsync, write, writeAsync, close, closeAsync,
getCipherImplementation, getCipherName
"""
def __init__(self, sock):
self.sock = sock
#My session object (Session instance; read-only)
self.session = None
#Am I a client or server?
self._client = None
#Buffers for processing messages
self._handshakeBuffer = []
self._readBuffer = ""
#Handshake digests
self._handshake_md5 = md5.md5()
self._handshake_sha = sha.sha()
#TLS Protocol Version
self.version = (0,0) #read-only
self._versionCheck = False #Once we choose a version, this is True
#Current and Pending connection states
self._writeState = _ConnectionState()
self._readState = _ConnectionState()
self._pendingWriteState = _ConnectionState()
self._pendingReadState = _ConnectionState()
#Is the connection open?
self.closed = True #read-only
self._refCount = 0 #Used to trigger closure
#Is this a resumed (or shared-key) session?
self.resumed = False #read-only
#What username did the client claim in his handshake?
self.allegedSharedKeyUsername = None
self.allegedSrpUsername = None
#On a call to close(), do we close the socket? (writeable)
self.closeSocket = False
#If the socket is abruptly closed, do we ignore it
#and pretend the connection was shut down properly? (writeable)
self.ignoreAbruptClose = False
#Fault we will induce, for testing purposes
self.fault = None
#*********************************************************
# Public Functions START
#*********************************************************
def read(self, max=None, min=1):
"""Read some data from the TLS connection.
This function will block until at least 'min' bytes are
available (or the connection is closed).
If an exception is raised, the connection will have been
automatically closed.
@type max: int
@param max: The maximum number of bytes to return.
@type min: int
@param min: The minimum number of bytes to return
@rtype: str
@return: A string of no more than 'max' bytes, and no fewer
than 'min' (unless the connection has been closed, in which
case fewer than 'min' bytes may be returned).
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
"""
for result in self.readAsync(max, min):
pass
return result
def readAsync(self, max=None, min=1):
"""Start a read operation on the TLS connection.
This function returns a generator which behaves similarly to
read(). Successive invocations of the generator will return 0
if it is waiting to read from the socket, 1 if it is waiting
to write to the socket, or a string if the read operation has
completed.
@rtype: iterable
@return: A generator; see above for details.
"""
try:
while len(self._readBuffer)<min and not self.closed:
try:
for result in self._getMsg(ContentType.application_data):
if result in (0,1):
yield result
applicationData = result
self._readBuffer += bytesToString(applicationData.write())
except TLSRemoteAlert, alert:
if alert.description != AlertDescription.close_notify:
raise
except TLSAbruptCloseError:
if not self.ignoreAbruptClose:
raise
else:
self._shutdown(True)
if max == None:
max = len(self._readBuffer)
returnStr = self._readBuffer[:max]
self._readBuffer = self._readBuffer[max:]
yield returnStr
except:
self._shutdown(False)
raise
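    # A hedged usage sketch (kept in comments so the class stays intact):
    # driving readAsync() from a select() loop on a non-blocking socket.
    # Per the docstring above, 0 means "wait until readable", 1 means
    # "wait until writable", and a string is the completed read:
    #   import select
    #   for result in connection.readAsync(max=1024):
    #       if result == 0:
    #           select.select([connection.sock], [], [])
    #       elif result == 1:
    #           select.select([], [connection.sock], [])
    #       else:
    #           data = result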
def write(self, s):
"""Write some data to the TLS connection.
This function will block until all the data has been sent.
If an exception is raised, the connection will have been
automatically closed.
@type s: str
@param s: The data to transmit to the other party.
@raise socket.error: If a socket error occurs.
"""
for result in self.writeAsync(s):
pass
def writeAsync(self, s):
"""Start a write operation on the TLS connection.
This function returns a generator which behaves similarly to
write(). Successive invocations of the generator will return
1 if it is waiting to write to the socket, or will raise
StopIteration if the write operation has completed.
@rtype: iterable
@return: A generator; see above for details.
"""
try:
if self.closed:
raise ValueError()
index = 0
blockSize = 16384
skipEmptyFrag = False
while 1:
startIndex = index * blockSize
endIndex = startIndex + blockSize
if startIndex >= len(s):
break
if endIndex > len(s):
endIndex = len(s)
block = stringToBytes(s[startIndex : endIndex])
applicationData = ApplicationData().create(block)
for result in self._sendMsg(applicationData, skipEmptyFrag):
yield result
                skipEmptyFrag = True #only send an empty fragment before the 1st message
index += 1
except:
self._shutdown(False)
raise
def close(self):
"""Close the TLS connection.
This function will block until it has exchanged close_notify
alerts with the other party. After doing so, it will shut down the
TLS connection. Further attempts to read through this connection
will return "". Further attempts to write through this connection
will raise ValueError.
If makefile() has been called on this connection, the connection
will be not be closed until the connection object and all file
objects have been closed.
Even if an exception is raised, the connection will have been
closed.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
"""
if not self.closed:
for result in self._decrefAsync():
pass
def closeAsync(self):
"""Start a close operation on the TLS connection.
This function returns a generator which behaves similarly to
close(). Successive invocations of the generator will return 0
if it is waiting to read from the socket, 1 if it is waiting
to write to the socket, or will raise StopIteration if the
close operation has completed.
@rtype: iterable
@return: A generator; see above for details.
"""
if not self.closed:
for result in self._decrefAsync():
yield result
def _decrefAsync(self):
self._refCount -= 1
if self._refCount == 0 and not self.closed:
try:
for result in self._sendMsg(Alert().create(\
AlertDescription.close_notify, AlertLevel.warning)):
yield result
alert = None
while not alert:
for result in self._getMsg((ContentType.alert, \
ContentType.application_data)):
if result in (0,1):
yield result
if result.contentType == ContentType.alert:
alert = result
if alert.description == AlertDescription.close_notify:
self._shutdown(True)
else:
raise TLSRemoteAlert(alert)
except (socket.error, TLSAbruptCloseError):
#If the other side closes the socket, that's okay
self._shutdown(True)
except:
self._shutdown(False)
raise
def getCipherName(self):
"""Get the name of the cipher used with this connection.
@rtype: str
@return: The name of the cipher used with this connection.
Either 'aes128', 'aes256', 'rc4', or '3des'.
"""
if not self._writeState.encContext:
return None
return self._writeState.encContext.name
def getCipherImplementation(self):
"""Get the name of the cipher implementation used with
this connection.
@rtype: str
@return: The name of the cipher implementation used with
this connection. Either 'python', 'cryptlib', 'openssl',
or 'pycrypto'.
"""
if not self._writeState.encContext:
return None
return self._writeState.encContext.implementation
#Emulate a socket, somewhat -
def send(self, s):
"""Send data to the TLS connection (socket emulation).
@raise socket.error: If a socket error occurs.
"""
self.write(s)
return len(s)
def sendall(self, s):
"""Send data to the TLS connection (socket emulation).
@raise socket.error: If a socket error occurs.
"""
self.write(s)
def recv(self, bufsize):
"""Get some data from the TLS connection (socket emulation).
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
"""
return self.read(bufsize)
def makefile(self, mode='r', bufsize=-1):
"""Create a file object for the TLS connection (socket emulation).
@rtype: L{tlslite.FileObject.FileObject}
"""
self._refCount += 1
return FileObject(self, mode, bufsize)
def getsockname(self):
"""Return the socket's own address (socket emulation)."""
return self.sock.getsockname()
def getpeername(self):
"""Return the remote address to which the socket is connected
(socket emulation)."""
return self.sock.getpeername()
def settimeout(self, value):
"""Set a timeout on blocking socket operations (socket emulation)."""
return self.sock.settimeout(value)
def gettimeout(self):
"""Return the timeout associated with socket operations (socket
emulation)."""
return self.sock.gettimeout()
def setsockopt(self, level, optname, value):
"""Set the value of the given socket option (socket emulation)."""
return self.sock.setsockopt(level, optname, value)
#*********************************************************
# Public Functions END
#*********************************************************
def _shutdown(self, resumable):
self._writeState = _ConnectionState()
self._readState = _ConnectionState()
#Don't do this: self._readBuffer = ""
self.version = (0,0)
self._versionCheck = False
self.closed = True
if self.closeSocket:
self.sock.close()
        #Even when resumable is True, we never toggle session.resumable
        #back on here; this method only ever turns it off
if not resumable and self.session:
self.session.resumable = False
def _sendError(self, alertDescription, errorStr=None):
alert = Alert().create(alertDescription, AlertLevel.fatal)
for result in self._sendMsg(alert):
yield result
self._shutdown(False)
raise TLSLocalAlert(alert, errorStr)
def _sendMsgs(self, msgs):
skipEmptyFrag = False
for msg in msgs:
for result in self._sendMsg(msg, skipEmptyFrag):
yield result
skipEmptyFrag = True
def _sendMsg(self, msg, skipEmptyFrag=False):
bytes = msg.write()
contentType = msg.contentType
#Whenever we're connected and asked to send a message,
#we first send an empty Application Data message. This prevents
#an attacker from launching a chosen-plaintext attack based on
#knowing the next IV.
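        #(this is the countermeasure for the predictable-CBC-IV weakness
        #in TLS 1.0, later popularized as the BEAST attack)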
if not self.closed and not skipEmptyFrag and self.version == (3,1):
if self._writeState.encContext:
if self._writeState.encContext.isBlockCipher:
for result in self._sendMsg(ApplicationData(),
skipEmptyFrag=True):
yield result
#Update handshake hashes
if contentType == ContentType.handshake:
bytesStr = bytesToString(bytes)
self._handshake_md5.update(bytesStr)
self._handshake_sha.update(bytesStr)
#Calculate MAC
if self._writeState.macContext:
seqnumStr = self._writeState.getSeqNumStr()
bytesStr = bytesToString(bytes)
mac = self._writeState.macContext.copy()
mac.update(seqnumStr)
mac.update(chr(contentType))
if self.version == (3,0):
mac.update( chr( int(len(bytes)/256) ) )
mac.update( chr( int(len(bytes)%256) ) )
elif self.version in ((3,1), (3,2)):
mac.update(chr(self.version[0]))
mac.update(chr(self.version[1]))
mac.update( chr( int(len(bytes)/256) ) )
mac.update( chr( int(len(bytes)%256) ) )
else:
raise AssertionError()
mac.update(bytesStr)
macString = mac.digest()
macBytes = stringToBytes(macString)
if self.fault == Fault.badMAC:
macBytes[0] = (macBytes[0]+1) % 256
#Encrypt for Block or Stream Cipher
if self._writeState.encContext:
#Add padding and encrypt (for Block Cipher):
if self._writeState.encContext.isBlockCipher:
#Add TLS 1.1 fixed block
if self.version == (3,2):
bytes = self.fixedIVBlock + bytes
#Add padding: bytes = bytes + (macBytes + paddingBytes)
currentLength = len(bytes) + len(macBytes) + 1
blockLength = self._writeState.encContext.block_size
paddingLength = blockLength-(currentLength % blockLength)
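                #e.g. blockLength=16, currentLength=21 (data+MAC+1 byte):
                #paddingLength = 16 - (21 % 16) = 11, so 12 bytes of value
                #11 are appended and the total becomes 32 (two full blocks)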
paddingBytes = createByteArraySequence([paddingLength] * \
(paddingLength+1))
if self.fault == Fault.badPadding:
paddingBytes[0] = (paddingBytes[0]+1) % 256
endBytes = concatArrays(macBytes, paddingBytes)
bytes = concatArrays(bytes, endBytes)
#Encrypt
plaintext = stringToBytes(bytes)
ciphertext = self._writeState.encContext.encrypt(plaintext)
bytes = stringToBytes(ciphertext)
#Encrypt (for Stream Cipher)
else:
bytes = concatArrays(bytes, macBytes)
plaintext = bytesToString(bytes)
ciphertext = self._writeState.encContext.encrypt(plaintext)
bytes = stringToBytes(ciphertext)
#Add record header and send
r = RecordHeader3().create(self.version, contentType, len(bytes))
s = bytesToString(concatArrays(r.write(), bytes))
while 1:
try:
bytesSent = self.sock.send(s) #Might raise socket.error
except socket.error, why:
if why[0] == errno.EWOULDBLOCK:
yield 1
continue
else:
raise
if bytesSent == len(s):
return
s = s[bytesSent:]
yield 1
def _getMsg(self, expectedType, secondaryType=None, constructorType=None):
try:
if not isinstance(expectedType, tuple):
expectedType = (expectedType,)
#Spin in a loop, until we've got a non-empty record of a type we
#expect. The loop will be repeated if:
# - we receive a renegotiation attempt; we send no_renegotiation,
# then try again
# - we receive an empty application-data fragment; we try again
while 1:
for result in self._getNextRecord():
if result in (0,1):
yield result
recordHeader, p = result
#If this is an empty application-data fragment, try again
if recordHeader.type == ContentType.application_data:
if p.index == len(p.bytes):
continue
#If we received an unexpected record type...
if recordHeader.type not in expectedType:
#If we received an alert...
if recordHeader.type == ContentType.alert:
alert = Alert().parse(p)
#We either received a fatal error, a warning, or a
#close_notify. In any case, we're going to close the
#connection. In the latter two cases we respond with
#a close_notify, but ignore any socket errors, since
#the other side might have already closed the socket.
if alert.level == AlertLevel.warning or \
alert.description == AlertDescription.close_notify:
#If the sendMsg() call fails because the socket has
#already been closed, we will be forgiving and not
#report the error nor invalidate the "resumability"
#of the session.
try:
alertMsg = Alert()
alertMsg.create(AlertDescription.close_notify,
AlertLevel.warning)
for result in self._sendMsg(alertMsg):
yield result
except socket.error:
pass
if alert.description == \
AlertDescription.close_notify:
self._shutdown(True)
elif alert.level == AlertLevel.warning:
self._shutdown(False)
else: #Fatal alert:
self._shutdown(False)
#Raise the alert as an exception
raise TLSRemoteAlert(alert)
#If we received a renegotiation attempt...
if recordHeader.type == ContentType.handshake:
subType = p.get(1)
reneg = False
if self._client:
if subType == HandshakeType.hello_request:
reneg = True
else:
if subType == HandshakeType.client_hello:
reneg = True
#Send no_renegotiation, then try again
if reneg:
alertMsg = Alert()
alertMsg.create(AlertDescription.no_renegotiation,
AlertLevel.warning)
for result in self._sendMsg(alertMsg):
yield result
continue
#Otherwise: this is an unexpected record, but neither an
#alert nor renegotiation
for result in self._sendError(\
AlertDescription.unexpected_message,
"received type=%d" % recordHeader.type):
yield result
break
#Parse based on content_type
if recordHeader.type == ContentType.change_cipher_spec:
yield ChangeCipherSpec().parse(p)
elif recordHeader.type == ContentType.alert:
yield Alert().parse(p)
elif recordHeader.type == ContentType.application_data:
yield ApplicationData().parse(p)
elif recordHeader.type == ContentType.handshake:
#Convert secondaryType to tuple, if it isn't already
if not isinstance(secondaryType, tuple):
secondaryType = (secondaryType,)
#If it's a handshake message, check handshake header
if recordHeader.ssl2:
subType = p.get(1)
if subType != HandshakeType.client_hello:
for result in self._sendError(\
AlertDescription.unexpected_message,
"Can only handle SSLv2 ClientHello messages"):
yield result
if HandshakeType.client_hello not in secondaryType:
for result in self._sendError(\
AlertDescription.unexpected_message):
yield result
subType = HandshakeType.client_hello
else:
subType = p.get(1)
if subType not in secondaryType:
for result in self._sendError(\
AlertDescription.unexpected_message,
"Expecting %s, got %s" % (str(secondaryType), subType)):
yield result
#Update handshake hashes
sToHash = bytesToString(p.bytes)
self._handshake_md5.update(sToHash)
self._handshake_sha.update(sToHash)
#Parse based on handshake type
if subType == HandshakeType.client_hello:
yield ClientHello(recordHeader.ssl2).parse(p)
elif subType == HandshakeType.server_hello:
yield ServerHello().parse(p)
elif subType == HandshakeType.certificate:
yield Certificate(constructorType).parse(p)
elif subType == HandshakeType.certificate_request:
yield CertificateRequest().parse(p)
elif subType == HandshakeType.certificate_verify:
yield CertificateVerify().parse(p)
elif subType == HandshakeType.server_key_exchange:
yield ServerKeyExchange(constructorType).parse(p)
elif subType == HandshakeType.server_hello_done:
yield ServerHelloDone().parse(p)
elif subType == HandshakeType.client_key_exchange:
yield ClientKeyExchange(constructorType, \
self.version).parse(p)
elif subType == HandshakeType.finished:
yield Finished(self.version).parse(p)
else:
raise AssertionError()
#If an exception was raised by a Parser or Message instance:
except SyntaxError, e:
for result in self._sendError(AlertDescription.decode_error,
formatExceptionTrace(e)):
yield result
#Returns next record or next handshake message
def _getNextRecord(self):
#If there's a handshake message waiting, return it
if self._handshakeBuffer:
recordHeader, bytes = self._handshakeBuffer[0]
self._handshakeBuffer = self._handshakeBuffer[1:]
yield (recordHeader, Parser(bytes))
return
#Otherwise...
#Read the next record header
bytes = createByteArraySequence([])
recordHeaderLength = 1
ssl2 = False
while 1:
try:
s = self.sock.recv(recordHeaderLength-len(bytes))
except socket.error, why:
if why[0] == errno.EWOULDBLOCK:
yield 0
continue
else:
raise
#If the connection was abruptly closed, raise an error
if len(s)==0:
raise TLSAbruptCloseError()
bytes += stringToBytes(s)
if len(bytes)==1:
if bytes[0] in ContentType.all:
ssl2 = False
recordHeaderLength = 5
elif bytes[0] == 128:
ssl2 = True
recordHeaderLength = 2
else:
raise SyntaxError()
if len(bytes) == recordHeaderLength:
break
#Parse the record header
if ssl2:
r = RecordHeader2().parse(Parser(bytes))
else:
r = RecordHeader3().parse(Parser(bytes))
#Check the record header fields
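        #(18432 = 2**14 + 2048, the maximum TLSCiphertext fragment
        #length allowed by the TLS RFCs)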
if r.length > 18432:
for result in self._sendError(AlertDescription.record_overflow):
yield result
#Read the record contents
bytes = createByteArraySequence([])
while 1:
try:
s = self.sock.recv(r.length - len(bytes))
except socket.error, why:
if why[0] == errno.EWOULDBLOCK:
yield 0
continue
else:
raise
#If the connection is closed, raise a socket error
if len(s)==0:
raise TLSAbruptCloseError()
bytes += stringToBytes(s)
if len(bytes) == r.length:
break
#Check the record header fields (2)
#We do this after reading the contents from the socket, so that
#if there's an error, we at least don't leave extra bytes in the
#socket..
#
# THIS CHECK HAS NO SECURITY RELEVANCE (?), BUT COULD HURT INTEROP.
# SO WE LEAVE IT OUT FOR NOW.
#
#if self._versionCheck and r.version != self.version:
# for result in self._sendError(AlertDescription.protocol_version,
# "Version in header field: %s, should be %s" % (str(r.version),
# str(self.version))):
# yield result
#Decrypt the record
for result in self._decryptRecord(r.type, bytes):
if result in (0,1):
yield result
else:
break
bytes = result
p = Parser(bytes)
#If it doesn't contain handshake messages, we can just return it
if r.type != ContentType.handshake:
yield (r, p)
#If it's an SSLv2 ClientHello, we can return it as well
elif r.ssl2:
yield (r, p)
else:
#Otherwise, we loop through and add the handshake messages to the
#handshake buffer
while 1:
if p.index == len(bytes): #If we're at the end
if not self._handshakeBuffer:
for result in self._sendError(\
AlertDescription.decode_error, \
"Received empty handshake record"):
yield result
break
#There needs to be at least 4 bytes to get a header
if p.index+4 > len(bytes):
for result in self._sendError(\
AlertDescription.decode_error,
"A record has a partial handshake message (1)"):
yield result
p.get(1) # skip handshake type
msgLength = p.get(3)
if p.index+msgLength > len(bytes):
for result in self._sendError(\
AlertDescription.decode_error,
"A record has a partial handshake message (2)"):
yield result
handshakePair = (r, bytes[p.index-4 : p.index+msgLength])
self._handshakeBuffer.append(handshakePair)
p.index += msgLength
#We've moved at least one handshake message into the
#handshakeBuffer, return the first one
recordHeader, bytes = self._handshakeBuffer[0]
self._handshakeBuffer = self._handshakeBuffer[1:]
yield (recordHeader, Parser(bytes))
def _decryptRecord(self, recordType, bytes):
if self._readState.encContext:
#Decrypt if it's a block cipher
if self._readState.encContext.isBlockCipher:
blockLength = self._readState.encContext.block_size
if len(bytes) % blockLength != 0:
for result in self._sendError(\
AlertDescription.decryption_failed,
"Encrypted data not a multiple of blocksize"):
yield result
ciphertext = bytesToString(bytes)
plaintext = self._readState.encContext.decrypt(ciphertext)
if self.version == (3,2): #For TLS 1.1, remove explicit IV
plaintext = plaintext[self._readState.encContext.block_size : ]
bytes = stringToBytes(plaintext)
#Check padding
paddingGood = True
paddingLength = bytes[-1]
if (paddingLength+1) > len(bytes):
paddingGood=False
totalPaddingLength = 0
else:
if self.version == (3,0):
totalPaddingLength = paddingLength+1
elif self.version in ((3,1), (3,2)):
totalPaddingLength = paddingLength+1
paddingBytes = bytes[-totalPaddingLength:-1]
for byte in paddingBytes:
if byte != paddingLength:
paddingGood = False
totalPaddingLength = 0
else:
raise AssertionError()
#Decrypt if it's a stream cipher
else:
paddingGood = True
ciphertext = bytesToString(bytes)
plaintext = self._readState.encContext.decrypt(ciphertext)
bytes = stringToBytes(plaintext)
totalPaddingLength = 0
#Check MAC
macGood = True
macLength = self._readState.macContext.digest_size
endLength = macLength + totalPaddingLength
if endLength > len(bytes):
macGood = False
else:
#Read MAC
startIndex = len(bytes) - endLength
endIndex = startIndex + macLength
checkBytes = bytes[startIndex : endIndex]
#Calculate MAC
seqnumStr = self._readState.getSeqNumStr()
bytes = bytes[:-endLength]
bytesStr = bytesToString(bytes)
mac = self._readState.macContext.copy()
mac.update(seqnumStr)
mac.update(chr(recordType))
if self.version == (3,0):
mac.update( chr( int(len(bytes)/256) ) )
mac.update( chr( int(len(bytes)%256) ) )
elif self.version in ((3,1), (3,2)):
mac.update(chr(self.version[0]))
mac.update(chr(self.version[1]))
mac.update( chr( int(len(bytes)/256) ) )
mac.update( chr( int(len(bytes)%256) ) )
else:
raise AssertionError()
mac.update(bytesStr)
macString = mac.digest()
macBytes = stringToBytes(macString)
#Compare MACs
if macBytes != checkBytes:
macGood = False
if not (paddingGood and macGood):
for result in self._sendError(AlertDescription.bad_record_mac,
"MAC failure (or padding failure)"):
yield result
yield bytes
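    # ------------------------------------------------------------------
    # Hedged illustration (not part of tlslite): for TLS 1.0/1.1 the MAC
    # input assembled above is seq_num(8) || type(1) || version(2) ||
    # length(2) || fragment; SSLv3 omits the version bytes. A standalone
    # sketch with the stdlib (key, seqnum and digest choice hypothetical):
    #
    #   import hmac, hashlib, struct
    #   def tls_record_mac(mac_key, seqnum, rec_type, version, fragment):
    #       header = struct.pack('!QBBBH', seqnum, rec_type,
    #                            version[0], version[1], len(fragment))
    #       return hmac.new(mac_key, header + fragment, hashlib.sha1).digest()
    # ------------------------------------------------------------------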
def _handshakeStart(self, client):
self._client = client
self._handshake_md5 = md5.md5()
self._handshake_sha = sha.sha()
self._handshakeBuffer = []
self.allegedSharedKeyUsername = None
self.allegedSrpUsername = None
self._refCount = 1
def _handshakeDone(self, resumed):
self.resumed = resumed
self.closed = False
def _calcPendingStates(self, clientRandom, serverRandom, implementations):
if self.session.cipherSuite in CipherSuite.aes128Suites:
macLength = 20
keyLength = 16
ivLength = 16
createCipherFunc = createAES
elif self.session.cipherSuite in CipherSuite.aes256Suites:
macLength = 20
keyLength = 32
ivLength = 16
createCipherFunc = createAES
elif self.session.cipherSuite in CipherSuite.rc4Suites:
macLength = 20
keyLength = 16
ivLength = 0
createCipherFunc = createRC4
elif self.session.cipherSuite in CipherSuite.tripleDESSuites:
macLength = 20
keyLength = 24
ivLength = 8
createCipherFunc = createTripleDES
else:
raise AssertionError()
if self.version == (3,0):
createMACFunc = MAC_SSL
elif self.version in ((3,1), (3,2)):
createMACFunc = hmac.HMAC
outputLength = (macLength*2) + (keyLength*2) + (ivLength*2)
#Calculate Keying Material from Master Secret
if self.version == (3,0):
keyBlock = PRF_SSL(self.session.masterSecret,
concatArrays(serverRandom, clientRandom),
outputLength)
elif self.version in ((3,1), (3,2)):
keyBlock = PRF(self.session.masterSecret,
"key expansion",
concatArrays(serverRandom,clientRandom),
outputLength)
else:
raise AssertionError()
#Slice up Keying Material
clientPendingState = _ConnectionState()
serverPendingState = _ConnectionState()
p = Parser(keyBlock)
clientMACBlock = bytesToString(p.getFixBytes(macLength))
serverMACBlock = bytesToString(p.getFixBytes(macLength))
clientKeyBlock = bytesToString(p.getFixBytes(keyLength))
serverKeyBlock = bytesToString(p.getFixBytes(keyLength))
clientIVBlock = bytesToString(p.getFixBytes(ivLength))
serverIVBlock = bytesToString(p.getFixBytes(ivLength))
clientPendingState.macContext = createMACFunc(clientMACBlock,
digestmod=sha)
serverPendingState.macContext = createMACFunc(serverMACBlock,
digestmod=sha)
clientPendingState.encContext = createCipherFunc(clientKeyBlock,
clientIVBlock,
implementations)
serverPendingState.encContext = createCipherFunc(serverKeyBlock,
serverIVBlock,
implementations)
#Assign new connection states to pending states
if self._client:
self._pendingWriteState = clientPendingState
self._pendingReadState = serverPendingState
else:
self._pendingWriteState = serverPendingState
self._pendingReadState = clientPendingState
if self.version == (3,2) and ivLength:
#Choose fixedIVBlock for TLS 1.1 (this is encrypted with the CBC
#residue to create the IV for each sent block)
self.fixedIVBlock = getRandomBytes(ivLength)
def _changeWriteState(self):
self._writeState = self._pendingWriteState
self._pendingWriteState = _ConnectionState()
def _changeReadState(self):
self._readState = self._pendingReadState
self._pendingReadState = _ConnectionState()
def _sendFinished(self):
#Send ChangeCipherSpec
for result in self._sendMsg(ChangeCipherSpec()):
yield result
#Switch to pending write state
self._changeWriteState()
#Calculate verification data
verifyData = self._calcFinished(True)
if self.fault == Fault.badFinished:
verifyData[0] = (verifyData[0]+1)%256
#Send Finished message under new state
finished = Finished(self.version).create(verifyData)
for result in self._sendMsg(finished):
yield result
def _getFinished(self):
#Get and check ChangeCipherSpec
for result in self._getMsg(ContentType.change_cipher_spec):
if result in (0,1):
yield result
changeCipherSpec = result
if changeCipherSpec.type != 1:
for result in self._sendError(AlertDescription.illegal_parameter,
"ChangeCipherSpec type incorrect"):
yield result
#Switch to pending read state
self._changeReadState()
#Calculate verification data
verifyData = self._calcFinished(False)
#Get and check Finished message under new state
for result in self._getMsg(ContentType.handshake,
HandshakeType.finished):
if result in (0,1):
yield result
finished = result
if finished.verify_data != verifyData:
for result in self._sendError(AlertDescription.decrypt_error,
"Finished message is incorrect"):
yield result
def _calcFinished(self, send=True):
if self.version == (3,0):
if (self._client and send) or (not self._client and not send):
senderStr = "\x43\x4C\x4E\x54"
else:
senderStr = "\x53\x52\x56\x52"
verifyData = self._calcSSLHandshakeHash(self.session.masterSecret,
senderStr)
return verifyData
elif self.version in ((3,1), (3,2)):
if (self._client and send) or (not self._client and not send):
label = "client finished"
else:
label = "server finished"
handshakeHashes = stringToBytes(self._handshake_md5.digest() + \
self._handshake_sha.digest())
verifyData = PRF(self.session.masterSecret, label, handshakeHashes,
12)
return verifyData
else:
raise AssertionError()
#Used for Finished messages and CertificateVerify messages in SSL v3
def _calcSSLHandshakeHash(self, masterSecret, label):
masterSecretStr = bytesToString(masterSecret)
imac_md5 = self._handshake_md5.copy()
imac_sha = self._handshake_sha.copy()
imac_md5.update(label + masterSecretStr + '\x36'*48)
imac_sha.update(label + masterSecretStr + '\x36'*40)
md5Str = md5.md5(masterSecretStr + ('\x5c'*48) + \
imac_md5.digest()).digest()
shaStr = sha.sha(masterSecretStr + ('\x5c'*40) + \
imac_sha.digest()).digest()
return stringToBytes(md5Str + shaStr)
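    # ------------------------------------------------------------------
    # Hedged standalone sketch (not part of tlslite): the SSLv3 MD5/SHA
    # pad construction mirrored from _calcSSLHandshakeHash above, written
    # against hashlib instead of the legacy md5/sha modules. Arguments
    # are hypothetical running-hash copies and byte strings.
    #
    #   import hashlib
    #   def ssl3_handshake_hash(run_md5, run_sha, master, sender):
    #       inner_md5 = run_md5.copy(); inner_md5.update(sender + master + b'\x36' * 48)
    #       inner_sha = run_sha.copy(); inner_sha.update(sender + master + b'\x36' * 40)
    #       outer_md5 = hashlib.md5(master + b'\x5c' * 48 + inner_md5.digest()).digest()
    #       outer_sha = hashlib.sha1(master + b'\x5c' * 40 + inner_sha.digest()).digest()
    #       return outer_md5 + outer_sha
    # ------------------------------------------------------------------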
|
bukzor/sympy
|
refs/heads/master
|
sympy/plotting/pygletplot/color_scheme.py
|
85
|
from __future__ import print_function, division
from sympy import Basic, Symbol, symbols, lambdify
from .util import interpolate, rinterpolate, create_bounds, update_bounds
from sympy.core.compatibility import range
class ColorGradient(object):
colors = [0.4, 0.4, 0.4], [0.9, 0.9, 0.9]
intervals = 0.0, 1.0
def __init__(self, *args):
if len(args) == 2:
self.colors = list(args)
self.intervals = [0.0, 1.0]
elif len(args) > 0:
if len(args) % 2 != 0:
raise ValueError("len(args) should be even")
self.colors = [args[i] for i in range(1, len(args), 2)]
self.intervals = [args[i] for i in range(0, len(args), 2)]
assert len(self.colors) == len(self.intervals)
def copy(self):
c = ColorGradient()
c.colors = [e[::] for e in self.colors]
c.intervals = self.intervals[::]
return c
def _find_interval(self, v):
m = len(self.intervals)
i = 0
while i < m - 1 and self.intervals[i] <= v:
i += 1
return i
def _interpolate_axis(self, axis, v):
i = self._find_interval(v)
v = rinterpolate(self.intervals[i - 1], self.intervals[i], v)
return interpolate(self.colors[i - 1][axis], self.colors[i][axis], v)
def __call__(self, r, g, b):
c = self._interpolate_axis
return c(0, r), c(1, g), c(2, b)
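# Hedged usage sketch (illustrative, not executed by this module): with a
# two-stop gradient over the default [0.0, 1.0] intervals, each channel is
# linearly interpolated, so the identity mapping below holds exactly.
#
#   g = ColorGradient([0.0, 0.0, 0.0], [1.0, 1.0, 1.0])
#   g(0.25, 0.5, 0.75)  # -> (0.25, 0.5, 0.75)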
default_color_schemes = {} # defined at the bottom of this file
class ColorScheme(object):
def __init__(self, *args, **kwargs):
self.args = args
self.f, self.gradient = None, ColorGradient()
if len(args) == 1 and not isinstance(args[0], Basic) and callable(args[0]):
self.f = args[0]
elif len(args) == 1 and isinstance(args[0], str):
if args[0] in default_color_schemes:
cs = default_color_schemes[args[0]]
self.f, self.gradient = cs.f, cs.gradient.copy()
else:
self.f = lambdify('x,y,z,u,v', args[0])
else:
self.f, self.gradient = self._interpret_args(args, kwargs)
self._test_color_function()
if not isinstance(self.gradient, ColorGradient):
raise ValueError("Color gradient not properly initialized. "
"(Not a ColorGradient instance.)")
def _interpret_args(self, args, kwargs):
f, gradient = None, self.gradient
atoms, lists = self._sort_args(args)
s = self._pop_symbol_list(lists)
s = self._fill_in_vars(s)
# prepare the error message for lambdification failure
f_str = ', '.join(str(fa) for fa in atoms)
s_str = (str(sa) for sa in s)
s_str = ', '.join(sa for sa in s_str if sa.find('unbound') < 0)
f_error = ValueError("Could not interpret arguments "
"%s as functions of %s." % (f_str, s_str))
# try to lambdify args
if len(atoms) == 1:
fv = atoms[0]
try:
f = lambdify(s, [fv, fv, fv])
except TypeError:
raise f_error
elif len(atoms) == 3:
fr, fg, fb = atoms
try:
f = lambdify(s, [fr, fg, fb])
except TypeError:
raise f_error
else:
raise ValueError("A ColorScheme must provide 1 or 3 "
"functions in x, y, z, u, and/or v.")
        # try to interpret any given color information
if len(lists) == 0:
gargs = []
elif len(lists) == 1:
gargs = lists[0]
elif len(lists) == 2:
try:
(r1, g1, b1), (r2, g2, b2) = lists
except TypeError:
raise ValueError("If two color arguments are given, "
"they must be given in the format "
"(r1, g1, b1), (r2, g2, b2).")
gargs = lists
elif len(lists) == 3:
try:
(r1, r2), (g1, g2), (b1, b2) = lists
except Exception:
raise ValueError("If three color arguments are given, "
"they must be given in the format "
"(r1, r2), (g1, g2), (b1, b2). To create "
"a multi-step gradient, use the syntax "
"[0, colorStart, step1, color1, ..., 1, "
"colorEnd].")
gargs = [[r1, g1, b1], [r2, g2, b2]]
else:
raise ValueError("Don't know what to do with collection "
"arguments %s." % (', '.join(str(l) for l in lists)))
if gargs:
try:
gradient = ColorGradient(*gargs)
except Exception as ex:
raise ValueError(("Could not initialize a gradient "
"with arguments %s. Inner "
"exception: %s") % (gargs, str(ex)))
return f, gradient
def _pop_symbol_list(self, lists):
symbol_lists = []
for l in lists:
mark = True
for s in l:
if s is not None and not isinstance(s, Symbol):
mark = False
break
if mark:
lists.remove(l)
symbol_lists.append(l)
if len(symbol_lists) == 1:
return symbol_lists[0]
elif len(symbol_lists) == 0:
return []
else:
raise ValueError("Only one list of Symbols "
"can be given for a color scheme.")
def _fill_in_vars(self, args):
        defaults = symbols('x,y,z,u,v')
        v_error = ValueError("Could not find what to plot.")
        if len(args) == 0:
            return defaults
        if not isinstance(args, (tuple, list)):
            raise v_error
for s in args:
if s is not None and not isinstance(s, Symbol):
raise v_error
# when vars are given explicitly, any vars
# not given are marked 'unbound' as to not
# be accidentally used in an expression
vars = [Symbol('unbound%i' % (i)) for i in range(1, 6)]
# interpret as t
if len(args) == 1:
vars[3] = args[0]
# interpret as u,v
elif len(args) == 2:
if args[0] is not None:
vars[3] = args[0]
if args[1] is not None:
vars[4] = args[1]
# interpret as x,y,z
elif len(args) >= 3:
# allow some of x,y,z to be
# left unbound if not given
if args[0] is not None:
vars[0] = args[0]
if args[1] is not None:
vars[1] = args[1]
if args[2] is not None:
vars[2] = args[2]
# interpret the rest as t
if len(args) >= 4:
vars[3] = args[3]
# ...or u,v
if len(args) >= 5:
vars[4] = args[4]
return vars
def _sort_args(self, args):
atoms, lists = [], []
for a in args:
if isinstance(a, (tuple, list)):
lists.append(a)
else:
atoms.append(a)
return atoms, lists
def _test_color_function(self):
if not callable(self.f):
raise ValueError("Color function is not callable.")
        try:
            result = self.f(0, 0, 0, 0, 0)
        except TypeError:
            raise ValueError("Color function needs to accept x,y,z,u,v, "
                             "as arguments even if it doesn't use all of them.")
        except Exception:
            return  # color function probably not valid at 0,0,0,0,0
        if len(result) != 3:
            raise ValueError("Color function needs to return 3-tuple r,g,b.")
def __call__(self, x, y, z, u, v):
try:
return self.f(x, y, z, u, v)
        except Exception:
            return None
def apply_to_curve(self, verts, u_set, set_len=None, inc_pos=None):
"""
Apply this color scheme to a
set of vertices over a single
independent variable u.
"""
bounds = create_bounds()
cverts = list()
if callable(set_len):
set_len(len(u_set)*2)
# calculate f() = r,g,b for each vert
# and find the min and max for r,g,b
for _u in range(len(u_set)):
if verts[_u] is None:
cverts.append(None)
else:
x, y, z = verts[_u]
u, v = u_set[_u], None
c = self(x, y, z, u, v)
if c is not None:
c = list(c)
update_bounds(bounds, c)
cverts.append(c)
if callable(inc_pos):
inc_pos()
# scale and apply gradient
for _u in range(len(u_set)):
if cverts[_u] is not None:
for _c in range(3):
# scale from [f_min, f_max] to [0,1]
cverts[_u][_c] = rinterpolate(bounds[_c][0], bounds[_c][1],
cverts[_u][_c])
# apply gradient
cverts[_u] = self.gradient(*cverts[_u])
if callable(inc_pos):
inc_pos()
return cverts
def apply_to_surface(self, verts, u_set, v_set, set_len=None, inc_pos=None):
"""
Apply this color scheme to a
set of vertices over two
independent variables u and v.
"""
bounds = create_bounds()
cverts = list()
if callable(set_len):
set_len(len(u_set)*len(v_set)*2)
# calculate f() = r,g,b for each vert
# and find the min and max for r,g,b
for _u in range(len(u_set)):
column = list()
for _v in range(len(v_set)):
if verts[_u][_v] is None:
column.append(None)
else:
x, y, z = verts[_u][_v]
u, v = u_set[_u], v_set[_v]
c = self(x, y, z, u, v)
if c is not None:
c = list(c)
update_bounds(bounds, c)
column.append(c)
if callable(inc_pos):
inc_pos()
cverts.append(column)
# scale and apply gradient
for _u in range(len(u_set)):
for _v in range(len(v_set)):
if cverts[_u][_v] is not None:
# scale from [f_min, f_max] to [0,1]
for _c in range(3):
cverts[_u][_v][_c] = rinterpolate(bounds[_c][0],
bounds[_c][1], cverts[_u][_v][_c])
# apply gradient
cverts[_u][_v] = self.gradient(*cverts[_u][_v])
if callable(inc_pos):
inc_pos()
return cverts
def str_base(self):
return ", ".join(str(a) for a in self.args)
def __repr__(self):
return "%s" % (self.str_base())
x, y, z, t, u, v = symbols('x,y,z,t,u,v')
default_color_schemes['rainbow'] = ColorScheme(z, y, x)
default_color_schemes['zfade'] = ColorScheme(z, (0.4, 0.4, 0.97),
(0.97, 0.4, 0.4), (None, None, z))
default_color_schemes['zfade3'] = ColorScheme(z, (None, None, z),
[0.00, (0.2, 0.2, 1.0),
0.35, (0.2, 0.8, 0.4),
0.50, (0.3, 0.9, 0.3),
0.65, (0.4, 0.8, 0.2),
1.00, (1.0, 0.2, 0.2)])
default_color_schemes['zfade4'] = ColorScheme(z, (None, None, z),
[0.0, (0.3, 0.3, 1.0),
0.30, (0.3, 1.0, 0.3),
0.55, (0.95, 1.0, 0.2),
0.65, (1.0, 0.95, 0.2),
0.85, (1.0, 0.7, 0.2),
1.0, (1.0, 0.3, 0.2)])
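# Hedged usage sketch (illustrative, not executed by this module). Calling a
# scheme returns the raw f(x, y, z, u, v) triple; the gradient is applied
# later by apply_to_curve/apply_to_surface.
#
#   cs = default_color_schemes['rainbow']   # f = (z, y, x)
#   cs(0.1, 0.5, 0.9, 0.0, 0.0)             # -> [0.9, 0.5, 0.1]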
|
fuzzing/bifuz
|
refs/heads/master
|
create_templates.py
|
3
|
#!/usr/bin/env python
# Module for generating all possible raw templates
#
# Copyright (C) 2015 Intel Corporation
# Author: Andreea Brindusa Proca <andreea.brindusa.proca@intel.com>
# Author: Razvan-Costin Ionescu <razvan.ionescu@intel.com>
#
# Licensed under the MIT license, see COPYING.MIT for details
import os
'''
Sample of a raw template
#Do not add lines to this template; possible options for each item: fuzz, nofuzz; if nofuzz, then a list is expected
action
category
data_uri
e_key
e_val
flag
'''
current_path = os.getcwd()
path_to_templates = current_path+"/templates"
with open(path_to_templates+"/raw.tem", 'r') as f:
raw = f.readlines()
#there are 6 possible parameters, so 2^6 = 64 possible templates
for nr in range(64):
index = bin(nr)[2:].zfill(6)
    # Map each bit of the 6-bit index to a status, in template field order:
    # action, category, data_uri, e_key, e_val, flag ('1' -> fuzz, '0' -> nofuzz).
    a_status, c_status, duri_status, ek_status, ev_status, flag_status = \
        ["fuzz" if bit == "1" else "nofuzz" for bit in index]
with open(path_to_templates+"/tem_%s.tem"%index,'w') as t:
t.write(raw[0])
t.write(raw[1].replace("action", "action "+a_status))
t.write(raw[2].replace("category", "category "+c_status))
t.write(raw[3].replace("data_uri", "data_uri "+duri_status))
t.write(raw[4].replace("e_key", "e_key "+ek_status))
t.write(raw[5].replace("e_val", "e_val "+ev_status))
t.write(raw[6].replace("flag", "flag "+flag_status))
print "All raw templates have been generated"
#session0 folder contains the tem_111111.tem file - with 0 0s in its name
#session1 folder contains the tem_011111.tem, tem_101111.tem, tem_110111.tem and so on up to tem_111110.tem file - with 1 0s in its name
#...
#session6 folder contains the tem_000000.tem - with 6 0s in its name
ok=True
for session in range(7):
if os.path.isdir("session_%s"%(str(session))):
print "Folder session_%s was already created. Please remove it first"%(str(session))
ok=False
break
os.mkdir("session_%s"%(str(session)))
for i in range(64):
#myStr will keep the index of the file converted in binary: e.g. 000000
myStr = str(bin(i)[2:].zfill(6))
if myStr.count("0") == session:
os.system("cp templates/tem_%s.tem session_%s"%(myStr,session))
if ok:
print("All testing session folders have been created")
print("Path to the template folder: "+os.getcwd()+"/templates/ or "+os.getcwd()+"/session_n/ where n runs from 0 to 6")
|
bbannier/ROOT
|
refs/heads/master
|
tutorials/pyroot/gui_ex.py
|
28
|
import os, sys, ROOT
def pygaus( x, par ):
import math
if (par[2] != 0.0):
arg1 = (x[0]-par[1])/par[2]
arg2 = (0.01*0.39894228)/par[2]
arg3 = par[0]/(1+par[3])
gauss = arg3*arg2*math.exp(-0.5*arg1*arg1)
else:
print 'returning 0'
gauss = 0.
return gauss
tpygaus = ROOT.TF1( 'pygaus', pygaus, -4, 4, 4 )
tpygaus.SetParameters( 1., 0., 1. )
def MyDraw():
btn = ROOT.BindObject( ROOT.gTQSender, ROOT.TGTextButton )
if btn.WidgetId() == 10:
global tpygaus, window
tpygaus.Draw()
ROOT.gPad.Update()
m = ROOT.TPyDispatcher( MyDraw )
class pMainFrame( ROOT.TGMainFrame ):
def __init__( self, parent, width, height ):
ROOT.TGMainFrame.__init__( self, parent, width, height )
self.Canvas = ROOT.TRootEmbeddedCanvas( 'Canvas', self, 200, 200 )
self.AddFrame( self.Canvas, ROOT.TGLayoutHints() )
self.ButtonsFrame = ROOT.TGHorizontalFrame( self, 200, 40 )
self.DrawButton = ROOT.TGTextButton( self.ButtonsFrame, '&Draw', 10 )
self.DrawButton.Connect( 'Clicked()', "TPyDispatcher", m, 'Dispatch()' )
self.ButtonsFrame.AddFrame( self.DrawButton, ROOT.TGLayoutHints() )
self.ExitButton = ROOT.TGTextButton( self.ButtonsFrame, '&Exit', 20 )
self.ExitButton.SetCommand( 'TPython::Exec( "raise SystemExit" )' )
self.ButtonsFrame.AddFrame( self.ExitButton, ROOT.TGLayoutHints() )
self.AddFrame( self.ButtonsFrame, ROOT.TGLayoutHints() )
self.SetWindowName( 'My first GUI' )
self.MapSubwindows()
self.Resize( self.GetDefaultSize() )
self.MapWindow()
def __del__(self):
self.Cleanup()
if __name__ == '__main__':
window = pMainFrame( ROOT.gClient.GetRoot(), 200, 200 )
|
tomviner/pytest
|
refs/heads/master
|
testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/conftest.py
|
46
|
import pytest
@pytest.fixture
def spam():
return "spam"
|
boshnivolo/TIY-Assignments
|
refs/heads/master
|
node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py
|
1812
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# A dictionary mapping supported target types to extensions.
TARGET_TYPE_EXT = {
'executable': 'exe',
'loadable_module': 'dll',
'shared_library': 'dll',
'static_library': 'lib',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
for deptype in ('dependencies', 'dependencies_original'):
dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t][deptype] = new_dependencies
return (new_target_list, new_target_dicts)
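# Hedged examples of the renaming helpers above (target names hypothetical):
#
#   _SuffixName('foo#target', 'large_pdb_shim')  -> 'foo_large_pdb_shim#target'
#   _ShardName('foo#target', 2)                  -> 'foo_2#target'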
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
    # file, polluting GYP's tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts)
|
ESTUDIANTEGIT/tuconsejocomunal
|
refs/heads/master
|
usuarios_venezuela/__openerp__.py
|
4
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Generated by the OpenERP plugin for Dia !
#
{
'name': 'Usuarios Venezuela',
'version': '1.0',
'depends': ['base'],
'author': 'Felipe Villamizar (Comunidad Bachaco.ve)',
'category': 'Configuración Técnica',
'description': """
Este módulo le Agrega al modulo base usuarios del sistema lo siguiente:
Cédula (Campo obligatorio y único)
Teléfono
Dirección
Estado
Municipio
Parroquia
Sector
Calle / Avenida
Casa /Edificio
Piso y Apartamento
""",
'update_xml': [],
"data" : [
'vistas/usuarios_venezuela_view.xml',
'vistas/direccion.xml',
'data/estados.xml',
'data/municipios.xml',
'data/parroquias.xml'
],
'installable': True,
'auto_install': False,
}
|
GunoH/intellij-community
|
refs/heads/master
|
python/testData/formatter/noAlignmentAfterDictHangingIndentInFunctionCall.py
|
79
|
def test_function(*args):
pass
test_function({
'a': 'b',
}, 5)
test_function(1,
2,
3)
|
jlmdegoede/Invoicegen
|
refs/heads/master
|
agreements/apps.py
|
1
|
from django.apps import AppConfig
class AgreementmoduleConfig(AppConfig):
name = 'agreements'
|
ComputationalPhysics/atomify-lammps
|
refs/heads/dev
|
libs/lammps/tools/eff/bohr2ang.py
|
57
|
Info="""
Module name: bohr2ang.py
Author: (c) Andres Jaramillo-Botero
California Institute of Technology
ajaramil@caltech.edu
Project: pEFF
Version: August 2009
Usage: python bohr2ang.py
>>Name of data file (bohr): [datafile]
Results:
creates a datafile with extension .ang in real units
"""
import os
currdir=os.getcwd()
datafile=raw_input("Name of data file (bohr): ")
bohr2ang=0.529177249
bperatu2angperfs=0.512396271120794
f=open(currdir+'/'+datafile,'r')
w=open(currdir+'/'+datafile+'.ang','w')
lines=f.readlines()
atom_flag=False
vel_flag=False
for line in lines:
if line.find("xlo") > 0:
parse=line.split()
w.write("%f %f xlo xhi\n"%(float(parse[0])*bohr2ang,float(parse[1])*bohr2ang))
elif line.find("ylo") > 0:
parse=line.split()
w.write("%f %f ylo yhi\n"%(float(parse[0])*bohr2ang,float(parse[1])*bohr2ang))
elif line.find("zlo") > 0:
parse=line.split()
w.write("%f %f zlo zhi\n"%(float(parse[0])*bohr2ang,float(parse[1])*bohr2ang))
elif line.find("xy") >= 0:
parse=line.split()
w.write("%f %f %f xy xz yz\n"%(float(parse[0])*bohr2ang,float(parse[1])*bohr2ang,float(parse[2])*bohr2ang))
elif atom_flag and line.strip():
parse=line.split()
id=parse[0]
type=parse[1]
q=parse[2]
spin=parse[3]
eradius=float(parse[4])*bohr2ang
x=float(parse[5])*bohr2ang
y=float(parse[6])*bohr2ang
z=float(parse[7])*bohr2ang
rest=" ".join(parse[8:])
w.write("%s %s %s %s %f %f %f %f %s\n"%(id,type,q,spin,eradius,x,y,z,rest))
elif line.find("Atoms") >= 0:
w.write(line)
atom_flag=True
continue
elif vel_flag and line != "\n":
parse=line.split()
id=parse[0]
vx=float(parse[1])*bperatu2angperfs
vy=float(parse[2])*bperatu2angperfs
vz=float(parse[3])*bperatu2angperfs
erv=float(parse[4])*bperatu2angperfs
w.write("%s %f %f %f\n"%(id,vx,vy,vz,erv))
elif line.find("Velocities") >= 0:
w.write(line)
atom_flag=False
vel_flag=True
continue
else:
w.write(line)
f.close()
w.close()
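# Hedged note (illustrative, not part of pEFF): the two factors used above are
#   1 bohr     = 0.529177249 angstrom           (positions, radii, cell vectors)
#   1 bohr/atu = 0.512396271120794 angstrom/fs  (velocities)
# so a single standalone conversion is just, e.g.:
#
#   x_ang = x_bohr * 0.529177249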
|
minghuascode/pyj
|
refs/heads/master
|
examples/gcharttestapp/GChartExample03.py
|
6
|
from pyjamas.chart.GChart import GChart
from pyjamas.chart import SymbolType
from pyjamas.chart.GChartConsts import Y_AXIS, Y2_AXIS
"""*
* Defines a chart with a scatterplot on one y-axis, and a
* barchart on the other.
"""
class GChartExample03(GChart):
def __init__(self):
GChart.__init__(self)
do_axis2 = True
self.setChartTitle("<h2>10x and x<sup>2</sup></h2>")
self.setChartSize(300, 300)
self.addCurve()
self.getCurve().setLegendLabel("<i>10x</i>")
self.getCurve().setYAxis(Y_AXIS)
self.getCurve().getSymbol().setSymbolType(SymbolType.VBAR_SOUTH)
self.getCurve().getSymbol().setBackgroundColor("#DDF")
self.getCurve().getSymbol().setBorderColor("red")
self.getCurve().getSymbol().setBorderWidth(1)
self.getCurve().getSymbol().setModelWidth(0.5)
for i in range(10):
self.getCurve().addPoint(i,i*10)
if do_axis2:
self.addCurve()
self.getCurve().setLegendLabel("<i>x<sup>2</sup></i>")
self.getCurve().setYAxis(Y2_AXIS)
self.getCurve().getSymbol().setSymbolType(SymbolType.BOX_CENTER)
self.getCurve().getSymbol().setWidth(5)
self.getCurve().getSymbol().setHeight(5)
self.getCurve().getSymbol().setBorderWidth(0)
self.getCurve().getSymbol().setBackgroundColor("navy")
self.getCurve().getSymbol().setFillThickness(2)
self.getCurve().getSymbol().setFillSpacing(5)
for i in range(self.getCurve(0).getNPoints()):
self.getCurve().addPoint(i,i*i)
self.getXAxis().setAxisLabel("<i>x</i>")
self.getXAxis().setHasGridlines(True)
self.getXAxis().setTickThickness(0); # hide tick marks...
self.getXAxis().setTickLength(3); # but leave a small gap
self.getYAxis().setAxisLabel("<i>10x</i>")
self.getYAxis().setAxisMax(100)
self.getYAxis().setAxisMin(0)
self.getYAxis().setTickLabelFormat("#.#")
self.getYAxis().setTickCount(11)
if do_axis2:
self.getY2Axis().setAxisLabel("<i>x<sup>2</sup></i>")
self.getY2Axis().setHasGridlines(True)
# last bar 'sticks out' over right edge, so extend 'grid' right:
self.getY2Axis().setTickLength(15)
|
skosukhin/spack
|
refs/heads/esiwace
|
var/spack/repos/builtin/packages/emacs/package.py
|
1
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Emacs(AutotoolsPackage):
"""The Emacs programmable text editor."""
homepage = "https://www.gnu.org/software/emacs"
url = "http://ftp.gnu.org/gnu/emacs/emacs-24.5.tar.gz"
version('25.3', '74ddd373dc52ac05ca7a8c63b1ddbf58')
version('25.2', '0a36d1cdbba6024d4dbbac027f87995f')
version('25.1', '95c12e6a9afdf0dcbdd7d2efa26ca42c')
version('24.5', 'd74b597503a68105e61b5b9f6d065b44')
variant('X', default=False, description="Enable an X toolkit")
variant(
'toolkit',
default='gtk',
values=('gtk', 'athena'),
description="Select an X toolkit (gtk, athena)"
)
depends_on('pkg-config@0.9.0:', type='build')
depends_on('ncurses')
depends_on('zlib')
depends_on('libtiff', when='+X')
depends_on('libpng', when='+X')
depends_on('libxpm', when='+X')
depends_on('giflib', when='+X')
depends_on('libx11', when='+X')
depends_on('libxaw', when='+X toolkit=athena')
depends_on('gtkplus+X', when='+X toolkit=gtk')
def configure_args(self):
spec = self.spec
toolkit = spec.variants['toolkit'].value
if '+X' in spec:
args = [
'--with-x',
'--with-x-toolkit={0}'.format(toolkit)
]
else:
args = ['--without-x']
return args
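    # Hedged usage note (spec names are standard Spack syntax): a spec such
    # as `emacs +X toolkit=gtk` makes configure_args() above return
    # ['--with-x', '--with-x-toolkit=gtk']; the default ~X spec yields
    # ['--without-x'].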
|
HPCGISLab/STDataViz
|
refs/heads/master
|
WorkingVersion/Scatter_plot.py
|
1
|
import numpy as np
from tvtk.api import tvtk
from mayavi.scripts import mayavi2
@mayavi2.standalone
def main():
# Create some random points to view.
pd = tvtk.PolyData()
pd.points = np.random.random((1000, 3))
verts = np.arange(0, 1000, 1)
verts.shape = (1000, 1)
pd.verts = verts
pd.point_data.scalars = np.random.random(1000)
pd.point_data.scalars.name = 'scalars'
# Now visualize it using mayavi2.
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi.modules.outline import Outline
from mayavi.modules.surface import Surface
mayavi.new_scene()
d = VTKDataSource()
d.data = pd
mayavi.add_source(d)
mayavi.add_module(Outline())
s = Surface()
mayavi.add_module(s)
s.actor.property.set(representation='p', point_size=2)
# You could also use glyphs to render the points via the Glyph module.
if __name__ == '__main__':
main()
|
peragro/django-project
|
refs/heads/master
|
django_project/mixins.py
|
2
|
from django.db import models
from django.db import transaction
from reversion import revisions as reversion
from follow.models import Follow
from django_project import signals
class ProjectMixin(object):
def save(self, *args, **kwargs):
ret = super(ProjectMixin, self).save(*args, **kwargs)
#Author of the project is always following!
Follow.objects.get_or_create(self.author, self)
return ret
class TaskMixin(object):
def versions(self):
#version_list = reversion.get_for_object(self)
version_list = reversion.get_unique_for_object(self)
return version_list
def nr_of_versions(self):
version_list = reversion.get_unique_for_object(self)
return len(version_list)
def save_revision(self, user, comment, *args, **kwargs):
with transaction.atomic(), reversion.create_revision():
self.save()
reversion.set_user(user)
reversion.set_comment(comment)
def save(self, *args, **kwargs):
from django_project.models import Task, Transition
exists = self.id is not None
if exists:
old_task = Task.objects.get(pk=self.id)
old_state = old_task.status
ret = super(TaskMixin, self).save(*args, **kwargs)
if exists:
new_state = self.status
            # Only signal if the states belong to the same project (else assume saving from admin)
            if new_state.project == old_state.project:
                #print('TaskMixin::save 1', old_state, new_state)
                transition = Transition.objects.get(source=old_state, destination=new_state)
                #print('TaskMixin::save 2', transition)
if new_state.is_resolved:
signals.workflow_task_resolved.send(sender=Task, instance=self, transition=transition, old_state=old_state, new_state=new_state)
else:
signals.workflow_task_transition.send(sender=Task, instance=self, transition=transition, old_state=old_state, new_state=new_state)
else:
signals.workflow_task_new.send(sender=Task, instance=self)
return ret
class CommentMixin(object):
def save(self, *args, **kwargs):
from django_project.models import Comment
exists = self.id is not None
ret = super(CommentMixin, self).save(*args, **kwargs)
if not exists:
signals.commented.send(sender=Comment, instance=self.content_object, comment=self)
return ret
|
ebagdasa/tempest
|
refs/heads/master
|
tempest/api_schema/response/compute/quotas.py
|
12
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
common_quota_set = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'quota_set': {
'type': 'object',
'properties': {
'instances': {'type': 'integer'},
'cores': {'type': 'integer'},
'ram': {'type': 'integer'},
'floating_ips': {'type': 'integer'},
'fixed_ips': {'type': 'integer'},
'metadata_items': {'type': 'integer'},
'key_pairs': {'type': 'integer'},
'security_groups': {'type': 'integer'},
'security_group_rules': {'type': 'integer'}
},
'required': ['instances', 'cores', 'ram',
'floating_ips', 'fixed_ips',
'metadata_items', 'key_pairs',
'security_groups', 'security_group_rules']
}
},
'required': ['quota_set']
}
}
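# Hedged example (values illustrative): a response body that satisfies the
# schema above -- every required property present and integer-typed.
#
# {'quota_set': {'instances': 10, 'cores': 20, 'ram': 51200,
#                'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128,
#                'key_pairs': 100, 'security_groups': 10,
#                'security_group_rules': 20}}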
|
apark263/tensorflow
|
refs/heads/master
|
tensorflow/contrib/saved_model/python/saved_model/keras_saved_model.py
|
3
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utility functions to save/load keras Model to/from SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import saving
# TODO(kathywu): Remove all contrib callers, switch to tf.keras.
save_keras_model = saving.export
load_keras_model = saving.load_from_saved_model
|
lmazuel/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/operations/__init__.py
|
4
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .application_gateways_operations import ApplicationGatewaysOperations
from .available_endpoint_services_operations import AvailableEndpointServicesOperations
from .express_route_circuit_authorizations_operations import ExpressRouteCircuitAuthorizationsOperations
from .express_route_circuit_peerings_operations import ExpressRouteCircuitPeeringsOperations
from .express_route_circuits_operations import ExpressRouteCircuitsOperations
from .express_route_service_providers_operations import ExpressRouteServiceProvidersOperations
from .load_balancers_operations import LoadBalancersOperations
from .load_balancer_backend_address_pools_operations import LoadBalancerBackendAddressPoolsOperations
from .load_balancer_frontend_ip_configurations_operations import LoadBalancerFrontendIPConfigurationsOperations
from .inbound_nat_rules_operations import InboundNatRulesOperations
from .load_balancer_load_balancing_rules_operations import LoadBalancerLoadBalancingRulesOperations
from .load_balancer_network_interfaces_operations import LoadBalancerNetworkInterfacesOperations
from .load_balancer_probes_operations import LoadBalancerProbesOperations
from .network_interfaces_operations import NetworkInterfacesOperations
from .network_interface_ip_configurations_operations import NetworkInterfaceIPConfigurationsOperations
from .network_interface_load_balancers_operations import NetworkInterfaceLoadBalancersOperations
from .network_security_groups_operations import NetworkSecurityGroupsOperations
from .security_rules_operations import SecurityRulesOperations
from .default_security_rules_operations import DefaultSecurityRulesOperations
from .network_watchers_operations import NetworkWatchersOperations
from .packet_captures_operations import PacketCapturesOperations
from .public_ip_addresses_operations import PublicIPAddressesOperations
from .route_filters_operations import RouteFiltersOperations
from .route_filter_rules_operations import RouteFilterRulesOperations
from .route_tables_operations import RouteTablesOperations
from .routes_operations import RoutesOperations
from .bgp_service_communities_operations import BgpServiceCommunitiesOperations
from .usages_operations import UsagesOperations
from .virtual_networks_operations import VirtualNetworksOperations
from .subnets_operations import SubnetsOperations
from .virtual_network_peerings_operations import VirtualNetworkPeeringsOperations
from .virtual_network_gateways_operations import VirtualNetworkGatewaysOperations
from .virtual_network_gateway_connections_operations import VirtualNetworkGatewayConnectionsOperations
from .local_network_gateways_operations import LocalNetworkGatewaysOperations
__all__ = [
'ApplicationGatewaysOperations',
'AvailableEndpointServicesOperations',
'ExpressRouteCircuitAuthorizationsOperations',
'ExpressRouteCircuitPeeringsOperations',
'ExpressRouteCircuitsOperations',
'ExpressRouteServiceProvidersOperations',
'LoadBalancersOperations',
'LoadBalancerBackendAddressPoolsOperations',
'LoadBalancerFrontendIPConfigurationsOperations',
'InboundNatRulesOperations',
'LoadBalancerLoadBalancingRulesOperations',
'LoadBalancerNetworkInterfacesOperations',
'LoadBalancerProbesOperations',
'NetworkInterfacesOperations',
'NetworkInterfaceIPConfigurationsOperations',
'NetworkInterfaceLoadBalancersOperations',
'NetworkSecurityGroupsOperations',
'SecurityRulesOperations',
'DefaultSecurityRulesOperations',
'NetworkWatchersOperations',
'PacketCapturesOperations',
'PublicIPAddressesOperations',
'RouteFiltersOperations',
'RouteFilterRulesOperations',
'RouteTablesOperations',
'RoutesOperations',
'BgpServiceCommunitiesOperations',
'UsagesOperations',
'VirtualNetworksOperations',
'SubnetsOperations',
'VirtualNetworkPeeringsOperations',
'VirtualNetworkGatewaysOperations',
'VirtualNetworkGatewayConnectionsOperations',
'LocalNetworkGatewaysOperations',
]
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
fail/321_test_imaplib.py
|
49
|
from test import support
# If we end up with a significant number of tests that don't require
# threading, this test module should be split. Right now we skip
# them all if we don't have threading.
threading = support.import_module('threading')
from contextlib import contextmanager
import imaplib
import os.path
import socketserver
import time
import calendar
from test.support import reap_threads, verbose, transient_internet
import unittest
try:
import ssl
except ImportError:
ssl = None
CERTFILE = None
class TestImaplib(unittest.TestCase):
def test_Internaldate2tuple(self):
t0 = calendar.timegm((2000, 1, 1, 0, 0, 0, -1, -1, -1))
tt = imaplib.Internaldate2tuple(
b'25 (INTERNALDATE "01-Jan-2000 00:00:00 +0000")')
self.assertEqual(time.mktime(tt), t0)
tt = imaplib.Internaldate2tuple(
b'25 (INTERNALDATE "01-Jan-2000 11:30:00 +1130")')
self.assertEqual(time.mktime(tt), t0)
tt = imaplib.Internaldate2tuple(
b'25 (INTERNALDATE "31-Dec-1999 12:30:00 -1130")')
self.assertEqual(time.mktime(tt), t0)
def test_that_Time2Internaldate_returns_a_result(self):
# We can check only that it successfully produces a result,
# not the correctness of the result itself, since the result
# depends on the timezone the machine is in.
timevalues = [2000000000, 2000000000.0, time.localtime(2000000000),
'"18-May-2033 05:33:20 +0200"']
for t in timevalues:
imaplib.Time2Internaldate(t)
if ssl:
class SecureTCPServer(socketserver.TCPServer):
def get_request(self):
newsocket, fromaddr = self.socket.accept()
connstream = ssl.wrap_socket(newsocket,
server_side=True,
certfile=CERTFILE)
return connstream, fromaddr
IMAP4_SSL = imaplib.IMAP4_SSL
else:
class SecureTCPServer:
pass
IMAP4_SSL = None
class SimpleIMAPHandler(socketserver.StreamRequestHandler):
timeout = 1
def _send(self, message):
if verbose: print("SENT: %r" % message.strip())
self.wfile.write(message)
def handle(self):
# Send a welcome message.
self._send(b'* OK IMAP4rev1\r\n')
while 1:
# Gather up input until we receive a line terminator or we timeout.
# Accumulate read(1) because it's simpler to handle the differences
# between naked sockets and SSL sockets.
line = b''
while 1:
try:
part = self.rfile.read(1)
if part == b'':
# Naked sockets return empty strings..
return
line += part
except IOError:
# ..but SSLSockets throw exceptions.
return
if line.endswith(b'\r\n'):
break
if verbose: print('GOT: %r' % line.strip())
splitline = line.split()
tag = splitline[0].decode('ASCII')
cmd = splitline[1].decode('ASCII')
args = splitline[2:]
if hasattr(self, 'cmd_'+cmd):
getattr(self, 'cmd_'+cmd)(tag, args)
else:
self._send('{} BAD {} unknown\r\n'.format(tag, cmd).encode('ASCII'))
def cmd_CAPABILITY(self, tag, args):
self._send(b'* CAPABILITY IMAP4rev1\r\n')
self._send('{} OK CAPABILITY completed\r\n'.format(tag).encode('ASCII'))
class BaseThreadedNetworkedTests(unittest.TestCase):
def make_server(self, addr, hdlr):
class MyServer(self.server_class):
def handle_error(self, request, client_address):
self.close_request(request)
self.server_close()
raise
if verbose: print("creating server")
server = MyServer(addr, hdlr)
self.assertEqual(server.server_address, server.socket.getsockname())
if verbose:
print("server created")
print("ADDR =", addr)
print("CLASS =", self.server_class)
print("HDLR =", server.RequestHandlerClass)
t = threading.Thread(
name='%s serving' % self.server_class,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print("server running")
return server, t
def reap_server(self, server, thread):
if verbose: print("waiting for server")
server.shutdown()
server.server_close()
thread.join()
if verbose: print("done")
@contextmanager
def reaped_server(self, hdlr):
server, thread = self.make_server((support.HOST, 0), hdlr)
try:
yield server
finally:
self.reap_server(server, thread)
@reap_threads
def test_connect(self):
with self.reaped_server(SimpleIMAPHandler) as server:
client = self.imap_class(*server.server_address)
client.shutdown()
@reap_threads
def test_issue5949(self):
class EOFHandler(socketserver.StreamRequestHandler):
def handle(self):
# EOF without sending a complete welcome message.
self.wfile.write(b'* OK')
with self.reaped_server(EOFHandler) as server:
self.assertRaises(imaplib.IMAP4.abort,
self.imap_class, *server.server_address)
@reap_threads
def test_line_termination(self):
class BadNewlineHandler(SimpleIMAPHandler):
def cmd_CAPABILITY(self, tag, args):
self._send(b'* CAPABILITY IMAP4rev1 AUTH\n')
self._send('{} OK CAPABILITY completed\r\n'.format(tag).encode('ASCII'))
with self.reaped_server(BadNewlineHandler) as server:
self.assertRaises(imaplib.IMAP4.abort,
self.imap_class, *server.server_address)
class ThreadedNetworkedTests(BaseThreadedNetworkedTests):
server_class = socketserver.TCPServer
imap_class = imaplib.IMAP4
@unittest.skipUnless(ssl, "SSL not available")
class ThreadedNetworkedTestsSSL(BaseThreadedNetworkedTests):
server_class = SecureTCPServer
imap_class = IMAP4_SSL
class RemoteIMAPTest(unittest.TestCase):
host = 'cyrus.andrew.cmu.edu'
port = 143
username = 'anonymous'
password = 'pass'
imap_class = imaplib.IMAP4
def setUp(self):
with transient_internet(self.host):
self.server = self.imap_class(self.host, self.port)
def tearDown(self):
if self.server is not None:
with transient_internet(self.host):
self.server.logout()
def test_logincapa(self):
with transient_internet(self.host):
for cap in self.server.capabilities:
self.assertIsInstance(cap, str)
self.assertTrue('LOGINDISABLED' in self.server.capabilities)
self.assertTrue('AUTH=ANONYMOUS' in self.server.capabilities)
rs = self.server.login(self.username, self.password)
self.assertEqual(rs[0], 'OK')
def test_logout(self):
with transient_internet(self.host):
rs = self.server.logout()
self.server = None
self.assertEqual(rs[0], 'BYE')
@unittest.skipUnless(ssl, "SSL not available")
class RemoteIMAP_STARTTLSTest(RemoteIMAPTest):
def setUp(self):
super().setUp()
with transient_internet(self.host):
rs = self.server.starttls()
self.assertEqual(rs[0], 'OK')
def test_logincapa(self):
for cap in self.server.capabilities:
self.assertIsInstance(cap, str)
self.assertFalse('LOGINDISABLED' in self.server.capabilities)
@unittest.skipUnless(ssl, "SSL not available")
class RemoteIMAP_SSLTest(RemoteIMAPTest):
port = 993
imap_class = IMAP4_SSL
def test_logincapa(self):
for cap in self.server.capabilities:
self.assertIsInstance(cap, str)
self.assertFalse('LOGINDISABLED' in self.server.capabilities)
self.assertTrue('AUTH=PLAIN' in self.server.capabilities)
def test_main():
tests = [TestImaplib]
if support.is_resource_enabled('network'):
if ssl:
global CERTFILE
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir,
"keycert.pem")
if not os.path.exists(CERTFILE):
raise support.TestFailed("Can't read certificate files!")
tests.extend([
ThreadedNetworkedTests, ThreadedNetworkedTestsSSL,
RemoteIMAPTest, RemoteIMAP_SSLTest, RemoteIMAP_STARTTLSTest,
])
support.run_unittest(*tests)
if __name__ == "__main__":
support.use_resources = ['network']
test_main()
|
hassanabidpk/django
|
refs/heads/master
|
django/contrib/gis/gdal/prototypes/srs.py
|
471
|
from ctypes import POINTER, c_char_p, c_int, c_void_p
from django.contrib.gis.gdal.libgdal import lgdal, std_call
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, int_output, srs_output, string_output,
void_output,
)
# Shortcut generation for routines with known parameters.
def srs_double(f):
"""
Creates a function prototype for the OSR routines that take
    the OSRSpatialReference object and return a double value.
"""
return double_output(f, [c_void_p, POINTER(c_int)], errcheck=True)
def units_func(f):
"""
Creates a ctypes function prototype for OSR units functions, e.g.,
OSRGetAngularUnits, OSRGetLinearUnits.
"""
return double_output(f, [c_void_p, POINTER(c_char_p)], strarg=True)
# Creation & destruction.
clone_srs = srs_output(std_call('OSRClone'), [c_void_p])
new_srs = srs_output(std_call('OSRNewSpatialReference'), [c_char_p])
release_srs = void_output(lgdal.OSRRelease, [c_void_p], errcheck=False)
destroy_srs = void_output(std_call('OSRDestroySpatialReference'), [c_void_p], errcheck=False)
srs_validate = void_output(lgdal.OSRValidate, [c_void_p])
# Getting the semi_major, semi_minor, and flattening functions.
semi_major = srs_double(lgdal.OSRGetSemiMajor)
semi_minor = srs_double(lgdal.OSRGetSemiMinor)
invflattening = srs_double(lgdal.OSRGetInvFlattening)
# WKT, PROJ, EPSG, XML importation routines.
from_wkt = void_output(lgdal.OSRImportFromWkt, [c_void_p, POINTER(c_char_p)])
from_proj = void_output(lgdal.OSRImportFromProj4, [c_void_p, c_char_p])
from_epsg = void_output(std_call('OSRImportFromEPSG'), [c_void_p, c_int])
from_xml = void_output(lgdal.OSRImportFromXML, [c_void_p, c_char_p])
from_user_input = void_output(std_call('OSRSetFromUserInput'), [c_void_p, c_char_p])
# Morphing to/from ESRI WKT.
morph_to_esri = void_output(lgdal.OSRMorphToESRI, [c_void_p])
morph_from_esri = void_output(lgdal.OSRMorphFromESRI, [c_void_p])
# Identifying the EPSG
identify_epsg = void_output(lgdal.OSRAutoIdentifyEPSG, [c_void_p])
# Getting the angular_units, linear_units functions
linear_units = units_func(lgdal.OSRGetLinearUnits)
angular_units = units_func(lgdal.OSRGetAngularUnits)
# For exporting to WKT, PROJ.4, "Pretty" WKT, and XML.
to_wkt = string_output(std_call('OSRExportToWkt'), [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_proj = string_output(std_call('OSRExportToProj4'), [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_pretty_wkt = string_output(std_call('OSRExportToPrettyWkt'),
[c_void_p, POINTER(c_char_p), c_int], offset=-2, decoding='ascii'
)
# Memory leak fixed in GDAL 1.5; still exists in 1.4.
to_xml = string_output(lgdal.OSRExportToXML, [c_void_p, POINTER(c_char_p), c_char_p], offset=-2, decoding='ascii')
# String attribute retrieval routines.
get_attr_value = const_string_output(std_call('OSRGetAttrValue'), [c_void_p, c_char_p, c_int], decoding='ascii')
get_auth_name = const_string_output(lgdal.OSRGetAuthorityName, [c_void_p, c_char_p], decoding='ascii')
get_auth_code = const_string_output(lgdal.OSRGetAuthorityCode, [c_void_p, c_char_p], decoding='ascii')
# SRS Properties
isgeographic = int_output(lgdal.OSRIsGeographic, [c_void_p])
islocal = int_output(lgdal.OSRIsLocal, [c_void_p])
isprojected = int_output(lgdal.OSRIsProjected, [c_void_p])
# Coordinate transformation
new_ct = srs_output(std_call('OCTNewCoordinateTransformation'), [c_void_p, c_void_p])
destroy_ct = void_output(std_call('OCTDestroyCoordinateTransformation'), [c_void_p], errcheck=False)
|
omprakasha/odoo
|
refs/heads/8.0
|
addons/point_of_sale/test/test_frontend.py
|
309
|
import openerp.tests
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(True)
class TestUi(openerp.tests.HttpCase):
def test_01_pos_basic_order(self):
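        # phantom_js(url_path, code, ready, login=...) drives a PhantomJS
        # browser: here it loads "/", waits until the pos_basic_order tour
        # object is defined, then runs that tour as the admin user.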
self.phantom_js("/", "openerp.Tour.run('pos_basic_order', 'test')", "openerp.Tour.tours.pos_basic_order", login="admin")
|
Serag8/Bachelor
|
refs/heads/master
|
google_appengine/lib/django-0.96/django/conf/urls/defaults.py
|
32
|
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver
__all__ = ['handler404', 'handler500', 'include', 'patterns']
handler404 = 'django.views.defaults.page_not_found'
handler500 = 'django.views.defaults.server_error'
include = lambda urlconf_module: [urlconf_module]
def patterns(prefix, *tuples):
pattern_list = []
for t in tuples:
regex, view_or_include = t[:2]
default_kwargs = t[2:]
if type(view_or_include) == list:
pattern_list.append(RegexURLResolver(regex, view_or_include[0], *default_kwargs))
else:
pattern_list.append(RegexURLPattern(regex, prefix and (prefix + '.' + view_or_include) or view_or_include, *default_kwargs))
return pattern_list
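# A minimal urlconf sketch using the helpers above (module and view names
# are hypothetical):
#
#   urlpatterns = patterns('myapp.views',
#       (r'^$', 'index'),
#       (r'^about/$', 'about', {'template': 'about.html'}),
#   )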
|
sciflow/pad
|
refs/heads/master
|
bin/DocumentConverter.py
|
7
|
#!/usr/bin/python
#
# PyODConverter (Python OpenDocument Converter) v1.1 - 2009-11-14
# Modifications by Mikko Rantalainen <mikko.rantalainen@peda.net>
#
# This script converts a document from one office format to another by
# connecting to an OpenOffice.org instance via Python-UNO bridge.
#
# Copyright (C) 2008-2009 Mirko Nasato <mirko@artofsolving.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl-2.1.html
# - or any later version.
#
# See also:
# http://www.artofsolving.com/opensource/pyodconverter
# http://www.linuxjournal.com/content/starting-stopping-and-connecting-openoffice-python
#
DEFAULT_OPENOFFICE_PORT = 8100
import sys
import os
import time
# Find OpenOffice.
_oopaths=(
('/usr/lib/openoffice/program', '/usr/lib/openoffice/program'),
('/usr/lib64/ooo-2.0/program', '/usr/lib64/ooo-2.0/program'),
('/opt/openoffice.org3/program', '/opt/openoffice.org/basis3.0/program'),
)
for p in _oopaths:
if os.path.exists(p[0]):
OPENOFFICE_PATH = p[0]
OPENOFFICE_BIN = os.path.join(OPENOFFICE_PATH, 'soffice')
OPENOFFICE_LIBPATH = p[1]
# Add to path so we can find uno.
if sys.path.count(OPENOFFICE_LIBPATH) == 0:
sys.path.insert(0, OPENOFFICE_LIBPATH)
break
import uno
from os.path import abspath, isfile, splitext
from com.sun.star.beans import PropertyValue
from com.sun.star.task import ErrorCodeIOException
from com.sun.star.connection import NoConnectException
FAMILY_TEXT = "Text"
FAMILY_WEB = "Web"
FAMILY_SPREADSHEET = "Spreadsheet"
FAMILY_PRESENTATION = "Presentation"
FAMILY_DRAWING = "Drawing"
#---------------------#
# Configuration Start #
#---------------------#
# see http://wiki.services.openoffice.org/wiki/Framework/Article/Filter
# most formats are auto-detected; only those requiring options are defined here
IMPORT_FILTER_MAP = {
"txt": {
"FilterName": "Text (encoded)",
"FilterOptions": "utf8"
},
"csv": {
"FilterName": "Text - txt - csv (StarCalc)",
"FilterOptions": "44,34,0"
}
}
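# In the CSV FilterOptions string "44,34,0": 44 is the ASCII code of the
# field separator (','), 34 that of the text delimiter ('"'), and 0 selects
# the system character set.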
EXPORT_FILTER_MAP = {
"pdf": {
FAMILY_TEXT: { "FilterName": "writer_pdf_Export" },
FAMILY_WEB: { "FilterName": "writer_web_pdf_Export" },
FAMILY_SPREADSHEET: { "FilterName": "calc_pdf_Export" },
FAMILY_PRESENTATION: { "FilterName": "impress_pdf_Export" },
FAMILY_DRAWING: { "FilterName": "draw_pdf_Export" }
},
"html": {
FAMILY_TEXT: { "FilterName": "HTML (StarWriter)" },
FAMILY_SPREADSHEET: { "FilterName": "HTML (StarCalc)" },
FAMILY_PRESENTATION: { "FilterName": "impress_html_Export" }
},
"odt": {
FAMILY_TEXT: { "FilterName": "writer8" },
FAMILY_WEB: { "FilterName": "writerweb8_writer" }
},
"doc": {
FAMILY_TEXT: { "FilterName": "MS Word 97" }
},
"rtf": {
FAMILY_TEXT: { "FilterName": "Rich Text Format" }
},
"txt": {
FAMILY_TEXT: {
"FilterName": "Text",
"FilterOptions": "utf8"
}
},
"ods": {
FAMILY_SPREADSHEET: { "FilterName": "calc8" }
},
"xls": {
FAMILY_SPREADSHEET: { "FilterName": "MS Excel 97" }
},
"csv": {
FAMILY_SPREADSHEET: {
"FilterName": "Text - txt - csv (StarCalc)",
"FilterOptions": "44,34,0"
}
},
"odp": {
FAMILY_PRESENTATION: { "FilterName": "impress8" }
},
"ppt": {
FAMILY_PRESENTATION: { "FilterName": "MS PowerPoint 97" }
},
"swf": {
FAMILY_DRAWING: { "FilterName": "draw_flash_Export" },
FAMILY_PRESENTATION: { "FilterName": "impress_flash_Export" }
}
}
PAGE_STYLE_OVERRIDE_PROPERTIES = {
FAMILY_SPREADSHEET: {
#--- Scale options: uncomment 1 of the 3 ---
# a) 'Reduce / enlarge printout': 'Scaling factor'
"PageScale": 100,
# b) 'Fit print range(s) to width / height': 'Width in pages' and 'Height in pages'
#"ScaleToPagesX": 1, "ScaleToPagesY": 1000,
# c) 'Fit print range(s) on number of pages': 'Fit print range(s) on number of pages'
#"ScaleToPages": 1,
"PrintGrid": False
}
}
#-------------------#
# Configuration End #
#-------------------#
class OOService:
"""
Start, stop, and connect to OpenOffice.
"""
def __init__(self, port=DEFAULT_OPENOFFICE_PORT):
""" Create OORunner that connects on the specified port. """
self.port = port
def connect(self, no_startup=False):
"""
Connect to OpenOffice.
If a connection cannot be established try to start OpenOffice.
"""
localContext = uno.getComponentContext()
resolver = localContext.ServiceManager.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", localContext)
context = None
n = 0
while n < 6:
try:
context = resolver.resolve("uno:socket,host=localhost,port=%d;urp;StarOffice.ComponentContext" % self.port)
break
except NoConnectException:
pass
# If first connect failed then try starting OpenOffice.
if n == 0:
# Exit loop if startup not desired.
if no_startup:
break
self.startup()
# Pause and try again to connect
time.sleep(1)
n += 1
if not context:
raise Exception, "Failed to connect to OpenOffice on port %d" % self.port
desktop = context.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", context)
if not desktop:
raise Exception, "Failed to create OpenOffice desktop on port %d" % self.port
return desktop
def startup(self):
"""
Start a headless instance of OpenOffice.
"""
args = [OPENOFFICE_BIN,
'-accept=socket,host=localhost,port=%d;urp;StarOffice.ServiceManager' % self.port,
'-norestore',
'-nofirststartwizard',
'-nologo',
'-headless',
]
env = {'PATH' : '/bin:/usr/bin:%s' % OPENOFFICE_PATH,
'PYTHONPATH' : OPENOFFICE_LIBPATH,
}
try:
pid = os.spawnve(os.P_NOWAIT, args[0], args, env)
except Exception, e:
raise Exception, "Failed to start OpenOffice on port %d: %s" % (self.port, e.message)
if pid <= 0:
raise Exception, "Failed to start OpenOffice on port %d" % self.port
def shutdown(self):
"""
Shutdown OpenOffice.
"""
try:
desktop = self.connect(True)
if desktop:
desktop.terminate()
except Exception, e:
raise Exception, "Failed to shutdown the process: %s" % (e.message)
class DocumentConversionException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class DocumentConverter:
def __init__(self, port=DEFAULT_OPENOFFICE_PORT):
localContext = uno.getComponentContext()
resolver = localContext.ServiceManager.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", localContext)
try:
context = resolver.resolve("uno:socket,host=localhost,port=%s;urp;StarOffice.ComponentContext" % port)
except NoConnectException:
raise DocumentConversionException, "failed to connect to OpenOffice.org on port %s" % port
self.desktop = context.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", context)
def terminate(self):
self.desktop.terminate()
def convert(self, inputFile, outputFile):
inputUrl = self._toFileUrl(inputFile)
outputUrl = self._toFileUrl(outputFile)
loadProperties = { "Hidden": True }
inputExt = self._getFileExt(inputFile)
        if inputExt in IMPORT_FILTER_MAP:
loadProperties.update(IMPORT_FILTER_MAP[inputExt])
document = self.desktop.loadComponentFromURL(inputUrl, "_blank", 0, self._toProperties(loadProperties))
try:
document.refresh()
except AttributeError:
pass
family = self._detectFamily(document)
self._overridePageStyleProperties(document, family)
outputExt = self._getFileExt(outputFile)
storeProperties = self._getStoreProperties(document, outputExt)
try:
document.storeToURL(outputUrl, self._toProperties(storeProperties))
finally:
document.close(True)
def _overridePageStyleProperties(self, document, family):
        if family in PAGE_STYLE_OVERRIDE_PROPERTIES:
properties = PAGE_STYLE_OVERRIDE_PROPERTIES[family]
pageStyles = document.getStyleFamilies().getByName('PageStyles')
for styleName in pageStyles.getElementNames():
pageStyle = pageStyles.getByName(styleName)
for name, value in properties.items():
pageStyle.setPropertyValue(name, value)
def _getStoreProperties(self, document, outputExt):
family = self._detectFamily(document)
try:
propertiesByFamily = EXPORT_FILTER_MAP[outputExt]
except KeyError:
raise DocumentConversionException, "unknown output format: '%s'" % outputExt
try:
return propertiesByFamily[family]
except KeyError:
raise DocumentConversionException, "unsupported conversion: from '%s' to '%s'" % (family, outputExt)
def _detectFamily(self, document):
if document.supportsService("com.sun.star.text.WebDocument"):
return FAMILY_WEB
if document.supportsService("com.sun.star.text.GenericTextDocument"):
# must be TextDocument or GlobalDocument
return FAMILY_TEXT
if document.supportsService("com.sun.star.sheet.SpreadsheetDocument"):
return FAMILY_SPREADSHEET
if document.supportsService("com.sun.star.presentation.PresentationDocument"):
return FAMILY_PRESENTATION
if document.supportsService("com.sun.star.drawing.DrawingDocument"):
return FAMILY_DRAWING
raise DocumentConversionException, "unknown document family: %s" % document
def _getFileExt(self, path):
ext = splitext(path)[1]
        if ext:  # splitext() returns '' (never None) when there is no extension
return ext[1:].lower()
def _toFileUrl(self, path):
return uno.systemPathToFileUrl(abspath(path))
    def _toProperties(self, data):
        props = []
        for key in data:
            prop = PropertyValue()
            prop.Name = key
            prop.Value = data[key]
            props.append(prop)
        return tuple(props)
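# A minimal library-usage sketch (illustrative only; assumes an OpenOffice
# instance is already listening on DEFAULT_OPENOFFICE_PORT, e.g. one started
# with the --daemon flag handled below; file names are hypothetical):
#
#   converter = DocumentConverter()
#   converter.convert("report.odt", "report.pdf")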
if __name__ == "__main__":
    from sys import argv, exit
    if len(argv) < 2:
        print "USAGE: python %s (--daemon | --shutdown | <input-file> <output-file>)" % argv[0]
        exit(255)
    if argv[1] == "--daemon":
try:
service = OOService()
service.startup()
exit(0)
except ErrorCodeIOException, e:
print "Failed to start daemon process: %s" % e.message
exit(1)
if argv[1] == "--shutdown":
try:
service = OOService()
service.shutdown()
exit(0)
except ErrorCodeIOException, e:
print "Failed to shut down daemon process: %s" % e.message
exit(1)
if len(argv) < 3:
print "USAGE: python %s <input-file> <output-file>" % argv[0]
exit(255)
elif not isfile(argv[1]):
print "no such input file: %s" % argv[1]
exit(1)
try:
converter = DocumentConverter()
converter.convert(argv[1], argv[2])
except DocumentConversionException, exception:
print "ERROR! " + str(exception)
exit(1)
except ErrorCodeIOException, exception:
print "ERROR! ErrorCodeIOException %d" % exception.ErrCode
exit(1)
|
Manolaru/Python_Mantis
|
refs/heads/master
|
Working version/fixture/application.py
|
2
|
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.project import ProjectHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.session = SessionHelper(self)
self.project = ProjectHelper(self)
self.base_url = base_url
def is_valid(self):
try:
self.wd.current_url
return True
        except Exception:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
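# A minimal usage sketch (browser and base URL are hypothetical; session and
# project expose the helpers defined in fixture.session/fixture.project):
#
#   app = Application("firefox", "http://localhost/mantisbt/")
#   app.open_home_page()
#   app.destroy()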
|
telwertowski/Books-Mac-OS-X
|
refs/heads/master
|
Versions/Books_3.0b6/Library of Congress.plugin/Contents/Resources/PyZ3950/pqf.py
|
30
|
#!/usr/local/bin/python2.3
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from PyZ3950 import z3950, oids,asn1
from PyZ3950.zdefs import make_attr
from types import IntType, StringType, ListType
from PyZ3950.CQLParser import CQLshlex
"""
Parser for PQF directly into RPN structure.
PQF docs: http://www.indexdata.dk/yaz/doc/tools.html
NB: This does not implement /everything/ in PQF. In particular, sticky attributes are not handled: in @attr 2=3 @and @attr 1=4 title @attr 1=1003 author, the 2=3 should apply to all subsequent clauses.
"""
class PQFParser:
lexer = None
currentToken = None
nextToken = None
def __init__(self, l):
self.lexer = l
self.fetch_token()
def fetch_token(self):
""" Read ahead one token """
tok = self.lexer.get_token()
self.currentToken = self.nextToken
self.nextToken = tok
def is_boolean(self):
if (self.currentToken.lower() in ['@and', '@or', '@not', '@prox']):
return 1
else:
return 0
def defaultClause(self, t):
        # Assign a default clause: use = anywhere (1=1016), relation = equal (2=3)
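        # e.g. a bare term "fish" becomes the equivalent of @attr 1=1016 @attr 2=3 "fish"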
clause = z3950.AttributesPlusTerm()
attrs = [(oids.Z3950_ATTRS_BIB1, 1, 1016), (oids.Z3950_ATTRS_BIB1, 2, 3)]
clause.attributes = [make_attr(*e) for e in attrs]
clause.term = t
return ('op', ('attrTerm', clause))
# Grammar fns
def query(self):
set = self.top_set()
qst = self.query_struct()
# Pull in a (hopefully) null token
self.fetch_token()
if (self.currentToken):
# Nope, unprocessed tokens remain
raise(ValueError)
rpnq = z3950.RPNQuery()
if set:
rpnq.attributeSet = set
else:
rpnq.attributeSet = oids.Z3950_ATTRS_BIB1_ov
rpnq.rpn = qst
return ('type_1', rpnq)
def top_set(self):
if (self.nextToken == '@attrset'):
self.fetch_token()
self.fetch_token()
n = self.currentToken.upper()
if (n[:14] == "1.2.840.10003."):
return asn1.OidVal(map(int, n.split('.')))
return oids.oids['Z3950']['ATTRS'][n]['oid']
else:
return None
# This totally ignores the BNF, but does the 'right' thing
def query_struct(self):
self.fetch_token()
if (self.currentToken == '@attr'):
attrs = []
while self.currentToken == '@attr':
attrs.append(self.attr_spec())
self.fetch_token()
t = self.term()
# Now we have attrs + term
clause = z3950.AttributesPlusTerm()
clause.attributes = [make_attr(*e) for e in attrs]
clause.term = t
return ('op', ('attrTerm', clause))
elif (self.is_boolean()):
# @operator query query
return self.complex()
elif (self.currentToken == '@set'):
return self.result_set()
elif (self.currentToken == "{"):
# Parens
s = self.query_struct()
            if (self.nextToken != "}"):
raise(ValueError)
else:
self.fetch_token()
return s
else:
t = self.term()
return self.defaultClause(t)
def term(self):
# Need to split to allow attrlist then @term
type = 'general'
if (self.currentToken == '@term'):
self.fetch_token()
type = self.currentToken.lower()
types = {'general' : 'general', 'string' : 'characterString', 'numeric' : 'numeric', 'external' : 'external'}
type = types[type]
self.fetch_token()
if (self.currentToken[0] == '"' and self.currentToken[-1] == '"'):
term = self.currentToken[1:-1]
else:
term = self.currentToken
return (type, term)
def result_set(self):
self.fetch_token()
return ('op', ('resultSet', self.currentToken))
def attr_spec(self):
        # @attr is the current token
self.fetch_token()
if (self.currentToken.find('=') == -1):
# attrset
set = self.currentToken
if (set[:14] == "1.2.840.10003."):
set = asn1.OidVal(map(int, set.split('.')))
else:
set = oids.oids['Z3950']['ATTRS'][set.upper()]['oid']
self.fetch_token()
else:
set = None
# May raise
(atype, val) = self.currentToken.split('=')
if (not atype.isdigit()):
raise ValueError
atype = int(atype)
if (val.isdigit()):
val = int(val)
return (set, atype, val)
def complex(self):
op = z3950.RpnRpnOp()
op.op = self.boolean()
op.rpn1 = self.query_struct()
op.rpn2 = self.query_struct()
return ('rpnRpnOp', op)
def boolean(self):
b = self.currentToken[1:]
b = b.lower()
if (b == 'prox'):
self.fetch_token()
exclusion = self.currentToken
self.fetch_token()
distance = self.currentToken
self.fetch_token()
ordered = self.currentToken
self.fetch_token()
relation = self.currentToken
self.fetch_token()
which = self.currentToken
self.fetch_token()
unit = self.currentToken
prox = z3950.ProximityOperator()
if (not (relation.isdigit() and exclusion.isdigit() and distance.isdigit() and unit.isdigit())):
raise ValueError
prox.relationType = int(relation)
            prox.exclusion = bool(int(exclusion))  # via int(), so "0" maps to False
prox.distance = int(distance)
if (which[0] == 'k'):
prox.unit = ('known', int(unit))
elif (which[0] == 'p'):
prox.unit = ('private', int(unit))
else:
raise ValueError
return (b, prox)
elif b == 'not':
return ('and-not', None)
else:
return (b, None)
def parse(q):
query = StringIO(q)
lexer = CQLshlex(query)
# Override CQL's wordchars list to include /=><()
lexer.wordchars += "!@#$%^&*-+[];,.?|~`:\\><=/'()"
parser = PQFParser(lexer)
return parser.query()
def rpn2pqf(rpn):
# Turn RPN structure into PQF equivalent
q = rpn[1]
if (rpn[0] == 'type_1'):
# Top level
if (q.attributeSet):
query = '@attrset %s ' % ( '.'.join(map(str, q.attributeSet.lst)))
else:
query = ""
rest = rpn2pqf(q.rpn)
return "%s%s" % (query, rest)
elif (rpn[0] == 'rpnRpnOp'):
# boolean
if (q.op[0] in ['and', 'or']):
query = ['@', q.op[0], ' ']
elif (q.op[0] == 'and-not'):
query = ['@not ']
else:
query = ['@prox']
# XXX
query.append(' ')
query.append(rpn2pqf(q.rpn1))
query.append(' ')
query.append(rpn2pqf(q.rpn2))
return ''.join(query)
elif (rpn[0] == 'op'):
if (q[0] == 'attrTerm'):
query = []
for a in q[1].attributes:
if (a.attributeValue[0] == 'numeric'):
val = str(a.attributeValue[1])
else:
val = a.attributeValue[1].list[0][1]
query.append("@attr %i=%s " % (a.attributeType, val))
query.append('"%s" ' % (q[1].term[1]))
return ''.join(query)
elif (q[0] == 'resultSet'):
return "@set %s" % (q[1])
|
javierag/samba
|
refs/heads/master
|
python/samba/netcmd/processes.py
|
38
|
# Unix SMB/CIFS implementation.
# List processes (to aid debugging on systems without setproctitle)
# Copyright (C) 2010-2011 Jelmer Vernooij <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This module queries the messaging context of a running Samba instance
# for its registered services and prints service/PID pairs, optionally
# filtered by service name or by PID.
#
# Useful for debugging on systems where setproctitle is not available.
#
import os
import sys
import samba
import samba.getopt as options
from samba.netcmd import Command, CommandError, Option
from samba.messaging import Messaging
class cmd_processes(Command):
"""List processes (to aid debugging on systems without setproctitle)."""
synopsis = "%prog [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions
}
takes_options = [
Option("--name", type=str,
help="Return only processes associated with one particular name"),
Option("--pid", type=int,
help="Return only names assoicated with one particular PID"),
]
takes_args = []
def run(self, sambaopts, versionopts, section_name=None,
name=None, pid=None):
lp = sambaopts.get_loadparm()
logger = self.get_logger("processes")
msg_ctx = Messaging()
if name is not None:
ids = msg_ctx.irpc_servers_byname(name)
for server_id in ids:
self.outf.write("%d\n" % server_id.pid)
elif pid is not None:
names = msg_ctx.irpc_all_servers()
for name in names:
for server_id in name.ids:
if server_id.pid == int(pid):
self.outf.write("%s\n" % name.name)
else:
names = msg_ctx.irpc_all_servers()
self.outf.write(" Service: PID \n")
self.outf.write("-----------------------------\n")
for name in names:
for server_id in name.ids:
self.outf.write("%-16s %6d\n" % (name.name, server_id.pid))
|
jfantom/incubator-airflow
|
refs/heads/master
|
airflow/contrib/example_dags/example_twitter_dag.py
|
12
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------------
# Written By: Ekhtiar Syed
# Last Update: 8th April 2016
# Caveat: This Dag will not run because of missing scripts.
# The purpose of this is to give you a sample of a real world example DAG!
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Load The Dependencies
# --------------------------------------------------------------------------------
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.hive_operator import HiveOperator
from datetime import date, timedelta
# --------------------------------------------------------------------------------
# Create a few placeholder scripts. In practice these would be different python
# script files, which are imported in this section with absolute or relative imports
# --------------------------------------------------------------------------------
def fetchtweets():
return None
def cleantweets():
return None
def analyzetweets():
return None
def transfertodb():
return None
# --------------------------------------------------------------------------------
# set default arguments
# --------------------------------------------------------------------------------
default_args = {
'owner': 'Ekhtiar',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(5),
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
dag = DAG(
'example_twitter_dag', default_args=default_args,
schedule_interval="@daily")
# --------------------------------------------------------------------------------
# This task should call the Twitter API and retrieve yesterday's tweets sent
# from and to each of the four twitter users (Twitter_A,..,Twitter_D). It
# should generate eight csv output files, named with the convention
# direction(from or to)_twitterHandle_date.csv
# --------------------------------------------------------------------------------
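# For example, under that convention the tweets sent to Twitter_A on
# 2016-04-07 would land in to_Twitter_A_2016-04-07.csv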
fetch_tweets = PythonOperator(
task_id='fetch_tweets',
python_callable=fetchtweets,
dag=dag)
# --------------------------------------------------------------------------------
# Clean the eight files. In this step you can get rid of or cherry pick columns
# and different parts of the text
# --------------------------------------------------------------------------------
clean_tweets = PythonOperator(
task_id='clean_tweets',
python_callable=cleantweets,
dag=dag)
clean_tweets.set_upstream(fetch_tweets)
# --------------------------------------------------------------------------------
# In this section you can use a script to analyze the twitter data. Could simply
# be a sentiment analysis through algorithms like bag of words or something more
# complicated. You can also take a look at Web Services to do such tasks
# --------------------------------------------------------------------------------
analyze_tweets = PythonOperator(
task_id='analyze_tweets',
python_callable=analyzetweets,
dag=dag)
analyze_tweets.set_upstream(clean_tweets)
# --------------------------------------------------------------------------------
# Although this is the last task, we need to declare it before the next tasks as we
# will use set_downstream This task will extract summary from Hive data and store
# it to MySQL
# --------------------------------------------------------------------------------
hive_to_mysql = PythonOperator(
task_id='hive_to_mysql',
python_callable=transfertodb,
dag=dag)
# --------------------------------------------------------------------------------
# The following tasks are generated using a for loop. The first task puts the
# eight csv files to HDFS. The second task loads these files from HDFS to the
# respective Hive tables. These two for loops could be combined into one loop.
# However, in most cases you will be running different analysis on your
# incoming and outgoing tweets, and hence they are kept separate in this example.
# --------------------------------------------------------------------------------
from_channels = ['fromTwitter_A', 'fromTwitter_B', 'fromTwitter_C', 'fromTwitter_D']
to_channels = ['toTwitter_A', 'toTwitter_B', 'toTwitter_C', 'toTwitter_D']
yesterday = date.today() - timedelta(days=1)
dt = yesterday.strftime("%Y-%m-%d")
# define where you want to store the tweets csv file in your local directory
local_dir = "/tmp/"
# define the location where you want to store in HDFS
hdfs_dir = " /tmp/"
for channel in to_channels:
file_name = "to_" + channel + "_" + yesterday.strftime("%Y-%m-%d") + ".csv"
load_to_hdfs = BashOperator(
task_id="put_" + channel + "_to_hdfs",
bash_command="HADOOP_USER_NAME=hdfs hadoop fs -put -f " +
local_dir + file_name +
hdfs_dir + channel + "/",
dag=dag)
load_to_hdfs.set_upstream(analyze_tweets)
load_to_hive = HiveOperator(
task_id="load_" + channel + "_to_hive",
hql="LOAD DATA INPATH '" +
hdfs_dir + channel + "/" + file_name + "' "
"INTO TABLE " + channel + " "
"PARTITION(dt='" + dt + "')",
dag=dag)
load_to_hive.set_upstream(load_to_hdfs)
load_to_hive.set_downstream(hive_to_mysql)
for channel in from_channels:
file_name = "from_" + channel + "_" + yesterday.strftime("%Y-%m-%d") + ".csv"
load_to_hdfs = BashOperator(
task_id="put_" + channel + "_to_hdfs",
bash_command="HADOOP_USER_NAME=hdfs hadoop fs -put -f " +
local_dir + file_name +
hdfs_dir + channel + "/",
dag=dag)
load_to_hdfs.set_upstream(analyze_tweets)
load_to_hive = HiveOperator(
task_id="load_" + channel + "_to_hive",
hql="LOAD DATA INPATH '" +
hdfs_dir + channel + "/" + file_name + "' "
"INTO TABLE " + channel + " "
"PARTITION(dt='" + dt + "')",
dag=dag)
load_to_hive.set_upstream(load_to_hdfs)
load_to_hive.set_downstream(hive_to_mysql)
|
spmaniato/LTLMoP
|
refs/heads/development
|
src/lib/configEditor.py
|
7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated by wxGlade 0.6.3 on Fri Dec 16 03:13:38 2011
import wx, wx.richtext, wx.grid, wx.lib.intctrl
import sys, os, re
# Climb the tree to find out where we are
p = os.path.abspath(__file__)
t = ""
while t != "src":
(p, t) = os.path.split(p)
if p == "":
print "I have no idea where I am; this is ridiculous"
sys.exit(1)
sys.path.append(os.path.join(p,"src","lib"))
import project
from copy import deepcopy
from numpy import *
import subprocess
import socket
import handlerSubsystem
from hsubParsingUtils import parseCallString
import lib.handlers.handlerTemplates as ht
import lib.globalConfig
from lib.hsubConfigObjects import ExperimentConfig, RobotConfig
# begin wxGlade: extracode
# end wxGlade
CALIB_PORT = 23460
def drawParamConfigPane(target, method, proj):
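    # A quick map of what this pane builds (grounded in the branches below):
    # region -> ComboBox of region names, bool -> CheckBox, int -> IntCtrl,
    # anything else -> TextCtrl; paramPaneCallback writes edits back into
    # the method's parameter objects.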
if target.GetSizer() is not None:
target.GetSizer().Clear(deleteWindows=True)
list_sizer = wx.BoxSizer(wx.VERTICAL)
label_info = wx.StaticText(target, -1, method.comment)
label_info.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
static_line = wx.StaticLine(target, -1)
list_sizer.Add(label_info, 0, wx.ALL|wx.EXPAND, 5)
list_sizer.Add(static_line, 0, wx.EXPAND, 0)
param_controls = {}
for p in method.para:
#print "name: %s, para_type: %s, default: %s, value: %s" % (p.name, p.para_type, p.default, p.value)
item_sizer = wx.BoxSizer(wx.HORIZONTAL)
param_label = wx.StaticText(target, -1, "%s:" % p.name)
if p.para_type is None:
continue
if p.para_type.lower() == "region":
r_names = [r.name for r in proj.rfi.regions if r.name.lower() != "boundary" and not r.isObstacle]
param_controls[p] = wx.ComboBox(target, -1, choices=r_names, style=wx.CB_DROPDOWN)
if p.value is not None and p.value in r_names:
param_controls[p].SetStringSelection(p.value)
            elif p.default is not None and p.default in r_names:
p.value = p.default
param_controls[p].SetStringSelection(p.default)
else:
p.value = r_names[0]
param_controls[p].SetSelection(0)
elif p.para_type.lower().startswith("bool"):
param_controls[p] = wx.CheckBox(target, -1, "")
if p.value is not None:
param_controls[p].SetValue(p.value)
elif p.default is not None:
p.value = p.default
param_controls[p].SetValue(p.default)
else:
p.value = "False"
param_controls[p].SetValue(False)
elif p.para_type.lower().startswith("int"):
param_controls[p] = wx.lib.intctrl.IntCtrl(target, -1, 0)
if p.min_val is not None:
param_controls[p].SetMin(p.min_val)
param_controls[p].SetLimited(True)
if p.max_val is not None:
param_controls[p].SetMax(p.max_val)
param_controls[p].SetLimited(True)
if p.value is not None:
param_controls[p].SetValue(p.value)
elif p.default is not None:
p.value = p.default
param_controls[p].SetValue(p.default)
else:
p.value = "0"
param_controls[p].SetValue(0)
else:
if p.value is not None:
param_controls[p] = wx.TextCtrl(target, -1, str(p.value))
elif p.default is not None:
p.value = p.default
param_controls[p] = wx.TextCtrl(target, -1, str(p.default))
else:
p.value = ""
param_controls[p] = wx.TextCtrl(target, -1, "")
param_label.SetToolTip(wx.ToolTip(p.desc))
item_sizer = wx.BoxSizer(wx.HORIZONTAL)
item_sizer.Add(param_label, 0, wx.ALL, 5)
item_sizer.Add(param_controls[p], 1, wx.ALL, 5)
list_sizer.Add(item_sizer, 0, wx.EXPAND, 0)
# TODO: is there a better way to do this?
def paramPaneCallback(event):
this_param = None
for p in method.para:
if event.GetEventObject() is param_controls[p]:
this_param = p
break
if this_param is None:
# Ignore; from another control (e.g. calib matrix)
return
this_param.setValue(param_controls[this_param].GetValue())
target.Bind(wx.EVT_TEXT, paramPaneCallback)
target.Bind(wx.EVT_COMBOBOX, paramPaneCallback)
target.Bind(wx.EVT_CHECKBOX, paramPaneCallback)
target.Bind(wx.lib.intctrl.EVT_INT, paramPaneCallback)
target.SetSizer(list_sizer)
target.Layout()
label_info.Wrap(list_sizer.GetSize()[0])
class regionTagsDialog(wx.Dialog):
def __init__(self, parent, *args, **kwds):
# begin wxGlade: regionTagsDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.label_5 = wx.StaticText(self, wx.ID_ANY, "Tags:")
self.list_box_tags = wx.ListBox(self, wx.ID_ANY, choices=[], style=wx.LB_SINGLE)
self.button_add_tag = wx.Button(self, wx.ID_ADD, "")
self.button_remove_tag = wx.Button(self, wx.ID_REMOVE, "")
self.label_12 = wx.StaticText(self, wx.ID_ANY, "Regions:")
self.list_box_regions = wx.CheckListBox(self, wx.ID_ANY, choices=[])
self.static_line_2 = wx.StaticLine(self, wx.ID_ANY)
self.button_5 = wx.Button(self, wx.ID_OK, "")
self.button_8 = wx.Button(self, wx.ID_CANCEL, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_LISTBOX, self.onClickTag, self.list_box_tags)
self.Bind(wx.EVT_BUTTON, self.onClickAddTag, self.button_add_tag)
self.Bind(wx.EVT_BUTTON, self.onClickRemoveTag, self.button_remove_tag)
# end wxGlade
self.proj = parent.proj
self.Bind(wx.EVT_CHECKLISTBOX, self.onCheckRegion, self.list_box_regions)
def __set_properties(self):
# begin wxGlade: regionTagsDialog.__set_properties
self.SetTitle("Edit Region Tags...")
self.SetSize((577, 419))
# end wxGlade
def __do_layout(self):
# begin wxGlade: regionTagsDialog.__do_layout
sizer_31 = wx.BoxSizer(wx.VERTICAL)
sizer_34 = wx.BoxSizer(wx.HORIZONTAL)
sizer_32 = wx.BoxSizer(wx.HORIZONTAL)
sizer_35 = wx.BoxSizer(wx.VERTICAL)
sizer_33 = wx.BoxSizer(wx.VERTICAL)
sizer_36 = wx.BoxSizer(wx.HORIZONTAL)
sizer_33.Add(self.label_5, 0, 0, 0)
sizer_33.Add(self.list_box_tags, 1, wx.TOP | wx.BOTTOM | wx.EXPAND, 5)
sizer_36.Add(self.button_add_tag, 0, 0, 0)
sizer_36.Add(self.button_remove_tag, 0, wx.LEFT, 10)
sizer_33.Add(sizer_36, 0, wx.EXPAND, 0)
sizer_32.Add(sizer_33, 1, wx.RIGHT | wx.EXPAND, 5)
sizer_35.Add(self.label_12, 0, 0, 0)
sizer_35.Add(self.list_box_regions, 1, wx.TOP | wx.EXPAND, 5)
sizer_32.Add(sizer_35, 1, wx.EXPAND, 0)
sizer_31.Add(sizer_32, 1, wx.ALL | wx.EXPAND, 5)
sizer_31.Add(self.static_line_2, 0, wx.EXPAND, 0)
sizer_34.Add((20, 20), 1, wx.EXPAND, 0)
sizer_34.Add(self.button_5, 0, wx.RIGHT, 10)
sizer_34.Add(self.button_8, 0, 0, 0)
sizer_31.Add(sizer_34, 0, wx.ALL | wx.EXPAND, 10)
self.SetSizer(sizer_31)
self.Layout()
# end wxGlade
def _tags2dialog(self, tags):
self.tags = tags
# Populate tags and regions
self.list_box_tags.Set(self.tags.keys())
if self.list_box_tags.GetCount() > 0:
self.list_box_tags.SetSelection(0)
self.button_remove_tag.Enable(True)
self.onClickTag(None)
else:
self.button_remove_tag.Enable(False)
def onCheckRegion(self, event):
tag = self.list_box_tags.GetStringSelection()
self.tags[tag] = self.list_box_regions.GetCheckedStrings()
event.Skip()
def onClickTag(self, event): # wxGlade: regionTagsDialog.<event_handler>
if event is not None:
tag = event.GetString()
else:
tag = self.list_box_tags.GetStringSelection()
if tag == '':
self.list_box_regions.Set([])
return
self.list_box_regions.Set([r.name for r in self.proj.rfi.regions if r.name.lower() != "boundary" and not r.isObstacle])
for i, rname in enumerate(self.list_box_regions.GetItems()):
self.list_box_regions.Check(i, rname in self.tags[tag])
if event is not None:
event.Skip()
def onClickAddTag(self, event): # wxGlade: regionTagsDialog.<event_handler>
# Ask the user for a tag name
name = wx.GetTextFromUser("Name:", "New Tag")
if name != "":
if name in self.tags:
wx.MessageBox("Tag with that name already exists.", "Invalid tag name",
style = wx.OK | wx.ICON_ERROR)
return
# If it's valid, add it, select it and enable it
self.list_box_tags.Insert(name, self.list_box_tags.GetCount())
self.list_box_tags.Select(self.list_box_tags.GetCount()-1)
self.tags[name] = []
self.onClickTag(None)
self.button_remove_tag.Enable(True)
event.Skip()
def onClickRemoveTag(self, event): # wxGlade: regionTagsDialog.<event_handler>
numel = self.list_box_tags.GetCount()
if numel > 0:
pos = self.list_box_tags.GetSelection()
tag = self.list_box_tags.GetStringSelection()
self.list_box_tags.Delete(pos)
del self.tags[tag]
if pos == numel - 1:
# If the very last element was deleted, move the selection up one
newpos = pos - 1
else:
newpos = pos
if newpos != -1:
self.list_box_tags.Select(newpos)
else:
self.button_remove_tag.Enable(False)
self.onClickTag(None)
event.Skip()
# end of class regionTagsDialog
class handlerConfigDialog(wx.Dialog):
def __init__(self, parent, *args, **kwds):
# begin wxGlade: handlerConfigDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.panel_configs = wx.ScrolledWindow(self, wx.ID_ANY, style=wx.SUNKEN_BORDER | wx.TAB_TRAVERSAL)
self.button_defaults = wx.Button(self, wx.ID_ANY, "Reset to Defaults")
self.button_OK = wx.Button(self, wx.ID_OK, "")
self.button_1 = wx.Button(self, wx.ID_CANCEL, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.onClickDefaults, self.button_defaults)
# end wxGlade
self.hsub = parent.hsub
self.proj = parent.proj
self.robot = parent.robot
def __set_properties(self):
# begin wxGlade: handlerConfigDialog.__set_properties
self.SetTitle("Configure XXXhandler")
self.panel_configs.SetScrollRate(10, 10)
self.button_OK.SetDefault()
# end wxGlade
def __do_layout(self):
# begin wxGlade: handlerConfigDialog.__do_layout
sizer_10 = wx.BoxSizer(wx.VERTICAL)
sizer_26 = wx.BoxSizer(wx.HORIZONTAL)
sizer_10.Add(self.panel_configs, 1, wx.EXPAND, 0)
sizer_26.Add(self.button_defaults, 0, wx.ALL, 5)
sizer_26.Add((20, 20), 1, 0, 0)
sizer_26.Add(self.button_OK, 0, wx.ALL, 5)
sizer_26.Add(self.button_1, 0, wx.ALL, 5)
sizer_10.Add(sizer_26, 0, wx.EXPAND, 0)
self.SetSizer(sizer_10)
sizer_10.Fit(self)
self.Layout()
# end wxGlade
def _onCalibEdit(self, event):
r = event.GetRow()
c = event.GetCol()
self.robot.calibration_matrix[r,c] = self.sheet.GetCellValue(r,c)
event.Skip()
def _onClickCalibrate(self, event):
event.Skip()
# Check that a region file is associated
if self.proj.rfi is None:
wx.MessageBox("Please define regions before calibrating.", "Error",
style = wx.OK | wx.ICON_ERROR)
return
# Check that an init handler is selected
if ht.InitHandler not in self.robot.handlers.keys():
wx.MessageBox("Please choose an Initialization Handler before calibrating.", "Error",
style = wx.OK | wx.ICON_ERROR)
return
# Create a copy of the project in its current state
proj_copy = deepcopy(self.proj)
# Create a temp config with one robot, with
# the currently selected init and pose handlers
cfg = ExperimentConfig()
robot = deepcopy(self.robot)
cfg.name = 'calibrate'
cfg.file_name = os.path.join(proj_copy.project_root, 'configs', 'calibrate.config')
cfg.complete = True
robot.name = "calibrate"
robot.handlers[ht.PoseHandler] = self.handler
# If the inithandler takes an init_region argument (i.e. playerstage, ODE), set it to the origin
try:
p = robot.handlers[ht.InitHandler].getMethodByName("__init__").getParaByName("init_region")
except ValueError:
pass
else:
p.setValue("__origin__")
cfg.main_robot = robot.name
cfg.robots.append(robot)
proj_copy.current_config = cfg.name
proj_copy.writeSpecFile(proj_copy.getFilenamePrefix()+".spec_calibtmp")
cfg.saveConfig()
print "Running calibration tool..."
proc = subprocess.Popen(["python", "-u", "-m", "lib.calibrate", proj_copy.getFilenamePrefix() + ".spec_calibtmp", str(CALIB_PORT)])
# Listen on socket for return value
host = 'localhost'
buf = 1024
addr = (host, CALIB_PORT)
UDPSock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
UDPSock.settimeout(0.1)
UDPSock.bind(addr)
while proc.returncode is None:
proc.poll()
# Wait for and receive a message from the calibration tool
try:
                data, addrFrom = UDPSock.recvfrom(buf)
except socket.timeout:
wx.Yield()
else:
try:
self.robot.calibration_matrix = eval(data)
except SyntaxError:
print "ERROR: Received invalid data from calibration tool."
else:
# Update the display
wx.CallAfter(self._handler2dialog, self.handler)
break
print "Connection with calibration tool closed."
UDPSock.close()
# delete files
os.remove(proj_copy.getFilenamePrefix() + ".spec_calibtmp")
os.remove(os.path.join(proj_copy.project_root, "configs", "calibrate.config"))
def _handler2dialog(self, handler):
self.handler = handler
self.SetTitle("Configure %s.%s" % (handler.getType(), handler.name))
methodObj = handler.getMethodByName('__init__')
drawParamConfigPane(self.panel_configs, methodObj, self.proj)
# Add in calibration configuration pane for pose handler
if handler.h_type is ht.PoseHandler:
# Default to identity matrix
if self.robot.calibration_matrix is None:
self.robot.calibration_matrix = eye(3)
label = wx.StaticText(self.panel_configs, -1, "Calibration Matrix:")
self.sheet = wx.grid.Grid(self.panel_configs)
self.sheet.CreateGrid(3, 3)
self.sheet.SetColLabelSize(0)
self.sheet.SetRowLabelSize(0)
for x in range(0,3):
self.sheet.SetColFormatFloat(x)
for y in range(0,3):
self.sheet.SetCellValue(x, y, str(self.robot.calibration_matrix[x,y]))
button_calibrate = wx.Button(self.panel_configs, -1, "Run calibration tool...")
self.panel_configs.GetSizer().Add(label, 0, wx.ALL, 5)
self.Bind(wx.grid.EVT_GRID_CELL_CHANGE, self._onCalibEdit, self.sheet)
self.panel_configs.GetSizer().Add(self.sheet, 0, wx.EXPAND | wx.ALL, 5)
self.panel_configs.GetSizer().Add(button_calibrate, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.Bind(wx.EVT_BUTTON, self._onClickCalibrate, button_calibrate)
# If this robot has a pre-defined calibration matrix, don't allow for calibration
if self.hsub.getRobotByType(self.robot.r_type).calibration_matrix is not None:
button_calibrate.SetLabel("Calibration is pre-defined by simulator.")
button_calibrate.Enable(False)
self.panel_configs.Layout()
# FIXME: this is a sizing hack, because I can't figure out how to get Fit() to work
a = self.panel_configs.GetSizer().GetMinSize()
b = self.GetSizer().GetMinSize()
self.SetSize((max(a[0],b[0]),a[1]+b[1]))
self.Refresh()
def onClickDefaults(self, event): # wxGlade: handlerConfigDialog.<event_handler>
print "Event handler `onClickDefaults' not implemented"
event.Skip()
# end of class handlerConfigDialog
class simSetupDialog(wx.Dialog):
def __init__(self, *args, **kwds):
# begin wxGlade: simSetupDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER | wx.THICK_FRAME
wx.Dialog.__init__(self, *args, **kwds)
self.list_box_experiment_name = wx.ListBox(self, wx.ID_ANY, choices=[])
self.button_cfg_new = wx.Button(self, wx.ID_NEW, "")
self.button_cfg_import = wx.Button(self, wx.ID_ANY, "Import...")
self.button_cfg_delete = wx.Button(self, wx.ID_DELETE, "")
self.sizer_28_staticbox = wx.StaticBox(self, wx.ID_ANY, "Experiment Configurations:")
self.label_9 = wx.StaticText(self, wx.ID_ANY, "Experiment Name: ")
self.text_ctrl_sim_experiment_name = wx.TextCtrl(self, wx.ID_ANY, "")
self.label_2 = wx.StaticText(self, wx.ID_ANY, "Custom Propositions:")
self.list_box_init_customs = wx.CheckListBox(self, wx.ID_ANY, choices=["1", "2"])
self.label_2_copy = wx.StaticText(self, wx.ID_ANY, "Action Propositions:")
self.list_box_init_actions = wx.CheckListBox(self, wx.ID_ANY, choices=["3", "4"])
self.button_edit_region_tags = wx.Button(self, wx.ID_ANY, "Edit region tags...")
self.sizer_22_staticbox = wx.StaticBox(self, wx.ID_ANY, "Initial Conditions")
self.label_1 = wx.StaticText(self, wx.ID_ANY, "Robots:")
self.list_box_robots = wx.ListBox(self, wx.ID_ANY, choices=[])
self.button_addrobot = wx.Button(self, wx.ID_ANY, "Add robot...")
self.button_2 = wx.Button(self, wx.ID_ANY, "Configure robot...")
self.button_3 = wx.Button(self, wx.ID_ANY, "Remove robot")
self.button_defaultrobot = wx.Button(self, wx.ID_ANY, "Set as Main Robot")
self.button_4 = wx.Button(self, wx.ID_ANY, "Edit proposition mapping...")
self.sizer_1_staticbox = wx.StaticBox(self, wx.ID_ANY, "Execution Environment")
self.sizer_27_staticbox = wx.StaticBox(self, wx.ID_ANY, "Experiment Settings")
self.button_sim_apply = wx.Button(self, wx.ID_APPLY, "")
self.button_sim_ok = wx.Button(self, wx.ID_OK, "")
self.button_sim_cancel = wx.Button(self, wx.ID_CANCEL, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_LISTBOX, self.onSimLoad, self.list_box_experiment_name)
self.Bind(wx.EVT_BUTTON, self.onConfigNew, self.button_cfg_new)
self.Bind(wx.EVT_BUTTON, self.onConfigImport, self.button_cfg_import)
self.Bind(wx.EVT_BUTTON, self.onConfigDelete, self.button_cfg_delete)
self.Bind(wx.EVT_TEXT, self.onSimNameEdit, self.text_ctrl_sim_experiment_name)
self.Bind(wx.EVT_BUTTON, self.onClickEditRegionTags, self.button_edit_region_tags)
self.Bind(wx.EVT_BUTTON, self.onClickAddRobot, self.button_addrobot)
self.Bind(wx.EVT_BUTTON, self.onClickConfigureRobot, self.button_2)
self.Bind(wx.EVT_BUTTON, self.onClickRemoveRobot, self.button_3)
self.Bind(wx.EVT_BUTTON, self.onSetMainRobot, self.button_defaultrobot)
self.Bind(wx.EVT_BUTTON, self.onClickEditMapping, self.button_4)
self.Bind(wx.EVT_BUTTON, self.onClickApply, self.button_sim_apply)
self.Bind(wx.EVT_BUTTON, self.onClickOK, self.button_sim_ok)
self.Bind(wx.EVT_BUTTON, self.onClickCancel, self.button_sim_cancel)
# end wxGlade
self.Bind(wx.EVT_CHECKLISTBOX, self.onCheckProp, self.list_box_init_customs)
self.Bind(wx.EVT_CHECKLISTBOX, self.onCheckProp, self.list_box_init_actions)
self.list_box_experiment_name.Bind(wx.EVT_LEFT_DOWN, self.onLoseFocusSimName)
self.Bind(wx.EVT_CLOSE, self.doClose)
if len(sys.argv) < 2:
print "You must specify a specification file."
print "Usage: %s [spec_file]" % sys.argv[0]
sys.exit(2)
# Load project
self.proj = project.Project()
self.proj.loadProject(sys.argv[1])
self.hsub = handlerSubsystem.HandlerSubsystem(None, self.proj.project_root)
# Set up the list of configs
self.list_box_experiment_name.Clear()
print "Loading handlers..."
self.hsub.loadAllHandlers()
print "Loading robots..."
self.hsub.loadAllRobots()
print "Loading experiment configs..."
self.hsub.loadAllConfigFiles()
for cfg in self.hsub.configs:
self.list_box_experiment_name.Append(cfg.name, cfg)
        if self.proj.current_config != "":
self.list_box_experiment_name.SetStringSelection(self.proj.current_config)
# Check for case where no config files are present
if self.list_box_experiment_name.GetCount() == 0:
# Create blank default config
cfg = ExperimentConfig()
# TODO: Check for existing untitleds and add a number at the end (steal from reged)
cfg.name = "Untitled configuration"
cfg.file_name = os.path.join(self.hsub.config_path,cfg.name.replace(' ','_'))
# since this config is not loaded, we assume it is complete
self.hsub.configs.append(cfg)
self.list_box_experiment_name.Append(cfg.name, cfg)
# By default, select the first one
if self.list_box_experiment_name.GetSelection() < 0:
self.list_box_experiment_name.SetSelection(0)
self._cfg2dialog(self._getSelectedExperimentConfig())
def __set_properties(self):
# begin wxGlade: simSetupDialog.__set_properties
self.SetTitle("Configure Execution")
self.SetSize((935, 580))
self.text_ctrl_sim_experiment_name.SetMinSize((300, 27))
self.list_box_init_customs.SetSelection(0)
self.list_box_init_actions.SetSelection(0)
# end wxGlade
def __do_layout(self):
# begin wxGlade: simSetupDialog.__do_layout
sizer_6 = wx.BoxSizer(wx.HORIZONTAL)
sizer_12 = wx.BoxSizer(wx.VERTICAL)
sizer_13 = wx.BoxSizer(wx.HORIZONTAL)
self.sizer_27_staticbox.Lower()
sizer_27 = wx.StaticBoxSizer(self.sizer_27_staticbox, wx.VERTICAL)
self.sizer_1_staticbox.Lower()
sizer_1 = wx.StaticBoxSizer(self.sizer_1_staticbox, wx.HORIZONTAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_4 = wx.BoxSizer(wx.VERTICAL)
sizer_3 = wx.BoxSizer(wx.VERTICAL)
self.sizer_22_staticbox.Lower()
sizer_22 = wx.StaticBoxSizer(self.sizer_22_staticbox, wx.VERTICAL)
sizer_23 = wx.BoxSizer(wx.HORIZONTAL)
sizer_17_copy = wx.BoxSizer(wx.VERTICAL)
sizer_17 = wx.BoxSizer(wx.VERTICAL)
sizer_30 = wx.BoxSizer(wx.HORIZONTAL)
sizer_29 = wx.BoxSizer(wx.VERTICAL)
self.sizer_28_staticbox.Lower()
sizer_28 = wx.StaticBoxSizer(self.sizer_28_staticbox, wx.VERTICAL)
sizer_29_copy = wx.BoxSizer(wx.HORIZONTAL)
sizer_6.Add((20, 20), 0, 0, 0)
sizer_29.Add((20, 20), 0, 0, 0)
sizer_28.Add((20, 10), 0, 0, 0)
sizer_28.Add(self.list_box_experiment_name, 1, wx.EXPAND, 0)
sizer_28.Add((20, 20), 0, 0, 0)
sizer_29_copy.Add(self.button_cfg_new, 0, 0, 0)
sizer_29_copy.Add((10, 20), 0, 0, 0)
sizer_29_copy.Add(self.button_cfg_import, 0, 0, 0)
sizer_29_copy.Add((10, 20), 0, 0, 0)
sizer_29_copy.Add(self.button_cfg_delete, 0, 0, 0)
sizer_28.Add(sizer_29_copy, 0, wx.EXPAND, 0)
sizer_28.Add((20, 10), 0, 0, 0)
sizer_29.Add(sizer_28, 1, wx.EXPAND, 0)
sizer_6.Add(sizer_29, 1, wx.EXPAND, 0)
sizer_6.Add((20, 20), 0, 0, 0)
sizer_12.Add((20, 20), 0, 0, 0)
sizer_30.Add(self.label_9, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_30.Add((20, 20), 0, 0, 0)
sizer_30.Add(self.text_ctrl_sim_experiment_name, 0, 0, 0)
sizer_12.Add(sizer_30, 0, wx.EXPAND, 0)
sizer_12.Add((20, 20), 0, 0, 0)
sizer_23.Add((5, 20), 0, 0, 0)
sizer_17.Add(self.label_2, 0, 0, 0)
sizer_17.Add(self.list_box_init_customs, 1, wx.EXPAND, 0)
sizer_23.Add(sizer_17, 1, wx.EXPAND, 0)
sizer_23.Add((20, 20), 0, 0, 0)
sizer_17_copy.Add(self.label_2_copy, 0, 0, 0)
sizer_17_copy.Add(self.list_box_init_actions, 1, wx.EXPAND, 0)
sizer_23.Add(sizer_17_copy, 1, wx.EXPAND, 0)
sizer_23.Add((5, 20), 0, 0, 0)
sizer_22.Add(sizer_23, 5, wx.EXPAND, 0)
sizer_22.Add(self.button_edit_region_tags, 0, wx.LEFT | wx.TOP | wx.ALIGN_CENTER_VERTICAL, 5)
sizer_27.Add(sizer_22, 1, wx.ALL | wx.EXPAND, 10)
sizer_3.Add(self.label_1, 0, 0, 0)
sizer_3.Add(self.list_box_robots, 1, wx.EXPAND, 0)
sizer_2.Add(sizer_3, 1, wx.EXPAND, 0)
sizer_2.Add((20, 20), 0, 0, 0)
sizer_4.Add(self.button_addrobot, 0, wx.BOTTOM, 5)
sizer_4.Add(self.button_2, 0, wx.BOTTOM, 5)
sizer_4.Add(self.button_3, 0, 0, 0)
sizer_4.Add((20, 30), 0, 0, 0)
sizer_4.Add(self.button_defaultrobot, 0, wx.BOTTOM, 5)
sizer_4.Add(self.button_4, 0, 0, 0)
sizer_2.Add(sizer_4, 1, wx.EXPAND, 0)
sizer_1.Add(sizer_2, 1, wx.EXPAND, 0)
sizer_27.Add(sizer_1, 0, wx.ALL | wx.EXPAND, 10)
sizer_12.Add(sizer_27, 1, wx.EXPAND, 0)
sizer_13.Add(self.button_sim_apply, 0, 0, 0)
sizer_13.Add((10, 20), 0, 0, 0)
sizer_13.Add(self.button_sim_ok, 0, 0, 0)
sizer_13.Add((10, 20), 0, 0, 0)
sizer_13.Add(self.button_sim_cancel, 0, 0, 0)
sizer_13.Add((10, 10), 0, 0, 0)
sizer_12.Add(sizer_13, 0, wx.ALIGN_RIGHT, 0)
sizer_12.Add((20, 10), 0, 0, 0)
sizer_6.Add(sizer_12, 2, wx.EXPAND, 0)
sizer_6.Add((20, 20), 0, 0, 0)
self.SetSizer(sizer_6)
self.Layout()
self.Centre()
# end wxGlade
def doClose(self, event):
# TODO: Check for dirty?
self.Destroy()
def _cfg2dialog(self, cfg):
self.text_ctrl_sim_experiment_name.SetValue(cfg.name)
# Set up the initial actions checklist as appropriate
self.list_box_init_actions.Set([])
for i, action in enumerate(self.proj.all_actuators):
self.list_box_init_actions.Insert(action, i)
if action in cfg.initial_truths:
self.list_box_init_actions.Check(i)
# Set up the initial customs checklist as appropriate
self.list_box_init_customs.Set([])
for i, custom in enumerate(self.proj.all_customs):
self.list_box_init_customs.Insert(custom, i)
if custom in cfg.initial_truths:
self.list_box_init_customs.Check(i)
# Set up the robots list
self.list_box_robots.Set([])
for i, robot in enumerate(cfg.robots):
if robot.name == cfg.main_robot:
self.list_box_robots.Insert(robot.name + " (Main)", i, robot)
else:
self.list_box_robots.Insert(robot.name, i, robot)
if len(cfg.robots) > 0:
self.list_box_robots.Select(0)
def onLoseFocusSimName(self, event):
if len(self.text_ctrl_sim_experiment_name.GetValue().strip()) == 0:
d = wx.MessageDialog(self, "Current experiment config needs a name. Please add one.", style = wx.OK | wx.ICON_ERROR)
d.ShowModal()
event.Skip(False)
return
if [c.name.strip() for c in self.hsub.configs].count(self.text_ctrl_sim_experiment_name.GetValue().strip()) > 1:
d = wx.MessageDialog(self, "Current experiment config has the same name with another config. Please change it.", style = wx.OK | wx.ICON_ERROR)
d.ShowModal()
event.Skip(False)
return
event.Skip()
def onSimLoad(self, event): # wxGlade: simSetupDialog.<event_handler>
cfg = event.GetClientData()
if cfg is not None:
self._cfg2dialog(cfg)
event.Skip()
def onConfigNew(self, event): # wxGlade: simSetupDialog.<event_handler>
# Create blank default config
cfg = ExperimentConfig()
# TODO: Check for existing untitleds and add a number at the end (steal from reged)
cfg.name = "Untitled configuration"
cfg.name = self._normalizeConfigName(cfg.name)
cfg.file_name = os.path.join(self.hsub.config_path, cfg.name.replace(' ','_'))
# since this config is not loaded, we assume it is complete
self.hsub.configs.append(cfg)
self.list_box_experiment_name.Append(cfg.name, cfg)
self.list_box_experiment_name.Select(self.list_box_experiment_name.GetCount()-1)
self._cfg2dialog(cfg)
event.Skip()
def _normalizeConfigName(self, name):
""" Make sure the config name is not taken already"""
# Make sure another config doesn't already have this name
while name in (r.name for r in self.hsub.configs):
name = name + " copy"
return name
def onConfigImport(self, event): # wxGlade: simSetupDialog.<event_handler>
file_name = wx.FileSelector("Import Config File", default_extension="config",
wildcard="Experiment config files (*.config)|*.config",
flags = wx.OPEN | wx.FILE_MUST_EXIST)
if file_name == "": return
# import the config file
cfg = ExperimentConfig()
cfg.fromFile(file_name, self.hsub)
cfg.name = self._normalizeConfigName(cfg.name)
self.hsub.configs.append(cfg)
self.list_box_experiment_name.Append(cfg.name, cfg)
self.list_box_experiment_name.Select(self.list_box_experiment_name.GetCount()-1)
self._cfg2dialog(cfg)
event.Skip()
def onConfigDelete(self, event): # wxGlade: simSetupDialog.<event_handler>
if self.list_box_experiment_name.GetSelection() == -1:
return
numel = self.list_box_experiment_name.GetCount()
if numel > 1: # don't allow deletion of final remaining element
# TODO: gray out button when no action possible
pos = self.list_box_experiment_name.GetSelection()
self.list_box_experiment_name.Delete(pos)
self.hsub.configs.pop(pos)
if pos == numel - 1:
# If the very last element was deleted, move the selection up one
newpos = pos - 1
else:
newpos = pos
self.list_box_experiment_name.Select(newpos)
self._cfg2dialog(self.list_box_experiment_name.GetClientData(newpos))
event.Skip()
def onSimNameEdit(self, event): # wxGlade: simSetupDialog.<event_handler>
pos = self.list_box_experiment_name.GetSelection()
self.list_box_experiment_name.GetClientData(pos).name = event.GetString().strip()
self.list_box_experiment_name.SetString(pos, event.GetString().strip())
event.Skip()
def onClickAddRobot(self, event): # wxGlade: simSetupDialog.<event_handler>
dlg = addRobotDialog(self, None, -1, "")
if dlg.ShowModal() != wx.ID_CANCEL:
obj = self._getSelectedExperimentConfig()
obj.robots += [dlg.robot]
if obj.main_robot == '':
obj.main_robot = dlg.robot.name
self._cfg2dialog(obj)
dlg.Destroy()
event.Skip()
def onClickConfigureRobot(self, event): # wxGlade: simSetupDialog.<event_handler>
# TODO: gray out button when no action possible
if self.list_box_robots.GetSelection() == -1:
return
dlg = addRobotDialog(self, None, -1, "")
pos = self.list_box_robots.GetSelection()
r = self.list_box_robots.GetClientData(pos)
dlg._robot2dialog(deepcopy(r), original=True)
if dlg.ShowModal() != wx.ID_CANCEL:
obj = self._getSelectedExperimentConfig()
# Update the name of the main robot if necessary
if obj.main_robot == obj.robots[pos].name:
obj.main_robot = dlg.robot.name
# Update any propmappings with new name, if necessary
for k,v in obj.prop_mapping.iteritems():
obj.prop_mapping[k] = re.sub("^"+r.name+"\.", dlg.robot.name+".", v)
obj.robots[pos] = dlg.robot
self._cfg2dialog(obj)
dlg.Destroy()
event.Skip()
def onClickRemoveRobot(self, event): # wxGlade: simSetupDialog.<event_handler>
if self.list_box_robots.GetSelection() == -1:
return
numel = self.list_box_robots.GetCount()
obj = self._getSelectedExperimentConfig()
# TODO: gray out button when no action possible
if numel > 0:
pos = self.list_box_robots.GetSelection()
# Clear the main_robot string if we're deleting that robot
if obj.main_robot == obj.robots[pos].name:
obj.main_robot = ''
obj.robots.pop(pos)
self._cfg2dialog(obj)
if pos == numel - 1:
# If the very last element was deleted, move the selection up one
                newpos = pos - 1
else:
newpos = pos
if pos != -1:
self.list_box_robots.Select(newpos)
event.Skip()
def onClickEditMapping(self, event): # wxGlade: simSetupDialog.<event_handler>
dlg = propMappingDialog(self, None, -1, "")
obj = self._getSelectedExperimentConfig()
dlg._mapping2dialog(deepcopy(obj.prop_mapping))
if dlg.ShowModal() != wx.ID_CANCEL:
obj.prop_mapping = dlg.mapping
dlg.Destroy()
event.Skip()
def onClickApply(self, event): # wxGlade: simSetupDialog.<event_handler>
# Get the current experiment config
self.proj.current_config = self._getSelectedExperimentConfig().name
self.hsub.setExecutingConfig(self.proj.current_config)
if len(self.hsub.executing_config.robots) == 0:
d = wx.MessageDialog(self, "There is no robot in the current experiment config. Please add one before saving.", style = wx.OK | wx.ICON_ERROR)
d.ShowModal()
event.Skip(False)
return
if len(self.hsub.executing_config.name) == 0:
d = wx.MessageDialog(self, "Current experiment config needs a name. Please add one before saving.", style = wx.OK | wx.ICON_ERROR)
d.ShowModal()
event.Skip(False)
return
if [c.name.strip() for c in self.hsub.configs].count(self.text_ctrl_sim_experiment_name.GetValue().strip()) > 1:
d = wx.MessageDialog(self, "Current experiment config has the same name with another config. Please change it.", style = wx.OK | wx.ICON_ERROR)
d.ShowModal()
event.Skip(False)
return
# clean up prop_mapping of the current executing config
default_prop_mapping = self.hsub.getDefaultPropMapping(self.proj.all_sensors, self.proj.all_actuators)
self.hsub.executing_config.normalizePropMapping(default_prop_mapping)
# Save the config files
self.hsub.saveAllConfigFiles()
# Save the name of the currently active config in the spec file
self.proj.writeSpecFile()
event.Skip()
def onClickOK(self, event): # wxGlade: simSetupDialog.<event_handler>
self.onClickApply(event)
# Clean up
if event.GetSkipped():
self.doClose(event)
event.Skip()
def _getSelectedExperimentConfig(self):
pos = self.list_box_experiment_name.GetSelection()
obj = self.list_box_experiment_name.GetClientData(pos)
return obj
def onCheckProp(self, event): # wxGlade: simSetupDialog.<event_handler>
obj = event.GetEventObject()
i = event.GetInt()
newstate = obj.IsChecked(i)
name = obj.GetString(i)
obj = self._getSelectedExperimentConfig()
if newstate:
obj.initial_truths += [name]
else:
obj.initial_truths.remove(name)
event.Skip()
def onSetMainRobot(self, event): # wxGlade: simSetupDialog.<event_handler>
pos = self.list_box_robots.GetSelection()
obj = self.list_box_robots.GetClientData(pos)
if obj is None:
return
self._getSelectedExperimentConfig().main_robot = obj.name
self._cfg2dialog(self._getSelectedExperimentConfig())
self.list_box_robots.SetSelection(pos)
event.Skip()
def onClickCancel(self, event): # wxGlade: simSetupDialog.<event_handler>
# Clean up
self.doClose(event)
event.Skip()
def onClickEditRegionTags(self, event): # wxGlade: simSetupDialog.<event_handler>
dlg = regionTagsDialog(self, None, -1, "")
obj = self._getSelectedExperimentConfig()
dlg._tags2dialog(deepcopy(obj.region_tags))
if dlg.ShowModal() != wx.ID_CANCEL:
obj.region_tags = dlg.tags
dlg.Destroy()
event.Skip()
# end of class simSetupDialog
class addRobotDialog(wx.Dialog):
def __init__(self, parent, *args, **kwds):
# begin wxGlade: addRobotDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.label_3 = wx.StaticText(self, wx.ID_ANY, "Robot type:")
self.combo_box_robottype = wx.ComboBox(self, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN | wx.CB_READONLY)
self.label_4 = wx.StaticText(self, wx.ID_ANY, "Robot name:")
self.text_ctrl_robotname = wx.TextCtrl(self, wx.ID_ANY, "")
self.static_line_1 = wx.StaticLine(self, wx.ID_ANY)
self.button_7 = wx.Button(self, wx.ID_CANCEL, "")
self.button_6 = wx.Button(self, wx.ID_OK, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_COMBOBOX, self.onChooseRobot, self.combo_box_robottype)
self.Bind(wx.EVT_TEXT, self.onEditRobotName, self.text_ctrl_robotname)
self.Bind(wx.EVT_BUTTON, self.onClickOK, self.button_6)
# end wxGlade
self.parent = parent
self.hsub = parent.hsub
self.proj = parent.proj
self.robot = RobotConfig()
self.original_robot = RobotConfig()
self.handler_labels = {}
self.handler_combos = {}
self.handler_buttons = {}
for handler_type_name in ht.getAllHandlerTypeName():
handler_type_class = ht.getHandlerTypeClass(handler_type_name)
self.handler_labels[handler_type_class] = wx.StaticText(self, -1, "%s handler:" % handler_type_name)
self.handler_combos[handler_type_class] = wx.ComboBox(self, -1, choices=[], style=wx.CB_DROPDOWN|wx.CB_READONLY)
self.handler_buttons[handler_type_class] = wx.Button(self, -1, "Configure...")
self.sizer_9.Add(self.handler_labels[handler_type_class], 0, wx.ALL|wx.ALIGN_RIGHT, 0)
self.sizer_9.Add(self.handler_combos[handler_type_class], 1, wx.ALL|wx.EXPAND, 0)
self.sizer_9.Add(self.handler_buttons[handler_type_class], 0, wx.ALL, 0)
self.Bind(wx.EVT_BUTTON, self.onClickConfigure, self.handler_buttons[handler_type_class])
self.Bind(wx.EVT_COMBOBOX, self.onChangeHandler, self.handler_combos[handler_type_class])
self.Layout()
self.SetSizeHints(self.GetSize()[0], 0) # Force width to stay the same
self.Fit()
# Set up the list of robot types
self.combo_box_robottype.Clear()
for r in self.parent.hsub.robot_configs:
self.combo_box_robottype.Append(r.r_type + (" (Not successfully loaded)" if not r.successfully_loaded else ""))
def _populateHandlerCombos(self):
# Populate based on current robot type
for handler_type_class in ht.getAllHandlerTypeClass():
self.handler_combos[handler_type_class].Clear()
self.handler_combos[handler_type_class].SetValue("")
self.handler_buttons[handler_type_class].Enable(False)
# Load handlers under this robot
if handler_type_class in self.parent.hsub.handler_configs[self.robot.r_type]:
for handler_config in self.parent.hsub.handler_configs[self.robot.r_type][handler_type_class]:
self.handler_combos[handler_type_class].Insert(handler_config.name, 0, handler_config)
# Load handlers under shared folder for pose, motionControl, drive
if handler_type_class in self.parent.hsub.handler_configs['share'] and \
handler_type_class in [ht.PoseHandler, ht.MotionControlHandler, ht.DriveHandler]:
for handler_config in self.parent.hsub.handler_configs['share'][handler_type_class]:
self.handler_combos[handler_type_class].Insert(handler_config.name, 0, handler_config)
def __set_properties(self):
# begin wxGlade: addRobotDialog.__set_properties
self.SetTitle("Add/Configure Robot")
self.SetSize((637, 410))
# end wxGlade
def __do_layout(self):
# begin wxGlade: addRobotDialog.__do_layout
sizer_5 = wx.BoxSizer(wx.VERTICAL)
sizer_11 = wx.BoxSizer(wx.HORIZONTAL)
sizer_9 = wx.FlexGridSizer(0, 3, 2, 7)
sizer_8 = wx.BoxSizer(wx.HORIZONTAL)
sizer_7 = wx.BoxSizer(wx.HORIZONTAL)
sizer_7.Add(self.label_3, 0, wx.ALL, 5)
sizer_7.Add(self.combo_box_robottype, 1, wx.ALL, 5)
sizer_5.Add(sizer_7, 0, wx.EXPAND, 0)
sizer_8.Add(self.label_4, 0, wx.ALL, 5)
sizer_8.Add(self.text_ctrl_robotname, 1, wx.ALL, 5)
sizer_5.Add(sizer_8, 0, wx.EXPAND, 0)
sizer_5.Add(self.static_line_1, 0, wx.EXPAND, 0)
sizer_9.AddGrowableCol(1)
sizer_5.Add(sizer_9, 1, wx.ALL | wx.EXPAND, 10)
sizer_5.Add((20, 5), 0, wx.EXPAND, 0)
sizer_11.Add((20, 20), 1, wx.EXPAND, 0)
sizer_11.Add(self.button_7, 0, wx.ALL, 5)
sizer_11.Add(self.button_6, 0, wx.ALL, 5)
sizer_5.Add(sizer_11, 0, wx.EXPAND, 0)
self.SetSizer(sizer_5)
self.Layout()
# end wxGlade
self.sizer_9 = sizer_9
def _robot2dialog(self, robot, original=False):
"""
Update the GUI based on a robot object.
If `original` is True, save a reference to allow for reversion to defaults.
"""
self.robot = robot
if original:
self.original_robot = deepcopy(robot)
self.combo_box_robottype.SetStringSelection(self.robot.r_type + (" (Not successfully loaded)" if not self.robot.successfully_loaded else ""))
self.text_ctrl_robotname.SetValue(self.robot.name)
self._populateHandlerCombos()
for handler_type_class, handler_config in self.robot.handlers.iteritems():
# for each handler type, a robot can only have one handler config
self.handler_combos[handler_type_class].SetValue("")
self.handler_combos[handler_type_class].SetStringSelection(handler_config.name)
# Disable the "Configure" button if there are no parameters (with an exception for pose)
if len(handler_config.getMethodByName("__init__").para) == 0 and \
handler_config.h_type is not ht.PoseHandler:
self.handler_buttons[handler_type_class].Enable(False)
else:
self.handler_buttons[handler_type_class].Enable(True)
if self.handler_combos[handler_type_class].GetStringSelection() == "":
# when neither the robot nor the share folder has the handler loaded
logging.warning('Cannot find any handler config in the options for handler type {!r}'\
.format(handler_type_class))
self.handler_buttons[handler_type_class].Enable(False)
def onClickConfigure(self, event):
src = event.GetEventObject()
# Figure out which "Configure..." button was pressed
for htype, b in self.handler_buttons.iteritems():
if src is b:
# TODO: gray out button when no action possible
if self.handler_combos[htype].GetValue() == "":
return
dlg = handlerConfigDialog(self, None, -1, "")
# Edit existing handler object
dlg._handler2dialog(deepcopy(self.robot.handlers[htype]))
if dlg.ShowModal() != wx.ID_CANCEL:
self.robot.handlers[htype] = dlg.handler
#self._robot2dialog(self.robot)
dlg.Destroy()
break
event.Skip()
def onChangeHandler(self, event):
src = event.GetEventObject()
# Figure out which handler was changed
for htype, b in self.handler_combos.iteritems():
if src is b:
hname = src.GetValue()
# If this handler has default values from the selected robot file, use them
# TODO: this will erase any previous config settings...
default_robot = self.parent.hsub.getRobotByType(self.robot.r_type)
handler_config_changed = default_robot.getHandlerOfRobot(htype)
if handler_config_changed.name != hname:
handler_config_changed = None
if handler_config_changed is None:
# just grab the plain handler
rname = self.robot.r_type
handler_config_changed = self.parent.hsub.getHandlerConfigDefault(rname, htype, hname)
if handler_config_changed is None:
# this handler might be a shared one
rname = 'share'
handler_config_changed = self.parent.hsub.getHandlerConfigDefault(rname, htype, hname)
if handler_config_changed is not None:
self.robot.handlers[htype] = handler_config_changed
else:
logging.warning('Cannot find the selected handler config.')
break
self._robot2dialog(self.robot)
event.Skip()
def _normalizeRobotName(self, name):
""" Clean a robot name and make sure it's not taken already"""
# Disallow empty names, because that would be super confusing
if name is None or name == "":
raise ValueError("Your robot needs a name!")
# Replace spaces and non-alphanums with underscores
name = re.sub(r"\W", "_", name.strip())
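# e.g. "my robot #2" becomes "my_robot__2" (illustrative example, not from the original code)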
# Make sure another robot doesn't already have this name
if name != self.original_robot.name and \
name in (r.name for r in self.parent._getSelectedExperimentConfig().robots):
raise ValueError('Current configuration already contains a robot with name "{}".\n\nPlease rename.'.format(name))
return name
def onClickOK(self, event): # wxGlade: addRobotDialog.<event_handler>
# TODO: add in checks for all combo boxes (don't allow null handlers)
# Make sure that all required handler parameters have been specified
incomplete_params = []
for h_type, handler in self.robot.handlers.iteritems():
for param in handler.getMethodByName("__init__").para:
if param.getValue() is None:
incomplete_params.append((handler.name, param.name))
if len(incomplete_params) > 0:
wx.MessageBox("The following parameters need to be specified:\n" + \
"\n".join([" - {}.{}".format(hn, pn) for hn, pn in incomplete_params]),
"Error", style = wx.OK | wx.ICON_ERROR)
event.Skip(False)
return
# Make sure the robot name is OK
try:
self.robot.name = self._normalizeRobotName(self.robot.name)
except ValueError as e:
wx.MessageBox(e.message, "Error", style = wx.OK | wx.ICON_ERROR)
event.Skip(False)
return
event.Skip()
def onChooseRobot(self, event): # wxGlade: addRobotDialog.<event_handler>
# Strip the trailing note
robot_type = event.GetEventObject().GetValue().replace(" (Not successfully loaded)", "")
self.robot = deepcopy(self.parent.hsub.getRobotByType(robot_type))
self._robot2dialog(self.robot)
event.Skip()
def onEditRobotName(self, event): # wxGlade: addRobotDialog.<event_handler>
self.robot.name = event.GetString()
event.Skip()
# end of class addRobotDialog
class propMappingDialog(wx.Dialog):
def __init__(self, parent, *args, **kwds):
# begin wxGlade: propMappingDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER | wx.THICK_FRAME
wx.Dialog.__init__(self, *args, **kwds)
self.label_6 = wx.StaticText(self, wx.ID_ANY, "Propositions:")
self.list_box_props = wx.ListBox(self, wx.ID_ANY, choices=[], style=wx.LB_SINGLE | wx.LB_ALWAYS_SB)
self.label_11 = wx.StaticText(self, wx.ID_ANY, "Continuous controller mapping:")
self.text_ctrl_mapping = wx.richtext.RichTextCtrl(self, wx.ID_ANY, "")
self.button_9 = wx.Button(self, wx.ID_ANY, " ^\nInsert/Apply")
self.label_7 = wx.StaticText(self, wx.ID_ANY, "Robots:")
self.list_box_robots = wx.ListBox(self, wx.ID_ANY, choices=[])
self.label_8 = wx.StaticText(self, wx.ID_ANY, "Sensors/Actuators:")
self.list_box_functions = wx.ListBox(self, wx.ID_ANY, choices=[])
self.label_10 = wx.StaticText(self, wx.ID_ANY, "Parameters:")
self.panel_method_cfg = wx.ScrolledWindow(self, wx.ID_ANY, style=wx.SUNKEN_BORDER | wx.TAB_TRAVERSAL)
self.button_11 = wx.Button(self, wx.ID_OK, "")
self.button_10 = wx.Button(self, wx.ID_CANCEL, "")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_LISTBOX, self.onSelectProp, self.list_box_props)
self.Bind(wx.EVT_TEXT, self.onEditMapping, self.text_ctrl_mapping)
self.Bind(wx.EVT_BUTTON, self.onClickApply, self.button_9)
self.Bind(wx.EVT_LISTBOX, self.onSelectRobot, self.list_box_robots)
self.Bind(wx.EVT_LISTBOX, self.onSelectHandler, self.list_box_functions)
self.Bind(wx.EVT_BUTTON, self.onClickOK, self.button_10)
# end wxGlade
self.text_ctrl_mapping.Bind(wx.EVT_TEXT, self.onEditMapping)
self.text_ctrl_mapping.Bind(wx.EVT_LEFT_UP, self.onClickMapping)
#self.Bind(wx.EVT_LEFT_UP, self.onClickMapping, self.text_ctrl_mapping)
#self.text_ctrl_mapping.Bind(wx.EVT_LEFT_DOWN, self.onClickMapping)
self.text_ctrl_mapping.Bind(wx.EVT_KEY_UP, self.onClickMapping)
self.text_ctrl_mapping.Bind(wx.EVT_KEY_DOWN, self.onClickMapping)
self.proj = parent.proj
self.hsub = parent.hsub
self.robots = parent._getSelectedExperimentConfig().robots
# Set up the list of robots
for i, r in enumerate(self.robots):
self.list_box_robots.Insert("%s (%s)" % (r.name, r.r_type), i, r)
self.list_box_robots.Append("(Simulated)")
self.list_box_robots.SetSelection(0)
# Set up the list of props
self.list_box_props.Clear()
self.list_box_props.Append("=== Sensors ===")
#self.list_box_props.SetItemFont(n, wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
#self.list_box_props.SetItemBackgroundColour(n, wx.Color(100,100,100))
for p in self.proj.all_sensors:
self.list_box_props.Append(p)
self.list_box_props.Append("")
self.list_box_props.Append("=== Actuators ===")
for p in self.proj.all_actuators:
self.list_box_props.Append(p)
self.mapping = None
self.tempMethod = None
self.list_box_props.SetSelection(0)
self.onSelectProp(None)
def _mapping2dialog(self, mapping):
self.mapping = mapping
# Set defaults as necessary
for p in self.proj.all_sensors:
if p not in mapping or self.mapping[p].strip() == "":
m = deepcopy(self.hsub.handler_configs["share"][ht.SensorHandler][0].getMethodByName("buttonPress"))
para = m.getParaByName("button_name")
para.setValue(p)
self.mapping[p] = self.hsub.method2String(m, "share")
for p in self.proj.all_actuators:
if p not in mapping or self.mapping[p].strip() == "":
m = deepcopy(self.hsub.handler_configs["share"][ht.ActuatorHandler][0].getMethodByName("setActuator"))
para = m.getParaByName("name")
para.setValue(p)
self.mapping[p] = self.hsub.method2String(m, "share")
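# The resulting default mapping values are handler-call strings of roughly the
# form "share.SensorHandler.buttonPress(button_name='my_prop')" -- an
# illustrative sketch; the exact string is whatever hsub.method2String produces.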
def __set_properties(self):
# begin wxGlade: propMappingDialog.__set_properties
self.SetTitle("Proposition Mapping")
self.SetSize((981, 419))
self.panel_method_cfg.SetScrollRate(10, 10)
# end wxGlade
def __do_layout(self):
# begin wxGlade: propMappingDialog.__do_layout
sizer_14 = wx.BoxSizer(wx.HORIZONTAL)
sizer_16 = wx.BoxSizer(wx.VERTICAL)
sizer_25 = wx.BoxSizer(wx.HORIZONTAL)
sizer_19 = wx.BoxSizer(wx.HORIZONTAL)
sizer_24 = wx.BoxSizer(wx.VERTICAL)
sizer_21 = wx.BoxSizer(wx.VERTICAL)
sizer_20 = wx.BoxSizer(wx.VERTICAL)
sizer_18 = wx.BoxSizer(wx.HORIZONTAL)
sizer_15 = wx.BoxSizer(wx.VERTICAL)
sizer_15.Add(self.label_6, 0, wx.LEFT | wx.RIGHT | wx.TOP, 5)
sizer_15.Add(self.list_box_props, 1, wx.ALL | wx.EXPAND, 5)
sizer_14.Add(sizer_15, 1, wx.EXPAND, 0)
sizer_16.Add(self.label_11, 0, wx.ALL, 5)
sizer_16.Add(self.text_ctrl_mapping, 1, wx.ALL | wx.EXPAND, 5)
sizer_18.Add((20, 20), 1, wx.EXPAND, 0)
sizer_18.Add(self.button_9, 0, wx.ALL, 5)
sizer_18.Add((20, 20), 1, wx.EXPAND, 0)
sizer_16.Add(sizer_18, 0, wx.EXPAND, 0)
sizer_20.Add(self.label_7, 0, wx.ALL, 5)
sizer_20.Add(self.list_box_robots, 1, wx.ALL | wx.EXPAND, 5)
sizer_19.Add(sizer_20, 1, wx.EXPAND, 0)
sizer_21.Add(self.label_8, 0, wx.ALL, 5)
sizer_21.Add(self.list_box_functions, 1, wx.ALL | wx.EXPAND, 5)
sizer_19.Add(sizer_21, 1, wx.EXPAND, 0)
sizer_24.Add(self.label_10, 0, wx.ALL, 5)
sizer_24.Add(self.panel_method_cfg, 1, wx.ALL | wx.EXPAND, 5)
sizer_19.Add(sizer_24, 3, wx.EXPAND, 0)
sizer_16.Add(sizer_19, 5, wx.EXPAND, 0)
sizer_25.Add((20, 20), 1, wx.EXPAND, 0)
sizer_25.Add(self.button_11, 0, wx.ALL, 5)
sizer_25.Add(self.button_10, 0, wx.ALL, 5)
sizer_16.Add(sizer_25, 0, wx.EXPAND, 0)
sizer_14.Add(sizer_16, 4, wx.EXPAND, 0)
self.SetSizer(sizer_14)
self.Layout()
# end wxGlade
def onSelectProp(self, event): # wxGlade: propMappingDialog.<event_handler>
# If you've selected a header, not a proposition, then gray out the edit box
if self.list_box_props.GetStringSelection().startswith("===") or self.list_box_props.GetStringSelection() == "":
self.text_ctrl_mapping.Enable(False)
self.text_ctrl_mapping.SetValue("")
self.list_box_robots.Enable(False)
self.list_box_functions.Clear()
self.list_box_functions.Enable(False)
else:
self.text_ctrl_mapping.Enable(True)
self.list_box_robots.Enable(True)
self.list_box_functions.Enable(True)
self.onSelectRobot(None)
if event.GetString() in self.mapping:
self.text_ctrl_mapping.SetValue(self.mapping[event.GetString()])
else:
self.text_ctrl_mapping.SetValue("")
# Auto-select the first term
self.onClickMapping(None)
if event is not None:
event.Skip()
def onClickApply(self, event): # wxGlade: propMappingDialog.<event_handler>
if self.tempMethod is not None:
#for p in self.tempMethod.para:
# print p.name, p.value
rname = self.list_box_robots.GetStringSelection().split(" ")[0]
if rname == "(Simulated)":
rname = "share"
method_string = self.hsub.method2String(self.tempMethod, rname)
if method_string is None:
print "ERROR: Method cannot be mapped to string"
else:
start, end = self.text_ctrl_mapping.GetSelection()
if start < 0:
# If nothing is selected, just insert
start = self.text_ctrl_mapping.GetInsertionPoint()
end = start
self.text_ctrl_mapping.Replace(start, end, method_string)
self.text_ctrl_mapping.SetSelection(start, start + len(method_string))
event.Skip()
def onSelectRobot(self, event): # wxGlade: propMappingDialog.<event_handler>
# Populate list of functions
self.list_box_functions.Clear()
pos = self.list_box_robots.GetSelection()
r = self.list_box_robots.GetClientData(pos)
# Only show sensors for sensor props, and actuators for actuator props
if self.list_box_props.GetStringSelection() in self.proj.all_sensors:
if self.list_box_robots.GetStringSelection() == "(Simulated)":
# TODO: might there be more than one type of handler in share?
methods = self.hsub.handler_configs["share"][ht.SensorHandler][0].methods
else:
methods = getattr(r.getHandlerOfRobot(ht.SensorHandler), 'methods', [])
elif self.list_box_props.GetStringSelection() in self.proj.all_actuators:
if self.list_box_robots.GetStringSelection() == "(Simulated)":
# TODO: might there be more than one type of handler in share?
methods = self.hsub.handler_configs["share"][ht.ActuatorHandler][0].methods
else:
methods = getattr(r.getHandlerOfRobot(ht.ActuatorHandler), 'methods', [])
else:
print ("WARNING: Selected proposition '%s' that is neither sensor nor actuator. " +
"This should be impossible.") % (self.list_box_props.GetStringSelection())
for i, m in enumerate([m for m in methods if not m.name.startswith("_")]):
self.list_box_functions.Insert("%s" % (m.name), i, m)
if event is not None:
event.Skip()
def onSelectHandler(self, event): # wxGlade: propMappingDialog.<event_handler>
if event is not None:
event.Skip()
pos = self.list_box_functions.GetSelection()
if pos < 0:
if self.panel_method_cfg.GetSizer() is not None:
self.panel_method_cfg.GetSizer().Clear(deleteWindows=True)
return
m = self.list_box_functions.GetClientData(pos)
self.tempMethod = deepcopy(m)
drawParamConfigPane(self.panel_method_cfg, self.tempMethod, self.proj)
self.Layout()
def onClickOK(self, event): # wxGlade: propMappingDialog.<event_handler>
#print "Event handler `onClickOK' not implemented!"
event.Skip()
def onClickMapping(self, event):
if event is not None:
event.Skip()
if event is not None and event.GetEventType() in [wx.wxEVT_KEY_DOWN, wx.wxEVT_KEY_UP] and \
event.GetKeyCode() not in [wx.WXK_LEFT, wx.WXK_RIGHT, wx.WXK_UP, wx.WXK_DOWN, wx.WXK_HOME, wx.WXK_END,
wx.WXK_NUMPAD_LEFT, wx.WXK_NUMPAD_RIGHT, wx.WXK_NUMPAD_UP, wx.WXK_NUMPAD_DOWN]:
# wx.WXK_BACK, wx.WXK_DELETE]:
return
# TODO: Make backspace work as expected; maybe colorize/bold
if event is not None:
i = self.text_ctrl_mapping.GetInsertionPoint()
# Special case for beginning or end of field
if i == 0 or i == self.text_ctrl_mapping.GetLastPosition():
self.text_ctrl_mapping.SelectNone()
return
else:
# If called programmatically (event is None), select the first term
i = 1
s = self.text_ctrl_mapping.GetValue()
# Don't bother going any further if it's blank
if s.strip() == "":
return
start, end = self.text_ctrl_mapping.GetSelection()
if start >= 0:
# If something is selected, check to make sure neither side is inside a methodstring
check_pts = [start, end]
else:
# Otherwise just make sure the insertion point hasn't moved inside a methodstring
check_pts = [i]
try:
cds, _ = parseCallString(s, mode="sensor") # Sensor mode is more lenient than actuator
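# parseCallString (as inferred from its use below) returns call descriptors
# whose .name is a (robot, handler, method) tuple and whose .start_pos/.end_pos
# give the span of that call within s.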
except SyntaxError:
# If there was a parsing error, it's not a proper methodstring anyways
return
cd_local = None
for cd in cds:
if any([i > cd.start_pos and i < cd.end_pos for i in check_pts]):
cd_local = cd
break
if cd_local is None:
return
# Make sure the name has all three parts: (robot, handler, method)
if len(cd_local.name) != 3:
return
# Make sure the robot name is valid
rname = cd_local.name[0]
if rname == "share":
rname = "(Simulated)"
corresponding_robots = [n for n in self.list_box_robots.GetItems() if n.startswith(rname)]
if len(corresponding_robots) != 1:
print "WARNING: No unique robot corresponding to name '%s'." % m.group("robot_name")
return
# Force selection of the entire keyword, and place insertion caret as appropriate
self.text_ctrl_mapping.SetSelection(cd_local.start_pos, cd_local.end_pos)
if event is not None:
if event.GetEventType() in [wx.wxEVT_KEY_DOWN, wx.wxEVT_KEY_UP]:
if event.GetKeyCode() in [wx.WXK_LEFT, wx.WXK_HOME, wx.WXK_UP, wx.WXK_NUMPAD_LEFT, wx.WXK_NUMPAD_UP]:
self.text_ctrl_mapping.MoveCaret(cd_local.start_pos-1)
elif event.GetKeyCode() in [wx.WXK_RIGHT, wx.WXK_END, wx.WXK_DOWN, wx.WXK_NUMPAD_RIGHT, wx.WXK_NUMPAD_DOWN]:
self.text_ctrl_mapping.MoveCaret(cd_local.end_pos-1)
# Load detailed view of keyword below
self.list_box_robots.SetStringSelection(corresponding_robots[0])
self.onSelectRobot(None)
self.list_box_functions.SetStringSelection(cd_local.name[2])
self.tempMethod = self.hsub.string2Method(s[cd_local.start_pos:cd_local.end_pos], self.robots)
drawParamConfigPane(self.panel_method_cfg, self.tempMethod, self.proj)
self.Layout()
def onEditMapping(self, event): # wxGlade: propMappingDialog.<event_handler>
if not self.text_ctrl_mapping.IsEnabled():
return
prop_name = self.list_box_props.GetStringSelection()
self.mapping[prop_name] = self.text_ctrl_mapping.GetValue()
event.Skip()
# end of class propMappingDialog
if __name__ == "__main__":
SimConfigEditor = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
SimSetupDialog = simSetupDialog(None, -1, "")
SimConfigEditor.SetTopWindow(SimSetupDialog)
SimSetupDialog.Show()
SimConfigEditor.MainLoop()
|
johnkeepmoving/oss-ftp
|
refs/heads/master
|
python27/win32/Lib/test/test_sha.py
|
137
|
# Testing sha module (NIST's Secure Hash Algorithm)
# use the three examples from Federal Information Processing Standards
# Publication 180-1, Secure Hash Standard, 1995 April 17
# http://www.itl.nist.gov/div897/pubs/fip180-1.htm
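# For example, the first FIPS 180-1 test vector can be checked interactively
# (a quick sketch, not part of the original test suite):
#   >>> import sha
#   >>> sha.new("abc").hexdigest()
#   'a9993e364706816aba3e25717850c26c9cd0d89d'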
import warnings
warnings.filterwarnings("ignore", "the sha module is deprecated.*",
DeprecationWarning)
import sha
import unittest
from test import test_support
class SHATestCase(unittest.TestCase):
def check(self, data, digest):
# Check digest matches the expected value
obj = sha.new(data)
computed = obj.hexdigest()
self.assertTrue(computed == digest)
# Verify that the value doesn't change between two consecutive
# digest operations.
computed_again = obj.hexdigest()
self.assertTrue(computed == computed_again)
# Check hexdigest() output matches digest()'s output
digest = obj.digest()
hexd = ""
for c in digest:
hexd += '%02x' % ord(c)
self.assertTrue(computed == hexd)
def test_case_1(self):
self.check("abc",
"a9993e364706816aba3e25717850c26c9cd0d89d")
def test_case_2(self):
self.check("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"84983e441c3bd26ebaae4aa1f95129e5e54670f1")
def test_case_3(self):
self.check("a" * 1000000,
"34aa973cd4c4daa4f61eeb2bdbad27316534016f")
def test_case_4(self):
self.check(chr(0xAA) * 80,
'4ca0ef38f1794b28a8f8ee110ee79d48ce13be25')
def test_main():
test_support.run_unittest(SHATestCase)
if __name__ == "__main__":
test_main()
|
mbayon/TFG-MachineLearning
|
refs/heads/master
|
venv/lib/python3.6/site-packages/numpy/distutils/fcompiler/hpux.py
|
229
|
from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
compilers = ['HPUXFCompiler']
class HPUXFCompiler(FCompiler):
compiler_type = 'hpux'
description = 'HP Fortran 90 Compiler'
version_pattern = r'HP F90 (?P<version>[^\s*,]*)'
executables = {
'version_cmd' : ["f90", "+version"],
'compiler_f77' : ["f90"],
'compiler_fix' : ["f90"],
'compiler_f90' : ["f90"],
'linker_so' : ["ld", "-b"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = None #XXX: fix me
module_include_switch = None #XXX: fix me
pic_flags = ['+Z']
def get_flags(self):
return self.pic_flags + ['+ppu', '+DD64']
def get_flags_opt(self):
return ['-O3']
def get_libraries(self):
return ['m']
def get_library_dirs(self):
opt = ['/usr/lib/hpux64']
return opt
def get_version(self, force=0, ok_status=[256, 0, 1]):
# XXX status==256 may indicate 'unrecognized option' or
# 'no input file'. So, version_cmd needs more work.
return FCompiler.get_version(self, force, ok_status)
if __name__ == '__main__':
from distutils import log
log.set_verbosity(10)
from numpy.distutils.fcompiler import new_fcompiler
compiler = new_fcompiler(compiler='hpux')
compiler.customize()
print(compiler.get_version())
|
saintbird/mezzanine
|
refs/heads/master
|
mezzanine/blog/management/commands/import_blogger.py
|
8
|
from __future__ import unicode_literals
from datetime import datetime, timedelta
from optparse import make_option
from time import timezone
import re
from django.core.management.base import CommandError
from mezzanine.blog.management.base import BaseImporterCommand
# TODO: update this to use v3 of the blogger API.
class Command(BaseImporterCommand):
"""
Implements a Blogger importer. Takes a Blogger ID in order to be able to
determine which blog it should point to and harvest the XML from.
"""
option_list = BaseImporterCommand.option_list + (
make_option("-b", "--blogger-id", dest="blog_id",
help="Blogger Blog ID from blogger dashboard"),
)
def handle_import(self, options):
"""
Gets posts from Blogger.
"""
blog_id = options.get("blog_id")
if blog_id is None:
raise CommandError("Usage is import_blogger %s" % self.args)
try:
from gdata import service
except ImportError:
raise CommandError("Could not import the gdata library.")
blogger = service.GDataService()
blogger.service = "blogger"
blogger.server = "www.blogger.com"
start_index = 1
processed_posts = []
new_posts = 1
while new_posts:
new_posts = 0
query = service.Query()
query.feed = "/feeds/%s/posts/full" % blog_id
query.max_results = 500
query.start_index = start_index
try:
feed = blogger.Get(query.ToUri())
except service.RequestError as err:
message = "There was a service error. The response was: " \
"%(status)s %(reason)s - %(body)s" % err.message
raise CommandError(message, blogger.server + query.feed,
err.message["status"])
for (i, entry) in enumerate(feed.entry):
# this basically gets the unique post ID from the URL to itself
# and pulls the ID off the end.
post_id = entry.GetSelfLink().href.split("/")[-1]
# Skip duplicate posts. Important for the last query.
if post_id in processed_posts:
continue
title = entry.title.text
content = entry.content.text
# this strips off the time zone info off the end as we want UTC
clean_date = entry.published.text[:re.search(r"\.\d{3}",
entry.published.text).end()]
published_date = datetime.strptime(clean_date,
"%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)
# TODO - issues with content not generating correct <P> tags
tags = [tag.term for tag in entry.category]
post = self.add_post(title=title, content=content,
pub_date=published_date, tags=tags)
# get the comments from the post feed and then add them to
# the post details
comment_url = "/feeds/%s/%s/comments/full?max-results=1000"
comments = blogger.Get(comment_url % (blog_id, post_id))
for comment in comments.entry:
email = comment.author[0].email.text
author_name = comment.author[0].name.text
# Strip off the time zone info off the end as we want UTC
clean_date = comment.published.text[:re.search(r"\.\d{3}",
comment.published.text).end()]
comment_date = datetime.strptime(clean_date,
"%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)
website = ""
if comment.author[0].uri:
website = comment.author[0].uri.text
body = comment.content.text
# add the comment as a dict to the end of the comments list
self.add_comment(post=post, name=author_name, email=email,
body=body, website=website,
pub_date=comment_date)
processed_posts.append(post_id)
new_posts += 1
start_index += 500
|