repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
taedla01/MissionPlanner | refs/heads/master | Lib/site-packages/numpy/core/tests/test_scalarmath.py | 53 | import sys
from numpy.testing import *
import numpy as np
# All numpy scalar types exercised by the arithmetic tests below
# (boolean, signed/unsigned integers, floats, and complex types).
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
         np.int_, np.uint, np.longlong, np.ulonglong,
         np.single, np.double, np.longdouble, np.csingle,
         np.cdouble, np.clongdouble]

# Non-complex numeric types only (no bool); used for ops such as divmod
# that are undefined or ambiguous for complex numbers.
real_types = [ np.byte, np.ubyte, np.short, np.ushort,
               np.intc, np.uintc,
               np.int_, np.uint, np.longlong, np.ulonglong,
               np.single, np.double, np.longdouble ]

# Complex scalar types; exercised separately where real ops don't apply.
complex_types = [ np.csingle, np.cdouble, np.clongdouble ]
# This compares scalarmath against ufuncs.
# This compares scalarmath against ufuncs.
class TestTypes(TestCase):
    """Compare scalar arithmetic (scalarmath) against the equivalent
    array/ufunc operations: for every pair of operand types the scalar
    result must carry exactly the same dtype as the array result.
    """

    def test_types(self, level=1):
        # Every scalar type must construct from a Python int and compare
        # equal to it.
        for atype in types:
            a = atype(1)
            assert a == 1, "error with %r: got %r" % (atype, a)

    def test_type_add(self, level=1):
        # scalar + scalar must promote to the same dtype as array + array.
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            for l, btype in enumerate(types):
                valb = btype(1)
                val2 = np.array([1], dtype=btype)
                val = vala + valb
                valo = val1 + val2
                assert val.dtype.num == valo.dtype.num and \
                    val.dtype.char == valo.dtype.char, \
                    "error with (%d,%d)" % (k, l)

    def test_type_subtract(self, level=1):
        # scalar - scalar must promote like array - array.
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            for l, btype in enumerate(types):
                valb = btype(1)
                val2 = np.array([1], dtype=btype)
                val = vala - valb
                valo = val1 - val2
                assert val.dtype.num == valo.dtype.num and \
                    val.dtype.char == valo.dtype.char, \
                    "error with (%d,%d)" % (k, l)

    def test_type_multiply(self, level=1):
        # scalar * scalar must promote like array * array.
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            for l, btype in enumerate(types):
                valb = btype(1)
                val2 = np.array([1], dtype=btype)
                val = vala * valb
                valo = val1 * val2
                assert val.dtype.num == valo.dtype.num and \
                    val.dtype.char == valo.dtype.char, \
                    "error with (%d,%d)" % (k, l)

    def test_type_divide(self, level=1):
        # Choose operands (6 / 2) that divide exactly so integer and float
        # types behave comparably.
        for k, atype in enumerate(types):
            vala = atype(6)
            val1 = np.array([6], dtype=atype)
            for l, btype in enumerate(types):
                valb = btype(2)
                val2 = np.array([2], dtype=btype)
                val = vala / valb
                valo = val1 / val2
                assert val.dtype.num == valo.dtype.num and \
                    val.dtype.char == valo.dtype.char, \
                    "error with (%d,%d)" % (k, l)

    def test_type_remainder(self, level=1):
        # Choose operands (6 % 2) that are valid for every numeric type.
        for k, atype in enumerate(types):
            vala = atype(6)
            val1 = np.array([6], dtype=atype)
            for l, btype in enumerate(types):
                valb = btype(2)
                val2 = np.array([2], dtype=btype)
                try:
                    val = vala % valb
                    valo = val1 % val2
                    if hasattr(val, "dtype") and hasattr(valo, "dtype"):
                        # Invalid operands in IronPython don't have dtype
                        # attributes.
                        assert val.dtype.num == valo.dtype.num and \
                            val.dtype.char == valo.dtype.char, \
                            "error with (%d,%d)" % (k, l)
                except TypeError:
                    # Py3-compatible spelling of ``except TypeError, e``.
                    # Some combos just don't work, like byte % complex; we
                    # don't classify those cases here, we simply ignore them.
                    pass

    def test_type_negative(self, level=1):
        # Unary minus. NOTE: unary operators don't require the double loop
        # over types since there's only one operand.
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            val = -vala
            valo = -val1
            assert val.dtype.num == valo.dtype.num and \
                val.dtype.char == valo.dtype.char, \
                "error with (%d)" % (k)

    def test_type_positive(self, level=1):
        # Unary plus; single operand, so no double loop needed.
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            val = +vala
            valo = +val1
            assert val.dtype.num == valo.dtype.num and \
                val.dtype.char == valo.dtype.char, \
                "error with (%d)" % (k)

    def test_type_power(self, level=1):
        # Use 2 ** 3 so the result is representable in every small type.
        for k, atype in enumerate(types):
            vala = atype(2)
            val1 = np.array([2], dtype=atype)
            # Skip the boolean types
            if vala.dtype.char == '?':
                continue
            for l, btype in enumerate(types):
                valb = btype(3)
                val2 = np.array([3], dtype=btype)
                # Skip the boolean types
                if valb.dtype.char == '?':
                    continue
                val = vala ** valb
                valo = val1 ** val2
                assert val.dtype.num == valo.dtype.num and \
                    val.dtype.char == valo.dtype.char, \
                    "error with (%d,%d)" % (k, l)

    def test_type_absolute(self, level=1):
        for k, atype in enumerate(types):
            vala = atype(-3)
            val1 = np.array([-3], dtype=atype)
            val = abs(vala)
            valo = abs(val1)
            assert val.dtype.num == valo.dtype.num and \
                val.dtype.char == valo.dtype.char, \
                "error with (%d)" % (k)
        # We can't check the numeric result here without excluding the
        # unsigned types (where -3 wraps), so only dtypes are compared.

    def test_type_hex(self, level=1):
        # There is little we can assert about hex() across types and
        # backends (repr equivalence is not guaranteed either), so simply
        # check the code executes without throwing unexpected exceptions.
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            try:
                hex(vala)
                hex(val1)
            except Exception:
                # Some types cannot be hexified; that is acceptable.
                pass

    def test_type_oct(self, level=1):
        # As with hex() above, only exercise oct() and make sure it does
        # not blow up in unexpected ways.
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            try:
                oct(vala)
                oct(val1)
            except Exception:
                # Some types cannot be converted to octal; acceptable.
                pass

    def test_type_float(self, level=1):
        for k, atype in enumerate(types):
            vala = atype(3)
            val1 = np.array([3], dtype=atype)
            try:
                val = float(vala)
                valo = float(val1)
            except TypeError:
                # Py3-compatible spelling of ``except TypeError, e``.
                # The complex types can't be cast to float; skip them.
                continue
            assert val == valo, "Trouble with float (%d)" % k
            # Skip over bool.
            if vala.dtype.char == '?':
                continue
            assert val == 3 and valo == 3, "Trouble with float (%d)" % k

    def test_gentype_nonzero(self):
        # This exercises gentype_nonzero, and thus may point the path to
        # executing other gentype_* functions.
        z = np.clongdouble(4 + 5j)
        np.nonzero(z)

    @dec.skipif(sys.platform == 'cli',
                "Construction of arrays by passing sequences to scalar constructors is not supported on IronPython")
    def test_type_create(self, level=1):
        # atype([1, 2, 3]) must build the same array as np.array(..., atype).
        for k, atype in enumerate(types):
            a = np.array([1, 2, 3], atype)
            b = atype([1, 2, 3])
            assert_equal(a, b)

    def test_scalartype_ops(self):
        # Bitwise/shift operators on integer scalar types.
        int_types = [np.byte, np.ubyte, np.short, np.ushort,
                     np.intc, np.uintc,
                     np.int_, np.uint, np.longlong, np.ulonglong]
        for t in int_types:
            x = t(7)
            y = x ^ x
            assert y == 0, "xor on scalartype"
            x = t(1)
            y = x << 1
            assert y == 2, "left shift on scalartype"
            # NOTE: y came back not the same type as t, so a right shift on
            # y doesn't exercise the <t>_rshift function. To get the
            # <t>_rshift, we have to go back to a <t> instance.
            y = t(2)
            z = y >> 1
            assert z == 1, "right shift on scalartype"
            assert np.invert(x) != 1, "invert on scalartype"
            assert np.invert(np.invert(x)) == x, "invert on scalartype"
            y = t(0)
            z = x & y
            assert z == 0, "bitwise and on scalartype"
            z = x | y
            assert z == 1, "bitwise or on scalartype"
            assert z, "nonzero on scalartype"
            x = t(0)
            assert ~x, "Invert on numpy scalar types"
        # Floor division and divmod on the real (non-complex) types.
        for t in real_types:
            x = t(5)
            assert x, "nonzero on scalartype"
            y = x // 2.
            assert y == 2, "floor divide on scalartype"
            y = t(2)
            n, r = divmod(x, y)
            assert n == t(2), "divmod on scalartype"
            assert r == t(1), "divmod on scalartype"
        for t in complex_types:
            x = t(5)
            assert x, "nonzero on complex scalartype"
            y = x // 2
            assert y == 2, "Floor divide on complex scalartype"
        # In-place true division plus long/int casts for every non-bool type.
        from operator import itruediv
        itruediv(z, x)
        for t in types[1:]:
            z = t(5)
            x = t(2)
            itruediv(z, x)
            x = t(5)
            # NOTE(review): np.long / np.int are aliases of the Python
            # builtins here; they were removed in numpy >= 1.24 -- confirm
            # the targeted numpy version before modernizing further.
            y = np.long(x)
            assert y == x, "Cast scalartype to long"
            y = np.int(x)
            assert y == x, "Cast scalartype to int"
class TestPower(TestCase):
    """Integer and floating-point fourth powers of scalar types."""

    def test_small_types(self):
        # 3 ** 4 == 81 fits even in int8.
        for t in (np.int8, np.int16):
            base = t(3)
            result = base ** 4
            assert result == 81, "error with %r: got %r" % (t, result)

    def test_large_types(self):
        # 51 ** 4 == 6765201 needs a wider type.
        for t in (np.int32, np.int64, np.float32, np.float64, np.longdouble):
            base = t(51)
            result = base ** 4
            msg = "error with %r: got %r" % (t, result)
            if np.issubdtype(t, np.integer):
                assert result == 6765201, msg
            else:
                assert_almost_equal(result, 6765201, err_msg=msg)
class TestConversion(TestCase):
    """Conversion of large-magnitude array elements to Python ints."""

    def test_int_from_long(self):
        l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
        li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
        for T in [None, np.float64, np.int64]:
            a = np.array(l, dtype=T)
            # Use a list comprehension rather than map(): on Python 3,
            # map() returns a lazy iterator which does not compare equal
            # to the expected list inside assert_equal.
            assert_equal([int(_m) for _m in a], li)
        a = np.array(l[:3], dtype=np.uint64)
        assert_equal([int(_m) for _m in a], li[:3])
#class TestRepr(TestCase):
# def test_repr(self):
# for t in types:
# val = t(1197346475.0137341)
# val_repr = repr(val)
# val2 = eval(val_repr)
# assert_equal( val, val2 )
class TestRepr(TestCase):
    """Check that repr() of tiny float scalars round-trips through eval()."""

    def _test_type_repr(self, t):
        """Round-trip the smallest denormal/normal values of float type t."""
        finfo = np.finfo(t)
        last_fraction_bit_idx = finfo.nexp + finfo.nmant
        last_exponent_bit_idx = finfo.nexp
        storage_bytes = np.dtype(t).itemsize * 8
        # could add some more types to the list below
        for which in ['small denorm', 'small norm']:
            # Values from http://en.wikipedia.org/wiki/IEEE_754
            constr = np.array([0x00] * storage_bytes, dtype=np.uint8)
            if which == 'small denorm':
                # Set only the least significant fraction bit.
                byte = last_fraction_bit_idx // 8
                bytebit = 7 - (last_fraction_bit_idx % 8)
                constr[byte] = 1 << bytebit
            elif which == 'small norm':
                # Set only the least significant exponent bit.
                byte = last_exponent_bit_idx // 8
                bytebit = 7 - (last_exponent_bit_idx % 8)
                constr[byte] = 1 << bytebit
            else:
                raise ValueError('hmm')
            val = constr.view(t)[0]
            val_repr = repr(val)
            val2 = t(eval(val_repr))
            if not (val2 == 0 and val < 1e-100):
                assert_equal(val, val2)

    def test_float_repr(self):
        # long double test cannot work, because eval goes through a python
        # float.
        for t in [np.float32, np.float64]:
            # Bug fix: the original ``yield test_float_repr, t`` referenced
            # an undefined global name and turned this unittest method into
            # a generator that never executed; call the helper directly.
            self._test_type_repr(t)
# Allow running this test module directly via numpy's test runner.
if __name__ == "__main__":
    run_module_suite()
|
jamespcole/home-assistant | refs/heads/master | homeassistant/components/aftership/sensor.py | 4 | """
Support for non-delivered packages recorded in AfterShip.
For more details about this platform, please refer to the documentation at
https://www.home-assistant.io/components/sensor.aftership/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
# External package Home Assistant installs for this platform.
REQUIREMENTS = ['pyaftership==0.1.2']

_LOGGER = logging.getLogger(__name__)

ATTRIBUTION = 'Information provided by AfterShip'

# Keys used when reading AfterShip tracking records.
CONF_SLUG = 'slug'
CONF_TITLE = 'title'
CONF_TRACKING_NUMBER = 'tracking_number'

DEFAULT_NAME = 'aftership'
ICON = 'mdi:package-variant-closed'

# Throttle API polls; tracking data changes slowly.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=30)

# Platform configuration: an API key is required, the name is optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the AfterShip sensor platform."""
    from pyaftership.tracker import Tracking

    session = async_get_clientsession(hass)
    aftership = Tracking(hass.loop, session, config[CONF_API_KEY])
    await aftership.get_trackings()

    meta = aftership.meta
    if not meta or meta['code'] != 200:
        # Bail out without adding entities when the initial query fails.
        _LOGGER.error("No tracking data found. Check API key is correct: %s",
                      meta)
        return

    async_add_entities([AfterShipSensor(aftership, config[CONF_NAME])], True)
class AfterShipSensor(Entity):
    """Sensor reporting the number of not-yet-delivered AfterShip packages."""

    def __init__(self, aftership, name):
        """Initialize the sensor."""
        self.aftership = aftership
        self._name = name
        self._state = None
        self._attributes = {}

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return 'packages'

    @property
    def device_state_attributes(self):
        """Return attributes for the sensor."""
        return self._attributes

    @property
    def icon(self):
        """Icon to use in the frontend."""
        return ICON

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def async_update(self):
        """Get the latest data from the AfterShip API."""
        await self.aftership.get_trackings()

        meta = self.aftership.meta
        if not meta:
            _LOGGER.error("Unknown errors when querying")
            return
        if meta['code'] != 200:
            _LOGGER.error(
                "Errors when querying AfterShip. %s", str(meta))
            return

        ignored_statuses = {'delivered'}
        per_status = {}
        pending = 0
        for entry in self.aftership.trackings['trackings']:
            tag = entry['tag'].lower()
            tracking_number = entry['tracking_number']
            per_status[tag] = per_status.get(tag, 0) + 1
            if tag in ignored_statuses:
                _LOGGER.debug(
                    "Ignoring %s as it has status: %s", tracking_number, tag)
            else:
                pending += 1

        attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
        attributes.update(per_status)
        self._attributes = attributes
        self._state = pending
|
paul80/CMPUT333A1 | refs/heads/master | blockreciever.py | 1 | import time
import random
import start
import statsandprint
import control
import sys
#def recieve_send_detect(numberofbits, error, feedbacktime, seed, doparity):
def recieve_send_detect(numberofbits, error, seed, doparity):
    """Simulate receiving one block over a noisy channel.

    Each transmitted bit (data bits plus Hamming parity bits when
    ``doparity`` is non-zero) is independently corrupted with probability
    ``error``.  A single corrupted bit is treated as correctable; two or
    more force a retransmission.

    Returns 1 when the block was received successfully, 0 when the whole
    frame must be retransmitted.  ``seed`` is accepted for interface
    compatibility but unused (the module-level random stream is used).
    """
    bit_error_rate = float(error)

    # Number of Hamming parity bits: smallest p with 2**p > data bits.
    parity_bits = 0
    if doparity != 0:
        while 2 ** parity_bits <= numberofbits:
            parity_bits += 1

    # numberofbits may arrive as a float; force an integer loop count.
    total_bits = int(numberofbits) + parity_bits
    corrupted = sum(
        1 for _ in range(total_bits) if random.random() < bit_error_rate
    )

    # More than one bit error cannot be corrected: request retransmit.
    return 0 if corrupted > 1 else 1
|
tectronics/bayes-swarm | refs/heads/master | mean-machine/ui/igraphdrawingarea.py | 1 | #!/usr/bin/env python
#
# Mean Machine: subclass gtk.DrawingArea in order to plot a graph
#
# == Author
# Matteo Zandi [matteo.zandi@bayesfor.eu]
#
# == Copyright
# Copyright(c) 2008 - bayes-swarm project.
# Licensed under the GNU General Public License v2.
# USA
import gtk
from gtk import gdk
import cairo
import igraph
class IGraphDrawingArea(gtk.DrawingArea):
    """gtk.DrawingArea subclass that renders an igraph graph with cairo."""

    def __init__(self):
        gtk.DrawingArea.__init__(self)
        #self.set_size_request(300, 300)
        # Repaint whenever GTK delivers an expose (damage) event.
        self.connect("expose_event", self.expose)
        # Graph to draw; stays None until change_graph() is called.
        self.g = None

    def expose(self, widget, event):
        """Handle the GTK expose event: redraw only the damaged region."""
        context = widget.window.cairo_create()
        # set a clip region for the expose event
        context.rectangle(event.area.x, event.area.y,
                          event.area.width, event.area.height)
        context.clip()
        self.draw(context)
        return False

    def draw(self, context):
        """Plot the current graph (if any) onto the given cairo context."""
        rect = self.get_allocation()
        # NOTE(review): cairo set_source_rgb expects floats in 0..1;
        # (255, 255, 255) is clamped to white, which appears intended.
        context.set_source_rgb(255, 255, 255)
        # FIXME: leave blank background while adding igraph plot
        if self.g is not None:
            # Render the plot to an offscreen surface, then paint it.
            surface = cairo.ImageSurface (cairo.FORMAT_ARGB32,
                                          rect.width,
                                          rect.height)
            plot = igraph.drawing.Plot(surface, (0, 0, rect.width, rect.height))
            # Fruchterman-Reingold layout seeded with stored coordinates so
            # successive redraws stay visually stable.
            seed_layout = self.g.layout("fr", seed = self.g.vs['fr_seed_coords'])
            plot.add(self.g, layout=seed_layout, margin=(20,20,20,20), weights = self.g.es['weight'], vertex_size = self.g.vs['size']) #, fixed = self.g.vs['fixed'])
            plot.redraw()
            context.set_source_surface (surface)
            context.paint()
        return False

    def redraw_canvas(self):
        """Invalidate the whole widget area and force an immediate repaint."""
        if self.window:
            alloc = self.get_allocation()
            rect = gdk.Rectangle(0, 0, alloc.width, alloc.height)
            self.window.invalidate_rect(rect, True)
            self.window.process_updates(True)

    def change_graph(self, g):
        """Replace the displayed graph and trigger a repaint."""
        self.g = g
        self.redraw_canvas()
|
getnikola/plugins | refs/heads/master | v7/slides/slides.py | 2 | # -*- coding: utf-8 -*-
# Copyright © 2012-2017 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Slides directive for reStructuredText."""
import uuid
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from nikola.plugin_categories import RestExtension
class Plugin(RestExtension):
    """Nikola plugin that registers the reST ``slides`` directive."""

    name = "rest_slides"

    def set_site(self, site):
        """Store the site on both the plugin and the directive, register
        the directive with docutils, and delegate to the base class."""
        self.site = site
        Slides.site = site
        directives.register_directive('slides', Slides)
        return super(Plugin, self).set_site(site)
class Slides(Directive):
    """reST extension for inserting slideshows."""

    has_content = True

    def run(self):
        """Render the directive content through the ``slides.tmpl`` template.

        Returns a list containing a single raw-HTML node (docutils requires
        directives to return a list of nodes).
        """
        if len(self.content) == 0:  # pragma: no cover
            # Bug fix: directives must return a list of nodes; returning
            # None made docutils crash on an empty ``.. slides::`` block.
            return []
        if self.site.invariant:  # for testing purposes
            carousel_id = 'slides_' + 'fixedvaluethatisnotauuid'
        else:
            # Unique id so several slideshows can coexist on one page.
            carousel_id = 'slides_' + uuid.uuid4().hex
        output = self.site.template_system.render_template(
            'slides.tmpl',
            None,
            {
                'slides_content': self.content,
                'carousel_id': carousel_id,
            }
        )
        return [nodes.raw('', output, format='html')]
# Module-level registration so the directive also works when this module is
# imported outside Nikola (Plugin.set_site registers it again; harmless).
directives.register_directive('slides', Slides)
|
jcrumb/httpie | refs/heads/master | tests/test_unicode.py | 49 | # coding=utf-8
"""
Various unicode handling related tests.
"""
from utils import http, HTTP_OK
from fixtures import UNICODE
class TestUnicode:
    """Unicode handling tests.

    Each test pushes the UNICODE fixture through a different part of the
    request (headers, form items, JSON, URL query, auth) and checks the
    response, with and without ``--verbose``.
    """

    def test_unicode_headers(self, httpbin):
        # httpbin doesn't interpret utf8 headers
        r = http(httpbin.url + '/headers', u'Test:%s' % UNICODE)
        assert HTTP_OK in r

    def test_unicode_headers_verbose(self, httpbin):
        # httpbin doesn't interpret utf8 headers
        r = http('--verbose', httpbin.url + '/headers', u'Test:%s' % UNICODE)
        assert HTTP_OK in r
        assert UNICODE in r

    def test_unicode_form_item(self, httpbin):
        r = http('--form', 'POST', httpbin.url + '/post', u'test=%s' % UNICODE)
        assert HTTP_OK in r
        assert r.json['form'] == {'test': UNICODE}

    def test_unicode_form_item_verbose(self, httpbin):
        r = http('--verbose', '--form',
                 'POST', httpbin.url + '/post', u'test=%s' % UNICODE)
        assert HTTP_OK in r
        assert UNICODE in r

    def test_unicode_json_item(self, httpbin):
        r = http('--json', 'POST', httpbin.url + '/post', u'test=%s' % UNICODE)
        assert HTTP_OK in r
        assert r.json['json'] == {'test': UNICODE}

    def test_unicode_json_item_verbose(self, httpbin):
        r = http('--verbose', '--json',
                 'POST', httpbin.url + '/post', u'test=%s' % UNICODE)
        assert HTTP_OK in r
        assert UNICODE in r

    def test_unicode_raw_json_item(self, httpbin):
        r = http('--json', 'POST', httpbin.url + '/post',
                 u'test:={ "%s" : [ "%s" ] }' % (UNICODE, UNICODE))
        assert HTTP_OK in r
        assert r.json['json'] == {'test': {UNICODE: [UNICODE]}}

    def test_unicode_raw_json_item_verbose(self, httpbin):
        # Bug fix: this "verbose" variant was byte-identical to the plain
        # test above; it now actually passes --verbose like every other
        # *_verbose test in this class.
        r = http('--verbose', '--json', 'POST', httpbin.url + '/post',
                 u'test:={ "%s" : [ "%s" ] }' % (UNICODE, UNICODE))
        assert HTTP_OK in r
        assert r.json['json'] == {'test': {UNICODE: [UNICODE]}}

    def test_unicode_url_query_arg_item(self, httpbin):
        r = http(httpbin.url + '/get', u'test==%s' % UNICODE)
        assert HTTP_OK in r
        assert r.json['args'] == {'test': UNICODE}, r

    def test_unicode_url_query_arg_item_verbose(self, httpbin):
        r = http('--verbose', httpbin.url + '/get', u'test==%s' % UNICODE)
        assert HTTP_OK in r
        assert UNICODE in r

    def test_unicode_url(self, httpbin):
        r = http(httpbin.url + u'/get?test=' + UNICODE)
        assert HTTP_OK in r
        assert r.json['args'] == {'test': UNICODE}

    # def test_unicode_url_verbose(self):
    #     r = http(httpbin.url + '--verbose', u'/get?test=' + UNICODE)
    #     assert HTTP_OK in r

    def test_unicode_basic_auth(self, httpbin):
        # it doesn't really authenticate us because httpbin
        # doesn't interpret the utf8-encoded auth
        http('--verbose', '--auth', u'test:%s' % UNICODE,
             httpbin.url + u'/basic-auth/test/' + UNICODE)

    def test_unicode_digest_auth(self, httpbin):
        # it doesn't really authenticate us because httpbin
        # doesn't interpret the utf8-encoded auth
        http('--auth-type=digest',
             '--auth', u'test:%s' % UNICODE,
             httpbin.url + u'/digest-auth/auth/test/' + UNICODE)
|
stiphyMT/plantcv | refs/heads/master | plantcv/plantcv/auto_crop.py | 2 | # Resize image
import os
import cv2
import numpy as np
from plantcv.plantcv import print_image
from plantcv.plantcv import plot_image
from plantcv.plantcv import params
from plantcv.plantcv import fatal_error
def auto_crop(img, obj, padding_x=0, padding_y=0, color='black'):
    """Crop an image to the bounding box of a contour, with optional padding.

    Inputs:
    img       = RGB or grayscale image data
    obj       = contours
    padding_x = integer or tuple to add padding the x direction
    padding_y = integer or tuple to add padding the y direction
    color     = either 'black', 'white', or 'image'

    Returns:
    cropped = cropped image

    :param img: numpy.ndarray
    :param obj: list
    :param padding_x: int
    :param padding_y: int
    :param color: str
    :return cropped: numpy.ndarray
    """
    params.device += 1

    img_copy = np.copy(img)
    img_copy2 = np.copy(img)

    # Get the height and width of the reference image
    height, width = np.shape(img)[:2]

    # Bounding rectangle of the contour; draw it on the debug copy.
    x, y, w, h = cv2.boundingRect(obj)
    cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 5)

    crop_img = img[y:y + h, x:x + w]

    # Symmetric padding for ints; (before, after) padding for tuples.
    if type(padding_x) is int and type(padding_y) is int:
        offsetx_left = int(np.rint(padding_x))
        offsetx_right = int(np.rint(padding_x))
        offsety_top = int(np.rint(padding_y))
        offsety_bottom = int(np.rint(padding_y))
    elif type(padding_x) is tuple and type(padding_y) is tuple:
        offsetx_left = padding_x[0]
        offsetx_right = padding_x[1]
        offsety_top = padding_y[0]
        offsety_bottom = padding_y[1]
    else:
        # Fixed message typo: the second parameter is padding_y.
        fatal_error('Both padding_x and padding_y parameters must be either int or tuple.')

    if color.upper() == 'BLACK':
        colorval = (0, 0, 0)
        cropped = cv2.copyMakeBorder(crop_img, offsety_top, offsety_bottom, offsetx_left, offsetx_right,
                                     cv2.BORDER_CONSTANT, value=colorval)
    elif color.upper() == 'WHITE':
        colorval = (255, 255, 255)
        cropped = cv2.copyMakeBorder(crop_img, offsety_top, offsety_bottom, offsetx_left, offsetx_right,
                                     cv2.BORDER_CONSTANT, value=colorval)
    elif color.upper() == 'IMAGE':
        # Check whether the padded region stays inside the original image.
        # Bug fix: the left-edge test previously used offsetx_right.
        if x - offsetx_left < 0 or y - offsety_top < 0 or \
                x + w + offsetx_right > width or y + h + offsety_bottom > height:
            # Padding would fall outside the image: crop without padding.
            cropped = img_copy2[y:y + h, x:x + w]
        else:
            # If padding is the image, crop the image with a buffer rather
            # than cropping and adding a buffer
            cropped = img_copy2[y - offsety_top:y + h + offsety_bottom,
                                x - offsetx_left:x + w + offsetx_right]
    else:
        fatal_error('Color was provided but ' + str(color) + ' is not "white", "black", or "image"!')

    if params.debug == 'print':
        print_image(img_copy, os.path.join(params.debug_outdir, str(params.device) + "_crop_area.png"))
        print_image(cropped, os.path.join(params.debug_outdir, str(params.device) + "_auto_cropped.png"))
    elif params.debug == 'plot':
        if len(np.shape(img_copy)) == 3:
            plot_image(img_copy)
            plot_image(cropped)
        else:
            plot_image(img_copy, cmap='gray')
            plot_image(cropped, cmap='gray')

    return cropped
|
abramhindle/UnnaturalCodeFork | refs/heads/master | python/testdata/launchpad/lib/lp/codehosting/puller/tests/test_worker.py | 1 | # Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Unit tests for worker.py."""
__metaclass__ = type
import gc
from StringIO import StringIO
import bzrlib.branch
from bzrlib.branch import BranchReferenceFormat
from bzrlib.bzrdir import BzrDir
from bzrlib.errors import (
IncompatibleRepositories,
NotBranchError,
NotStacked,
)
from bzrlib.revision import NULL_REVISION
from bzrlib.tests import (
TestCaseInTempDir,
TestCaseWithTransport,
)
from bzrlib.transport import get_transport
from lp.code.enums import BranchType
from lp.codehosting.puller.tests import (
FixedHttpServer,
PullerWorkerMixin,
)
from lp.codehosting.puller.worker import (
BadUrlLaunchpad,
BadUrlScheme,
BadUrlSsh,
BranchMirrorerPolicy,
ImportedBranchPolicy,
install_worker_ui_factory,
MirroredBranchPolicy,
PullerWorkerProtocol,
SafeBranchOpener,
WORKER_ACTIVITY_NETWORK,
)
from lp.codehosting.safe_open import (
AcceptAnythingPolicy,
BadUrl,
BranchOpenPolicy,
)
from lp.testing import TestCase
from lp.testing.factory import (
LaunchpadObjectFactory,
ObjectFactory,
)
def get_netstrings(line):
    """Parse `line` as a sequence of netstrings.

    A netstring is ``<decimal length>:<payload>,``; payloads are decoded
    one after another until the input is exhausted.

    :return: A list of strings.
    """
    strings = []
    remaining = line
    while remaining:
        colon = remaining.find(':')
        length = int(remaining[:colon])
        payload_end = colon + 1 + length
        strings.append(remaining[colon + 1:payload_end])
        # Every netstring must be terminated by a comma.
        assert ',' == remaining[payload_end], (
            'Expected %r == %r' % (',', remaining[payload_end]))
        remaining = remaining[payload_end + 1:]
    return strings
class PrearrangedStackedBranchPolicy(BranchMirrorerPolicy,
                                     AcceptAnythingPolicy):
    """A branch policy that returns a pre-configurable stack-on URL."""

    def __init__(self, stack_on_url):
        # Deliberately initialize only AcceptAnythingPolicy (not via
        # super()): the stack-on URL is then served by the override below.
        AcceptAnythingPolicy.__init__(self)
        # URL returned for every destination branch, regardless of source.
        self.stack_on_url = stack_on_url

    def getStackedOnURLForDestinationBranch(self, source_branch,
                                            destination_url):
        """Return the canned stack-on URL; both arguments are ignored."""
        return self.stack_on_url
class TestPullerWorker(TestCaseWithTransport, PullerWorkerMixin):
"""Test the mirroring functionality of PullerWorker."""
def setUp(self):
super(TestPullerWorker, self).setUp()
SafeBranchOpener.install_hook()
def test_mirror_opener_with_stacked_on_url(self):
# A PullerWorker for a mirrored branch gets a MirroredBranchPolicy as
# the policy of its branch_mirrorer. The default stacked-on URL is
# passed through.
url = '/~foo/bar/baz'
worker = self.makePullerWorker(
branch_type=BranchType.MIRRORED, default_stacked_on_url=url)
policy = worker.branch_mirrorer.policy
self.assertIsInstance(policy, MirroredBranchPolicy)
self.assertEqual(url, policy.stacked_on_url)
def test_mirror_opener_without_stacked_on_url(self):
# A PullerWorker for a mirrored branch get a MirroredBranchPolicy as
# the policy of its mirrorer. If a default stacked-on URL is not
# specified (indicated by an empty string), then the stacked_on_url is
# None.
worker = self.makePullerWorker(
branch_type=BranchType.MIRRORED, default_stacked_on_url='')
policy = worker.branch_mirrorer.policy
self.assertIsInstance(policy, MirroredBranchPolicy)
self.assertIs(None, policy.stacked_on_url)
def testImportedOpener(self):
# A PullerWorker for an imported branch gets a ImportedBranchPolicy as
# the policy of its branch_mirrorer.
worker = self.makePullerWorker(branch_type=BranchType.IMPORTED)
self.assertIsInstance(
worker.branch_mirrorer.policy, ImportedBranchPolicy)
def testMirrorActuallyMirrors(self):
# Check that mirror() will mirror the Bazaar branch.
source_tree = self.make_branch_and_tree('source-branch')
to_mirror = self.makePullerWorker(
source_tree.branch.base, self.get_url('dest'))
source_tree.commit('commit message')
to_mirror.mirrorWithoutChecks()
mirrored_branch = bzrlib.branch.Branch.open(to_mirror.dest)
self.assertEqual(
source_tree.last_revision(), mirrored_branch.last_revision())
def testMirrorEmptyBranch(self):
# We can mirror an empty branch.
source_branch = self.make_branch('source-branch')
to_mirror = self.makePullerWorker(
source_branch.base, self.get_url('dest'))
to_mirror.mirrorWithoutChecks()
mirrored_branch = bzrlib.branch.Branch.open(to_mirror.dest)
self.assertEqual(NULL_REVISION, mirrored_branch.last_revision())
def testCanMirrorWhenDestDirExists(self):
# We can mirror a branch even if the destination exists, and contains
# data but is not a branch.
source_tree = self.make_branch_and_tree('source-branch')
to_mirror = self.makePullerWorker(
source_tree.branch.base, self.get_url('destdir'))
source_tree.commit('commit message')
# Make the directory.
dest = get_transport(to_mirror.dest)
dest.create_prefix()
dest.mkdir('.bzr')
# 'dest' is not a branch.
self.assertRaises(
NotBranchError, bzrlib.branch.Branch.open, to_mirror.dest)
to_mirror.mirrorWithoutChecks()
mirrored_branch = bzrlib.branch.Branch.open(to_mirror.dest)
self.assertEqual(
source_tree.last_revision(), mirrored_branch.last_revision())
def testHttpTransportStillThere(self):
# We tweak the http:// transport in the worker. Make sure that it's
# still available after mirroring.
http = get_transport('http://example.com')
source_branch = self.make_branch('source-branch')
to_mirror = self.makePullerWorker(
source_branch.base, self.get_url('destdir'))
to_mirror.mirrorWithoutChecks()
new_http = get_transport('http://example.com')
self.assertEqual(get_transport('http://example.com').base, http.base)
self.assertEqual(new_http.__class__, http.__class__)
def test_defaultStackedOnBranchDoesNotForceStacking(self):
# If the policy supplies a stacked on URL but the source branch does
# not support stacking, the destination branch does not support
# stacking.
stack_on = self.make_branch('default-stack-on')
source_branch = self.make_branch('source-branch', format='pack-0.92')
self.assertFalse(source_branch._format.supports_stacking())
to_mirror = self.makePullerWorker(
source_branch.base, self.get_url('destdir'),
policy=PrearrangedStackedBranchPolicy(stack_on.base))
to_mirror.mirrorWithoutChecks()
dest = bzrlib.branch.Branch.open(self.get_url('destdir'))
self.assertFalse(dest._format.supports_stacking())
def test_defaultStackedOnBranchIncompatibleMirrorsOK(self):
# If the policy supplies a stacked on URL for a branch which is
# incompatible with the branch we're mirroring, the mirroring
# completes successfully and the destination branch is not stacked.
stack_on = self.make_branch('default-stack-on', format='2a')
source_branch = self.make_branch('source-branch', format='1.9')
to_mirror = self.makePullerWorker(
source_branch.base, self.get_url('destdir'),
policy=PrearrangedStackedBranchPolicy(stack_on.base))
to_mirror.mirrorWithoutChecks()
dest = bzrlib.branch.Branch.open(self.get_url('destdir'))
self.assertRaises(NotStacked, dest.get_stacked_on_url)
    def testCanMirrorWithIncompatibleRepos(self):
        # If the destination branch cannot be opened because its repository is
        # not compatible with that of the branch it is stacked on, we delete
        # the destination branch and start again.
        self.get_transport('dest').ensure_base()
        # Make a branch to stack on in 1.6 format
        self.make_branch('dest/stacked-on', format='1.6')
        # Make a branch stacked on this.
        stacked_branch = self.make_branch('dest/stacked', format='1.6')
        stacked_branch.set_stacked_on_url(self.get_url('dest/stacked-on'))
        # Delete the stacked-on branch and replace it with a 2a format branch.
        self.get_transport('dest').delete_tree('stacked-on')
        self.make_branch('dest/stacked-on', format='2a')
        # Check our setup: trying to open the stacked branch raises
        # IncompatibleRepositories.
        self.assertRaises(
            IncompatibleRepositories,
            bzrlib.branch.Branch.open, 'dest/stacked')
        source_branch = self.make_branch(
            'source-branch', format='2a')
        to_mirror = self.makePullerWorker(
            source_branch.base, self.get_url('dest/stacked'))
        # The branch can be mirrored without errors and the destination
        # location is upgraded to match the source format.
        to_mirror.mirrorWithoutChecks()
        mirrored_branch = bzrlib.branch.Branch.open(to_mirror.dest)
        self.assertEqual(
            source_branch.repository._format,
            mirrored_branch.repository._format)

    def getStackedOnUrlFromNetStringOutput(self, netstring_output):
        """Extract the stacked-on URL from a protocol netstring stream.

        Per test_branchChanged below, a message is encoded as the method
        name, the argument count, then the arguments; so the URL is the
        second netstring after the 'branchChanged' marker.
        """
        netstrings = get_netstrings(netstring_output)
        branchChanged_index = netstrings.index('branchChanged')
        return netstrings[branchChanged_index + 2]
    def testSendsStackedInfo(self):
        # When the puller worker stacks a branch, it reports the stacked on
        # URL to the master.
        base_branch = self.make_branch('base_branch', format='1.9')
        stacked_branch = self.make_branch('stacked-branch', format='1.9')
        protocol_output = StringIO()
        to_mirror = self.makePullerWorker(
            stacked_branch.base, self.get_url('destdir'),
            protocol=PullerWorkerProtocol(protocol_output),
            policy=PrearrangedStackedBranchPolicy(base_branch.base))
        to_mirror.mirror()
        stacked_on_url = self.getStackedOnUrlFromNetStringOutput(
            protocol_output.getvalue())
        self.assertEqual(base_branch.base, stacked_on_url)

    def testDoesntSendStackedInfoUnstackableFormat(self):
        # Mirroring an unstackable branch sends '' as the stacked-on location
        # to the master.
        source_branch = self.make_branch('source-branch', format='pack-0.92')
        protocol_output = StringIO()
        to_mirror = self.makePullerWorker(
            source_branch.base, self.get_url('destdir'),
            protocol=PullerWorkerProtocol(protocol_output))
        to_mirror.mirror()
        stacked_on_url = self.getStackedOnUrlFromNetStringOutput(
            protocol_output.getvalue())
        self.assertEqual('', stacked_on_url)

    def testDoesntSendStackedInfoNotStacked(self):
        # Mirroring a non-stacked branch sends '' as the stacked-on location
        # to the master.
        source_branch = self.make_branch('source-branch', format='1.9')
        protocol_output = StringIO()
        to_mirror = self.makePullerWorker(
            source_branch.base, self.get_url('destdir'),
            protocol=PullerWorkerProtocol(protocol_output))
        to_mirror.mirror()
        stacked_on_url = self.getStackedOnUrlFromNetStringOutput(
            protocol_output.getvalue())
        self.assertEqual('', stacked_on_url)
class TestReferenceOpener(TestCaseWithTransport):
    """Feature tests for safe opening of branch references."""

    def setUp(self):
        super(TestReferenceOpener, self).setUp()
        SafeBranchOpener.install_hook()

    def createBranchReference(self, url):
        """Create a pure branch reference that points to the specified URL.

        :param url: target of the branch reference.
        :return: file url to the created pure branch reference.
        """
        # XXX DavidAllouche 2007-09-12 bug=139109:
        # We do this manually because the bzrlib API does not support creating
        # a branch reference without opening it.
        t = get_transport(self.get_url('.'))
        t.mkdir('reference')
        a_bzrdir = BzrDir.create(self.get_url('reference'))
        branch_reference_format = BranchReferenceFormat()
        branch_transport = a_bzrdir.get_branch_transport(
            branch_reference_format)
        # A branch reference on disk is a 'location' file naming the target
        # plus the reference format's marker string in 'format'.
        branch_transport.put_bytes('location', url)
        branch_transport.put_bytes(
            'format', branch_reference_format.get_format_string())
        return a_bzrdir.root_transport.base

    def testCreateBranchReference(self):
        # createBranchReference creates a branch reference and returns a URL
        # that points to that branch reference.

        # First create a branch and a reference to that branch.
        target_branch = self.make_branch('repo')
        reference_url = self.createBranchReference(target_branch.base)

        # References are transparent, so we can't test much about them. The
        # least we can do is confirm that the reference URL isn't the branch
        # URL.
        self.assertNotEqual(reference_url, target_branch.base)

        # Open the branch reference and check that the result is indeed the
        # branch we wanted it to point at.
        opened_branch = bzrlib.branch.Branch.open(reference_url)
        self.assertEqual(opened_branch.base, target_branch.base)

    def testFollowReferenceValue(self):
        # SafeBranchOpener.followReference gives the reference value for
        # a branch reference.
        opener = SafeBranchOpener(BranchOpenPolicy())
        reference_value = 'http://example.com/branch'
        reference_url = self.createBranchReference(reference_value)
        self.assertEqual(
            reference_value, opener.followReference(reference_url))

    def testFollowReferenceNone(self):
        # SafeBranchOpener.followReference gives None for a normal branch.
        self.make_branch('repo')
        branch_url = self.get_url('repo')
        opener = SafeBranchOpener(BranchOpenPolicy())
        self.assertIs(None, opener.followReference(branch_url))
class TestMirroredBranchPolicy(TestCase):
    """Tests specific to `MirroredBranchPolicy`."""

    def setUp(self):
        super(TestMirroredBranchPolicy, self).setUp()
        self.factory = LaunchpadObjectFactory()

    # URL-scheme checks: each disallowed scheme or host raises a specific
    # BadUrl subclass from checkOneURL.

    def testNoFileURL(self):
        policy = MirroredBranchPolicy()
        self.assertRaises(
            BadUrlScheme, policy.checkOneURL,
            self.factory.getUniqueURL(scheme='file'))

    def testNoUnknownSchemeURLs(self):
        policy = MirroredBranchPolicy()
        self.assertRaises(
            BadUrlScheme, policy.checkOneURL,
            self.factory.getUniqueURL(scheme='decorator+scheme'))

    def testNoSSHURL(self):
        policy = MirroredBranchPolicy()
        self.assertRaises(
            BadUrlSsh, policy.checkOneURL,
            self.factory.getUniqueURL(scheme='bzr+ssh'))

    def testNoSftpURL(self):
        policy = MirroredBranchPolicy()
        self.assertRaises(
            BadUrlSsh, policy.checkOneURL,
            self.factory.getUniqueURL(scheme='sftp'))

    def testNoLaunchpadURL(self):
        policy = MirroredBranchPolicy()
        self.assertRaises(
            BadUrlLaunchpad, policy.checkOneURL,
            self.factory.getUniqueURL(host='bazaar.launchpad.dev'))

    def testNoHTTPSLaunchpadURL(self):
        policy = MirroredBranchPolicy()
        self.assertRaises(
            BadUrlLaunchpad, policy.checkOneURL,
            self.factory.getUniqueURL(
                host='bazaar.launchpad.dev', scheme='https'))

    def testNoOtherHostLaunchpadURL(self):
        policy = MirroredBranchPolicy()
        self.assertRaises(
            BadUrlLaunchpad, policy.checkOneURL,
            self.factory.getUniqueURL(host='code.launchpad.dev'))

    def testLocalhost(self):
        # Hostnames listed in the codehosting blacklist config are rejected.
        self.pushConfig(
            'codehosting', blacklisted_hostnames='localhost,127.0.0.1')
        policy = MirroredBranchPolicy()
        localhost_url = self.factory.getUniqueURL(host='localhost')
        self.assertRaises(BadUrl, policy.checkOneURL, localhost_url)
        localhost_url = self.factory.getUniqueURL(host='127.0.0.1')
        self.assertRaises(BadUrl, policy.checkOneURL, localhost_url)

    def test_no_stacked_on_url(self):
        # By default, a MirroredBranchPolicy does not stack branches.
        policy = MirroredBranchPolicy()
        # This implementation of the method doesn't actually care about the
        # arguments.
        self.assertIs(
            None, policy.getStackedOnURLForDestinationBranch(None, None))

    def test_specified_stacked_on_url(self):
        # If a default stacked-on URL is specified, then the
        # MirroredBranchPolicy will tell branches to be stacked on that.
        stacked_on_url = '/foo'
        policy = MirroredBranchPolicy(stacked_on_url)
        destination_url = 'http://example.com/bar'
        self.assertEqual(
            '/foo',
            policy.getStackedOnURLForDestinationBranch(None, destination_url))

    def test_stacked_on_url_for_mirrored_branch(self):
        # If the default stacked-on URL is also the URL for the branch being
        # mirrored, then the stacked-on URL for destination branch is None.
        stacked_on_url = '/foo'
        policy = MirroredBranchPolicy(stacked_on_url)
        destination_url = 'http://example.com/foo'
        self.assertIs(
            None,
            policy.getStackedOnURLForDestinationBranch(None, destination_url))
class TestWorkerProtocol(TestCaseInTempDir, PullerWorkerMixin):
    """Tests for the client-side implementation of the protocol used to
    communicate to the master process.

    As exercised below, each message is encoded as the method name, the
    argument count, then the arguments themselves, all as netstrings.
    """

    def setUp(self):
        TestCaseInTempDir.setUp(self)
        self.output = StringIO()
        self.protocol = PullerWorkerProtocol(self.output)
        self.factory = ObjectFactory()

    def assertSentNetstrings(self, expected_netstrings):
        """Assert that the protocol sent the given netstrings (in order)."""
        observed_netstrings = get_netstrings(self.output.getvalue())
        self.assertEqual(expected_netstrings, observed_netstrings)

    def resetBuffers(self):
        # Empty the test output and error buffers.
        self.output.truncate(0)
        self.assertEqual('', self.output.getvalue())

    def test_nothingSentOnConstruction(self):
        # The protocol sends nothing until it receives an event.
        self.branch_to_mirror = self.makePullerWorker(protocol=self.protocol)
        self.assertSentNetstrings([])

    def test_startMirror(self):
        # Calling startMirroring sends 'startMirroring' as a netstring,
        # followed by its argument count (zero).
        self.protocol.startMirroring()
        self.assertSentNetstrings(['startMirroring', '0'])

    def test_branchChanged(self):
        # Calling 'branchChanged' sends the arguments.
        arbitrary_args = [self.factory.getUniqueString() for x in range(6)]
        self.protocol.startMirroring()
        self.resetBuffers()
        self.protocol.branchChanged(*arbitrary_args)
        self.assertSentNetstrings(['branchChanged', '6'] + arbitrary_args)

    def test_mirrorFailed(self):
        # Calling 'mirrorFailed' sends the error message.
        self.protocol.startMirroring()
        self.resetBuffers()
        self.protocol.mirrorFailed('Error Message', 'OOPS')
        self.assertSentNetstrings(
            ['mirrorFailed', '2', 'Error Message', 'OOPS'])

    def test_progressMade(self):
        # Calling 'progressMade' sends an arbitrary string indicating
        # progress.
        # NOTE(review): the expected message has an argument count of zero,
        # i.e. the 'test' payload is not transmitted -- presumably the master
        # only needs to know progress happened; confirm against
        # PullerWorkerProtocol.progressMade.
        self.protocol.progressMade('test')
        self.assertSentNetstrings(['progressMade', '0'])

    def test_log(self):
        # Calling 'log' sends 'log' as a netstring and its arguments, after
        # formatting as a string.
        self.protocol.log('logged %s', 'message')
        self.assertSentNetstrings(['log', '1', 'logged message'])
class TestWorkerProgressReporting(TestCaseWithTransport):
    """Tests for the progress reporting mechanism."""

    class StubProtocol:
        """A stub for PullerWorkerProtocol that just defines progressMade."""
        def __init__(self):
            # Records each progress type reported by the worker UI factory.
            self.calls = []
        def progressMade(self, type):
            self.calls.append(type)

    def setUp(self):
        super(TestWorkerProgressReporting, self).setUp()
        SafeBranchOpener.install_hook()
        # install_worker_ui_factory (used in the tests below) replaces the
        # global bzrlib UI factory; restore the original on teardown.
        self.saved_factory = bzrlib.ui.ui_factory
        self.disable_directory_isolation()
        self.addCleanup(setattr, bzrlib.ui, 'ui_factory', self.saved_factory)

    def getHttpServerForCwd(self):
        """Get an `HttpServer` instance that serves from '.'."""
        server = FixedHttpServer()
        server.start_server()
        self.addCleanup(server.stop_server)
        # The gc.collect allows the threads behind any HTTP requests to exit.
        self.addCleanup(gc.collect)
        return server

    def test_simple(self):
        # Even the simplest of pulls should call progressMade at least once.
        b1 = self.make_branch('some-branch')
        b2_tree = self.make_branch_and_tree('some-other-branch')
        b2_tree.commit('rev1', allow_pointless=True)
        p = self.StubProtocol()
        install_worker_ui_factory(p)
        b1.pull(b2_tree.branch)
        self.assertPositive(len(p.calls))

    def test_network(self):
        # Even the simplest of pulls over a transport that reports activity
        # (here, HTTP) should call progressMade with a type of 'activity'.
        b1 = self.make_branch('some-branch')
        b2_tree = self.make_branch_and_tree('some-other-branch')
        b2_tree.commit('rev1', allow_pointless=True)
        http_server = self.getHttpServerForCwd()
        p = self.StubProtocol()
        install_worker_ui_factory(p)
        b2_http = bzrlib.branch.Branch.open(
            http_server.get_url() + 'some-other-branch')
        b1.pull(b2_http)
        self.assertSubset([WORKER_ACTIVITY_NETWORK], p.calls)
|
2014c2g1/c2g1 | refs/heads/master | exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/xml/etree/cElementTree.py | 876 | # Deprecated alias for xml.etree.ElementTree
from xml.etree.ElementTree import *
|
SusanJL/iris | refs/heads/master | docs/iris/example_tests/test_global_map.py | 11 | # (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import Iris tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from .extest_util import (add_examples_to_path,
show_replaced_by_check_graphic,
fail_any_deprecation_warnings)
class TestGlobalMap(tests.GraphicsTest):
    """Test the global_map example code."""

    def test_global_map(self):
        # Make the examples directory importable, run the example's main(),
        # and let the graphics test machinery compare the resulting figure
        # against the stored reference image.
        with fail_any_deprecation_warnings():
            with add_examples_to_path():
                import global_map
            with show_replaced_by_check_graphic(self):
                global_map.main()


if __name__ == '__main__':
    tests.main()
|
rouge8/pex | refs/heads/master | tests/test_crawler.py | 2 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from twitter.common.contextutil import temporary_dir
from pex.crawler import Crawler, PageParser
from pex.http import Context
from pex.link import Link
try:
from unittest import mock
except ImportError:
import mock
def lpp(page):
    """Parse *page* with PageParser; return ([links], [rel_links])."""
    return (list(PageParser.links(page)),
            list(PageParser.rel_links(page)))
def test_page_parser_empty():
    # An empty page yields no links and no rel links.
    links, rels = lpp("")
    assert links == []
    assert rels == []


def test_page_parser_basic():
    # 'href' without an enclosing anchor tag is not a link.
    for target in ('href', 'href =', 'href =""'):
        assert lpp(target.lower()) == ([], [])
        assert lpp(target.upper()) == ([], [])
    # An anchor with an empty href produces an empty-string link.
    for target in ('a href=', 'a href=""'):
        assert lpp(target.lower()) == ([''], [])
        assert lpp(target.upper()) == ([''], [])
    assert lpp('a href=11') == (['11'], [])
    assert lpp('a href=12') == (['12'], [])
    # hrefs are extracted whatever the quoting style: bare, double or
    # single quotes; multiple anchors come back in document order.
    for href in ('pooping', '{};a[32[32{#@'):
        for start, end in (('', ''), ('"', '"'), ("'", "'")):
            target = '%s%s%s' % (start, href, end)
            assert lpp('<a href=%s>' % target) == ([href], [])
            assert lpp("<a href=%s>" % target) == ([href], [])
            assert lpp('anything <a href=%s> anything' % target) == ([href], [])
            assert lpp("<a href=%s> <a href='stuff'>" % target) == ([href, 'stuff'], [])
            assert lpp("<a href='stuff'> <a href=%s>" % target) == (['stuff', href], [])
def test_page_parser_escaped_html():
    """An entity-escaped '&amp;' in a page href comes back unescaped.

    This copy of the test had been HTML-unescaped at some point: the
    '&amp;' literal collapsed to '&' (making the replace a no-op) and
    '&param' in the URL was mangled into '\xb6m' via the '&para;' entity.
    Restored here so the test actually exercises entity unescaping.
    """
    url = 'url?param1=val&param2=val2'
    # Build the page with the ampersand entity-escaped, as HTML would be.
    link = 'a href="%s"' % url.replace('&', '&amp;')
    assert lpp(link) == ([url], [])
def test_page_parser_rels():
    # Anchors carrying a rel= attribute of a recognised type are reported in
    # the rel-link list as well as the ordinary link list; unknown rel
    # values are only ordinary links.
    VALID_RELS = tuple(PageParser.REL_TYPES)
    for rel in VALID_RELS + ('', ' ', 'blah'):
        for start, end in (('', ''), ('"', '"'), ("'", "'")):
            target = 'rel=%s%s%s' % (start, rel, end)
            links, rels = lpp("<a href='things' %s> <a href='stuff'>" % target)
            assert links == ['things', 'stuff']
            if rel in VALID_RELS:
                assert rels == ['things']
            else:
                assert rels == []
            links, rels = lpp("<a href='stuff' %s> <a href='things'>" % target)
            assert links == ['stuff', 'things']
            if rel in VALID_RELS:
                assert rels == ['stuff']
            else:
                assert rels == []


def test_page_parser_skips_data_rels():
    # rel=download links whose target looks like a data file (per
    # REL_SKIP_EXTENSIONS) are excluded from the rel list; page-like
    # extensions are kept.
    for ext in PageParser.REL_SKIP_EXTENSIONS:
        things = 'things%s' % ext
        assert lpp("<a href='%s' rel=download>" % things) == ([things], [])
    for ext in ('.html', '.xml', '', '.txt', '.tar.gz.txt'):
        things = 'things%s' % ext
        assert lpp("<a href='%s' rel=download>" % things) == ([things], [things])
def test_crawler_local():
    # Lay out a directory with three files at the top plus two
    # subdirectories each containing the same three files.
    FL = ('a.txt', 'b.txt', 'c.txt')
    with temporary_dir() as td:
        for fn in FL:
            with open(os.path.join(td, fn), 'w'):
                pass
        for dn in (1, 2):
            os.mkdir(os.path.join(td, 'dir%d' % dn))
            for fn in FL:
                with open(os.path.join(td, 'dir%d' % dn, fn), 'w'):
                    pass

        # basic file / dir rel splitting
        links, rels = Crawler.crawl_local(Link.wrap(td))
        assert set(links) == set(Link.wrap(os.path.join(td, fn)) for fn in FL)
        assert set(rels) == set(Link.wrap(os.path.join(td, 'dir%d' % n)) for n in (1, 2))

        # recursive crawling, single vs multi-threaded
        # NOTE(review): the 'caching' loop variable is never used in the
        # body -- it only repeats the crawl; confirm whether a cache flag
        # was meant to be passed to Crawler here.
        for caching in (False, True):
            for threads in (1, 2, 3):
                links = Crawler(threads=threads).crawl([td], follow_links=True)
                expect_links = (set(Link.wrap(os.path.join(td, fn)) for fn in FL) |
                                set(Link.wrap(os.path.join(td, 'dir1', fn)) for fn in FL) |
                                set(Link.wrap(os.path.join(td, 'dir2', fn)) for fn in FL))
                assert set(links) == expect_links
def test_crawler_unknown_scheme():
    """Crawling a URL with an unsupported scheme yields no links.

    The previous version built the comparison but discarded its result (no
    ``assert``), so the test could never fail; it also compared against a
    tuple, while crawl() returns a set of links (see test_crawler_remote).
    """
    # skips unknown url schemes
    assert Crawler().crawl('ftp://ftp.cdrom.com') == set()
MOCK_INDEX_TMPL = '''
<h1>Index of /home/third_party/python</h1>
<table>
<tr>
<td valign="top"><img src="/icons/back.gif" alt="[DIR]"></td>
<td> </td>
<td align="right"> - </td>
<td> </td>
</tr>
%s
</table>
'''
MOCK_INDEX_A = MOCK_INDEX_TMPL % '''
<tr>
<td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td>
<td><a href="3to2-1.0.tar.gz">3to2-1.0.tar.gz</a></td>
<td align="right">16-Apr-2015 23:18 </td>
<td align="right"> 45K</td>
<td>GZIP compressed docume></td>
</tr>
'''
MOCK_INDEX_B = MOCK_INDEX_TMPL % '''
<tr>
<td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td>
<td>
<a href="APScheduler-2.1.0.tar.gz">APScheduler-2.1.0.tar.gz</a>
</td>
<td align="right">16-Apr-2015 23:18 </td>
<td align="right"> 41K</td>
<td>GZIP compressed docume></td>
</tr>
'''
def test_crawler_remote():
    # Two remote index pages, each served once by the mocked Context; the
    # third side_effect guards against any extra fetch.
    Crawler.reset_cache()
    mock_context = mock.create_autospec(Context, spec_set=True)
    mock_context.content.side_effect = [MOCK_INDEX_A, MOCK_INDEX_B, Exception('shouldnt get here')]
    expected_output = set([Link('http://url1.test.com/3to2-1.0.tar.gz'),
                           Link('http://url2.test.com/APScheduler-2.1.0.tar.gz')])

    c = Crawler(mock_context)
    test_links = [Link('http://url1.test.com'), Link('http://url2.test.com')]
    assert c.crawl(test_links) == expected_output

    # Test memoization of Crawler.crawl(): a second crawl must be answered
    # from the cache (fetching again would hit the Exception side effect).
    assert c.crawl(test_links) == expected_output

# TODO(wickman): test page decoding via mock
|
varunkamra/kuma | refs/heads/master | vendor/packages/logilab/astng/setup.py | 24 | #!/usr/bin/env python
# pylint: disable=W0404,W0622,W0704,W0613
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-astng is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""Generic Setup script, takes package info from __pkginfo__.py file.
"""
__docformat__ = "restructuredtext en"
import os
import sys
import shutil
from os.path import isdir, exists, join
try:
if os.environ.get('NO_SETUPTOOLS'):
raise ImportError()
from setuptools import setup
from setuptools.command import install_lib
USE_SETUPTOOLS = 1
except ImportError:
from distutils.core import setup
from distutils.command import install_lib
USE_SETUPTOOLS = 0
try:
# python3
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
# python2.x
from distutils.command.build_py import build_py
sys.modules.pop('__pkginfo__', None)
# import optional features
__pkginfo__ = __import__("__pkginfo__")
# import required features
from __pkginfo__ import modname, version, license, description, \
web, author, author_email
distname = getattr(__pkginfo__, 'distname', modname)
scripts = getattr(__pkginfo__, 'scripts', [])
data_files = getattr(__pkginfo__, 'data_files', None)
subpackage_of = getattr(__pkginfo__, 'subpackage_of', None)
include_dirs = getattr(__pkginfo__, 'include_dirs', [])
ext_modules = getattr(__pkginfo__, 'ext_modules', None)
install_requires = getattr(__pkginfo__, 'install_requires', None)
dependency_links = getattr(__pkginfo__, 'dependency_links', [])
classifiers = getattr(__pkginfo__, 'classifiers', [])
STD_BLACKLIST = ('CVS', '.svn', '.hg', 'debian', 'dist', 'build')
IGNORED_EXTENSIONS = ('.pyc', '.pyo', '.elc', '~')
if exists('README'):
long_description = open('README').read()
else:
long_description = ''
def ensure_scripts(linux_scripts):
    """Creates the proper script names required for each platform
    (taken from 4Suite)
    """
    from distutils import util
    if util.get_platform().startswith('win'):
        return [name + '.bat' for name in linux_scripts]
    return linux_scripts
def get_packages(directory, prefix):
    """return a list of subpackages for the given directory"""
    found = []
    for entry in os.listdir(directory):
        path = join(directory, entry)
        if not isdir(path):
            continue
        # A directory counts as a package if it has an __init__.py, or is
        # one of the conventional test directories.
        if exists(join(path, '__init__.py')) or entry in ('test', 'tests'):
            dotted = '%s.%s' % (prefix, entry) if prefix else entry
            found.append(dotted)
            found += get_packages(path, dotted)
    return found
# Contents written into a namespace package's __init__.py when we have to
# create it ourselves (see MyInstallLib below): declares the namespace via
# pkg_resources when setuptools is available, and is a harmless no-op
# otherwise.
EMPTY_FILE = '''"""generated file, don't modify or your data will be lost"""
try:
    __import__('pkg_resources').declare_namespace(__name__)
except ImportError:
    pass
'''


class MyInstallLib(install_lib.install_lib):
    """extend install_lib command to handle package __init__.py if necessary
    """
    def run(self):
        """overridden from install_lib class"""
        install_lib.install_lib.run(self)
        # create Products.__init__.py if needed
        if subpackage_of:
            product_init = join(self.install_dir, subpackage_of, '__init__.py')
            if not exists(product_init):
                self.announce('creating %s' % product_init)
                # Write the namespace declaration so the parent package is
                # importable even though only our files were installed there.
                stream = open(product_init, 'w')
                stream.write(EMPTY_FILE)
                stream.close()
class MyBuildPy(build_py):
    """extend build_by command to handle include_dirs variable if necessary
    """
    def run(self):
        """overridden from install_lib class"""
        build_py.run(self)
        # manually install included directories if any
        if include_dirs:
            if subpackage_of:
                base = join(subpackage_of, modname)
            else:
                base = modname
            basedir = os.path.join(self.build_lib, base)
            for directory in include_dirs:
                dest = join(basedir, directory)
                # Replace any stale copy wholesale before copying afresh.
                shutil.rmtree(dest, ignore_errors=True)
                shutil.copytree(directory, dest)
            if sys.version_info >= (3, 0):
                # process manually python file in include_dirs (test data),
                # since build_py's own 2to3 pass only covers real packages.
                from subprocess import check_call
                print('running 2to3 on', dest)  # parens are NOT optional here for py3k compat
                check_call(['2to3', '-wn', dest])
def install(**kwargs):
    """setup entry point"""
    if USE_SETUPTOOLS:
        if '--force-manifest' in sys.argv:
            # distutils-only flag that setuptools does not accept.
            sys.argv.remove('--force-manifest')
        # install-layout option was introduced in 2.5.3-1~exp1
        elif sys.version_info < (2, 5, 4) and '--install-layout=deb' in sys.argv:
            sys.argv.remove('--install-layout=deb')
    # When this distribution is a subpackage of a larger namespace package,
    # map the sources under that namespace; otherwise under modname directly.
    if subpackage_of:
        package = subpackage_of + '.' + modname
        kwargs['package_dir'] = {package : '.'}
        packages = [package] + get_packages(os.getcwd(), package)
        if USE_SETUPTOOLS:
            kwargs['namespace_packages'] = [subpackage_of]
    else:
        kwargs['package_dir'] = {modname : '.'}
        packages = [modname] + get_packages(os.getcwd(), modname)
    if USE_SETUPTOOLS and install_requires:
        kwargs['install_requires'] = install_requires
        kwargs['dependency_links'] = dependency_links
    kwargs['packages'] = packages
    # All remaining metadata comes from __pkginfo__ (imported at module
    # level); the custom cmdclass hooks handle include_dirs and namespace
    # __init__ creation.
    return setup(name = distname,
                 version = version,
                 license = license,
                 description = description,
                 long_description = long_description,
                 classifiers = classifiers,
                 author = author,
                 author_email = author_email,
                 url = web,
                 scripts = ensure_scripts(scripts),
                 data_files = data_files,
                 ext_modules = ext_modules,
                 cmdclass = {'install_lib': MyInstallLib,
                             'build_py': MyBuildPy},
                 **kwargs
                 )

if __name__ == '__main__' :
    install()
|
MebiusHKU/flask-web | refs/heads/master | flask/lib/python2.7/site-packages/whoosh/idsets.py | 52 | """
An implementation of an object that acts like a collection of on/off bits.
"""
import operator
from array import array
from bisect import bisect_left, bisect_right, insort
from whoosh.compat import integer_types, izip, izip_longest, next, xrange
from whoosh.util.numeric import bytes_for_bits
# Number of '1' bits in each byte value (0-255): a popcount lookup table,
# computed instead of spelled out as a 256-entry literal.
_1SPERBYTE = array('B', (bin(value).count('1') for value in range(256)))
class DocIdSet(object):
    """Base class for a set of positive integers, implementing a subset of the
    built-in ``set`` type's interface with extra docid-related methods.

    This is a superclass for alternative set implementations to the built-in
    ``set`` which are more memory-efficient and specialized toward storing
    sorted lists of positive integers, though they will inevitably be slower
    than ``set`` for most operations since they're pure Python.
    """

    def __eq__(self, other):
        # Element-wise comparison. The unique sentinel makes a length
        # mismatch show up as an unequal pair; plain izip would stop at the
        # shorter operand and silently treat a strict prefix as equal.
        sentinel = object()
        for a, b in izip_longest(self, other, fillvalue=sentinel):
            if a != b:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    # Backward-compatible alias: this hook was previously defined under the
    # misspelled name __neq__, which Python never invokes for ``!=``.
    __neq__ = __ne__

    def __len__(self):
        raise NotImplementedError

    def __iter__(self):
        raise NotImplementedError

    def __contains__(self, i):
        raise NotImplementedError

    def __or__(self, other):
        return self.union(other)

    def __and__(self, other):
        return self.intersection(other)

    def __sub__(self, other):
        return self.difference(other)

    def copy(self):
        """Return an independent copy of this set."""
        raise NotImplementedError

    def add(self, n):
        raise NotImplementedError

    def discard(self, n):
        raise NotImplementedError

    def update(self, other):
        """Add every integer in *other* to this set (in-place union)."""
        add = self.add
        for i in other:
            add(i)

    def intersection_update(self, other):
        # NOTE(review): discards while iterating self; subclass iterators
        # must tolerate removal of already-yielded members -- confirm for
        # each concrete implementation.
        for n in self:
            if n not in other:
                self.discard(n)

    def difference_update(self, other):
        for n in other:
            self.discard(n)

    def invert_update(self, size):
        """Updates the set in-place to contain numbers in the range
        ``[0 - size)`` except numbers that are in this set.
        """
        for i in xrange(size):
            if i in self:
                self.discard(i)
            else:
                self.add(i)

    def intersection(self, other):
        c = self.copy()
        c.intersection_update(other)
        return c

    def union(self, other):
        c = self.copy()
        c.update(other)
        return c

    def difference(self, other):
        c = self.copy()
        c.difference_update(other)
        return c

    def invert(self, size):
        c = self.copy()
        c.invert_update(size)
        return c

    def isdisjoint(self, other):
        """True if this set and *other* have no members in common."""
        # Iterate the smaller collection and probe the larger one.
        a = self
        b = other
        if len(other) < len(self):
            a, b = other, self

        for num in a:
            if num in b:
                return False
        return True

    def before(self, i):
        """Returns the previous integer in the set before ``i``, or None.
        """
        raise NotImplementedError

    def after(self, i):
        """Returns the next integer in the set after ``i``, or None.
        """
        raise NotImplementedError

    def first(self):
        """Returns the first (lowest) integer in the set.
        """
        raise NotImplementedError

    def last(self):
        """Returns the last (highest) integer in the set.
        """
        raise NotImplementedError
class BaseBitSet(DocIdSet):
    """Shared machinery for bit-array backed DocIdSets.

    Subclasses only supply the byte count and byte access; length,
    iteration, membership and first/last/before/after are derived here.
    Bit ``i`` of byte ``b`` represents the integer ``b * 8 + i``.
    """

    # Methods to override

    def byte_count(self):
        # Number of bytes in the underlying bit array.
        raise NotImplementedError

    def _get_byte(self, i):
        # Return byte i of the bit array as an int (0-255).
        raise NotImplementedError

    def _iter_bytes(self):
        # Yield every byte of the bit array in order.
        raise NotImplementedError

    # Base implementations

    def __len__(self):
        # Population count via the per-byte popcount table.
        return sum(_1SPERBYTE[b] for b in self._iter_bytes())

    def __iter__(self):
        # Yields members in increasing order.
        base = 0
        for byte in self._iter_bytes():
            for i in xrange(8):
                if byte & (1 << i):
                    yield base + i
            base += 8

    def __nonzero__(self):
        return any(n for n in self._iter_bytes())

    __bool__ = __nonzero__

    def __contains__(self, i):
        bucket = i // 8
        if bucket >= self.byte_count():
            return False
        return bool(self._get_byte(bucket) & (1 << (i & 7)))

    def first(self):
        return self.after(-1)

    def last(self):
        return self.before(self.byte_count() * 8 + 1)

    def before(self, i):
        # Scan backwards from i - 1 for the previous set bit, skipping
        # whole zero bytes in one step.
        _get_byte = self._get_byte
        size = self.byte_count() * 8
        if i <= 0:
            return None
        elif i >= size:
            i = size - 1
        else:
            i -= 1
        bucket = i // 8

        while i >= 0:
            byte = _get_byte(bucket)
            if not byte:
                # Empty byte: jump straight to the top bit of the previous one.
                bucket -= 1
                i = bucket * 8 + 7
                continue
            if byte & (1 << (i & 7)):
                return i
            if i % 8 == 0:
                # About to drop below this byte's lowest bit.
                bucket -= 1
            i -= 1

        return None

    def after(self, i):
        # Mirror image of before(): scan forward from i + 1.
        _get_byte = self._get_byte
        size = self.byte_count() * 8
        if i >= size:
            return None
        elif i < 0:
            i = 0
        else:
            i += 1
        bucket = i // 8

        while i < size:
            byte = _get_byte(bucket)
            if not byte:
                bucket += 1
                i = bucket * 8
                continue
            if byte & (1 << (i & 7)):
                return i
            i += 1
            if i % 8 == 0:
                bucket += 1

        return None
class OnDiskBitSet(BaseBitSet):
    """A DocIdSet backed by an array of bits on disk.

    >>> st = RamStorage()
    >>> f = st.create_file("test.bin")
    >>> bs = BitSet([1, 10, 15, 7, 2])
    >>> bytecount = bs.to_disk(f)
    >>> f.close()
    >>> # ...
    >>> f = st.open_file("test.bin")
    >>> odbs = OnDiskBitSet(f, 0, bytecount)
    >>> list(odbs)
    [1, 2, 7, 10, 15]
    """

    def __init__(self, dbfile, basepos, bytecount):
        """
        :param dbfile: a :class:`~whoosh.filedb.structfile.StructFile` object
            to read from.
        :param basepos: the base position of the bytes in the given file.
        :param bytecount: the number of bytes to use for the bit array.
        """
        self._dbfile = dbfile
        self._basepos = basepos
        self._bytecount = bytecount

    def __repr__(self):
        # The attributes are underscore-prefixed; the previous version
        # referenced self.dbfile / self.bytecount, which raised
        # AttributeError whenever the repr was rendered.
        return "%s(%s, %d, %d)" % (self.__class__.__name__, self._dbfile,
                                   self._basepos, self._bytecount)

    def byte_count(self):
        return self._bytecount

    def _get_byte(self, n):
        # Byte n of the bit array lives at basepos + n in the file.
        return self._dbfile.get_byte(self._basepos + n)

    def _iter_bytes(self):
        dbfile = self._dbfile
        dbfile.seek(self._basepos)
        for _ in xrange(self._bytecount):
            yield dbfile.read_byte()
class BitSet(BaseBitSet):
"""A DocIdSet backed by an array of bits. This can also be useful as a bit
array (e.g. for a Bloom filter). It is much more memory efficient than a
large built-in set of integers, but wastes memory for sparse sets.
"""
    def __init__(self, source=None, size=0):
        """
        :param source: an iterable of positive integers to add to this set.
        :param size: the number of bits the array should hold; when omitted
            it is guessed from ``source`` where possible, and ``add()``
            grows the array on demand in any case.
        """
        # If the source is a list, tuple, or set, we can guess the size
        if not size and isinstance(source, (list, tuple, set, frozenset)):
            size = max(source)
        bytecount = bytes_for_bits(size)
        self.bits = array("B", (0 for _ in xrange(bytecount)))

        if source:
            add = self.add
            for num in source:
                add(num)
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, list(self))

    def byte_count(self):
        return len(self.bits)

    def _get_byte(self, n):
        return self.bits[n]

    def _iter_bytes(self):
        return iter(self.bits)

    def _trim(self):
        # Drop trailing all-zero bytes so the array spans only up to the
        # highest set bit.
        bits = self.bits
        last = len(self.bits) - 1
        while last >= 0 and not bits[last]:
            last -= 1
        del self.bits[last + 1:]

    def _resize(self, tosize):
        # Grow or shrink the byte array to hold ``tosize`` bits.
        curlength = len(self.bits)
        newlength = bytes_for_bits(tosize)
        if newlength > curlength:
            self.bits.extend((0,) * (newlength - curlength))
        elif newlength < curlength:
            # NOTE(review): this keeps newlength + 1 bytes; ``del
            # self.bits[newlength:]`` would trim to exactly newlength --
            # confirm whether the extra byte is intentional.
            del self.bits[newlength + 1:]

    def _zero_extra_bits(self, size):
        # Clear bits in the final byte above ``size`` (used after
        # invert_update, which flips every bit of every byte).
        bits = self.bits
        spill = size - ((len(bits) - 1) * 8)
        if spill:
            mask = 2 ** spill - 1
            bits[-1] = bits[-1] & mask
def _logic(self, obj, op, other):
objbits = obj.bits
for i, (byte1, byte2) in enumerate(izip_longest(objbits, other.bits,
fillvalue=0)):
value = op(byte1, byte2) & 0xFF
if i >= len(objbits):
objbits.append(value)
else:
objbits[i] = value
obj._trim()
return obj
def to_disk(self, dbfile):
dbfile.write_array(self.bits)
return len(self.bits)
@classmethod
def from_bytes(cls, bs):
b = cls()
b.bits = array("B", bs)
return b
@classmethod
def from_disk(cls, dbfile, bytecount):
return cls.from_bytes(dbfile.read_array("B", bytecount))
def copy(self):
b = self.__class__()
b.bits = array("B", iter(self.bits))
return b
def clear(self):
for i in xrange(len(self.bits)):
self.bits[i] = 0
def add(self, i):
bucket = i >> 3
if bucket >= len(self.bits):
self._resize(i + 1)
self.bits[bucket] |= 1 << (i & 7)
def discard(self, i):
bucket = i >> 3
self.bits[bucket] &= ~(1 << (i & 7))
def _resize_to_other(self, other):
if isinstance(other, (list, tuple, set, frozenset)):
maxbit = max(other)
if maxbit // 8 > len(self.bits):
self._resize(maxbit)
def update(self, iterable):
self._resize_to_other(iterable)
DocIdSet.update(self, iterable)
def intersection_update(self, other):
if isinstance(other, BitSet):
return self._logic(self, operator.__and__, other)
discard = self.discard
for n in self:
if n not in other:
discard(n)
def difference_update(self, other):
if isinstance(other, BitSet):
return self._logic(self, lambda x, y: x & ~y, other)
discard = self.discard
for n in other:
discard(n)
def invert_update(self, size):
bits = self.bits
for i in xrange(len(bits)):
bits[i] = ~bits[i] & 0xFF
self._zero_extra_bits(size)
def union(self, other):
if isinstance(other, BitSet):
return self._logic(self.copy(), operator.__or__, other)
b = self.copy()
b.update(other)
return b
def intersection(self, other):
if isinstance(other, BitSet):
return self._logic(self.copy(), operator.__and__, other)
return BitSet(source=(n for n in self if n in other))
def difference(self, other):
if isinstance(other, BitSet):
return self._logic(self.copy(), lambda x, y: x & ~y, other)
return BitSet(source=(n for n in self if n not in other))
class SortedIntSet(DocIdSet):
    """A DocIdSet backed by a sorted array of integers.
    """

    def __init__(self, source=None, typecode="I"):
        """
        :param source: an optional iterable of document numbers; they are
            sorted into the backing array.
        :param typecode: the ``array.array`` typecode used for backing
            storage (default "I", unsigned int).
        """
        if source:
            self.data = array(typecode, sorted(source))
        else:
            self.data = array(typecode)
        self.typecode = typecode

    def copy(self):
        sis = SortedIntSet()
        # Bug fix: propagate the typecode -- previously a copy of a set
        # built with a non-default typecode still reported typecode "I".
        sis.typecode = self.typecode
        sis.data = array(self.typecode, self.data)
        return sis

    def size(self):
        # Approximate memory footprint of the backing array, in bytes.
        return len(self.data) * self.data.itemsize

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.data)

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        return iter(self.data)

    def __nonzero__(self):
        return bool(self.data)

    __bool__ = __nonzero__

    def __contains__(self, i):
        data = self.data
        if not data or i < data[0] or i > data[-1]:
            return False

        pos = bisect_left(data, i)
        if pos == len(data):
            return False
        return data[pos] == i

    def add(self, i):
        # Insert ``i`` keeping the array sorted; duplicates are ignored.
        # (The original also had an unreachable "i > mx" branch inside the
        # else arm -- that case is fully handled by the first test.)
        data = self.data
        if not data or i > data[-1]:
            data.append(i)
            return
        if i == data[0] or i == data[-1]:
            return  # already present at an end
        if i < data[0]:
            data.insert(0, i)
            return
        pos = bisect_left(data, i)
        if data[pos] != i:
            data.insert(pos, i)

    def discard(self, i):
        data = self.data
        pos = bisect_left(data, i)
        # Bug fix: bounds-check the insertion point -- previously discarding
        # a value greater than every member (or from an empty set) raised
        # IndexError instead of being a no-op like set.discard().
        if pos < len(data) and data[pos] == i:
            data.pop(pos)

    def clear(self):
        self.data = array(self.typecode)

    def intersection_update(self, other):
        self.data = array(self.typecode, (num for num in self if num in other))

    def difference_update(self, other):
        self.data = array(self.typecode,
                          (num for num in self if num not in other))

    def intersection(self, other):
        return SortedIntSet((num for num in self if num in other))

    def difference(self, other):
        return SortedIntSet((num for num in self if num not in other))

    def first(self):
        return self.data[0]

    def last(self):
        return self.data[-1]

    def before(self, i):
        # Greatest member strictly less than ``i``, or None.
        data = self.data
        pos = bisect_left(data, i)
        if pos < 1:
            return None
        return data[pos - 1]

    def after(self, i):
        # Smallest member strictly greater than ``i``, or None.
        data = self.data
        if not data or i >= data[-1]:
            return None
        if i < data[0]:
            return data[0]
        pos = bisect_right(data, i)
        return data[pos]
class ReverseIdSet(DocIdSet):
    """
    Wraps a DocIdSet object and reverses its semantics, so docs in the wrapped
    set are not in this set, and vice-versa.
    """

    def __init__(self, idset, limit):
        """
        :param idset: the DocIdSet object to wrap.
        :param limit: the highest possible ID plus one.
        """
        self.idset = idset
        self.limit = limit

    def __len__(self):
        return self.limit - len(self.idset)

    def __contains__(self, i):
        return i not in self.idset

    def __iter__(self):
        # Walk 0..limit-1 in lockstep with the wrapped set's (sorted)
        # members, yielding only the IDs the wrapped set does NOT contain.
        # -1 is used as a sentinel once the wrapped iterator is exhausted.
        wrapped = iter(self.idset)
        upcoming = next(wrapped, -1)
        for candidate in xrange(self.limit):
            if candidate == upcoming:
                upcoming = next(wrapped, -1)
            else:
                yield candidate

    def add(self, n):
        # Adding here means removing from the wrapped set.
        self.idset.discard(n)

    def discard(self, n):
        # Discarding here means adding to the wrapped set.
        self.idset.add(n)

    def first(self):
        # First ID not in the wrapped set (None if the reverse set is empty).
        for candidate in self:
            return candidate

    def last(self):
        inner = self.idset
        maxid = self.limit - 1
        # Cheap check: if the wrapped set's maximum is well below the top of
        # the range, the top ID is certainly free.
        if inner.last() < maxid - 1:
            return maxid
        # Otherwise scan downward for the first ID missing from the wrapped
        # set.
        for candidate in xrange(maxid, -1, -1):
            if candidate not in inner:
                return candidate
# Membership threshold (2^12 = 4096) at which RoaringIdSet promotes a 2^16
# bucket from a SortedIntSet to a BitSet, and demotes it again below.
ROARING_CUTOFF = 1 << 12
class RoaringIdSet(DocIdSet):
    """
    Separates IDs into ranges of 2^16 bits, and stores each range in the most
    efficient type of doc set, either a BitSet (if the range has >= 2^12 IDs)
    or a sorted ID set of 16-bit shorts.
    """

    cutoff = 2**12

    def __init__(self, source=None):
        """
        :param source: an optional iterable of document numbers to add.
        """
        self.idsets = []
        if source:
            self.update(source)

    def __len__(self):
        if not self.idsets:
            return 0
        return sum(len(idset) for idset in self.idsets)

    def __contains__(self, n):
        bucket = n >> 16
        if bucket >= len(self.idsets):
            return False
        return (n - (bucket << 16)) in self.idsets[bucket]

    def __iter__(self):
        # Bug fix: the original iterated ``for i, idset in self.idsets``,
        # which tried to unpack each sub-set itself and raised TypeError;
        # enumerate() is needed to recover each bucket's base offset.
        for i, idset in enumerate(self.idsets):
            floor = i << 16
            for n in idset:
                yield floor + n

    def _find(self, n):
        # Locate (creating empty sub-sets on demand) the bucket holding
        # ID ``n``; returns (bucket index, lowest ID of the bucket, sub-set).
        bucket = n >> 16
        # Bug fix: the floor is the bucket number shifted back up, not the
        # raw ID shifted; the original ``n << 16`` made add()/discard()
        # compute wrong (hugely negative) relative IDs.
        floor = bucket << 16
        if bucket >= len(self.idsets):
            self.idsets.extend([SortedIntSet() for _
                                in xrange(len(self.idsets), bucket + 1)])
        idset = self.idsets[bucket]
        return bucket, floor, idset

    def add(self, n):
        bucket, floor, idset = self._find(n)
        oldlen = len(idset)
        idset.add(n - floor)
        # Promote a sub-set that just crossed the cutoff to a BitSet.
        if oldlen <= ROARING_CUTOFF < len(idset):
            self.idsets[bucket] = BitSet(idset)

    def discard(self, n):
        bucket, floor, idset = self._find(n)
        oldlen = len(idset)
        idset.discard(n - floor)
        # Demote a sub-set that just dropped below the cutoff.
        if oldlen > ROARING_CUTOFF >= len(idset):
            self.idsets[bucket] = SortedIntSet(idset)
class MultiIdSet(DocIdSet):
    """Wraps multiple SERIAL sub-DocIdSet objects and presents them as an
    aggregated, read-only set.
    """

    def __init__(self, idsets, offsets):
        """
        :param idsets: a list of DocIdSet objects.
        :param offsets: a list of offsets corresponding to the DocIdSet objects
            in ``idsets``.
        """
        assert len(idsets) == len(offsets)
        self.idsets = idsets
        self.offsets = offsets

    def _document_set(self, n):
        # Index of the sub-set whose range contains document ``n``: the
        # sub-set with the greatest offset <= n.
        # Bug fix: the original returned
        # ``max(bisect_left(offsets, n), len(offsets) - 1)``, which always
        # selected the LAST sub-set (max where a clamp would need min, and
        # bisect_left where bisect_right - 1 is required), so membership
        # tests hit the wrong sub-set for all but the final range.
        return bisect_right(self.offsets, n) - 1

    def _set_and_docnum(self, n):
        # Resolve ``n`` to (sub-set, document number relative to that set).
        setnum = self._document_set(n)
        offset = self.offsets[setnum]
        return self.idsets[setnum], n - offset

    def __len__(self):
        return sum(len(idset) for idset in self.idsets)

    def __iter__(self):
        for idset, offset in izip(self.idsets, self.offsets):
            for docnum in idset:
                yield docnum + offset

    def __contains__(self, item):
        idset, n = self._set_and_docnum(item)
        return n in idset
|
40223141/gear | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/logging/handlers.py | 736 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import errno, logging, socket, os, pickle, struct, time, re
from codecs import BOM_UTF8
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
try:
import threading
except ImportError: #pragma: no cover
threading = None
#
# Some constants...
#
# Default ports used by the network logging handlers in this module.
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
# Well-known syslog ports.
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly. Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """

    def __init__(self, filename, mode, encoding=None, delay=False):
        """
        Use the specified filename for streamed logging.
        """
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        # Optional user hooks: ``namer`` maps a default rotated filename to
        # the actual name to use; ``rotator`` performs the rotation itself.
        self.namer = None
        self.rotator = None

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):  # pragma: no cover
            raise
        except:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        Delegates to the ``namer`` attribute when it is callable; otherwise
        the default name is returned unchanged.

        :param default_name: The default name for the log file.
        """
        namer = self.namer
        if callable(namer):
            return namer(default_name)
        return default_name

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        Delegates to the ``rotator`` attribute when it is callable;
        otherwise the source file is simply renamed to the destination.

        :param source: The source filename, normally the base filename,
                       e.g. 'test.log'.
        :param dest: The destination filename, normally what the source is
                     rotated to, e.g. 'test.log.1'.
        """
        rotator = self.rotator
        if callable(rotator):
            rotator(source, dest)
        elif os.path.exists(source):
            # Issue 18940: a file may not have been created if delay is True.
            os.rename(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file to
    the next when the current file reaches a certain size.
    """

    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size: rollover occurs whenever the current log file
        is nearly maxBytes in length. If backupCount is >= 1, the system
        successively creates new files with extensions ".1", ".2" etc.
        appended to the base pathname; the file written to is always the
        base file, which on rollover is renamed to ".1" while existing
        backups shift up one number. If maxBytes is zero, rollover never
        occurs.
        """
        # Rollover implies append mode; respecting e.g. 'w' would truncate
        # the log of a previous run of the calling application.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift existing backups up one slot, oldest first.
            for index in range(self.backupCount - 1, 0, -1):
                src = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        index))
                dst = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        index + 1))
                if os.path.exists(src):
                    if os.path.exists(dst):
                        os.remove(dst)
                    os.rename(src, dst)
            # The live file becomes backup ".1".
            dst = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dst):
                os.remove(dst)
            self.rotate(self.baseFilename, dst)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur: would writing the supplied
        record cause the file to exceed the configured size limit?
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            # Seek to the end explicitly: on Windows the append position is
            # not POSIX-compliant, so tell() is only reliable after a seek.
            self.stream.seek(0, 2)
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
        """
        :param when: unit of the rollover interval: 'S', 'M', 'H', 'D',
            'W0'-'W6' (weekly; 0 is Monday) or 'midnight'; case-insensitive.
        :param interval: number of ``when`` units between rollovers.
        :param backupCount: if > 0, keep at most this many rotated files.
        :param utc: if true, compute rollover times in UTC rather than
            local time.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # extMatch is used by getFilesToDelete() to recognize rotated files.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # Base the first rollover on the log file's mtime if it already
        # exists, otherwise on "now".
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                             currentSecond)
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = t[6] # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                # Only filenames whose suffix matches this handler's
                # timestamp pattern count as our rotated backups.
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            # If DST flipped between the interval start and now, shift the
            # timestamp so the rotated filename reflects local wall time.
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file to see if it has
    changed while in use. This can happen because of usage of programs such
    as newsyslog and logrotate which perform log file rotation. This handler,
    intended for use under Unix, watches the file to see if it has changed
    since the last emit. (A file has changed if its device or inode have
    changed.) If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because under
    Windows open files cannot be moved or renamed - logging opens the files
    with exclusive locks - and so there is no need for such a handler.
    Furthermore, ST_INO is not supported under Windows; stat always returns
    zero for this value.

    This handler is based on a suggestion and patch by Chad J. Schroeder.
    """

    def __init__(self, filename, mode='a', encoding=None, delay=False):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        # Sentinels meaning "no stream stat'ed yet".
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Remember the device/inode of the stream we currently hold open.
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]

    def emit(self, record):
        """
        Emit a record.

        First check if the underlying file has changed, and if it has, close
        the old stream and reopen the file to get the current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            sres = None
        # Compare file system stat with that of our stream file handle: a
        # missing file or a different device/inode means it was rotated out
        # from under us.
        moved = (not sres or sres[ST_DEV] != self.dev
                 or sres[ST_INO] != self.ino)
        if moved:
            if self.stream is not None:
                # we have an open file handle, clean it up
                self.stream.flush()
                self.stream.close()
            # open a new file handle and get new stat info from that fd
            self.stream = self._open()
            self._statstream()
        logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to a
    streaming socket. The socket is kept open across logging calls; if the
    peer resets it, an attempt is made to reconnect on the next call.

    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event. To unpickle the record
    at the receiving end into a LogRecord, use the makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        # Exponential backoff parameters for reconnect attempts.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(s, 'settimeout'):
            s.settimeout(timeout)
        try:
            s.connect((self.host, self.port))
        except socket.error:
            s.close()
            raise
        return s

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with a max
        retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Attempt if this is the first try after a disconnect, or if the
        # backoff delay has elapsed.
        attempt = self.retryTime is None or now >= self.retryTime
        if not attempt:
            return
        try:
            self.sock = self.makeSocket()
            self.retryTime = None  # next time, no delay before trying
        except socket.error:
            # Creation failed: compute the next (capped, growing) delay.
            if self.retryTime is None:
                self.retryPeriod = self.retryStart
            else:
                self.retryPeriod = min(self.retryPeriod * self.retryFactor,
                                       self.retryMax)
            self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled byte string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock may still be None if we are inside the retry window or
        # the reconnect failed; in that case the record is silently dropped.
        if not self.sock:
            return
        try:
            if hasattr(self.sock, "sendall"):
                self.sock.sendall(s)
            else:  # pragma: no cover
                sent = 0
                remaining = len(s)
                while remaining > 0:
                    count = self.sock.send(s[sent:])
                    sent += count
                    remaining -= count
        except socket.error:  # pragma: no cover
            self.sock.close()
            self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # Calling format() has the side effect of caching the traceback
            # text in record.exc_text, which does pickle.
            self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end, so bake msg % args into a plain
        # string and drop the unpicklable pieces.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        payload = pickle.dumps(d, 1)
        return struct.pack(">L", len(payload)) + payload

    def handleError(self, record):
        """
        Handle an error during logging.

        Most likely cause - connection lost. Close the socket so that we can
        retry on the next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None  # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the socket.
        """
        try:
            self.send(self.makePickle(record))
        except (KeyboardInterrupt, SystemExit):  # pragma: no cover
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock = self.sock
            if sock:
                self.sock = None
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to a
    datagram (UDP) socket. The pickle which is sent is that of the
    LogRecord's attribute dictionary (__dict__), so that the receiver does
    not need to have the logging module installed in order to process the
    logging event. To unpickle the record at the receiving end into a
    LogRecord, use the makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled byte string to a socket.

        Unlike the stream version, this never does partial sends - UDP does
        not guarantee delivery and can deliver packets out of sequence, so
        the datagram is handed off whole.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
             facility=LOG_USER, socktype=None):
    """
    Initialize a handler.

    If address is specified as a string, a UNIX socket is used. To log to a
    local syslogd, "SysLogHandler(address="/dev/log")" can be used.
    If facility is not specified, LOG_USER is used. If socktype is None,
    a sensible default is chosen (SOCK_DGRAM, with a fallback to
    SOCK_STREAM for UNIX sockets); otherwise the given type is used.
    """
    logging.Handler.__init__(self)

    self.address = address
    self.facility = facility
    # Record the *requested* socktype first; None means "let the handler
    # pick". _connect_unixsocket() inspects self.socktype to decide
    # whether a DGRAM->STREAM fallback is permitted.
    self.socktype = socktype

    if isinstance(address, str):
        # A string address names a UNIX domain socket (e.g. "/dev/log").
        self.unixsocket = True
        self._connect_unixsocket(address)
    else:
        # A (host, port) tuple means syslog over UDP or TCP.
        self.unixsocket = False
        if socktype is None:
            socktype = socket.SOCK_DGRAM    # classic syslog default
        self.socket = socket.socket(socket.AF_INET, socktype)
        if socktype == socket.SOCK_STREAM:
            self.socket.connect(address)    # TCP requires an explicit connect
        # Overwrite with the socket type actually in use.
        self.socktype = socktype
    self.formatter = None
def _connect_unixsocket(self, address):
    # Connect self.socket to a UNIX-domain syslog socket. When the user
    # did not pin a socket type (self.socktype is None), try SOCK_DGRAM
    # first and fall back to SOCK_STREAM; otherwise honour the request
    # and propagate any connect failure.
    use_socktype = self.socktype
    if use_socktype is None:
        use_socktype = socket.SOCK_DGRAM    # try datagram first
    self.socket = socket.socket(socket.AF_UNIX, use_socktype)
    try:
        self.socket.connect(address)
        # it worked, so set self.socktype to the used type
        self.socktype = use_socktype
    except socket.error:
        self.socket.close()
        if self.socktype is not None:
            # user specified an explicit type, so no fallback - fail
            raise
        # No explicit type requested: the daemon may be listening on a
        # stream socket instead, so retry once with SOCK_STREAM.
        use_socktype = socket.SOCK_STREAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except socket.error:
            self.socket.close()
            raise
def encodePriority(self, facility, priority):
    """Combine *facility* and *priority* into a single syslog code.

    Either argument may be given as a string, in which case it is first
    translated to its integer value via the facility_names /
    priority_names mapping dictionaries.
    """
    fac = self.facility_names[facility] if isinstance(facility, str) else facility
    pri = self.priority_names[priority] if isinstance(priority, str) else priority
    # The syslog wire format packs the facility into the high bits
    # (shifted left by 3) and the severity into the low 3 bits.
    return (fac << 3) | pri
def close(self):
    """
    Closes the socket.

    The handler's I/O lock is held while closing, so no other thread can
    emit through a half-closed socket; the base-class close (which
    deregisters the handler) runs under the same lock.
    """
    self.acquire()
    try:
        self.socket.close()
        logging.Handler.close(self)
    finally:
        self.release()
def mapPriority(self, levelName):
    """
    Map a logging level name to a key in the priority_names map.

    This is useful in two scenarios: when custom levels are being
    used, and in the case where you can't do a straightforward
    mapping by lowercasing the logging level name because of locale-
    specific issues (see SF #1524081).
    """
    try:
        return self.priority_map[levelName]
    except KeyError:
        # Unknown / custom level names are reported as warnings.
        return "warning"
ident = ''          # string prepended to every message (e.g. a program name)
append_nul = True   # some old syslog daemons expect a NUL terminator

def emit(self, record):
    """
    Emit a record.

    The record is formatted, and then sent to the syslog server. If
    exception information is present, it is NOT sent to the server.
    """
    msg = self.format(record)
    if self.ident:
        msg = self.ident + msg
    if self.append_nul:
        msg += '\000'
    # Bug fix: this explanation used to be a bare triple-quoted string in
    # the middle of the function body - a useless no-op expression
    # statement - and is now a real comment:
    # We need to convert record level to lowercase, maybe this will
    # change in the future.
    prio = '<%d>' % self.encodePriority(self.facility,
                                        self.mapPriority(record.levelname))
    prio = prio.encode('utf-8')
    # Message is a string. Convert to bytes as required by RFC 5424
    msg = msg.encode('utf-8')
    msg = prio + msg
    try:
        if self.unixsocket:
            try:
                self.socket.send(msg)
            except socket.error:
                # The local syslog daemon may have restarted; reconnect
                # and retry the send exactly once.
                self.socket.close()
                self._connect_unixsocket(self.address)
                self.socket.send(msg)
        elif self.socktype == socket.SOCK_DGRAM:
            self.socket.sendto(msg, self.address)
        else:
            self.socket.sendall(msg)
    except (KeyboardInterrupt, SystemExit): #pragma: no cover
        raise
    except:
        self.handleError(record)
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5.0 seconds, matching the parameter default).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, tuple):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, tuple):
            self.username, self.password = credentials
        else:
            # Bug fix: also initialize self.password here so the attribute
            # always exists (previously only self.username was assigned).
            self.username = None
            self.password = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, str):
            toaddrs = [toaddrs]     # accept a single bare address
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.utils import formatdate
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                            self.fromaddr,
                            ",".join(self.toaddrs),
                            self.getSubject(record),
                            formatdate(), msg)
            if self.username:
                if self.secure is not None:
                    # starttls requires an EHLO both before and after.
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit): #pragma: no cover
            raise
        except:
            self.handleError(record)
class NTEventLogHandler(logging.Handler):
    """
    Log records to the Windows NT Event Log.

    Constructing the handler registers *appname* in the registry as an
    event source. Unless *dllname* is supplied, the win32service.pyd
    shipped with the pywin32 extensions (which contains some basic message
    placeholders) is used; note that this makes event logs large, as the
    entire message source is held in the log. Pass the name of your own
    message DLL for slimmer logs. If the pywin32 extensions are not
    available, the handler prints a warning and becomes a no-op.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to <pywin32 root>/win32service.pyd, two directory
                # levels up from win32evtlogutil's own location.
                pkgdir = os.path.split(self._welu.__file__)[0]
                dllname = os.path.join(os.path.split(pkgdir)[0],
                                       'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            # logging level -> NT event type; unknown levels use deftype.
            self.typemap = {
                logging.DEBUG:    win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO:     win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING:  win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR:    win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "\
            "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record.

        If you are using your own messages, you could do this by having the
        msg passed to the logger being an ID rather than a formatting
        string; a dictionary lookup here would then map it to the message
        ID. This version returns 1, which is the base message ID in
        win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This
        version returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the NT event type for the record.

        Looks up record.levelno in the handler's typemap attribute, which
        is set up in __init__() with mappings for DEBUG, INFO, WARNING,
        ERROR and CRITICAL; anything else falls back to the default type.
        Override this method, or install a suitable typemap, if you are
        using your own levels.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type, then log
        the formatted message in the NT event log.
        """
        if not self._welu:
            return
        try:
            event_id = self.getMessageID(record)
            category = self.getEventCategory(record)
            event_type = self.getEventType(record)
            message = self.format(record)
            self._welu.ReportEvent(self.appname, event_id, category,
                                   event_type, [message])
        except (KeyboardInterrupt, SystemExit): #pragma: no cover
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You could remove the application name from the registry as an
        event-log source here. However, if you do that, the Event Log
        Viewer will no longer be able to resolve the DLL name needed to
        display events you already logged.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST").

        host may carry an optional port ("example.com:8080"); secure
        selects HTTPS; credentials, when given, is a (username, password)
        tuple sent as an HTTP Basic Authorization header.

        Raises ValueError for any method other than GET or POST.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary.
        """
        try:
            import http.client, urllib.parse
            host = self.host
            if self.secure:
                h = http.client.HTTPSConnection(host)
            else:
                h = http.client.HTTPConnection(host)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # Append the data to any existing query string.
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                # Bug fix: the format string previously read 'u%s:%s' - a
                # leftover of a u'' prefix - which prepended a stray 'u' to
                # the userid; and the base64-encoded *bytes* were then
                # concatenated to a str, raising TypeError. Encode the
                # userinfo, base64 it, and decode back to ASCII text.
                s = ('%s:%s' % self.credentials).encode('utf-8')
                s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
                h.putheader('Authorization', s)
            h.endheaders()
            if self.method == "POST":
                h.send(data.encode('utf-8'))
            h.getresponse()    #can't do anything with the result
        except (KeyboardInterrupt, SystemExit): #pragma: no cover
            raise
        except:
            self.handleError(record)
class BufferingHandler(logging.Handler):
    """
    A handler that collects logging records in an in-memory buffer.

    After every record is appended, shouldFlush() is consulted; when it
    answers True, flush() is invoked to dispose of the buffer. Subclasses
    override flush() (and possibly shouldFlush()) to do something useful.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true once the buffer has grown to capacity. Override to
        implement custom flushing strategies.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Emit a record.

        Append the record to the buffer and flush when shouldFlush()
        says the buffer is due.
        """
        self.buffer.append(record)
        if not self.shouldFlush(record):
            return
        self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version simply discards the buffered records, replacing the
        buffer with a fresh empty list under the handler lock.
        """
        self.acquire()
        try:
            self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Close the handler.

        Flushes any pending records, then chains to the parent close().
        """
        self.flush()
        logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
    """
    A BufferingHandler that periodically forwards its buffered records to
    a target handler. Flushing happens when the buffer fills up, or when a
    record of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target

    def shouldFlush(self, record):
        """
        Check for buffer full or a record at the flushLevel or higher.
        """
        if len(self.buffer) >= self.capacity:
            return True
        return record.levelno >= self.flushLevel

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        Hand every buffered record to the target handler, then clear the
        buffer. When no target is set, the records are retained (nothing
        is lost before setTarget() is called). Override for different
        behaviour.
        """
        self.acquire()
        try:
            if self.target:
                for buffered in self.buffer:
                    self.target.handle(buffered)
                # Only cleared when a target exists - see docstring.
                self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Flush, set the target to None and lose the buffer.
        """
        self.flush()
        self.acquire()
        try:
            self.target = None
            BufferingHandler.close(self)
        finally:
            self.release()
class QueueHandler(logging.Handler):
    """
    A handler that publishes records onto a queue.

    Typically used with a multiprocessing Queue to centralise logging to
    file in one process of a multi-process application, avoiding file
    write contention between processes.

    This code is new in Python 3.2, but this class can be copy pasted into
    user code for use with earlier Python versions.
    """
    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Enqueue a record.

        The base implementation uses put_nowait. Override if you want
        blocking behaviour, timeouts or a custom queue implementation.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Prepares a record for queuing; the object returned here is what
        gets enqueued.

        The base implementation formats the record to merge the message
        and arguments, and strips unpickleable items from the record
        in-place. Override if you would rather convert the record to a
        dict or JSON string, or enqueue a modified copy while leaving the
        original intact.
        """
        # Formatting fills in record.message (and record.exc_text when
        # exception data is present). The merged message then replaces the
        # original msg + args, which might not pickle; exc_info is zapped
        # for the same reason.
        self.format(record)
        record.msg = record.message
        record.args = None
        record.exc_info = None
        return record

    def emit(self, record):
        """
        Emit a record.

        Writes the LogRecord to the queue, preparing it for pickling first.
        """
        try:
            self.enqueue(self.prepare(record))
        except (KeyboardInterrupt, SystemExit): #pragma: no cover
            raise
        except:
            self.handleError(record)
if threading:
    class QueueListener(object):
        """
        This class implements an internal threaded listener which watches for
        LogRecords being added to a queue, removes them and passes them to a
        list of handlers for processing.
        """
        # Enqueued by enqueue_sentinel() to tell _monitor() to terminate.
        _sentinel = None

        def __init__(self, queue, *handlers):
            """
            Initialise an instance with the specified queue and
            handlers.
            """
            self.queue = queue
            self.handlers = handlers
            self._stop = threading.Event()
            self._thread = None

        def dequeue(self, block):
            """
            Dequeue a record and return it, optionally blocking.

            The base implementation uses get. You may want to override this
            method if you want to use timeouts or work with custom queue
            implementations.
            """
            return self.queue.get(block)

        def start(self):
            """
            Start the listener.

            This starts up a background (daemon) thread to monitor the queue
            for LogRecords to process.
            """
            self._thread = t = threading.Thread(target=self._monitor)
            t.setDaemon(True)
            t.start()

        def prepare(self, record):
            """
            Prepare a record for handling.

            This method just returns the passed-in record. You may want to
            override this method if you need to do any custom marshalling or
            manipulation of the record before passing it to the handlers.
            """
            return record

        def handle(self, record):
            """
            Handle a record.

            This just loops through the handlers offering them the record
            to handle.
            """
            record = self.prepare(record)
            for handler in self.handlers:
                handler.handle(record)

        def _monitor(self):
            """
            Monitor the queue for records, and ask the handler
            to deal with them.

            This method runs on a separate, internal thread.
            The thread will terminate if it sees a sentinel object in the
            queue.
            """
            q = self.queue
            has_task_done = hasattr(q, 'task_done')
            # Phase 1: block for records until stop() is requested (or the
            # sentinel arrives, which breaks out directly).
            while not self._stop.isSet():
                try:
                    record = self.dequeue(True)
                    if record is self._sentinel:
                        break
                    self.handle(record)
                    if has_task_done:
                        q.task_done()
                except queue.Empty:
                    pass
            # There might still be records in the queue.
            # Phase 2: drain without blocking until the queue is empty or
            # the sentinel is seen.
            while True:
                try:
                    record = self.dequeue(False)
                    if record is self._sentinel:
                        break
                    self.handle(record)
                    if has_task_done:
                        q.task_done()
                except queue.Empty:
                    break

        def enqueue_sentinel(self):
            """
            This is used to enqueue the sentinel record.

            The base implementation uses put_nowait. You may want to override
            this method if you want to use timeouts or work with custom queue
            implementations.
            """
            self.queue.put_nowait(self._sentinel)

        def stop(self):
            """
            Stop the listener.

            This asks the thread to terminate, and then waits for it to do so.
            Note that if you don't call this before your application exits,
            there may be some records still left on the queue, which won't be
            processed.
            """
            self._stop.set()
            self.enqueue_sentinel()
            self._thread.join()
            self._thread = None
|
mgarg1/ecg | refs/heads/master | ecg_visualizer_ble_PC/galry/datanormalizer.py | 9 | import numpy as np
__all__ = ['DataNormalizer']
class DataNormalizer(object):
    """Handles normalizing data so that it fits the fixed [-1,1]^2 viewport."""

    def __init__(self, data=None):
        # data: optional array of (x, y) positions, indexed as data[:, 0/1].
        self.data = data

    def normalize(self, initial_viewbox=None, symmetric=False):
        """Normalize data given the initial view box.

        This method also defines the four following callables on the
        instance:

        * `(un)normalize_[x|y]`: normalize or un-normalize in the x or y
          dimension. Un-normalization can be useful for e.g. retrieving the
          original coordinates of a point in the window.

        Arguments:

        * initial_viewbox=None: a 4-tuple (x0, y0, x1, y1) describing the
          initial view of the data and defining the normalization. By
          default, it is the bounding box of the data (min/max x/y).
        * symmetric=False: when True, force a view box centered on the
          origin.

        Returns:

        * normalized_data: the normalized data (None when the instance
          holds no data).
        """
        if not initial_viewbox:
            initial_viewbox = (None, None, None, None)
        x0, y0, x1, y1 = initial_viewbox

        if self.data is None:
            # Without data, unspecified bounds default to the unit box.
            if x0 is None:
                x0 = -1.
            if y0 is None:
                y0 = -1.
            if x1 is None:
                x1 = 1.
            if y1 is None:
                y1 = 1.
        else:
            xcoords = self.data[:, 0]
            ycoords = self.data[:, 1]
            if self.data.size == 0:
                # Empty data: collapse the box to the origin (it is widened
                # to unit extent just below).
                x0 = y0 = x1 = y1 = 0.
            else:
                # Unspecified bounds default to the data's bounding box.
                if x0 is None:
                    x0 = xcoords.min()
                if y0 is None:
                    y0 = ycoords.min()
                if x1 is None:
                    x1 = xcoords.max()
                if y1 is None:
                    y1 = ycoords.max()

        # Widen degenerate (zero-extent) dimensions to unit width.
        if x0 == x1:
            x0 -= .5
            x1 += .5
        if y0 == y1:
            y0 -= .5
            y1 += .5

        if symmetric:
            # Force a view box centered on the origin.
            xext = max(np.abs(x0), np.abs(x1))
            yext = max(np.abs(y0), np.abs(y1))
            x0, x1 = -xext, xext
            y0, y1 = -yext, yext

        # NOTE(review): the bounds cannot be None at this point (they were
        # defaulted above), so the identity branches below are unreachable.
        # They are kept for parity with the documented "None means no
        # normalization" contract -- confirm intent before removing.
        if x0 is None:
            self.normalize_x = self.unnormalize_x = lambda X: X
        else:
            self.normalize_x = lambda X: -1 + 2 * (X - x0) / (x1 - x0)
            self.unnormalize_x = lambda X: x0 + (x1 - x0) * (1 + X) / 2.
        if y0 is None:
            self.normalize_y = self.unnormalize_y = lambda Y: Y
        else:
            self.normalize_y = lambda Y: -1 + 2 * (Y - y0) / (y1 - y0)
            self.unnormalize_y = lambda Y: y0 + (y1 - y0) * (1 + Y) / 2.

        if self.data is None:
            return None
        normalized = np.empty(self.data.shape, dtype=self.data.dtype)
        normalized[:, 0] = self.normalize_x(xcoords)
        normalized[:, 1] = self.normalize_y(ycoords)
        return normalized
|
gusyussh/learntosolveit | refs/heads/version1 | languages/python/algorithm_fact2.py | 7 | import operator
# Print 9! (= 362880) by folding multiplication over 1..9.
# NOTE: Python 2 syntax - print statement, xrange, and builtin reduce.
print reduce(operator.mul, xrange(1,10))
|
kiwiheretic/logos-v2 | refs/heads/master | reddit/migrations/0007_auto_20160619_2130.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Subreddits catalogue model and a
    per-user MySubreddits model linking users to their subscribed
    subreddits."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('reddit', '0006_auto_20160518_0036'),
    ]

    operations = [
        # MySubreddits: created first with only a pk; its relations are
        # attached via AddField below (standard makemigrations ordering
        # for mutually-referencing models).
        migrations.CreateModel(
            name='MySubreddits',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ],
        ),
        migrations.CreateModel(
            name='Subreddits',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=40)),
                ('url', models.URLField()),
            ],
        ),
        migrations.AddField(
            model_name='mysubreddits',
            name='subreddits',
            field=models.ManyToManyField(related_name='subscriptions', to='reddit.Subreddits'),
        ),
        # One MySubreddits row per user.
        migrations.AddField(
            model_name='mysubreddits',
            name='user',
            field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
        ),
    ]
|
zeza/gnuradio-rc-testcode | refs/heads/master | gr-flysky/docs/doxygen/doxyxml/generated/indexsuper.py | 348 | #!/usr/bin/env python
#
# Generated Thu Jun 11 18:43:54 2009 by generateDS.py.
#
import sys
import getopt
from string import lower as str_lower
from xml.dom import minidom
from xml.dom import Node
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
# Prefer a user-supplied GeneratedsSuper (hook module generatedssuper.py);
# otherwise fall back to this minimal formatting base class.
try:
    from generatedssuper import GeneratedsSuper
except ImportError, exp:
    # Python 2 "except Exc, name" syntax; 'exp' is unused.

    class GeneratedsSuper:
        # Each format_* hook renders one simple-typed value for XML export.
        def format_string(self, input_data, input_name=''):
            return input_data
        def format_integer(self, input_data, input_name=''):
            return '%d' % input_data
        def format_float(self, input_data, input_name=''):
            return '%f' % input_data
        def format_double(self, input_data, input_name=''):
            return '%e' % input_data
        def format_boolean(self, input_data, input_name=''):
            return '%s' % input_data
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'   # codec applied when .encode()-ing exported text
#
# Support/utility functions.
#
def showIndent(outfile, level):
    """Write one unit of indentation to *outfile* per nesting *level*."""
    for _ in range(level):
        outfile.write(' ')
def quote_xml(inStr):
    """Return *inStr* (coerced to a string) with &, < and > escaped as XML
    entities, for safe embedding in XML text content.

    Bug fix: the replace() calls had degenerated into no-ops
    (replace('&', '&') etc.) - the entity references were lost to HTML
    unescaping; the standard XML entities are restored here.
    """
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    s1 = s1.replace('&', '&amp;')    # must come first, before adding entities
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Return *inStr* rendered as a quoted XML attribute value, escaping
    &, <, > (and " when double quotes must be used as delimiters).

    Bug fix: the replace() calls had degenerated into no-ops - the entity
    references (&amp;, &lt;, &gt;, &quot;) were lost to HTML unescaping;
    they are restored here.
    """
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    s1 = s1.replace('&', '&amp;')    # must come first, before adding entities
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            # Both quote kinds present: delimit with " and escape inner ".
            s1 = '"%s"' % s1.replace('"', '&quot;')
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Render *inStr* as a Python string literal, choosing a quote style
    that avoids escaping where possible (triple quotes for embedded
    newlines, double quotes when single quotes occur in the text)."""
    text = inStr
    if "'" not in text:
        # Safe to single-quote; triple-quote when newlines are embedded.
        if '\n' not in text:
            return "'%s'" % text
        return "'''%s'''" % text
    # Contains single quotes: switch to double quoting, escaping any
    # double quotes already present.
    if '"' in text:
        text = text.replace('"', '\\"')
    if '\n' not in text:
        return '"%s"' % text
    return '"""%s"""' % text
class MixedContainer:
    # Wraps one child of a mixed-content XML element: plain text, a
    # simple typed value, or a nested complex node. 'category' says which
    # of the three it is; 'content_type' refines simple values.
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        # NOTE(review): the 'content_type' parameter is unused; this is a
        # plain getter (kept as generated).
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace):
        # Serialize per category: raw text, simple element, or delegate to
        # the nested object's own export().
        if self.category == MixedContainer.CategoryText:
            outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace,name)
    def exportSimple(self, outfile, level, name):
        # Emit <name>value</name>, choosing a %-format by content_type.
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
            self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
            self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
    def exportLiteral(self, outfile, level, name):
        # Emit a Python-literal (constructor-call) form, used by
        # generateDS's "literal" export mode.
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write('MixedContainer(%d, %d, "%s",\n' % \
                (self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class _MemberSpec(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type(self): return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
#
# Data representation classes.
#
class DoxygenType(GeneratedsSuper):
    """Root element of a Doxygen index file: a 'version' attribute plus a
    list of <compound> children. Machine-generated by generateDS.py -
    hand edits will be lost on regeneration."""
    subclass = None
    superclass = None
    def __init__(self, version=None, compound=None):
        self.version = version
        if compound is None:
            self.compound = []
        else:
            self.compound = compound
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if DoxygenType.subclass:
            return DoxygenType.subclass(*args_, **kwargs_)
        else:
            return DoxygenType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessor methods.
    def get_compound(self): return self.compound
    def set_compound(self, compound): self.compound = compound
    def add_compound(self, value): self.compound.append(value)
    def insert_compound(self, index, value): self.compound[index] = value
    def get_version(self): return self.version
    def set_version(self, version): self.version = version
    def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
        # Serialize this node (and its children) as XML to outfile.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='DoxygenType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # No children: self-closing element.
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
        outfile.write(' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
        for compound_ in self.compound:
            compound_.export(outfile, level, namespace_, name_='compound')
    def hasContent_(self):
        # True when there is at least some child content to serialize.
        if (
            self.compound is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='DoxygenType'):
        # Emit a Python-literal (constructor-call) form of this node.
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if self.version is not None:
            showIndent(outfile, level)
            outfile.write('version = %s,\n' % (self.version,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('compound=[\n')
        level += 1
        for compound in self.compound:
            showIndent(outfile, level)
            outfile.write('model_.compound(\n')
            compound.exportLiteral(outfile, level, name_='compound')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        # Populate this instance from a minidom element node.
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('version'):
            self.version = attrs.get('version').value
    def buildChildren(self, child_, nodeName_):
        # Each <compound> child becomes a CompoundType instance.
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'compound':
            obj_ = CompoundType.factory()
            obj_.build(child_)
            self.compound.append(obj_)
# end class DoxygenType
class CompoundType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, kind=None, refid=None, name=None, member=None):
self.kind = kind
self.refid = refid
self.name = name
if member is None:
self.member = []
else:
self.member = member
def factory(*args_, **kwargs_):
if CompoundType.subclass:
return CompoundType.subclass(*args_, **kwargs_)
else:
return CompoundType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_member(self): return self.member
def set_member(self, member): self.member = member
def add_member(self, value): self.member.append(value)
def insert_member(self, index, value): self.member[index] = value
def get_kind(self): return self.kind
def set_kind(self, kind): self.kind = kind
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
self.exportAttributes(outfile, level, namespace_, name_='CompoundType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write(' />\n')
def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'):
outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'):
if self.name is not None:
showIndent(outfile, level)
outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
for member_ in self.member:
member_.export(outfile, level, namespace_, name_='member')
def hasContent_(self):
if (
self.name is not None or
self.member is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='CompoundType'):
level += 1
self.exportLiteralAttributes(outfile, level, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        # Write kind/refid as keyword-argument lines, skipping unset ones.
        if self.kind is not None:
            showIndent(outfile, level)
            outfile.write('kind = "%s",\n' % (self.kind,))
        if self.refid is not None:
            showIndent(outfile, level)
            outfile.write('refid = %s,\n' % (self.refid,))
    def exportLiteralChildren(self, outfile, level, name_):
        # NOTE(review): unlike exportLiteralAttributes there is no None
        # check here; quote_python(self.name) would fail if name was never
        # set — presumably the schema guarantees a <name> child. Verify.
        showIndent(outfile, level)
        outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
        showIndent(outfile, level)
        outfile.write('member=[\n')
        level += 1
        for member in self.member:
            showIndent(outfile, level)
            outfile.write('model_.member(\n')
            member.exportLiteral(outfile, level, name_='member')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        # Populate this instance from a DOM element node: attributes first,
        # then each child node (tag name stripped of any namespace prefix).
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        # Copy the kind/refid DOM attributes onto the instance when present.
        if attrs.get('kind'):
            self.kind = attrs.get('kind').value
        if attrs.get('refid'):
            self.refid = attrs.get('refid').value
    def buildChildren(self, child_, nodeName_):
        # Dispatch on the child tag: <name> text nodes are concatenated
        # into self.name; each <member> element is parsed recursively and
        # appended to self.member.
        if child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'name':
            name_ = ''
            for text__content_ in child_.childNodes:
                name_ += text__content_.nodeValue
            self.name = name_
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'member':
            obj_ = MemberType.factory()
            obj_.build(child_)
            self.member.append(obj_)
# end class CompoundType
class MemberType(GeneratedsSuper):
    """Model for a doxygen <member> element: kind/refid attributes plus a
    <name> child element.  Follows the generateDS-generated API used by
    the other classes in this module."""
    subclass = None
    superclass = None
    def __init__(self, kind=None, refid=None, name=None):
        self.kind = kind
        self.refid = refid
        self.name = name
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        override = MemberType.subclass
        if override:
            return override(*args_, **kwargs_)
        return MemberType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_kind(self): return self.kind
    def set_kind(self, kind): self.kind = kind
    def get_refid(self): return self.refid
    def set_refid(self, refid): self.refid = refid
    def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef_=''):
        # Open tag + attributes, then either children and a closing tag
        # or a self-closing tag when there is no content.
        showIndent(outfile, level)
        outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
        self.exportAttributes(outfile, level, namespace_, name_='MemberType')
        if not self.hasContent_():
            outfile.write(' />\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, namespace_='', name_='MemberType'):
        # Write the kind and refid XML attributes.
        outfile.write(' kind=%s' % (quote_attrib(self.kind), ))
        outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='MemberType'):
        # Only child is the optional <name> element.
        if self.name is None:
            return
        showIndent(outfile, level)
        outfile.write('<%sname>%s</%sname>\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_))
    def hasContent_(self):
        # The <name> child is the only content this element can carry.
        return self.name is not None
    def exportLiteral(self, outfile, level, name_='MemberType'):
        # Emit this object as Python-literal constructor code.
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        # Keyword-argument lines for each attribute that is set.
        if self.kind is not None:
            showIndent(outfile, level)
            outfile.write('kind = "%s",\n' % (self.kind,))
        if self.refid is not None:
            showIndent(outfile, level)
            outfile.write('refid = %s,\n' % (self.refid,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
    def build(self, node_):
        # Populate from a DOM node: attributes first, then children with
        # any namespace prefix stripped from the tag name.
        self.buildAttributes(node_.attributes)
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(':')[-1]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        kind_attr = attrs.get('kind')
        if kind_attr:
            self.kind = kind_attr.value
        refid_attr = attrs.get('refid')
        if refid_attr:
            self.refid = refid_attr.value
    def buildChildren(self, child_, nodeName_):
        # Concatenate the text content of a <name> child into self.name.
        if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == 'name':
            collected = ''
            for text__content_ in child_.childNodes:
                collected += text__content_.nodeValue
            self.name = collected
# end class MemberType
# Command-line help text printed by usage().
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
Options:
    -s        Use the SAX parser, not the minidom parser.
"""
def usage():
    # Print CLI usage and exit with a failure status (Python 2 syntax).
    print USAGE_TEXT
    sys.exit(1)
def parse(inFileName):
    # Parse the given XML file, echo it back to stdout as XML, and return
    # the root object tree.
    doc = minidom.parse(inFileName)
    rootNode = doc.documentElement
    rootObj = DoxygenType.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0, name_="doxygenindex",
        namespacedef_='')
    return rootObj
def parseString(inString):
    # Same as parse(), but takes the XML document as an in-memory string.
    doc = minidom.parseString(inString)
    rootNode = doc.documentElement
    rootObj = DoxygenType.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0, name_="doxygenindex",
        namespacedef_='')
    return rootObj
def parseLiteral(inFileName):
    # Parse the XML file and emit equivalent Python constructor code on
    # stdout instead of XML; returns the root object tree.
    doc = minidom.parse(inFileName)
    rootNode = doc.documentElement
    rootObj = DoxygenType.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('from index import *\n\n')
    sys.stdout.write('rootObj = doxygenindex(\n')
    rootObj.exportLiteral(sys.stdout, 0, name_="doxygenindex")
    sys.stdout.write(')\n')
    return rootObj
def main():
    # Expect exactly one CLI argument: the XML file to parse.
    cli_args = sys.argv[1:]
    if len(cli_args) != 1:
        usage()
    else:
        parse(cli_args[0])
if __name__ == '__main__':
    # Allow running this generated parser directly from the command line.
    main()
#import pdb
#pdb.run('main()')
|
pdxwebdev/yadapy | refs/heads/master | yada/lib/python2.7/site-packages/bson/code.py | 28 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for representing JavaScript code in BSON.
"""
import collections
from bson.py3compat import string_type
class Code(str):
    """BSON's JavaScript code type.

    Raises :class:`TypeError` if `code` is not an instance of
    :class:`basestring` (:class:`str` in python 3) or `scope`
    is not ``None`` or an instance of :class:`dict`.

    Scope variables can be set by passing a dictionary as the `scope`
    argument or by using keyword arguments. If a variable is set as a
    keyword argument it will override any setting for that variable in
    the `scope` dictionary.

    :Parameters:
      - `code`: string containing JavaScript code to be evaluated
      - `scope` (optional): dictionary representing the scope in which
        `code` should be evaluated - a mapping from identifiers (as
        strings) to values
      - `**kwargs` (optional): scope variables can also be passed as
        keyword arguments
    """

    # BSON type number for JavaScript code (with scope).
    _type_marker = 13

    def __new__(cls, code, scope=None, **kwargs):
        if not isinstance(code, string_type):
            raise TypeError("code must be an "
                            "instance of %s" % (string_type.__name__))
        # BUGFIX: collections.Mapping was removed in Python 3.10; the ABCs
        # live in collections.abc since Python 3.3.  Import lazily so the
        # module keeps working on Python 2 as well.
        try:
            from collections.abc import Mapping
        except ImportError:  # Python 2
            from collections import Mapping
        self = str.__new__(cls, code)
        # Inherit the scope of an existing Code instance, if one was given.
        try:
            self.__scope = code.scope
        except AttributeError:
            self.__scope = {}
        if scope is not None:
            if not isinstance(scope, Mapping):
                raise TypeError("scope must be an instance of dict")
            self.__scope.update(scope)
        # Keyword arguments win over entries in `scope`.
        self.__scope.update(kwargs)
        return self

    @property
    def scope(self):
        """Scope dictionary for this instance.
        """
        return self.__scope

    def __repr__(self):
        return "Code(%s, %r)" % (str.__repr__(self), self.__scope)

    def __eq__(self, other):
        if isinstance(other, Code):
            return (self.__scope, str(self)) == (other.__scope, str(other))
        return False

    # Code instances are mutable through `scope`, so they are unhashable.
    __hash__ = None

    def __ne__(self, other):
        return not self == other
|
40223245/2015cd_midterm | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/_strptime.py | 518 | """Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from re import escape as re_escape
from datetime import (date as datetime_date,
timedelta as datetime_timedelta,
timezone as datetime_timezone)
try:
from _thread import allocate_lock as _thread_allocate_lock
except ImportError:
from _dummy_thread import allocate_lock as _thread_allocate_lock
__all__ = []
def _getlang():
# Figure out what the current language is set to.
return locale.getlocale(locale.LC_TIME)
class LocaleTime(object):
    """Stores and handles locale-specific information related to time.
    ATTRIBUTES:
        f_weekday -- full weekday names (7-item list)
        a_weekday -- abbreviated weekday names (7-item list)
        f_month -- full month names (13-item list; dummy value in [0], which
                   is added by code)
        a_month -- abbreviated month names (13-item list, dummy value in
                   [0], which is added by code)
        am_pm -- AM/PM representation (2-item list)
        LC_date_time -- format string for date/time representation (string)
        LC_date -- format string for date representation (string)
        LC_time -- format string for time representation (string)
        timezone -- daylight- and non-daylight-savings timezone representation
                    (2-item list of sets)
        lang -- Language used by instance (2-item tuple)
    """
    def __init__(self):
        """Set all attributes.
        Order of methods called matters for dependency reasons.
        The locale language is set at the offset and then checked again before
        exiting. This is to make sure that the attributes were not set with a
        mix of information from more than one locale. This would most likely
        happen when using threads where one thread calls a locale-dependent
        function while another thread changes the locale while the function in
        the other thread is still running. Proper coding would call for
        locks to prevent changing the locale while locale-dependent code is
        running. The check here is done in case someone does not think about
        doing this.
        Only other possible issue is if someone changed the timezone and did
        not call tz.tzset . That is an issue for the programmer, though,
        since changing the timezone is worthless without that call.
        """
        self.lang = _getlang()
        self.__calc_weekday()
        self.__calc_month()
        self.__calc_am_pm()
        self.__calc_timezone()
        self.__calc_date_time()
        # Guard against the locale having changed mid-initialization.
        if _getlang() != self.lang:
            raise ValueError("locale changed during initialization")
    def __pad(self, seq, front):
        # Add '' to seq to either the front (is True), else the back.
        seq = list(seq)
        if front:
            seq.insert(0, '')
        else:
            seq.append('')
        return seq
    def __calc_weekday(self):
        # Set self.a_weekday and self.f_weekday using the calendar
        # module.  Names are lower-cased for case-insensitive matching.
        a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
        f_weekday = [calendar.day_name[i].lower() for i in range(7)]
        self.a_weekday = a_weekday
        self.f_weekday = f_weekday
    def __calc_month(self):
        # Set self.f_month and self.a_month using the calendar module.
        # Index 0 is the empty dummy so month numbers index directly.
        a_month = [calendar.month_abbr[i].lower() for i in range(13)]
        f_month = [calendar.month_name[i].lower() for i in range(13)]
        self.a_month = a_month
        self.f_month = f_month
    def __calc_am_pm(self):
        # Set self.am_pm by using time.strftime().
        # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
        # magical; just happened to have used it everywhere else where a
        # static date was needed.
        am_pm = []
        for hour in (1, 22):
            time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
            am_pm.append(time.strftime("%p", time_tuple).lower())
        self.am_pm = am_pm
    def __calc_date_time(self):
        # Set self.date_time, self.date, & self.time by using
        # time.strftime().
        # Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
        # overloaded numbers is minimized. The order in which searches for
        # values within the format string is very important; it eliminates
        # possible ambiguity for what something represents.
        time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
        date_time = [None, None, None]
        date_time[0] = time.strftime("%c", time_tuple).lower()
        date_time[1] = time.strftime("%x", time_tuple).lower()
        date_time[2] = time.strftime("%X", time_tuple).lower()
        replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
                    (self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
                    (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
                    ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
                    ('44', '%M'), ('55', '%S'), ('76', '%j'),
                    ('17', '%d'), ('03', '%m'), ('3', '%m'),
                    # '3' needed for when no leading zero.
                    ('2', '%w'), ('10', '%I')]
        replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
                                                for tz in tz_values])
        for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
            current_format = date_time[offset]
            for old, new in replacement_pairs:
                # Must deal with possible lack of locale info
                # manifesting itself as the empty string (e.g., Swedish's
                # lack of AM/PM info) or a platform returning a tuple of empty
                # strings (e.g., MacOS 9 having timezone as ('','')).
                if old:
                    current_format = current_format.replace(old, new)
            # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
            # 2005-01-03 occurs before the first Monday of the year. Otherwise
            # %U is used.
            time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
            if '00' in time.strftime(directive, time_tuple):
                U_W = '%W'
            else:
                U_W = '%U'
            date_time[offset] = current_format.replace('11', U_W)
        self.LC_date_time = date_time[0]
        self.LC_date = date_time[1]
        self.LC_time = date_time[2]
    def __calc_timezone(self):
        # Set self.timezone by using time.tzname.
        # Do not worry about possibility of time.tzname[0] == timetzname[1]
        # and time.daylight; handle that in strptime .
        # NOTE(review): the tzset() call is commented out here — presumably
        # a Brython platform accommodation; the upstream stdlib calls it.
        #try:
            #time.tzset()
        #except AttributeError:
            #pass
        no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
        if time.daylight:
            has_saving = frozenset([time.tzname[1].lower()])
        else:
            has_saving = frozenset()
        self.timezone = (no_saving, has_saving)
class TimeRE(dict):
    """Handle conversion from format directives to regexes."""
    def __init__(self, locale_time=None):
        """Create keys/values.

        Order of execution is important for dependency reasons.

        :param locale_time: optional LocaleTime instance; a fresh one is
            created when omitted.
        """
        if locale_time:
            self.locale_time = locale_time
        else:
            self.locale_time = LocaleTime()
        base = super()
        base.__init__({
            # The " \d" part of the regex is to make %c from ANSI C work
            'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
            'f': r"(?P<f>[0-9]{1,6})",
            'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
            'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
            'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
            'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
            'M': r"(?P<M>[0-5]\d|\d)",
            'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
            'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
            'w': r"(?P<w>[0-6])",
            # W is set below by using 'U'
            'y': r"(?P<y>\d\d)",
            #XXX: Does 'Y' need to worry about having less or more than
            #     4 digits?
            'Y': r"(?P<Y>\d\d\d\d)",
            'z': r"(?P<z>[+-]\d\d[0-5]\d)",
            'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
            'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
            'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
            'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
            'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
            'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
                                        for tz in tz_names),
                                'Z'),
            '%': '%'})
        base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
        base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
        base.__setitem__('x', self.pattern(self.locale_time.LC_date))
        base.__setitem__('X', self.pattern(self.locale_time.LC_time))
    def __seqToRE(self, to_convert, directive):
        """Convert a list to a regex string for matching a directive.

        Want possible matching values to be from longest to shortest.  This
        prevents the possibility of a match occurring for a value that also
        a substring of a larger value that should have matched (e.g., 'abc'
        matching when 'abcdef' should have been the match).
        """
        to_convert = sorted(to_convert, key=len, reverse=True)
        for value in to_convert:
            if value != '':
                break
        else:
            # All values empty (missing locale info): match nothing.
            return ''
        regex = '|'.join(re_escape(stuff) for stuff in to_convert)
        regex = '(?P<%s>%s' % (directive, regex)
        return '%s)' % regex
    def pattern(self, format):
        """Return regex pattern for the format string.

        Need to make sure that any characters that might be interpreted as
        regex syntax are escaped.
        """
        processed_format = ''
        # The sub() call escapes all characters that might be misconstrued
        # as regex syntax.  Cannot use re.escape since we have to deal with
        # format directives (%m, etc.).
        regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
        format = regex_chars.sub(r"\\\1", format)
        # BUGFIX: use raw strings here.  The old non-raw '\s+' pattern is a
        # deprecated escape (an error in newer CPython), and '\s' in a sub()
        # *replacement* string has been a "bad escape" error since Python
        # 3.7 — the backslash must be doubled to emit a literal \s+.
        whitespace_replacement = re_compile(r"\s+")
        format = whitespace_replacement.sub(r"\\s+", format)
        while '%' in format:
            directive_index = format.index('%')+1
            processed_format = "%s%s%s" % (processed_format,
                                           format[:directive_index-1],
                                           self[format[directive_index]])
            format = format[directive_index+1:]
        return "%s%s" % (processed_format, format)
    def compile(self, format):
        """Return a compiled re object for the format string."""
        return re_compile(self.pattern(format), IGNORECASE)
# Module-level parsing caches shared by every _strptime() call.
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
first_weekday = datetime_date(year, 1, 1).weekday()
# If we are dealing with the %U directive (week starts on Sunday), it's
# easier to just shift the view to Sunday being the first day of the
# week.
if not week_starts_Mon:
first_weekday = (first_weekday + 1) % 7
day_of_week = (day_of_week + 1) % 7
# Need to watch out for a week 0 (when the first day of the year is not
# the same as that specified by %U or %W).
week_0_length = (7 - first_weekday) % 7
if week_of_year == 0:
return 1 + day_of_week - first_weekday
else:
days_to_week = week_0_length + (7 * (week_of_year - 1))
return 1 + days_to_week + day_of_week
def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a 2-tuple consisting of a time struct and an int containing
    the number of microseconds based on the input string and the
    format string."""
    for index, arg in enumerate([data_string, format]):
        if not isinstance(arg, str):
            msg = "strptime() argument {} must be str, not {}"
            raise TypeError(msg.format(index, type(arg)))
    global _TimeRE_cache, _regex_cache
    with _cache_lock:
        # Rebuild the cached TimeRE when the locale changed since it was
        # created: its regexes embed locale-specific month/day/AMPM names.
        if _getlang() != _TimeRE_cache.locale_time.lang:
            _TimeRE_cache = TimeRE()
            _regex_cache.clear()
        if len(_regex_cache) > _CACHE_MAX_SIZE:
            _regex_cache.clear()
        locale_time = _TimeRE_cache.locale_time
        format_regex = _regex_cache.get(format)
        if not format_regex:
            try:
                format_regex = _TimeRE_cache.compile(format)
            # KeyError raised when a bad format is found; can be specified as
            # \\, in which case it was a stray % but with a space after it
            except KeyError as err:
                bad_directive = err.args[0]
                if bad_directive == "\\":
                    bad_directive = "%"
                del err
                raise ValueError("'%s' is a bad directive in format '%s'" %
                                 (bad_directive, format)) from None
            # IndexError only occurs when the format string is "%"
            except IndexError:
                raise ValueError("stray %% in format '%s'" % format) from None
            _regex_cache[format] = format_regex
    found = format_regex.match(data_string)
    if not found:
        raise ValueError("time data %r does not match format %r" %
                         (data_string, format))
    if len(data_string) != found.end():
        raise ValueError("unconverted data remains: %s" %
                          data_string[found.end():])
    year = None
    month = day = 1
    hour = minute = second = fraction = 0
    tz = -1
    tzoffset = None
    # Default to -1 to signify that values not known; not critical to have,
    # though
    week_of_year = -1
    week_of_year_start = -1
    # weekday and julian defaulted to -1 so as to signal need to calculate
    # values
    weekday = julian = -1
    found_dict = found.groupdict()
    for group_key in found_dict.keys():
        # Directives not explicitly handled below:
        #   c, x, X
        #      handled by making out of other directives
        #   U, W
        #      worthless without day of the week
        if group_key == 'y':
            year = int(found_dict['y'])
            # Open Group specification for strptime() states that a %y
            #value in the range of [00, 68] is in the century 2000, while
            #[69,99] is in the century 1900
            if year <= 68:
                year += 2000
            else:
                year += 1900
        elif group_key == 'Y':
            year = int(found_dict['Y'])
        elif group_key == 'm':
            month = int(found_dict['m'])
        elif group_key == 'B':
            month = locale_time.f_month.index(found_dict['B'].lower())
        elif group_key == 'b':
            month = locale_time.a_month.index(found_dict['b'].lower())
        elif group_key == 'd':
            day = int(found_dict['d'])
        elif group_key == 'H':
            hour = int(found_dict['H'])
        elif group_key == 'I':
            hour = int(found_dict['I'])
            ampm = found_dict.get('p', '').lower()
            # If there was no AM/PM indicator, we'll treat this like AM
            if ampm in ('', locale_time.am_pm[0]):
                # We're in AM so the hour is correct unless we're
                # looking at 12 midnight.
                # 12 midnight == 12 AM == hour 0
                if hour == 12:
                    hour = 0
            elif ampm == locale_time.am_pm[1]:
                # We're in PM so we need to add 12 to the hour unless
                # we're looking at 12 noon.
                # 12 noon == 12 PM == hour 12
                if hour != 12:
                    hour += 12
        elif group_key == 'M':
            minute = int(found_dict['M'])
        elif group_key == 'S':
            second = int(found_dict['S'])
        elif group_key == 'f':
            s = found_dict['f']
            # Pad to always return microseconds.
            s += "0" * (6 - len(s))
            fraction = int(s)
        elif group_key == 'A':
            weekday = locale_time.f_weekday.index(found_dict['A'].lower())
        elif group_key == 'a':
            weekday = locale_time.a_weekday.index(found_dict['a'].lower())
        elif group_key == 'w':
            weekday = int(found_dict['w'])
            # %w counts Sunday as 0; internally Monday is 0.
            if weekday == 0:
                weekday = 6
            else:
                weekday -= 1
        elif group_key == 'j':
            julian = int(found_dict['j'])
        elif group_key in ('U', 'W'):
            week_of_year = int(found_dict[group_key])
            if group_key == 'U':
                # U starts week on Sunday.
                week_of_year_start = 6
            else:
                # W starts week on Monday.
                week_of_year_start = 0
        elif group_key == 'z':
            z = found_dict['z']
            tzoffset = int(z[1:3]) * 60 + int(z[3:5])
            if z.startswith("-"):
                tzoffset = -tzoffset
        elif group_key == 'Z':
            # Since -1 is default value only need to worry about setting tz if
            # it can be something other than -1.
            found_zone = found_dict['Z'].lower()
            for value, tz_values in enumerate(locale_time.timezone):
                if found_zone in tz_values:
                    # Deal with bad locale setup where timezone names are the
                    # same and yet time.daylight is true; too ambiguous to
                    # be able to tell what timezone has daylight savings
                    if (time.tzname[0] == time.tzname[1] and
                        time.daylight and found_zone not in ("utc", "gmt")):
                        break
                    else:
                        tz = value
                        break
    leap_year_fix = False
    if year is None and month == 2 and day == 29:
        year = 1904 # 1904 is first leap year of 20th century
        leap_year_fix = True
    elif year is None:
        year = 1900
    # If we know the week of the year and what day of that week, we can figure
    # out the Julian day of the year.
    if julian == -1 and week_of_year != -1 and weekday != -1:
        week_starts_Mon = True if week_of_year_start == 0 else False
        julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
                                            week_starts_Mon)
    # Cannot pre-calculate datetime_date() since can change in Julian
    # calculation and thus could have different value for the day of the week
    # calculation.
    if julian == -1:
        # Need to add 1 to result since first day of the year is 1, not 0.
        julian = datetime_date(year, month, day).toordinal() - \
                  datetime_date(year, 1, 1).toordinal() + 1
    else: # Assume that if they bothered to include Julian day it will
          # be accurate.
        datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
        year = datetime_result.year
        month = datetime_result.month
        day = datetime_result.day
    if weekday == -1:
        weekday = datetime_date(year, month, day).weekday()
    # Add timezone info
    tzname = found_dict.get("Z")
    if tzoffset is not None:
        gmtoff = tzoffset * 60
    else:
        gmtoff = None
    if leap_year_fix:
        # the caller didn't supply a year but asked for Feb 29th. We couldn't
        # use the default of 1900 for computations. We set it back to ensure
        # that February 29th is smaller than March 1st.
        year = 1900
    return (year, month, day,
            hour, minute, second,
            weekday, julian, tz, tzname, gmtoff), fraction
def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a time struct based on the input string and the
    format string."""
    parsed, _ = _strptime(data_string, format)
    # Drop the trailing gmtoff/tzname fields that struct_time cannot hold.
    return time.struct_time(parsed[:time._STRUCT_TM_ITEMS])
def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a class cls instance based on the input string and the
    format string."""
    tt, fraction = _strptime(data_string, format)
    tzname, gmtoff = tt[-2:]
    args = tt[:6] + (fraction,)
    if gmtoff is not None:
        # Build a fixed-offset tzinfo, named when the zone name was parsed.
        tzdelta = datetime_timedelta(seconds=gmtoff)
        if tzname:
            args += (datetime_timezone(tzdelta, tzname),)
        else:
            args += (datetime_timezone(tzdelta),)
    return cls(*args)
|
jcftang/ansible | refs/heads/devel | lib/ansible/modules/cloud/openstack/os_security_group.py | 29 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: os_security_group
short_description: Add/Delete security groups from an OpenStack cloud.
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty)"
version_added: "2.0"
description:
- Add or Remove security groups from an OpenStack cloud.
options:
name:
description:
- Name that has to be given to the security group. This module
requires that security group names be unique.
required: true
description:
description:
- Long description of the purpose of the security group
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Create a security group
- os_security_group:
cloud: mordred
state: present
name: foo
description: security group for foo servers
# Update the existing 'foo' security group description
- os_security_group:
cloud: mordred
state: present
name: foo
description: updated description for the foo security group
'''
def _needs_update(module, secgroup):
"""Check for differences in the updatable values.
NOTE: We don't currently allow name updates.
"""
if secgroup['description'] != module.params['description']:
return True
return False
def _system_state_change(module, secgroup):
state = module.params['state']
if state == 'present':
if not secgroup:
return True
return _needs_update(module, secgroup)
if state == 'absent' and secgroup:
return True
return False
def main():
    """Entry point: ensure the named security group is present or absent."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        description=dict(default=''),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    name = module.params['name']
    state = module.params['state']
    description = module.params['description']
    try:
        cloud = shade.openstack_cloud(**module.params)
        secgroup = cloud.get_security_group(name)
        # Check mode: only report whether a change *would* occur.
        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, secgroup))
        changed = False
        if state == 'present':
            # Create the group when missing, otherwise update a stale
            # description; no-op when everything already matches.
            if not secgroup:
                secgroup = cloud.create_security_group(name, description)
                changed = True
            else:
                if _needs_update(module, secgroup):
                    secgroup = cloud.update_security_group(
                        secgroup['id'], description=description)
                    changed = True
            module.exit_json(
                changed=changed, id=secgroup['id'], secgroup=secgroup)
        if state == 'absent':
            if secgroup:
                cloud.delete_security_group(secgroup['id'])
                changed = True
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        # Surface any cloud-side failure as a module failure.
        module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
# NOTE(review): wildcard imports at the bottom of the file are the
# historical Ansible module convention; they must precede the entry call.
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == "__main__":
    main()
|
kushalbhola/MyStuff | refs/heads/master | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/series/test_api.py | 2 | from collections import OrderedDict
import pydoc
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Series,
TimedeltaIndex,
date_range,
period_range,
timedelta_range,
)
from pandas.core.arrays import PeriodArray
from pandas.core.indexes.datetimes import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal, ensure_clean
import pandas.io.formats.printing as printing
from .common import TestData
class SharedWithSparse:
"""
A collection of tests Series and SparseSeries can share.
In generic tests on this class, use ``self._assert_series_equal()``
which is implemented in sub-classes.
"""
    def _assert_series_equal(self, left, right):
        """Dispatch to series class dependent assertion"""
        # Abstract hook: concrete subclasses supply the dense/sparse-aware
        # equality assertion.
        raise NotImplementedError
def test_scalarop_preserve_name(self):
result = self.ts * 2
assert result.name == self.ts.name
def test_copy_name(self):
result = self.ts.copy()
assert result.name == self.ts.name
    def test_copy_index_name_checking(self):
        # don't want to be able to modify the index stored elsewhere after
        # making a copy
        self.ts.index.name = None
        assert self.ts.index.name is None
        assert self.ts is self.ts
        cp = self.ts.copy()
        cp.index.name = "foo"
        # Renaming the copy's index must not leak back into the original.
        printing.pprint_thing(self.ts.index.name)
        assert self.ts.index.name is None
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
assert result.name == self.ts.name
    def test_binop_maybe_preserve_name(self):
        # names match, preserve
        result = self.ts * self.ts
        assert result.name == self.ts.name
        result = self.ts.mul(self.ts)
        assert result.name == self.ts.name
        # Partial alignment with a same-named operand still preserves it.
        result = self.ts * self.ts[:-2]
        assert result.name == self.ts.name
        # names don't match, don't preserve
        cp = self.ts.copy()
        cp.name = "something else"
        result = self.ts + cp
        assert result.name is None
        result = self.ts.add(cp)
        assert result.name is None
        # Exercise every arithmetic method plus its reflected variant.
        ops = ["add", "sub", "mul", "div", "truediv", "floordiv", "mod", "pow"]
        ops = ops + ["r" + op for op in ops]
        for op in ops:
            # names match, preserve
            s = self.ts.copy()
            result = getattr(s, op)(s)
            assert result.name == self.ts.name
            # names don't match, don't preserve
            cp = self.ts.copy()
            cp.name = "changed"
            result = getattr(s, op)(cp)
            assert result.name is None
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
assert result.name == self.ts.name
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
assert result.name == self.ts.name
result = self.ts[[0, 2, 4]]
assert result.name == self.ts.name
result = self.ts[5:10]
assert result.name == self.ts.name
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
assert result.name == self.ts.name
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
assert result.name == self.ts.name
    @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
    @pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
    def test_to_sparse_pass_name(self):
        # to_sparse is deprecated (hence the filtered FutureWarnings) but must
        # still propagate ``name`` for as long as it exists
        result = self.ts.to_sparse()
        assert result.name == self.ts.name
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = self.series_klass(d)
expected = self.series_klass(d, index=sorted(d.keys()))
self._assert_series_equal(result, expected)
result = self.series_klass(d, index=["b", "c", "d", "a"])
expected = self.series_klass([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
self._assert_series_equal(result, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = self.series_klass(data)
expected = self.series_klass(dict(data.items()))
self._assert_series_equal(series, expected)
def test_constructor_ordereddict(self):
# GH3283
data = OrderedDict(
("col{i}".format(i=i), np.random.random()) for i in range(12)
)
series = self.series_klass(data)
expected = self.series_klass(list(data.values()), list(data.keys()))
self._assert_series_equal(series, expected)
# Test with subclass
class A(OrderedDict):
pass
series = self.series_klass(A(data))
self._assert_series_equal(series, expected)
    def test_constructor_dict_multiindex(self):
        # all-tuple keys should produce a MultiIndex, sorted by key
        d = {("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0}
        _d = sorted(d.items())
        result = self.series_klass(d)
        expected = self.series_klass(
            [x[1] for x in _d], index=pd.MultiIndex.from_tuples([x[0] for x in _d])
        )
        self._assert_series_equal(result, expected)
        # mixing a plain-string key in prevents tuple->MultiIndex inference,
        # so the expectation uses a flat object Index (tupleize_cols=False)
        d["z"] = 111.0
        _d.insert(0, ("z", d["z"]))
        result = self.series_klass(d)
        expected = self.series_klass(
            [x[1] for x in _d], index=pd.Index([x[0] for x in _d], tupleize_cols=False)
        )
        # dict iteration order is not the comparison order; align via reindex
        result = result.reindex(index=expected.index)
        self._assert_series_equal(result, expected)
def test_constructor_dict_timedelta_index(self):
# GH #12169 : Resample category data with timedelta index
# construct Series from dict as data and TimedeltaIndex as index
# will result NaN in result Series data
expected = self.series_klass(
data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s")
)
result = self.series_klass(
data={
pd.to_timedelta(0, unit="s"): "A",
pd.to_timedelta(10, unit="s"): "B",
pd.to_timedelta(20, unit="s"): "C",
},
index=pd.to_timedelta([0, 10, 20], unit="s"),
)
self._assert_series_equal(result, expected)
    @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
    def test_from_array_deprecated(self):
        # from_array must emit a deprecation FutureWarning when called;
        # multiple FutureWarnings, so can't assert stacklevel
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            self.series_klass.from_array([1, 2, 3])
    def test_sparse_accessor_updates_on_inplace(self):
        # the .sparse accessor must reflect the post-drop values rather than
        # serving a stale cached view of the original data
        s = pd.Series([1, 1, 2, 3], dtype="Sparse[int]")
        s.drop([0, 1], inplace=True)
        assert s.sparse.density == 1.0
class TestSeriesMisc(TestData, SharedWithSparse):
    """Dense-``Series`` variant of the shared tests plus assorted misc checks."""
    series_klass = Series
    # SharedWithSparse tests use generic, series_klass-agnostic assertion
    _assert_series_equal = staticmethod(tm.assert_series_equal)
    def test_tab_completion(self):
        # GH 9910
        s = Series(list("abcd"))
        # Series of str values should have .str but not .dt/.cat in __dir__
        assert "str" in dir(s)
        assert "dt" not in dir(s)
        assert "cat" not in dir(s)
        # similarly for .dt
        s = Series(date_range("1/1/2015", periods=5))
        assert "dt" in dir(s)
        assert "str" not in dir(s)
        assert "cat" not in dir(s)
        # Similarly for .cat, but with the twist that str and dt should be
        # there if the categories are of that type first cat and str.
        s = Series(list("abbcd"), dtype="category")
        assert "cat" in dir(s)
        assert "str" in dir(s)  # as it is a string categorical
        assert "dt" not in dir(s)
        # similar to cat and str
        s = Series(date_range("1/1/2015", periods=5)).astype("category")
        assert "cat" in dir(s)
        assert "str" not in dir(s)
        assert "dt" in dir(s)  # as it is a datetime categorical
    def test_tab_completion_with_categorical(self):
        # test the tab completion display
        ok_for_cat = [
            "name",
            "index",
            "categorical",
            "categories",
            "codes",
            "ordered",
            "set_categories",
            "add_categories",
            "remove_categories",
            "rename_categories",
            "reorder_categories",
            "remove_unused_categories",
            "as_ordered",
            "as_unordered",
        ]
        def get_dir(s):
            # public (non-underscore) attributes offered by the accessor
            results = [r for r in s.cat.__dir__() if not r.startswith("_")]
            return list(sorted(set(results)))
        s = Series(list("aabbcde")).astype("category")
        results = get_dir(s)
        tm.assert_almost_equal(results, list(sorted(set(ok_for_cat))))
    @pytest.mark.parametrize(
        "index",
        [
            tm.makeUnicodeIndex(10),
            tm.makeStringIndex(10),
            tm.makeCategoricalIndex(10),
            Index(["foo", "bar", "baz"] * 2),
            tm.makeDateIndex(10),
            tm.makePeriodIndex(10),
            tm.makeTimedeltaIndex(10),
            tm.makeIntIndex(10),
            tm.makeUIntIndex(10),
            tm.makeIntIndex(10),
            tm.makeFloatIndex(10),
            Index([True, False]),
            Index(["a{}".format(i) for i in range(101)]),
            pd.MultiIndex.from_tuples(zip("ABCD", "EFGH")),
            pd.MultiIndex.from_tuples(zip([0, 1, 2, 3], "EFGH")),
        ],
    )
    def test_index_tab_completion(self, index):
        # dir contains string-like values of the Index.
        s = pd.Series(index=index)
        dir_s = dir(s)
        # only the first 100 identifier-like labels are exposed via __dir__
        for i, x in enumerate(s.index.unique(level=0)):
            if i < 100:
                assert not isinstance(x, str) or not x.isidentifier() or x in dir_s
            else:
                assert x not in dir_s
    def test_not_hashable(self):
        s_empty = Series()
        s = Series([1])
        msg = "'Series' objects are mutable, thus they cannot be hashed"
        with pytest.raises(TypeError, match=msg):
            hash(s_empty)
        with pytest.raises(TypeError, match=msg):
            hash(s)
    def test_contains(self):
        # `in` on a Series checks the index labels
        tm.assert_contains_all(self.ts.index, self.ts)
    def test_iter(self):
        # iterating a Series yields its values in positional order
        for i, val in enumerate(self.series):
            assert val == self.series[i]
        for i, val in enumerate(self.ts):
            assert val == self.ts[i]
    def test_keys(self):
        # HACK: By doing this in two stages, we avoid 2to3 wrapping the call
        # to .keys() in a list()
        getkeys = self.ts.keys
        assert getkeys() is self.ts.index
    def test_values(self):
        tm.assert_almost_equal(self.ts.values, self.ts, check_dtype=False)
    def test_iteritems(self):
        for idx, val in self.series.iteritems():
            assert val == self.series[idx]
        for idx, val in self.ts.iteritems():
            assert val == self.ts[idx]
        # assert is lazy (generators don't define reverse, lists do)
        assert not hasattr(self.series.iteritems(), "reverse")
    def test_items(self):
        for idx, val in self.series.items():
            assert val == self.series[idx]
        for idx, val in self.ts.items():
            assert val == self.ts[idx]
        # assert is lazy (generators don't define reverse, lists do)
        assert not hasattr(self.series.items(), "reverse")
    def test_raise_on_info(self):
        # Series deliberately has no DataFrame-style .info()
        s = Series(np.random.randn(10))
        msg = "'Series' object has no attribute 'info'"
        with pytest.raises(AttributeError, match=msg):
            s.info()
    def test_copy(self):
        for deep in [None, False, True]:
            s = Series(np.arange(10), dtype="float64")
            # default deep is True
            if deep is None:
                s2 = s.copy()
            else:
                s2 = s.copy(deep=deep)
            s2[::2] = np.NaN
            if deep is None or deep is True:
                # Did not modify original Series
                assert np.isnan(s2[0])
                assert not np.isnan(s[0])
            else:
                # we DID modify the original Series
                assert np.isnan(s2[0])
                assert np.isnan(s[0])
    def test_copy_tzaware(self):
        # GH#11794
        # copy of tz-aware
        expected = Series([Timestamp("2012/01/01", tz="UTC")])
        expected2 = Series([Timestamp("1999/01/01", tz="UTC")])
        for deep in [None, False, True]:
            s = Series([Timestamp("2012/01/01", tz="UTC")])
            if deep is None:
                s2 = s.copy()
            else:
                s2 = s.copy(deep=deep)
            s2[0] = pd.Timestamp("1999/01/01", tz="UTC")
            # default deep is True
            if deep is None or deep is True:
                # Did not modify original Series
                assert_series_equal(s2, expected2)
                assert_series_equal(s, expected)
            else:
                # we DID modify the original Series
                assert_series_equal(s2, expected2)
                assert_series_equal(s, expected2)
    def test_axis_alias(self):
        # "rows" is accepted as an alias of axis 0 / "index"
        s = Series([1, 2, np.nan])
        assert_series_equal(s.dropna(axis="rows"), s.dropna(axis="index"))
        assert s.dropna().sum("rows") == 3
        assert s._get_axis_number("rows") == 0
        assert s._get_axis_name("rows") == "index"
    def test_class_axis(self):
        # https://github.com/pandas-dev/pandas/issues/18147
        # no exception and no empty docstring
        assert pydoc.getdoc(Series.index)
    def test_numpy_unique(self):
        # it works!
        np.unique(self.ts)
    def test_ndarray_compat(self):
        # test numpy compat with Series as sub-class of NDFrame
        tsdf = DataFrame(
            np.random.randn(1000, 3),
            columns=["A", "B", "C"],
            index=date_range("1/1/2000", periods=1000),
        )
        def f(x):
            # value at the position of the column's maximum
            return x[x.idxmax()]
        result = tsdf.apply(f)
        expected = tsdf.max()
        tm.assert_series_equal(result, expected)
        # .item()
        with tm.assert_produces_warning(FutureWarning):
            s = Series([1])
            result = s.item()
            assert result == 1
            assert s.item() == s.iloc[0]
        # using an ndarray like function
        s = Series(np.random.randn(10))
        result = Series(np.ones_like(s))
        expected = Series(1, index=range(10), dtype="float64")
        tm.assert_series_equal(result, expected)
        # ravel
        s = Series(np.random.randn(10))
        tm.assert_almost_equal(s.ravel(order="F"), s.values.ravel(order="F"))
        # compress
        # GH 6658
        s = Series([0, 1.0, -1], index=list("abc"))
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = np.compress(s > 0, s)
        tm.assert_series_equal(result, Series([1.0], index=["b"]))
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = np.compress(s < -1, s)
        # result empty Index(dtype=object) as the same as original
        exp = Series([], dtype="float64", index=Index([], dtype="object"))
        tm.assert_series_equal(result, exp)
        s = Series([0, 1.0, -1], index=[0.1, 0.2, 0.3])
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = np.compress(s > 0, s)
        tm.assert_series_equal(result, Series([1.0], index=[0.2]))
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = np.compress(s < -1, s)
        # result empty Float64Index as the same as original
        exp = Series([], dtype="float64", index=Index([], dtype="float64"))
        tm.assert_series_equal(result, exp)
    def test_str_accessor_updates_on_inplace(self):
        # the .str accessor must see post-drop data, not a stale cache
        s = pd.Series(list("abc"))
        s.drop([0], inplace=True)
        assert len(s.str.lower()) == 2
    def test_str_attribute(self):
        # GH9068
        methods = ["strip", "rstrip", "lstrip"]
        s = Series([" jack", "jill ", " jesse ", "frank"])
        for method in methods:
            expected = Series([getattr(str, method)(x) for x in s.values])
            assert_series_equal(getattr(Series.str, method)(s.str), expected)
        # str accessor only valid with string values
        s = Series(range(5))
        with pytest.raises(AttributeError, match="only use .str accessor"):
            s.str.repeat(2)
    def test_empty_method(self):
        s_empty = pd.Series()
        assert s_empty.empty
        # a series with data OR an index is not considered empty
        for full_series in [pd.Series([1]), pd.Series(index=[1])]:
            assert not full_series.empty
    def test_tab_complete_warning(self, ip):
        # https://github.com/pandas-dev/pandas/issues/16409
        pytest.importorskip("IPython", minversion="6.0.0")
        from IPython.core.completer import provisionalcompleter
        code = "import pandas as pd; s = pd.Series()"
        ip.run_code(code)
        with tm.assert_produces_warning(None):
            with provisionalcompleter("ignore"):
                list(ip.Completer.completions("s.", 1))
    def test_integer_series_size(self):
        # GH 25580
        s = Series(range(9))
        assert s.size == 9
        s = Series(range(9), dtype="Int64")
        assert s.size == 9
    def test_get_values_deprecation(self):
        # get_values is deprecated in favor of .values / .to_numpy()
        s = Series(range(9))
        with tm.assert_produces_warning(FutureWarning):
            res = s.get_values()
        tm.assert_numpy_array_equal(res, s.values)
class TestCategoricalSeries:
    """Tests for the ``.cat`` accessor and categorical-backed Series."""
    @pytest.mark.parametrize(
        "method",
        [
            lambda x: x.cat.set_categories([1, 2, 3]),
            lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
            lambda x: x.cat.rename_categories([1, 2, 3]),
            lambda x: x.cat.remove_unused_categories(),
            lambda x: x.cat.remove_categories([2]),
            lambda x: x.cat.add_categories([4]),
            lambda x: x.cat.as_ordered(),
            lambda x: x.cat.as_unordered(),
        ],
    )
    def test_getname_categorical_accessor(self, method):
        # GH 17509: every .cat method must propagate the Series name
        s = Series([1, 2, 3], name="A").astype("category")
        expected = "A"
        result = method(s).name
        assert result == expected
    def test_cat_accessor(self):
        s = Series(Categorical(["a", "b", np.nan, "a"]))
        tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
        # NOTE(review): the trailing ", False" is a (useless) assert message,
        # not a second condition
        assert not s.cat.ordered, False
        exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
        s.cat.set_categories(["b", "a"], inplace=True)
        tm.assert_categorical_equal(s.values, exp)
        res = s.cat.set_categories(["b", "a"])
        tm.assert_categorical_equal(res.values, exp)
        s[:] = "a"
        s = s.cat.remove_unused_categories()
        tm.assert_index_equal(s.cat.categories, Index(["a"]))
    def test_cat_accessor_api(self):
        # GH 9322
        from pandas.core.arrays.categorical import CategoricalAccessor
        # accessing .cat on the class returns the accessor class itself
        assert Series.cat is CategoricalAccessor
        s = Series(list("aabbcde")).astype("category")
        assert isinstance(s.cat, CategoricalAccessor)
        invalid = Series([1])
        with pytest.raises(AttributeError, match="only use .cat accessor"):
            invalid.cat
        assert not hasattr(invalid, "cat")
    def test_cat_accessor_no_new_attributes(self):
        # https://github.com/pandas-dev/pandas/issues/10673
        c = Series(list("aabbcde")).astype("category")
        with pytest.raises(AttributeError, match="You cannot add any new attribute"):
            c.cat.xlabel = "a"
    def test_cat_accessor_updates_on_inplace(self):
        # the accessor must see post-drop data, not a stale cached view
        s = Series(list("abc")).astype("category")
        s.drop(0, inplace=True)
        s.cat.remove_unused_categories(inplace=True)
        assert len(s.cat.categories) == 2
    def test_categorical_delegations(self):
        # invalid accessor
        msg = r"Can only use \.cat accessor with a 'category' dtype"
        with pytest.raises(AttributeError, match=msg):
            Series([1, 2, 3]).cat
        with pytest.raises(AttributeError, match=msg):
            Series([1, 2, 3]).cat()
        with pytest.raises(AttributeError, match=msg):
            Series(["a", "b", "c"]).cat
        with pytest.raises(AttributeError, match=msg):
            Series(np.arange(5.0)).cat
        with pytest.raises(AttributeError, match=msg):
            Series([Timestamp("20130101")]).cat
        # Series should delegate calls to '.categories', '.codes', '.ordered'
        # and the methods '.set_categories()' 'drop_unused_categories()' to the
        # categorical
        s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
        exp_categories = Index(["a", "b", "c"])
        tm.assert_index_equal(s.cat.categories, exp_categories)
        s.cat.categories = [1, 2, 3]
        exp_categories = Index([1, 2, 3])
        tm.assert_index_equal(s.cat.categories, exp_categories)
        exp_codes = Series([0, 1, 2, 0], dtype="int8")
        tm.assert_series_equal(s.cat.codes, exp_codes)
        assert s.cat.ordered
        s = s.cat.as_unordered()
        assert not s.cat.ordered
        s.cat.as_ordered(inplace=True)
        assert s.cat.ordered
        # reorder
        s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
        exp_categories = Index(["c", "b", "a"])
        exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
        s = s.cat.set_categories(["c", "b", "a"])
        tm.assert_index_equal(s.cat.categories, exp_categories)
        tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
        tm.assert_numpy_array_equal(s.__array__(), exp_values)
        # remove unused categories
        s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"]))
        exp_categories = Index(["a", "b"])
        exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
        s = s.cat.remove_unused_categories()
        tm.assert_index_equal(s.cat.categories, exp_categories)
        tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
        tm.assert_numpy_array_equal(s.__array__(), exp_values)
        # This method is likely to be confused, so test that it raises an error
        # on wrong inputs:
        msg = "'Series' object has no attribute 'set_categories'"
        with pytest.raises(AttributeError, match=msg):
            s.set_categories([4, 3, 2, 1])
        # right: s.cat.set_categories([4,3,2,1])
        # GH18862 (let Series.cat.rename_categories take callables)
        s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
        result = s.cat.rename_categories(lambda x: x.upper())
        expected = Series(
            Categorical(["A", "B", "C", "A"], categories=["A", "B", "C"], ordered=True)
        )
        tm.assert_series_equal(result, expected)
    def test_dt_accessor_api_for_categorical(self):
        # https://github.com/pandas-dev/pandas/issues/10661
        from pandas.core.indexes.accessors import Properties
        s_dr = Series(date_range("1/1/2015", periods=5, tz="MET"))
        c_dr = s_dr.astype("category")
        s_pr = Series(period_range("1/1/2015", freq="D", periods=5))
        c_pr = s_pr.astype("category")
        s_tdr = Series(timedelta_range("1 days", "10 days"))
        c_tdr = s_tdr.astype("category")
        # only testing field (like .day)
        # and bool (is_month_start)
        get_ops = lambda x: x._datetimelike_ops
        test_data = [
            ("Datetime", get_ops(DatetimeIndex), s_dr, c_dr),
            ("Period", get_ops(PeriodArray), s_pr, c_pr),
            ("Timedelta", get_ops(TimedeltaIndex), s_tdr, c_tdr),
        ]
        assert isinstance(c_dr.dt, Properties)
        # methods needing specific arguments, tried only when available
        special_func_defs = [
            ("strftime", ("%Y-%m-%d",), {}),
            ("tz_convert", ("EST",), {}),
            ("round", ("D",), {}),
            ("floor", ("D",), {}),
            ("ceil", ("D",), {}),
            ("asfreq", ("D",), {}),
            # ('tz_localize', ("UTC",), {}),
        ]
        _special_func_names = [f[0] for f in special_func_defs]
        # the series is already localized
        _ignore_names = ["tz_localize", "components"]
        for name, attr_names, s, c in test_data:
            # zero-argument .dt methods not covered above
            func_names = [
                f
                for f in dir(s.dt)
                if not (
                    f.startswith("_")
                    or f in attr_names
                    or f in _special_func_names
                    or f in _ignore_names
                )
            ]
            func_defs = [(f, (), {}) for f in func_names]
            for f_def in special_func_defs:
                if f_def[0] in dir(s.dt):
                    func_defs.append(f_def)
            # categorical-backed .dt results must match the dense equivalents
            for func, args, kwargs in func_defs:
                with warnings.catch_warnings():
                    if func == "to_period":
                        # dropping TZ
                        warnings.simplefilter("ignore", UserWarning)
                    res = getattr(c.dt, func)(*args, **kwargs)
                    exp = getattr(s.dt, func)(*args, **kwargs)
                if isinstance(res, DataFrame):
                    tm.assert_frame_equal(res, exp)
                elif isinstance(res, Series):
                    tm.assert_series_equal(res, exp)
                else:
                    tm.assert_almost_equal(res, exp)
            for attr in attr_names:
                try:
                    res = getattr(c.dt, attr)
                    exp = getattr(s.dt, attr)
                except Exception as e:
                    # surface which combination failed before re-raising
                    print(name, attr)
                    raise e
            if isinstance(res, DataFrame):
                tm.assert_frame_equal(res, exp)
            elif isinstance(res, Series):
                tm.assert_series_equal(res, exp)
            else:
                tm.assert_almost_equal(res, exp)
        invalid = Series([1, 2, 3]).astype("category")
        msg = "Can only use .dt accessor with datetimelike"
        with pytest.raises(AttributeError, match=msg):
            invalid.dt
        assert not hasattr(invalid, "str")
|
AndrewIngram/django-extra-views | refs/heads/master | extra_views_tests/urls.py | 1 | from django.urls import path
from django.views.generic import TemplateView
from .formsets import AddressFormSet
from .views import (
AddressFormSetView,
AddressFormSetViewKwargs,
AddressFormSetViewNamed,
EventCalendarView,
FormAndFormSetOverrideView,
ItemModelFormSetExcludeView,
ItemModelFormSetView,
OrderCreateNamedView,
OrderCreateView,
OrderItemFormSetView,
OrderTagsView,
OrderUpdateView,
PagedModelFormSetView,
SearchableItemListView,
SortableItemListView,
)
# URL routes exercising each class-based view flavor shipped by extra_views:
# plain formsets, model formsets, inline formsets, generic inlines, and the
# sortable/searchable list mixins.
urlpatterns = [
    # plain (non-model) formset views
    path("formset/simple/", AddressFormSetView.as_view()),
    path("formset/simple/named/", AddressFormSetViewNamed.as_view()),
    path("formset/simple/kwargs/", AddressFormSetViewKwargs.as_view()),
    path(
        "formset/simple_redirect/",
        AddressFormSetView.as_view(success_url="/formset/simple_redirect/valid/"),
    ),
    path(
        "formset/simple_redirect/valid/",
        TemplateView.as_view(template_name="extra_views/success.html"),
    ),
    path("formset/custom/", AddressFormSetView.as_view(formset_class=AddressFormSet)),
    # model formset views
    path("modelformset/simple/", ItemModelFormSetView.as_view()),
    path("modelformset/exclude/", ItemModelFormSetExcludeView.as_view()),
    path("modelformset/custom/", FormAndFormSetOverrideView.as_view()),
    path("modelformset/paged/", PagedModelFormSetView.as_view()),
    # inline formsets and create/update-with-inlines views
    path("inlineformset/<int:pk>/", OrderItemFormSetView.as_view()),
    path("inlines/<int:pk>/new/", OrderCreateView.as_view()),
    path("inlines/new/", OrderCreateView.as_view()),
    path("inlines/new/named/", OrderCreateNamedView.as_view()),
    path("inlines/<int:pk>/", OrderUpdateView.as_view()),
    path("genericinlineformset/<int:pk>/", OrderTagsView.as_view()),
    # list-view mixins: sorting, calendar, and search variants
    path("sortable/<str:flag>/", SortableItemListView.as_view()),
    path("events/<int:year>/<str:month>/", EventCalendarView.as_view()),
    path("searchable/", SearchableItemListView.as_view()),
    path(
        "searchable/predefined_query/",
        SearchableItemListView.as_view(define_query=True),
    ),
    path("searchable/exact_query/", SearchableItemListView.as_view(exact_query=True)),
    path("searchable/wrong_lookup/", SearchableItemListView.as_view(wrong_lookup=True)),
]
|
RossBrunton/django | refs/heads/master | tests/utils_tests/test_feedgenerator.py | 163 | from __future__ import unicode_literals
import datetime
import unittest
from django.utils import feedgenerator
from django.utils.timezone import get_fixed_timezone
class FeedgeneratorTest(unittest.TestCase):
    """
    Tests for the low-level syndication feed framework.
    """
    def test_get_tag_uri(self):
        """
        Test get_tag_uri() correctly generates TagURIs.
        """
        self.assertEqual(
            feedgenerator.get_tag_uri('http://example.org/foo/bar#headline', datetime.date(2004, 10, 25)),
            'tag:example.org,2004-10-25:/foo/bar/headline')
    def test_get_tag_uri_with_port(self):
        """
        Test that get_tag_uri() correctly generates TagURIs from URLs with port
        numbers.
        """
        # the port must be stripped from the authority part of the tag URI
        self.assertEqual(
            feedgenerator.get_tag_uri('http://www.example.org:8000/2008/11/14/django#headline', datetime.datetime(2008, 11, 14, 13, 37, 0)),
            'tag:www.example.org,2008-11-14:/2008/11/14/django/headline')
    def test_rfc2822_date(self):
        """
        Test rfc2822_date() correctly formats datetime objects.
        """
        # naive datetimes render with the "-0000" offset
        self.assertEqual(
            feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
            "Fri, 14 Nov 2008 13:37:00 -0000"
        )
    def test_rfc2822_date_with_timezone(self):
        """
        Test rfc2822_date() correctly formats datetime objects with tzinfo.
        """
        self.assertEqual(
            feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=get_fixed_timezone(60))),
            "Fri, 14 Nov 2008 13:37:00 +0100"
        )
    def test_rfc2822_date_without_time(self):
        """
        Test rfc2822_date() correctly formats date objects.
        """
        # bare dates are promoted to midnight
        self.assertEqual(
            feedgenerator.rfc2822_date(datetime.date(2008, 11, 14)),
            "Fri, 14 Nov 2008 00:00:00 -0000"
        )
    def test_rfc3339_date(self):
        """
        Test rfc3339_date() correctly formats datetime objects.
        """
        # naive datetimes render with the "Z" (UTC) suffix
        self.assertEqual(
            feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
            "2008-11-14T13:37:00Z"
        )
    def test_rfc3339_date_with_timezone(self):
        """
        Test rfc3339_date() correctly formats datetime objects with tzinfo.
        """
        self.assertEqual(
            feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=get_fixed_timezone(120))),
            "2008-11-14T13:37:00+02:00"
        )
    def test_rfc3339_date_without_time(self):
        """
        Test rfc3339_date() correctly formats date objects.
        """
        self.assertEqual(
            feedgenerator.rfc3339_date(datetime.date(2008, 11, 14)),
            "2008-11-14T00:00:00Z"
        )
    def test_atom1_mime_type(self):
        """
        Test to make sure Atom MIME type has UTF8 Charset parameter set
        """
        atom_feed = feedgenerator.Atom1Feed("title", "link", "description")
        self.assertEqual(
            atom_feed.content_type, "application/atom+xml; charset=utf-8"
        )
    def test_rss_mime_type(self):
        """
        Test to make sure RSS MIME type has UTF8 Charset parameter set
        """
        rss_feed = feedgenerator.Rss201rev2Feed("title", "link", "description")
        self.assertEqual(
            rss_feed.content_type, "application/rss+xml; charset=utf-8"
        )
    # Two regression tests for #14202
    def test_feed_without_feed_url_gets_rendered_without_atom_link(self):
        feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr')
        self.assertEqual(feed.feed['feed_url'], None)
        feed_content = feed.writeString('utf-8')
        # no feed_url -> the self-referencing <atom:link> must be omitted
        self.assertNotIn('<atom:link', feed_content)
        self.assertNotIn('href="/feed/"', feed_content)
        self.assertNotIn('rel="self"', feed_content)
    def test_feed_with_feed_url_gets_rendered_with_atom_link(self):
        feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr', feed_url='/feed/')
        self.assertEqual(feed.feed['feed_url'], '/feed/')
        feed_content = feed.writeString('utf-8')
        self.assertIn('<atom:link', feed_content)
        self.assertIn('href="/feed/"', feed_content)
        self.assertIn('rel="self"', feed_content)
|
mmulazzani/HoneyConnector | refs/heads/master | client/stem/util/log.py | 1 | # Copyright 2011-2013, Damian Johnson
# See LICENSE for licensing information
"""
Functions to aid library logging. The default logging
:data:`~stem.util.log.Runlevel` is usually NOTICE and above.
**Stem users are more than welcome to listen for stem events, but these
functions are not being vended to our users. They may change in the future, use
them at your own risk.**
**Module Overview:**
::
get_logger - provides the stem's Logger instance
logging_level - converts a runlevel to its logging number
escape - escapes special characters in a message in preparation for logging
log - logs a message at the given runlevel
log_once - logs a message, deduplicating if it has already been logged
trace - logs a message at the TRACE runlevel
debug - logs a message at the DEBUG runlevel
info - logs a message at the INFO runlevel
notice - logs a message at the NOTICE runlevel
warn - logs a message at the WARN runlevel
error - logs a message at the ERROR runlevel
LogBuffer - Buffers logged events so they can be iterated over.
|- is_empty - checks if there's events in our buffer
+- __iter__ - iterates over and removes the buffered events
log_to_stdout - reports further logged events to stdout
.. data:: Runlevel (enum)
Enumeration for logging runlevels.
========== ===========
Runlevel Description
========== ===========
**ERROR** critical issue occurred, the user needs to be notified
**WARN** non-critical issue occurred that the user should be aware of
**NOTICE** information that is helpful to the user
**INFO** high level library activity
**DEBUG** low level library activity
**TRACE** request/reply logging
========== ===========
"""
import logging
import stem.util.enum
# Logging runlevels. These are *very* commonly used so including shorter
# aliases (so they can be referenced as log.DEBUG, log.WARN, etc).
Runlevel = stem.util.enum.UppercaseEnum("TRACE", "DEBUG", "INFO", "NOTICE", "WARN", "ERROR")
TRACE, DEBUG, INFO, NOTICE, WARN, ERR = list(Runlevel)
# mapping of runlevels to the logger module's values, TRACE and DEBUG aren't
# built into the module
LOG_VALUES = {
  Runlevel.TRACE: logging.DEBUG - 5,
  Runlevel.DEBUG: logging.DEBUG,
  Runlevel.INFO: logging.INFO,
  Runlevel.NOTICE: logging.INFO + 5,
  Runlevel.WARN: logging.WARN,
  Runlevel.ERROR: logging.ERROR,
}
# register display names for the two custom numeric levels
logging.addLevelName(LOG_VALUES[TRACE], "TRACE")
logging.addLevelName(LOG_VALUES[NOTICE], "NOTICE")
# single shared logger; set to the lowest level so handlers do the filtering
LOGGER = logging.getLogger("stem")
LOGGER.setLevel(LOG_VALUES[TRACE])
# There's some messages that we don't want to log more than once. This set has
# the messages IDs that we've logged which fall into this category.
DEDUPLICATION_MESSAGE_IDS = set()
# Adds a default nullhandler for the stem logger, suppressing the 'No handlers
# could be found for logger "stem"' warning as per...
# http://docs.python.org/release/3.1.3/library/logging.html#configuring-logging-for-a-library
class _NullHandler(logging.Handler):
  def emit(self, record):
    pass
if not LOGGER.handlers:
  LOGGER.addHandler(_NullHandler())
def get_logger():
  """
  Provides stem's shared :class:`logging.Logger` instance.

  :returns: **logging.Logger** for stem
  """
  return LOGGER
def logging_level(runlevel):
  """
  Translates a runlevel into the value expected by the logging module.

  :param stem.util.log.Runlevel runlevel: runlevel to be returned, no logging if **None**
  """
  # a level above FATAL effectively disables logging
  if not runlevel:
    return logging.FATAL + 5
  return LOG_VALUES[runlevel]
def escape(message):
  """
  Escapes specific sequences for logging (newlines, tabs, carriage returns).

  :param str message: string to be escaped

  :returns: str that is escaped
  """
  replacements = (("\n", "\\n"), ("\r", "\\r"), ("\t", "\\t"))
  for raw, escaped in replacements:
    message = message.replace(raw, escaped)
  return message
def log(runlevel, message):
  """
  Logs a message at the given runlevel.

  :param stem.util.log.Runlevel runlevel: runlevel to log the message at, logging is skipped if **None**
  :param str message: message to be logged
  """
  if not runlevel:
    return
  LOGGER.log(LOG_VALUES[runlevel], message)
def log_once(message_id, runlevel, message):
  """
  Logs a message at the given runlevel. If a message with this ID has already
  been logged then this is a no-op.

  :param str message_id: unique message identifier to deduplicate on
  :param stem.util.log.Runlevel runlevel: runlevel to log the message at, logging is skipped if **None**
  :param str message: message to be logged

  :returns: **True** if we log the message, **False** otherwise
  """
  if not runlevel or message_id in DEDUPLICATION_MESSAGE_IDS:
    return False
  else:
    DEDUPLICATION_MESSAGE_IDS.add(message_id)
    log(runlevel, message)
    # fix: the docstring promises True when the message is logged, but this
    # branch previously fell off the end and returned None (falsy)
    return True
# shorter aliases for logging at a runlevel
def trace(message):
  # log *message* at the TRACE runlevel
  log(Runlevel.TRACE, message)
def debug(message):
  # log *message* at the DEBUG runlevel
  log(Runlevel.DEBUG, message)
def info(message):
  # log *message* at the INFO runlevel
  log(Runlevel.INFO, message)
def notice(message):
  # log *message* at the NOTICE runlevel
  log(Runlevel.NOTICE, message)
def warn(message):
  # log *message* at the WARN runlevel
  log(Runlevel.WARN, message)
def error(message):
  # log *message* at the ERROR runlevel
  log(Runlevel.ERROR, message)
class LogBuffer(logging.Handler):
  """
  Basic log handler that listens for stem events and stores them so they can be
  read later. Log entries are cleared as they are read.
  """
  def __init__(self, runlevel):
    # TODO: At least in python 2.6 logging.Handler has a bug in that it doesn't
    # extend object, causing our super() call to fail. When we drop python 2.5
    # support we should switch back to using super() instead.
    #super(LogBuffer, self).__init__(level = logging_level(runlevel))
    logging.Handler.__init__(self, level = logging_level(runlevel))
    self.formatter = logging.Formatter(
      fmt = '%(asctime)s [%(levelname)s] %(message)s',
      datefmt = '%m/%d/%Y %H:%M:%S')
    self._buffer = []
  def is_empty(self):
    # True when no events are waiting to be read
    return not bool(self._buffer)
  def __iter__(self):
    # draining iterator: each yielded entry is removed from the buffer
    while self._buffer:
      yield self.formatter.format(self._buffer.pop(0))
  def emit(self, record):
    # store the raw record; formatting is deferred until iteration
    self._buffer.append(record)
class _StdoutLogger(logging.Handler):
  """Handler that writes formatted log records to stdout."""
  def __init__(self, runlevel):
    logging.Handler.__init__(self, level = logging_level(runlevel))
    self.formatter = logging.Formatter(
      fmt = '%(asctime)s [%(levelname)s] %(message)s',
      datefmt = '%m/%d/%Y %H:%M:%S')
  def emit(self, record):
    # parenthesized so this is valid on both python 2 and 3 (the bare
    # 'print x' statement form is a syntax error on python 3)
    print(self.formatter.format(record))
def log_to_stdout(runlevel):
  """
  Logs further events to stdout.

  :param stem.util.log.Runlevel runlevel: minimum runlevel a message needs to be to be logged
  """
  handler = _StdoutLogger(runlevel)
  get_logger().addHandler(handler)
|
xxiaoxiao/flasky | refs/heads/master | migrations/versions/190163627111_account_confirmation.py | 144 | """account confirmation
Revision ID: 190163627111
Revises: 456a945560f6
Create Date: 2013-12-29 02:58:45.577428
"""
# revision identifiers, used by Alembic.
revision = '190163627111'
down_revision = '456a945560f6'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # add the nullable flag column used to track account e-mail confirmation
    op.add_column('users', sa.Column('confirmed', sa.Boolean(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # reverse of upgrade(): discard the confirmation flag entirely
    op.drop_column('users', 'confirmed')
    ### end Alembic commands ###
|
robwarm/gpaw-symm | refs/heads/master | gpaw/test/lebedev.py | 3 | from gpaw.sphere.lebedev import run, weight_n, Y_nL, R_nv
weight0_n, Y0_nL, R0_nv = run()
assert (abs(weight0_n - weight_n).sum() +
abs(Y0_nL - Y_nL).sum() +
abs(R0_nv - R_nv).sum()) < 1e-13
|
fitermay/intellij-community | refs/heads/master | python/testData/intentions/PyConvertCollectionLiteralIntentionTest/convertListToSet.py | 156 | xs = [<caret>1, 2] |
LasLabs/website | refs/heads/8.0 | website_crm_address/__init__.py | 12 | # -*- coding: utf-8 -*-
##############################################################################
#
# This module copyright (C) 2015 Therp BV <http://therp.nl>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import controllers
|
ClearCorp-dev/odoo | refs/heads/8.0 | addons/mrp/wizard/change_production_qty.py | 245 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class change_production_qty(osv.osv_memory):
    # Transient wizard that changes the planned quantity of an existing
    # manufacturing order and propagates the new quantity to its
    # consumption moves and finished-product moves.
    _name = 'change.production.qty'
    _description = 'Change Quantity of Products'
    _columns = {
        # New quantity to produce, in the production order's unit of measure.
        'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
    }
    def default_get(self, cr, uid, fields, context=None):
        """ To get default values for the object.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param fields: List of fields for which we want default values
        @param context: A standard dictionary
        @return: A dictionary which of fields with values.
        """
        if context is None:
            context = {}
        res = super(change_production_qty, self).default_get(cr, uid, fields, context=context)
        prod_obj = self.pool.get('mrp.production')
        # Pre-fill the wizard with the current planned quantity of the
        # manufacturing order the wizard was launched from (active_id).
        prod = prod_obj.browse(cr, uid, context.get('active_id'), context=context)
        if 'product_qty' in fields:
            res.update({'product_qty': prod.product_qty})
        return res
    def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
        """Write the new quantity ``qty`` on every finished-product move
        already created for production order ``prod``."""
        move_lines_obj = self.pool.get('stock.move')
        for m in prod.move_created_ids:
            move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})
    def change_prod_qty(self, cr, uid, ids, context=None):
        """
        Changes the Quantity of Product.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return:
        """
        # The wizard only makes sense when launched from a production order.
        record_id = context and context.get('active_id',False)
        assert record_id, _('Active Id not found')
        prod_obj = self.pool.get('mrp.production')
        bom_obj = self.pool.get('mrp.bom')
        move_obj = self.pool.get('stock.move')
        for wiz_qty in self.browse(cr, uid, ids, context=context):
            prod = prod_obj.browse(cr, uid, record_id, context=context)
            prod_obj.write(cr, uid, [prod.id], {'product_qty': wiz_qty.product_qty})
            prod_obj.action_compute(cr, uid, [prod.id])
            # Re-explode the BoM and rescale every consumption move to match
            # the new quantity.
            for move in prod.move_lines:
                bom_point = prod.bom_id
                bom_id = prod.bom_id.id
                if not bom_point:
                    # No BoM set on the order: look one up by product and
                    # attach it to the order before exploding.
                    bom_id = bom_obj._bom_find(cr, uid, product_id=prod.product_id.id, context=context)
                    if not bom_id:
                        raise osv.except_osv(_('Error!'), _("Cannot find bill of material for this product."))
                    prod_obj.write(cr, uid, [prod.id], {'bom_id': bom_id})
                    bom_point = bom_obj.browse(cr, uid, [bom_id])[0]
                # NOTE(review): this second check looks redundant - both
                # branches above guarantee a truthy bom_id by this point.
                if not bom_id:
                    raise osv.except_osv(_('Error!'), _("Cannot find bill of material for this product."))
                # Convert the order quantity into the BoM's unit of measure.
                factor = prod.product_qty * prod.product_uom.factor / bom_point.product_uom.factor
                product_details, workcenter_details = \
                    bom_obj._bom_explode(cr, uid, bom_point, prod.product_id, factor / bom_point.product_qty, [], context=context)
                for r in product_details:
                    if r['product_id'] == move.product_id.id:
                        move_obj.write(cr, uid, [move.id], {'product_uom_qty': r['product_qty']})
            # Also rescale the outgoing procurement move, if any.
            if prod.move_prod_id:
                move_obj.write(cr, uid, [prod.move_prod_id.id], {'product_uom_qty' : wiz_qty.product_qty})
            self._update_product_to_produce(cr, uid, prod, wiz_qty.product_qty, context=context)
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
newsapps/panda | refs/heads/master | panda/tests/test_api_data_upload.py | 6 | #!/usr/bin/env python
import os.path
from django.conf import settings
from django.test import TransactionTestCase
from django.test.client import Client
from django.utils import simplejson as json
from panda.models import DataUpload
from panda.tests import utils
class TestAPIDataUpload(TransactionTestCase):
    """Integration tests for the ``/api/1.0/data_upload`` endpoints and the
    ``/data_upload/`` file-upload view."""
    fixtures = ['init_panda.json', 'test_users.json']
    def setUp(self):
        # One user owning one dataset with one uploaded data file, plus the
        # HTTP headers needed to authenticate as that user.
        self.user = utils.get_panda_user()
        self.dataset = utils.get_test_dataset(self.user)
        self.upload = utils.get_test_data_upload(self.user, self.dataset)
        self.auth_headers = utils.get_auth_headers()
        self.client = Client()
    def test_get(self):
        # Detail view returns the upload's metadata and sampled rows.
        response = self.client.get('/api/1.0/data_upload/%i/' % self.upload.id, **self.auth_headers)
        self.assertEqual(response.status_code, 200)
        body = json.loads(response.content)
        self.assertEqual(body['filename'], self.upload.filename)
        self.assertEqual(body['original_filename'], self.upload.original_filename)
        self.assertEqual(body['size'], self.upload.size)
        self.assertEqual(body['creator']['resource_uri'], '/api/1.0/user/%i/' % self.user.id)
        self.assertNotEqual(body['creation_date'], None)
        self.assertEqual(body['dataset'], '/api/1.0/dataset/%s/' % self.dataset.slug)
        self.assertEqual(body['data_type'], 'csv')
        self.assertEqual(body['columns'], ['id', 'first_name', 'last_name', 'employer'])
        self.assertEqual(len(body['sample_data']), 4)
        self.assertEqual(body['sample_data'][0], ['1', 'Brian', 'Boyer', 'Chicago Tribune'])
    def test_get_unauthorized(self):
        # Without auth headers the API must reject the request.
        response = self.client.get('/api/1.0/data_upload/%i/' % self.upload.id)
        self.assertEqual(response.status_code, 401)
    def test_list(self):
        # List view wraps results in tastypie-style pagination metadata.
        response = self.client.get('/api/1.0/data_upload/', data={ 'limit': 5 }, **self.auth_headers)
        self.assertEqual(response.status_code, 200)
        body = json.loads(response.content)
        self.assertEqual(len(body['objects']), 1)
        self.assertEqual(body['meta']['total_count'], 1)
        self.assertEqual(body['meta']['limit'], 5)
        self.assertEqual(body['meta']['offset'], 0)
        self.assertEqual(body['meta']['next'], None)
        self.assertEqual(body['meta']['previous'], None)
    def test_create_denied(self):
        # Uploads may not be created through the REST endpoint directly.
        new_upload = {
            'filename': 'test.csv',
            'original_filename': 'test.csv',
            'size': 20
        }
        response = self.client.post('/api/1.0/data_upload/', content_type='application/json', data=json.dumps(new_upload), **self.auth_headers)
        self.assertEqual(response.status_code, 405)
    def test_download(self):
        # Download endpoint streams the stored file back verbatim.
        response = self.client.get('/api/1.0/data_upload/%i/download/' % self.upload.id, **self.auth_headers)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Disposition'], 'attachment; filename=%s' % self.upload.original_filename)
        self.assertEqual(int(response['Content-Length']), self.upload.size)
        with open(os.path.join(settings.MEDIA_ROOT, utils.TEST_DATA_FILENAME)) as f:
            self.assertEqual(f.read(), response.content)
    def test_download_unauthorized(self):
        response = self.client.get('/api/1.0/data_upload/%i/download/' % self.upload.id)
        self.assertEqual(response.status_code, 401)
    def test_upload_file(self):
        # Multipart upload via the dedicated view creates a DataUpload row.
        with open(os.path.join(settings.MEDIA_ROOT, utils.TEST_DATA_FILENAME)) as f:
            response = self.client.post('/data_upload/', data={ 'file': f, 'dataset_slug': self.dataset.slug }, **self.auth_headers)
        self.assertEqual(response.status_code, 200)
        body = json.loads(response.content)
        self.assertEqual(body['success'], True)
        upload = DataUpload.objects.get(id=body['id'])
        self.assertEqual(body['original_filename'], upload.original_filename)
        self.assertEqual(body['size'], os.path.getsize(os.path.join(settings.MEDIA_ROOT, utils.TEST_DATA_FILENAME)))
        self.assertEqual(body['size'], upload.size)
        self.assertEqual(body['creator']['resource_uri'], '/api/1.0/user/%i/' % self.user.id)
    def test_upload_unauthorized(self):
        # The upload view answers 200 but flags the failure in the JSON body.
        with open(os.path.join(settings.MEDIA_ROOT, utils.TEST_DATA_FILENAME)) as f:
            response = self.client.post('/data_upload/', data={ 'file': f })
        self.assertEqual(response.status_code, 200)
        body = json.loads(response.content)
        self.assertEqual(body['success'], False)
        self.assertEqual(body['forbidden'], True)
    def test_delete(self):
        # Deleting the API resource also removes the file from disk.
        path = self.upload.get_path()
        self.assertEqual(os.path.isfile(path), True)
        response = self.client.delete('/api/1.0/data_upload/%i/' % self.upload.id, **self.auth_headers)
        self.assertEqual(response.status_code, 204)
        self.assertEqual(os.path.exists(path), False)
|
dya2/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/nturl2path.py | 56 | """Convert a NT pathname to a file URL and vice versa."""
def url2pathname(url):
    """OS-specific conversion from a relative URL of the 'file' scheme
    to a file system path; not recommended for general use."""
    # Example: ///C|/foo/bar/spam.foo  ->  C:\foo\bar\spam.foo
    import string, urllib.parse
    # Windows itself uses ":" even in URLs, so treat ":" like "|".
    url = url.replace(':', '|')
    if '|' not in url:
        # No drive specifier: only the slashes need translating.
        if url[:4] == '////':
            # ////host/path -> \\host\path (halve the leading slashes
            # to form a UNC path).
            url = url[2:]
        # Unquote after joining so quoted slashes survive intact.
        return urllib.parse.unquote('\\'.join(url.split('/')))
    parts = url.split('|')
    if len(parts) != 2 or parts[0][-1] not in string.ascii_letters:
        raise IOError('Bad URL: ' + url)
    drive = parts[0][-1].upper()
    path = drive + ':'
    for segment in parts[1].split('/'):
        if segment:
            path = path + '\\' + urllib.parse.unquote(segment)
    # Issue #11474 - handling url such as |c/| (bare drive, trailing slash)
    if path.endswith(':') and url.endswith('/'):
        path = path + '\\'
    return path
def pathname2url(p):
    """OS-specific conversion from a file system path to a relative URL
    of the 'file' scheme; not recommended for general use."""
    # Example: C:\foo\bar\spam.foo  ->  ///C|/foo/bar/spam.foo
    import urllib.parse
    if ':' not in p:
        # No drive specifier: translate backslashes and percent-quote.
        if p[:2] == '\\\\':
            # \\host\path -> ////host/path (double the leading slashes).
            p = '\\\\' + p
        return urllib.parse.quote('/'.join(p.split('\\')))
    parts = p.split(':')
    if len(parts) != 2 or len(parts[0]) > 1:
        raise IOError('Bad path: ' + p)
    drive = urllib.parse.quote(parts[0].upper())
    url = '///' + drive + ':'
    for segment in parts[1].split('\\'):
        if segment:
            url = url + '/' + urllib.parse.quote(segment)
    return url
|
jiachenning/CodeIgniter | refs/heads/develop | user_guide_src/cilexer/setup.py | 397 | """
Install and setup CodeIgniter highlighting for Pygments.
"""
from setuptools import setup
# Entry-point declaration that makes Pygments discover the custom
# CodeIgniter lexer under the name "cilexer" once this package is installed.
entry_points = """
[pygments.lexers]
cilexer = cilexer.cilexer:CodeIgniterLexer
"""
# Package metadata; the module docstring doubles as the description.
setup(
    name='pycilexer',
    version='0.1',
    description=__doc__,
    author="EllisLab, Inc.",
    packages=['cilexer'],
    install_requires=(
        'sphinx >= 1.0.7',
        'sphinxcontrib-phpdomain >= 0.1.3-1'
    ),
    entry_points=entry_points
)
|
azumimuo/family-xbmc-addon | refs/heads/master | script.module.youtube.dl/lib/youtube_dl/extractor/xfileshare.py | 19 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
decode_packed_codes,
determine_ext,
ExtractorError,
int_or_none,
NO_DEFAULT,
urlencode_postdata,
)
class XFileShareIE(InfoExtractor):
    # Generic extractor for video hosters built on the XFileShare script.
    # Each entry pairs a hostname regex with a human-readable site name.
    _SITES = (
        (r'daclips\.(?:in|com)', 'DaClips'),
        (r'filehoot\.com', 'FileHoot'),
        (r'gorillavid\.(?:in|com)', 'GorillaVid'),
        (r'movpod\.in', 'MovPod'),
        (r'powerwatch\.pw', 'PowerWatch'),
        (r'rapidvideo\.ws', 'Rapidvideo.ws'),
        (r'thevideobee\.to', 'TheVideoBee'),
        (r'vidto\.me', 'Vidto'),
        (r'streamin\.to', 'Streamin.To'),
        (r'xvidstage\.com', 'XVIDSTAGE'),
        (r'vidabc\.com', 'Vid ABC'),
        (r'vidbom\.com', 'VidBom'),
        (r'vidlo\.us', 'vidlo'),
        (r'rapidvideo\.(?:cool|org)', 'RapidVideo.TV'),
        (r'fastvideo\.me', 'FastVideo.me'),
    )
    IE_DESC = 'XFileShare based sites: %s' % ', '.join(list(zip(*_SITES))[1])
    # One URL pattern matching any of the hosts above; "embed-" pages are
    # accepted as well.
    _VALID_URL = (r'https?://(?P<host>(?:www\.)?(?:%s))/(?:embed-)?(?P<id>[0-9a-zA-Z]+)'
                  % '|'.join(site for site in list(zip(*_SITES))[0]))
    # Markers of deleted/unavailable videos on the various site skins.
    _FILE_NOT_FOUND_REGEXES = (
        r'>(?:404 - )?File Not Found<',
        r'>The file was removed by administrator<',
    )
    _TESTS = [{
        'url': 'http://gorillavid.in/06y9juieqpmi',
        'md5': '5ae4a3580620380619678ee4875893ba',
        'info_dict': {
            'id': '06y9juieqpmi',
            'ext': 'mp4',
            'title': 'Rebecca Black My Moment Official Music Video Reaction-6GK87Rc8bzQ',
            'thumbnail': r're:http://.*\.jpg',
        },
    }, {
        'url': 'http://gorillavid.in/embed-z08zf8le23c6-960x480.html',
        'only_matching': True,
    }, {
        'url': 'http://daclips.in/3rso4kdn6f9m',
        'md5': '1ad8fd39bb976eeb66004d3a4895f106',
        'info_dict': {
            'id': '3rso4kdn6f9m',
            'ext': 'mp4',
            'title': 'Micro Pig piglets ready on 16th July 2009-bG0PdrCdxUc',
            'thumbnail': r're:http://.*\.jpg',
        }
    }, {
        'url': 'http://movpod.in/0wguyyxi1yca',
        'only_matching': True,
    }, {
        'url': 'http://filehoot.com/3ivfabn7573c.html',
        'info_dict': {
            'id': '3ivfabn7573c',
            'ext': 'mp4',
            'title': 'youtube-dl test video \'äBaW_jenozKc.mp4.mp4',
            'thumbnail': r're:http://.*\.jpg',
        },
        'skip': 'Video removed',
    }, {
        'url': 'http://vidto.me/ku5glz52nqe1.html',
        'info_dict': {
            'id': 'ku5glz52nqe1',
            'ext': 'mp4',
            'title': 'test'
        }
    }, {
        'url': 'http://powerwatch.pw/duecjibvicbu',
        'info_dict': {
            'id': 'duecjibvicbu',
            'ext': 'mp4',
            'title': 'Big Buck Bunny trailer',
        },
    }, {
        'url': 'http://xvidstage.com/e0qcnl03co6z',
        'info_dict': {
            'id': 'e0qcnl03co6z',
            'ext': 'mp4',
            'title': 'Chucky Prank 2015.mp4',
        },
    }, {
        # removed by administrator
        'url': 'http://xvidstage.com/amfy7atlkx25',
        'only_matching': True,
    }, {
        'url': 'http://vidabc.com/i8ybqscrphfv',
        'info_dict': {
            'id': 'i8ybqscrphfv',
            'ext': 'mp4',
            'title': 're:Beauty and the Beast 2017',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.rapidvideo.cool/b667kprndr8w',
        'only_matching': True,
    }, {
        'url': 'http://www.fastvideo.me/k8604r8nk8sn/FAST_FURIOUS_8_-_Trailer_italiano_ufficiale.mp4.html',
        'only_matching': True
    }]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        # Canonicalize embed URLs to the plain video page, which carries
        # the metadata the regexes below look for.
        url = 'http://%s/%s' % (mobj.group('host'), video_id)
        webpage = self._download_webpage(url, video_id)
        if any(re.search(p, webpage) for p in self._FILE_NOT_FOUND_REGEXES):
            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
        fields = self._hidden_inputs(webpage)
        # Some skins gate the video behind a "download" form, optionally
        # with a countdown; honour the wait, then re-submit the hidden
        # form fields to reach the real video page.
        if fields['op'] == 'download1':
            countdown = int_or_none(self._search_regex(
                r'<span id="countdown_str">(?:[Ww]ait)?\s*<span id="cxc">(\d+)</span>\s*(?:seconds?)?</span>',
                webpage, 'countdown', default=None))
            if countdown:
                self._sleep(countdown, video_id)
            webpage = self._download_webpage(
                url, video_id, 'Downloading video page',
                data=urlencode_postdata(fields), headers={
                    'Referer': url,
                    'Content-type': 'application/x-www-form-urlencoded',
                })
        # Try site-specific title markup first, then OpenGraph, then fall
        # back to the video id.
        title = (self._search_regex(
            (r'style="z-index: [0-9]+;">([^<]+)</span>',
             r'<td nowrap>([^<]+)</td>',
             r'h4-fine[^>]*>([^<]+)<',
             r'>Watch (.+) ',
             r'<h2 class="video-page-head">([^<]+)</h2>',
             r'<h2 style="[^"]*color:#403f3d[^"]*"[^>]*>([^<]+)<'),  # streamin.to
            webpage, 'title', default=None) or self._og_search_title(
            webpage, default=None) or video_id).strip()
        def extract_formats(default=NO_DEFAULT):
            # Collect distinct media URLs from the known embed patterns;
            # m3u8 URLs expand into HLS formats, everything else is "sd".
            urls = []
            for regex in (
                    r'(?:file|src)\s*:\s*(["\'])(?P<url>http(?:(?!\1).)+\.(?:m3u8|mp4|flv)(?:(?!\1).)*)\1',
                    r'file_link\s*=\s*(["\'])(?P<url>http(?:(?!\1).)+)\1',
                    r'addVariable\((\\?["\'])file\1\s*,\s*(\\?["\'])(?P<url>http(?:(?!\2).)+)\2\)',
                    r'<embed[^>]+src=(["\'])(?P<url>http(?:(?!\1).)+\.(?:m3u8|mp4|flv)(?:(?!\1).)*)\1'):
                for mobj in re.finditer(regex, webpage):
                    video_url = mobj.group('url')
                    if video_url not in urls:
                        urls.append(video_url)
            formats = []
            for video_url in urls:
                if determine_ext(video_url) == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        video_url, video_id, 'mp4',
                        entry_protocol='m3u8_native', m3u8_id='hls',
                        fatal=False))
                else:
                    formats.append({
                        'url': video_url,
                        'format_id': 'sd',
                    })
            if not formats and default is not NO_DEFAULT:
                return default
            self._sort_formats(formats)
            return formats
        formats = extract_formats(default=None)
        if not formats:
            # Nothing found in the clear: the media URLs are hidden inside
            # p.a.c.k.e.d JavaScript - unpack it and retry (fatal this time).
            webpage = decode_packed_codes(self._search_regex(
                r"(}\('(.+)',(\d+),(\d+),'[^']*\b(?:file|embed)\b[^']*'\.split\('\|'\))",
                webpage, 'packed code'))
            formats = extract_formats()
        thumbnail = self._search_regex(
            r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', default=None)
        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
|
JIoJIaJIu/servo | refs/heads/master | tests/wpt/css-tests/tools/wptserve/wptserve/request.py | 123 | import base64
import cgi
import Cookie
import os
import StringIO
import tempfile
import urlparse
import stash
from utils import HTTPException
missing = object()
class Server(object):
    """Data about the server environment
    .. attribute:: config
    Environment configuration information with information about the
    various servers running, their hostnames and ports.
    .. attribute:: stash
    Stash object holding state stored on the server between requests.
    """
    config = None
    def __init__(self, request):
        self._stash = None
        self._request = request
    @property
    def stash(self):
        # Created lazily so a stash connection is only opened for requests
        # that actually use it; keyed by the request path.
        if self._stash is None:
            address, authkey = stash.load_env_config()
            self._stash = stash.Stash(self._request.url_parts.path, address, authkey)
        return self._stash
class InputFile(object):
    """Seekable file-like view of the request body.

    Bytes read from the underlying stream are mirrored into a buffer
    (in-memory for small bodies, a temporary file beyond
    ``max_buffer_size``) so earlier data can be re-read after seeking
    backwards, even though the underlying stream itself cannot seek.
    """
    # Bodies larger than this are buffered on disk instead of in memory.
    max_buffer_size = 1024*1024
    def __init__(self, rfile, length):
        """File-like object used to provide a seekable view of request body data"""
        self._file = rfile
        self.length = length
        # How many bytes have been consumed from the underlying stream.
        self._file_position = 0
        if length > self.max_buffer_size:
            self._buf = tempfile.TemporaryFile(mode="rw+b")
        else:
            self._buf = StringIO.StringIO()
    @property
    def _buf_position(self):
        # Current logical read position; never ahead of the stream position.
        rv = self._buf.tell()
        assert rv <= self._file_position
        return rv
    def read(self, bytes=-1):
        assert self._buf_position <= self._file_position
        # Negative size means "read to the end of the body".
        if bytes < 0:
            bytes = self.length - self._buf_position
        bytes_remaining = min(bytes, self.length - self._buf_position)
        if bytes_remaining == 0:
            return ""
        if self._buf_position != self._file_position:
            # Serve as much as possible from already-buffered data first.
            buf_bytes = min(bytes_remaining, self._file_position - self._buf_position)
            old_data = self._buf.read(buf_bytes)
            bytes_remaining -= buf_bytes
        else:
            old_data = ""
        assert self._buf_position == self._file_position, (
            "Before reading buffer position (%i) didn't match file position (%i)" %
            (self._buf_position, self._file_position))
        # Pull the rest from the stream and mirror it into the buffer.
        new_data = self._file.read(bytes_remaining)
        self._buf.write(new_data)
        self._file_position += bytes_remaining
        assert self._buf_position == self._file_position, (
            "After reading buffer position (%i) didn't match file position (%i)" %
            (self._buf_position, self._file_position))
        return old_data + new_data
    def tell(self):
        return self._buf_position
    def seek(self, offset):
        if offset > self.length or offset < 0:
            raise ValueError
        if offset <= self._file_position:
            # Backward (or in-buffer) seek: just reposition the buffer.
            self._buf.seek(offset)
        else:
            # Forward seek: consume (and buffer) the intervening bytes.
            self.read(offset - self._file_position)
    def readline(self, max_bytes=None):
        if max_bytes is None:
            max_bytes = self.length - self._buf_position
        if self._buf_position < self._file_position:
            # A complete line may already be buffered.
            data = self._buf.readline(max_bytes)
            if data.endswith("\n") or len(data) == max_bytes:
                return data
        else:
            data = ""
        assert self._buf_position == self._file_position
        initial_position = self._file_position
        found = False
        buf = []
        max_bytes -= len(data)
        # Scan ahead in small (2-byte) chunks until a newline or the limit.
        while not found:
            readahead = self.read(min(2, max_bytes))
            max_bytes -= len(readahead)
            for i, c in enumerate(readahead):
                if c == "\n":
                    buf.append(readahead[:i+1])
                    found = True
                    break
            if not found:
                buf.append(readahead)
                if not readahead or not max_bytes:
                    break
        new_data = "".join(buf)
        data += new_data
        # Rewind past any bytes read beyond the newline.
        self.seek(initial_position + len(new_data))
        return data
    def readlines(self):
        rv = []
        while True:
            data = self.readline()
            if data:
                rv.append(data)
            else:
                break
        return rv
    def next(self):
        # Python 2 iterator protocol: yield lines until the body is exhausted.
        data = self.readline()
        if data:
            return data
        else:
            raise StopIteration
    def __iter__(self):
        return self
class Request(object):
    """Object representing a HTTP request.
    .. attribute:: doc_root
    The local directory to use as a base when resolving paths
    .. attribute:: route_match
    Regexp match object from matching the request path to the route
    selected for the request.
    .. attribute:: protocol_version
    HTTP version specified in the request.
    .. attribute:: method
    HTTP method in the request.
    .. attribute:: request_path
    Request path as it appears in the HTTP request.
    .. attribute:: url
    Absolute URL for the request.
    .. attribute:: headers
    List of request headers.
    .. attribute:: raw_input
    File-like object representing the body of the request.
    .. attribute:: url_parts
    Parts of the requested URL as obtained by urlparse.urlsplit(path)
    .. attribute:: request_line
    Raw request line
    .. attribute:: headers
    RequestHeaders object providing a dictionary-like representation of
    the request headers.
    .. attribute:: body
    Request body as a string
    .. attribute:: GET
    MultiDict representing the parameters supplied with the request.
    Note that these may be present on non-GET requests; the name is
    chosen to be familiar to users of other systems such as PHP.
    .. attribute:: POST
    MultiDict representing the request body parameters. Most parameters
    are present as string values, but file uploads have file-like
    values.
    .. attribute:: cookies
    Cookies object representing cookies sent with the request with a
    dictionary-like interface.
    .. attribute:: auth
    Object with username and password properties representing any
    credentials supplied using HTTP authentication.
    .. attribute:: server
    Server object containing information about the server environment.
    """
    def __init__(self, request_handler):
        self.doc_root = request_handler.server.router.doc_root
        self.route_match = None  # Set by the router
        self.protocol_version = request_handler.protocol_version
        self.method = request_handler.command
        scheme = request_handler.server.scheme
        host = request_handler.headers.get("Host")
        port = request_handler.server.server_address[1]
        # Prefer the Host header over the socket address; the header may
        # also carry an explicit port.
        if host is None:
            host = request_handler.server.server_address[0]
        else:
            if ":" in host:
                host, port = host.split(":", 1)
        self.request_path = request_handler.path
        # Absolute-form request lines are used verbatim; otherwise the
        # absolute URL is reconstructed from scheme, host and port.
        if self.request_path.startswith(scheme + "://"):
            self.url = request_handler.path
        else:
            self.url = "%s://%s:%s%s" % (scheme,
                                         host,
                                         port,
                                         self.request_path)
        self.url_parts = urlparse.urlsplit(self.url)
        self._raw_headers = request_handler.headers
        self.request_line = request_handler.raw_requestline
        self._headers = None
        self.raw_input = InputFile(request_handler.rfile,
                                   int(self.headers.get("Content-Length", 0)))
        # The views below are computed lazily and cached; see the properties.
        self._body = None
        self._GET = None
        self._POST = None
        self._cookies = None
        self._auth = None
        self.server = Server(self)
    def __repr__(self):
        return "<Request %s %s>" % (self.method, self.url)
    @property
    def GET(self):
        # Parse the query string once and cache the resulting MultiDict.
        if self._GET is None:
            params = urlparse.parse_qsl(self.url_parts.query, keep_blank_values=True)
            self._GET = MultiDict()
            for key, value in params:
                self._GET.add(key, value)
        return self._GET
    @property
    def POST(self):
        if self._POST is None:
            #Work out the post parameters
            # Preserve the body stream position so reading POST data is
            # side-effect free for later consumers.
            pos = self.raw_input.tell()
            self.raw_input.seek(0)
            fs = cgi.FieldStorage(fp=self.raw_input,
                                  environ={"REQUEST_METHOD": self.method},
                                  headers=self.headers,
                                  keep_blank_values=True)
            self._POST = MultiDict.from_field_storage(fs)
            self.raw_input.seek(pos)
        return self._POST
    @property
    def cookies(self):
        # Parse the Cookie header once and cache the result.
        if self._cookies is None:
            parser = Cookie.BaseCookie()
            cookie_headers = self.headers.get("cookie", "")
            parser.load(cookie_headers)
            cookies = Cookies()
            for key, value in parser.iteritems():
                cookies[key] = CookieValue(value)
            self._cookies = cookies
        return self._cookies
    @property
    def headers(self):
        if self._headers is None:
            self._headers = RequestHeaders(self._raw_headers)
        return self._headers
    @property
    def body(self):
        # Read the whole body, restoring the stream position afterwards.
        if self._body is None:
            pos = self.raw_input.tell()
            self.raw_input.seek(0)
            self._body = self.raw_input.read()
            self.raw_input.seek(pos)
        return self._body
    @property
    def auth(self):
        if self._auth is None:
            self._auth = Authentication(self.headers)
        return self._auth
class RequestHeaders(dict):
    """Dictionary-like API for accessing request headers.

    Keys are case-insensitive (stored lower-cased) and each key maps to
    the list of values received for that header.
    """
    def __init__(self, items):
        for key, value in zip(items.keys(), items.values()):
            key = key.lower()
            if key in self:
                # Append to the underlying list directly. Going through
                # self[key] would call __getitem__, which returns the bare
                # string when a header has a single value, so .append()
                # would raise AttributeError on the second occurrence of
                # any header.
                dict.__getitem__(self, key).append(value)
            else:
                dict.__setitem__(self, key, [value])
    def __getitem__(self, key):
        """Get all headers of a certain (case-insensitive) name. If there is
        more than one, the values are returned comma separated"""
        values = dict.__getitem__(self, key.lower())
        if len(values) == 1:
            return values[0]
        else:
            return ", ".join(values)
    def __setitem__(self, name, value):
        # Headers are read-only once parsed.
        raise Exception
    def get(self, key, default=None):
        """Get a string representing all headers with a particular value,
        with multiple headers separated by a comma. If no header is found
        return a default value
        :param key: The header name to look up (case-insensitive)
        :param default: The value to return in the case of no match
        """
        try:
            return self[key]
        except KeyError:
            return default
    def get_list(self, key, default=missing):
        """Get all the header values for a particular field name as
        a list"""
        try:
            return dict.__getitem__(self, key.lower())
        except KeyError:
            if default is not missing:
                return default
            else:
                raise
    def __contains__(self, key):
        # Case-insensitive membership test.
        return dict.__contains__(self, key.lower())
    def iteritems(self):
        # (key, combined-value) pairs, matching __getitem__ semantics.
        for item in self:
            yield item, self[item]
    def itervalues(self):
        for item in self:
            yield self[item]
class CookieValue(object):
    """Read-only view of a single cookie (built from a ``Cookie.Morsel``).

    The cookie's name (``key``), ``value`` and the standard attributes
    ``expires``, ``path``, ``comment``, ``domain``, ``max_age``,
    ``secure``, ``version`` and ``httponly`` are exposed as plain
    attributes (hyphens replaced by underscores).  The string form is
    captured at construction time and does not track later attribute
    changes, so the object is effectively read-only.

    Equality compares on value alone: a cookie whose value is "ham"
    compares equal both to other cookies with value "ham" and to the
    string "ham" itself.
    """
    def __init__(self, morsel):
        self.key = morsel.key
        self.value = morsel.value
        for name in ("expires", "path", "comment", "domain",
                     "max-age", "secure", "version", "httponly"):
            setattr(self, name.replace("-", "_"), morsel[name])
        # Freeze the serialised form now; later attribute edits are ignored.
        self._str = morsel.OutputString()
    def __str__(self):
        return self._str
    def __repr__(self):
        return self._str
    def __eq__(self, other):
        """Compare on value only (see the class docstring)."""
        other_value = getattr(other, "value", other)
        return self.value == other_value
class MultiDict(dict):
    """Dict subclass in which each key maps to a list of values.

    Plain indexing returns the first value stored for a key; use
    :meth:`get_list` to retrieve every value.
    """
    #TODO: this should perhaps also order the keys
    def __init__(self):
        pass
    def __setitem__(self, name, value):
        # Assignment replaces any existing values with a one-item list.
        dict.__setitem__(self, name, [value])
    def add(self, name, value):
        # Append to the existing list, creating it on first use.
        dict.setdefault(self, name, []).append(value)
    def __getitem__(self, key):
        """Get the first value with a given key"""
        #TODO: should this instead be the last value?
        return self.first(key)
    def first(self, key, default=missing):
        """Get the first value with a given key
        :param key: The key to lookup
        :param default: The default to return if key is
        not found (throws if nothing is
        specified)
        """
        values = dict.get(self, key)
        if values:
            return values[0]
        if default is not missing:
            return default
        raise KeyError
    def last(self, key, default=missing):
        """Get the last value with a given key
        :param key: The key to lookup
        :param default: The default to return if key is
        not found (throws if nothing is
        specified)
        """
        values = dict.get(self, key)
        if values:
            return values[-1]
        if default is not missing:
            return default
        raise KeyError
    def get_list(self, key):
        """Get all values with a given key as a list
        :param key: The key to lookup
        """
        return dict.__getitem__(self, key)
    @classmethod
    def from_field_storage(cls, fs):
        """Build a MultiDict from a cgi.FieldStorage instance."""
        result = cls()
        if fs.list is None:
            return result
        for key in fs:
            values = fs[key]
            if not isinstance(values, list):
                values = [values]
            for value in values:
                # File uploads keep the file-like object; ordinary fields
                # collapse to their string value.
                result.add(key, value if value.filename else value.value)
        return result
class Cookies(MultiDict):
    """MultiDict specialised for Cookie values"""
    def __init__(self):
        pass
    def __getitem__(self, key):
        # Unlike MultiDict, indexing returns the most recently set cookie
        # with this name rather than the first.
        return self.last(key)
class Authentication(object):
    """Object for dealing with HTTP Authentication
    .. attribute:: username
    The username supplied in the HTTP Authorization
    header, or None
    .. attribute:: password
    The password supplied in the HTTP Authorization
    header, or None
    """
    def __init__(self, headers):
        self.username = None
        self.password = None
        # Map of supported scheme name -> decoder for the credentials blob.
        auth_schemes = {"Basic": self.decode_basic}
        if "authorization" in headers:
            header = headers.get("authorization")
            auth_type, data = header.split(" ", 1)
            if auth_type in auth_schemes:
                self.username, self.password = auth_schemes[auth_type](data)
            else:
                # A credential in a scheme we can't decode is a client error.
                raise HTTPException(400, "Unsupported authentication scheme %s" % auth_type)
    def decode_basic(self, data):
        """Decode a Basic credentials blob into a (username, password) pair."""
        # NOTE(review): base64.decodestring is the Python 2 spelling; it was
        # removed in Python 3.9 (base64.decodebytes is the replacement).
        decoded_data = base64.decodestring(data)
        return decoded_data.split(":", 1)
|
fbossy/SickRage | refs/heads/master | lib/guessit/transfo/__init__.py | 51 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
class TransformerException(Exception):
def __init__(self, transformer, message):
# Call the base class constructor with the parameters it needs
Exception.__init__(self, message)
self.transformer = transformer |
prospwro/odoo | refs/heads/8.0 | addons/base_action_rule/test_models.py | 333 | from openerp.osv import fields, osv
from openerp import api
AVAILABLE_STATES = [
('draft', 'New'),
('cancel', 'Cancelled'),
('open', 'In Progress'),
('pending', 'Pending'),
('done', 'Closed')
]
class lead_test(osv.Model):
    # Minimal lead-like model used only to exercise base.action.rule in
    # the automated tests.
    _name = "base.action.rule.lead.test"
    _columns = {
        'name': fields.char('Subject', required=True, select=1),
        'user_id': fields.many2one('res.users', 'Responsible'),
        'state': fields.selection(AVAILABLE_STATES, string="Status", readonly=True),
        'active': fields.boolean('Active', required=False),
        'partner_id': fields.many2one('res.partner', 'Partner', ondelete='set null'),
        'date_action_last': fields.datetime('Last Action', readonly=1),
    }
    _defaults = {
        'state' : 'draft',
        'active' : True,
    }
    @api.cr_uid_ids_context
    def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, **kwargs):
        # Stub: action rules may try to post messages; tests discard them.
        pass
    def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
        # Stub: follower subscription is irrelevant for these tests.
        pass
|
ar7z1/ansible | refs/heads/devel | lib/ansible/modules/network/cloudengine/ce_snmp_contact.py | 43 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_snmp_contact
version_added: "2.4"
short_description: Manages SNMP contact configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP contact configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
contact:
description:
- Contact information.
required: true
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: CloudEngine snmp contact test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP contact"
ce_snmp_contact:
state: present
contact: call Operator at 010-99999999
provider: "{{ cli }}"
- name: "Undo SNMP contact"
ce_snmp_contact:
state: absent
contact: call Operator at 010-99999999
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"contact": "call Operator at 010-99999999",
"state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"contact": "call Operator at 010-99999999"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent sys-info contact call Operator at 010-99999999"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config, ce_argument_spec
class SnmpContact(object):
    """ Manages SNMP contact configuration """
    def __init__(self, **kwargs):
        """ Class init """
        # module
        argument_spec = kwargs["argument_spec"]
        self.spec = argument_spec
        self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
        # config
        self.cur_cfg = dict()
        # module args
        self.state = self.module.params['state']
        self.contact = self.module.params['contact']
        # state
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()
    def check_args(self):
        """ Check invalid args """
        if self.contact:
            if len(self.contact) > 255 or len(self.contact) < 1:
                self.module.fail_json(
                    msg='Error: The len of contact %s is out of [1 - 255].' % self.contact)
        else:
            # contact is declared required=True in the argument spec, so this
            # branch is defensive (empty string still reaches it).
            self.module.fail_json(
                msg='Error: The len of contact is 0.')
    def get_proposed(self):
        """ Get proposed state """
        self.proposed["state"] = self.state
        if self.contact:
            self.proposed["contact"] = self.contact
    def get_existing(self):
        """ Get existing state """
        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            # NOTE(review): assumes the filtered config contains "contact "
            # exactly once; temp_data[1] raises IndexError otherwise — confirm
            # against real device output.
            temp_data = tmp_cfg.split(r"contact ")
            self.cur_cfg["contact"] = temp_data[1]
            self.existing["contact"] = temp_data[1]
    def get_end_state(self):
        """ Get end state """
        tmp_cfg = self.cli_get_config()
        if tmp_cfg:
            temp_data = tmp_cfg.split(r"contact ")
            self.end_state["contact"] = temp_data[1]
    def cli_load_config(self, commands):
        """ Load configure by cli """
        # Respect Ansible check mode: only push config when not checking.
        if not self.module.check_mode:
            load_config(self.module, commands)
    def cli_get_config(self):
        """ Get configure by cli """
        # Filter running config down to the snmp contact line.
        regular = "| include snmp | include contact"
        flags = list()
        flags.append(regular)
        tmp_cfg = get_config(self.module, flags)
        return tmp_cfg
    def set_config(self):
        """ Set configure by cli """
        cmd = "snmp-agent sys-info contact %s" % self.contact
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True
    def undo_config(self):
        """ Undo configure by cli """
        cmd = "undo snmp-agent sys-info contact"
        self.updates_cmd.append(cmd)
        cmds = list()
        cmds.append(cmd)
        self.cli_load_config(cmds)
        self.changed = True
    def work(self):
        """ Main work function """
        self.check_args()
        self.get_proposed()
        self.get_existing()
        if self.state == "present":
            if "contact" in self.cur_cfg.keys() and self.contact == self.cur_cfg["contact"]:
                # already configured with the desired value: nothing to do
                pass
            else:
                self.set_config()
        else:
            # absent: only remove when the device value matches the request
            if "contact" in self.cur_cfg.keys() and self.contact == self.cur_cfg["contact"]:
                self.undo_config()
        self.get_end_state()
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        self.results['updates'] = self.updates_cmd
        self.module.exit_json(**self.results)
def main():
    """ Module main """
    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        contact=dict(type='str', required=True)
    )
    # Merge in the common CloudEngine connection arguments.
    argument_spec.update(ce_argument_spec)
    module = SnmpContact(argument_spec=argument_spec)
    module.work()
if __name__ == '__main__':
main()
|
gi11es/thumbor | refs/heads/master | perf/static_app.py | 3 | from starlette.applications import Starlette
from starlette.routing import Mount
from starlette.staticfiles import StaticFiles
# Route table: serve files from the local "static" directory at the site root.
ROUTES = [
    Mount("/", app=StaticFiles(directory="static"), name="static"),
]
# ASGI application exposing only the static routes above.
APP = Starlette(routes=ROUTES)
|
tragiclifestories/django | refs/heads/master | tests/user_commands/management/commands/dance.py | 314 | from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Test command that writes a message (or raises) depending on options."""
    help = "Dance around like a madman."
    args = ''
    requires_system_checks = True
    def add_arguments(self, parser):
        parser.add_argument("-s", "--style", default="Rock'n'Roll")
        parser.add_argument("-x", "--example")
        # dest differs from the option name here, deliberately.
        parser.add_argument("--opt-3", action='store_true', dest='option3')
    def handle(self, *args, **options):
        example = options["example"]
        if example == "raise":
            # Lets callers exercise CommandError propagation.
            raise CommandError()
        if options['verbosity'] > 0:
            self.stdout.write("I don't feel like dancing %s." % options["style"])
        # Echo every parsed option name so output can be inspected.
        self.stdout.write(','.join(options.keys()))
|
NikitaKoshelev/exam4sovzond | refs/heads/master | apps/dynamic_models/templatetags/dynamic_models_extras.py | 1 | # coding: utf-8
import pymorphy2
from django.template import Library, defaultfilters
from django.db.models import Model
morph = pymorphy2.MorphAnalyzer()
register = Library()
@register.filter
@defaultfilters.stringfilter
def inflect(word, grammeme):
    """Template filter: inflect `word` into the grammemes given as a
    comma-separated string (pymorphy2 grammeme names)."""
    # Uses the first (most probable) parse. NOTE(review): .inflect() can
    # return None when the requested form does not exist, which would raise
    # AttributeError here — confirm inputs are always inflectable.
    return morph.parse(word)[0].inflect(set(grammeme.split(','))).word
@register.filter
@defaultfilters.stringfilter
def right_gramm(adjective, noun):
    """Template filter: agree `adjective` with `noun` in gender and case."""
    # Parse the noun once to read its gender/case tags, then force the
    # adjective into the same gender and case.
    noun = morph.parse(noun)[0]
    return morph.parse(adjective)[0].inflect({noun.tag.gender, noun.tag.case}).word
sinbazhou/odoo | refs/heads/8.0 | addons/l10n_hn/__init__.py | 411 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009-2010 Salvatore J. Trimarchi <salvatore@trimarchi.co.cc>
# (http://salvatoreweb.co.cc)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
zigitax/pupy | refs/heads/master | pupy/modules/pyexec.py | 27 | # -*- coding: UTF8 -*-
from pupylib.PupyModule import *
import StringIO
import pupylib.utils
__class_name__="PythonExec"
class PythonExec(PupyModule):
    """ execute python code on a remote system """
    def init_argparse(self):
        self.arg_parser = PupyArgumentParser(prog='pyexec', description=self.__doc__)
        # --file and --code are mutually exclusive; exactly one must be given.
        group=self.arg_parser.add_mutually_exclusive_group(required=True)
        group.add_argument('--file', metavar="<path>", help="execute code from .py file")
        group.add_argument('-c','--code', metavar='<code string>', help="execute python oneliner code. ex : 'import platform;print platform.uname()'")
    def run(self, args):
        """Load the code (from file or inline) and execute it on the remote
        interpreter, echoing the remote stdout/stderr locally."""
        code=""
        if args.file:
            self.info("loading code from %s ..."%args.file)
            with open(args.file,'r') as f:
                code=f.read()
        else:
            code=args.code
        stdout=StringIO.StringIO()
        stderr=StringIO.StringIO()
        try:
            # Capture the remote side's output into local buffers while the
            # code runs on the client connection.
            with pupylib.utils.redirected_stdo(self.client.conn, stdout, stderr):
                self.client.conn.execute(code+"\n")
            res=stdout.getvalue()
            err=stderr.getvalue()
            if err.strip():
                err="\n"+err
            self.rawlog(res+err)
        finally:
            stdout.close()
            stderr.close()
|
pymedusa/Medusa | refs/heads/master | ext/knowit/properties/duration.py | 2 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from datetime import timedelta
from six import text_type
from ..property import Property
class Duration(Property):
    """Duration property.

    Normalizes several duration representations into ``datetime.timedelta``:
    an existing ``timedelta`` (returned as-is), an ``int`` or numeric string
    (interpreted as milliseconds), or an ``HH:MM:SS[.mmm[uuu]]`` string.
    Values that match none of these are reported via ``self.report`` and
    yield ``None``.
    """

    # HH:MM:SS with an optional fractional part split into millis + micros;
    # extra fractional digits beyond 6 are ignored.
    duration_re = re.compile(r'(?P<hours>\d{1,2}):'
                             r'(?P<minutes>\d{1,2}):'
                             r'(?P<seconds>\d{1,2})(?:\.'
                             r'(?P<millis>\d{3})'
                             r'(?P<micro>\d{3})?\d*)?')

    def handle(self, value, context):
        """Return duration as timedelta."""
        if isinstance(value, timedelta):
            return value
        elif isinstance(value, int):
            return timedelta(milliseconds=value)
        try:
            return timedelta(milliseconds=int(float(value)))
        except ValueError:
            pass
        # Fix: the previous code called .groups() on the regex match without
        # checking for None, so a non-matching string raised AttributeError
        # (only ValueError was caught) instead of being reported as invalid.
        match = self.duration_re.match(text_type(value))
        if match:
            try:
                h, m, s, ms, mc = match.groups('0')
                return timedelta(hours=int(h), minutes=int(m), seconds=int(s), milliseconds=int(ms), microseconds=int(mc))
            except ValueError:
                pass
        self.report(value, context)
|
zstyblik/infernal-twin | refs/heads/master | build/pillow/Tests/test_image_transform.py | 11 | from helper import unittest, PillowTestCase, hopper
from PIL import Image
class TestImageTransform(PillowTestCase):
    """Tests for Image.transform and the ImageTransform helper classes."""
    def test_sanity(self):
        # Each transform type should be constructible and applicable.
        from PIL import ImageTransform
        im = Image.new("L", (100, 100))
        seq = tuple(range(10))
        transform = ImageTransform.AffineTransform(seq[:6])
        im.transform((100, 100), transform)
        transform = ImageTransform.ExtentTransform(seq[:4])
        im.transform((100, 100), transform)
        transform = ImageTransform.QuadTransform(seq[:8])
        im.transform((100, 100), transform)
        transform = ImageTransform.MeshTransform([(seq[:4], seq[:8])])
        im.transform((100, 100), transform)
    def test_extent(self):
        im = hopper('RGB')
        (w, h) = im.size
        transformed = im.transform(im.size, Image.EXTENT,
                                   (0, 0,
                                    w//2, h//2),  # ul -> lr
                                   Image.BILINEAR)
        scaled = im.resize((w*2, h*2), Image.BILINEAR).crop((0, 0, w, h))
        # undone -- precision?
        self.assert_image_similar(transformed, scaled, 23)
    def test_quad(self):
        # one simple quad transform, equivalent to scale & crop upper left quad
        im = hopper('RGB')
        (w, h) = im.size
        transformed = im.transform(im.size, Image.QUAD,
                                   (0, 0, 0, h//2,
                                    # ul -> ccw around quad:
                                    w//2, h//2, w//2, 0),
                                   Image.BILINEAR)
        scaled = im.transform((w, h), Image.AFFINE,
                              (.5, 0, 0, 0, .5, 0),
                              Image.BILINEAR)
        self.assert_image_equal(transformed, scaled)
    def test_mesh(self):
        # this should be a checkerboard of halfsized hoppers in ul, lr
        im = hopper('RGBA')
        (w, h) = im.size
        transformed = im.transform(im.size, Image.MESH,
                                   [((0, 0, w//2, h//2),  # box
                                     (0, 0, 0, h,
                                      w, h, w, 0)),  # ul -> ccw around quad
                                    ((w//2, h//2, w, h),  # box
                                     (0, 0, 0, h,
                                      w, h, w, 0))],  # ul -> ccw around quad
                                   Image.BILINEAR)
        scaled = im.transform((w//2, h//2), Image.AFFINE,
                              (2, 0, 0, 0, 2, 0),
                              Image.BILINEAR)
        checker = Image.new('RGBA', im.size)
        checker.paste(scaled, (0, 0))
        checker.paste(scaled, (w//2, h//2))
        self.assert_image_equal(transformed, checker)
        # now, check to see that the extra area is (0, 0, 0, 0)
        blank = Image.new('RGBA', (w//2, h//2), (0, 0, 0, 0))
        self.assert_image_equal(blank, transformed.crop((w//2, 0, w, h//2)))
        self.assert_image_equal(blank, transformed.crop((0, h//2, w//2, h)))
    def _test_alpha_premult(self, op):
        # create image with half white, half black,
        # with the black half transparent.
        # do op,
        # there should be no darkness in the white section.
        im = Image.new('RGBA', (10, 10), (0, 0, 0, 0))
        im2 = Image.new('RGBA', (5, 10), (255, 255, 255, 255))
        im.paste(im2, (0, 0))
        im = op(im, (40, 10))
        im_background = Image.new('RGB', (40, 10), (255, 255, 255))
        im_background.paste(im, (0, 0), im)
        hist = im_background.histogram()
        # all 400 pixels should land in the brightest bucket of the last band
        self.assertEqual(40*10, hist[-1])
    def test_alpha_premult_resize(self):
        def op(im, sz):
            return im.resize(sz, Image.LINEAR)
        self._test_alpha_premult(op)
    def test_alpha_premult_transform(self):
        def op(im, sz):
            (w, h) = im.size
            return im.transform(sz, Image.EXTENT,
                                (0, 0,
                                 w, h),
                                Image.BILINEAR)
        self._test_alpha_premult(op)
    def test_blank_fill(self):
        # attempting to hit
        # https://github.com/python-pillow/Pillow/issues/254 reported
        #
        # issue is that transforms with transparent overflow area
        # contained junk from previous images, especially on systems with
        # constrained memory. So, attempt to fill up memory with a
        # pattern, free it, and then run the mesh test again. Using a 1Mp
        # image with 4 bands, for 4 megs of data allocated, x 64. OMM (64
        # bit 12.04 VM with 512 megs available, this fails with Pillow <
        # a0eaf06cc5f62a6fb6de556989ac1014ff3348ea
        #
        # Running by default, but I'd totally understand not doing it in
        # the future
        pattern = [
            Image.new('RGBA', (1024, 1024), (a, a, a, a))
            for a in range(1, 65)
        ]
        # Yeah. Watch some JIT optimize this out.
        pattern = None
        self.test_mesh()
if __name__ == '__main__':
unittest.main()
# End of file
|
nchammas/click | refs/heads/master | examples/naval/naval.py | 43 | import click
# Root command group; subcommands are registered below (ship, mine).
@click.group()
@click.version_option()
def cli():
    """Naval Fate.
    This is the docopt example adopted to Click but with some actual
    commands implemented and not just the empty parsing which really
    is not all that interesting.
    """
# `naval ship ...` — container group for the ship subcommands.
@cli.group()
def ship():
    """Manages ships."""
# `naval ship new NAME`
@ship.command('new')
@click.argument('name')
def ship_new(name):
    """Creates a new ship."""
    click.echo('Created ship %s' % name)
# `naval ship move SHIP X Y [--speed KN]` — X/Y are parsed as floats by Click.
@ship.command('move')
@click.argument('ship')
@click.argument('x', type=float)
@click.argument('y', type=float)
@click.option('--speed', metavar='KN', default=10,
              help='Speed in knots.')
def ship_move(ship, x, y, speed):
    """Moves SHIP to the new location X,Y."""
    click.echo('Moving ship %s to %s,%s with speed %s' % (ship, x, y, speed))
# `naval ship shoot SHIP X Y`
@ship.command('shoot')
@click.argument('ship')
@click.argument('x', type=float)
@click.argument('y', type=float)
def ship_shoot(ship, x, y):
    """Makes SHIP fire to X,Y."""
    click.echo('Ship %s fires to %s,%s' % (ship, x, y))
# `naval mine ...` — container group for the mine subcommands.
@cli.group('mine')
def mine():
    """Manages mines."""
# `naval mine set X Y [--moored|--drifting]`.
# Both flags write into the same destination parameter `ty`; whichever flag
# is passed wins, and --moored is the default.
@mine.command('set')
@click.argument('x', type=float)
@click.argument('y', type=float)
@click.option('ty', '--moored', flag_value='moored',
              default=True,
              help='Moored (anchored) mine. Default.')
@click.option('ty', '--drifting', flag_value='drifting',
              help='Drifting mine.')
def mine_set(x, y, ty):
    """Sets a mine at a specific coordinate."""
    click.echo('Set %s mine at %s,%s' % (ty, x, y))
# `naval mine remove X Y`
@mine.command('remove')
@click.argument('x', type=float)
@click.argument('y', type=float)
def mine_remove(x, y):
    """Removes a mine at a specific coordinate."""
    click.echo('Removed mine at %s,%s' % (x, y))
|
edx/luigi | refs/heads/master | luigi/contrib/sge.py | 15 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SGE batch system Tasks.
Adapted by Jake Feala (@jfeala) from
`LSF extension <https://github.com/dattalab/luigi/blob/lsf/luigi/lsf.py>`_
by Alex Wiltschko (@alexbw)
Maintained by Jake Feala (@jfeala)
SunGrid Engine is a job scheduler used to allocate compute resources on a
shared cluster. Jobs are submitted using the ``qsub`` command and monitored
using ``qstat``. To get started, install luigi on all nodes.
To run luigi workflows on an SGE cluster, subclass
:class:`luigi.contrib.sge.SGEJobTask` as you would any :class:`luigi.Task`,
but override the ``work()`` method, instead of ``run()``, to define the job
code. Then, run your Luigi workflow from the master node, assigning > 1
``workers`` in order to distribute the tasks in parallel across the cluster.
The following is an example usage (and can also be found in ``sge_tests.py``)
.. code-block:: python
import logging
import luigi
import os
from luigi.contrib.sge import SGEJobTask
logger = logging.getLogger('luigi-interface')
class TestJobTask(SGEJobTask):
i = luigi.Parameter()
def work(self):
logger.info('Running test job...')
with open(self.output().path, 'w') as f:
f.write('this is a test')
def output(self):
return luigi.LocalTarget(os.path.join('/home', 'testfile_' + str(self.i)))
if __name__ == '__main__':
tasks = [TestJobTask(i=str(i), n_cpu=i+1) for i in range(3)]
luigi.build(tasks, local_scheduler=True, workers=3)
The ``n-cpu`` parameter allows you to define different compute resource
requirements (or slots, in SGE terms) for each task. In this example, the
third Task asks for 3 CPU slots. If your cluster only contains nodes with
2 CPUs, this task will hang indefinitely in the queue. See the docs for
:class:`luigi.contrib.sge.SGEJobTask` for other SGE parameters. As for any
task, you can also set these in your luigi configuration file as shown below.
The default values below were matched to the values used by MIT StarCluster,
an open-source SGE cluster manager for use with Amazon EC2::
[SGEJobTask]
shared-tmp-dir = /home
parallel-env = orte
n-cpu = 2
"""
# This extension is modeled after the hadoop.py approach.
#
# Implementation notes
# The procedure:
# - Pickle the class
# - Construct a qsub argument that runs a generic runner function with the path to the pickled class
# - Runner function loads the class from pickle
# - Runner function hits the work button on it
import os
import subprocess
import time
import sys
import logging
import random
try:
import cPickle as pickle
except ImportError:
import pickle
import luigi
from luigi.contrib.hadoop import create_packages_archive
from luigi.contrib import sge_runner
logger = logging.getLogger('luigi-interface')
logger.propagate = 0
POLL_TIME = 5 # decided to hard-code rather than configure here
def _parse_qstat_state(qstat_out, job_id):
"""Parse "state" column from `qstat` output for given job_id
Returns state for the *first* job matching job_id. Returns 'u' if
`qstat` output is empty or job_id is not found.
"""
if qstat_out.strip() == '':
return 'u'
lines = qstat_out.split('\n')
# skip past header
while not lines.pop(0).startswith('---'):
pass
for line in lines:
if line:
job, prior, name, user, state = line.strip().split()[0:5]
if int(job) == int(job_id):
return state
return 'u'
def _parse_qsub_job_id(qsub_out):
"""Parse job id from qsub output string.
Assume format:
"Your job <job_id> ("<job_name>") has been submitted"
"""
return int(qsub_out.split()[2])
def _build_qsub_command(cmd, job_name, outfile, errfile, pe, n_cpu):
"""Submit shell command to SGE queue via `qsub`"""
qsub_template = """echo {cmd} | qsub -o ":{outfile}" -e ":{errfile}" -V -r y -pe {pe} {n_cpu} -N {job_name}"""
return qsub_template.format(
cmd=cmd, job_name=job_name, outfile=outfile, errfile=errfile,
pe=pe, n_cpu=n_cpu)
class SGEJobTask(luigi.Task):
    """Base class for executing a job on SunGrid Engine
    Override ``work()`` (rather than ``run()``) with your job code.
    Parameters:
    - n_cpu: Number of CPUs (or "slots") to allocate for the Task. This
          value is passed as ``qsub -pe {pe} {n_cpu}``
    - parallel_env: SGE parallel environment name. The default is "orte",
          the parallel environment installed with MIT StarCluster. If you
          are using a different cluster environment, check with your
          sysadmin for the right pe to use. This value is passed as {pe}
          to the qsub command above.
    - shared_tmp_dir: Shared drive accessible from all nodes in the cluster.
          Task classes and dependencies are pickled to a temporary folder on
          this drive. The default is ``/home``, the NFS share location setup
          by StarCluster
    - job_name_format: String that can be passed in to customize the job name
        string passed to qsub; e.g. "Task123_{task_family}_{n_cpu}...".
    - job_name: Exact job name to pass to qsub.
    - run_locally: Run locally instead of on the cluster.
    - poll_time: the length of time to wait in order to poll qstat
    - dont_remove_tmp_dir: Instead of deleting the temporary directory, keep it.
    - no_tarball: Don't create a tarball of the luigi project directory. Can be
        useful to reduce I/O requirements when the luigi directory is accessible
        from cluster nodes already.
    """
    n_cpu = luigi.IntParameter(default=2, significant=False)
    shared_tmp_dir = luigi.Parameter(default='/home', significant=False)
    parallel_env = luigi.Parameter(default='orte', significant=False)
    job_name_format = luigi.Parameter(
        significant=False, default=None, description="A string that can be "
        "formatted with class variables to name the job with qsub.")
    job_name = luigi.Parameter(
        significant=False, default=None,
        description="Explicit job name given via qsub.")
    run_locally = luigi.BoolParameter(
        significant=False,
        description="run locally instead of on the cluster")
    poll_time = luigi.IntParameter(
        significant=False, default=POLL_TIME,
        description="specify the wait time to poll qstat for the job status")
    dont_remove_tmp_dir = luigi.BoolParameter(
        significant=False,
        description="don't delete the temporary directory used (for debugging)")
    no_tarball = luigi.BoolParameter(
        significant=False,
        description="don't tarball (and extract) the luigi project files")
    def __init__(self, *args, **kwargs):
        """Resolve the qsub job name: explicit > formatted > task family."""
        super(SGEJobTask, self).__init__(*args, **kwargs)
        if self.job_name:
            # use explicitly provided job name
            pass
        elif self.job_name_format:
            # define the job name with the provided format
            self.job_name = self.job_name_format.format(
                task_family=self.task_family, **self.__dict__)
        else:
            # default to the task family
            self.job_name = self.task_family
    def _fetch_task_failures(self):
        """Return the lines of the job's stderr file (empty list if absent)."""
        if not os.path.exists(self.errfile):
            logger.info('No error file')
            return []
        with open(self.errfile, "r") as f:
            errors = f.readlines()
        if errors == []:
            return errors
        if errors[0].strip() == 'stdin: is not a tty': # SGE complains when we submit through a pipe
            errors.pop(0)
        return errors
    def _init_local(self):
        """Prepare the shared temp dir: pickle this task and tar dependencies."""
        # Set up temp folder in shared directory (trim to max filename length)
        base_tmp_dir = self.shared_tmp_dir
        random_id = '%016x' % random.getrandbits(64)
        folder_name = self.task_id + '-' + random_id
        self.tmp_dir = os.path.join(base_tmp_dir, folder_name)
        max_filename_length = os.fstatvfs(0).f_namemax
        self.tmp_dir = self.tmp_dir[:max_filename_length]
        logger.info("Tmp dir: %s", self.tmp_dir)
        os.makedirs(self.tmp_dir)
        # Dump the code to be run into a pickle file
        logging.debug("Dumping pickled class")
        self._dump(self.tmp_dir)
        if not self.no_tarball:
            # Make sure that all the class's dependencies are tarred and available
            # This is not necessary if luigi is importable from the cluster node
            logging.debug("Tarballing dependencies")
            # Grab luigi and the module containing the code to be run
            packages = [luigi] + [__import__(self.__module__, None, None, 'dummy')]
            create_packages_archive(packages, os.path.join(self.tmp_dir, "packages.tar"))
    def run(self):
        """Run locally, or stage files and submit to the cluster via qsub."""
        if self.run_locally:
            self.work()
        else:
            self._init_local()
            self._run_job()
            # The procedure:
            # - Pickle the class
            # - Tarball the dependencies
            # - Construct a qsub argument that runs a generic runner function with the path to the pickled class
            # - Runner function loads the class from pickle
            # - Runner class untars the dependencies
            # - Runner function hits the button on the class's work() method
    def work(self):
        """Override this method, rather than ``run()``, for your actual work."""
        pass
    def _dump(self, out_dir=''):
        """Dump instance to file."""
        # NOTE(review): pickle files are opened in text mode ("w"); fine under
        # Python 2, but would corrupt pickles on Python 3 — confirm interpreter.
        with self.no_unpicklable_properties():
            self.job_file = os.path.join(out_dir, 'job-instance.pickle')
            if self.__module__ == '__main__':
                d = pickle.dumps(self)
                module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
                # Rewrite the pickled class path so the runner (which imports
                # the module by name, not as __main__) can unpickle it.
                d = d.replace('(c__main__', "(c" + module_name)
                open(self.job_file, "w").write(d)
            else:
                pickle.dump(self, open(self.job_file, "w"))
    def _run_job(self):
        """Submit the staged job with qsub, track it, then clean up tmp files."""
        # Build a qsub argument that will run sge_runner.py on the directory we've specified
        runner_path = sge_runner.__file__
        if runner_path.endswith("pyc"):
            runner_path = runner_path[:-3] + "py"
        job_str = 'python {0} "{1}" "{2}"'.format(
            runner_path, self.tmp_dir, os.getcwd()) # enclose tmp_dir in quotes to protect from special escape chars
        if self.no_tarball:
            job_str += ' "--no-tarball"'
        # Build qsub submit command
        self.outfile = os.path.join(self.tmp_dir, 'job.out')
        self.errfile = os.path.join(self.tmp_dir, 'job.err')
        submit_cmd = _build_qsub_command(job_str, self.task_family, self.outfile,
                                         self.errfile, self.parallel_env, self.n_cpu)
        logger.debug('qsub command: \n' + submit_cmd)
        # Submit the job and grab job ID
        output = subprocess.check_output(submit_cmd, shell=True)
        self.job_id = _parse_qsub_job_id(output)
        logger.debug("Submitted job to qsub with response:\n" + output)
        self._track_job()
        # Now delete the temporaries, if they're there.
        if (self.tmp_dir and os.path.exists(self.tmp_dir) and not self.dont_remove_tmp_dir):
            logger.info('Removing temporary directory %s' % self.tmp_dir)
            subprocess.call(["rm", "-rf", self.tmp_dir])
    def _track_job(self):
        """Poll `qstat` until the job finishes or fails (blocking loop)."""
        while True:
            # Sleep for a little bit
            time.sleep(self.poll_time)
            # See what the job's up to
            # ASSUMPTION
            qstat_out = subprocess.check_output(['qstat'])
            sge_status = _parse_qstat_state(qstat_out, self.job_id)
            if sge_status == 'r':
                logger.info('Job is running...')
            elif sge_status == 'qw':
                logger.info('Job is pending...')
            elif 'E' in sge_status:
                logger.error('Job has FAILED:\n' + '\n'.join(self._fetch_task_failures()))
                break
            elif sge_status == 't' or sge_status == 'u':
                # Then the job could either be failed or done.
                errors = self._fetch_task_failures()
                if not errors:
                    logger.info('Job is done')
                else:
                    logger.error('Job has FAILED:\n' + '\n'.join(errors))
                break
            else:
                logger.info('Job status is UNKNOWN!')
                logger.info('Status is : %s' % sge_status)
                raise Exception("job status isn't one of ['r', 'qw', 'E*', 't', 'u']: %s" % sge_status)
class LocalSGEJobTask(SGEJobTask):
    """A local version of SGEJobTask, for easier debugging.
    This version skips the ``qsub`` steps and simply runs ``work()``
    on the local node, so you don't need to be on an SGE cluster to
    use your Task in a test workflow.
    """
    def run(self):
        # Bypass qsub/staging entirely; execute the task's work() in-process.
        self.work()
|
mapbased/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/QueueStatusServer/filters/__init__.py | 6014 | # Required for Python to search this directory for module files
|
maxis1314/pyutils | refs/heads/master | ml/kmeans/run.py | 1 | # -*- coding: utf-8 -*-
from matplotlib import pyplot
import scipy as sp
import numpy as np
from sklearn import svm
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from scipy import sparse
import sys
reload(sys)
sys.setdefaultencoding('utf8')
# Load the data
data = np.loadtxt('1.txt')
x_p = data[:, :2] # take the first 2 columns (index pairs)
y_p = data[:, 2] # take column 2 (the value for each pair)
# Build a dense matrix from (row, col, value) triples — presumably a
# user-by-user affinity matrix; confirm against the input file format.
x = (sparse.csc_matrix((data[:,2], x_p.T)).astype(float))[:, :].todense()
nUser = x.shape[0]
# Visualize the matrix
pyplot.imshow(x, interpolation='nearest')
pyplot.xlabel('User')
pyplot.ylabel('User')
pyplot.xticks(range(nUser))
pyplot.yticks(range(nUser))
pyplot.show()
# Run K-Means with the default initialization
num_clusters = 2
clf = KMeans(n_clusters=num_clusters, n_init=1, verbose=1)
clf.fit(x)
print(clf.labels_)
# Use user 0 and user 5 as the initial cluster centers
init = np.vstack([ x[0], x[5] ])
clf = KMeans(n_clusters=2, init=init)
clf.fit(x)
print(clf.labels_) |
mollstam/UnrealPy | refs/heads/master | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/aifc.py | 29 | """Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
                  -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, perhaps possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""
import struct
import __builtin__
__all__ = ["Error","open","openfp"]
class Error(Exception):
    """Raised for all aifc-specific error conditions."""
_AIFC_version = 0xA2805140L # Version 1 of AIFF-C
def _read_long(file):
    """Read 4 bytes from file as a big-endian signed 32-bit integer.

    Raises EOFError when fewer than 4 bytes are available.
    """
    raw = file.read(4)
    try:
        (value,) = struct.unpack('>l', raw)
    except struct.error:
        raise EOFError
    return value
def _read_ulong(file):
    """Read 4 bytes from file as a big-endian unsigned 32-bit integer.

    Raises EOFError when fewer than 4 bytes are available.
    """
    raw = file.read(4)
    try:
        (value,) = struct.unpack('>L', raw)
    except struct.error:
        raise EOFError
    return value
def _read_short(file):
    """Read 2 bytes from file as a big-endian signed 16-bit integer.

    Raises EOFError when fewer than 2 bytes are available.
    """
    raw = file.read(2)
    try:
        (value,) = struct.unpack('>h', raw)
    except struct.error:
        raise EOFError
    return value
def _read_ushort(file):
    """Read 2 bytes from file as a big-endian unsigned 16-bit integer.

    Raises EOFError when fewer than 2 bytes are available.
    """
    raw = file.read(2)
    try:
        (value,) = struct.unpack('>H', raw)
    except struct.error:
        raise EOFError
    return value
def _read_string(file):
    """Read an AIFF 'pstring': a count byte followed by that many bytes.

    The count byte plus data are padded to an even total size, so when
    the length is even a trailing pad byte is consumed and discarded.
    """
    length = ord(file.read(1))
    data = '' if length == 0 else file.read(length)
    if length % 2 == 0:
        # even length => count byte + data is odd => a pad byte follows
        file.read(1)
    return data
_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
def _read_float(f): # 10 bytes
    """Read an IEEE 754 80-bit extended-precision float (as used in the
    AIFF COMM chunk) and return it as a Python float.

    Layout: 1 sign bit + 15-bit biased exponent (bias 16383), then a
    64-bit mantissa with an explicit integer bit. All-zero fields decode
    to 0.0; an all-ones exponent decodes to _HUGE_VAL (inf/NaN are not
    distinguished). Raises EOFError on a short read.
    """
    expon = _read_short(f) # 2 bytes
    sign = 1
    if expon < 0:
        # top bit of the first 16 bits is the sign bit
        sign = -1
        expon = expon + 0x8000
    himant = _read_ulong(f) # 4 bytes
    lomant = _read_ulong(f) # 4 bytes
    if expon == himant == lomant == 0:
        f = 0.0
    elif expon == 0x7FFF:
        f = _HUGE_VAL
    else:
        expon = expon - 16383
        # Plain 0x100000000 instead of the Python-2-only 0x100000000L:
        # ints auto-promote to long on Python 2, and the 'L' suffix is a
        # syntax error on Python 3.
        f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63)
    return sign * f
def _write_short(f, x):
    """Write x to f as a big-endian signed 16-bit integer."""
    packed = struct.pack('>h', x)
    f.write(packed)
def _write_ushort(f, x):
    """Write x to f as a big-endian unsigned 16-bit integer."""
    packed = struct.pack('>H', x)
    f.write(packed)
def _write_long(f, x):
    """Write x to f as a big-endian signed 32-bit integer."""
    packed = struct.pack('>l', x)
    f.write(packed)
def _write_ulong(f, x):
    """Write x to f as a big-endian unsigned 32-bit integer."""
    packed = struct.pack('>L', x)
    f.write(packed)
def _write_string(f, s):
    """Write s to f as an AIFF 'pstring' (count byte + data, even-padded).

    Raises ValueError when s is longer than 255, the maximum pstring
    length.
    """
    length = len(s)
    if length > 255:
        raise ValueError("string exceeds maximum pstring length")
    f.write(struct.pack('B', length))
    f.write(s)
    if length % 2 == 0:
        # pad count byte + data to an even total number of bytes
        f.write(chr(0))
def _write_float(f, x):
    """Write x to f as an IEEE 754 80-bit extended-precision float.

    Inverse of _read_float: 15-bit biased exponent (bias 16383) with a
    sign bit, followed by two 32-bit mantissa halves. Infinity and NaN
    are written with an all-ones exponent and a zero mantissa;
    denormalized values get exponent 0.
    """
    import math
    if x < 0:
        sign = 0x8000
        x = x * -1
    else:
        sign = 0
    if x == 0:
        expon = 0
        himant = 0
        lomant = 0
    else:
        fmant, expon = math.frexp(x)
        if expon > 16384 or fmant >= 1 or fmant != fmant: # Infinity or NaN
            expon = sign|0x7FFF
            himant = 0
            lomant = 0
        else: # Finite
            expon = expon + 16382
            if expon < 0: # denormalized
                fmant = math.ldexp(fmant, expon)
                expon = 0
            expon = expon | sign
            fmant = math.ldexp(fmant, 32)
            fsmant = math.floor(fmant)
            # int() instead of the Python-2-only long(): Python 2
            # auto-promotes to long as needed, and long() is a NameError
            # on Python 3.
            himant = int(fsmant)
            fmant = math.ldexp(fmant - fsmant, 32)
            fsmant = math.floor(fmant)
            lomant = int(fsmant)
    _write_ushort(f, expon)
    _write_ulong(f, himant)
    _write_ulong(f, lomant)
from chunk import Chunk
class Aifc_read:
    """Reader for AIFF and AIFF-C audio files.

    Parses the FORM container and its COMM/SSND/FVER/MARK chunks and
    exposes the audio parameters and sample data through getter methods.
    Compressed AIFF-C data is converted to linear samples on the fly via
    audioop or, when available, the SGI 'cl' compression library.
    """
    # Variables used in this class:
    #
    # These variables are available to the user through appropriate
    # methods of this class:
    # _file -- the open file with methods read(), close(), and seek()
    #       set through the __init__() method
    # _nchannels -- the number of audio channels
    #       available through the getnchannels() method
    # _nframes -- the number of audio frames
    #       available through the getnframes() method
    # _sampwidth -- the number of bytes per audio sample
    #       available through the getsampwidth() method
    # _framerate -- the sampling frequency
    #       available through the getframerate() method
    # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
    #       available through the getcomptype() method
    # _compname -- the human-readable AIFF-C compression type
    #       available through the getcomptype() method
    # _markers -- the marks in the audio file
    #       available through the getmarkers() and getmark()
    #       methods
    # _soundpos -- the position in the audio stream
    #       available through the tell() method, set through the
    #       setpos() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _decomp -- the decompressor from builtin module cl
    # _comm_chunk_read -- 1 iff the COMM chunk has been read
    # _aifc -- 1 iff reading an AIFF-C file
    # _ssnd_seek_needed -- 1 iff positioned correctly in audio
    #       file for readframes()
    # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
    # _framesize -- size of one frame in the file
    def initfp(self, file):
        """Parse the FORM header and all chunks from an open file object."""
        self._version = 0
        self._decomp = None
        self._convert = None
        self._markers = []
        self._soundpos = 0
        self._file = file
        chunk = Chunk(file)
        if chunk.getname() != 'FORM':
            raise Error, 'file does not start with FORM id'
        formdata = chunk.read(4)
        if formdata == 'AIFF':
            self._aifc = 0
        elif formdata == 'AIFC':
            self._aifc = 1
        else:
            raise Error, 'not an AIFF or AIFF-C file'
        self._comm_chunk_read = 0
        # Walk every chunk in the FORM container; unknown chunks are
        # skipped wholesale by chunk.skip().
        while 1:
            self._ssnd_seek_needed = 1
            try:
                chunk = Chunk(self._file)
            except EOFError:
                break
            chunkname = chunk.getname()
            if chunkname == 'COMM':
                self._read_comm_chunk(chunk)
                self._comm_chunk_read = 1
            elif chunkname == 'SSND':
                self._ssnd_chunk = chunk
                # skip the SSND chunk's 8-byte offset/blocksize header
                dummy = chunk.read(8)
                self._ssnd_seek_needed = 0
            elif chunkname == 'FVER':
                self._version = _read_ulong(chunk)
            elif chunkname == 'MARK':
                self._readmark(chunk)
            chunk.skip()
        # NOTE(review): if no SSND chunk was ever seen, self._ssnd_chunk
        # is unset and this test raises AttributeError rather than Error
        # -- confirm whether that is intended.
        if not self._comm_chunk_read or not self._ssnd_chunk:
            raise Error, 'COMM chunk and/or SSND chunk missing'
        if self._aifc and self._decomp:
            import cl
            params = [cl.ORIGINAL_FORMAT, 0,
                      cl.BITS_PER_COMPONENT, self._sampwidth * 8,
                      cl.FRAME_RATE, self._framerate]
            if self._nchannels == 1:
                params[1] = cl.MONO
            elif self._nchannels == 2:
                params[1] = cl.STEREO_INTERLEAVED
            else:
                raise Error, 'cannot compress more than 2 channels'
            self._decomp.SetParams(params)
    def __init__(self, f):
        """Accept a filename (opened 'rb') or an already-open file object."""
        if type(f) == type(''):
            f = __builtin__.open(f, 'rb')
        # else, assume it is an open file object already
        self.initfp(f)
    #
    # User visible methods.
    #
    def getfp(self):
        """Return the underlying file object."""
        return self._file
    def rewind(self):
        """Reset the read position to the start of the audio data."""
        self._ssnd_seek_needed = 1
        self._soundpos = 0
    def close(self):
        """Release the decompressor (if any) and close the file."""
        decomp = self._decomp
        try:
            if decomp:
                self._decomp = None
                decomp.CloseDecompressor()
        finally:
            self._file.close()
    def tell(self):
        """Return the current position, in frames, in the audio stream."""
        return self._soundpos
    def getnchannels(self):
        """Return the number of audio channels."""
        return self._nchannels
    def getnframes(self):
        """Return the number of audio frames in the file."""
        return self._nframes
    def getsampwidth(self):
        """Return the number of bytes per (uncompressed) audio sample."""
        return self._sampwidth
    def getframerate(self):
        """Return the sampling frequency in frames per second."""
        return self._framerate
    def getcomptype(self):
        """Return the four-character AIFF-C compression type id."""
        return self._comptype
    def getcompname(self):
        """Return the human-readable compression type name."""
        return self._compname
##  def getversion(self):
##      return self._version
    def getparams(self):
        """Return all parameters as a single 6-tuple."""
        return self.getnchannels(), self.getsampwidth(), \
               self.getframerate(), self.getnframes(), \
               self.getcomptype(), self.getcompname()
    def getmarkers(self):
        """Return the list of (id, pos, name) marks, or None if empty."""
        if len(self._markers) == 0:
            return None
        return self._markers
    def getmark(self, id):
        """Return the (id, pos, name) mark with the given id, or raise Error."""
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error, 'marker %r does not exist' % (id,)
    def setpos(self, pos):
        """Seek to the given frame position for the next readframes() call."""
        if pos < 0 or pos > self._nframes:
            raise Error, 'position not in range'
        self._soundpos = pos
        self._ssnd_seek_needed = 1
    def readframes(self, nframes):
        """Read and return the next nframes frames as a raw byte string."""
        if self._ssnd_seek_needed:
            # Re-position inside the SSND chunk (past its 8-byte
            # offset/blocksize header) to match self._soundpos.
            self._ssnd_chunk.seek(0)
            dummy = self._ssnd_chunk.read(8)
            pos = self._soundpos * self._framesize
            if pos:
                self._ssnd_chunk.seek(pos + 8)
            self._ssnd_seek_needed = 0
        if nframes == 0:
            return ''
        data = self._ssnd_chunk.read(nframes * self._framesize)
        if self._convert and data:
            data = self._convert(data)
        # advance by the frames actually decoded, not the frames requested
        self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
        return data
    #
    # Internal methods.
    #
    def _decomp_data(self, data):
        """Decompress one buffer of data via the SGI cl decompressor."""
        import cl
        dummy = self._decomp.SetParam(cl.FRAME_BUFFER_SIZE,
                                      len(data) * 2)
        return self._decomp.Decompress(len(data) // self._nchannels,
                                       data)
    def _ulaw2lin(self, data):
        """Convert u-LAW compressed data to 2-byte linear samples."""
        import audioop
        return audioop.ulaw2lin(data, 2)
    def _adpcm2lin(self, data):
        """Convert G.722 ADPCM data to 2-byte linear samples, keeping
        decoder state across calls."""
        import audioop
        if not hasattr(self, '_adpcmstate'):
            # first time
            self._adpcmstate = None
        data, self._adpcmstate = audioop.adpcm2lin(data, 2,
                                                   self._adpcmstate)
        return data
    def _read_comm_chunk(self, chunk):
        """Parse the COMM chunk: channel/frame counts, sample width,
        frame rate and (for AIFF-C) the compression type, selecting the
        matching decompression converter."""
        self._nchannels = _read_short(chunk)
        self._nframes = _read_long(chunk)
        # sample width is stored in bits; round up to whole bytes
        self._sampwidth = (_read_short(chunk) + 7) // 8
        self._framerate = int(_read_float(chunk))
        self._framesize = self._nchannels * self._sampwidth
        if self._aifc:
            #DEBUG: SGI's soundeditor produces a bad size :-(
            kludge = 0
            if chunk.chunksize == 18:
                kludge = 1
                print 'Warning: bad COMM chunk size'
                chunk.chunksize = 23
            #DEBUG end
            self._comptype = chunk.read(4)
            #DEBUG start
            if kludge:
                length = ord(chunk.file.read(1))
                if length & 1 == 0:
                    length = length + 1
                chunk.chunksize = chunk.chunksize + length
                chunk.file.seek(-1, 1)
            #DEBUG end
            self._compname = _read_string(chunk)
            if self._comptype != 'NONE':
                if self._comptype == 'G722':
                    try:
                        import audioop
                    except ImportError:
                        pass
                    else:
                        self._convert = self._adpcm2lin
                        # decompressed samples are 2 bytes wide
                        self._sampwidth = 2
                        return
                # for ULAW and ALAW try Compression Library
                try:
                    import cl
                except ImportError:
                    if self._comptype in ('ULAW', 'ulaw'):
                        try:
                            import audioop
                            self._convert = self._ulaw2lin
                            self._sampwidth = 2
                            return
                        except ImportError:
                            pass
                    raise Error, 'cannot read compressed AIFF-C files'
                if self._comptype in ('ULAW', 'ulaw'):
                    scheme = cl.G711_ULAW
                elif self._comptype in ('ALAW', 'alaw'):
                    scheme = cl.G711_ALAW
                else:
                    raise Error, 'unsupported compression type'
                self._decomp = cl.OpenDecompressor(scheme)
                self._convert = self._decomp_data
                self._sampwidth = 2
        else:
            # plain AIFF: no compression fields in the COMM chunk
            self._comptype = 'NONE'
            self._compname = 'not compressed'
    def _readmark(self, chunk):
        """Parse the MARK chunk into self._markers as (id, pos, name)."""
        nmarkers = _read_short(chunk)
        # Some files appear to contain invalid counts.
        # Cope with this by testing for EOF.
        try:
            for i in range(nmarkers):
                id = _read_short(chunk)
                pos = _read_long(chunk)
                name = _read_string(chunk)
                if pos or name:
                    # some files appear to have
                    # dummy markers consisting of
                    # a position 0 and name ''
                    self._markers.append((id, pos, name))
        except EOFError:
            print 'Warning: MARK chunk contains only',
            print len(self._markers),
            if len(self._markers) == 1: print 'marker',
            else: print 'markers',
            print 'instead of', nmarkers
class Aifc_write:
    """Writer for AIFF and AIFF-C audio files.

    Parameters (channels, sample width, frame rate, ...) must be set
    before the first write; the FORM/COMM/SSND header is emitted lazily
    on the first write and patched afterwards when the actual sizes
    differ from the ones written.
    """
    # Variables used in this class:
    #
    # These variables are user settable through appropriate methods
    # of this class:
    # _file -- the open file with methods write(), close(), tell(), seek()
    #       set through the __init__() method
    # _comptype -- the AIFF-C compression type ('NONE' in AIFF)
    #       set through the setcomptype() or setparams() method
    # _compname -- the human-readable AIFF-C compression type
    #       set through the setcomptype() or setparams() method
    # _nchannels -- the number of audio channels
    #       set through the setnchannels() or setparams() method
    # _sampwidth -- the number of bytes per audio sample
    #       set through the setsampwidth() or setparams() method
    # _framerate -- the sampling frequency
    #       set through the setframerate() or setparams() method
    # _nframes -- the number of audio frames written to the header
    #       set through the setnframes() or setparams() method
    # _aifc -- whether we're writing an AIFF-C file or an AIFF file
    #       set through the aifc() method, reset through the
    #       aiff() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _comp -- the compressor from builtin module cl
    # _nframeswritten -- the number of audio frames actually written
    # _datalength -- the size of the audio samples written to the header
    # _datawritten -- the size of the audio samples actually written
    def __init__(self, f):
        """Accept a filename (opened 'wb') or an open file object.

        A filename ending in '.aiff' selects plain AIFF output;
        everything else (including file objects) defaults to AIFF-C.
        """
        if type(f) == type(''):
            filename = f
            f = __builtin__.open(f, 'wb')
        else:
            # else, assume it is an open file object already
            filename = '???'
        self.initfp(f)
        if filename[-5:] == '.aiff':
            self._aifc = 0
        else:
            self._aifc = 1
    def initfp(self, file):
        """Reset all parameters and counters for a fresh output file."""
        self._file = file
        self._version = _AIFC_version
        self._comptype = 'NONE'
        self._compname = 'not compressed'
        self._comp = None
        self._convert = None
        self._nchannels = 0
        self._sampwidth = 0
        self._framerate = 0
        self._nframes = 0
        self._nframeswritten = 0
        self._datawritten = 0
        self._datalength = 0
        self._markers = []
        self._marklength = 0
        self._aifc = 1      # AIFF-C is default
    def __del__(self):
        # Make a best effort to finalize the header if the user forgot
        # to call close().
        if self._file:
            self.close()
    #
    # User visible methods.
    #
    def aiff(self):
        """Select plain AIFF output (must precede the first write)."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        self._aifc = 0
    def aifc(self):
        """Select AIFF-C output (must precede the first write)."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        self._aifc = 1
    def setnchannels(self, nchannels):
        """Set the number of audio channels (>= 1)."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if nchannels < 1:
            raise Error, 'bad # of channels'
        self._nchannels = nchannels
    def getnchannels(self):
        """Return the configured number of channels, or raise Error."""
        if not self._nchannels:
            raise Error, 'number of channels not set'
        return self._nchannels
    def setsampwidth(self, sampwidth):
        """Set the sample width in bytes (1..4)."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if sampwidth < 1 or sampwidth > 4:
            raise Error, 'bad sample width'
        self._sampwidth = sampwidth
    def getsampwidth(self):
        """Return the configured sample width, or raise Error."""
        if not self._sampwidth:
            raise Error, 'sample width not set'
        return self._sampwidth
    def setframerate(self, framerate):
        """Set the sampling frequency in frames per second (> 0)."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if framerate <= 0:
            raise Error, 'bad frame rate'
        self._framerate = framerate
    def getframerate(self):
        """Return the configured frame rate, or raise Error."""
        if not self._framerate:
            raise Error, 'frame rate not set'
        return self._framerate
    def setnframes(self, nframes):
        """Pre-declare the total frame count (avoids header patching)."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        self._nframes = nframes
    def getnframes(self):
        """Return the number of frames actually written so far."""
        return self._nframeswritten
    def setcomptype(self, comptype, compname):
        """Set the AIFF-C compression type id and human-readable name."""
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
            raise Error, 'unsupported compression type'
        self._comptype = comptype
        self._compname = compname
    def getcomptype(self):
        """Return the compression type id."""
        return self._comptype
    def getcompname(self):
        """Return the human-readable compression name."""
        return self._compname
##  def setversion(self, version):
##      if self._nframeswritten:
##          raise Error, 'cannot change parameters after starting to write'
##      self._version = version
    def setparams(self, info):
        """Set all parameters at once from a 6-tuple (the same shape
        that Aifc_read.getparams() returns)."""
        nchannels, sampwidth, framerate, nframes, comptype, compname = info
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
            raise Error, 'unsupported compression type'
        self.setnchannels(nchannels)
        self.setsampwidth(sampwidth)
        self.setframerate(framerate)
        self.setnframes(nframes)
        self.setcomptype(comptype, compname)
    def getparams(self):
        """Return all parameters as a 6-tuple, or raise Error if the
        mandatory ones are missing."""
        if not self._nchannels or not self._sampwidth or not self._framerate:
            raise Error, 'not all parameters set'
        return self._nchannels, self._sampwidth, self._framerate, \
               self._nframes, self._comptype, self._compname
    def setmark(self, id, pos, name):
        """Add (or replace) the mark with the given id."""
        if id <= 0:
            raise Error, 'marker ID must be > 0'
        if pos < 0:
            raise Error, 'marker position must be >= 0'
        if type(name) != type(''):
            raise Error, 'marker name must be a string'
        for i in range(len(self._markers)):
            if id == self._markers[i][0]:
                self._markers[i] = id, pos, name
                return
        self._markers.append((id, pos, name))
    def getmark(self, id):
        """Return the (id, pos, name) mark with the given id, or raise Error."""
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error, 'marker %r does not exist' % (id,)
    def getmarkers(self):
        """Return the list of marks, or None if there are none."""
        if len(self._markers) == 0:
            return None
        return self._markers
    def tell(self):
        """Return the current position in frames (useful with setmark())."""
        return self._nframeswritten
    def writeframesraw(self, data):
        """Write audio data without patching the header afterwards."""
        self._ensure_header_written(len(data))
        nframes = len(data) // (self._sampwidth * self._nchannels)
        if self._convert:
            data = self._convert(data)
        self._file.write(data)
        self._nframeswritten = self._nframeswritten + nframes
        self._datawritten = self._datawritten + len(data)
    def writeframes(self, data):
        """Write audio data and patch the header if sizes changed."""
        self.writeframesraw(data)
        if self._nframeswritten != self._nframes or \
              self._datalength != self._datawritten:
            self._patchheader()
    def close(self):
        """Flush markers, patch the header, and close the file.

        Safe to call more than once; later calls are no-ops.
        """
        if self._file is None:
            return
        try:
            self._ensure_header_written(0)
            if self._datawritten & 1:
                # quick pad to even size
                self._file.write(chr(0))
                self._datawritten = self._datawritten + 1
            self._writemarkers()
            if self._nframeswritten != self._nframes or \
                  self._datalength != self._datawritten or \
                  self._marklength:
                self._patchheader()
            if self._comp:
                self._comp.CloseCompressor()
                self._comp = None
        finally:
            # Prevent ref cycles
            self._convert = None
            f = self._file
            self._file = None
            f.close()
    #
    # Internal methods.
    #
    def _comp_data(self, data):
        """Compress one buffer of data via the SGI cl compressor."""
        import cl
        dummy = self._comp.SetParam(cl.FRAME_BUFFER_SIZE, len(data))
        dummy = self._comp.SetParam(cl.COMPRESSED_BUFFER_SIZE, len(data))
        return self._comp.Compress(self._nframes, data)
    def _lin2ulaw(self, data):
        """Convert 2-byte linear samples to u-LAW."""
        import audioop
        return audioop.lin2ulaw(data, 2)
    def _lin2adpcm(self, data):
        """Convert 2-byte linear samples to G.722 ADPCM, keeping encoder
        state across calls."""
        import audioop
        if not hasattr(self, '_adpcmstate'):
            self._adpcmstate = None
        data, self._adpcmstate = audioop.lin2adpcm(data, 2,
                                                   self._adpcmstate)
        return data
    def _ensure_header_written(self, datasize):
        """Validate parameters and emit the file header on first write."""
        if not self._nframeswritten:
            if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'):
                if not self._sampwidth:
                    self._sampwidth = 2
                if self._sampwidth != 2:
                    raise Error, 'sample width must be 2 when compressing with ULAW or ALAW'
            if self._comptype == 'G722':
                if not self._sampwidth:
                    self._sampwidth = 2
                if self._sampwidth != 2:
                    raise Error, 'sample width must be 2 when compressing with G7.22 (ADPCM)'
            if not self._nchannels:
                raise Error, '# channels not specified'
            if not self._sampwidth:
                raise Error, 'sample width not specified'
            if not self._framerate:
                raise Error, 'sampling rate not specified'
            self._write_header(datasize)
    def _init_compression(self):
        """Pick and initialize the converter for the selected comptype."""
        if self._comptype == 'G722':
            self._convert = self._lin2adpcm
            return
        try:
            import cl
        except ImportError:
            if self._comptype in ('ULAW', 'ulaw'):
                try:
                    import audioop
                    self._convert = self._lin2ulaw
                    return
                except ImportError:
                    pass
            raise Error, 'cannot write compressed AIFF-C files'
        if self._comptype in ('ULAW', 'ulaw'):
            scheme = cl.G711_ULAW
        elif self._comptype in ('ALAW', 'alaw'):
            scheme = cl.G711_ALAW
        else:
            raise Error, 'unsupported compression type'
        self._comp = cl.OpenCompressor(scheme)
        params = [cl.ORIGINAL_FORMAT, 0,
                  cl.BITS_PER_COMPONENT, self._sampwidth * 8,
                  cl.FRAME_RATE, self._framerate,
                  cl.FRAME_BUFFER_SIZE, 100,
                  cl.COMPRESSED_BUFFER_SIZE, 100]
        if self._nchannels == 1:
            params[1] = cl.MONO
        elif self._nchannels == 2:
            params[1] = cl.STEREO_INTERLEAVED
        else:
            raise Error, 'cannot compress more than 2 channels'
        self._comp.SetParams(params)
        # the compressor produces a header which we ignore
        dummy = self._comp.Compress(0, '')
        self._convert = self._comp_data
    def _write_header(self, initlength):
        """Write the FORM/FVER/COMM/SSND header, remembering the file
        positions of the length fields so they can be patched later."""
        if self._aifc and self._comptype != 'NONE':
            self._init_compression()
        self._file.write('FORM')
        if not self._nframes:
            self._nframes = initlength // (self._nchannels * self._sampwidth)
        self._datalength = self._nframes * self._nchannels * self._sampwidth
        if self._datalength & 1:
            self._datalength = self._datalength + 1
        if self._aifc:
            # compressed data occupies fewer bytes than the linear input
            if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'):
                self._datalength = self._datalength // 2
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
            elif self._comptype == 'G722':
                self._datalength = (self._datalength + 3) // 4
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
        try:
            self._form_length_pos = self._file.tell()
        except (AttributeError, IOError):
            # unseekable stream: header patching will be impossible
            self._form_length_pos = None
        commlength = self._write_form_length(self._datalength)
        if self._aifc:
            self._file.write('AIFC')
            self._file.write('FVER')
            _write_ulong(self._file, 4)
            _write_ulong(self._file, self._version)
        else:
            self._file.write('AIFF')
        self._file.write('COMM')
        _write_ulong(self._file, commlength)
        _write_short(self._file, self._nchannels)
        if self._form_length_pos is not None:
            self._nframes_pos = self._file.tell()
        _write_ulong(self._file, self._nframes)
        if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
            _write_short(self._file, 8)
        else:
            _write_short(self._file, self._sampwidth * 8)
        _write_float(self._file, self._framerate)
        if self._aifc:
            self._file.write(self._comptype)
            _write_string(self._file, self._compname)
        self._file.write('SSND')
        if self._form_length_pos is not None:
            self._ssnd_length_pos = self._file.tell()
        _write_ulong(self._file, self._datalength + 8)
        _write_ulong(self._file, 0)
        _write_ulong(self._file, 0)
    def _write_form_length(self, datalength):
        """Write the FORM chunk length and return the COMM chunk length."""
        if self._aifc:
            # 18 fixed COMM bytes + 4-char comptype + pstring count byte
            # + the name, padded to an even size
            commlength = 18 + 5 + len(self._compname)
            if commlength & 1:
                commlength = commlength + 1
            verslength = 12
        else:
            commlength = 18
            verslength = 0
        _write_ulong(self._file, 4 + verslength + self._marklength + \
                     8 + commlength + 16 + datalength)
        return commlength
    def _patchheader(self):
        """Rewrite the length fields now that the real sizes are known."""
        curpos = self._file.tell()
        if self._datawritten & 1:
            datalength = self._datawritten + 1
            self._file.write(chr(0))
        else:
            datalength = self._datawritten
        if datalength == self._datalength and \
              self._nframes == self._nframeswritten and \
              self._marklength == 0:
            self._file.seek(curpos, 0)
            return
        self._file.seek(self._form_length_pos, 0)
        dummy = self._write_form_length(datalength)
        self._file.seek(self._nframes_pos, 0)
        _write_ulong(self._file, self._nframeswritten)
        self._file.seek(self._ssnd_length_pos, 0)
        _write_ulong(self._file, datalength + 8)
        self._file.seek(curpos, 0)
        self._nframes = self._nframeswritten
        self._datalength = datalength
    def _writemarkers(self):
        """Append the MARK chunk listing all marks set via setmark()."""
        if len(self._markers) == 0:
            return
        self._file.write('MARK')
        # 2 bytes for the marker count, then 6 bytes + a pstring per mark
        length = 2
        for marker in self._markers:
            id, pos, name = marker
            length = length + len(name) + 1 + 6
            if len(name) & 1 == 0:
                length = length + 1
        _write_ulong(self._file, length)
        self._marklength = length + 8
        _write_short(self._file, len(self._markers))
        for marker in self._markers:
            id, pos, name = marker
            _write_short(self._file, id)
            _write_ulong(self._file, pos)
            _write_string(self._file, name)
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Aifc_read(f)
elif mode in ('w', 'wb'):
return Aifc_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open # B/W compatibility
if __name__ == '__main__':
    # Demo / smoke test: print the parameters of the input file and,
    # when a second filename is given, copy the audio into it.
    import sys
    if not sys.argv[1:]:
        sys.argv.append('/usr/demos/data/audio/bach.aiff')
    fn = sys.argv[1]
    f = open(fn, 'r')
    try:
        print "Reading", fn
        print "nchannels =", f.getnchannels()
        print "nframes =", f.getnframes()
        print "sampwidth =", f.getsampwidth()
        print "framerate =", f.getframerate()
        print "comptype =", f.getcomptype()
        print "compname =", f.getcompname()
        if sys.argv[2:]:
            gn = sys.argv[2]
            print "Writing", gn
            g = open(gn, 'w')
            try:
                g.setparams(f.getparams())
                # copy in 1024-frame chunks until EOF
                while 1:
                    data = f.readframes(1024)
                    if not data:
                        break
                    g.writeframes(data)
            finally:
                g.close()
            print "Done."
    finally:
        f.close()
|
ClearCorp/account-financial-tools | refs/heads/8.0 | contract_multic_fix/account.py | 1 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
# from openerp import SUPERUSER_ID
class account_analytic_invoice_line(models.Model):
    """Contract (analytic account) invoice line, extended with taxes."""
    _inherit = "account.analytic.invoice.line"
    # NOTE(review): 'analytic_accountr_line_id' looks like a typo for
    # 'analytic_account_line_id', but it is the stored m2m column name;
    # renaming it would require a database migration.
    tax_id = fields.Many2many(
        'account.tax',
        'analytic_account_tax',
        'analytic_accountr_line_id',
        'tax_id',
        'Taxes',
        domain="[('parent_id','=',False), ('company_id', '=', "
        "parent.company_id), ('type_tax_use','in', ['sale', 'all'])]",
    )
    @api.multi
    def product_id_change(self, product, uom_id, qty=0, name='', partner_id=False, price_unit=False, pricelist_id=False, company_id=None):
        """Extend the product onchange to also propose the product's
        sale taxes, restricted to the given company and mapped through
        the fiscal position."""
        result = super(account_analytic_invoice_line, self).product_id_change(
            product, uom_id, qty=qty, name=name, partner_id=partner_id, price_unit=price_unit, pricelist_id=pricelist_id, company_id=company_id)
        product = self.env['product.product'].browse(product)
        # if self._uid == SUPERUSER_ID and self._context.get('company_id'):
        #     taxes = product.taxes_id.filtered(
        #         lambda r: r.company_id.id == self._context['company_id'])
        # else:
        #     taxes = product.taxes_id
        # Keep only the taxes that belong to the line's company.
        taxes = product.taxes_id.filtered(
            lambda r: r.company_id.id == company_id)
        result['value']['tax_id'] = self.env[
            'account.fiscal.position'].map_tax(taxes)
        return result
class account_analytic_account(models.Model):
    """Contract: carry the contract line's taxes onto the invoice line."""
    _inherit = "account.analytic.account"

    @api.model
    def _prepare_invoice_line(self, line, fiscal_position):
        """Extend invoice line preparation with the contract line taxes."""
        values = super(account_analytic_account, self)._prepare_invoice_line(
            line, fiscal_position)
        tax_ids = line.tax_id.ids
        values['invoice_line_tax_id'] = [(6, 0, tax_ids)]
        return values
|
harterj/moose | refs/heads/devel | python/TestHarness/tests/test_DistributedMesh.py | 9 | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from TestHarnessTestCase import TestHarnessTestCase
class TestHarnessTester(TestHarnessTestCase):
    """Checks that the TestHarness honors distributed-mesh restrictions."""
    def testSyntax(self):
        """
        Test for correct operation with distributed mesh tests
        """
        # Verify the distributed mesh test is skipped
        output = self.runExceptionTests('-i', 'mesh_mode_distributed', '--no-color').decode('utf-8')
        self.assertIn('[MESH_MODE!=DISTRIBUTED] SKIP', output)
        # Verify the distributed mesh test is passing when providing --distributed
        # To be accurate, test for OK rather than asserting if 'distributed' is
        # missing from the output.
        output = self.runTests('--distributed', '-i', 'mesh_mode_distributed')
        self.assertRegex(output.decode('utf-8'), 'test_harness.distributed_mesh.*?OK')
|
yuanagain/seniorthesis | refs/heads/master | venv/bin/activate_this.py | 1076 | """By using execfile(this_file, dict(__file__=this_file)) you will
activate this virtualenv environment.
This can be used when you must use an existing Python interpreter, not
the virtualenv bin/python
"""
try:
__file__
except NameError:
raise AssertionError(
"You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
import sys
import os
old_os_path = os.environ.get('PATH', '')
os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if sys.platform == 'win32':
site_packages = os.path.join(base, 'Lib', 'site-packages')
else:
site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
prev_sys_path = list(sys.path)
import site
site.addsitedir(site_packages)
sys.real_prefix = sys.prefix
sys.prefix = base
# Move the added items to the front of the path:
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
|
theonlynexus/gui2py | refs/heads/master | gui/windows/htmlwin.py | 14 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"gui2py's HTML Top Level Window (uses wx.Frame and gui.HtmlBox)"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2013- Mariano Reingart"
__license__ = "LGPL 3.0"
# This is an experimental approach to show rich internet applications
# There is no PythonCard equivalent, so code is almost completely new
import wx
from ..component import Component, Spec, InitSpec, EventSpec, StyleSpec
from ..controls import HtmlBox
from ..windows import Window
from ..event import UIEvent
class HtmlWindow(Window):
    "A window that contains a html document (including embedded controls)"
    def __init__(self, parent=None, **kwargs):
        Window.__init__(self, parent=parent, **kwargs)
        # Single HtmlBox child that fills the whole client area.
        self.html = HtmlBox(self, name="document", scrollbars=True,
                            left="0", top="0", bgcolor="black", resizable=False,
                            width="auto", height="auto")
        self.html.visible = True
    def open(self, location=None):
        """Load the document at location: a URL (http/https/file scheme)
        is fetched as a page, anything else is treated as a local file
        path; None is a no-op."""
        if not location:
            print "none!"
            pass
        elif location.startswith(("http://", "https://", "file://")):
            print "loading page", location
            self.html.load_page(location)
        else:
            print "loading file", location
            self.html.load_file(location)
        print "done!"
    def write(self, text):
        """Append HTML text to the document and refresh the display."""
        self.html.write(text)
        self.html.redraw()
if __name__ == "__main__":
# basic test until proper unit_test
from ..controls import Button
app = wx.App(redirect=False)
w = HtmlWindow(title="hello world", name="frmTest", resizable=True,
visible=False)
#w.open("http://www.google.com/")
w.write("<a href='hola'>hello</a")
w.show()
app.MainLoop()
|
koniiiik/django | refs/heads/master | tests/template_tests/syntax_tests/test_exceptions.py | 513 | from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
from .test_extends import inheritance_templates
class ExceptionsTests(SimpleTestCase):
    """Template-engine failure modes around {% extends %} and
    {{ block.super }}: bad parent names, duplicate extends tags,
    missing {% load %} in a child, and block.super in a base template."""
    @setup({'exception01': "{% extends 'nonexistent' %}"})
    def test_exception01(self):
        """
        Raise exception for invalid template name
        """
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.render_to_string('exception01')

    @setup({'exception02': '{% extends nonexistent %}'})
    def test_exception02(self):
        """
        Raise exception for invalid variable template name
        """
        # With string_if_invalid the unresolved variable becomes a
        # (nonexistent) template name; otherwise the tag itself fails.
        if self.engine.string_if_invalid:
            with self.assertRaises(TemplateDoesNotExist):
                self.engine.render_to_string('exception02')
        else:
            with self.assertRaises(TemplateSyntaxError):
                self.engine.render_to_string('exception02')

    @setup(
        {'exception03': "{% extends 'inheritance01' %}"
                        "{% block first %}2{% endblock %}{% extends 'inheritance16' %}"},
        inheritance_templates,
    )
    def test_exception03(self):
        """
        Raise exception for extra {% extends %} tags
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('exception03')

    @setup(
        {'exception04': "{% extends 'inheritance17' %}{% block first %}{% echo 400 %}5678{% endblock %}"},
        inheritance_templates,
    )
    def test_exception04(self):
        """
        Raise exception for custom tags used in child with {% load %} tag in parent, not in child
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('exception04')

    @setup({'exception05': '{% block first %}{{ block.super }}{% endblock %}'})
    def test_exception05(self):
        """
        Raise exception for block.super used in base template
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('exception05')
|
totcoindev/totcoin | refs/heads/master | qa/rpc-tests/invalidblockrequest.py | 38 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import copy
import time
'''
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidBlockRequestTest(ComparisonTestFramework):
    ''' Can either run this test as 1 node with expected answers, or two and compare them.
    Change the "outcome" variable from each TestInstance object to only do the comparison. '''

    def __init__(self):
        # NOTE(review): the base class __init__ is not called here --
        # presumably ComparisonTestFramework tolerates that; confirm
        # before changing.
        self.num_nodes = 1

    def run_test(self):
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        self.tip = None
        self.block_time = None
        NetworkThread().start() # Start up network handling in another thread
        sync_masternodes(self.nodes)
        test.run()

    def get_tests(self):
        """Generator of TestInstance objects driving the p2p comparison:
        a valid block, 100 maturing blocks, a merkle-malleated duplicate
        transaction block, and a bad-coinbase-value block."""
        if self.tip is None:
            self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        self.block_time = int(time.time())+1

        '''
        Create a new block with an anyone-can-spend coinbase
        '''
        height = 1
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        height += 1
        yield TestInstance([[block, True]])

        '''
        Now we need that block to mature so we can spend the coinbase.
        '''
        test = TestInstance(sync_every_block=False)
        for i in xrange(100):
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            self.tip = block.sha256
            self.block_time += 1
            test.blocks_and_transactions.append([block, True])
            height += 1
        yield test

        '''
        Now we use merkle-root malleability to generate an invalid block with
        same blockheader.
        Manufacture a block with 3 transactions (coinbase, spend of prior
        coinbase, spend of that spend).  Duplicate the 3rd transaction to
        leave merkle root and blockheader unchanged but invalidate the block.
        '''
        block2 = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1

        # b'0x51' is OP_TRUE
        tx1 = create_transaction(self.block1.vtx[0], 0, b'\x51', 50 * COIN)
        tx2 = create_transaction(tx1, 0, b'\x51', 50 * COIN)

        block2.vtx.extend([tx1, tx2])
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.rehash()
        block2.solve()
        orig_hash = block2.sha256
        block2_orig = copy.deepcopy(block2)

        # Mutate block 2: appending a duplicate of the last tx keeps the
        # merkle root (and thus the header/hash) unchanged.
        block2.vtx.append(tx2)
        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
        assert_equal(orig_hash, block2.rehash())
        assert(block2_orig.vtx != block2.vtx)

        self.tip = block2.sha256
        yield TestInstance([[block2, RejectResult(16, b'bad-txns-duplicate')], [block2_orig, True]])
        height += 1

        '''
        Make sure that a totally screwed up block is not valid.
        '''
        block3 = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block3.vtx[0].vout[0].nValue = 1000 * COIN # Too high!
        block3.vtx[0].sha256=None
        block3.vtx[0].calc_sha256()
        block3.hashMerkleRoot = block3.calc_merkle_root()
        block3.rehash()
        block3.solve()

        yield TestInstance([[block3, RejectResult(16, b'bad-cb-amount')]])
if __name__ == '__main__':
    # Script entry point: run the comparison test directly.
    InvalidBlockRequestTest().main()
|
mrlegion/web-portfolio-server | refs/heads/master | node_modules/node-gyp/gyp/gyptest.py | 1752 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import optparse
import os
import shlex
import subprocess
import sys
class CommandRunner(object):
    """
    Executor class for commands, including "commands" implemented by
    Python functions.

    Class attributes:
      verbose -- when True, display() prints each command before it runs
      active  -- when False, execute() is a no-op (dry-run mode)
    """
    verbose = True
    active = True

    def __init__(self, dictionary=None):
        # None (not {}) as the default avoids the shared-mutable-default
        # pitfall; behavior for callers is unchanged.
        self.subst_dictionary(dictionary or {})

    def subst_dictionary(self, dictionary):
        # Dictionary used for %-substitution of string commands.
        self._subst_dictionary = dictionary

    def subst(self, string, dictionary=None):
        """
        Substitutes (via the format operator) the values in the specified
        dictionary into the specified command.

        The command can be an (action, string) tuple. In all cases, we
        perform substitution on strings and don't worry if something isn't
        a string. (It's probably a Python function to be executed.)
        """
        if dictionary is None:
            dictionary = self._subst_dictionary
        if dictionary:
            try:
                string = string % dictionary
            except TypeError:
                # Not a %-formattable string (e.g. a function tuple): pass through.
                pass
        return string

    def display(self, command, stdout=None, stderr=None):
        """Print a human-readable form of *command* to stdout (if verbose)."""
        if not self.verbose:
            return
        if type(command) == type(()):
            # (function, args...) tuple: show it as a call expression.
            func = command[0]
            args = command[1:]
            s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
        elif type(command) == type([]):
            # BUG FIX: this was a plain 'if', so the tuple branch above fell
            # through to the else and crashed on s.endswith() for tuples.
            # TODO: quote arguments containing spaces
            # TODO: handle meta characters?
            s = ' '.join(command)
        else:
            s = self.subst(command)
        if not s.endswith('\n'):
            s += '\n'
        sys.stdout.write(s)
        sys.stdout.flush()

    def execute(self, command, stdout=None, stderr=None):
        """
        Executes a single command.
        """
        if not self.active:
            # Dry-run mode: pretend success.
            return 0
        if type(command) == type(''):
            command = self.subst(command)
            cmdargs = shlex.split(command)
            if cmdargs[0] == 'cd':
                # 'cd' must be emulated in-process; turn it into a function call.
                command = (os.chdir,) + tuple(cmdargs[1:])
        if type(command) == type(()):
            func = command[0]
            args = command[1:]
            return func(*args)
        else:
            if stdout is sys.stdout:
                # Same as passing sys.stdout, except python2.4 doesn't fail on it.
                subout = None
            else:
                # Open pipe for anything else so Popen works on python2.4.
                subout = subprocess.PIPE
            if stderr is sys.stderr:
                # Same as passing sys.stderr, except python2.4 doesn't fail on it.
                suberr = None
            elif stderr is None:
                # Merge with stdout if stderr isn't specified.
                suberr = subprocess.STDOUT
            else:
                # Open pipe for anything else so Popen works on python2.4.
                suberr = subprocess.PIPE
            p = subprocess.Popen(command,
                                 shell=(sys.platform == 'win32'),
                                 stdout=subout,
                                 stderr=suberr)
            p.wait()
            if stdout is None:
                # Capture output for later inspection by the caller.
                self.stdout = p.stdout.read()
            elif stdout is not sys.stdout:
                stdout.write(p.stdout.read())
            if stderr not in (None, sys.stderr):
                stderr.write(p.stderr.read())
            return p.returncode

    def run(self, command, display=None, stdout=None, stderr=None):
        """
        Runs a single command, displaying it first.
        """
        if display is None:
            display = command
        self.display(display)
        return self.execute(command, stdout, stderr)
class Unbuffered(object):
    """File-like proxy that flushes the wrapped stream after every write.

    Attributes other than write() are delegated to the wrapped stream.
    """

    def __init__(self, fp):
        self.fp = fp

    def write(self, data):
        # Write, then flush immediately so output is never buffered.
        self.fp.write(data)
        self.fp.flush()

    def __getattr__(self, attr):
        # Delegate everything else (flush, fileno, ...) to the real stream.
        return getattr(self.fp, attr)
# Replace the standard streams with auto-flushing wrappers so test
# progress is visible immediately, even when output is piped.
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
    """Return True when *f* is named like a gyp test script (gyptest*.py)."""
    has_prefix = f.startswith('gyptest')
    has_suffix = f.endswith('.py')
    return has_prefix and has_suffix
def find_all_gyptest_files(directory):
    """
    Walk *directory* and return a sorted list of every gyp test file
    (gyptest*.py) found, skipping .svn administrative subdirectories.
    """
    found = []
    for root, dirs, files in os.walk(directory):
        if '.svn' in dirs:
            # Prune in place so os.walk does not descend into .svn.
            dirs.remove('.svn')
        for name in files:
            if is_test_name(name):
                found.append(os.path.join(root, name))
    found.sort()
    return found
def main(argv=None):
    """Parse command-line options, collect gyp tests and run each test once
    per generator format; return 0 on success, 1 if any test failed."""
    if argv is None:
        argv = sys.argv

    usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-a", "--all", action="store_true",
                      help="run all tests")
    parser.add_option("-C", "--chdir", action="store", default=None,
                      help="chdir to the specified directory")
    parser.add_option("-f", "--format", action="store", default='',
                      help="run tests with the specified formats")
    parser.add_option("-G", '--gyp_option', action="append", default=[],
                      help="Add -G options to the gyp command line")
    parser.add_option("-l", "--list", action="store_true",
                      help="list available tests and exit")
    parser.add_option("-n", "--no-exec", action="store_true",
                      help="no execute, just print the command line")
    parser.add_option("--passed", action="store_true",
                      help="report passed tests")
    parser.add_option("--path", action="append", default=[],
                      help="additional $PATH directory")
    parser.add_option("-q", "--quiet", action="store_true",
                      help="quiet, don't print test command lines")
    opts, args = parser.parse_args(argv[1:])

    if opts.chdir:
        os.chdir(opts.chdir)

    if opts.path:
        # Prepend user-supplied directories to $PATH for spawned tests.
        extra_path = [os.path.abspath(p) for p in opts.path]
        extra_path = os.pathsep.join(extra_path)
        os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']

    if not args:
        # Running everything is opt-in via -a to avoid accidental full runs.
        if not opts.all:
            sys.stderr.write('Specify -a to get all tests.\n')
            return 1
        args = ['test']

    # Expand directory arguments into the gyptest*.py files they contain.
    tests = []
    for arg in args:
        if os.path.isdir(arg):
            tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
        else:
            if not is_test_name(os.path.basename(arg)):
                print >>sys.stderr, arg, 'is not a valid gyp test name.'
                sys.exit(1)
            tests.append(arg)

    if opts.list:
        for test in tests:
            print test
        sys.exit(0)

    CommandRunner.verbose = not opts.quiet
    CommandRunner.active = not opts.no_exec
    cr = CommandRunner()

    # Tests import helpers from test/lib via PYTHONPATH.
    os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
    if not opts.quiet:
        sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])

    passed = []
    failed = []
    no_result = []

    if opts.format:
        format_list = opts.format.split(',')
    else:
        # TODO: not duplicate this mapping from pylib/gyp/__init__.py
        format_list = {
            'aix5': ['make'],
            'freebsd7': ['make'],
            'freebsd8': ['make'],
            'openbsd5': ['make'],
            'cygwin': ['msvs'],
            'win32': ['msvs', 'ninja'],
            'linux2': ['make', 'ninja'],
            'linux3': ['make', 'ninja'],
            'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
        }[sys.platform]

    # Run the full test list once per generator format; tests read the
    # format from the TESTGYP_FORMAT environment variable.
    for format in format_list:
        os.environ['TESTGYP_FORMAT'] = format
        if not opts.quiet:
            sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)

        gyp_options = []
        for option in opts.gyp_option:
            gyp_options += ['-G', option]
        if gyp_options and not opts.quiet:
            sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)

        for test in tests:
            status = cr.run([sys.executable, test] + gyp_options,
                            stdout=sys.stdout,
                            stderr=sys.stderr)
            # Exit status 2 conventionally means "no result" (skipped).
            if status == 2:
                no_result.append(test)
            elif status:
                failed.append(test)
            else:
                passed.append(test)

    if not opts.quiet:
        def report(description, tests):
            # Print a summary section for one outcome bucket.
            if tests:
                if len(tests) == 1:
                    sys.stdout.write("\n%s the following test:\n" % description)
                else:
                    fmt = "\n%s the following %d tests:\n"
                    sys.stdout.write(fmt % (description, len(tests)))
                sys.stdout.write("\t" + "\n\t".join(tests) + "\n")

        if opts.passed:
            report("Passed", passed)
        report("Failed", failed)
        report("No result from", no_result)

    if failed:
        return 1
    else:
        return 0


if __name__ == "__main__":
    sys.exit(main())
|
jzoldak/edx-platform | refs/heads/master | lms/djangoapps/verify_student/startup.py | 63 | """
Setup the signals on startup.
"""
import lms.djangoapps.verify_student.signals # pylint: disable=unused-import
|
sauloal/cnidaria | refs/heads/master | scripts/venv/local/lib/python2.7/encodings/hz.py | 817 | #
# hz.py: Python Unicode Codec for HZ
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('hz')
class Codec(codecs.Codec):
    # Stateless encode/decode entry points backed by the C HZ codec.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Chunk-by-chunk encoding support for the HZ codec.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Chunk-by-chunk decoding support for the HZ codec.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream-reading support for the HZ codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream-writing support for the HZ codec.
    codec = codec
def getregentry():
    # Standard codec-registry hook: expose the HZ codec's entry points so
    # codecs.lookup('hz') can find them.
    return codecs.CodecInfo(
        name='hz',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
xujun10110/golismero | refs/heads/master | tools/xsser/XSSer/tokenhub.py | 7 | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
"""
$Id$
This file is part of the xsser project, http://xsser.sourceforge.net.
Copyright (c) 2011/2012 psy <root@lordepsylon.net> - <epsylon@riseup.net>
xsser is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation version 3 of the License.
xsser is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with xsser; if not, write to the Free Software Foundation, Inc., 51
Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from threading import Thread
import socket
import time
class ReceiverThread(Thread):
    """Handle a single accepted client connection for HubThread.

    Reads one chunk from the client, forwards it to the parent hub,
    sends a short acknowledgement and closes the socket.
    """

    def __init__(self, client, addr, parent):
        Thread.__init__(self)
        # Daemon thread: do not block interpreter shutdown.
        self.daemon = True
        self.client = client
        # NOTE(review): `addr` is accepted but never stored or used.
        self.parent = parent

    def run(self):
        # Single recv: assumes the whole request fits in one 1024-byte read.
        data = self.client.recv(1024)
        if data:
            self.parent.data_arrived(data)
        self.client.send('thanks for coming!')
        self.client.close()
        # Let the hub drop us from its client list.
        self.parent.client_finished(self)
class HubThread(Thread):
    """Listening hub for XSS success callbacks.

    Accepts connections on localhost:19084, spawning a ReceiverThread per
    client; HTTP requests of the form ``GET /success/<token>`` are reported
    to the parent via ``token_arrived(token)``.
    """

    def __init__(self, parent):
        Thread.__init__(self)
        # Daemon thread: do not block interpreter shutdown.
        self.daemon = True
        self._clients = []
        # _armed: keep trying to bind / keep serving until shutdown().
        self._armed = True
        # ready: the listening socket is bound and stored in self.socket.
        self.ready = False
        self.running = False
        self.parent = parent

    def url_request(self, url):
        # Expect a path of the form /success/<token>.
        split_url = url.split("/")
        if len(split_url) > 2:
            if split_url[1] == 'success':
                self.parent.token_arrived(split_url[2])

    def data_arrived(self, data):
        # NOTE(review): the result of this split is discarded -- presumably
        # only the first request line was meant to be parsed; confirm intent.
        data.split("\n")[0]
        if data.startswith("GET"):
            split_data = data.split()
            if len(split_data) > 1:
                # split_data[1] is the request path.
                self.url_request(split_data[1])

    def client_finished(self, _thread):
        # Called back by ReceiverThread when its connection is done.
        self._clients.remove(_thread)

    def shutdown(self):
        # Close the listening socket (if open) and stop both run() loops.
        if self.ready:
            self.socket.shutdown(socket.SHUT_RDWR)
            self.socket.close()
        self.running = False
        self._armed = False
        self.ready = False

    def run(self):
        # Retry binding until it succeeds; the port may still be in use.
        while not self.running and self._armed:
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.bind(('localhost', 19084))
                self.running = True
            except socket.error as e:
                print("socket busy, retry opening")
                if e.errno == 98:  # its in use wait a bit and retry
                    time.sleep(3)
        if not self._armed:
            # shutdown() was called before we ever managed to bind.
            return
        self.socket = s
        self.ready = True
        s.listen(1)
        # Accept loop: one ReceiverThread per incoming connection.
        while self.running and self._armed:
            try:
                conn, addr = s.accept()
            except socket.timeout:
                pass
            # NOTE(review): Python 2-only except syntax below.
            except socket.error, e:
                if self.ready == False:
                    # Socket was closed by shutdown(); exit quietly.
                    return
                else:
                    break
            else:
                t = ReceiverThread(conn, addr, self)
                t.start()
                self._clients.append(t)
        if self.ready:
            s.close()
            self.ready = False
|
symbolicdata/code | refs/heads/master | src/sdeval/classes/TaskFolder_test.py | 1 | import unittest
from TaskFolder import TaskFolder
from Task import Task
from TaskFolderTree import TaskFolderTree
from MachineSettings import MachineSettings
class TestTaskFolder(unittest.TestCase):
    """
    Tests for the class TaskFolder
    .. moduleauthor:: Albert Heinle <aheinle@uwaterloo.ca>
    """

    def setUp(self):
        # Fixture: a Task spanning two SDTables, four problem instances and
        # three computer algebra systems, plus matching MachineSettings and
        # a TaskFolderTree holding a dummy code entry for every
        # (sdtable, problem instance, CAS) combination.
        testName = "PrettyTestTask"
        testComputationProblem = "PrettierComputationProblem"
        testSDTables = ["sdtable1", "sdtable2"]
        testPIs = ["PI1", "PI2", "PI3", "PI4"]
        testCASs = ["Singular", "Magma", "Maple"]
        self.testTask = Task(testName, testComputationProblem, testSDTables, testPIs, testCASs)
        casDict = {"Singular": "Singular", "Magma": "magma", "Maple": "maple"}
        timeCommand = "time -p"
        self.msTest = MachineSettings(casDict, timeCommand)
        self.tfTree = TaskFolderTree()
        for a in testSDTables:
            for b in testPIs:
                for c in testCASs:
                    self.tfTree.addCode(a, b, c, "/*Test Code*/")

    def testTaskFolder(self):
        """
        Here, we test the functionality of the TaskFolder Class
        Tests include:
        1. Initialization of the TaskFolder Class
        1.a. A test, that initializes the taskFolder with None for all values.
        1.b. A test, that the entries inside of the taskfoldertree coincide
             with the values that can be found in the task-instance, which is
             given to the function.
        1.c Correct initialization of the TaskFolder instance.
        """
        # 1.a -- all-None input must be rejected by the constructor.
        testVar = 0
        try:
            tf = TaskFolder(None, None, None)
            testVar = 1
        except:
            # Expected: construction from None input raises.
            pass
        if (testVar == 1):
            self.fail("I was able to initialize a TaskFolder instance with lots of None's as input")
        # 1.b -- a TaskFolderTree in place of MachineSettings must be rejected.
        try:
            tf = TaskFolder(self.testTask, self.tfTree, TaskFolderTree())
            testVar = 1
        except:
            # Expected: inconsistent argument types raise.
            pass
        if testVar == 1:
            self.fail("Could initialize the a Taskfolder with not consistent input.")
        # 1.c -- consistent input must construct without raising.
        try:
            tf = TaskFolder(self.testTask, self.tfTree, self.msTest)
        except:
            self.fail("Could not initialize correct Initialization of tfTree.")
if __name__=="__main__":
unittest.main()
|
CZ-NIC/conpot | refs/heads/master | conpot/protocols/modbus/slave.py | 2 | import struct
import logging
from modbus_tk.modbus import Slave, ModbusError, ModbusInvalidRequestError, InvalidArgumentError, DuplicatedKeyError,\
InvalidModbusBlockError, OverlapModbusBlockError
from modbus_tk import defines, utils
from modbus_block_databus_mediator import ModbusBlockDatabusMediator
logger = logging.getLogger(__name__)
class MBSlave(Slave):
    """
    Modbus slave for the Conpot honeypot.

    Extends modbus_tk's Slave with a handler for the Device Identification
    function (0x2B / MEI 0x0E) answered from the XML template, and backs
    memory blocks with databus mediators instead of plain arrays.
    """

    def __init__(self, slave_id, dom):
        Slave.__init__(self, slave_id)
        # Dispatch table: function code -> handler. The read/write handlers
        # are inherited from modbus_tk's Slave; _device_info is local.
        self._fn_code_map = {defines.READ_COILS: self._read_coils,
                             defines.READ_DISCRETE_INPUTS: self._read_discrete_inputs,
                             defines.READ_INPUT_REGISTERS: self._read_input_registers,
                             defines.READ_HOLDING_REGISTERS: self._read_holding_registers,
                             defines.WRITE_SINGLE_COIL: self._write_single_coil,
                             defines.WRITE_SINGLE_REGISTER: self._write_single_register,
                             defines.WRITE_MULTIPLE_COILS: self._write_multiple_coils,
                             defines.WRITE_MULTIPLE_REGISTERS: self._write_multiple_registers,
                             defines.DEVICE_INFO: self._device_info,
                             }
        # Parsed XML template; queried for device identification strings.
        self.dom = dom

    def _device_info(self, request_pdu):
        # Build a "read device identification" response from the template.
        info_root = self.dom.xpath('//modbus/device_info')[0]
        vendor_name = info_root.xpath('./VendorName/text()')[0]
        product_code = info_root.xpath('./ProductCode/text()')[0]
        major_minor_revision = info_root.xpath('./MajorMinorRevision/text()')[0]
        (req_device_id, req_object_id) = struct.unpack(">BB", request_pdu[2:4])
        # Object ids 0-2 are the three basic identification objects.
        device_info = {
            0: vendor_name,
            1: product_code,
            2: major_minor_revision
        }
        # MEI type
        response = struct.pack(">B", 0x0E)
        # requested device id
        response += struct.pack(">B", req_device_id)
        # conformity level
        response += struct.pack(">B", 0x01)
        # followup data 0x00 is False
        response += struct.pack(">B", 0x00)
        # No next object id
        response += struct.pack(">B", 0x00)
        # Number of objects
        response += struct.pack(">B", len(device_info))
        for i in range(len(device_info)):
            # Object id
            response += struct.pack(">B", i)
            # Object length
            response += struct.pack(">B", len(device_info[i]))
            response += device_info[i]
        return response

    def handle_request(self, request_pdu, broadcast=False):
        """
        parse the request pdu, makes the corresponding action
        and returns the response pdu
        """
        with self._data_lock:  # thread-safe
            try:
                # get the function code
                (self.function_code, ) = struct.unpack(">B", request_pdu[0])
                # check if the function code is valid. If not returns error response
                if not self.function_code in self._fn_code_map:
                    raise ModbusError(defines.ILLEGAL_FUNCTION)
                # Only write operations may legally be broadcast.
                can_broadcast = [defines.WRITE_MULTIPLE_COILS, defines.WRITE_MULTIPLE_REGISTERS,
                                 defines.WRITE_SINGLE_COIL, defines.WRITE_SINGLE_REGISTER]
                if broadcast and (self.function_code not in can_broadcast):
                    raise ModbusInvalidRequestError("Function %d can not be broadcasted" % self.function_code)
                # execute the corresponding function
                try:
                    response_pdu = self._fn_code_map[self.function_code](request_pdu)
                except struct.error:
                    # Malformed PDU: answer with "illegal data value" (code 3).
                    raise ModbusError(exception_code=3)
                if response_pdu:
                    if broadcast:
                        # not really sure whats going on here - better log it!
                        logger.info("Modbus broadcast: %s" % (utils.get_log_buffer("!!", response_pdu)))
                        return ""
                    else:
                        return struct.pack(">B", self.function_code) + response_pdu
                raise Exception("No response for function %d" % self.function_code)
            except ModbusError as e:
                logger.error('Exception caught: %s. (A proper response will be sent to the peer)', e)
                # Modbus error frame: function code + 0x80, then exception code.
                return struct.pack(">BB", self.function_code + 128, e.get_exception_code())

    def add_block(self, block_name, block_type, starting_address, size):
        """Add a new block identified by its name"""
        with self._data_lock:  # thread-safe
            if size <= 0:
                raise InvalidArgumentError("size must be a positive number")
            if starting_address < 0:
                raise InvalidArgumentError("starting address must be zero or positive number")
            if block_name in self._blocks:
                raise DuplicatedKeyError("Block %s already exists. " % block_name)
            if block_type not in self._memory:
                raise InvalidModbusBlockError("Invalid block type %d" % block_type)
            # check that the new block doesn't overlap an existing block
            # it means that only 1 block per type must correspond to a given address
            # for example: it must not have 2 holding registers at address 100
            index = 0
            for i in xrange(len(self._memory[block_type])):
                block = self._memory[block_type][i]
                if block.is_in(starting_address, size):
                    raise OverlapModbusBlockError, "Overlap block at %d size %d" % (block.starting_address, block.size)
                if block.starting_address > starting_address:
                    index = i
                    break
            # if the block is ok: register it
            self._blocks[block_name] = (block_type, starting_address)
            # add it in the 'per type' shortcut
            self._memory[block_type].insert(index, ModbusBlockDatabusMediator(block_name, starting_address))
|
stephane-martin/salt-debian-packaging | refs/heads/master | salt-2016.3.2/salt/modules/solaris_fmadm.py | 2 | # -*- coding: utf-8 -*-
'''
Module for running fmadm and fmdump on Solaris
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:platform: solaris,illumos
.. versionadded:: 2016.3.0
'''
from __future__ import absolute_import
# Import Python libs
import logging
# Import Salt libs
import salt.utils
import salt.utils.decorators as decorators
from salt.utils.odict import OrderedDict
log = logging.getLogger(__name__)
# Function aliases
__func_alias__ = {
'list_records': 'list',
}
# Define the module's virtual name
__virtualname__ = 'fmadm'
@decorators.memoize
def _check_fmadm():
    '''
    Looks to see if fmadm is present on the system.
    Returns the full path to the binary, or a falsy value if not found.
    '''
    # Memoized: the binary location will not change during a run.
    return salt.utils.which('fmadm')
def _check_fmdump():
    '''
    Looks to see if fmdump is present on the system.
    Returns the full path to the binary, or a falsy value if not found.
    '''
    # NOTE(review): unlike _check_fmadm this lookup is not memoized --
    # consider adding @decorators.memoize for consistency.
    return salt.utils.which('fmdump')
def __virtual__():
    '''
    Provides fmadm only on Solaris
    '''
    # Load only on SunOS when both fault-management tools are installed.
    if salt.utils.is_sunos() and \
            _check_fmadm() and _check_fmdump():
        return __virtualname__
    # Returning (False, reason) tells the loader why the module was skipped.
    return (
        False,
        '{0} module can only be loaded on Solaris with the fault management installed'.format(
            __virtualname__
        )
    )
def _parse_fmdump(output):
'''
Parses fmdump output
'''
result = []
output = output.split("\n")
# extract header
header = [field for field in output[0].lower().split(" ") if field]
del output[0]
# parse entries
for entry in output:
entry = [item for item in entry.split(" ") if item]
entry = ['{0} {1} {2}'.format(entry[0], entry[1], entry[2])] + entry[3:]
# prepare faults
fault = OrderedDict()
for field in header:
fault[field] = entry[header.index(field)]
result.append(fault)
return result
def _parse_fmdump_verbose(output):
    '''
    Parse `fmdump -V` (verbose) output into a list of dicts: each TIME
    header line plus the following data line is parsed with _parse_fmdump,
    and any further non-empty lines are accumulated into a 'details' key.
    '''
    result = []
    output = output.split("\n")
    fault = []
    verbose_fault = {}
    for line in output:
        if line.startswith('TIME'):
            # A new record starts: remember the header and flush the
            # previously collected record, if any.
            fault.append(line)
            if len(verbose_fault) > 0:
                result.append(verbose_fault)
                verbose_fault = {}
        elif len(fault) == 1:
            # This is the data row belonging to the TIME header above.
            fault.append(line)
            verbose_fault = _parse_fmdump("\n".join(fault))[0]
            fault = []
        elif len(verbose_fault) > 0:
            # Remaining non-empty lines are free-form detail text.
            if 'details' not in verbose_fault:
                verbose_fault['details'] = ""
            if line.strip() == '':
                continue
            verbose_fault['details'] = '{0}{1}\n'.format(
                verbose_fault['details'],
                line
            )
    # Flush the trailing record.
    if len(verbose_fault) > 0:
        result.append(verbose_fault)
    return result
def _parse_fmadm_config(output):
'''
Parsbb fmdump/fmadm output
'''
result = []
output = output.split("\n")
# extract header
header = [field for field in output[0].lower().split(" ") if field]
del output[0]
# parse entries
for entry in output:
entry = [item for item in entry.split(" ") if item]
entry = entry[0:3] + [" ".join(entry[3:])]
# prepare component
component = OrderedDict()
for field in header:
component[field] = entry[header.index(field)]
result.append(component)
# keying
keyed_result = OrderedDict()
for component in result:
keyed_result[component['module']] = component
del keyed_result[component['module']]['module']
result = keyed_result
return result
def _fmadm_action_fmri(action, fmri):
    '''
    Shared helper for the fmadm subcommands that act on a single FMRI:
    fmadm.flush, fmadm.repaired, fmadm.replaced and fmadm.acquit.

    action : string
        fmadm subcommand to run
    fmri : string
        fault managed resource identifier (or UUID) to act on

    Return True on success, or ``{'Error': <stderr>}`` on failure.
    '''
    # (Removed an unused local ``ret = {}`` and the redundant
    # retcode/result temporaries; behavior is unchanged.)
    fmadm = _check_fmadm()
    cmd = '{cmd} {action} {fmri}'.format(
        cmd=fmadm,
        action=action,
        fmri=fmri
    )
    res = __salt__['cmd.run_all'](cmd)
    if res['retcode'] != 0:
        # Surface the tool's stderr so callers can report it.
        return {'Error': res['stderr']}
    return True
def _parse_fmadm_faulty(output):
    '''
    Parse `fmadm faulty` output into a dict keyed by event UUID; each
    value holds a 'summary' dict (the tabular header/data pair parsed by
    _parse_fmdump) and a 'fault' dict of the key/value detail lines.
    '''
    def _merge_data(summary, fault):
        # Re-key one record by its event-id and attach summary + details.
        result = {}
        uuid = summary['event-id']
        del summary['event-id']
        result[uuid] = OrderedDict()
        result[uuid]['summary'] = summary
        result[uuid]['fault'] = fault
        return result

    result = {}
    summary = []
    summary_data = {}
    fault_data = {}
    data_key = None
    for line in output.split("\n"):
        # we hit a divider
        if line.startswith('-'):
            if summary and summary_data and fault_data:
                # we have data, store it and reset
                result.update(_merge_data(summary_data, fault_data))
                summary = []
                summary_data = {}
                fault_data = {}
                continue
            else:
                # we don't have all data yet, collect more
                continue
        # if we do not have the header, store it
        if not summary:
            summary.append(line)
            continue
        # if we have the header but no data, store the data and parse it
        if summary and not summary_data:
            summary.append(line)
            summary_data = _parse_fmdump("\n".join(summary))[0]
            continue
        # if we have a header and data, assume the other lines are details
        if summary and summary_data:
            # if line starts with a whitespace and we already have a key, append
            if line.startswith(' ') and data_key:
                fault_data[data_key] = "{0}\n{1}".format(
                    fault_data[data_key],
                    line.strip()
                )
            # we have a key : value line, parse it
            elif ':' in line:
                line = line.split(':')
                data_key = line[0].strip()
                fault_data[data_key] = ":".join(line[1:]).strip()
                # note: for some reason Chassis_id is lobbed after Platform, fix that here
                if data_key == 'Platform':
                    fault_data['Chassis_id'] = fault_data[data_key][fault_data[data_key].index('Chassis_id'):].split(':')[-1].strip()
                    fault_data[data_key] = fault_data[data_key][0:fault_data[data_key].index('Chassis_id')].strip()
    # we have data, store it and reset
    result.update(_merge_data(summary_data, fault_data))
    return result
def list_records(after=None, before=None):
    '''
    Display fault management logs

    after : string
        filter events after time, see man fmdump for format
    before : string
        filter events before time, see man fmdump for format

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.list
    '''
    # (Removed an unused local ``ret = {}``; behavior is unchanged.)
    fmdump = _check_fmdump()
    cmd = '{cmd}{after}{before}'.format(
        cmd=fmdump,
        after=' -t {0}'.format(after) if after else '',
        before=' -T {0}'.format(before) if before else ''
    )
    res = __salt__['cmd.run_all'](cmd)
    if res['retcode'] != 0:
        return {'Error': 'error executing fmdump'}
    return _parse_fmdump(res['stdout'])
def show(uuid):
    '''
    Display log details

    uuid: string
        uuid of fault

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.show 11b4070f-4358-62fa-9e1e-998f485977e1
    '''
    # (Removed an unused local ``ret = {}``; behavior is unchanged.)
    fmdump = _check_fmdump()
    cmd = '{cmd} -u {uuid} -V'.format(
        cmd=fmdump,
        uuid=uuid
    )
    res = __salt__['cmd.run_all'](cmd)
    if res['retcode'] != 0:
        return {'Error': 'error executing fmdump'}
    # -V output is multi-line, so it needs the verbose parser.
    return _parse_fmdump_verbose(res['stdout'])
def config():
    '''
    Display fault manager configuration

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.config
    '''
    # (Removed an unused local ``ret = {}``; behavior is unchanged.)
    fmadm = _check_fmadm()
    cmd = '{cmd} config'.format(
        cmd=fmadm
    )
    res = __salt__['cmd.run_all'](cmd)
    if res['retcode'] != 0:
        return {'Error': 'error executing fmadm config'}
    return _parse_fmadm_config(res['stdout'])
def load(path):
    '''
    Load specified fault manager module

    path: string
        path of fault manager module

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.load /module/path
    '''
    # (Removed an unused local ``ret = {}``; behavior is unchanged.)
    fmadm = _check_fmadm()
    cmd = '{cmd} load {path}'.format(
        cmd=fmadm,
        path=path
    )
    res = __salt__['cmd.run_all'](cmd)
    if res['retcode'] != 0:
        # Pass the tool's stderr through to the caller.
        return {'Error': res['stderr']}
    return True
def unload(module):
    '''
    Unload specified fault manager module

    module: string
        module to unload

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.unload software-response
    '''
    # (Removed an unused local ``ret = {}``; behavior is unchanged.)
    fmadm = _check_fmadm()
    cmd = '{cmd} unload {module}'.format(
        cmd=fmadm,
        module=module
    )
    res = __salt__['cmd.run_all'](cmd)
    if res['retcode'] != 0:
        # Pass the tool's stderr through to the caller.
        return {'Error': res['stderr']}
    return True
def reset(module, serd=None):
    '''
    Reset module or sub-component

    module: string
        module to reset
    serd : string
        serd sub module

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.reset software-response
    '''
    # (Removed an unused local ``ret = {}``; behavior is unchanged.)
    fmadm = _check_fmadm()
    cmd = '{cmd} reset {serd}{module}'.format(
        cmd=fmadm,
        serd='-s {0} '.format(serd) if serd else '',
        module=module
    )
    res = __salt__['cmd.run_all'](cmd)
    if res['retcode'] != 0:
        # Pass the tool's stderr through to the caller.
        return {'Error': res['stderr']}
    return True
def flush(fmri):
    '''
    Flush cached state for resource

    fmri: string
        fmri

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.flush fmri
    '''
    # Thin wrapper over the shared fmadm-action helper.
    return _fmadm_action_fmri('flush', fmri)
def repaired(fmri):
    '''
    Notify fault manager that resource has been repaired

    fmri: string
        fmri

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.repaired fmri
    '''
    # Thin wrapper over the shared fmadm-action helper.
    return _fmadm_action_fmri('repaired', fmri)
def replaced(fmri):
    '''
    Notify fault manager that resource has been replaced

    fmri: string
        fmri

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.replaced fmri
    '''
    # Thin wrapper over the shared fmadm-action helper.
    # (Docstring CLI example corrected: it previously showed fmadm.repaired.)
    return _fmadm_action_fmri('replaced', fmri)
def acquit(fmri):
    '''
    Acquit resource or acquit case

    fmri: string
        fmri or uuid

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.acquit fmri | uuid
    '''
    # Thin wrapper over the shared fmadm-action helper.
    return _fmadm_action_fmri('acquit', fmri)
def faulty():
    '''
    Display list of faulty resources

    Returns False when no faults are reported, otherwise a dict of
    faults keyed by event UUID.

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.faulty
    '''
    fmadm = _check_fmadm()
    cmd = '{cmd} faulty'.format(
        cmd=fmadm,
    )
    res = __salt__['cmd.run_all'](cmd)
    result = {}
    if res['stdout'] == '':
        # No output from fmadm faulty means there are no faults.
        result = False
    else:
        result = _parse_fmadm_faulty(res['stdout'])
    return result
def healthy():
    '''
    Return True when the fault manager reports no faults, False otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.healthy
    '''
    # faulty() returns False when there are no faults and a (truthy) dict
    # of faults otherwise; the previous ``False if faulty() else True``
    # was an unnecessarily convoluted double negation, and the docstring
    # misstated the return semantics.
    return not faulty()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
dejacode/about-code-tool | refs/heads/master | src/attributecode/util.py | 1 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# ============================================================================
# Copyright (c) 2013-2018 nexB Inc. http://www.nexb.com/ - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import collections
from collections import OrderedDict
import codecs
import errno
import json
import ntpath
import os
from os.path import abspath
from os.path import dirname
from os.path import join
import posixpath
import shutil
import socket
import string
import sys
if sys.version_info[0] < 3: # Python 2
from itertools import izip_longest as zip_longest # NOQA
else: # Python 3
from itertools import zip_longest # NOQA
from yaml.reader import Reader
from yaml.scanner import Scanner
from yaml.parser import Parser
from yaml.composer import Composer
from yaml.constructor import Constructor, ConstructorError
from yaml.resolver import Resolver
from yaml.nodes import MappingNode
if sys.version_info[0] < 3:
# Python 2
import backports.csv as csv # NOQA
else:
# Python 3
import csv # NOQA
try:
# Python 2
import httplib
except ImportError:
# Python 3
import http.client as httplib
from attributecode import CRITICAL
from attributecode import Error
on_windows = 'win32' in sys.platform
def to_posix(path):
    """
    Return a path using the posix path separator given a path that may
    contain posix or windows separators, converting backslashes to
    forward slashes. NB: this path will still be valid in the windows
    explorer (except if UNC or share name). It will be a valid path
    everywhere in Python. It will not be valid for windows command line
    operations.
    """
    posix_path = path.replace(ntpath.sep, posixpath.sep)
    return posix_path
# Windows long-path (UNC) prefix, in native and posix-slash forms.
UNC_PREFIX = u'\\\\?\\'
UNC_PREFIX_POSIX = to_posix(UNC_PREFIX)
UNC_PREFIXES = (UNC_PREFIX_POSIX, UNC_PREFIX,)

# Characters allowed in an ABOUT file name per the ABOUT spec.
valid_file_chars = string.digits + string.ascii_letters + '_-.'
def invalid_chars(path):
    """
    Return the list of characters in the file name of *path* that are not
    allowed by the ABOUT spec (letters, digits, underscore, dash, period).
    """
    # Compare against the lowercased name so case never matters.
    file_name = resource_name(to_posix(path)).lower()
    return [char for char in file_name if char not in valid_file_chars]
def check_file_names(paths):
    """
    Given a sequence of file paths, check that file names are valid and that
    there are no case-insensitive duplicates in any given directories.
    Return a list of errors.

    From spec :
        A file name can contain only these US-ASCII characters:
        - digits from 0 to 9
        - uppercase and lowercase letters from A to Z
        - the _ underscore, - dash and . period signs.
    From spec:
        The case of a file name is not significant. On case-sensitive file
        systems (such as Linux), a tool must raise an error if two ABOUT files
        stored in the same directory have the same lowercase file name.
    """
    seen = {}
    errors = []
    for orig_path in paths:
        path = orig_path
        invalid = invalid_chars(path)
        if invalid:
            # Report every offending character found in the file name.
            invalid = ''.join(invalid)
            msg = ('Invalid characters %(invalid)r in file name at: '
                   '%(path)r' % locals())
            errors.append(Error(CRITICAL, msg))
        # Normalize to a lowercased, absolute posix path so duplicates are
        # detected the way a case-insensitive file system would see them.
        path = to_posix(orig_path)
        name = resource_name(path).lower()
        parent = posixpath.dirname(path)
        path = posixpath.join(parent, name)
        path = posixpath.normpath(path)
        path = posixpath.abspath(path)
        existing = seen.get(path)
        if existing:
            msg = ('Duplicate files: %(orig_path)r and %(existing)r '
                   'have the same case-insensitive file name' % locals())
            errors.append(Error(CRITICAL, msg))
        else:
            seen[path] = orig_path
    return errors
def check_duplicate_keys_about_file(context):
    """
    Return the list of field names that appear more than once in the
    ABOUT file text *context*.

    Continuation lines (starting with a space), string blocks and empty
    lines are ignored; only top-level "key: value" lines are inspected.
    """
    seen = []
    duplicates = []
    for line in context.splitlines():
        # Skip indented continuation lines and blank lines.
        if line.startswith(' ') or not line.strip():
            continue
        field = line.partition(':')[0]
        if field in seen:
            duplicates.append(field)
        else:
            seen.append(field)
    return duplicates
def wrap_boolean_value(context):
    """
    Return *context* with the values of the boolean ABOUT fields
    (redistribute, attribute, track_changes, modified) wrapped in single
    quotes so a YAML loader treats them as plain strings. All other
    lines are returned unchanged, joined with newlines.
    """
    bool_fields = ['redistribute', 'attribute', 'track_changes', 'modified']
    # Renamed the accumulator from ``input`` (which shadowed the builtin,
    # previously silenced with a NOQA marker) to ``lines``.
    lines = []
    for line in context.splitlines():
        key = line.partition(':')[0]
        if key in bool_fields:
            value = "'" + line.partition(':')[2].strip() + "'"
            lines.append(key + ': ' + value)
        else:
            lines.append(line)
    return '\n'.join(lines)
def get_absolute(location):
    """
    Return an absolute, normalized location: user (~) and environment
    variables are expanded, then the path is normalized and made absolute.
    """
    # Apply the same transformations as before, in the same order.
    for transform in (os.path.expanduser, os.path.expandvars,
                      os.path.normpath, os.path.abspath):
        location = transform(location)
    return location
def get_locations(location):
    """
    Yield the locations of files given the location of a file or of a
    directory tree containing files.
    File locations are normalized using posix path separators.
    """
    # add_unc/get_absolute make the path safe for long Windows paths and
    # resolve it to an absolute location before walking.
    location = add_unc(location)
    location = get_absolute(location)
    assert os.path.exists(location)
    if os.path.isfile(location):
        yield location
    else:
        for base_dir, _, files in os.walk(location):
            for name in files:
                bd = to_posix(base_dir)
                yield posixpath.join(bd, name)
def get_about_locations(location):
    """
    Yield the locations of ABOUT files (*.about) given the location of a
    file or of a directory tree containing ABOUT files.
    File locations are normalized using posix path separators.
    """
    for loc in get_locations(location):
        if is_about_file(loc):
            yield loc
def get_relative_path(base_loc, full_loc):
    """
    Return a posix path for a given full location relative to a base location.
    The first segment of the different between full_loc and base_loc will become
    the first segment of the returned path.
    """
    def norm(p):
        # Return `p` as a normalized posix path with no UNC prefix and no
        # leading/trailing separators.
        # Remove a leading Windows long-path (UNC) prefix when present.
        # Slicing removes exactly the prefix; the previous str.strip() call
        # treated the prefix as a *set* of characters and could also remove
        # legitimate leading or trailing path characters (e.g. slashes).
        if p.startswith(UNC_PREFIX):
            p = p[len(UNC_PREFIX):]
        elif p.startswith(to_posix(UNC_PREFIX)):
            p = p[len(to_posix(UNC_PREFIX)):]
        p = to_posix(p)
        p = p.strip(posixpath.sep)
        p = posixpath.normpath(p)
        return p
    base = norm(base_loc)
    path = norm(full_loc)
    assert path.startswith(base), ('Cannot compute relative path: '
                                   '%(path)r does not start with %(base)r'
                                   % locals())
    base_name = resource_name(base)
    no_dir = base == base_name
    same_loc = base == path
    if same_loc:
        # this is the case of a single file or single dir
        if no_dir:
            # we have no dir: the full path is the same as the resource name
            relative = base_name
        else:
            # we have at least one dir
            parent_dir = posixpath.dirname(base)
            parent_dir = resource_name(parent_dir)
            relative = posixpath.join(parent_dir, base_name)
    else:
        relative = path[len(base) + 1:]
        # We don't want to keep the first segment of the root of the returned path.
        # See https://github.com/nexB/attributecode/issues/276
        # relative = posixpath.join(base_name, relative)
    return relative
def to_native(path):
    """
    Return `path` converted to use the current OS path separator, mapping
    both windows (\\) and posix (/) separators to os.path.sep.
    """
    native = path.replace(ntpath.sep, os.path.sep)
    native = native.replace(posixpath.sep, os.path.sep)
    return native
def is_about_file(path):
    """
    Return True if `path` is a valid ABOUT file name (case-insensitive
    '.about' extension), False otherwise. Previously this returned the
    falsy `path` itself ('' or None) instead of False for empty paths.
    """
    return bool(path and path.lower().endswith('.about'))
def resource_name(path):
    """
    Return the file or directory name (the last path segment) from `path`.
    """
    cleaned = to_posix(path.strip()).rstrip(posixpath.sep)
    return posixpath.split(cleaned)[1].strip()
# Python 3
# On Python 3, csv.DictReader already returns rows as ordered mappings.
OrderedDictReader = csv.DictReader
if sys.version_info[0] < 3:
    # Python 2
    class OrderedDictReader(csv.DictReader):
        """
        A DictReader that return OrderedDicts
        Copied from csv.DictReader itself backported from Python 3
        license: python
        """
        def __next__(self):
            # Return the next CSV row as an OrderedDict keyed by the
            # reader's fieldnames.
            if self.line_num == 0:
                # Used only for its side effect.
                # (Accessing fieldnames reads and consumes the header row.)
                self.fieldnames
            row = next(self.reader)
            self.line_num = self.reader.line_num
            # unlike the basic reader, we prefer not to return blanks,
            # because we will typically wind up with a dict full of None
            # values
            while row == []:
                row = next(self.reader)
            d = OrderedDict(zip(self.fieldnames, row))
            lf = len(self.fieldnames)
            lr = len(row)
            if lf < lr:
                # Extra values beyond the declared fields go under restkey.
                d[self.restkey] = row[lf:]
            elif lf > lr:
                # Missing values are padded with restval.
                for key in self.fieldnames[lr:]:
                    d[key] = self.restval
            return d
        # Python 2's iterator protocol uses next(), not __next__().
        next = __next__
def get_mapping(location=None):
    """
    Return a mapping of user key names to About key names by reading the
    mapping.config file from `location`, or from the directory of this
    source file if no location was provided.
    """
    if not location:
        location = join(abspath(dirname(__file__)), 'mapping.config')
    if not os.path.exists(location):
        return {}
    mapping = collections.OrderedDict()
    try:
        with open(location) as mapping_file:
            for line in mapping_file:
                stripped = line.strip()
                # Skip blank lines and comment lines.
                if not stripped or stripped.startswith('#'):
                    continue
                # Only `key: value` lines contribute to the mapping.
                if ':' not in line:
                    continue
                key, _sep, value = line.lower().partition(':')
                mapping[key.strip().replace(' ', '_')] = value.strip()
    except Exception as e:
        print(repr(e))
        print('Cannot open or process mapping.config file at %(location)r.' % locals())
        # FIXME: this is rather brutal
        sys.exit(errno.EACCES)
    return mapping
def get_output_mapping(location):
    """
    Return a mapping of About key names to user key names by reading the
    user's mapping file at `location`. Key names are used verbatim: they
    are NOT lowercased or otherwise reformatted.
    """
    if not os.path.exists(location):
        return {}
    mapping = {}
    try:
        with open(location) as mapping_file:
            for line in mapping_file:
                stripped = line.strip()
                # Skip blank lines and comment lines.
                if not stripped or stripped.startswith('#'):
                    continue
                if ':' not in line:
                    continue
                user_key, _sep, about_key = line.partition(':')
                mapping[about_key.strip()] = user_key.strip()
    except Exception as e:
        print(repr(e))
        print('Cannot open or process file at %(location)r.' % locals())
        # FIXME: this is rather brutal
        sys.exit(errno.EACCES)
    return mapping
def apply_mapping(abouts, alternate_mapping=None):
    """
    Given a list of About data dictionaries and a dictionary of
    mapping, return a new About data dictionaries list where the keys
    have been replaced by the About mapped_abouts key if present. Load
    the mapping from the default mapping.config if an alternate
    mapping dict is not provided.
    """
    mapping = get_mapping(alternate_mapping) if alternate_mapping else get_mapping()
    if not mapping:
        return abouts
    mapped_abouts = []
    for about in abouts:
        mapped_about = OrderedDict()
        for key in about:
            # Collect every About key whose mapped user key matches this
            # key; fall back to the key itself when there is no mapping.
            targets = [about_key for about_key, user_key in mapping.items()
                       if key == user_key]
            if not targets:
                targets = [key]
            for target in targets:
                mapped_about[target] = about[key]
        mapped_abouts.append(mapped_about)
    return mapped_abouts
def get_mapping_key_order(mapping_file):
    """
    Get the mapping key order and return as a list.
    On Python 3, dict.keys() is a view object, not the list this function is
    documented to return: materialize it explicitly.
    """
    mapping = get_mapping(mapping_file) if mapping_file else get_mapping()
    return list(mapping.keys())
def format_output(about_data, use_mapping, mapping_file):
    """
    Convert the about_data dictionary to an ordered dictionary for saneyaml.dump()
    The ordering is: about_resource, name, version (when present), followed
    by the keys in mapping.config order (if a mapping is used) and then the
    remaining keys in alphabetical order.
    """
    mapping_key_order = []
    if use_mapping or mapping_file:
        mapping_key_order = get_mapping_key_order(mapping_file)
    priority_keys = [u'about_resource', u'name', u'version']
    all_keys = list(about_data.keys())
    ordered = collections.OrderedDict()
    # The priority keys always come first, in this fixed order.
    for pk in priority_keys:
        if pk in all_keys:
            ordered[pk] = about_data[pk]
    if mapping_key_order:
        # Respect the mapping file order first ...
        for key in mapping_key_order:
            if key not in priority_keys and key in all_keys:
                ordered[key] = about_data[key]
        # ... then append everything else alphabetically.
        for key in sorted(all_keys):
            if key not in priority_keys and key not in mapping_key_order:
                ordered[key] = about_data[key]
    else:
        for key in sorted(all_keys):
            if key not in priority_keys:
                ordered[key] = about_data[key]
    return ordered
def get_about_file_path(location, use_mapping=False, mapping_file=None):
    """
    Read the inventory file at `location` (CSV or JSON) and return the list
    of about_file_path values, one per entry.
    """
    loader = load_csv if location.endswith('.csv') else load_json
    inventory = loader(location, use_mapping=use_mapping, mapping_file=mapping_file)
    return [entry['about_file_path'] for entry in inventory]
def load_csv(location, use_mapping=False, mapping_file=None):
    """
    Read the CSV file at `location` and return a list of ordered
    dictionaries, one per row, with all column names lowercased.
    """
    results = []
    # FIXME: why ignore encoding errors here?
    with codecs.open(location, mode='rb', encoding='utf-8',
                     errors='ignore') as csvfile:
        for row in OrderedDictReader(csvfile):
            # Lowercase the column keys for consistency with the behavior
            # of the --mapping option.
            lowered = OrderedDict(
                (key.lower(), value) for key, value in row.items())
            results.append(lowered)
    if use_mapping or mapping_file:
        results = apply_mapping(results, mapping_file)
    return results
def load_json(location, use_mapping=False, mapping_file=None):
    """
    Read JSON file at `location` and return a list of ordered mappings, one for
    each entry.
    """
    # FIXME: IMHO we should know where the JSON is from and its shape
    # TODO use: object_pairs_hook=OrderedDict
    with open(location) as json_file:
        results = json.load(json_file)
    # The loaded JSON can have one of three shapes:
    # - AboutCode Manager output: a mapping with an "aboutcode_manager_notice"
    #   key where only the "components" field matters, e.g.:
    #   {"aboutcode_manager_notice": "xyz",
    #    "aboutcode_manager_version": "xxx",
    #    "components": [{"license_expression": "apache-2.0", ...}]}
    # - ScanCode output: a mapping with a "scancode_notice" key where only
    #   the "files" field matters, e.g.:
    #   {"scancode_notice": "xyz", "scancode_version": "xxx",
    #    "files": [{"path": "test", "type": "directory", ...}]}
    # - any other JSON: either a plain list of entries, or a single mapping
    #   treated as a one-entry list, e.g.:
    #   {"path": "test", "type": "directory", "name": "test", ...}
    if isinstance(results, list):
        try:
            updated_results = sorted(results)
        except TypeError:
            # On Python 3, entries such as dicts are not orderable and
            # sorted() raises TypeError: keep the original order instead
            # of crashing.
            updated_results = results
    else:
        if u'aboutcode_manager_notice' in results:
            updated_results = results['components']
        elif u'scancode_notice' in results:
            updated_results = results['files']
        else:
            updated_results = [results]
    about_ordered_list = updated_results
    # FIXME: why this double test? either have a mapping file and we use mapping or we do not.
    # FIXME: IMHO only one argument is needed
    if use_mapping or mapping_file:
        about_ordered_list = apply_mapping(updated_results, mapping_file)
    return about_ordered_list
def have_network_connection():
    """
    Return True if an HTTP connection to some public web site is possible.
    """
    http_connection = httplib.HTTPConnection('dejacode.org', timeout=10)
    try:
        http_connection.connect()
    except socket.error:
        return False
    else:
        return True
    finally:
        # Always release the socket: the original leaked the connection on
        # the success path.
        http_connection.close()
def extract_zip(location):
    """
    Extract a zip file at location in a temp directory and return the temporary
    directory where the archive was extracted.

    Raises Exception if `location` is not a valid zip file. The caller is
    responsible for cleaning up the returned temporary directory.
    """
    import zipfile
    import tempfile
    if not zipfile.is_zipfile(location):
        raise Exception('Incorrect zip file %(location)r' % locals())
    # Extract under <tempdir>/<archive name without .zip extension>.
    archive_base_name = os.path.basename(location).replace('.zip', '')
    base_dir = tempfile.mkdtemp()
    target_dir = os.path.join(base_dir, archive_base_name)
    target_dir = add_unc(target_dir)
    os.makedirs(target_dir)
    # Drop a single trailing separator so joins below stay clean.
    if target_dir.endswith((ntpath.sep, posixpath.sep)):
        target_dir = target_dir[:-1]
    with zipfile.ZipFile(location) as zipf:
        for info in zipf.infolist():
            name = info.filename
            content = zipf.read(name)
            target = os.path.join(target_dir, name)
            # Zip entries ending with a separator are directories.
            is_dir = target.endswith((ntpath.sep, posixpath.sep))
            if is_dir:
                target = target[:-1]
            parent = os.path.dirname(target)
            if on_windows:
                # Normalize to windows separators before touching the FS.
                target = target.replace(posixpath.sep, ntpath.sep)
                parent = parent.replace(posixpath.sep, ntpath.sep)
            if not os.path.exists(parent):
                os.makedirs(add_unc(parent))
            # An empty directory entry: create the directory itself.
            if not content and is_dir:
                if not os.path.exists(target):
                    os.makedirs(add_unc(target))
            # Write file content only once; existing targets are kept.
            if not os.path.exists(target):
                with open(target, 'wb') as f:
                    f.write(content)
    return target_dir
def add_unc(location):
    """
    Convert a location to an absolute Window UNC path to support long paths on
    Windows. Return the location unchanged if not on Windows. See
    https://msdn.microsoft.com/en-us/library/aa365247.aspx
    """
    if on_windows and not location.startswith(UNC_PREFIX):
        if location.startswith(UNC_PREFIX_POSIX):
            # Remove exactly the posix-style UNC prefix by slicing. The
            # previous str.strip() call treated the prefix as a *set* of
            # characters and could also remove legitimate leading or
            # trailing path characters such as slashes.
            return UNC_PREFIX + os.path.abspath(location[len(UNC_PREFIX_POSIX):])
        return UNC_PREFIX + os.path.abspath(location)
    return location
def copy_license_notice_files(fields, base_dir, license_notice_text_location, afp):
    """
    Copy the license and notice files referenced in `fields` from
    `license_notice_text_location` to the directory of the ABOUT file at
    path `afp` under `base_dir`.
    """
    lic_name = u''
    for key, value in fields:
        # Only the license_file and notice_file fields reference files.
        if key not in (u'license_file', u'notice_file'):
            continue
        lic_name = value
        from_lic_path = posixpath.join(to_posix(license_notice_text_location), lic_name)
        about_file_dir = dirname(to_posix(afp)).lstrip('/')
        to_lic_path = posixpath.join(to_posix(base_dir), about_file_dir)
        if on_windows:
            from_lic_path = add_unc(from_lic_path)
            to_lic_path = add_unc(to_lic_path)
        # Strip the surrounding white spaces
        from_lic_path = from_lic_path.strip()
        to_lic_path = to_lic_path.strip()
        # A missing source file is reported later during validation.
        if not posixpath.exists(from_lic_path):
            continue
        if not posixpath.exists(to_lic_path):
            os.makedirs(to_lic_path)
        try:
            shutil.copy2(from_lic_path, to_lic_path)
        except Exception as e:
            print(repr(e))
            print('Cannot copy file at %(from_lic_path)r.' % locals())
def inventory_filter(abouts, filter_dict):
    """
    Return the list of About objects from `abouts` that match `filter_dict`,
    a mapping of field name to a list of accepted values. An object is kept
    when any of its filtered fields has one of the accepted values.
    """
    updated_abouts = []
    for key in filter_dict:
        for about in abouts:
            try:
                # Check if the about object has the filtered attribute and if
                # the attribute value is one defined in the filter.
                for value in filter_dict[key]:
                    if vars(about)[key].value == value:
                        if about not in updated_abouts:
                            updated_abouts.append(about)
            except Exception:
                # The current about object does not have the defined
                # attribute. (Narrowed from a bare `except:` which also
                # swallowed SystemExit and KeyboardInterrupt.)
                continue
    return updated_abouts
def update_fieldnames(fieldnames, mapping_output):
    """
    Return a new list of CSV header names where each name in `fieldnames`
    is replaced by its user-defined name from the output mapping file at
    `mapping_output`, when one is defined.
    """
    mapping = get_output_mapping(mapping_output)
    # dict.get with a fallback replaces the original bare try/except, which
    # could hide errors other than a missing key.
    return [mapping.get(name, name) for name in fieldnames]
def update_about_dictionary_keys(about_dictionary_list, mapping_output):
    """
    Return a new list of ordered dictionaries where each About key is
    renamed to its user-defined key from the output mapping file at
    `mapping_output`, when one is defined.
    """
    output_map = get_output_mapping(mapping_output)
    updated_dict_list = []
    for element in about_dictionary_list:
        renamed = OrderedDict()
        for about_key, value in element.items():
            for custom_key in output_map:
                if about_key == custom_key:
                    renamed[output_map[custom_key]] = value
                    break
            else:
                # No mapping found: keep the original key.
                renamed[about_key] = value
        updated_dict_list.append(renamed)
    return updated_dict_list
def ungroup_licenses(licenses):
    """
    Split a list of license mappings into parallel lists of keys, names,
    files and urls, skipping absent entries.
    """
    lic_key = [lic['key'] for lic in licenses if 'key' in lic]
    lic_name = [lic['name'] for lic in licenses if 'name' in lic]
    lic_file = [lic['file'] for lic in licenses if 'file' in lic]
    lic_url = [lic['url'] for lic in licenses if 'url' in lic]
    return lic_key, lic_name, lic_file, lic_url
def format_about_dict_for_csv_output(about_dictionary_list):
    """
    Return a list of ordered dictionaries formatted for CSV output: list
    values and file/resource mappings are flattened to newline-separated
    strings; empty values are dropped.
    """
    file_fields = ['license_file', 'notice_file', 'changelog_file', 'author_file']
    csv_formatted_list = []
    for element in about_dictionary_list:
        row = OrderedDict()
        for key, value in element.items():
            if not value:
                continue
            if isinstance(value, list):
                row[key] = u'\n'.join(value)
            elif key == u'about_resource' or key in file_fields:
                # These values are mappings keyed by file path.
                row[key] = u'\n'.join(value.keys())
            else:
                row[key] = value
        csv_formatted_list.append(row)
    return csv_formatted_list
def format_about_dict_for_json_output(about_dictionary_list):
    """
    Return a list of ordered dictionaries formatted for JSON output: the
    about_resource mapping is reduced to its first path and the parallel
    license_* fields are regrouped into a single 'licenses' list of
    mappings with 'key', 'name', 'file' and 'url' entries.
    """
    license_fields = ['license_key', 'license_name', 'license_file', 'license_url']
    file_fields = ['notice_file', 'changelog_file', 'author_file']
    json_formatted_list = []
    for element in about_dictionary_list:
        row = OrderedDict()
        license_key = []
        license_name = []
        license_file = []
        license_url = []
        for key, value in element.items():
            if not value:
                continue
            # The 'about_resource' is an ordered dict keyed by path: keep
            # only the first path.
            if key == u'about_resource':
                row[key] = list(value.keys())[0]
            elif key in license_fields:
                if key == 'license_key':
                    license_key = value
                elif key == 'license_name':
                    license_name = value
                elif key == 'license_file':
                    license_file = value.keys()
                elif key == 'license_url':
                    license_url = value
            elif key in file_fields:
                row[key] = value.keys()
            else:
                row[key] = value
        # Regroup the parallel license fields into one list of mappings.
        license_group = list(zip_longest(license_key, license_name,
                                         license_file, license_url))
        if license_group:
            licenses_list = []
            for lic_key, lic_name, lic_file, lic_url in license_group:
                lic_dict = OrderedDict()
                if lic_key:
                    lic_dict['key'] = lic_key
                if lic_name:
                    lic_dict['name'] = lic_name
                if lic_file:
                    lic_dict['file'] = lic_file
                if lic_url:
                    lic_dict['url'] = lic_url
                licenses_list.append(lic_dict)
            row['licenses'] = licenses_list
        json_formatted_list.append(row)
    return json_formatted_list
class NoDuplicateConstructor(Constructor):
    """
    A PyYAML Constructor that raises KeyError when a mapping contains the
    same key more than once, instead of silently keeping the last value.

    NOTE(review): this uses collections.Hashable, which was an alias for
    collections.abc.Hashable and was removed in Python 3.10 — confirm the
    supported Python versions for this module.
    """
    def construct_mapping(self, node, deep=False):
        # Build a dict from a YAML mapping node, rejecting duplicate keys.
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None, None,
                "expected a mapping node, but found %s" % node.id,
                node.start_mark)
        mapping = {}
        for key_node, value_node in node.value:
            # keys can be list -> deep
            key = self.construct_object(key_node, deep=True)
            # lists are not hashable, but tuples are
            if not isinstance(key, collections.Hashable):
                if isinstance(key, list):
                    key = tuple(key)
            if sys.version_info.major == 2:
                # On Python 2, probe hashability by hashing directly.
                try:
                    hash(key)
                except TypeError as exc:
                    raise ConstructorError(
                        "while constructing a mapping", node.start_mark,
                        "found unacceptable key (%s)" %
                        exc, key_node.start_mark)
            else:
                # On Python 3, reject keys that are still unhashable after
                # the list -> tuple conversion above.
                if not isinstance(key, collections.Hashable):
                    raise ConstructorError(
                        "while constructing a mapping", node.start_mark,
                        "found unhashable key", key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            # Actually do the check.
            if key in mapping:
                raise KeyError("Got duplicate key: {!r}".format(key))
            mapping[key] = value
        return mapping
class NoDuplicateLoader(Reader, Scanner, Parser, Composer, NoDuplicateConstructor, Resolver):
    """
    A PyYAML loader assembled from the standard components, but using
    NoDuplicateConstructor so that duplicate mapping keys raise an error.
    """
    def __init__(self, stream):
        # Initialize each PyYAML mixin explicitly, mirroring the composition
        # of yaml's own Loader class.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        NoDuplicateConstructor.__init__(self)
        Resolver.__init__(self)
|
exa-analytics/atomic | refs/heads/master | exatomic/qe/__pw/footer.py | 4 | ## -*- coding: utf-8 -*-
## Copyright (c) 2015-2020, Exa Analytics Development Team
## Distributed under the terms of the Apache License 2.0
#"""
#PW Footer Subsection Parser
##############################
#"""
#from exa.typed import TypedProperty
#from exa.core.parser import Sections, Parser
#
#
#class Footer(Parser):
# """
# """
# def _parse(self):
# pass
|
netortik/yandex-tank | refs/heads/master | yandextank/__init__.py | 878 | # this is a namespace package
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
|
MindPass/Code | refs/heads/master | Interface_graphique/mindmap/svgwrite-1.1.6/svgwrite/data/pattern.py | 2 | #!/usr/bin/env python
#coding:utf-8
# Author: mozman --<mozman@gmx.at>
# Purpose: pattern module
# Created: 27.09.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
import re
# Precompiled regexes validating SVG attribute value micro-syntaxes.
# Each pattern anchors a signed decimal number (with optional exponent)
# followed by an optional unit suffix.
#coordinate ::= number ("em" | "ex" | "px" | "in" | "cm" | "mm" | "pt" | "pc" | "%")?
coordinate = re.compile(r"(^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)(cm|em|ex|in|mm|pc|pt|px|%)?$")
#length ::= number ("em" | "ex" | "px" | "in" | "cm" | "mm" | "pt" | "pc" | "%")?
# Lengths share the exact same grammar as coordinates.
length = coordinate
#angle ::= number (~"deg" | ~"grad" | ~"rad")?
angle = re.compile(r"(^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)(deg|rad|grad)?$")
# numbers without units
number = re.compile(r"(^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)$")
# number as percentage value '###%'
percentage = re.compile(r"(^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)%$")
#frequency ::= number (~"Hz" | ~"kHz")
frequency = re.compile(r"(^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)(Hz|kHz)?$")
#time ::= number (~"s" | ~"ms")
time = re.compile(r"(^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)(s|ms)?$")
|
cycotech/WAR-app | refs/heads/master | env/lib/python3.5/site-packages/django/contrib/gis/db/backends/spatialite/adapter.py | 586 | from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.db.backends.sqlite3.base import Database
class SpatiaLiteAdapter(WKTAdapter):
    "SQLite adaptor for geometry objects."
    def __conform__(self, protocol):
        # sqlite3 calls __conform__ to adapt Python objects for binding;
        # return the WKT string form for the prepare protocol. Implicitly
        # returns None for any other protocol.
        if protocol is Database.PrepareProtocol:
            return str(self)
|
lstoll/tetherme | refs/heads/master | common/appenginepatch/mediautils/management/commands/generatemedia.py | 10 | # -*- coding: utf-8 -*-
"""
This app combines media files specified in the COMBINE_MEDIA setting into one
single file. It's a dictionary mapping the combined name to a tuple of files
that should be combined:
COMBINE_MEDIA = {
'global/js/combined.js': (
'global/js/main.js',
'app/js/other.js',
),
'global/css/main.css': (
'global/css/base.css',
'app/css/app.css',
)
}
The files will automatically be combined if you use manage.py runserver.
Files that shouldn't be combined are simply copied. Also, all css and js files
get compressed with yuicompressor. The result is written in a folder named
_generated_media.
If the target is a JavaScript file whose name contains the string
'%(LANGUAGE_CODE)s' it'll automatically be internationalized and multiple
files will be generated (one for each language code).
"""
from django.core.management.base import NoArgsCommand
from optparse import make_option
from mediautils.generatemedia import generatemedia, updatemedia, MEDIA_ROOT
import os, shutil
class Command(NoArgsCommand):
    """
    Management command that combines and compresses media files into the
    _generated_media folder, with flags to skip compression or to only
    update changed files.
    """
    help = 'Combines and compresses your media files and saves them in _generated_media.'
    option_list = NoArgsCommand.option_list + (
        make_option('--uncompressed', action='store_true', dest='uncompressed',
            help='Do not run yuicompressor on generated media.'),
        make_option('--update', action='store_true', dest='update',
            help='Only update changed files instead of regenerating everything.'),
    )
    # No model access is needed, so skip model validation.
    requires_model_validation = False
    def handle_noargs(self, **options):
        # `compressed` stays None (use default) unless --uncompressed is set.
        compressed = None
        if options.get('uncompressed'):
            compressed = False
        if options.get('update'):
            updatemedia(compressed)
        else:
            generatemedia(compressed)
|
googleads/google-ads-python | refs/heads/master | google/ads/googleads/v8/enums/types/budget_status.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.enums",
marshal="google.ads.googleads.v8",
manifest={"BudgetStatusEnum",},
)
class BudgetStatusEnum(proto.Message):
    r"""Message describing a Budget status """
    class BudgetStatus(proto.Enum):
        r"""Possible statuses of a Budget."""
        # 0 is the proto3 default when the field is unset.
        UNSPECIFIED = 0
        # Value unknown to this client library version.
        UNKNOWN = 1
        ENABLED = 2
        REMOVED = 3
__all__ = tuple(sorted(__protobuf__.manifest))
|
waterice/Test-Git | refs/heads/master | arch/ia64/scripts/unwcheck.py | 916 | #!/usr/bin/env python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# NOTE: this is Python 2 code (print statements, long() and 0L literals).
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)
# Allow overriding the readelf binary via the READELF env variable.
readelf = os.getenv("READELF", "readelf")
# Matches a function header line: <name>: [0xSTART-0xEND]
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches an unwind region length annotation: ... rlen=N
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Report an error when the summed region lengths do not match the
    # function's slot count; reads the module-level start/end for unnamed
    # functions and bumps the global error counter.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
# Stream `readelf -u FILE` output, checking each function as the next one
# starts.
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # A new function header: validate the previous function first.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64 packs 3 instruction slots per 16-byte bundle.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Validate the final function after the loop ends.
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
|
les69/calvin-base | refs/heads/master | calvin/runtime/north/tests/test_storage.py | 2 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.utilities import calvinuuid
from calvin.runtime.north import storage
from calvin.runtime.north import appmanager
from calvin.runtime.south.plugins.async import threads
from calvin.utilities.calvin_callback import CalvinCB
from calvin.tests import TestNode, TestActor, TestPort
import Queue
import pytest
import time
try:
import pytest.inlineCallbacks
except ImportError:
pytest.inlineCallbacks = lambda *args: False
@pytest.mark.interactive
class TestSetFlushAndGet(object):
    """
    Verify that values set before the storage is started stay in the local
    store and are flushed to the distributed store once started.
    """
    @pytest.mark.slow
    @pytest.inlineCallbacks
    def test_late_start(self):
        self.q = Queue.Queue()
        # Callbacks push results onto a queue so the test can wait on them.
        def cb(key, value):
            self.q.put({"key": key, "value": value})
        def started_cb(started):
            self.q.put(started)
        self.storage = storage.Storage()
        self.storage.set("test", "1", 1, None)
        self.storage.set("test", "2", 2, None)
        self.storage.set("test", "3", 3, None)
        # Before start, the values live in the local store only.
        assert "test1" in self.storage.localstore
        assert "test2" in self.storage.localstore
        assert "test3" in self.storage.localstore
        yield threads.defer_to_thread(self.storage.start, CalvinCB(started_cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value
        # After start, the local store has been flushed.
        assert "test1" not in self.storage.localstore
        assert "test2" not in self.storage.localstore
        assert "test3" not in self.storage.localstore
        yield threads.defer_to_thread(self.storage.get, "test", "3", CalvinCB(func=cb))
        value = self.q.get(timeout=0.2)
        assert value["value"] == 3
        yield threads.defer_to_thread(self.storage.stop)
@pytest.mark.interactive
@pytest.mark.slow
class TestStorageStarted(object):
    """
    Exercise node, application and actor add/get/delete round-trips against
    a started storage instance.
    """
    @pytest.inlineCallbacks
    def setup_class(self):
        # Start a real storage instance shared by all tests in this class.
        self.storage = storage.Storage()
        yield threads.defer_to_thread(self.storage.start)
        yield threads.defer_to_thread(time.sleep, 2)
    @pytest.inlineCallbacks
    def teardown_class(self):
        yield threads.defer_to_thread(self.storage.stop)
        yield threads.defer_to_thread(time.sleep, 2)
    @pytest.inlineCallbacks
    def test_node_functions(self):
        """Add, fetch and delete a node, then verify it is gone."""
        self.q = Queue.Queue()
        def cb(key, value):
            self.q.put({"key": key, "value": value})
        yield threads.defer_to_thread(time.sleep, 2)
        node = TestNode("127.0.0.1:5000")
        yield threads.defer_to_thread(self.storage.add_node, node, cb=CalvinCB(cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value["value"] is True
        yield threads.defer_to_thread(self.storage.get_node, node.id, cb=CalvinCB(cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value["value"] == {'uri': node.uri}
        yield threads.defer_to_thread(self.storage.delete_node, node, cb=CalvinCB(cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value["value"] is True
        yield threads.defer_to_thread(self.storage.get_node, node.id, cb=CalvinCB(cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value["value"] is None
    @pytest.inlineCallbacks
    def test_application_functions(self):
        """Add, fetch and delete an application, then verify it is gone."""
        self.q = Queue.Queue()
        def cb(key, value):
            self.q.put({"key": key, "value": value})
        yield threads.defer_to_thread(time.sleep, 2)
        application = appmanager.Application(calvinuuid.uuid(
            'APP'), "test_app", [calvinuuid.uuid('ACTOR'), calvinuuid.uuid('ACTOR')])
        yield threads.defer_to_thread(self.storage.add_application, application, cb=CalvinCB(cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value["value"] is True
        yield threads.defer_to_thread(self.storage.get_application, application.id, cb=CalvinCB(cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value["value"]["name"] == application.name
        yield threads.defer_to_thread(self.storage.delete_application, application.id, cb=CalvinCB(cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value["value"] is True
        yield threads.defer_to_thread(self.storage.get_application, application.id, cb=CalvinCB(cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value["value"] is None
    @pytest.inlineCallbacks
    def test_actor_functions(self):
        """Add, fetch and delete an actor, then verify it is gone."""
        self.q = Queue.Queue()
        def cb(key, value):
            self.q.put({"key": key, "value": value})
        yield threads.defer_to_thread(time.sleep, 2)
        port1 = TestPort("out", "out")
        port2 = TestPort("in", "in", )
        # Wire the two ports as local peers of each other.
        port1.peers = [("local", port2.id)]
        port2.peer = ("local", port1.id)
        actor = TestActor("actor1", "type1", {}, {port1.name: port1})
        yield threads.defer_to_thread(self.storage.add_actor, actor, calvinuuid.uuid("NODE"), cb=CalvinCB(cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value["value"] is True
        yield threads.defer_to_thread(self.storage.get_actor, actor.id, cb=CalvinCB(cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value["value"]["name"] == actor.name
        yield threads.defer_to_thread(self.storage.delete_actor, actor.id, cb=CalvinCB(cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value["value"] is True
        yield threads.defer_to_thread(self.storage.get_actor, actor.id, cb=CalvinCB(cb))
        yield threads.defer_to_thread(time.sleep, 2)
        value = self.q.get(timeout=0.2)
        assert value["value"] is None
@pytest.mark.interactive
class TestStorageNotStarted(object):
    """
    Exercise the same storage operations against a storage instance that
    was never started, where everything goes through the local store.
    """
    def setup_class(self):
        # Note: the storage is deliberately NOT started.
        self.storage = storage.Storage()
    def teardown_class(self):
        pass
    def test_node_functions(self):
        """Add then delete a node using only the local store."""
        self.q = Queue.Queue()
        def cb(key, value):
            self.q.put({"key": key, "value": value})
        node = TestNode("127.0.0.1:5000")
        self.storage.add_node(node)
        value = self.storage.get_node(node_id=node.id, cb=CalvinCB(func=cb))
        value = self.q.get(timeout=0.2)
        assert value["key"] == node.id and value["value"] == {'uri': node.uri}
        self.storage.delete_node(node, cb=CalvinCB(func=cb))
        value = self.q.get(timeout=0.2)
        assert value
        assert node.id not in self.storage.localstore
    def test_application_functions(self):
        """Add then delete an application using only the local store."""
        self.q = Queue.Queue()
        def cb(key, value):
            self.q.put({"key": key, "value": value})
        application = appmanager.Application(calvinuuid.uuid(
            'APP'), "test_app", [calvinuuid.uuid('ACTOR'), calvinuuid.uuid('ACTOR')])
        self.storage.add_application(application)
        value = self.storage.get_application(
            application.id, cb=CalvinCB(func=cb))
        value = self.q.get(timeout=0.2)
        assert value["key"] == application.id and value[
            "value"]["name"] == application.name
        self.storage.delete_application(application.id, cb=CalvinCB(func=cb))
        value = self.q.get(timeout=0.2)
        assert value
        assert application.id not in self.storage.localstore
    def test_actor_and_port_functions(self):
        """Add two connected actors, check their port records, then delete."""
        self.q = Queue.Queue()
        def cb(key, value):
            self.q.put({"key": key, "value": value})
        port1 = TestPort("out", "out")
        port2 = TestPort("in", "in", )
        # Wire the two ports as local peers of each other.
        port1.peers = [("local", port2.id)]
        port2.peer = ("local", port1.id)
        actor1 = TestActor("actor1", "type1", {}, {port1.name: port1})
        actor2 = TestActor("actor2", "type2", {port2.name: port2}, {})
        self.storage.add_actor(actor1, calvinuuid.uuid("NODE"))
        value = self.storage.get_actor(actor1.id, cb=CalvinCB(func=cb))
        value = self.q.get(timeout=0.2)
        assert value["key"] == actor1.id and value[
            "value"]["name"] == actor1.name
        assert value["value"]["name"] == actor1.name
        assert value["value"]["type"] == actor1._type
        assert value["value"]["inports"] == []
        assert value["value"]["outports"][0]["id"] == port1.id
        value = self.storage.get_port(port1.id, cb=CalvinCB(func=cb))
        value = self.q.get(timeout=0.2)
        assert value["key"] == port1.id
        assert value["value"]["name"] == port1.name
        assert value["value"]["direction"] == port1.direction
        assert value["value"]["peers"] == [["local", port2.id]]
        self.storage.add_actor(actor2, calvinuuid.uuid("NODE"))
        value = self.storage.get_actor(actor2.id, cb=CalvinCB(func=cb))
        value = self.q.get(timeout=0.2)
        assert value["key"] == actor2.id
        assert value["value"]["name"] == actor2.name
        assert value["value"]["type"] == actor2._type
        assert value["value"]["inports"][0]["id"] == port2.id
        assert value["value"]["outports"] == []
        value = self.storage.get_port(port2.id, cb=CalvinCB(func=cb))
        value = self.q.get(timeout=0.2)
        assert value["key"] == port2.id
        assert value["value"]["name"] == port2.name
        assert value["value"]["direction"] == port2.direction
        assert value["value"]["peer"] == ["local", port1.id]
        self.storage.delete_actor(actor1.id, cb=CalvinCB(func=cb))
        value = self.q.get(timeout=0.2)
        assert value
        assert actor1.id not in self.storage.localstore
        self.storage.delete_port(port1.id, cb=CalvinCB(func=cb))
        value = self.q.get(timeout=0.2)
        assert value
        assert port1.id not in self.storage.localstore
|
ehabkost/avocado-vt | refs/heads/master | virttest/libvirt_xml/devices/hostdev.py | 8 | """
hostdev device support class(es)
http://libvirt.org/formatdomain.html#elementsHostDev
"""
import logging
from virttest.libvirt_xml.devices import base
from virttest.libvirt_xml import accessors
class Hostdev(base.TypedDeviceBase):
    """
    Hostdev device XML wrapper.

    http://libvirt.org/formatdomain.html#elementsHostDev

    Properties:
        hostdev_type: 'type' attribute of <hostdev> (e.g. 'pci', 'usb')
        mode: 'mode' attribute of <hostdev> (e.g. 'subsystem')
        managed: 'managed' attribute of <hostdev>
        source_address: nested <source> element wrapper
        boot_order: 'order' attribute of <boot>
    """

    __slots__ = ('mode', 'hostdev_type', 'source_address',
                 'managed', 'boot_order')

    def __init__(self, type_name="hostdev", virsh_instance=base.base.virsh):
        accessors.XMLAttribute('hostdev_type', self, parent_xpath='/',
                               tag_name='hostdev', attribute='type')
        accessors.XMLAttribute('mode', self, parent_xpath='/',
                               tag_name='hostdev', attribute='mode')
        accessors.XMLAttribute('managed', self, parent_xpath='/',
                               tag_name='hostdev', attribute='managed')
        accessors.XMLElementNest('source_address', self, parent_xpath='/',
                                 tag_name='source', subclass=self.SourceAddress,
                                 subclass_dargs={
                                     'virsh_instance': virsh_instance})
        accessors.XMLAttribute('boot_order', self, parent_xpath='/',
                               tag_name='boot', attribute='order')
        # BUGFIX: name the class explicitly instead of ``self.__class__``.
        # ``super(self.__class__, self)`` resolves to the *instance's* class,
        # which recurses infinitely as soon as this class is subclassed.
        super(Hostdev, self).__init__(device_tag='hostdev',
                                      type_name=type_name,
                                      virsh_instance=virsh_instance)

    def new_source_address(self, **dargs):
        """Return a new SourceAddress whose <address> carries **dargs."""
        new_one = self.SourceAddress(virsh_instance=self.virsh)
        new_address = new_one.new_untyped_address(**dargs)
        new_one.untyped_address = new_address
        return new_one

    class SourceAddress(base.base.LibvirtXMLBase):
        """Wrapper for the <source> element holding an <address>."""

        __slots__ = ('untyped_address',)

        def __init__(self, virsh_instance=base.base.virsh):
            accessors.XMLElementNest('untyped_address', self, parent_xpath='/',
                                     tag_name='address', subclass=self.UntypedAddress,
                                     subclass_dargs={
                                         'virsh_instance': virsh_instance})
            # Explicit class reference avoids the super(self.__class__, ...)
            # subclassing recursion pitfall.
            super(Hostdev.SourceAddress, self).__init__(
                virsh_instance=virsh_instance)
            self.xml = '<source/>'

        def new_untyped_address(self, **dargs):
            """Return a new UntypedAddress with attributes set from **dargs."""
            new_one = self.UntypedAddress(virsh_instance=self.virsh)
            for key, value in dargs.items():
                setattr(new_one, key, value)
            return new_one

        class UntypedAddress(base.UntypedDeviceBase):
            """Wrapper for the <address> element (domain/bus/slot/function)."""

            __slots__ = ('domain', 'bus', 'slot', 'function',)

            def __init__(self, virsh_instance=base.base.virsh):
                accessors.XMLAttribute('domain', self, parent_xpath='/',
                                       tag_name='address', attribute='domain')
                accessors.XMLAttribute('slot', self, parent_xpath='/',
                                       tag_name='address', attribute='slot')
                accessors.XMLAttribute('bus', self, parent_xpath='/',
                                       tag_name='address', attribute='bus')
                accessors.XMLAttribute('function', self, parent_xpath='/',
                                       tag_name='address', attribute='function')
                # Explicit class reference for the same subclassing reason.
                super(Hostdev.SourceAddress.UntypedAddress, self).__init__(
                    "address", virsh_instance=virsh_instance)
                self.xml = "<address/>"
|
ngouzy/smartchangelog | refs/heads/master | smartchangelog/datetools.py | 2 | from datetime import datetime
# Canonical timestamp layout shared by the parsing and formatting helpers.
date_format = "%Y-%m-%d %H:%M:%S %z"


def str2date(string: str) -> datetime:
    """Parse *string*, written per ``date_format``, into a tz-aware datetime."""
    parsed = datetime.strptime(string, date_format)
    return parsed


def date2str(dt: datetime) -> str:
    """Render *dt* using the module-wide ``date_format`` layout."""
    rendered = dt.strftime(date_format)
    return rendered
firerszd/kbengine | refs/heads/master | kbe/src/lib/python/Lib/encodings/cp65001.py | 112 | """
Code page 65001: Windows UTF-8 (CP_UTF8).
"""
import codecs
import functools
# The code-page conversion hooks are only compiled into Windows builds of
# CPython; raising LookupError here makes codecs.lookup() treat cp65001 as
# an unknown encoding on every other platform.
if not hasattr(codecs, 'code_page_encode'):
    raise LookupError("cp65001 encoding is only available on Windows")
### Codec APIs
# Stateless encode/decode entry points bound to Windows code page 65001
# (the UTF-8 code page).
encode = functools.partial(codecs.code_page_encode, 65001)
decode = functools.partial(codecs.code_page_decode, 65001)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input* via the module-level cp65001 encoder.

        The codec call returns a pair whose first item is the encoded
        bytes; only that item is returned.  *final* is accepted for API
        compatibility but has no effect, as in the original.
        """
        encoded, _consumed = encode(input, self.errors)
        return encoded
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    # BufferedIncrementalDecoder drives decoding through _buffer_decode();
    # bind it directly to the module-level cp65001 decode function.
    _buffer_decode = decode
class StreamWriter(codecs.StreamWriter):
    # StreamWriter only needs a stateless encode entry point; reuse the
    # module-level cp65001 encoder as the class attribute.
    encode = encode
class StreamReader(codecs.StreamReader):
    # StreamReader only needs a stateless decode entry point; reuse the
    # module-level cp65001 decoder as the class attribute.
    decode = decode
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register the cp65001 codec."""
    entry = dict(
        name='cp65001',
        encode=encode,
        decode=decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
    return codecs.CodecInfo(**entry)
|
mdavid/pledgeservice | refs/heads/master | testlib/setuptools/command/bdist_rpm.py | 111 | import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
    """
    Customized bdist_rpm that:

    1. Runs egg_info first so the name/version metadata is current.
    2. Forces 'install --single-version-externally-managed' so the RPM
       never contains eggs.
    3. Uses an underscore-mangled version in the RPM version field (dashes
       are not legal there) while keeping the original value available as
       %{unmangled_version}.
    """

    def run(self):
        # Refresh distribution metadata before the stock logic runs.
        self.run_command('egg_info')
        orig.bdist_rpm.run(self)

    def _make_spec_file(self):
        version = self.distribution.get_version()
        rpmversion = version.replace('-', '_')
        spec = orig.bdist_rpm._make_spec_file(self)
        plain_define = '%define version ' + version
        mangled_define = '%define version ' + rpmversion
        # Each (old, new) pair is applied, in order, to every spec line --
        # mirroring the chained str.replace calls this supersedes.
        substitutions = (
            ("Source0: %{name}-%{version}.tar",
             "Source0: %{name}-%{unmangled_version}.tar"),
            ("setup.py install ",
             "setup.py install --single-version-externally-managed "),
            ("%setup",
             "%setup -n %{name}-%{unmangled_version}"),
            (plain_define, mangled_define),
        )

        def _patch(line):
            for old, new in substitutions:
                line = line.replace(old, new)
            return line

        spec = [_patch(line) for line in spec]
        # Expose the original (possibly dash-containing) version right after
        # the mangled %define so the spec can use %{unmangled_version}.
        spec.insert(spec.index(mangled_define) + 1,
                    "%define unmangled_version " + version)
        return spec
|
doismellburning/edx-platform | refs/heads/master | common/test/data/uploads/python_lib_zip/sub/submodule.py | 223 | HELLO = "world"
|
dagwieers/ansible | refs/heads/devel | lib/ansible/modules/crypto/acme/acme_inspect.py | 20 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018 Felix Fontein (@felixfontein)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: acme_inspect
author: "Felix Fontein (@felixfontein)"
version_added: "2.8"
short_description: Send direct requests to an ACME server
description:
- "Allows to send direct requests to an ACME server with the
L(ACME protocol,https://tools.ietf.org/html/rfc8555),
which is supported by CAs such as L(Let's Encrypt,https://letsencrypt.org/)."
- "This module can be used to debug failed certificate request attempts,
for example when M(acme_certificate) fails or encounters a problem which
you wish to investigate."
- "The module can also be used to directly access features of an ACME servers
which are not yet supported by the Ansible ACME modules."
notes:
- "The I(account_uri) option must be specified for properly authenticated
ACME v2 requests (except a C(new-account) request)."
- "Using the C(ansible) tool, M(acme_inspect) can be used to directly execute
ACME requests without the need of writing a playbook. For example, the
following command retrieves the ACME account with ID 1 from Let's Encrypt
(assuming C(/path/to/key) is the correct private account key):
C(ansible localhost -m acme_inspect -a \"account_key_src=/path/to/key
acme_directory=https://acme-v02.api.letsencrypt.org/directory acme_version=2
account_uri=https://acme-v02.api.letsencrypt.org/acme/acct/1 method=get
url=https://acme-v02.api.letsencrypt.org/acme/acct/1\")"
seealso:
- name: Automatic Certificate Management Environment (ACME)
description: The specification of the ACME protocol (RFC 8555).
link: https://tools.ietf.org/html/rfc8555
- name: ACME TLS ALPN Challenge Extension
description: The current draft specification of the C(tls-alpn-01) challenge.
link: https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05
extends_documentation_fragment:
- acme
options:
url:
description:
- "The URL to send the request to."
- "Must be specified if I(method) is not C(directory-only)."
type: str
method:
description:
- "The method to use to access the given URL on the ACME server."
- "The value C(post) executes an authenticated POST request. The content
must be specified in the I(content) option."
- "The value C(get) executes an authenticated POST-as-GET request for ACME v2,
and a regular GET request for ACME v1."
- "The value C(directory-only) only retrieves the directory, without doing
a request."
type: str
default: get
choices:
- get
- post
- directory-only
content:
description:
- "An encoded JSON object which will be sent as the content if I(method)
is C(post)."
- "Required when I(method) is C(post), and not allowed otherwise."
type: str
fail_on_acme_error:
description:
- "If I(method) is C(post) or C(get), make the module fail in case an ACME
error is returned."
type: bool
default: yes
'''
EXAMPLES = r'''
- name: Get directory
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
method: directory-only
register: directory
- name: Create an account
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
url: "{{ directory.newAccount}}"
method: post
content: '{"termsOfServiceAgreed":true}'
register: account_creation
# account_creation.headers.location contains the account URI
# if creation was successful
- name: Get account information
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
url: "{{ account_creation.headers.location }}"
method: get
- name: Update account contacts
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
url: "{{ account_creation.headers.location }}"
method: post
content: '{{ account_info | to_json }}'
vars:
account_info:
# For valid values, see
# https://tools.ietf.org/html/rfc8555#section-7.3
contact:
- mailto:me@example.com
- name: Create certificate order
acme_certificate:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
csr: /etc/pki/cert/csr/sample.com.csr
fullchain_dest: /etc/httpd/ssl/sample.com-fullchain.crt
challenge: http-01
register: certificate_request
# Assume something went wrong. certificate_request.order_uri contains
# the order URI.
- name: Get order information
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
url: "{{ certificate_request.order_uri }}"
method: get
register: order
- name: Get first authz for order
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
url: "{{ order.output_json.authorizations[0] }}"
method: get
register: authz
- name: Get HTTP-01 challenge for authz
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
url: "{{ authz.output_json.challenges | selectattr('type', 'equalto', 'http-01') }}"
method: get
register: http01challenge
- name: Activate HTTP-01 challenge manually
acme_inspect:
acme_directory: https://acme-staging-v02.api.letsencrypt.org/directory
acme_version: 2
account_key_src: /etc/pki/cert/private/account.key
account_uri: "{{ account_creation.headers.location }}"
url: "{{ http01challenge.url }}"
method: post
content: '{}'
'''
RETURN = '''
directory:
description: The ACME directory's content
returned: always
type: dict
sample: |
{
"a85k3x9f91A4": "https://community.letsencrypt.org/t/adding-random-entries-to-the-directory/33417",
"keyChange": "https://acme-v02.api.letsencrypt.org/acme/key-change",
"meta": {
"caaIdentities": [
"letsencrypt.org"
],
"termsOfService": "https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf",
"website": "https://letsencrypt.org"
},
"newAccount": "https://acme-v02.api.letsencrypt.org/acme/new-acct",
"newNonce": "https://acme-v02.api.letsencrypt.org/acme/new-nonce",
"newOrder": "https://acme-v02.api.letsencrypt.org/acme/new-order",
"revokeCert": "https://acme-v02.api.letsencrypt.org/acme/revoke-cert"
}
headers:
description: The request's HTTP headers (with lowercase keys)
returned: always
type: dict
sample: |
{
"boulder-requester": "12345",
"cache-control": "max-age=0, no-cache, no-store",
"connection": "close",
"content-length": "904",
"content-type": "application/json",
"cookies": {},
"cookies_string": "",
"date": "Wed, 07 Nov 2018 12:34:56 GMT",
"expires": "Wed, 07 Nov 2018 12:44:56 GMT",
"link": "<https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf>;rel=\"terms-of-service\"",
"msg": "OK (904 bytes)",
"pragma": "no-cache",
"replay-nonce": "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGH",
"server": "nginx",
"status": 200,
"strict-transport-security": "max-age=604800",
"url": "https://acme-v02.api.letsencrypt.org/acme/acct/46161",
"x-frame-options": "DENY"
}
output_text:
description: The raw text output
returned: always
type: str
sample: "{\\n \\\"id\\\": 12345,\\n \\\"key\\\": {\\n \\\"kty\\\": \\\"RSA\\\",\\n ..."
output_json:
description: The output parsed as JSON
returned: if output can be parsed as JSON
type: dict
sample:
- id: 12345
- key:
- kty: RSA
- ...
'''
from ansible.module_utils.acme import (
ModuleFailException, ACMEAccount, set_crypto_backend,
)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native, to_bytes
import json
def main():
    """Module entry point: build the AnsibleModule, execute the requested
    ACME request (or just fetch the directory), and exit with the results.
    """
    module = AnsibleModule(
        argument_spec=dict(
            account_key_src=dict(type='path', aliases=['account_key']),
            account_key_content=dict(type='str', no_log=True),
            account_uri=dict(type='str'),
            acme_directory=dict(type='str', default='https://acme-staging.api.letsencrypt.org/directory'),
            acme_version=dict(type='int', default=1, choices=[1, 2]),
            validate_certs=dict(type='bool', default=True),
            url=dict(type='str'),
            method=dict(type='str', choices=['get', 'post', 'directory-only'], default='get'),
            content=dict(type='str'),
            fail_on_acme_error=dict(type='bool', default=True),
            select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'openssl', 'cryptography']),
        ),
        mutually_exclusive=(
            ['account_key_src', 'account_key_content'],
        ),
        required_if=(
            # 'url' is needed for any real request; an account key (from
            # file or inline content) is needed for authenticated GET/POST.
            ['method', 'get', ['url']],
            ['method', 'post', ['url', 'content']],
            ['method', 'get', ['account_key_src', 'account_key_content'], True],
            ['method', 'post', ['account_key_src', 'account_key_content'], True],
        ),
    )
    set_crypto_backend(module)
    if not module.params.get('validate_certs'):
        # Loudly discourage disabling TLS validation outside local testing.
        module.warn(warning='Disabling certificate validation for communications with ACME endpoint. ' +
                            'This should only be done for testing against a local ACME server for ' +
                            'development purposes, but *never* for production purposes.')
    result = dict()
    changed = False
    try:
        # Get hold of ACMEAccount object (includes directory)
        account = ACMEAccount(module)
        method = module.params['method']
        result['directory'] = account.directory.directory
        # Do we have to do more requests?
        if method != 'directory-only':
            url = module.params['url']
            fail_on_acme_error = module.params['fail_on_acme_error']
            # Do request
            if method == 'get':
                data, info = account.get_request(url, parse_json_result=False, fail_on_error=False)
            elif method == 'post':
                changed = True  # only POSTs can change
                data, info = account.send_signed_request(url, to_bytes(module.params['content']), parse_json_result=False, encode_payload=False)
            # Update results
            result.update(dict(
                headers=info,
                output_text=to_native(data),
            ))
            # See if we can parse the result as JSON; non-JSON bodies are
            # still returned via output_text, so failure here is ignored.
            try:
                result['output_json'] = json.loads(data)
            except Exception as dummy:
                pass
            # Fail if error was returned
            if fail_on_acme_error and info['status'] >= 400:
                raise ModuleFailException("ACME request failed: CODE: {0} RESULT: {1}".format(info['status'], data))
        # Done!
        module.exit_json(changed=changed, **result)
    except ModuleFailException as e:
        # Attach whatever partial results were collected to the failure.
        e.do_fail(module, **result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
polyaxon/polyaxon | refs/heads/master | core/polyaxon/proxies/schemas/base.py | 1 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon import settings
def _get_indent(indent):
    """Return the whitespace prefix for *indent* nesting levels, using the
    nginx indent character/width configured in PROXIES_CONFIG."""
    config = settings.PROXIES_CONFIG
    one_level = config.nginx_indent_char * config.nginx_indent_width
    return one_level * indent
def get_config(options, indent=0, **kwargs):
    """Render an nginx config fragment.

    Args:
        options: template string, expanded with ``str.format(**kwargs)``.
        indent: nesting level; each line is prefixed via ``_get_indent``.
        **kwargs: substitutions for the template.

    Returns:
        The indented fragment, cleaned by ``clean_config``.
    """
    rendered = options.format(**kwargs)
    # Comprehension replaces the manual append loop of the original.
    config = ["{}{}".format(_get_indent(indent), line)
              for line in rendered.split("\n")]
    return clean_config(config)
def clean_config(config):
    """Join *config* lines with newlines, dropping the space-plus-newline
    artifacts produced when an empty template line gets indented."""
    joined = "\n".join(config)
    return joined.replace(" \n", "")
|
muravjov/ansible-modules-core | refs/heads/devel | cloud/rackspace/rax_cdb_database.py | 41 | #!/usr/bin/python -tt
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
module: rax_cdb_database
short_description: 'create / delete a database in the Cloud Databases'
description:
- create / delete a database in the Cloud Databases.
version_added: "1.8"
options:
cdb_id:
description:
- The databases server UUID
default: null
name:
description:
- Name to give to the database
default: null
character_set:
description:
- Set of symbols and encodings
default: 'utf8'
collate:
description:
- Set of rules for comparing characters in a character set
default: 'utf8_general_ci'
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author: Simon JAILLET
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a database in Cloud Databases
tasks:
- name: Database build request
local_action:
module: rax_cdb_database
credentials: ~/.raxpub
region: IAD
cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
name: db1
state: present
register: rax_db_database
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def find_database(instance, name):
    """Return the database called *name* on *instance*, or False when the
    lookup raises (e.g. the database does not exist)."""
    try:
        return instance.get_database(name)
    except Exception:
        return False
def save_database(module, cdb_id, name, character_set, collate):
    """Ensure database *name* exists on Cloud Databases instance *cdb_id*.

    Exits the module via exit_json (changed=True only when the database
    had to be created) or fail_json on any pyrax error.
    """
    cdb = pyrax.cloud_databases
    try:
        instance = cdb.get(cdb_id)
    except Exception, e:
        module.fail_json(msg='%s' % e.message)
    changed = False
    # Idempotency: only create when the database is not already present.
    database = find_database(instance, name)
    if not database:
        try:
            database = instance.create_database(name=name,
                                                character_set=character_set,
                                                collate=collate)
        except Exception, e:
            module.fail_json(msg='%s' % e.message)
        else:
            changed = True
    module.exit_json(changed=changed, action='create',
                     database=rax_to_dict(database))
def delete_database(module, cdb_id, name):
    """Ensure database *name* is absent from Cloud Databases instance
    *cdb_id*; exits via exit_json (changed=True only when a delete actually
    happened) or fail_json on any pyrax error.
    """
    cdb = pyrax.cloud_databases
    try:
        instance = cdb.get(cdb_id)
    except Exception, e:
        module.fail_json(msg='%s' % e.message)
    changed = False
    # Idempotency: only delete when the database currently exists.
    database = find_database(instance, name)
    if database:
        try:
            database.delete()
        except Exception, e:
            module.fail_json(msg='%s' % e.message)
        else:
            changed = True
    module.exit_json(changed=changed, action='delete',
                     database=rax_to_dict(database))
def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
    """Dispatch to the handler matching the requested *state*.

    'present' ensures the database exists; 'absent' ensures it is gone.
    Any other value is ignored, matching the original elif chain.
    """
    handlers = {
        'present': lambda: save_database(module, cdb_id, name,
                                         character_set, collate),
        'absent': lambda: delete_database(module, cdb_id, name),
    }
    handler = handlers.get(state)
    if handler is not None:
        handler()
def main():
    """Ansible entry point: build the argument spec, validate pyrax is
    available, authenticate, and dispatch on the requested state."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            cdb_id=dict(type='str', required=True),
            name=dict(type='str', required=True),
            character_set=dict(type='str', default='utf8'),
            collate=dict(type='str', default='utf8_general_ci'),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')
    cdb_id = module.params.get('cdb_id')
    name = module.params.get('name')
    character_set = module.params.get('character_set')
    collate = module.params.get('collate')
    state = module.params.get('state')
    # Authenticate pyrax with the module's credentials/region settings.
    setup_rax_module(module, pyrax)
    rax_cdb_database(module, state, cdb_id, name, character_set, collate)
# import module snippets
# (legacy Ansible pattern: helper code is star-imported at the bottom of the
# file and the entry point is invoked unconditionally on load)
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()
|
CentralLabFacilities/m3meka | refs/heads/master | python/scripts/demo/m3_demo_shapes.py | 2 | #! /usr/bin/python
#Copyright 2010, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
import numpy as nu
from m3.unit_conversion import *
import m3.component_factory as m3f
import m3.toolbox as m3t
import m3.trajectory as m3jt
import m3.rt_proxy as m3p
import m3.toolbox as m3t
import PyKDL as kdl
import m3.viz as m3v
class M3DemoShapes:
    """Interactive demo: drive an M3 arm (and/or RVIZ sim) through square or
    circle end-effector trajectories generated via inverse kinematics.

    NOTE(review): Python 2 script; relies on the m3 runtime (proxy,
    component factory, trajectory, viz) being installed and configured.
    """
    def __init__(self):
        # 7-DOF joint-space trajectory generator.
        self.jt = m3jt.JointTrajectory(7)
        self.vias=[]
        self.vel_avg=20.
        self.stiffness = 0.5
        self.ik_vias=[]
        self.ndof=7
        self.theta_0 = []
        self.thetadot_0 = []
        self.viz_launched = False
        self.m3_launched = False
        self.center = []
        # NOTE(review): the empty-list assignment below is immediately
        # overwritten and is dead code.
        self.axis_demo = []
        # Tool axis used for every IK via (pointing along +z).
        self.axis_demo = [0, 0, 1]
    def start(self):
        """Run the interactive menu: pick target (M3/RVIZ/both), pick arm,
        then loop generating/displaying/executing shape trajectories."""
        print '--------------------------'
        print 'm: Target M3 arm'
        print 'v: Target RVIZ'
        print 'b: Target Both M3 and RVIZ'
        print 'q: quit'
        print '--------------'
        print
        self.k = 'a'
        while self.k!='m' and self.k!='v' and self.k!='b' and self.k!='q':
            self.k=m3t.get_keystroke()
        if self.k=='q':
            return
        self.proxy = m3p.M3RtProxy()
        if self.k=='m' or self.k=='b':
            self.proxy.start()
        bot_name=m3t.get_robot_name()
        if bot_name == "":
            # NOTE(review): 'bot_names' is undefined here (should be
            # 'bot_name'); this line raises NameError when reached.
            print 'Error: no robot components found:', bot_names
            return
        self.bot=m3f.create_component(bot_name)
        arm_names = ['right_arm', 'left_arm']
        self.arm_name = m3t.user_select_components_interactive(arm_names,single=True)[0]
        # Shape center in meters, mirrored in y for the left arm.
        if self.arm_name == 'right_arm':
            self.center = [0.450, -0.25, -0.1745]
        else:
            self.center = [0.450, 0.25, -0.1745]
        avail_chains = self.bot.get_available_chains()
        for c in avail_chains:
            if c == 'torso':
                # Raise the shape center when the robot has a torso.
                self.center[2] += 0.5079
        if self.k=='v' or self.k=='b':
            self.viz = m3v.M3Viz(self.proxy, self.bot)
            self.viz_launched = True
            self.viz.turn_sim_on()
        if self.k=='v':
            # Sim-only: start pose comes from the simulated joint angles.
            self.theta_0[:] = self.bot.get_theta_sim_deg(self.arm_name)[:]
        if self.k=='m' or self.k=='b':
            self.proxy.subscribe_status(self.bot)
            self.proxy.publish_command(self.bot)
            self.proxy.make_operational_all()
            self.proxy.step()
            # Real robot: start pose comes from the measured joint angles.
            self.theta_0[:] = self.bot.get_theta_deg(self.arm_name)[:]
            self.m3_launched = True
        self.theta_soln_deg = [0.]*self.bot.get_num_dof(self.arm_name)
        self.thetadot_0 = [0.]*self.bot.get_num_dof(self.arm_name)
        self.bot.set_slew_rate_proportion(self.arm_name, [1.0]*7)
        while True:
            print '--------------------------'
            print 'g: generate vias'
            print 'd: display vias'
            print 'v: set avg velocity (Current ',self.vel_avg,')'
            print 's: set stiffness (Current',self.stiffness,')'
            if self.k=='b' or self.k=='m':
                print 'e: execute vias'
            if self.k=='b' or self.k=='v':
                print 't: test vias in visualizer'
            print 'q: quit'
            print '--------------'
            print
            m=m3t.get_keystroke()
            if m=='q':
                return
            if m=='v':
                print 'Enter avg velocity (0-60 Deg/S) [',self.vel_avg,']'
                self.vel_avg=max(0,min(60,m3t.get_float(self.vel_avg)))
            if m=='s':
                print 'Enter stiffness (0-1.0) [',self.stiffness,']'
                self.stiffness=max(0,min(1.0,m3t.get_float(self.stiffness)))
            if m == 'g':
                # Build Cartesian IK vias for the chosen shape, then solve
                # each one into a joint-space via.
                self.vias=[]
                print
                print '(s)quare or (c)ircle?'
                shape = None
                while shape != 's' and shape != 'c':
                    shape=m3t.get_keystroke()
                length_m = 0.0
                if shape == 's':
                    print
                    print 'Length of square side in cm (10-25) [25]'
                    length_cm = nu.float(max(10,min(25,m3t.get_int(25))))
                    length_m = length_cm / 100.0
                diameter_m = 0.0
                if shape == 'c':
                    print
                    print 'Diameter of circle in cm (10-25) [25]'
                    diameter_cm = nu.float(max(10,min(25,m3t.get_int(25))))
                    diameter_m = diameter_cm / 100.0
                print
                print 'Enter shape resolution (1-20 vias/side) [20]'
                resolution = max(1,min(20,m3t.get_int(20)))
                if self.m3_launched:
                    self.proxy.step()
                # The shape lies in the y/z plane at fixed x.
                x = self.center[0]
                if shape == 's':
                    y_left = self.center[1] + length_m/2.0
                    y_right = self.center[1] - length_m/2.0
                    z_top = self.center[2] + length_m/2.0
                    z_bottom = self.center[2] - length_m/2.0
                    dy = (y_left - y_right) / nu.float(resolution)
                    dz = (z_top - z_bottom) / nu.float(resolution)
                    # Traverse direction depends on the arm so the motion
                    # sweeps away from the body first.
                    if self.arm_name=='right_arm':
                        # first add start point
                        self.ik_vias.append([x, y_left, z_top, self.axis_demo[0], self.axis_demo[1], self.axis_demo[2]])
                        # add top line
                        for i in range(resolution):
                            self.ik_vias.append([x, y_left - (i+1)*dy, z_top, self.axis_demo[0],self.axis_demo[1], self.axis_demo[2]])
                        # add right line
                        for i in range(resolution):
                            self.ik_vias.append([x, y_right, z_top - (i+1)*dz, self.axis_demo[0], self.axis_demo[1], self.axis_demo[2]])
                        # add bottom line
                        for i in range(resolution):
                            self.ik_vias.append([x, y_right + (i+1)*dy, z_bottom, self.axis_demo[0], self.axis_demo[1], self.axis_demo[2]])
                        # add left line
                        for i in range(resolution):
                            self.ik_vias.append([x, y_left, z_bottom + (i+1)*dz, self.axis_demo[0], self.axis_demo[1], self.axis_demo[2]])
                    else:
                        # first add start point
                        self.ik_vias.append([x, y_right, z_top, self.axis_demo[0], self.axis_demo[1], self.axis_demo[2]])
                        # add top line
                        for i in range(resolution):
                            self.ik_vias.append([x, y_right + (i+1)*dy, z_top, self.axis_demo[0],self.axis_demo[1], self.axis_demo[2]])
                        # add right line
                        for i in range(resolution):
                            self.ik_vias.append([x, y_left, z_top - (i+1)*dz, self.axis_demo[0], self.axis_demo[1], self.axis_demo[2]])
                        # add bottom line
                        for i in range(resolution):
                            self.ik_vias.append([x, y_left - (i+1)*dy, z_bottom, self.axis_demo[0], self.axis_demo[1], self.axis_demo[2]])
                        # add left line
                        for i in range(resolution):
                            self.ik_vias.append([x, y_right, z_bottom + (i+1)*dz, self.axis_demo[0], self.axis_demo[1], self.axis_demo[2]])
                if shape == 'c':
                    # Sample the circle starting at the top (t = pi/2),
                    # wrapping t into (-pi, pi].
                    for i in range(resolution*4 + 1):
                        dt = 2*nu.pi/(nu.float(resolution)*4)
                        t = (nu.pi/2) + i*dt
                        if t > nu.pi:
                            t -= 2*nu.pi
                        y = self.center[1] + (diameter_m/2.0) * nu.cos(t)
                        z = self.center[2] + (diameter_m/2.0) * nu.sin(t)
                        self.ik_vias.append([x, y, z, self.axis_demo[0], self.axis_demo[1], self.axis_demo[2]])
                self.vias.append(self.theta_0[:])
                # use zero position as reference for IK solver
                # NOTE(review): the next assignment is immediately
                # overwritten by the holdup reference below (dead code).
                ref=[0]*self.bot.get_num_dof(self.arm_name)
                # use holdup position as reference
                ref= [30,0,0,40,0,0,0]
                self.bot.set_theta_sim_deg(self.arm_name,ref)
                for ikv in self.ik_vias:
                    theta_soln = []
                    print 'solving for ik via:', ikv
                    # Seed each IK solve from the previous solution (set via
                    # set_theta_sim_deg) to keep the chain continuous.
                    if self.bot.get_tool_axis_2_theta_deg_sim(self.arm_name, ikv[:3], ikv[3:], theta_soln):
                        self.vias.append(theta_soln)
                        self.bot.set_theta_sim_deg(self.arm_name,theta_soln)
                    else:
                        print 'WARNING: no IK solution found for via ', ikv
                self.bot.set_theta_sim_deg(self.arm_name,ref)
                if self.viz_launched:
                    self.viz.step()
                # Return to the start pose at the end of the shape.
                self.vias.append(self.theta_0[:])
            if m=='d':
                print
                print '--------- IK Vias (', len(self.ik_vias), ')--------'
                print '---------------[end_xyz[3], end_axis[3]]-----------'
                for ikv in self.ik_vias:
                    print ikv
                print
                print '--------- Joint Vias (', len(self.vias), ')--------'
                for v in self.vias:
                    print v
            if m == 'e' or m=='t':
                # Spline the joint vias and either execute on the robot
                # ('e') or play back in the visualizer ('t').
                if len(self.vias) != 0:
                    for v in self.vias:
                        #print 'Adding via',v
                        self.jt.add_via_deg(v, [self.vel_avg]*self.ndof)
                    self.jt.start(self.theta_0[:], self.thetadot_0[:])
                    print
                    print '--------- Splines (', len(self.jt.splines), ')--------'
                    print '------------q_0, q_f, qdot_0, qdot_f, tf--------------'
                    for s in self.jt.splines:
                        print s.q_0, s.q_f, s.qdot_0, s.qdot_f, s.tf
                    print
                    print 'Hit any key to start or (q) to quit execution'
                    p=m3t.get_keystroke()
                    if p != 'q':
                        if self.m3_launched and m=='e':
                            # Enable the arm in gravity-compensated theta
                            # mode before streaming the trajectory.
                            self.bot.set_motor_power_on()
                            self.bot.set_mode_theta_gc(self.arm_name)
                            self.bot.set_stiffness(self.arm_name, [self.stiffness]*self.bot.get_num_dof(self.arm_name))
                        while not self.jt.is_splined_traj_complete():
                            q = self.jt.step()
                            if self.viz_launched and m=='t':
                                self.bot.set_theta_sim_deg(self.arm_name, q)
                                self.viz.step()
                                time.sleep(0.1)
                            elif self.m3_launched and m=='e':
                                self.bot.set_theta_deg(self.arm_name, q)
                                self.proxy.step()
                    self.ik_vias=[]
    def stop(self):
        """Shut down the visualizer and/or RT proxy if they were started."""
        if self.viz_launched:
            self.viz.stop()
        if self.m3_launched:
            self.proxy.stop()
if __name__ == '__main__':
    # Run the interactive shapes demo; Ctrl-C falls through to cleanup.
    demo = M3DemoShapes()
    try:
        demo.start()
    except (KeyboardInterrupt):
        pass
    # Always stop the visualizer/proxy helpers, even after an interrupt.
    demo.stop()
|
CoderBotOrg/coderbotsrv | refs/heads/master | server/lib/werkzeug/serving.py | 145 | # -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
However there are some caveats. Sourcecode won't reload itself when
changed and each time you kill the server using ``^C`` you get a
`KeyboardInterrupt` error. While the latter is easy to solve the first
one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
You can also pass it a `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `werkzeug.script`
instead of a simple start file.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import socket
import sys
import time
import signal
import subprocess
try:
import thread
except ImportError:
import _thread as thread
try:
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from socketserver import ThreadingMixIn, ForkingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
import werkzeug
from werkzeug._internal import _log
from werkzeug._compat import iteritems, PY2, reraise, text_type, \
wsgi_encoding_dance
from werkzeug.urls import url_parse, url_unquote
from werkzeug.exceptions import InternalServerError, BadRequest
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
    """A request handler that implements WSGI dispatching."""
    @property
    def server_version(self):
        # Advertised in the Server header and environ['SERVER_SOFTWARE'].
        return 'Werkzeug/' + werkzeug.__version__
    def make_environ(self):
        """Build the WSGI environ dict for the request currently being handled."""
        request_url = url_parse(self.path)
        def shutdown_server():
            # Exposed to applications as environ['werkzeug.server.shutdown'].
            self.server.shutdown_signal = True
        url_scheme = self.server.ssl_context is None and 'http' or 'https'
        path_info = url_unquote(request_url.path)
        environ = {
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': url_scheme,
            'wsgi.input': self.rfile,
            'wsgi.errors': sys.stderr,
            'wsgi.multithread': self.server.multithread,
            'wsgi.multiprocess': self.server.multiprocess,
            'wsgi.run_once': False,
            'werkzeug.server.shutdown':
                shutdown_server,
            'SERVER_SOFTWARE': self.server_version,
            'REQUEST_METHOD': self.command,
            'SCRIPT_NAME': '',
            'PATH_INFO': wsgi_encoding_dance(path_info),
            'QUERY_STRING': wsgi_encoding_dance(request_url.query),
            'CONTENT_TYPE': self.headers.get('Content-Type', ''),
            'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
            'REMOTE_ADDR': self.client_address[0],
            'REMOTE_PORT': self.client_address[1],
            'SERVER_NAME': self.server.server_address[0],
            'SERVER_PORT': str(self.server.server_address[1]),
            'SERVER_PROTOCOL': self.request_version
        }
        # Copy all remaining HTTP headers into HTTP_* keys; Content-Type and
        # Content-Length were already placed under their CGI-style names.
        for key, value in self.headers.items():
            key = 'HTTP_' + key.upper().replace('-', '_')
            if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
                environ[key] = value
        # An absolute request URL (proxy style) carries the host in the URL.
        if request_url.netloc:
            environ['HTTP_HOST'] = request_url.netloc
        return environ
    def run_wsgi(self):
        """Dispatch the current request to the WSGI application."""
        if self.headers.get('Expect', '').lower().strip() == '100-continue':
            self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n')
        environ = self.make_environ()
        # headers_set is filled by start_response(); headers_sent mirrors it
        # once the status line and headers have actually been written.
        headers_set = []
        headers_sent = []
        def write(data):
            assert headers_set, 'write() before start_response'
            if not headers_sent:
                # First write: emit the status line and headers, adding
                # Connection/Server/Date defaults if the app omitted them.
                status, response_headers = headers_sent[:] = headers_set
                try:
                    code, msg = status.split(None, 1)
                except ValueError:
                    code, msg = status, ""
                self.send_response(int(code), msg)
                header_keys = set()
                for key, value in response_headers:
                    self.send_header(key, value)
                    key = key.lower()
                    header_keys.add(key)
                if 'content-length' not in header_keys:
                    # Without a length we cannot keep the connection alive.
                    self.close_connection = True
                    self.send_header('Connection', 'close')
                if 'server' not in header_keys:
                    self.send_header('Server', self.version_string())
                if 'date' not in header_keys:
                    self.send_header('Date', self.date_time_string())
                self.end_headers()
            assert type(data) is bytes, 'applications must write bytes'
            self.wfile.write(data)
            self.wfile.flush()
        def start_response(status, response_headers, exc_info=None):
            # Standard PEP 3333 semantics: re-raise if headers already went
            # out; otherwise allow the app to replace the pending headers.
            if exc_info:
                try:
                    if headers_sent:
                        reraise(*exc_info)
                finally:
                    exc_info = None
            elif headers_set:
                raise AssertionError('Headers already set')
            headers_set[:] = [status, response_headers]
            return write
        def execute(app):
            application_iter = app(environ, start_response)
            try:
                for data in application_iter:
                    write(data)
                # Force headers out for empty responses.
                if not headers_sent:
                    write(b'')
            finally:
                if hasattr(application_iter, 'close'):
                    application_iter.close()
                application_iter = None
        try:
            execute(self.server.app)
        except (socket.error, socket.timeout) as e:
            self.connection_dropped(e, environ)
        except Exception:
            if self.server.passthrough_errors:
                raise
            from werkzeug.debug.tbtools import get_current_traceback
            traceback = get_current_traceback(ignore_system_exceptions=True)
            try:
                # if we haven't yet sent the headers but they are set
                # we roll back to be able to set them again.
                if not headers_sent:
                    del headers_set[:]
                execute(InternalServerError())
            except Exception:
                pass
            self.server.log('error', 'Error on request:\n%s',
                            traceback.plaintext)
    def handle(self):
        """Handles a request ignoring dropped connections."""
        rv = None
        try:
            rv = BaseHTTPRequestHandler.handle(self)
        except (socket.error, socket.timeout) as e:
            self.connection_dropped(e)
        except Exception:
            # Swallow only SSL errors on SSL connections; anything else is
            # a genuine bug and must propagate.
            if self.server.ssl_context is None or not is_ssl_error():
                raise
        if self.server.shutdown_signal:
            self.initiate_shutdown()
        return rv
    def initiate_shutdown(self):
        """A horrible, horrible way to kill the server for Python 2.6 and
        later.  It's the best we can do.
        """
        # Windows does not provide SIGKILL, go with SIGTERM then.
        sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
        # reloader active
        if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
            os.kill(os.getpid(), sig)
        # python 2.7
        self.server._BaseServer__shutdown_request = True
        # python 2.6
        self.server._BaseServer__serving = False
    def connection_dropped(self, error, environ=None):
        """Called if the connection was closed by the client.  By default
        nothing happens.
        """
    def handle_one_request(self):
        """Handle a single HTTP request."""
        self.raw_requestline = self.rfile.readline()
        if not self.raw_requestline:
            self.close_connection = 1
        elif self.parse_request():
            return self.run_wsgi()
    def send_response(self, code, message=None):
        """Send the response header and log the response code."""
        self.log_request(code)
        if message is None:
            message = code in self.responses and self.responses[code][0] or ''
        if self.request_version != 'HTTP/0.9':
            hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
            self.wfile.write(hdr.encode('ascii'))
    def version_string(self):
        return BaseHTTPRequestHandler.version_string(self).strip()
    def address_string(self):
        return self.client_address[0]
    def log_request(self, code='-', size='-'):
        self.log('info', '"%s" %s %s', self.requestline, code, size)
    def log_error(self, *args):
        self.log('error', *args)
    def log_message(self, format, *args):
        self.log('info', format, *args)
    def log(self, type, message, *args):
        # Apache-ish access-log line: "client - - [date] message".
        _log(type, '%s - - [%s] %s\n' % (self.address_string(),
                                         self.log_date_time_string(),
                                         message % args))
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler  # public alias -- do not remove
def generate_adhoc_ssl_pair(cn=None):
    """Generate a throwaway self-signed certificate/private-key pair.

    :param cn: the common name for the certificate; defaults to a
               wildcard (``'*'``).
    :return: an ``(X509 certificate, PKey private key)`` tuple.
    """
    from random import random
    from OpenSSL import crypto
    # pretty damn sure that this is not actually accepted by anyone
    if cn is None:
        cn = '*'
    cert = crypto.X509()
    # BUG FIX: ``sys.maxint`` does not exist on Python 3 (this module is
    # otherwise 2/3 compatible via werkzeug._compat), which made this raise
    # AttributeError there.  Fall back to ``sys.maxsize`` for the serial.
    cert.set_serial_number(int(random() * getattr(sys, 'maxint', sys.maxsize)))
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
    subject = cert.get_subject()
    subject.CN = cn
    subject.O = 'Dummy Certificate'
    issuer = cert.get_issuer()
    issuer.CN = 'Untrusted Authority'
    issuer.O = 'Self-Signed'
    pkey = crypto.PKey()
    # NOTE(review): 768-bit RSA with an md5 signature is cryptographically
    # weak; kept as-is because this is a throwaway dev certificate, but
    # modern OpenSSL builds may reject it -- consider 2048 bits / sha256.
    pkey.generate_key(crypto.TYPE_RSA, 768)
    cert.set_pubkey(pkey)
    cert.sign(pkey, 'md5')
    return cert, pkey
def make_ssl_devcert(base_path, host=None, cn=None):
    """Creates an SSL key for development.  This should be used instead of
    the ``'adhoc'`` key which generates a new cert on each server start.
    It accepts a path for where it should store the key and cert and
    either a host or CN.  If a host is given it will use the CN
    ``*.host/CN=host``.
    For more information see :func:`run_simple`.
    .. versionadded:: 0.9
    :param base_path: the path to the certificate and key.  The extension
                      ``.crt`` is added for the certificate, ``.key`` is
                      added for the key.
    :param host: the name of the host.  This can be used as an alternative
                 for the `cn`.
    :param cn: the `CN` to use.
    """
    from OpenSSL import crypto
    if host is not None:
        cn = '*.%s/CN=%s' % (host, host)
    cert, pkey = generate_adhoc_ssl_pair(cn=cn)
    cert_file = base_path + '.crt'
    pkey_file = base_path + '.key'
    # BUG FIX: pyOpenSSL's dump_certificate/dump_privatekey return bytes on
    # Python 3, so the files must be opened in binary mode ('wb'); this is
    # harmless on Python 2 where they return (byte) str.
    with open(cert_file, 'wb') as f:
        f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    with open(pkey_file, 'wb') as f:
        f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
    return cert_file, pkey_file
def generate_adhoc_ssl_context():
    """Generates an adhoc SSL context for the development server."""
    from OpenSSL import SSL
    # A fresh throwaway self-signed pair on every call; browsers will warn.
    cert, pkey = generate_adhoc_ssl_pair()
    ctx = SSL.Context(SSL.SSLv23_METHOD)
    ctx.use_privatekey(pkey)
    ctx.use_certificate(cert)
    return ctx
def load_ssl_context(cert_file, pkey_file):
    """Loads an SSL context from a certificate and private key file.

    :param cert_file: path to a PEM-encoded certificate file.
    :param pkey_file: path to the matching PEM-encoded private key file.
    """
    from OpenSSL import SSL
    ctx = SSL.Context(SSL.SSLv23_METHOD)
    ctx.use_certificate_file(cert_file)
    ctx.use_privatekey_file(pkey_file)
    return ctx
def is_ssl_error(error=None):
    """Checks if the given error (or the current one) is an SSL error."""
    from OpenSSL import SSL
    # Default to the exception currently being handled.
    exc = error if error is not None else sys.exc_info()[1]
    return isinstance(exc, SSL.Error)
class _SSLConnectionFix(object):
    """Wrapper around SSL connection to provide a working makefile()."""
    def __init__(self, con):
        self._con = con
    def makefile(self, mode, bufsize):
        # NOTE(review): socket._fileobject is a private Python 2 API that no
        # longer exists on Python 3; this wrapper presumably only runs on the
        # Python 2 / pyOpenSSL tsafe code path -- confirm before relying on it.
        return socket._fileobject(self._con, mode, bufsize)
    def __getattr__(self, attrib):
        # Everything else is delegated to the wrapped connection.
        return getattr(self._con, attrib)
    def shutdown(self, arg=None):
        # pyOpenSSL's shutdown() takes no "how" argument and may raise while
        # the peer is tearing down; swallow errors to mimic socket.shutdown.
        try:
            self._con.shutdown()
        except Exception:
            pass
def select_ip_version(host, port):
    """Returns AF_INET4 or AF_INET6 depending on where to connect to."""
    # getaddrinfo-based autodetection was disabled historically because it
    # misbehaved across operating systems / ipv6 stacks; instead we only
    # special-case hosts containing ':' (literal IPv6 addresses).
    # ``port`` is accepted for signature compatibility but not consulted.
    if hasattr(socket, 'AF_INET6') and ':' in host:
        return socket.AF_INET6
    return socket.AF_INET
class BaseWSGIServer(HTTPServer, object):
    """Simple single-threaded, single-process WSGI server."""
    multithread = False
    multiprocess = False
    # Generous backlog of pending connections for the listening socket.
    request_queue_size = 128
    def __init__(self, host, port, app, handler=None,
                 passthrough_errors=False, ssl_context=None):
        # handler may be any BaseHTTPRequestHandler subclass; default to the
        # WSGI-dispatching handler defined above.
        if handler is None:
            handler = WSGIRequestHandler
        self.address_family = select_ip_version(host, port)
        HTTPServer.__init__(self, (host, int(port)), handler)
        self.app = app
        self.passthrough_errors = passthrough_errors
        self.shutdown_signal = False
        if ssl_context is not None:
            try:
                from OpenSSL import tsafe
            except ImportError:
                raise TypeError('SSL is not available if the OpenSSL '
                                'library is not installed.')
            # ssl_context may be a ready OpenSSL context, a
            # (cert_file, pkey_file) tuple, or the string 'adhoc' for a
            # freshly generated throwaway certificate.
            if isinstance(ssl_context, tuple):
                ssl_context = load_ssl_context(*ssl_context)
            if ssl_context == 'adhoc':
                ssl_context = generate_adhoc_ssl_context()
            self.socket = tsafe.Connection(ssl_context, self.socket)
            self.ssl_context = ssl_context
        else:
            self.ssl_context = None
    def log(self, type, message, *args):
        _log(type, message, *args)
    def serve_forever(self):
        # Reset so a previous shutdown request doesn't stop us immediately;
        # Ctrl-C exits the loop quietly.
        self.shutdown_signal = False
        try:
            HTTPServer.serve_forever(self)
        except KeyboardInterrupt:
            pass
    def handle_error(self, request, client_address):
        # With passthrough_errors, let exceptions propagate (e.g. so an
        # attached debugger can catch them) instead of logging a traceback.
        if self.passthrough_errors:
            raise
        else:
            return HTTPServer.handle_error(self, request, client_address)
    def get_request(self):
        con, info = self.socket.accept()
        if self.ssl_context is not None:
            # Wrap pyOpenSSL connections so makefile()/shutdown() behave.
            con = _SSLConnectionFix(con)
        return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
    """A WSGI server that does threading."""
    # ThreadingMixIn serves each request in a new thread; advertise that to
    # applications via environ['wsgi.multithread'].
    multithread = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
    """A WSGI server that does forking."""
    multiprocess = True
    def __init__(self, host, port, app, processes=40, handler=None,
                 passthrough_errors=False, ssl_context=None):
        BaseWSGIServer.__init__(self, host, port, app, handler,
                                passthrough_errors, ssl_context)
        # Upper bound on concurrent child processes (read by ForkingMixIn).
        self.max_children = processes
def make_server(host, port, app=None, threaded=False, processes=1,
                request_handler=None, passthrough_errors=False,
                ssl_context=None):
    """Create a new server instance that is either threaded, or forks
    or just processes one request after another.
    """
    # The two concurrency models are mutually exclusive.
    if threaded and processes > 1:
        raise ValueError("cannot have a multithreaded and "
                         "multi process server.")
    # Dispatch on the requested model; the default is the plain
    # single-threaded, single-process server.
    if threaded:
        return ThreadedWSGIServer(host, port, app, request_handler,
                                  passthrough_errors, ssl_context)
    if processes > 1:
        return ForkingWSGIServer(host, port, app, processes, request_handler,
                                 passthrough_errors, ssl_context)
    return BaseWSGIServer(host, port, app, request_handler,
                          passthrough_errors, ssl_context)
def _iter_module_files():
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
def _reloader_stat_loop(extra_files=None, interval=1):
    """When this function is run from the main thread, it will force other
    threads to exit when any modules currently loaded change.
    Copyright notice.  This function is based on the autoreload.py from
    the CherryPy trac which originated from WSGIKit which is now dead.
    :param extra_files: a list of additional files it should watch.
    """
    from itertools import chain
    mtimes = {}
    while 1:
        for filename in chain(_iter_module_files(), extra_files or ()):
            try:
                mtime = os.stat(filename).st_mtime
            except OSError:
                # File vanished or is unreadable; skip it this round.
                continue
            old_time = mtimes.get(filename)
            if old_time is None:
                # First sighting: record the baseline mtime, don't reload.
                mtimes[filename] = mtime
                continue
            elif mtime > old_time:
                _log('info', ' * Detected change in %r, reloading' % filename)
                # Exit code 3 tells restart_with_reloader() to respawn us.
                sys.exit(3)
        time.sleep(interval)
def _reloader_inotify(extra_files=None, interval=None):
    """Event-driven reloader loop backed by pyinotify; exits the process
    with code 3 once any watched file changes.  Unused by default -- see
    the ``reloader_loop`` assignment below.
    """
    # Mutated by inotify loop when changes occur.
    changed = [False]
    # Setup inotify watches
    from pyinotify import WatchManager, Notifier
    # this API changed at one point, support both
    try:
        from pyinotify import EventsCodes as ec
        ec.IN_ATTRIB
    except (ImportError, AttributeError):
        import pyinotify as ec
    wm = WatchManager()
    mask = ec.IN_DELETE_SELF | ec.IN_MOVE_SELF | ec.IN_MODIFY | ec.IN_ATTRIB
    def signal_changed(event):
        # Only the first change matters; later events are ignored.
        if changed[0]:
            return
        _log('info', ' * Detected change in %r, reloading' % event.path)
        changed[:] = [True]
    for fname in extra_files or ():
        wm.add_watch(fname, mask, signal_changed)
    # ... And now we wait...
    notif = Notifier(wm)
    try:
        while not changed[0]:
            # always reiterate through sys.modules, adding them
            for fname in _iter_module_files():
                wm.add_watch(fname, mask, signal_changed)
            notif.process_events()
            if notif.check_events(timeout=interval):
                notif.read_events()
            # TODO Set timeout to something small and check parent liveliness
    finally:
        notif.stop()
    # Exit code 3 tells restart_with_reloader() to respawn the process.
    sys.exit(3)
# currently we always use the stat loop reloader for the simple reason
# that the inotify one does not respond to added files properly.  Also
# it's quite buggy and the API is a mess.
reloader_loop = _reloader_stat_loop
def restart_with_reloader():
    """Spawn a new Python interpreter with the same arguments as this one,
    but running the reloader thread.
    """
    while 1:
        _log('info', ' * Restarting with reloader')
        args = [sys.executable] + sys.argv
        new_environ = os.environ.copy()
        # The child checks this flag to know it should run the actual app
        # plus the reloader loop instead of respawning again.
        new_environ['WERKZEUG_RUN_MAIN'] = 'true'
        # a weird bug on windows. sometimes unicode strings end up in the
        # environment and subprocess.call does not like this, encode them
        # to latin1 and continue.
        if os.name == 'nt' and PY2:
            for key, value in iteritems(new_environ):
                if isinstance(value, text_type):
                    new_environ[key] = value.encode('iso-8859-1')
        exit_code = subprocess.call(args, env=new_environ)
        # Exit code 3 means "source changed, restart"; anything else is a
        # real exit and is propagated to the caller.
        if exit_code != 3:
            return exit_code
def run_with_reloader(main_func, extra_files=None, interval=1):
    """Run the given function in an independent python interpreter."""
    import signal
    # Make SIGTERM exit cleanly so cleanup handlers run.
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
        # Child process: run the server in a background thread while the
        # main thread watches files and exits with code 3 on change.
        thread.start_new_thread(main_func, ())
        try:
            reloader_loop(extra_files, interval)
        except KeyboardInterrupt:
            return
    # Parent process: keep respawning the child until it exits for real.
    try:
        sys.exit(restart_with_reloader())
    except KeyboardInterrupt:
        pass
def run_simple(hostname, port, application, use_reloader=False,
               use_debugger=False, use_evalex=True,
               extra_files=None, reloader_interval=1, threaded=False,
               processes=1, request_handler=None, static_files=None,
               passthrough_errors=False, ssl_context=None):
    """Start an application using wsgiref and with an optional reloader.  This
    wraps `wsgiref` to fix the wrong default reporting of the multithreaded
    WSGI variable and adds optional multithreading and fork support.
    This function has a command-line interface too::
        python -m werkzeug.serving --help
    .. versionadded:: 0.5
       `static_files` was added to simplify serving of static files as well
       as `passthrough_errors`.
    .. versionadded:: 0.6
       support for SSL was added.
    .. versionadded:: 0.8
       Added support for automatically loading a SSL context from certificate
       file and private key.
    .. versionadded:: 0.9
       Added command-line interface.
    :param hostname: The host for the application.  eg: ``'localhost'``
    :param port: The port for the server.  eg: ``8080``
    :param application: the WSGI application to execute
    :param use_reloader: should the server automatically restart the python
                         process if modules were changed?
    :param use_debugger: should the werkzeug debugging system be used?
    :param use_evalex: should the exception evaluation feature be enabled?
    :param extra_files: a list of files the reloader should watch
                        additionally to the modules.  For example configuration
                        files.
    :param reloader_interval: the interval for the reloader in seconds.
    :param threaded: should the process handle each request in a separate
                     thread?
    :param processes: if greater than 1 then handle each request in a new process
                      up to this maximum number of concurrent processes.
    :param request_handler: optional parameter that can be used to replace
                            the default one.  You can use this to replace it
                            with a different
                            :class:`~BaseHTTPServer.BaseHTTPRequestHandler`
                            subclass.
    :param static_files: a dict of paths for static files.  This works exactly
                         like :class:`SharedDataMiddleware`, it's actually
                         just wrapping the application in that middleware before
                         serving.
    :param passthrough_errors: set this to `True` to disable the error catching.
                               This means that the server will die on errors but
                               it can be useful to hook debuggers in (pdb etc.)
    :param ssl_context: an SSL context for the connection. Either an OpenSSL
                        context, a tuple in the form ``(cert_file, pkey_file)``,
                        the string ``'adhoc'`` if the server should
                        automatically create one, or `None` to disable SSL
                        (which is the default).
    """
    # Optional middleware wrapping: debugger first, then static file serving.
    if use_debugger:
        from werkzeug.debug import DebuggedApplication
        application = DebuggedApplication(application, use_evalex)
    if static_files:
        from werkzeug.wsgi import SharedDataMiddleware
        application = SharedDataMiddleware(application, static_files)
    def inner():
        # Build and run the actual server; factored out so the reloader can
        # invoke it inside the child process.
        make_server(hostname, port, application, threaded,
                    processes, request_handler,
                    passthrough_errors, ssl_context).serve_forever()
    # Only the original (parent) process prints the startup banner.
    if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
        display_hostname = hostname != '*' and hostname or 'localhost'
        if ':' in display_hostname:
            display_hostname = '[%s]' % display_hostname
        _log('info', ' * Running on %s://%s:%d/', ssl_context is None
             and 'http' or 'https', display_hostname, port)
    if use_reloader:
        # Create and destroy a socket so that any exceptions are raised before
        # we spawn a separate Python interpreter and lose this ability.
        address_family = select_ip_version(hostname, port)
        test_socket = socket.socket(address_family, socket.SOCK_STREAM)
        test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        test_socket.bind((hostname, port))
        test_socket.close()
        run_with_reloader(inner, extra_files, reloader_interval)
    else:
        inner()
def main():
    '''A simple command-line interface for :py:func:`run_simple`.'''
    # in contrast to argparse, this works at least under Python < 2.7
    import optparse
    from werkzeug.utils import import_string
    parser = optparse.OptionParser(usage='Usage: %prog [options] app_module:app_object')
    parser.add_option('-b', '--bind', dest='address',
                      help='The hostname:port the app should listen on.')
    parser.add_option('-d', '--debug', dest='use_debugger',
                      action='store_true', default=False,
                      help='Use Werkzeug\'s debugger.')
    parser.add_option('-r', '--reload', dest='use_reloader',
                      action='store_true', default=False,
                      help='Reload Python process if modules change.')
    options, args = parser.parse_args()
    hostname, port = None, None
    if options.address:
        # Split an optional "host:port"; a bare host leaves port as None.
        address = options.address.split(':')
        hostname = address[0]
        if len(address) > 1:
            port = address[1]
    if len(args) != 1:
        sys.stdout.write('No application supplied, or too much. See --help\n')
        sys.exit(1)
    # Import the "module:object" dotted path given on the command line.
    app = import_string(args[0])
    # Fall back to 127.0.0.1:5000 when no/partial address was given.
    run_simple(
        hostname=(hostname or '127.0.0.1'), port=int(port or 5000),
        application=app, use_reloader=options.use_reloader,
        use_debugger=options.use_debugger
    )
if __name__ == '__main__':
    main()
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-3.0/Lib/lib2to3/tests/test_parser.py | 1 | #!/usr/bin/env python2.5
"""Test suite for 2to3's parser and grammar files.
This is the place to add tests for changes to 2to3's grammar, such as those
merging the grammars for Python 2 and 3. In addition to specific tests for
parts of the grammar we've changed, we also make sure we can parse the
test_grammar.py files from both Python 2 and Python 3.
"""
# Author: Collin Winter
# Testing imports
from . import support
from .support import driver, test_dir
# Python imports
import os
import os.path
# Local imports
from ..pgen2.parse import ParseError
class GrammarTest(support.TestCase):
    def validate(self, code):
        """Parse *code*; any parse failure propagates as a test error."""
        support.parse_string(code)
    def invalid_syntax(self, code):
        """Assert that parsing *code* raises :class:`ParseError`."""
        parsed_ok = False
        try:
            self.validate(code)
            parsed_ok = True
        except ParseError:
            pass
        if parsed_ok:
            raise AssertionError("Syntax shouldn't have been valid")
class TestRaiseChanges(GrammarTest):
    """The merged grammar accepts both 2.x and 3.x ``raise`` forms but
    rejects statements that mix the two styles."""
    def test_2x_style_1(self):
        self.validate("raise")
    def test_2x_style_2(self):
        self.validate("raise E, V")
    def test_2x_style_3(self):
        self.validate("raise E, V, T")
    def test_2x_style_invalid_1(self):
        self.invalid_syntax("raise E, V, T, Z")
    def test_3x_style(self):
        self.validate("raise E1 from E2")
    def test_3x_style_invalid_1(self):
        self.invalid_syntax("raise E, V from E1")
    def test_3x_style_invalid_2(self):
        self.invalid_syntax("raise E from E1, E2")
    def test_3x_style_invalid_3(self):
        self.invalid_syntax("raise from E1, E2")
    def test_3x_style_invalid_4(self):
        self.invalid_syntax("raise E from")
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
class TestFunctionAnnotations(GrammarTest):
    """Python 3 function annotations, also combined with 2.x-only tuple
    parameters -- both must parse under the merged grammar."""
    def test_1(self):
        self.validate("""def f(x) -> list: pass""")
    def test_2(self):
        self.validate("""def f(x:int): pass""")
    def test_3(self):
        self.validate("""def f(*x:str): pass""")
    def test_4(self):
        self.validate("""def f(**x:float): pass""")
    def test_5(self):
        self.validate("""def f(x, y:1+2): pass""")
    def test_6(self):
        self.validate("""def f(a, (b:1, c:2, d)): pass""")
    def test_7(self):
        self.validate("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""")
    def test_8(self):
        s = """def f(a, (b:1, c:2, d), e:3=4, f=5,
                *g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
        self.validate(s)
class TestExcept(GrammarTest):
    """Both ``except E as N`` (3.x) and ``except E, N`` (2.x) must parse."""
    def test_new(self):
        s = """
            try:
                x
            except E as N:
                y"""
        self.validate(s)
    def test_old(self):
        s = """
            try:
                x
            except E, N:
                y"""
        self.validate(s)
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
class TestSetLiteral(GrammarTest):
    """Set literal syntax, with and without trailing commas."""
    def test_1(self):
        self.validate("""x = {'one'}""")
    def test_2(self):
        self.validate("""x = {'one', 1,}""")
    def test_3(self):
        self.validate("""x = {'one', 'two', 'three'}""")
    def test_4(self):
        self.validate("""x = {2, 3, 4,}""")
class TestNumericLiterals(GrammarTest):
    """Python 3 ``0o`` octal and ``0b`` binary literals; out-of-range
    digits must be rejected."""
    def test_new_octal_notation(self):
        self.validate("""0o7777777777777""")
        self.invalid_syntax("""0o7324528887""")
    def test_new_binary_notation(self):
        self.validate("""0b101010""")
        self.invalid_syntax("""0b0101021""")
class TestClassDef(GrammarTest):
    """Python 3 class headers with keyword arguments, *args and **kwargs."""
    def test_new_syntax(self):
        self.validate("class B(t=7): pass")
        self.validate("class B(t, *args): pass")
        self.validate("class B(t, **kwargs): pass")
        self.validate("class B(t, *args, **kwargs): pass")
        self.validate("class B(t, y=9, *args, **kwargs): pass")
class TestParserIdempotency(support.TestCase):
    """A cut-down version of pytree_idempotency.py."""
    def test_all_project_files(self):
        # Round-trip every project file through the parser; printing the
        # resulting tree must reproduce the source exactly (diff() == 0).
        for filepath in support.all_project_files():
            print("Parsing %s..." % filepath)
            tree = driver.parse_file(filepath, debug=True)
            if diff(filepath, tree):
                self.fail("Idempotency failed: %s" % filepath)
class TestLiterals(GrammarTest):
    """Multi-line bytes/str literals with escapes and implicit
    string concatenation."""
    def test_multiline_bytes_literals(self):
        s = """
            md5test(b"\xaa" * 80,
                    (b"Test Using Larger Than Block-Size Key "
                     b"and Larger Than One Block-Size Data"),
                    "6f630fad67cda0ee1fb1f562db3aa53e")
            """
        self.validate(s)
    def test_multiline_bytes_tripquote_literals(self):
        s = '''
            b"""
            <?xml version="1.0" encoding="UTF-8"?>
            <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN">
            """
            '''
        self.validate(s)
    def test_multiline_str_literals(self):
        s = """
            md5test("\xaa" * 80,
                    ("Test Using Larger Than Block-Size Key "
                     "and Larger Than One Block-Size Data"),
                    "6f630fad67cda0ee1fb1f562db3aa53e")
            """
        self.validate(s)
def diff(fn, tree):
    """Compare the printed form of *tree* against the file *fn*.

    Writes ``str(tree)`` to a temporary file and shells out to
    ``diff -u``.  Returns the exit status of ``diff`` (0 when the
    two are identical), matching the original contract.
    """
    import tempfile
    # BUG FIX: previously this wrote to a literal file named "@" in the
    # current directory, which raced between concurrent test runs and could
    # clobber a real file; a private temp file avoids both problems.
    fd, path = tempfile.mkstemp(suffix=".py")
    try:
        f = os.fdopen(fd, "w")
        try:
            f.write(str(tree))
        finally:
            f.close()
        # Quote both paths so filenames with spaces don't break the command.
        return os.system('diff -u "%s" "%s"' % (fn, path))
    finally:
        os.remove(path)
if __name__ == "__main__":
    # Running this file directly delegates to lib2to3's shared test runner.
    import __main__
    support.run_all_tests(__main__)
|
kunaltyagi/nsiqcppstyle | refs/heads/master | rules/RULE_5_2_C_provide_doxygen_struct_comment_on_struct_def.py | 1 | """
Provide the struct/union doxygen comment.
It checks if there is a doxygen style comment in front of each struct/union definition.
== Violation ==
struct A { <== Violation. No doxygen comment.
};
/* <== Violation. It's not doxygen comment
*
*/
union B {
};
== Good ==
/**
* blar blar
*/
struct A { <== OK
};
struct A; <== Don't care. It's forward decl.
"""
from nsiqunittest.nsiqcppstyle_unittestbase import *
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, currentType, fullName, decl, contextStack, context):
    """Report a violation when a struct/union definition lacks a doxygen comment.

    Only actual definitions are checked (``decl`` false and a body context
    present); forward declarations are ignored.  NOTE(review): the lexer and
    token semantics below are assumed from the nsiqcppstyle API -- confirm
    against its documentation.
    """
    if not decl and currentType in ("STRUCT", "UNION") and context is not None:
        t = lexer.GetCurToken()
        lexer.PushTokenIndex()
        t2 = lexer.GetPrevTokenInType("COMMENT")
        lexer.PopTokenIndex()
        lexer.PushTokenIndex()
        t3 = lexer.GetPrevTokenInTypeList(
            ["SEMI", "PREPROCESSOR", "LBRACE"], False, True)
        lexer.PopTokenIndex()
        # A doxygen comment only counts if it appears after the most recent
        # statement/brace boundary, i.e. directly in front of this definition.
        if t2 is not None and t2.additional == "DOXYGEN":
            if t3 is None or t2.lexpos > t3.lexpos:
                return
        nsiqcppstyle_reporter.Error(
            t, __name__, "Doxygen Comment should be provided in front of struct/union def(%s)." % fullName)
# Register the rule so it fires on every type-name token the checker visits.
ruleManager.AddTypeNameRule(RunRule)
##########################################################################
# Unit Test
##########################################################################
class testRule(nct):
    """Unit tests: a doxygen comment (/** ... */) must precede each
    struct/union definition; forward declarations are exempt."""
    def setUpRule(self):
        ruleManager.AddTypeNameRule(RunRule)
    def test1(self):
        # No comment at all -> violation.
        self.Analyze("thisfile.c",
                     """
struct A {
}
""")
        self.ExpectError(__name__)
    def test2(self):
        # Plain C comment (not doxygen style) -> violation.
        self.Analyze("thisfile.c",
                     """
/*
*/
struct K {
}
""")
        self.ExpectError(__name__)
    def test3(self):
        # Doxygen comment covers the outer struct only; nested T lacks one.
        self.Analyze("thisfile.c",
                     """
/**
*/
struct K {
struct T {
}
}
""")
        self.ExpectError(__name__)
    def test4(self):
        # Outer and nested structs both documented; "class T;" is only a
        # forward declaration and is ignored -> success expected.
        self.Analyze("thisfile.c",
                     """
/**
 *
 */
struct J {
int k;
/**
*/
struct T {
}
}
class T;
""")
        self.ExpectSuccess(__name__)
    def test5(self):
        # Non-doxygen block comment directly before the struct -> violation.
        self.Analyze("thisfile.c",
                     """
/*
*/
struct K {
}
""")
        self.ExpectError(__name__)
    def test6(self):
        # Anonymous typedef struct without a doxygen comment -> violation.
        self.Analyze("thisfile.c",
                     """
typedef struct {
} K
""")
        self.ExpectError(__name__)
|
stevejb71/Idris-dev | refs/heads/master | docs/conf.py | 21 | # -*- coding: utf-8 -*-
#
# Idris Manual documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 28 20:41:47 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# True if the readthedocs theme is locally installed
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Idris'
copyright = u'2015, The Idris Community'
author = u'The Idris Community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.18'
# The full version, including alpha/beta/rc tags.
release = '0.9.18'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
# Requires 'sphinx.ext.todo' (enabled above in `extensions`).
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# NOTE(review): `on_rtd` is not defined in this section; it is presumably set
# earlier in this file from the READTHEDOCS environment variable -- verify.
if not on_rtd:  # only import and set the theme if we're building docs locally
    try:
        import sphinx_rtd_theme
        html_theme = 'sphinx_rtd_theme'
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    except ImportError:
        # Fall back to the builtin theme when the RTD theme is not installed.
        html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'IdrisManualdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    'papersize': 'a4paper',
    # The font size ('10pt', '11pt' or '12pt').
    'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# One PDF is produced per Idris manual listed below.
latex_documents = [
    ('guides/index', 'idris-guides.tex', u'Idris Tutorial Series', u'The Idris Community', 'manual'),
    ('reference/index', 'idris-reference.tex', u'The Idris Reference', u'The Idris Community', 'manual'),
    ('tutorial/index', 'idris-tutorial.tex', u'The Idris Tutorial', u'The Idris Community', 'manual'),
    ('effects/index', 'eff-tutorial.tex', u'The Effects Tutorial', u'The Idris Community', 'manual')
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = True
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1 = user commands.
man_pages = [
    (master_doc, 'idrismanual', u'Idris Manual Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the description below is still the sphinx-quickstart
# placeholder text ("One line description of project.") -- replace it.
texinfo_documents = [
    (master_doc, 'IdrisManual', u'Idris Manual Documentation',
     author, 'IdrisManual', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info, reusing the values declared above.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file
# (the JS-based search page is useless in an epub reader).
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
jrg365/gpytorch | refs/heads/master | gpytorch/likelihoods/gaussian_likelihood.py | 1 | #!/usr/bin/env python3
import math
import warnings
from copy import deepcopy
from typing import Any, Optional
import torch
from torch import Tensor
from ..distributions import MultivariateNormal, base_distributions
from ..lazy import ZeroLazyTensor
from ..utils.warnings import GPInputWarning
from .likelihood import Likelihood
from .noise_models import FixedGaussianNoise, HomoskedasticNoise, Noise
class _GaussianLikelihoodBase(Likelihood):
    """Base class for Gaussian Likelihoods, supporting general heteroskedastic noise models.

    Subclasses supply a noise model (``noise_covar``) that produces the
    observation-noise covariance for a requested shape.
    """

    def __init__(self, noise_covar: Noise, **kwargs: Any) -> None:
        super().__init__()
        # `param_transform` predates noise constraints; warn users still passing it.
        param_transform = kwargs.get("param_transform")
        if param_transform is not None:
            warnings.warn(
                "The 'param_transform' argument is now deprecated. If you want to use a different "
                "transformation, specify a different 'noise_constraint' instead.",
                DeprecationWarning,
            )
        self.noise_covar = noise_covar

    def _shaped_noise_covar(self, base_shape: torch.Size, *params: Any, **kwargs: Any):
        # Delegate to the noise model, which produces noise of shape `base_shape`.
        return self.noise_covar(*params, shape=base_shape, **kwargs)

    def expected_log_prob(self, target: Tensor, input: MultivariateNormal, *params: Any, **kwargs: Any) -> Tensor:
        """Compute E_q[log p(y | f)] in closed form:
        -0.5 * [((y - mu)^2 + var) / noise + log(noise) + log(2*pi)].
        """
        mean, variance = input.mean, input.variance
        num_event_dim = len(input.event_shape)

        noise = self._shaped_noise_covar(mean.shape, *params, **kwargs).diag()
        # Potentially reshape the noise to deal with the multitask case
        noise = noise.view(*noise.shape[:-1], *input.event_shape)

        res = ((target - mean) ** 2 + variance) / noise + noise.log() + math.log(2 * math.pi)
        res = res.mul(-0.5)
        if num_event_dim > 1:  # Do appropriate summation for multitask Gaussian likelihoods
            res = res.sum(list(range(-1, -num_event_dim, -1)))
        return res

    def forward(self, function_samples: Tensor, *params: Any, **kwargs: Any) -> base_distributions.Normal:
        """Return p(y | f): a conditionally independent Normal around the samples."""
        noise = self._shaped_noise_covar(function_samples.shape, *params, **kwargs).diag()
        return base_distributions.Normal(function_samples, noise.sqrt())

    def log_marginal(
        self, observations: Tensor, function_dist: MultivariateNormal, *params: Any, **kwargs: Any
    ) -> Tensor:
        """Log probability of `observations` under the marginal distribution p(y)."""
        marginal = self.marginal(function_dist, *params, **kwargs)
        # We're making everything conditionally independent; clamp avoids sqrt(0).
        indep_dist = base_distributions.Normal(marginal.mean, marginal.variance.clamp_min(1e-8).sqrt())
        res = indep_dist.log_prob(observations)

        # Do appropriate summation for multitask Gaussian likelihoods
        num_event_dim = len(function_dist.event_shape)
        if num_event_dim > 1:
            res = res.sum(list(range(-1, -num_event_dim, -1)))
        return res

    def marginal(self, function_dist: MultivariateNormal, *params: Any, **kwargs: Any) -> MultivariateNormal:
        """Return the marginal p(y) = N(mean, covar + noise_covar)."""
        mean, covar = function_dist.mean, function_dist.lazy_covariance_matrix
        noise_covar = self._shaped_noise_covar(mean.shape, *params, **kwargs)
        full_covar = covar + noise_covar
        return function_dist.__class__(mean, full_covar)
class GaussianLikelihood(_GaussianLikelihoodBase):
    """Standard Gaussian likelihood with a single homoskedastic noise parameter.

    Thin convenience wrapper that builds a :class:`HomoskedasticNoise` model
    and exposes its raw and transformed noise values as properties.
    """

    def __init__(self, noise_prior=None, noise_constraint=None, batch_shape=torch.Size(), **kwargs):
        homoskedastic_noise = HomoskedasticNoise(
            noise_prior=noise_prior,
            noise_constraint=noise_constraint,
            batch_shape=batch_shape,
        )
        super().__init__(noise_covar=homoskedastic_noise)

    @property
    def noise(self) -> Tensor:
        """Noise variance reported by the underlying noise model."""
        return self.noise_covar.noise

    @noise.setter
    def noise(self, value: Tensor) -> None:
        self.noise_covar.initialize(noise=value)

    @property
    def raw_noise(self) -> Tensor:
        """The unconstrained parameter underlying :attr:`noise`."""
        return self.noise_covar.raw_noise

    @raw_noise.setter
    def raw_noise(self, value: Tensor) -> None:
        self.noise_covar.initialize(raw_noise=value)
class FixedNoiseGaussianLikelihood(_GaussianLikelihoodBase):
    """
    A Likelihood that assumes fixed heteroscedastic noise. This is useful when you have fixed, known observation
    noise for each training example.

    Args:
        :attr:`noise` (Tensor):
            Known observation noise (variance) for each training example.
        :attr:`learn_additional_noise` (bool, optional):
            Set to true if you additionally want to learn added diagonal noise, similar to GaussianLikelihood.
        :attr:`batch_shape` (torch.Size, optional):
            Batch shape of the additional learned noise parameter (only used
            when `learn_additional_noise=True`).

    Note that this likelihood takes an additional argument when you call it, `noise`, that adds a specified amount
    of noise to the passed MultivariateNormal. This allows for adding known observational noise to test data.

    Example:
        >>> train_x = torch.randn(55, 2)
        >>> noises = torch.ones(55) * 0.01
        >>> likelihood = FixedNoiseGaussianLikelihood(noise=noises, learn_additional_noise=True)
        >>> pred_y = likelihood(gp_model(train_x))
        >>>
        >>> test_x = torch.randn(21, 2)
        >>> test_noises = torch.ones(21) * 0.02
        >>> pred_y = likelihood(gp_model(test_x), noise=test_noises)
    """

    def __init__(
        self,
        noise: Tensor,
        learn_additional_noise: Optional[bool] = False,
        batch_shape: Optional[torch.Size] = torch.Size(),
        **kwargs: Any,
    ) -> None:
        super().__init__(noise_covar=FixedGaussianNoise(noise=noise))
        if learn_additional_noise:
            noise_prior = kwargs.get("noise_prior", None)
            noise_constraint = kwargs.get("noise_constraint", None)
            self.second_noise_covar = HomoskedasticNoise(
                noise_prior=noise_prior, noise_constraint=noise_constraint, batch_shape=batch_shape
            )
        else:
            self.second_noise_covar = None

    @property
    def noise(self) -> Tensor:
        # Total noise: fixed per-point noise plus the learned component (0 if disabled).
        return self.noise_covar.noise + self.second_noise

    @noise.setter
    def noise(self, value: Tensor) -> None:
        self.noise_covar.initialize(noise=value)

    @property
    def second_noise(self) -> Tensor:
        if self.second_noise_covar is None:
            # Keeps the `noise` property additive when no learned noise exists.
            return 0
        else:
            return self.second_noise_covar.noise

    @second_noise.setter
    def second_noise(self, value: Tensor) -> None:
        if self.second_noise_covar is None:
            raise RuntimeError(
                "Attempting to set secondary learned noise for FixedNoiseGaussianLikelihood, "
                "but learn_additional_noise must have been False!"
            )
        self.second_noise_covar.initialize(noise=value)

    def get_fantasy_likelihood(self, **kwargs):
        """Return a deep copy of this likelihood whose fixed noise is extended
        with the `noise` kwarg, for conditioning on fantasy observations.

        Raises:
            RuntimeError: if no `noise` kwarg is supplied.
        """
        if "noise" not in kwargs:
            raise RuntimeError("FixedNoiseGaussianLikelihood.fantasize requires a `noise` kwarg")
        old_noise_covar = self.noise_covar
        # Temporarily detach the fixed-noise module so deepcopy does not clone it.
        # Restore it in a finally block -- the original code left self.noise_covar
        # as None if deepcopy raised.
        self.noise_covar = None
        try:
            fantasy_likelihood = deepcopy(self)
        finally:
            self.noise_covar = old_noise_covar

        old_noise = old_noise_covar.noise
        new_noise = kwargs.get("noise")
        if old_noise.dim() != new_noise.dim():
            # Broadcast the stored noise across the new batch dimensions.
            old_noise = old_noise.expand(*new_noise.shape[:-1], old_noise.shape[-1])
        fantasy_likelihood.noise_covar = FixedGaussianNoise(noise=torch.cat([old_noise, new_noise], -1))
        return fantasy_likelihood

    def _shaped_noise_covar(self, base_shape: torch.Size, *params: Any, **kwargs: Any):
        if len(params) > 0:
            # we can infer the shape from the params
            shape = None
        else:
            # here shape[:-1] is the batch shape requested, and shape[-1] is `n`, the number of points
            shape = base_shape

        res = self.noise_covar(*params, shape=shape, **kwargs)

        if self.second_noise_covar is not None:
            res = res + self.second_noise_covar(*params, shape=shape, **kwargs)
        elif isinstance(res, ZeroLazyTensor):
            warnings.warn(
                "You have passed data through a FixedNoiseGaussianLikelihood that did not match the size "
                "of the fixed noise, *and* you did not specify noise. This is treated as a no-op.",
                GPInputWarning,
            )

        return res
|
kevinmel2000/sl4a | refs/heads/master | python/gdata/src/gdata/apps/groups/service.py | 137 | #!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to manage groups, groups memembers and groups owners.
EmailSettingsService: Set various email settings.
"""
__author__ = 'google-apps-apis@googlegroups.com'
import gdata.apps
import gdata.apps.service
import gdata.service
API_VER = '2.0'

# URL templates for the Groups provisioning feed.  Each '%s' is filled in
# left-to-right with (domain, group_id, member/owner email or start key) as
# appropriate for the template.
BASE_URL = '/a/feeds/group/' + API_VER + '/%s'
GROUP_MEMBER_URL = BASE_URL + '?member=%s'
GROUP_MEMBER_DIRECT_URL = GROUP_MEMBER_URL + '&directOnly=%s'
GROUP_ID_URL = BASE_URL + '/%s'
# Pagination templates: GroupsService._ServiceUrl references these when a
# start key is supplied, but they were missing from this module, causing a
# NameError at runtime.
GROUP_START_URL = BASE_URL + '?start=%s'
MEMBER_URL = BASE_URL + '/%s/member'
MEMBER_ID_URL = MEMBER_URL + '/%s'
MEMBER_START_URL = MEMBER_URL + '?start=%s'
OWNER_URL = BASE_URL + '/%s/owner'
OWNER_ID_URL = OWNER_URL + '/%s'

# Email permission levels for a group.
PERMISSION_OWNER = 'Owner'
PERMISSION_MEMBER = 'Member'
PERMISSION_DOMAIN = 'Domain'
PERMISSION_ANYONE = 'Anyone'
class GroupsService(gdata.apps.service.PropertyService):
  """Client for the Google Apps Groups service."""

  def _ServiceUrl(self, service_type, is_existed, group_id, member_id, owner_email,
                  start_key, direct_only=None, domain=None):
    """Build the feed URI for a 'group', 'member' or 'owner' operation.

    Args:
      service_type: One of 'group', 'member' or 'owner'.
      is_existed: True when addressing an existing entity (its ID goes in the path).
      group_id: The group ID, or '' when not applicable.
      member_id: The member's email address, or ''.
      owner_email: The owner's email address, or ''.
      start_key: Pagination start key, or ''.
      direct_only: Optional boolean restricting results to direct membership.
      domain: Overriding domain; defaults to self.domain.

    Returns:
      The request URI string, or None for an unknown service_type.
    """
    if domain is None:
      domain = self.domain

    if service_type == 'group':
      if group_id != '' and is_existed:
        return GROUP_ID_URL % (domain, group_id)
      if member_id != '':
        if direct_only is not None:
          return GROUP_MEMBER_DIRECT_URL % (domain, member_id,
                                            self._Bool2Str(direct_only))
        else:
          return GROUP_MEMBER_URL % (domain, member_id)
      if start_key != '':
        return GROUP_START_URL % (domain, start_key)
      return BASE_URL % (domain)

    if service_type == 'member':
      if member_id != '' and is_existed:
        return MEMBER_ID_URL % (domain, group_id, member_id)
      if start_key != '':
        return MEMBER_START_URL % (domain, group_id, start_key)
      return MEMBER_URL % (domain, group_id)

    if service_type == 'owner':
      if owner_email != '' and is_existed:
        return OWNER_ID_URL % (domain, group_id, owner_email)
      return OWNER_URL % (domain, group_id)

  def _Bool2Str(self, b):
    """Convert a boolean to 'true'/'false'; None passes through unchanged."""
    if b is None:
      return None
    return str(b is True).lower()

  def _IsExisted(self, uri):
    """Return True if the entity at `uri` exists, False if it does not.

    Any API error other than ENTITY_DOES_NOT_EXIST is re-raised.
    """
    try:
      # The fetched properties are not needed; only success/failure matters.
      self._GetProperties(uri)
      return True
    # Python 3 compatible form (was `except ..., e`, which is Python-2-only).
    except gdata.apps.service.AppsForYourDomainException as e:
      if e.error_code == gdata.apps.service.ENTITY_DOES_NOT_EXIST:
        return False
      # Bare `raise` preserves the original traceback (was `raise e`).
      raise

  def CreateGroup(self, group_id, group_name, description, email_permission):
    """Create a group.

    Args:
      group_id: The ID of the group (e.g. us-sales).
      group_name: The name of the group.
      description: A description of the group
      email_permission: The subscription permission of the group.

    Returns:
      A dict containing the result of the create operation.
    """
    uri = self._ServiceUrl('group', False, group_id, '', '', '', '')
    properties = {}
    properties['groupId'] = group_id
    properties['groupName'] = group_name
    properties['description'] = description
    properties['emailPermission'] = email_permission
    return self._PostProperties(uri, properties)

  def UpdateGroup(self, group_id, group_name, description, email_permission):
    """Update a group's name, description and/or permission.

    Args:
      group_id: The ID of the group (e.g. us-sales).
      group_name: The name of the group.
      description: A description of the group
      email_permission: The subscription permission of the group.

    Returns:
      A dict containing the result of the update operation.
    """
    uri = self._ServiceUrl('group', True, group_id, '', '', '', '')
    properties = {}
    properties['groupId'] = group_id
    properties['groupName'] = group_name
    properties['description'] = description
    properties['emailPermission'] = email_permission
    return self._PutProperties(uri, properties)

  def RetrieveGroup(self, group_id):
    """Retrieve a group based on its ID.

    Args:
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('group', True, group_id, '', '', '', '')
    return self._GetProperties(uri)

  def RetrieveAllGroups(self):
    """Retrieve all groups in the domain.

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('group', True, '', '', '', '', '')
    return self._GetPropertiesList(uri)

  def RetrieveGroups(self, member_id, direct_only=False):
    """Retrieve all groups that belong to the given member_id.

    Args:
      member_id: The member's email address (e.g. member@example.com).
      direct_only: Boolean whether only return groups that this member directly belongs to.

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('group', True, '', member_id, '', '', direct_only)
    return self._GetPropertiesList(uri)

  def DeleteGroup(self, group_id):
    """Delete a group based on its ID.

    Args:
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the delete operation.
    """
    uri = self._ServiceUrl('group', True, group_id, '', '', '', '')
    return self._DeleteProperties(uri)

  def AddMemberToGroup(self, member_id, group_id):
    """Add a member to a group.

    Args:
      member_id: The member's email address (e.g. member@example.com).
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the add operation.
    """
    uri = self._ServiceUrl('member', False, group_id, member_id, '', '', '')
    properties = {}
    properties['memberId'] = member_id
    return self._PostProperties(uri, properties)

  def IsMember(self, member_id, group_id):
    """Check whether the given member already exists in the given group.

    Args:
      member_id: The member's email address (e.g. member@example.com).
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      True if the member exists in the group. False otherwise.
    """
    uri = self._ServiceUrl('member', True, group_id, member_id, '', '', '')
    return self._IsExisted(uri)

  def RetrieveMember(self, member_id, group_id):
    """Retrieve the given member in the given group.

    Args:
      member_id: The member's email address (e.g. member@example.com).
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('member', True, group_id, member_id, '', '', '')
    return self._GetProperties(uri)

  def RetrieveAllMembers(self, group_id):
    """Retrieve all members in the given group.

    Args:
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('member', True, group_id, '', '', '', '')
    return self._GetPropertiesList(uri)

  def RemoveMemberFromGroup(self, member_id, group_id):
    """Remove the given member from the given group.

    Args:
      member_id: The member's email address (e.g. member@example.com).
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the remove operation.
    """
    uri = self._ServiceUrl('member', True, group_id, member_id, '', '', '')
    return self._DeleteProperties(uri)

  def AddOwnerToGroup(self, owner_email, group_id):
    """Add an owner to a group.

    Args:
      owner_email: The email address of a group owner.
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the add operation.
    """
    uri = self._ServiceUrl('owner', False, group_id, '', owner_email, '', '')
    properties = {}
    properties['email'] = owner_email
    return self._PostProperties(uri, properties)

  def IsOwner(self, owner_email, group_id):
    """Check whether the given member is an owner of the given group.

    Args:
      owner_email: The email address of a group owner.
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      True if the member is an owner of the given group. False otherwise.
    """
    uri = self._ServiceUrl('owner', True, group_id, '', owner_email, '', '')
    return self._IsExisted(uri)

  def RetrieveOwner(self, owner_email, group_id):
    """Retrieve the given owner in the given group.

    Args:
      owner_email: The email address of a group owner.
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('owner', True, group_id, '', owner_email, '', '')
    return self._GetProperties(uri)

  def RetrieveAllOwners(self, group_id):
    """Retrieve all owners of the given group.

    Args:
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the retrieve operation.
    """
    uri = self._ServiceUrl('owner', True, group_id, '', '', '', '')
    return self._GetPropertiesList(uri)

  def RemoveOwnerFromGroup(self, owner_email, group_id):
    """Remove the given owner from the given group.

    Args:
      owner_email: The email address of a group owner.
      group_id: The ID of the group (e.g. us-sales).

    Returns:
      A dict containing the result of the remove operation.
    """
    uri = self._ServiceUrl('owner', True, group_id, '', owner_email, '', '')
    return self._DeleteProperties(uri)
|
Thraxis/SickRage | refs/heads/master | lib/sqlalchemy/exc.py | 79 | # sqlalchemy/exc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Exceptions used with SQLAlchemy.
The base exception class is :exc:`.SQLAlchemyError`. Exceptions which are
raised as a result of DBAPI exceptions are all subclasses of
:exc:`.DBAPIError`.
"""
import traceback
class SQLAlchemyError(Exception):
    """Generic error class; root of the SQLAlchemy exception hierarchy."""


class ArgumentError(SQLAlchemyError):
    """Raised when an invalid or conflicting function argument is supplied.

    This error generally corresponds to construction time state errors.

    """


class NoSuchModuleError(ArgumentError):
    """Raised when a dynamically-loaded module (usually a database dialect)
    of a particular name cannot be located."""


class NoForeignKeysError(ArgumentError):
    """Raised when no foreign keys can be located between two selectables
    during a join."""


class AmbiguousForeignKeysError(ArgumentError):
    """Raised when more than one foreign key matching can be located
    between two selectables during a join."""
class CircularDependencyError(SQLAlchemyError):
    """Raised by topological sorts when a circular dependency is detected.

    There are two scenarios where this error occurs:

    * In a Session flush operation, if two objects are mutually dependent
      on each other, they can not be inserted or deleted via INSERT or
      DELETE statements alone; an UPDATE will be needed to post-associate
      or pre-deassociate one of the foreign key constrained values.
      The ``post_update`` flag described at :ref:`post_update` can resolve
      this cycle.

    * In a :meth:`.MetaData.create_all`, :meth:`.MetaData.drop_all`,
      :attr:`.MetaData.sorted_tables` operation, two :class:`.ForeignKey`
      or :class:`.ForeignKeyConstraint` objects mutually refer to each
      other.  Apply the ``use_alter=True`` flag to one or both,
      see :ref:`use_alter`.

    """

    def __init__(self, message, cycles, edges, msg=None):
        # A pre-formatted message (`msg`) wins outright; otherwise the cycle
        # and edge details are appended to the base message.
        if msg is not None:
            full_message = msg
        else:
            full_message = message + " Cycles: %r all edges: %r" % (cycles, edges)
        SQLAlchemyError.__init__(self, full_message)
        self.cycles = cycles
        self.edges = edges

    def __reduce__(self):
        # Pickle support: rebuild with the already-formatted message as `msg`.
        return self.__class__, (None, self.cycles,
                                self.edges, self.args[0])
class CompileError(SQLAlchemyError):
    """Raised when an error occurs during SQL compilation"""


class UnsupportedCompilationError(CompileError):
    """Raised when an operation is not supported by the given compiler.

    .. versionadded:: 0.8.3

    """

    def __init__(self, compiler, element_type):
        # Only a formatted message is kept; compiler/element_type are not stored.
        super(UnsupportedCompilationError, self).__init__(
            "Compiler %r can't render element of type %s" %
            (compiler, element_type))
class IdentifierError(SQLAlchemyError):
    """Raised when a schema name is beyond the max character limit"""


class DisconnectionError(SQLAlchemyError):
    """A disconnect is detected on a raw DB-API connection.

    This error is raised and consumed internally by a connection pool.  It can
    be raised by the :meth:`.PoolEvents.checkout` event so that the host pool
    forces a retry; the exception will be caught three times in a row before
    the pool gives up and raises :class:`~sqlalchemy.exc.InvalidRequestError`
    regarding the connection attempt.

    """


class TimeoutError(SQLAlchemyError):
    """Raised when a connection pool times out on getting a connection."""
    # NOTE(review): shadows the builtin ``TimeoutError`` (Python >= 3.3)
    # within this module's namespace -- import it explicitly from
    # ``sqlalchemy.exc`` to disambiguate.


class InvalidRequestError(SQLAlchemyError):
    """SQLAlchemy was asked to do something it can't do.

    This error generally corresponds to runtime state errors.

    """


class NoInspectionAvailable(InvalidRequestError):
    """A subject passed to :func:`sqlalchemy.inspection.inspect` produced
    no context for inspection."""


class ResourceClosedError(InvalidRequestError):
    """An operation was requested from a connection, cursor, or other
    object that's in a closed state."""


class NoSuchColumnError(KeyError, InvalidRequestError):
    """A nonexistent column is requested from a ``RowProxy``."""


class NoReferenceError(InvalidRequestError):
    """Raised by ``ForeignKey`` to indicate a reference cannot be resolved."""
class NoReferencedTableError(NoReferenceError):
    """Raised by ``ForeignKey`` when the referred ``Table`` cannot be
    located.

    """

    def __init__(self, message, tname):
        NoReferenceError.__init__(self, message)
        # Name of the table that could not be located.
        self.table_name = tname

    def __reduce__(self):
        # Pickle protocol: rebuild from (message, table_name).
        return self.__class__, (self.args[0], self.table_name)


class NoReferencedColumnError(NoReferenceError):
    """Raised by ``ForeignKey`` when the referred ``Column`` cannot be
    located.

    """

    def __init__(self, message, tname, cname):
        NoReferenceError.__init__(self, message)
        # Table and column names that could not be resolved.
        self.table_name = tname
        self.column_name = cname

    def __reduce__(self):
        # Pickle protocol: rebuild from (message, table_name, column_name).
        return self.__class__, (self.args[0], self.table_name,
                                self.column_name)


class NoSuchTableError(InvalidRequestError):
    """Table does not exist or is not visible to a connection."""


class UnboundExecutionError(InvalidRequestError):
    """SQL was attempted without a database connection to execute it on."""
class DontWrapMixin(object):
    """A mixin class which, when applied to a user-defined Exception class,
    will not be wrapped inside of :exc:`.StatementError` if the error is
    emitted within the process of executing a statement.

    E.g.::

        from sqlalchemy.exc import DontWrapMixin

        class MyCustomException(Exception, DontWrapMixin):
            pass

        class MySpecialType(TypeDecorator):
            impl = String

            def process_bind_param(self, value, dialect):
                if value == 'invalid':
                    raise MyCustomException("invalid!")

    """

# Moved to orm.exc; compatibility definition installed by orm import until 0.6
# (the ORM rebinds this name at import time).
UnmappedColumnError = None
class StatementError(SQLAlchemyError):
    """An error occurred during execution of a SQL statement.

    :class:`StatementError` wraps the exception raised during execution,
    and features :attr:`.statement` and :attr:`.params` attributes which
    supply context regarding the specifics of the statement which had an
    issue.  The wrapped exception object is available in the :attr:`.orig`
    attribute.

    """

    statement = None
    """The string SQL statement being invoked when this exception occurred."""

    params = None
    """The parameter list being used when this exception occurred."""

    orig = None
    """The DBAPI exception object."""

    def __init__(self, message, statement, params, orig):
        SQLAlchemyError.__init__(self, message)
        self.statement = statement
        self.params = params
        self.orig = orig
        self.detail = []

    def add_detail(self, msg):
        self.detail.append(msg)

    def __reduce__(self):
        # Pickle support: rebuild from the constructor arguments.
        return self.__class__, (self.args[0], self.statement,
                                self.params, self.orig)

    def __str__(self):
        # Imported locally to avoid a circular import with sqlalchemy.sql.
        from sqlalchemy.sql import util

        details = ["(%s)" % detail for detail in self.detail]
        params_repr = util._repr_params(self.params, 10)
        parts = details + [
            SQLAlchemyError.__str__(self),
            repr(self.statement),
            repr(params_repr),
        ]
        return ' '.join(parts)

    def __unicode__(self):
        # Python 2 compatibility hook; identical to __str__.
        return self.__str__()
class DBAPIError(StatementError):
    """Raised when the execution of a database operation fails.

    Wraps exceptions raised by the DB-API underlying the
    database operation.  Driver-specific implementations of the standard
    DB-API exception types are wrapped by matching sub-types of SQLAlchemy's
    :class:`DBAPIError` when possible.  DB-API's ``Error`` type maps to
    :class:`DBAPIError` in SQLAlchemy, otherwise the names are identical.  Note
    that there is no guarantee that different DB-API implementations will
    raise the same exception type for any given error condition.

    :class:`DBAPIError` features :attr:`~.StatementError.statement`
    and :attr:`~.StatementError.params` attributes which supply context
    regarding the specifics of the statement which had an issue, for the
    typical case when the error was raised within the context of
    emitting a SQL statement.

    The wrapped exception object is available in the
    :attr:`~.StatementError.orig` attribute. Its type and properties are
    DB-API implementation specific.
    """

    @classmethod
    def instance(cls, statement, params,
                 orig,
                 dbapi_base_err,
                 connection_invalidated=False):
        # Factory: wrap *orig* in the most specific exception class.
        # Non-DBAPI errors become a plain StatementError; DBAPI errors are
        # wrapped in the DBAPIError subclass whose name matches the DBAPI
        # exception's class name, when such a subclass exists in this module.

        # Don't ever wrap these, just return them directly as if
        # DBAPIError didn't exist.
        if isinstance(orig, (KeyboardInterrupt, SystemExit, DontWrapMixin)):
            return orig

        if orig is not None:
            # not a DBAPI error, statement is present.
            # raise a StatementError
            if not isinstance(orig, dbapi_base_err) and statement:
                msg = traceback.format_exception_only(
                    orig.__class__, orig)[-1].strip()
                return StatementError(
                    "%s (original cause: %s)" % (str(orig), msg),
                    statement, params, orig
                )

            # Select the wrapper subclass (e.g. OperationalError) by name
            # lookup in this module's namespace.
            name, glob = orig.__class__.__name__, globals()
            if name in glob and issubclass(glob[name], DBAPIError):
                cls = glob[name]

        return cls(statement, params, orig, connection_invalidated)

    def __reduce__(self):
        # Pickle support: rebuild from the constructor arguments.
        return self.__class__, (self.statement, self.params,
                                self.orig, self.connection_invalidated)

    def __init__(self, statement, params, orig, connection_invalidated=False):
        try:
            # The DBAPI exception's own str() may itself fail; guard it so
            # the wrapper can always be constructed.
            text = str(orig)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            text = 'Error in str() of DB-API-generated exception: ' + str(e)
        StatementError.__init__(
            self,
            '(%s) %s' % (orig.__class__.__name__, text),
            statement,
            params,
            orig
        )
        # True when the underlying connection should be discarded from the
        # pool as a result of this error.
        self.connection_invalidated = connection_invalidated
# Wrappers mirroring the standard DB-API 2.0 (PEP 249) exception hierarchy;
# DBAPIError.instance() selects among these by class name.
class InterfaceError(DBAPIError):
    """Wraps a DB-API InterfaceError."""


class DatabaseError(DBAPIError):
    """Wraps a DB-API DatabaseError."""


class DataError(DatabaseError):
    """Wraps a DB-API DataError."""


class OperationalError(DatabaseError):
    """Wraps a DB-API OperationalError."""


class IntegrityError(DatabaseError):
    """Wraps a DB-API IntegrityError."""


class InternalError(DatabaseError):
    """Wraps a DB-API InternalError."""


class ProgrammingError(DatabaseError):
    """Wraps a DB-API ProgrammingError."""


class NotSupportedError(DatabaseError):
    """Wraps a DB-API NotSupportedError."""


# Warnings
class SADeprecationWarning(DeprecationWarning):
    """Issued once per usage of a deprecated API."""


class SAPendingDeprecationWarning(PendingDeprecationWarning):
    """Issued once per usage of a deprecated API."""


class SAWarning(RuntimeWarning):
    """Issued at runtime."""
|
ctmarinas/stgit | refs/heads/master | stgit/commands/series.py | 1 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from stgit.argparse import opt, patch_range
from stgit.commands.common import CmdException, DirectoryHasRepository, parse_patches
from stgit.config import config
from stgit.out import out
__copyright__ = """
Copyright (C) 2005, Catalin Marinas <catalin.marinas@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see http://www.gnu.org/licenses/.
"""
help = 'Print the patch series'
kind = 'stack'
usage = ['[options] [--] [<patch-range>]']
description = """
Show all the patches in the series, or just those in the given range,
ordered from top to bottom.
The applied patches are prefixed with a +++ (except the current patch,
which is prefixed with a +>+), the unapplied patches with a +-+, and
the hidden patches with a +!+.
Empty patches are prefixed with a '0'."""
args = [patch_range('applied_patches', 'unapplied_patches', 'hidden_patches')]
options = [
opt(
'-b',
'--branch',
args=['stg_branches'],
short='Use BRANCH instead of the default branch',
),
opt(
'-a',
'--all',
action='store_true',
short='Show all patches, including the hidden ones',
),
opt('-A', '--applied', action='store_true', short='Show the applied patches only',),
opt(
'-U',
'--unapplied',
action='store_true',
short='Show the unapplied patches only',
),
opt('-H', '--hidden', action='store_true', short='Show the hidden patches only',),
opt(
'-m',
'--missing',
metavar='BRANCH',
args=['stg_branches'],
short='Show patches in BRANCH missing in current',
),
opt(
'-c',
'--count',
action='store_true',
short='Print the number of patches in the series',
),
opt(
'-d',
'--description',
action='store_true',
short='Show a short description for each patch',
),
opt('--author', action='store_true', short='Show the author name for each patch',),
opt(
'-e',
'--empty',
action='store_true',
short='Check whether patches are empty',
long="""
Before the +++, +>+, +-+, and +!+ prefixes, print a column
that contains either +0+ (for empty patches) or a space (for
non-empty patches).""",
),
opt(
'--showbranch',
action='store_true',
short='Append the branch name to the listed patches',
),
opt(
'--noprefix', action='store_true', short='Do not show the patch status prefix',
),
opt(
'-s',
'--short',
action='store_true',
short='List just the patches around the topmost patch',
),
]
directory = DirectoryHasRepository()
def __get_description(stack, patch):
    """Return the first line of *patch*'s commit message."""
    message = stack.patches.get(patch).commit.data.message_str
    return message.strip().split('\n', 1)[0].rstrip()
def __get_author(stack, patch):
    """Extract and return a patch's author name
    """
    cd = stack.patches.get(patch).commit.data
    return cd.author.name
def __render_text(text, effects):
    """Return *text* wrapped in ANSI SGR escape sequences.

    *effects* is a whitespace-separated list of effect names (e.g.
    ``'bright red_foreground'``); names not in the table are silently
    ignored.  The rendition is reset to normal after the text.
    """
    # ANSI SGR (Select Graphic Rendition) parameter codes.
    _effects = {
        'none': 0,
        'bright': 1,
        'dim': 2,
        'black_foreground': 30,
        'red_foreground': 31,
        'green_foreground': 32,
        'yellow_foreground': 33,
        'blue_foreground': 34,
        'magenta_foreground': 35,
        'cyan_foreground': 36,
        'white_foreground': 37,
        'black_background': 40,
        'red_background': 41,
        'green_background': 42,
        # Fixed: yellow background is SGR 43; the previous value (44)
        # duplicated blue_background and rendered yellow as blue.
        'yellow_background': 43,
        'blue_background': 44,
        'magenta_background': 45,
        'cyan_background': 46,
        'white_background': 47,
    }
    start = [str(_effects[e]) for e in effects.split() if e in _effects]
    start = '\033[' + ';'.join(start) + 'm'
    stop = '\033[' + str(_effects['none']) + 'm'
    return ''.join([start, text, stop])
def __print_patch(stack, patch, branch_str, prefix, length, options, effects):
    """Write one patch line to stdout, honouring the display options."""
    if options.noprefix:
        prefix = ''
    elif options.empty:
        marker = '0' if stack.patches.get(patch).is_empty() else ' '
        prefix = marker + prefix

    patch_str = branch_str + patch
    if options.description or options.author:
        # Pad so the ' # ' annotations line up in a single column.
        patch_str = patch_str.ljust(length)

    if options.description:
        line = prefix + patch_str + ' # ' + __get_description(stack, patch)
    elif options.author:
        line = prefix + patch_str + ' # ' + __get_author(stack, patch)
    else:
        line = prefix + patch_str

    if effects and out.isatty:
        out.stdout(__render_text(line, effects))
    else:
        out.stdout(line)
def func(parser, options, args):
    """Show the patch series
    """
    if options.all and options.short:
        raise CmdException('combining --all and --short is meaningless')

    stack = directory.repository.get_stack(options.branch)
    if options.missing:
        # With --missing, list patches of the given branch that are not
        # present in the current one: swap the roles of the two stacks.
        cmp_stack = stack
        stack = directory.repository.get_stack(options.missing)

    # current series patches
    applied = unapplied = hidden = ()
    if options.applied or options.unapplied or options.hidden:
        if options.all:
            raise CmdException('--all cannot be used with --applied/unapplied/hidden')
        if options.applied:
            applied = stack.patchorder.applied
        if options.unapplied:
            unapplied = stack.patchorder.unapplied
        if options.hidden:
            hidden = stack.patchorder.hidden
    elif options.all:
        applied = stack.patchorder.applied
        unapplied = stack.patchorder.unapplied
        hidden = stack.patchorder.hidden
    else:
        # Default view: applied and unapplied, but not hidden.
        applied = stack.patchorder.applied
        unapplied = stack.patchorder.unapplied

    if options.missing:
        cmp_patches = cmp_stack.patchorder.all
    else:
        cmp_patches = ()

    # the filtering range covers the whole series
    if args:
        show_patches = parse_patches(args, applied + unapplied + hidden, len(applied))
    else:
        show_patches = applied + unapplied + hidden

    # missing filtering
    show_patches = [p for p in show_patches if p not in cmp_patches]

    # filter the patches
    applied = [p for p in applied if p in show_patches]
    unapplied = [p for p in unapplied if p in show_patches]
    hidden = [p for p in hidden if p in show_patches]

    if options.short:
        # Keep only stgit.shortnr patches around the topmost patch.
        nr = int(config.get('stgit.shortnr'))
        if len(applied) > nr:
            applied = applied[-(nr + 1) :]
        n = len(unapplied)
        if n > nr:
            unapplied = unapplied[:nr]
        elif n < nr:
            hidden = hidden[: nr - n]

    patches = applied + unapplied + hidden

    if options.count:
        out.stdout(len(patches))
        return

    if not patches:
        return

    if options.showbranch:
        branch_str = stack.name + ':'
    else:
        branch_str = ''

    # Longest displayed name (branch prefix included), used to align the
    # description/author column.
    max_len = len(branch_str) + max(len(p) for p in patches)

    if applied:
        for p in applied[:-1]:
            __print_patch(
                stack,
                p,
                branch_str,
                '+ ',
                max_len,
                options,
                config.get("stgit.color.applied"),
            )
        # The topmost (current) patch gets its own marker and color.
        __print_patch(
            stack,
            applied[-1],
            branch_str,
            '> ',
            max_len,
            options,
            config.get("stgit.color.current"),
        )

    for p in unapplied:
        __print_patch(
            stack,
            p,
            branch_str,
            '- ',
            max_len,
            options,
            config.get("stgit.color.unapplied"),
        )

    for p in hidden:
        __print_patch(
            stack,
            p,
            branch_str,
            '! ',
            max_len,
            options,
            config.get("stgit.color.hidden"),
        )
|
bertrandvidal/stuff | refs/heads/master | djangoprojects/django_rest_framework/tutorial/snippets/permissions.py | 21 | from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
    """
    Custom permission to only allow owners of an object to edit it.
    """

    def has_object_permission(self, request, view, obj):
        # Safe (read-only) methods -- GET, HEAD, OPTIONS -- are always
        # allowed; anything else is a write, restricted to the owner.
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.owner == request.user
|
jmwright/cadquery-x | refs/heads/master | gui/libs/future/backports/_markupbase.py | 84 | """Shared support for scanning document type declarations in HTML and XHTML.
Backported for python-future from Python 3.3. Reason: ParserBase is an
old-style class in the Python 2.7 source of markupbase.py, which I suspect
might be the cause of sporadic unit-test failures on travis-ci.org with
test_htmlparser.py. The test failures look like this:
======================================================================
ERROR: test_attr_entity_replacement (future.tests.test_htmlparser.AttributesStrictTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 661, in test_attr_entity_replacement
[("starttag", "a", [("b", "&><\"'")])])
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 93, in _run_check
collector = self.get_collector()
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 617, in get_collector
return EventCollector(strict=True)
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 27, in __init__
html.parser.HTMLParser.__init__(self, *args, **kw)
File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 135, in __init__
self.reset()
File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 143, in reset
_markupbase.ParserBase.reset(self)
TypeError: unbound method reset() must be called with ParserBase instance as first argument (got EventCollector instance instead)
This module is used as a foundation for the html.parser module. It has no
documented public API and should not be used directly.
"""
import re
# Pre-bound .match/.search helpers of compiled patterns used by the scanner
# methods below.
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
# The module keeps no other reference to `re` after the patterns are built.
del re
class ParserBase(object):
    """Parser base class which provides some common support methods used
    by the SGML/HTML and XHTML parsers."""

    def __init__(self):
        # Abstract base: only meaningful when subclassed by a concrete
        # HTML/XHTML parser.
        if self.__class__ is ParserBase:
            raise RuntimeError(
                "_markupbase.ParserBase must be subclassed")

    def error(self, message):
        """Report a parse error; subclasses must override."""
        raise NotImplementedError(
            "subclasses of ParserBase must override error()")

    def reset(self):
        """Reset position tracking: 1-based line number, 0-based offset."""
        self.lineno = 1
        self.offset = 0

    def getpos(self):
        """Return current line number and offset."""
        return self.lineno, self.offset

    # Internal -- update line number and offset.  This should be
    # called for each piece of data exactly once, in order -- in other
    # words the concatenation of all the input strings to this
    # function should be exactly the entire input.
    def updatepos(self, i, j):
        if i >= j:
            return j
        rawdata = self.rawdata
        nlines = rawdata.count("\n", i, j)
        if nlines:
            self.lineno = self.lineno + nlines
            pos = rawdata.rindex("\n", i, j) # Should not fail
            self.offset = j-(pos+1)
        else:
            self.offset = self.offset + j-i
        return j

    # Extra characters tolerated inside the current declaration; set per
    # declaration type in parse_declaration().
    _decl_otherchars = ''

    # Internal -- parse declaration (for use by subclasses).
    def parse_declaration(self, i):
        # This is some sort of declaration; in "HTML as
        # deployed," this should only be the document type
        # declaration ("<!DOCTYPE html...>").
        # ISO 8879:1986, however, has more complex
        # declaration syntax for elements in <!...>, including:
        # --comment--
        # [marked section]
        # name in the following list: ENTITY, DOCTYPE, ELEMENT,
        # ATTLIST, NOTATION, SHORTREF, USEMAP,
        # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
        rawdata = self.rawdata
        j = i + 2
        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
        if rawdata[j:j+1] == ">":
            # the empty comment <!>
            return j + 1
        if rawdata[j:j+1] in ("-", ""):
            # Start of comment followed by buffer boundary,
            # or just a buffer boundary.
            return -1
        # A simple, practical version could look like: ((name|stringlit) S*) + '>'
        n = len(rawdata)
        if rawdata[j:j+2] == '--': #comment
            # Locate --.*-- as the body of the comment
            return self.parse_comment(i)
        elif rawdata[j] == '[': #marked section
            # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
            # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
            # Note that this is extended by Microsoft Office "Save as Web" function
            # to include [if...] and [endif].
            return self.parse_marked_section(i)
        else: #all other declaration elements
            decltype, j = self._scan_name(j, i)
        if j < 0:
            return j
        if decltype == "doctype":
            self._decl_otherchars = ''
        while j < n:
            c = rawdata[j]
            if c == ">":
                # end of declaration syntax
                data = rawdata[i+2:j]
                if decltype == "doctype":
                    self.handle_decl(data)
                else:
                    # According to the HTML5 specs sections "8.2.4.44 Bogus
                    # comment state" and "8.2.4.45 Markup declaration open
                    # state", a comment token should be emitted.
                    # Calling unknown_decl provides more flexibility though.
                    self.unknown_decl(data)
                return j + 1
            if c in "\"'":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1 # incomplete
                j = m.end()
            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
                name, j = self._scan_name(j, i)
            elif c in self._decl_otherchars:
                j = j + 1
            elif c == "[":
                # this could be handled in a separate doctype parser
                if decltype == "doctype":
                    j = self._parse_doctype_subset(j + 1, i)
                elif decltype in set(["attlist", "linktype", "link", "element"]):
                    # must tolerate []'d groups in a content model in an element declaration
                    # also in data attribute specifications of attlist declaration
                    # also link type declaration subsets in linktype declarations
                    # also link attribute specification lists in link declarations
                    self.error("unsupported '[' char in %s declaration" % decltype)
                else:
                    self.error("unexpected '[' char in declaration")
            else:
                self.error(
                    "unexpected %r char in declaration" % rawdata[j])
            if j < 0:
                return j
        return -1 # incomplete

    # Internal -- parse a marked section
    # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
    def parse_marked_section(self, i, report=1):
        rawdata= self.rawdata
        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
        sectName, j = self._scan_name( i+3, i )
        if j < 0:
            return j
        if sectName in set(["temp", "cdata", "ignore", "include", "rcdata"]):
            # look for standard ]]> ending
            match= _markedsectionclose.search(rawdata, i+3)
        elif sectName in set(["if", "else", "endif"]):
            # look for MS Office ]> ending
            match= _msmarkedsectionclose.search(rawdata, i+3)
        else:
            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
        if not match:
            return -1
        if report:
            j = match.start(0)
            self.unknown_decl(rawdata[i+3: j])
        return match.end(0)

    # Internal -- parse comment, return length or -1 if not terminated
    def parse_comment(self, i, report=1):
        rawdata = self.rawdata
        if rawdata[i:i+4] != '<!--':
            self.error('unexpected call to parse_comment()')
        match = _commentclose.search(rawdata, i+4)
        if not match:
            return -1
        if report:
            j = match.start(0)
            self.handle_comment(rawdata[i+4: j])
        return match.end(0)

    # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
    # returning the index just past any whitespace following the trailing ']'.
    def _parse_doctype_subset(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        j = i
        while j < n:
            c = rawdata[j]
            if c == "<":
                s = rawdata[j:j+2]
                if s == "<":
                    # end of buffer; incomplete
                    return -1
                if s != "<!":
                    self.updatepos(declstartpos, j + 1)
                    self.error("unexpected char in internal subset (in %r)" % s)
                if (j + 2) == n:
                    # end of buffer; incomplete
                    return -1
                if (j + 4) > n:
                    # end of buffer; incomplete
                    return -1
                if rawdata[j:j+4] == "<!--":
                    j = self.parse_comment(j, report=0)
                    if j < 0:
                        return j
                    continue
                name, j = self._scan_name(j + 2, declstartpos)
                if j == -1:
                    return -1
                if name not in set(["attlist", "element", "entity", "notation"]):
                    self.updatepos(declstartpos, j + 2)
                    self.error(
                        "unknown declaration %r in internal subset" % name)
                # handle the individual names
                meth = getattr(self, "_parse_doctype_" + name)
                j = meth(j, declstartpos)
                if j < 0:
                    return j
            elif c == "%":
                # parameter entity reference
                if (j + 1) == n:
                    # end of buffer; incomplete
                    return -1
                s, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                if rawdata[j] == ";":
                    j = j + 1
            elif c == "]":
                j = j + 1
                while j < n and rawdata[j].isspace():
                    j = j + 1
                if j < n:
                    if rawdata[j] == ">":
                        return j
                    self.updatepos(declstartpos, j)
                    self.error("unexpected char after internal subset")
                else:
                    return -1
            elif c.isspace():
                j = j + 1
            else:
                self.updatepos(declstartpos, j)
                self.error("unexpected char %r in internal subset" % c)
        # end of buffer reached
        return -1

    # Internal -- scan past <!ELEMENT declarations
    def _parse_doctype_element(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j == -1:
            return -1
        # style content model; just skip until '>'
        rawdata = self.rawdata
        if '>' in rawdata[j:]:
            return rawdata.find(">", j) + 1
        return -1

    # Internal -- scan past <!ATTLIST declarations
    def _parse_doctype_attlist(self, i, declstartpos):
        rawdata = self.rawdata
        name, j = self._scan_name(i, declstartpos)
        c = rawdata[j:j+1]
        if c == "":
            return -1
        if c == ">":
            return j + 1
        while 1:
            # scan a series of attribute descriptions; simplified:
            #   name type [value] [#constraint]
            name, j = self._scan_name(j, declstartpos)
            if j < 0:
                return j
            c = rawdata[j:j+1]
            if c == "":
                return -1
            if c == "(":
                # an enumerated type; look for ')'
                if ")" in rawdata[j:]:
                    j = rawdata.find(")", j) + 1
                else:
                    return -1
                while rawdata[j:j+1].isspace():
                    j = j + 1
                if not rawdata[j:]:
                    # end of buffer, incomplete
                    return -1
            else:
                name, j = self._scan_name(j, declstartpos)
            c = rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == "#":
                if rawdata[j:] == "#":
                    # end of buffer
                    return -1
                name, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == '>':
                # all done
                return j + 1

    # Internal -- scan past <!NOTATION declarations
    def _parse_doctype_notation(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j < 0:
            return j
        rawdata = self.rawdata
        while 1:
            c = rawdata[j:j+1]
            if not c:
                # end of buffer; incomplete
                return -1
            if c == '>':
                return j + 1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1
                j = m.end()
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j

    # Internal -- scan past <!ENTITY declarations
    def _parse_doctype_entity(self, i, declstartpos):
        rawdata = self.rawdata
        if rawdata[i:i+1] == "%":
            # parameter entity declaration: skip whitespace after '%'
            j = i + 1
            while 1:
                c = rawdata[j:j+1]
                if not c:
                    return -1
                if c.isspace():
                    j = j + 1
                else:
                    break
        else:
            j = i
        name, j = self._scan_name(j, declstartpos)
        if j < 0:
            return j
        while 1:
            c = self.rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1 # incomplete
            elif c == ">":
                return j + 1
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j

    # Internal -- scan a name token and the new position and the token, or
    # return -1 if we've reached the end of the buffer.
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = _declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.updatepos(declstartpos, i)
            self.error("expected name token at %r"
                       % rawdata[declstartpos:declstartpos+20])

    # To be overridden -- handlers for unknown objects
    def unknown_decl(self, data):
        pass
|
gonboy/sl4a | refs/heads/master | python/src/Lib/plat-mac/lib-scriptpackages/StdSuites/Macintosh_Connectivity_Clas.py | 81 | """Suite Macintosh Connectivity Classes: Classes relating to Apple Macintosh personal computer connectivity
Level 1, version 1
Generated from /Volumes/Sap/System Folder/Extensions/AppleScript
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'macc'
# NOTE: this module is machine-generated from an AETE/AEUT resource (see the
# module docstring); edit with care.  Each ComponentItem subclass models an
# AppleEvent object class, each NProperty subclass one of its properties
# (`which` is the 4-char property code, `want` the 4-char type code).
class Macintosh_Connectivity_Clas_Events:
    pass


class ADB_address(aetools.ComponentItem):
    """ADB address - Addresses a device connected via Apple Desktop Bus """
    want = 'cadb'
class _Prop__3c_inheritance_3e_(aetools.NProperty):
    """<inheritance> - inherits some of its properties from this class """
    which = 'c@#^'
    want = 'cadr'
class _Prop_ID(aetools.NProperty):
    """ID - the Apple Desktop Bus device ID """
    which = 'ID '
    want = 'shor'

# Plural alias for the element form of the class.
ADB_addresses = ADB_address

class address_specification(aetools.ComponentItem):
    """address specification - Unique designation of a device or service connected to this computer """
    want = 'cadr'
class _Prop_conduit(aetools.NProperty):
    """conduit - How the addressee is physically connected """
    which = 'pcon'
    want = 'econ'
class _Prop_properties(aetools.NProperty):
    """properties - property that allows getting and setting of multiple properties """
    which = 'pALL'
    want = 'reco'
class _Prop_protocol(aetools.NProperty):
    """protocol - How to talk to this addressee """
    which = 'pprt'
    want = 'epro'

address_specifications = address_specification

class AppleTalk_address(aetools.ComponentItem):
    """AppleTalk address - Addresses a device or service connected via the AppleTalk protocol """
    want = 'cat '
class _Prop_AppleTalk_machine(aetools.NProperty):
    """AppleTalk machine - the machine name part of the address """
    which = 'patm'
    want = 'TEXT'
class _Prop_AppleTalk_type(aetools.NProperty):
    """AppleTalk type - the type part of the AppleTalk address """
    which = 'patt'
    want = 'TEXT'
class _Prop_AppleTalk_zone(aetools.NProperty):
    """AppleTalk zone - the zone part of the address """
    which = 'patz'
    want = 'TEXT'

AppleTalk_addresses = AppleTalk_address

class bus_slot(aetools.ComponentItem):
    """bus slot - Addresses a PC, PCI, or NuBus card """
    want = 'cbus'

bus_slots = bus_slot

class device_specification(aetools.ComponentItem):
    """device specification - A device connected to a computer """
    want = 'cdev'
class _Prop_device_address(aetools.NProperty):
    """device address - the address of the device """
    which = 'pdva'
    want = 'cadr'
class _Prop_device_type(aetools.NProperty):
    """device type - the kind of device """
    which = 'pdvt'
    want = 'edvt'

device_specifications = device_specification

class Ethernet_address(aetools.ComponentItem):
    """Ethernet address - Addresses a device by its Ethernet address """
    want = 'cen '

Ethernet_addresses = Ethernet_address

class FireWire_address(aetools.ComponentItem):
    """FireWire address - Addresses a device on the FireWire bus """
    want = 'cfw '

FireWire_addresses = FireWire_address

class IP_address(aetools.ComponentItem):
    """IP address - Addresses a device or service via the Internet Protocol (IP) """
    want = 'cip '
class _Prop_DNS_form(aetools.NProperty):
    """DNS form - the address in the form "apple.com" """
    which = 'pdns'
    want = 'TEXT'
class _Prop_port(aetools.NProperty):
    """port - the port number of the service or client being addressed """
    which = 'ppor'
    want = 'TEXT'

IP_addresses = IP_address

class LocalTalk_address(aetools.ComponentItem):
    """LocalTalk address - Addresses a device by its LocalTalk address """
    want = 'clt '
class _Prop_network(aetools.NProperty):
    """network - the LocalTalk network number """
    which = 'pnet'
    want = 'shor'
class _Prop_node(aetools.NProperty):
    """node - the LocalTalk node number """
    which = 'pnod'
    want = 'shor'
class _Prop_socket(aetools.NProperty):
    """socket - the LocalTalk socket number """
    which = 'psoc'
    want = 'shor'

LocalTalk_addresses = LocalTalk_address

class SCSI_address(aetools.ComponentItem):
    """SCSI address - Addresses a SCSI device """
    want = 'cscs'
class _Prop_LUN(aetools.NProperty):
    """LUN - the SCSI logical unit number """
    which = 'pslu'
    want = 'shor'
class _Prop_SCSI_bus(aetools.NProperty):
    """SCSI bus - the SCSI bus number """
    which = 'pscb'
    want = 'shor'

SCSI_addresses = SCSI_address

class Token_Ring_address(aetools.ComponentItem):
    """Token Ring address - Addresses a device or service via the Token Ring protocol """
    want = 'ctok'

Token_Ring_addresses = Token_Ring_address

class USB_address(aetools.ComponentItem):
    """USB address - Addresses a device on the Universal Serial Bus """
    want = 'cusb'
class _Prop_name(aetools.NProperty):
    """name - the USB device name """
    which = 'pnam'
    want = 'TEXT'

USB_Addresses = USB_address
# Generated wiring: for each object class, its superclass chain and the
# dictionaries of private properties/elements used by aetools at runtime.
ADB_address._superclassnames = ['address_specification']
ADB_address._privpropdict = {
    'ID' : _Prop_ID,
    '_3c_inheritance_3e_' : _Prop__3c_inheritance_3e_,
}
ADB_address._privelemdict = {
}
address_specification._superclassnames = []
address_specification._privpropdict = {
    'conduit' : _Prop_conduit,
    'properties' : _Prop_properties,
    'protocol' : _Prop_protocol,
}
address_specification._privelemdict = {
}
AppleTalk_address._superclassnames = ['address_specification']
AppleTalk_address._privpropdict = {
    'AppleTalk_machine' : _Prop_AppleTalk_machine,
    'AppleTalk_type' : _Prop_AppleTalk_type,
    'AppleTalk_zone' : _Prop_AppleTalk_zone,
    '_3c_inheritance_3e_' : _Prop__3c_inheritance_3e_,
}
AppleTalk_address._privelemdict = {
}
bus_slot._superclassnames = ['address_specification']
bus_slot._privpropdict = {
    'ID' : _Prop_ID,
    '_3c_inheritance_3e_' : _Prop__3c_inheritance_3e_,
}
bus_slot._privelemdict = {
}
device_specification._superclassnames = []
device_specification._privpropdict = {
    'device_address' : _Prop_device_address,
    'device_type' : _Prop_device_type,
    'properties' : _Prop_properties,
}
device_specification._privelemdict = {
}
Ethernet_address._superclassnames = ['address_specification']
Ethernet_address._privpropdict = {
    'ID' : _Prop_ID,
    '_3c_inheritance_3e_' : _Prop__3c_inheritance_3e_,
}
Ethernet_address._privelemdict = {
}
FireWire_address._superclassnames = ['address_specification']
FireWire_address._privpropdict = {
    'ID' : _Prop_ID,
    '_3c_inheritance_3e_' : _Prop__3c_inheritance_3e_,
}
FireWire_address._privelemdict = {
}
IP_address._superclassnames = ['address_specification']
IP_address._privpropdict = {
    'DNS_form' : _Prop_DNS_form,
    'ID' : _Prop_ID,
    '_3c_inheritance_3e_' : _Prop__3c_inheritance_3e_,
    'port' : _Prop_port,
}
IP_address._privelemdict = {
}
LocalTalk_address._superclassnames = ['address_specification']
LocalTalk_address._privpropdict = {
    '_3c_inheritance_3e_' : _Prop__3c_inheritance_3e_,
    'network' : _Prop_network,
    'node' : _Prop_node,
    'socket' : _Prop_socket,
}
LocalTalk_address._privelemdict = {
}
SCSI_address._superclassnames = ['address_specification']
SCSI_address._privpropdict = {
    'ID' : _Prop_ID,
    'LUN' : _Prop_LUN,
    'SCSI_bus' : _Prop_SCSI_bus,
    '_3c_inheritance_3e_' : _Prop__3c_inheritance_3e_,
}
SCSI_address._privelemdict = {
}
Token_Ring_address._superclassnames = ['address_specification']
Token_Ring_address._privpropdict = {
    'ID' : _Prop_ID,
    '_3c_inheritance_3e_' : _Prop__3c_inheritance_3e_,
}
Token_Ring_address._privelemdict = {
}
USB_address._superclassnames = ['address_specification']
USB_address._privpropdict = {
    '_3c_inheritance_3e_' : _Prop__3c_inheritance_3e_,
    'name' : _Prop_name,
}
USB_address._privelemdict = {
}
# Generated enumerations: human-readable enumerator name -> 4-char code.
# _Enum_econ: physical conduit types.
_Enum_econ = {
    'ADB' : 'eadb', #
    'printer_port' : 'ecpp', #
    'modem_port' : 'ecmp', #
    'modem_printer_port' : 'empp', #
    'LocalTalk' : 'eclt', #
    'Ethernet' : 'ecen', #
    'Token_Ring' : 'etok', #
    'SCSI' : 'ecsc', #
    'USB' : 'ecus', #
    'FireWire' : 'ecfw', #
    'infrared' : 'ecir', #
    'PC_card' : 'ecpc', #
    'PCI_bus' : 'ecpi', #
    'NuBus' : 'enub', #
    'PDS_slot' : 'ecpd', #
    'Comm_slot' : 'eccm', #
    'monitor_out' : 'ecmn', #
    'video_out' : 'ecvo', #
    'video_in' : 'ecvi', #
    'audio_out' : 'ecao', #
    'audio_line_in' : 'ecai', #
    'audio_line_out' : 'ecal', #
    'microphone' : 'ecmi', #
}

# _Enum_edvt: device types.
_Enum_edvt = {
    'hard_disk_drive' : 'ehd ', #
    'floppy_disk_drive' : 'efd ', #
    'CD_ROM_drive' : 'ecd ', #
    'DVD_drive' : 'edvd', #
    'storage_device' : 'edst', #
    'keyboard' : 'ekbd', #
    'mouse' : 'emou', #
    'trackball' : 'etrk', #
    'trackpad' : 'edtp', #
    'pointing_device' : 'edpd', #
    'video_monitor' : 'edvm', #
    'LCD_display' : 'edlc', #
    'display' : 'edds', #
    'modem' : 'edmm', #
    'PC_card' : 'ecpc', #
    'PCI_card' : 'edpi', #
    'NuBus_card' : 'ednb', #
    'printer' : 'edpr', #
    'speakers' : 'edsp', #
    'microphone' : 'ecmi', #
}

# _Enum_epro: communication protocols.
_Enum_epro = {
    'serial' : 'epsr', #
    'AppleTalk' : 'epat', #
    'IP' : 'epip', #
    'SCSI' : 'ecsc', #
    'ADB' : 'eadb', #
    'FireWire' : 'ecfw', #
    'IrDA' : 'epir', #
    'IRTalk' : 'epit', #
    'USB' : 'ecus', #
    'PC_card' : 'ecpc', #
    'PCI_bus' : 'ecpi', #
    'NuBus' : 'enub', #
    'bus' : 'ebus', #
    'Macintosh_video' : 'epmv', #
    'SVGA' : 'epsg', #
    'S_video' : 'epsv', #
    'analog_audio' : 'epau', #
    'digital_audio' : 'epda', #
    'PostScript' : 'epps', #
}
#
# Indices of types declared in this module
#
# Lookup tables used by aetools to decode AppleEvent descriptors:
# 4-char code -> class / property / enumeration declared above.
_classdeclarations = {
    'cadb' : ADB_address,
    'cadr' : address_specification,
    'cat ' : AppleTalk_address,
    'cbus' : bus_slot,
    'cdev' : device_specification,
    'cen ' : Ethernet_address,
    'cfw ' : FireWire_address,
    'cip ' : IP_address,
    'clt ' : LocalTalk_address,
    'cscs' : SCSI_address,
    'ctok' : Token_Ring_address,
    'cusb' : USB_address,
}

_propdeclarations = {
    'ID ' : _Prop_ID,
    'c@#^' : _Prop__3c_inheritance_3e_,
    'pALL' : _Prop_properties,
    'patm' : _Prop_AppleTalk_machine,
    'patt' : _Prop_AppleTalk_type,
    'patz' : _Prop_AppleTalk_zone,
    'pcon' : _Prop_conduit,
    'pdns' : _Prop_DNS_form,
    'pdva' : _Prop_device_address,
    'pdvt' : _Prop_device_type,
    'pnam' : _Prop_name,
    'pnet' : _Prop_network,
    'pnod' : _Prop_node,
    'ppor' : _Prop_port,
    'pprt' : _Prop_protocol,
    'pscb' : _Prop_SCSI_bus,
    'pslu' : _Prop_LUN,
    'psoc' : _Prop_socket,
}

# No comparison operators are declared by this suite.
_compdeclarations = {
}

_enumdeclarations = {
    'econ' : _Enum_econ,
    'edvt' : _Enum_edvt,
    'epro' : _Enum_epro,
}
|
lordB8r/polls | refs/heads/master | ENV/lib/python2.7/site-packages/pip-1.3.1-py2.7.egg/pip/commands/search.py | 63 | import sys
import textwrap
import pkg_resources
import pip.download
from pip.basecommand import Command, SUCCESS
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from distutils.version import StrictVersion, LooseVersion
class SearchCommand(Command):
    """Search for PyPI packages whose name or summary contains <query>."""
    name = 'search'
    usage = """
      %prog [options] <query>"""
    summary = 'Search PyPI for packages.'

    def __init__(self, *args, **kw):
        super(SearchCommand, self).__init__(*args, **kw)
        # The only option this command understands: which index endpoint
        # to query over XML-RPC.
        self.cmd_opts.add_option(
            '--index',
            dest='index',
            metavar='URL',
            default='https://pypi.python.org/pypi',
            help='Base URL of Python Package Index (default %default)')
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        """Query the index with the positional args and print the matches.

        Returns SUCCESS when at least one hit came back, otherwise
        NO_MATCHES_FOUND.
        """
        if not args:
            raise CommandError('Missing required argument (search query).')

        raw_hits = self.search(args, options.index)
        grouped = transform_hits(raw_hits)

        # Only constrain output width when stdout really is a terminal,
        # so piped/redirected output stays unwrapped.
        width = None
        if sys.stdout.isatty():
            width = get_terminal_size()[0]
        print_results(grouped, terminal_width=width)

        if raw_hits:
            return SUCCESS
        return NO_MATCHES_FOUND

    def search(self, query, index_url):
        """Run an 'or' search over name and summary on the index's XML-RPC
        interface; returns the raw list of hit dicts."""
        proxy = xmlrpclib.ServerProxy(index_url, pip.download.xmlrpclib_transport)
        return proxy.search({'name': query, 'summary': query}, 'or')
def transform_hits(hits):
    """Group the raw PyPI search hits by package.

    The list from pypi is really a list of versions: one hit per release.
    This folds them into one record per package, with all versions stored
    inline, keeping the summary/score of the highest version seen.

    :param hits: iterable of dicts with 'name', 'summary', 'version' and
        '_pypi_ordering' keys (ordering may be None).
    :return: list of package dicts sorted by score, highest first.
    """
    packages = {}
    for hit in hits:
        name = hit['name']
        summary = hit['summary']
        version = hit['version']
        score = hit['_pypi_ordering']
        # A missing ordering sorts as zero rather than crashing the sort.
        if score is None:
            score = 0

        if name not in packages:  # idiomatic membership test (was .keys())
            packages[name] = {'name': name, 'summary': summary,
                              'versions': [version], 'score': score}
        else:
            packages[name]['versions'].append(version)
            # If this release is the highest version seen so far, its
            # summary and score represent the package.
            if version == highest_version(packages[name]['versions']):
                packages[name]['summary'] = summary
                packages[name]['score'] = score

    # Each record now has a unique name; return them ordered by score.
    return sorted(packages.values(), key=lambda x: x['score'], reverse=True)
def print_results(hits, name_column_width=25, terminal_width=None):
    """Print one line per hit ("name - summary"); for locally installed
    packages also report the installed and latest versions, indented."""
    installed = [dist.project_name for dist in pkg_resources.working_set]
    for hit in hits:
        pkg_name = hit['name']
        description = hit['summary'] or ''
        if terminal_width is not None:
            # Wrap the summary to the remaining width and indent every
            # continuation line so it lines up under the first one.
            wrapped = textwrap.wrap(description,
                                    terminal_width - name_column_width - 5)
            description = ('\n' + ' ' * (name_column_width + 3)).join(wrapped)
        line = '%s - %s' % (pkg_name.ljust(name_column_width), description)
        try:
            logger.notify(line)
            if pkg_name in installed:
                dist = pkg_resources.get_distribution(pkg_name)
                logger.indent += 2
                try:
                    latest = highest_version(hit['versions'])
                    if dist.version == latest:
                        logger.notify('INSTALLED: %s (latest)' % dist.version)
                    else:
                        logger.notify('INSTALLED: %s' % dist.version)
                        logger.notify('LATEST: %s' % latest)
                finally:
                    logger.indent -= 2
        except UnicodeEncodeError:
            # Console encoding cannot represent the line; skip it silently.
            pass
def compare_versions(version1, version2):
    """cmp()-style comparison of two version strings (-1, 0 or 1)."""
    try:
        return cmp(StrictVersion(version1), StrictVersion(version2))
    except ValueError:
        # Not a strict version number; fall back to LooseVersion below.
        pass
    try:
        return cmp(LooseVersion(version1), LooseVersion(version2))
    except TypeError:
        # Certain LooseVersion comparisons raise because the parsed
        # component lists mix ints and strings (unorderable); compare
        # everything as strings instead.
        first = [str(part) for part in LooseVersion(version1).version]
        second = [str(part) for part in LooseVersion(version2).version]
        return cmp(first, second)
def highest_version(versions):
    """Return the highest of *versions*, as ordered by compare_versions().

    Fixes the classic ``cond and v1 or v2`` pitfall of the previous
    implementation: when the winning version string was falsy (e.g. ``''``)
    the losing operand was returned instead.  A real conditional expression
    has no such failure mode.
    """
    return reduce(lambda v1, v2: v1 if compare_versions(v1, v2) == 1 else v2,
                  versions)
|
LateNitePie/python-oauth2 | refs/heads/master | tests/test_oauth.py | 301 | # -*- coding: utf-8 -*-
"""
The MIT License
Copyright (c) 2009 Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import unittest
import oauth2 as oauth
import random
import time
import urllib
import urlparse
from types import ListType
import mock
import httplib2
# Fix for python2.5 compatibility
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), ".."),]
class TestError(unittest.TestCase):
    """Tests for the default and custom messages of oauth2.Error.

    NOTE: this file uses Python-2-only ``except X, e`` syntax throughout.
    """

    def test_message(self):
        # Raising the class bare should yield the default message.
        try:
            raise oauth.Error
        except oauth.Error, e:
            self.assertEqual(e.message, 'OAuth error occurred.')

        # An explicit message must be preserved verbatim.
        msg = 'OMG THINGS BROKE!!!!'
        try:
            raise oauth.Error(msg)
        except oauth.Error, e:
            self.assertEqual(e.message, msg)

    def test_str(self):
        # str() of the error mirrors the default message.
        try:
            raise oauth.Error
        except oauth.Error, e:
            self.assertEquals(str(e), 'OAuth error occurred.')
class TestGenerateFunctions(unittest.TestCase):
    """Tests for oauth2's module-level helpers: authenticate-header and
    XOAUTH string building, escaping, and nonce/verifier/timestamp
    generation."""

    def test_build_auth_header(self):
        # Without a realm the header still carries an (empty) realm value.
        header = oauth.build_authenticate_header()
        self.assertEqual(header['WWW-Authenticate'], 'OAuth realm=""')
        self.assertEqual(len(header), 1)

        # An explicit realm is echoed back verbatim.
        realm = 'http://example.myrealm.com/'
        header = oauth.build_authenticate_header(realm)
        self.assertEqual(header['WWW-Authenticate'], 'OAuth realm="%s"' %
            realm)
        self.assertEqual(len(header), 1)

    def test_build_xoauth_string(self):
        consumer = oauth.Consumer('consumer_token', 'consumer_secret')
        token = oauth.Token('user_token', 'user_secret')
        url = "https://mail.google.com/mail/b/joe@example.com/imap/"
        xoauth_string = oauth.build_xoauth_string(url, consumer, token)

        # Expected shape: "<method> <url> <comma-separated params>".
        method, oauth_url, oauth_string = xoauth_string.split(' ')
        self.assertEqual("GET", method)
        self.assertEqual(url, oauth_url)

        # Parse the key="value" pairs back into a dict for inspection.
        returned = {}
        parts = oauth_string.split(',')
        for part in parts:
            var, val = part.split('=')
            returned[var] = val.strip('"')

        self.assertEquals('HMAC-SHA1', returned['oauth_signature_method'])
        self.assertEquals('user_token', returned['oauth_token'])
        self.assertEquals('consumer_token', returned['oauth_consumer_key'])
        self.assertTrue('oauth_signature' in returned, 'oauth_signature')

    def test_escape(self):
        # '~' must survive escaping while path traversal sequences must not.
        string = 'http://whatever.com/~someuser/?test=test&other=other'
        self.assert_('~' in oauth.escape(string))
        string = '../../../../../../../etc/passwd'
        self.assert_('../' not in oauth.escape(string))

    def test_gen_nonce(self):
        # Default length is 8; an explicit length is honoured.
        nonce = oauth.generate_nonce()
        self.assertEqual(len(nonce), 8)
        nonce = oauth.generate_nonce(20)
        self.assertEqual(len(nonce), 20)

    def test_gen_verifier(self):
        # Default length is 8; an explicit length is honoured.
        verifier = oauth.generate_verifier()
        self.assertEqual(len(verifier), 8)
        verifier = oauth.generate_verifier(16)
        self.assertEqual(len(verifier), 16)

    def test_gen_timestamp(self):
        # NOTE(review): assumes both calls land within the same second;
        # could flake on a second boundary.
        exp = int(time.time())
        now = oauth.generate_timestamp()
        self.assertEqual(exp, now)
class TestConsumer(unittest.TestCase):
    """Tests for oauth2.Consumer construction and serialization."""

    def setUp(self):
        self.key = 'my-key'
        self.secret = 'my-secret'
        self.consumer = oauth.Consumer(key=self.key, secret=self.secret)

    def test_init(self):
        self.assertEqual(self.consumer.key, self.key)
        self.assertEqual(self.consumer.secret, self.secret)

    def test_basic(self):
        # Both key and secret are mandatory.
        self.assertRaises(ValueError, lambda: oauth.Consumer(None, None))
        self.assertRaises(ValueError, lambda: oauth.Consumer('asf', None))
        self.assertRaises(ValueError, lambda: oauth.Consumer(None, 'dasf'))

    def test_str(self):
        # str(consumer) is a URL-encoded query string with both fields.
        res = dict(parse_qsl(str(self.consumer)))
        self.assertTrue('oauth_consumer_key' in res)
        self.assertTrue('oauth_consumer_secret' in res)
        self.assertEquals(res['oauth_consumer_key'], self.consumer.key)
        self.assertEquals(res['oauth_consumer_secret'], self.consumer.secret)
class TestToken(unittest.TestCase):
    """Tests for oauth2.Token: construction, callbacks, verifiers and
    string (de)serialization.

    Fix: this class previously defined ``test_to_string`` twice; the later
    definition silently shadowed the earlier one, so the round-trip
    assertions of the first never ran.  The simpler, later test is renamed
    ``test_to_string_simple`` so both now execute.
    """

    def setUp(self):
        self.key = 'my-key'
        self.secret = 'my-secret'
        self.token = oauth.Token(self.key, self.secret)

    def test_basic(self):
        # Both key and secret are mandatory.
        self.assertRaises(ValueError, lambda: oauth.Token(None, None))
        self.assertRaises(ValueError, lambda: oauth.Token('asf', None))
        self.assertRaises(ValueError, lambda: oauth.Token(None, 'dasf'))

    def test_init(self):
        # A fresh token has no callback or verifier state.
        self.assertEqual(self.token.key, self.key)
        self.assertEqual(self.token.secret, self.secret)
        self.assertEqual(self.token.callback, None)
        self.assertEqual(self.token.callback_confirmed, None)
        self.assertEqual(self.token.verifier, None)

    def test_set_callback(self):
        self.assertEqual(self.token.callback, None)
        self.assertEqual(self.token.callback_confirmed, None)
        cb = 'http://www.example.com/my-callback'
        self.token.set_callback(cb)
        self.assertEqual(self.token.callback, cb)
        self.assertEqual(self.token.callback_confirmed, 'true')
        self.token.set_callback(None)
        self.assertEqual(self.token.callback, None)
        # TODO: The following test should probably not pass, but it does
        # To fix this, check for None and unset 'true' in set_callback
        # Additionally, should a confirmation truly be done of the callback?
        self.assertEqual(self.token.callback_confirmed, 'true')

    def test_set_verifier(self):
        self.assertEqual(self.token.verifier, None)
        v = oauth.generate_verifier()
        self.token.set_verifier(v)
        self.assertEqual(self.token.verifier, v)
        # Calling with no argument generates a fresh random verifier.
        self.token.set_verifier()
        self.assertNotEqual(self.token.verifier, v)
        # An explicit empty string is kept as-is, not regenerated.
        self.token.set_verifier('')
        self.assertEqual(self.token.verifier, '')

    def test_get_callback_url(self):
        self.assertEqual(self.token.get_callback_url(), None)
        self.token.set_verifier()
        self.assertEqual(self.token.get_callback_url(), None)

        # A callback that already has a query string gets '&oauth_verifier='.
        cb = 'http://www.example.com/my-callback?save=1&return=true'
        v = oauth.generate_verifier()
        self.token.set_callback(cb)
        self.token.set_verifier(v)
        url = self.token.get_callback_url()
        verifier_str = '&oauth_verifier=%s' % v
        self.assertEqual(url, '%s%s' % (cb, verifier_str))

        # A callback without one gets '?oauth_verifier='.
        cb = 'http://www.example.com/my-callback-no-query'
        v = oauth.generate_verifier()
        self.token.set_callback(cb)
        self.token.set_verifier(v)
        url = self.token.get_callback_url()
        verifier_str = '?oauth_verifier=%s' % v
        self.assertEqual(url, '%s%s' % (cb, verifier_str))

    def test_to_string(self):
        string = 'oauth_token_secret=%s&oauth_token=%s' % (self.secret,
            self.key)
        self.assertEqual(self.token.to_string(), string)
        # A confirmed callback is reflected in the serialization.
        self.token.set_callback('http://www.example.com/my-callback')
        string += '&oauth_callback_confirmed=true'
        self.assertEqual(self.token.to_string(), string)

    def test_to_string_simple(self):
        # Renamed from a duplicate 'test_to_string' that shadowed the
        # round-trip test above.
        tok = oauth.Token('tooken', 'seecret')
        self.assertEqual(str(tok), 'oauth_token_secret=seecret&oauth_token=tooken')

    def _compare_tokens(self, new):
        # Helper: *new* must match self.token field-for-field.
        self.assertEqual(self.token.key, new.key)
        self.assertEqual(self.token.secret, new.secret)
        # TODO: What about copying the callback to the new token?
        # self.assertEqual(self.token.callback, new.callback)
        self.assertEqual(self.token.callback_confirmed,
            new.callback_confirmed)
        # TODO: What about copying the verifier to the new token?
        # self.assertEqual(self.token.verifier, new.verifier)

    def test_from_string(self):
        # Empty, garbage and partial serializations are all rejected.
        self.assertRaises(ValueError, lambda: oauth.Token.from_string(''))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('blahblahblah'))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('blah=blah'))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret=asfdasf'))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret='))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=asfdasf'))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token='))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=&oauth_token_secret='))
        self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=tooken%26oauth_token_secret=seecret'))

        # A genuine serialization round-trips, with and without a
        # confirmed callback.
        string = self.token.to_string()
        new = oauth.Token.from_string(string)
        self._compare_tokens(new)
        self.token.set_callback('http://www.example.com/my-callback')
        string = self.token.to_string()
        new = oauth.Token.from_string(string)
        self._compare_tokens(new)
class ReallyEqualMixin:
    """TestCase mix-in asserting both value equality and identical types."""

    def failUnlessReallyEqual(self, a, b, msg=None):
        self.failUnlessEqual(a, b, msg=msg)
        detail = "a :: %r, b :: %r, %r" % (a, b, msg)
        self.failUnlessEqual(type(a), type(b), msg=detail)
class TestFuncs(unittest.TestCase):
    """Tests for oauth2's to_unicode conversion helpers.

    NOTE: the raw '\xae' literals rely on Python 2 byte-string semantics.
    """

    def test_to_unicode(self):
        # Non-UTF-8 byte strings are rejected, even when nested in a list.
        self.failUnlessRaises(TypeError, oauth.to_unicode, '\xae')
        self.failUnlessRaises(TypeError, oauth.to_unicode_optional_iterator, '\xae')
        self.failUnlessRaises(TypeError, oauth.to_unicode_optional_iterator, ['\xae'])

        # ASCII and unicode inputs pass through; valid UTF-8 is decoded.
        self.failUnlessEqual(oauth.to_unicode(':-)'), u':-)')
        self.failUnlessEqual(oauth.to_unicode(u'\u00ae'), u'\u00ae')
        self.failUnlessEqual(oauth.to_unicode('\xc2\xae'), u'\u00ae')
        self.failUnlessEqual(oauth.to_unicode_optional_iterator([':-)']), [u':-)'])
        self.failUnlessEqual(oauth.to_unicode_optional_iterator([u'\u00ae']), [u'\u00ae'])
class TestRequest(unittest.TestCase, ReallyEqualMixin):
def test_setter(self):
url = "http://example.com"
method = "GET"
req = oauth.Request(method)
self.assertTrue(not hasattr(req, 'url') or req.url is None)
self.assertTrue(not hasattr(req, 'normalized_url') or req.normalized_url is None)
def test_deleter(self):
url = "http://example.com"
method = "GET"
req = oauth.Request(method, url)
try:
del req.url
url = req.url
self.fail("AttributeError should have been raised on empty url.")
except AttributeError:
pass
except Exception, e:
self.fail(str(e))
def test_url(self):
url1 = "http://example.com:80/foo.php"
url2 = "https://example.com:443/foo.php"
exp1 = "http://example.com/foo.php"
exp2 = "https://example.com/foo.php"
method = "GET"
req = oauth.Request(method, url1)
self.assertEquals(req.normalized_url, exp1)
self.assertEquals(req.url, url1)
req = oauth.Request(method, url2)
self.assertEquals(req.normalized_url, exp2)
self.assertEquals(req.url, url2)
def test_bad_url(self):
request = oauth.Request()
try:
request.url = "ftp://example.com"
self.fail("Invalid URL scheme was accepted.")
except ValueError:
pass
def test_unset_consumer_and_token(self):
consumer = oauth.Consumer('my_consumer_key', 'my_consumer_secret')
token = oauth.Token('my_key', 'my_secret')
request = oauth.Request("GET", "http://example.com/fetch.php")
request.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer,
token)
self.assertEquals(consumer.key, request['oauth_consumer_key'])
self.assertEquals(token.key, request['oauth_token'])
def test_no_url_set(self):
consumer = oauth.Consumer('my_consumer_key', 'my_consumer_secret')
token = oauth.Token('my_key', 'my_secret')
request = oauth.Request()
try:
try:
request.sign_request(oauth.SignatureMethod_HMAC_SHA1(),
consumer, token)
except TypeError:
self.fail("Signature method didn't check for a normalized URL.")
except ValueError:
pass
def test_url_query(self):
url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
normalized_url = urlparse.urlunparse(urlparse.urlparse(url)[:3] + (None, None, None))
method = "GET"
req = oauth.Request(method, url)
self.assertEquals(req.url, url)
self.assertEquals(req.normalized_url, normalized_url)
def test_get_parameter(self):
url = "http://example.com"
method = "GET"
params = {'oauth_consumer' : 'asdf'}
req = oauth.Request(method, url, parameters=params)
self.assertEquals(req.get_parameter('oauth_consumer'), 'asdf')
self.assertRaises(oauth.Error, req.get_parameter, 'blah')
def test_get_nonoauth_parameters(self):
oauth_params = {
'oauth_consumer': 'asdfasdfasdf'
}
other_params = {
u'foo': u'baz',
u'bar': u'foo',
u'multi': [u'FOO',u'BAR'],
u'uni_utf8': u'\xae',
u'uni_unicode': u'\u00ae',
u'uni_unicode_2': u'åÅøØ',
}
params = oauth_params
params.update(other_params)
req = oauth.Request("GET", "http://example.com", params)
self.assertEquals(other_params, req.get_nonoauth_parameters())
def test_to_header(self):
realm = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
header, value = req.to_header(realm).items()[0]
parts = value.split('OAuth ')
vars = parts[1].split(', ')
self.assertTrue(len(vars), (len(params) + 1))
res = {}
for v in vars:
var, val = v.split('=')
res[var] = urllib.unquote(val.strip('"'))
self.assertEquals(realm, res['realm'])
del res['realm']
self.assertTrue(len(res), len(params))
for key, val in res.items():
self.assertEquals(val, params.get(key))
def test_to_postdata_nonascii(self):
realm = "http://sp.example.com/"
params = {
'nonasciithing': u'q\xbfu\xe9 ,aasp u?..a.s',
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
self.failUnlessReallyEqual(req.to_postdata(), 'nonasciithing=q%C2%BFu%C3%A9%20%2Caasp%20u%3F..a.s&oauth_nonce=4572616e48616d6d65724c61686176&oauth_timestamp=137131200&oauth_consumer_key=0685bd9184jfhq22&oauth_signature_method=HMAC-SHA1&oauth_version=1.0&oauth_token=ad180jjd733klru7&oauth_signature=wOJIO9A2W5mFwDgiDvZbTSMK%252FPY%253D')
def test_to_postdata(self):
realm = "http://sp.example.com/"
params = {
'multi': ['FOO','BAR'],
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
flat = [('multi','FOO'),('multi','BAR')]
del params['multi']
flat.extend(params.items())
kf = lambda x: x[0]
self.assertEquals(sorted(flat, key=kf), sorted(parse_qsl(req.to_postdata()), key=kf))
def test_to_url(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
exp = urlparse.urlparse("%s?%s" % (url, urllib.urlencode(params)))
res = urlparse.urlparse(req.to_url())
self.assertEquals(exp.scheme, res.scheme)
self.assertEquals(exp.netloc, res.netloc)
self.assertEquals(exp.path, res.path)
a = parse_qs(exp.query)
b = parse_qs(res.query)
self.assertEquals(a, b)
def test_to_url_with_query(self):
url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
# Note: the url above already has query parameters, so append new ones with &
exp = urlparse.urlparse("%s&%s" % (url, urllib.urlencode(params)))
res = urlparse.urlparse(req.to_url())
self.assertEquals(exp.scheme, res.scheme)
self.assertEquals(exp.netloc, res.netloc)
self.assertEquals(exp.path, res.path)
a = parse_qs(exp.query)
b = parse_qs(res.query)
self.assertTrue('alt' in b)
self.assertTrue('max-contacts' in b)
self.assertEquals(b['alt'], ['json'])
self.assertEquals(b['max-contacts'], ['10'])
self.assertEquals(a, b)
def test_signature_base_string_nonascii_nonutf8(self):
consumer = oauth.Consumer('consumer_token', 'consumer_secret')
url = u'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\u2766,+CA'
req = oauth.Request("GET", url)
self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\xe2\x9d\xa6,+CA'
req = oauth.Request("GET", url)
self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc%E2%9D%A6,+CA'
req = oauth.Request("GET", url)
self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
url = u'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc%E2%9D%A6,+CA'
req = oauth.Request("GET", url)
self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
def test_signature_base_string_with_query(self):
url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
self.assertEquals(req.normalized_url, 'https://www.google.com/m8/feeds/contacts/default/full/')
self.assertEquals(req.url, 'https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10')
normalized_params = parse_qsl(req.get_normalized_parameters())
self.assertTrue(len(normalized_params), len(params) + 2)
normalized_params = dict(normalized_params)
for key, value in params.iteritems():
if key == 'oauth_signature':
continue
self.assertEquals(value, normalized_params[key])
self.assertEquals(normalized_params['alt'], 'json')
self.assertEquals(normalized_params['max-contacts'], '10')
def test_get_normalized_parameters_empty(self):
url = "http://sp.example.com/?empty="
req = oauth.Request("GET", url)
res = req.get_normalized_parameters()
expected='empty='
self.assertEquals(expected, res)
def test_get_normalized_parameters_duplicate(self):
url = "http://example.com/v2/search/videos?oauth_nonce=79815175&oauth_timestamp=1295397962&oauth_consumer_key=mykey&oauth_signature_method=HMAC-SHA1&q=car&oauth_version=1.0&offset=10&oauth_signature=spWLI%2FGQjid7sQVd5%2FarahRxzJg%3D"
req = oauth.Request("GET", url)
res = req.get_normalized_parameters()
expected='oauth_consumer_key=mykey&oauth_nonce=79815175&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1295397962&oauth_version=1.0&offset=10&q=car'
self.assertEquals(expected, res)
def test_get_normalized_parameters_from_url(self):
# example copied from
# https://github.com/ciaranj/node-oauth/blob/master/tests/oauth.js
# which in turns says that it was copied from
# http://oauth.net/core/1.0/#sig_base_example .
url = "http://photos.example.net/photos?file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=original"
req = oauth.Request("GET", url)
res = req.get_normalized_parameters()
expected = 'file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=original'
self.assertEquals(expected, res)
def test_signing_base(self):
# example copied from
# https://github.com/ciaranj/node-oauth/blob/master/tests/oauth.js
# which in turns says that it was copied from
# http://oauth.net/core/1.0/#sig_base_example .
url = "http://photos.example.net/photos?file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=original"
req = oauth.Request("GET", url)
sm = oauth.SignatureMethod_HMAC_SHA1()
consumer = oauth.Consumer('dpf43f3p2l4k3l03', 'foo')
key, raw = sm.signing_base(req, consumer, None)
expected = 'GET&http%3A%2F%2Fphotos.example.net%2Fphotos&file%3Dvacation.jpg%26oauth_consumer_key%3Ddpf43f3p2l4k3l03%26oauth_nonce%3Dkllo9940pd9333jh%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1191242096%26oauth_token%3Dnnch734d00sl2jdk%26oauth_version%3D1.0%26size%3Doriginal'
self.assertEquals(expected, raw)
def test_get_normalized_parameters(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'multi': ['FOO','BAR', u'\u00ae', '\xc2\xae'],
'multi_same': ['FOO','FOO'],
'uni_utf8_bytes': '\xc2\xae',
'uni_unicode_object': u'\u00ae'
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
expected='multi=BAR&multi=FOO&multi=%C2%AE&multi=%C2%AE&multi_same=FOO&multi_same=FOO&oauth_consumer_key=0685bd9184jfhq22&oauth_nonce=4572616e48616d6d65724c61686176&oauth_signature_method=HMAC-SHA1&oauth_timestamp=137131200&oauth_token=ad180jjd733klru7&oauth_version=1.0&uni_unicode_object=%C2%AE&uni_utf8_bytes=%C2%AE'
self.assertEquals(expected, res)
def test_get_normalized_parameters_ignores_auth_signature(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_signature': "some-random-signature-%d" % random.randint(1000, 2000),
'oauth_token': "ad180jjd733klru7",
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
self.assertNotEquals(urllib.urlencode(sorted(params.items())), res)
foo = params.copy()
del foo["oauth_signature"]
self.assertEqual(urllib.urlencode(sorted(foo.items())), res)
def test_set_signature_method(self):
consumer = oauth.Consumer('key', 'secret')
client = oauth.Client(consumer)
class Blah:
pass
try:
client.set_signature_method(Blah())
self.fail("Client.set_signature_method() accepted invalid method.")
except ValueError:
pass
m = oauth.SignatureMethod_HMAC_SHA1()
client.set_signature_method(m)
self.assertEquals(m, client.method)
def test_get_normalized_string_escapes_spaces_properly(self):
url = "http://sp.example.com/"
params = {
"some_random_data": random.randint(100, 1000),
"data": "This data with a random number (%d) has spaces!" % random.randint(1000, 2000),
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
expected = urllib.urlencode(sorted(params.items())).replace('+', '%20')
self.assertEqual(expected, res)
@mock.patch('oauth2.Request.make_timestamp')
@mock.patch('oauth2.Request.make_nonce')
def test_request_nonutf8_bytes(self, mock_make_nonce, mock_make_timestamp):
mock_make_nonce.return_value = 5
mock_make_timestamp.return_value = 6
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_token': tok.key,
'oauth_consumer_key': con.key
}
# If someone passes a sequence of bytes which is not ascii for
# url, we'll raise an exception as early as possible.
url = "http://sp.example.com/\x92" # It's actually cp1252-encoding...
self.assertRaises(TypeError, oauth.Request, method="GET", url=url, parameters=params)
# And if they pass an unicode, then we'll use it.
url = u'http://sp.example.com/\u2019'
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'cMzvCkhvLL57+sTIxLITTHfkqZk=')
# And if it is a utf-8-encoded-then-percent-encoded non-ascii
# thing, we'll decode it and use it.
url = "http://sp.example.com/%E2%80%99"
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'yMLKOyNKC/DkyhUOb8DLSvceEWE=')
# Same thing with the params.
url = "http://sp.example.com/"
# If someone passes a sequence of bytes which is not ascii in
# params, we'll raise an exception as early as possible.
params['non_oauth_thing'] = '\xae', # It's actually cp1252-encoding...
self.assertRaises(TypeError, oauth.Request, method="GET", url=url, parameters=params)
# And if they pass a unicode, then we'll use it.
params['non_oauth_thing'] = u'\u2019'
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_signature'], '0GU50m0v60CVDB5JnoBXnvvvKx4=')
# And if it is a utf-8-encoded non-ascii thing, we'll decode
# it and use it.
params['non_oauth_thing'] = '\xc2\xae'
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'pqOCu4qvRTiGiXB8Z61Jsey0pMM=')
# Also if there are non-utf8 bytes in the query args.
url = "http://sp.example.com/?q=\x92" # cp1252
self.assertRaises(TypeError, oauth.Request, method="GET", url=url, parameters=params)
def test_request_hash_of_body(self):
tok = oauth.Token(key="token", secret="tok-test-secret")
con = oauth.Consumer(key="consumer", secret="con-test-secret")
# Example 1a from Appendix A.1 of
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# Except that we get a differetn result than they do.
params = {
'oauth_version': "1.0",
'oauth_token': tok.key,
'oauth_nonce': 10288510250934,
'oauth_timestamp': 1236874155,
'oauth_consumer_key': con.key
}
url = u"http://www.example.com/resource"
req = oauth.Request(method="PUT", url=url, parameters=params, body="Hello World!", is_form_encoded=False)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_body_hash'], 'Lve95gjOVATpfV8EL5X4nxwjKHE=')
self.failUnlessReallyEqual(req['oauth_signature'], 't+MX8l/0S8hdbVQL99nD0X1fPnM=')
# oauth-bodyhash.html A.1 has
# '08bUFF%2Fjmp59mWB7cSgCYBUpJ0U%3D', but I don't see how that
# is possible.
# Example 1b
params = {
'oauth_version': "1.0",
'oauth_token': tok.key,
'oauth_nonce': 10369470270925,
'oauth_timestamp': 1236874236,
'oauth_consumer_key': con.key
}
req = oauth.Request(method="PUT", url=url, parameters=params, body="Hello World!", is_form_encoded=False)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_body_hash'], 'Lve95gjOVATpfV8EL5X4nxwjKHE=')
self.failUnlessReallyEqual(req['oauth_signature'], 'CTFmrqJIGT7NsWJ42OrujahTtTc=')
# Appendix A.2
params = {
'oauth_version': "1.0",
'oauth_token': tok.key,
'oauth_nonce': 8628868109991,
'oauth_timestamp': 1238395022,
'oauth_consumer_key': con.key
}
req = oauth.Request(method="GET", url=url, parameters=params, is_form_encoded=False)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_body_hash'], '2jmj7l5rSw0yVb/vlWAYkK/YBwk=')
self.failUnlessReallyEqual(req['oauth_signature'], 'Zhl++aWSP0O3/hYQ0CuBc7jv38I=')
def test_sign_request(self):
    """sign_request with HMAC-SHA1 and PLAINTEXT, including non-ASCII URLs.

    Uses a fixed nonce/timestamp so the expected signatures are stable.
    Both utf-8 byte strings and unicode objects must sign identically.
    """
    url = "http://sp.example.com/"
    params = {
        'oauth_version': "1.0",
        'oauth_nonce': "4572616e48616d6d65724c61686176",
        'oauth_timestamp': "137131200"
    }
    tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
    con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
    params['oauth_token'] = tok.key
    params['oauth_consumer_key'] = con.key
    req = oauth.Request(method="GET", url=url, parameters=params)
    # Map expected signature -> method; PLAINTEXT is just the two secrets
    # joined with '&'.
    methods = {
        'DX01TdHws7OninCLK9VztNTH1M4=': oauth.SignatureMethod_HMAC_SHA1(),
        'con-test-secret&tok-test-secret': oauth.SignatureMethod_PLAINTEXT()
    }
    for exp, method in methods.items():
        req.sign_request(method, con, tok)
        self.assertEquals(req['oauth_signature_method'], method.name)
        self.assertEquals(req['oauth_signature'], exp)
    # Also if there are non-ascii chars in the URL.
    url = "http://sp.example.com/\xe2\x80\x99" # utf-8 bytes
    req = oauth.Request(method="GET", url=url, parameters=params)
    req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
    self.assertEquals(req['oauth_signature'], 'loFvp5xC7YbOgd9exIO6TxB7H4s=')
    url = u'http://sp.example.com/\u2019' # Python unicode object
    req = oauth.Request(method="GET", url=url, parameters=params)
    req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
    self.assertEquals(req['oauth_signature'], 'loFvp5xC7YbOgd9exIO6TxB7H4s=')
    # Also if there are non-ascii chars in the query args.
    url = "http://sp.example.com/?q=\xe2\x80\x99" # utf-8 bytes
    req = oauth.Request(method="GET", url=url, parameters=params)
    req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
    self.assertEquals(req['oauth_signature'], 'IBw5mfvoCsDjgpcsVKbyvsDqQaU=')
    url = u'http://sp.example.com/?q=\u2019' # Python unicode object
    req = oauth.Request(method="GET", url=url, parameters=params)
    req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
    self.assertEquals(req['oauth_signature'], 'IBw5mfvoCsDjgpcsVKbyvsDqQaU=')
def test_from_request(self):
    """Round-trip a Request through headers and through a query string.

    Also checks that malformed OAuth headers raise oauth.Error and that a
    call with no usable input returns None.
    """
    url = "http://sp.example.com/"
    params = {
        'oauth_version': "1.0",
        'oauth_nonce': "4572616e48616d6d65724c61686176",
        'oauth_timestamp': "137131200",
        'oauth_consumer_key': "0685bd9184jfhq22",
        'oauth_signature_method': "HMAC-SHA1",
        'oauth_token': "ad180jjd733klru7",
        'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
    }
    req = oauth.Request("GET", url, params)
    headers = req.to_header()
    # Test from the headers
    req = oauth.Request.from_request("GET", url, headers)
    self.assertEquals(req.method, "GET")
    self.assertEquals(req.url, url)
    self.assertEquals(params, req.copy())
    # Test with bad OAuth headers
    bad_headers = {
        'Authorization' : 'OAuth this is a bad header'
    }
    self.assertRaises(oauth.Error, oauth.Request.from_request, "GET",
        url, bad_headers)
    # Test getting from query string
    qs = urllib.urlencode(params)
    req = oauth.Request.from_request("GET", url, query_string=qs)
    # parse_qs returns lists; unquote the single values to compare.
    exp = parse_qs(qs, keep_blank_values=False)
    for k, v in exp.iteritems():
        exp[k] = urllib.unquote(v[0])
    self.assertEquals(exp, req.copy())
    # Test that a boned from_request() call returns None
    req = oauth.Request.from_request("GET", url)
    self.assertEquals(None, req)
def test_from_token_and_callback(self):
    """from_token_and_callback only sets oauth_callback when one is given."""
    url = "http://sp.example.com/"
    # NOTE(review): params is built but never used in this test; kept as-is.
    params = {
        'oauth_version': "1.0",
        'oauth_nonce': "4572616e48616d6d65724c61686176",
        'oauth_timestamp': "137131200",
        'oauth_consumer_key': "0685bd9184jfhq22",
        'oauth_signature_method': "HMAC-SHA1",
        'oauth_token': "ad180jjd733klru7",
        'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
    }
    tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
    req = oauth.Request.from_token_and_callback(tok)
    self.assertFalse('oauth_callback' in req)
    self.assertEquals(req['oauth_token'], tok.key)
    req = oauth.Request.from_token_and_callback(tok, callback=url)
    self.assertTrue('oauth_callback' in req)
    self.assertEquals(req['oauth_callback'], url)
def test_from_consumer_and_token(self):
    """from_consumer_and_token carries token key, consumer key and verifier."""
    url = "http://sp.example.com/"
    tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
    tok.set_verifier('this_is_a_test_verifier')
    con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
    req = oauth.Request.from_consumer_and_token(con, token=tok,
        http_method="GET", http_url=url)
    self.assertEquals(req['oauth_token'], tok.key)
    self.assertEquals(req['oauth_consumer_key'], con.key)
    self.assertEquals(tok.verifier, req['oauth_verifier'])
class SignatureMethod_Bad(oauth.SignatureMethod):
    """A deliberately broken signature method used to test server rejection.

    Registered under the name "BAD"; always produces a signature that can
    never verify.
    """
    name = "BAD"

    def signing_base(self, request, consumer, token):
        # No real signing base -- the signature is constant anyway.
        return ""

    def sign(self, request, consumer, token):
        # Constant, invalid signature.
        return "invalid-signature"
class TestServer(unittest.TestCase):
    """Exercises oauth.Server: the signature-method registry and request
    verification, both the success path and the standard failure modes
    (bad version, unknown signature method, missing signature)."""

    def setUp(self):
        # A valid HMAC-SHA1-signed GET request shared by the tests below.
        url = "http://sp.example.com/"
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['FOO','BAR'],
            'foo': 59
        }
        self.consumer = oauth.Consumer(key="consumer-key",
            secret="consumer-secret")
        self.token = oauth.Token(key="token-key", secret="token-secret")
        params['oauth_token'] = self.token.key
        params['oauth_consumer_key'] = self.consumer.key
        self.request = oauth.Request(method="GET", url=url, parameters=params)
        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        self.request.sign_request(signature_method, self.consumer, self.token)

    def test_init(self):
        """Server accepts an initial signature_methods mapping and defaults
        to an empty one."""
        server = oauth.Server(signature_methods={'HMAC-SHA1' : oauth.SignatureMethod_HMAC_SHA1()})
        self.assertTrue('HMAC-SHA1' in server.signature_methods)
        self.assertTrue(isinstance(server.signature_methods['HMAC-SHA1'],
            oauth.SignatureMethod_HMAC_SHA1))
        server = oauth.Server()
        self.assertEquals(server.signature_methods, {})

    def test_add_signature_method(self):
        """add_signature_method registers by method name and returns the
        updated mapping."""
        server = oauth.Server()
        res = server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        self.assertTrue(len(res) == 1)
        self.assertTrue('HMAC-SHA1' in res)
        self.assertTrue(isinstance(res['HMAC-SHA1'],
            oauth.SignatureMethod_HMAC_SHA1))
        res = server.add_signature_method(oauth.SignatureMethod_PLAINTEXT())
        self.assertTrue(len(res) == 2)
        self.assertTrue('PLAINTEXT' in res)
        self.assertTrue(isinstance(res['PLAINTEXT'],
            oauth.SignatureMethod_PLAINTEXT))

    def test_verify_request(self):
        """A correctly signed request verifies and yields the non-oauth
        parameters (including list-valued ones)."""
        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        parameters = server.verify_request(self.request, self.consumer,
            self.token)
        self.assertTrue('bar' in parameters)
        self.assertTrue('foo' in parameters)
        self.assertTrue('multi' in parameters)
        self.assertEquals(parameters['bar'], 'blerg')
        self.assertEquals(parameters['foo'], 59)
        self.assertEquals(parameters['multi'], ['FOO','BAR'])

    def test_build_authenticate_header(self):
        """build_authenticate_header emits a WWW-Authenticate OAuth challenge
        with the given realm."""
        server = oauth.Server()
        headers = server.build_authenticate_header('example.com')
        self.assertTrue('WWW-Authenticate' in headers)
        self.assertEquals('OAuth realm="example.com"',
            headers['WWW-Authenticate'])

    def test_no_version(self):
        """A request without oauth_version still verifies (the parameter is
        optional per the spec)."""
        url = "http://sp.example.com/"
        params = {
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['FOO','BAR'],
            'foo': 59
        }
        self.consumer = oauth.Consumer(key="consumer-key",
            secret="consumer-secret")
        self.token = oauth.Token(key="token-key", secret="token-secret")
        params['oauth_token'] = self.token.key
        params['oauth_consumer_key'] = self.consumer.key
        self.request = oauth.Request(method="GET", url=url, parameters=params)
        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        self.request.sign_request(signature_method, self.consumer, self.token)
        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        # Must not raise; the returned parameters are not inspected here.
        parameters = server.verify_request(self.request, self.consumer,
            self.token)

    def test_invalid_version(self):
        """An unsupported oauth_version is rejected with oauth.Error."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': '222.9922',
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['foo','bar'],
            'foo': 59
        }
        consumer = oauth.Consumer(key="consumer-key",
            secret="consumer-secret")
        token = oauth.Token(key="token-key", secret="token-secret")
        params['oauth_token'] = token.key
        params['oauth_consumer_key'] = consumer.key
        request = oauth.Request(method="GET", url=url, parameters=params)
        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        request.sign_request(signature_method, consumer, token)
        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        self.assertRaises(oauth.Error, server.verify_request, request, consumer, token)

    def test_invalid_signature_method(self):
        """A signature produced by an unregistered method is rejected."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': '1.0',
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['FOO','BAR'],
            'foo': 59
        }
        consumer = oauth.Consumer(key="consumer-key",
            secret="consumer-secret")
        token = oauth.Token(key="token-key", secret="token-secret")
        params['oauth_token'] = token.key
        params['oauth_consumer_key'] = consumer.key
        request = oauth.Request(method="GET", url=url, parameters=params)
        # SignatureMethod_Bad ("BAD") is not registered with the server.
        signature_method = SignatureMethod_Bad()
        request.sign_request(signature_method, consumer, token)
        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        self.assertRaises(oauth.Error, server.verify_request, request,
            consumer, token)

    def test_missing_signature(self):
        """Deleting oauth_signature triggers oauth.MissingSignature."""
        url = "http://sp.example.com/"
        params = {
            'oauth_version': '1.0',
            'oauth_nonce': "4572616e48616d6d65724c61686176",
            'oauth_timestamp': int(time.time()),
            'bar': 'blerg',
            'multi': ['FOO','BAR'],
            'foo': 59
        }
        consumer = oauth.Consumer(key="consumer-key",
            secret="consumer-secret")
        token = oauth.Token(key="token-key", secret="token-secret")
        params['oauth_token'] = token.key
        params['oauth_consumer_key'] = consumer.key
        request = oauth.Request(method="GET", url=url, parameters=params)
        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        request.sign_request(signature_method, consumer, token)
        del request['oauth_signature']
        server = oauth.Server()
        server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
        self.assertRaises(oauth.MissingSignature, server.verify_request,
            request, consumer, token)
# Request Token: http://oauth-sandbox.sevengoslings.net/request_token
# Auth: http://oauth-sandbox.sevengoslings.net/authorize
# Access Token: http://oauth-sandbox.sevengoslings.net/access_token
# Two-legged: http://oauth-sandbox.sevengoslings.net/two_legged
# Three-legged: http://oauth-sandbox.sevengoslings.net/three_legged
# Key: bd37aed57e15df53
# Secret: 0e9e6413a9ef49510a4f68ed02cd
class TestClient(unittest.TestCase):
    """Tests for oauth.Client.

    The *_token and *_legged tests hit the public sandbox at
    oauth-sandbox.sevengoslings.net over the network; the remaining tests
    mock httplib2.Http.request so no network access is needed.
    """
    # oauth_uris = {
    #     'request_token': '/request_token.php',
    #     'access_token': '/access_token.php'
    # }
    # Endpoint paths on the sandbox host.
    oauth_uris = {
        'request_token': '/request_token',
        'authorize': '/authorize',
        'access_token': '/access_token',
        'two_legged': '/two_legged',
        'three_legged': '/three_legged'
    }
    # Publicly documented sandbox credentials (not secrets).
    consumer_key = 'bd37aed57e15df53'
    consumer_secret = '0e9e6413a9ef49510a4f68ed02cd'
    host = 'http://oauth-sandbox.sevengoslings.net'

    def setUp(self):
        self.consumer = oauth.Consumer(key=self.consumer_key,
            secret=self.consumer_secret)
        # Generic form body used by the two-legged tests.
        self.body = {
            'foo': 'bar',
            'bar': 'foo',
            'multi': ['FOO','BAR'],
            'blah': 599999
        }

    def _uri(self, type):
        """Return the full sandbox URL for an endpoint type; raise KeyError
        for unknown types."""
        uri = self.oauth_uris.get(type)
        if uri is None:
            raise KeyError("%s is not a valid OAuth URI type." % type)
        return "%s%s" % (self.host, uri)

    def create_simple_multipart_data(self, data):
        """Hand-roll a multipart/form-data body for the given dict.

        Returns (content_type, body) with a random boundary.
        """
        boundary = '---Boundary-%d' % random.randint(1,1000)
        crlf = '\r\n'
        items = []
        for key, value in data.iteritems():
            items += [
                '--'+boundary,
                'Content-Disposition: form-data; name="%s"'%str(key),
                '',
                str(value),
            ]
        items += ['', '--'+boundary+'--', '']
        content_type = 'multipart/form-data; boundary=%s' % boundary
        return content_type, crlf.join(items)

    def test_init(self):
        """Client rejects objects that are not Consumer/Token instances."""
        class Blah():
            pass
        try:
            client = oauth.Client(Blah())
            self.fail("Client.__init__() accepted invalid Consumer.")
        except ValueError:
            pass
        consumer = oauth.Consumer('token', 'secret')
        try:
            client = oauth.Client(consumer, Blah())
            self.fail("Client.__init__() accepted invalid Token.")
        except ValueError:
            pass

    def test_access_token_get(self):
        """Test getting an access token via GET."""
        client = oauth.Client(self.consumer, None)
        resp, content = client.request(self._uri('request_token'), "GET")
        self.assertEquals(int(resp['status']), 200)

    def test_access_token_post(self):
        """Test getting an access token via POST."""
        client = oauth.Client(self.consumer, None)
        resp, content = client.request(self._uri('request_token'), "POST")
        self.assertEquals(int(resp['status']), 200)
        res = dict(parse_qsl(content))
        self.assertTrue('oauth_token' in res)
        self.assertTrue('oauth_token_secret' in res)

    def _two_legged(self, method):
        # Shared helper: POST/GET the form body to the two_legged endpoint.
        client = oauth.Client(self.consumer, None)
        return client.request(self._uri('two_legged'), method,
            body=urllib.urlencode(self.body))

    def test_two_legged_post(self):
        """A test of a two-legged OAuth POST request."""
        resp, content = self._two_legged("POST")
        self.assertEquals(int(resp['status']), 200)

    def test_two_legged_get(self):
        """A test of a two-legged OAuth GET request."""
        resp, content = self._two_legged("GET")
        self.assertEquals(int(resp['status']), 200)

    @mock.patch('httplib2.Http.request')
    def test_multipart_post_does_not_alter_body(self, mockHttpRequest):
        """A multipart POST body must reach httplib2 byte-for-byte intact."""
        random_result = random.randint(1,100)
        data = {
            'rand-%d'%random.randint(1,100):random.randint(1,100),
        }
        content_type, body = self.create_simple_multipart_data(data)
        client = oauth.Client(self.consumer, None)
        uri = self._uri('two_legged')
        def mockrequest(cl, ur, **kw):
            # Assert the exact call httplib2 receives, then return a marker
            # value so we can confirm it is passed back unchanged.
            self.failUnless(cl is client)
            self.failUnless(ur is uri)
            self.failUnlessEqual(frozenset(kw.keys()), frozenset(['method', 'body', 'redirections', 'connection_type', 'headers']))
            self.failUnlessEqual(kw['body'], body)
            self.failUnlessEqual(kw['connection_type'], None)
            self.failUnlessEqual(kw['method'], 'POST')
            self.failUnlessEqual(kw['redirections'], httplib2.DEFAULT_MAX_REDIRECTS)
            self.failUnless(isinstance(kw['headers'], dict))
            return random_result
        mockHttpRequest.side_effect = mockrequest
        result = client.request(uri, 'POST', headers={'Content-Type':content_type}, body=body)
        self.assertEqual(result, random_result)

    @mock.patch('httplib2.Http.request')
    def test_url_with_query_string(self, mockHttpRequest):
        """Existing query-string parameters survive signing: the signed URL
        must carry the same parameters (modulo nonce/timestamp/signature)."""
        uri = 'http://example.com/foo/bar/?show=thundercats&character=snarf'
        client = oauth.Client(self.consumer, None)
        random_result = random.randint(1,100)
        def mockrequest(cl, ur, **kw):
            self.failUnless(cl is client)
            self.failUnlessEqual(frozenset(kw.keys()), frozenset(['method', 'body', 'redirections', 'connection_type', 'headers']))
            self.failUnlessEqual(kw['body'], '')
            self.failUnlessEqual(kw['connection_type'], None)
            self.failUnlessEqual(kw['method'], 'GET')
            self.failUnlessEqual(kw['redirections'], httplib2.DEFAULT_MAX_REDIRECTS)
            self.failUnless(isinstance(kw['headers'], dict))
            # Build an independently signed request and compare query args.
            req = oauth.Request.from_consumer_and_token(self.consumer, None,
                http_method='GET', http_url=uri, parameters={})
            req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), self.consumer, None)
            expected = parse_qsl(urlparse.urlparse(req.to_url()).query)
            actual = parse_qsl(urlparse.urlparse(ur).query)
            self.failUnlessEqual(len(expected), len(actual))
            actual = dict(actual)
            for key, value in expected:
                # These three differ between any two signings by design.
                if key not in ('oauth_signature', 'oauth_nonce', 'oauth_timestamp'):
                    self.failUnlessEqual(actual[key], value)
            return random_result
        mockHttpRequest.side_effect = mockrequest
        client.request(uri, 'GET')

    @mock.patch('httplib2.Http.request')
    @mock.patch('oauth2.Request.from_consumer_and_token')
    def test_multiple_values_for_a_key(self, mockReqConstructor, mockHttpRequest):
        """Repeated form keys (multi=1&multi=2) must be parsed into a list
        and re-encoded with both values."""
        client = oauth.Client(self.consumer, None)
        request = oauth.Request("GET", "http://example.com/fetch.php", parameters={'multi': ['1', '2']})
        mockReqConstructor.return_value = request
        client.request('http://whatever', 'POST', body='multi=1&multi=2')
        self.failUnlessEqual(mockReqConstructor.call_count, 1)
        self.failUnlessEqual(mockReqConstructor.call_args[1]['parameters'], {'multi': ['1', '2']})
        self.failUnless('multi=1' in mockHttpRequest.call_args[1]['body'])
        self.failUnless('multi=2' in mockHttpRequest.call_args[1]['body'])
# Run the whole oauth2 test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
cloudcache/namebench | refs/heads/master | nb_third_party/dns/rdtypes/ANY/RRSIG.py | 248 | # Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.sigbase
class RRSIG(dns.rdtypes.sigbase.SIGBase):
    """RRSIG record (RFC 4034, section 3).

    The wire and text formats are identical to the legacy SIG record, so
    all parsing/formatting behavior is inherited from SIGBase unchanged.
    """
    # The redundant `pass` after the docstring was removed; a docstring is
    # a sufficient class body.
|
nvoron23/socialite | refs/heads/master | jython/Lib/test/test_listcomp_jy.py | 23 | import unittest
from test import test_support
class ListCompTestCase(unittest.TestCase):
    """Regression test for long list comprehensions.

    See http://bugs.jython.org/issue1205 -- Jython used to miscompile
    comprehensions with many nested for/if clauses.
    """

    def test_long_listcomp(self):
        # For a long list comp, we compute the Hardy-Ramanujan number
        # http://en.wikipedia.org/wiki/1729_(number)
        res = [(x1**3+x2**3,(x1,x2),(y1,y2))
            for x1 in range(20) for x2 in range(20) if x1 < x2 # x pairs
            for y1 in range(20) for y2 in range(20) if y1 < y2 # y pairs
            if x1**3+x2**3 == y1**3+y2**3 # equal sums
            if (x1,x2) < (y1,y2)
            ]
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use the canonical assertEqual.
        self.assertEqual(1729, min(res)[0])
        self.assertEqual(len(res), 2)
def test_main():
    # Entry point used by the CPython/Jython regrtest harness
    # (test.test_support is the Python 2 name of test.support).
    test_support.run_unittest(ListCompTestCase)
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
|
iwvelando/conky_wunderground_scripts | refs/heads/master | wunderground_daily_forecast.py | 1 | import urllib2
import json
import sys
import os
# Extract command line arguments and grab the wunderground API key
location = sys.argv[1]
nPeriods = int(sys.argv[2])
apiKey = open(os.getenv('HOME') + '/.api/wunderground','r').readline().split('\n')[0]
# Wunderground API call for 10 day forecast data
f = urllib2.urlopen('http://api.wunderground.com/api/' + apiKey + '/forecast10day/q/' + location + '.json')
json_string = f.read()
f.close()
parsed_json = json.loads(json_string)
forecast = parsed_json['forecast']['simpleforecast']['forecastday']
# Limit to nPeriods or the max days of forecast, whichever is lower
nPeriods = min(nPeriods,len(forecast))
#Set a dictionary for parsing the icon field to a smaller text
icons = {'chanceflurries' : 'flurry?',
'chancerain' : 'rain?',
'chancesleet' : 'sleet?',
'chancesnow' : 'snow?',
'chancetstorms' : 'tstorm?',
'clear' : 'clear',
'cloudy' : 'cloudy',
'flurries' : 'flurry',
'fog' : 'fog',
'hazy' : 'hazy',
'mostlycloudy' : 'cloudy',
'mostlysunny' : 'sunny',
'partlycloudy' : 'cloudy~',
'partlysunny' : 'sunny~',
'rain' : 'rain',
'sleet' : 'sleet',
'snow' : 'snow',
'sunny' : 'sunny',
'tstorms' : 'tstorm',
'unknown' : '???'}
# Iterate through nPeriods days starting from tomorrow and print their data
for i in range(nPeriods):
date = int(forecast[i]['date']['day'])
day = str(forecast[i]['date']['weekday_short'])[0:2]
temp_low = int(forecast[i]['low']['fahrenheit'])
temp_high = int(forecast[i]['high']['fahrenheit'])
wind_avg = int(forecast[i]['avewind']['mph'])
wind_avg_dir = int(forecast[i]['avewind']['degrees'])
wind_max = int(forecast[i]['maxwind']['mph'])
wind_max_dir = int(forecast[i]['maxwind']['degrees'])
cond = icons[str(forecast[i]['icon'])]
rain_chance = int(forecast[i]['pop'])
humidity_avg = int(forecast[i]['avehumidity'])
precipitation = str(forecast[i]['qpf_allday']['in'])
print '{:2d} | {:3d} | {:2d} @ {:3d} | {:3d} | {:7s}\n{:2s} | {:3d} | {:2d} @ {:3d} | {:3d} | {:s}'.format(date,temp_low,wind_avg,wind_avg_dir,rain_chance,cond,day,temp_high,wind_max,wind_max_dir,humidity_avg,precipitation)
|
habibiefaried/ryu | refs/heads/master | ryu/services/protocols/bgp/operator/commands/root.py | 52 | from ryu.services.protocols.bgp.operator.command import Command
from ryu.services.protocols.bgp.operator.commands.clear import ClearCmd
from ryu.services.protocols.bgp.operator.commands.set import SetCmd
from ryu.services.protocols.bgp.operator.commands.show import ShowCmd
class RootCmd(Command):
    # Root of the operator CLI command tree: the first token of a command
    # line selects one of these subcommand classes.
    subcommands = {
        'show': ShowCmd,
        'set': SetCmd,
        'clear': ClearCmd}
|
dricciardelli/vae2vec | refs/heads/master | capt_gen_n2e_cs.py | 1 | # -*- coding: utf-8 -*-
import math
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import pickle
import pickle as pkl
import cv2
import skimage
import tensorflow.python.platform
from tensorflow.python.ops import rnn
from keras.preprocessing import sequence
from collections import Counter
from collections import defaultdict
import itertools
test_image_path='./data/acoustic-guitar-player.jpg'
vgg_path='./data/vgg16-20160129.tfmodel'
n=50000-2
def map_lambda():
    # Module-level default_factory for the word->id defaultdict: unknown
    # words map to id n+1 (the <UNK> slot). A named function (instead of a
    # lambda) keeps the defaultdict picklable.
    return n+1
def rev_map_lambda():
    # Picklable default_factory for the id->word reverse map: unknown ids
    # render as the "<UNK>" token.
    return "<UNK>"
def load_text(n,capts,num_samples=None):
    """Load cached vocab maps and pre-computed one-hot arrays from disk.

    The original preprocessing (reading the corpus, building the vocab,
    one-hot encoding) is preserved below as commented-out history; the live
    path unpickles mapaoh.pkl / rev_mapaoh.pkl and memory-maps the cached
    .npy arrays.

    Returns (X, y, mask, rev_map); X/mask come from disk regardless of
    `capts`, y is ycoh.npy when capts is given, else yaoh.npy.
    """
    # # fname = 'Oxford_English_Dictionary.txt'
    # # txt = []
    # # with open(fname,'rb') as f:
    # #     txt = f.readlines()
    # # txt = [x.decode('utf-8').strip() for x in txt]
    # # txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
    # # List of words
    # # word_list = [x.split(' ', 1)[0].strip() for x in txt]
    # # # List of definitions
    # # def_list = [x.split(' ', 1)[1].strip()for x in txt]
    # with open('./training_data/training_data.pkl','rb') as raw:
    #     word_list,dl=pkl.load(raw)
    # def_list=[]
    # # def_list=[' '.join(defi) for defi in def_list]
    # i=0
    # while i<len( dl):
    #     defi=dl[i]
    #     if len(defi)>0:
    #         def_list+=[' '.join(defi)]
    #         i+=1
    #     else:
    #         dl.pop(i)
    #         word_list.pop(i)
    # maxlen=0
    # minlen=100
    # for defi in def_list:
    #     minlen=min(minlen,len(defi.split()))
    #     maxlen=max(maxlen,len(defi.split()))
    # print(minlen)
    # print(maxlen)
    # Fixed caption length used when the arrays were generated.
    maxlen=30
    # # Initialize the "CountVectorizer" object, which is scikit-learn's
    # # bag of words tool.
    # vectorizer = CountVectorizer(analyzer = "word",   \
    #     tokenizer = None,    \
    #     preprocessor = None, \
    #     stop_words = None,   \
    #     max_features = None, \
    #     token_pattern='\\b\\w+\\b') # Keep single character words
    # _map,rev_map=get_one_hot_map(word_list,def_list,n)
    _map=pkl.load(open('mapaoh.pkl','rb'))
    rev_map=pkl.load(open('rev_mapaoh.pkl','rb'))
    # NOTE(review): this condition looks inverted -- it *overwrites* a
    # caller-supplied num_samples with len(capts) and leaves None alone.
    # Harmless on the live path (num_samples is only used in commented-out
    # code), but confirm before reusing.
    if num_samples is not None:
        num_samples=len(capts)
    # X = map_one_hot(word_list[:num_samples],_map,1,n)
    # y = (36665, 56210)
    # print _map
    if capts is not None:
        # y,mask = map_one_hot(capts[:num_samples],_map,maxlen,n)
        # np.save('ycoh')
        # 'r' => read-only memory map; the array is not loaded into RAM.
        y=np.load('ycoh.npy','r')
    else:
        # np.save('X',X)
        # np.save('yc',y)
        # np.save('maskc',mask)
        y=np.load('yaoh.npy','r')
    X=np.load('Xaoh.npy','r')
    mask=np.load('maskaoh.npy','r')
    print (np.max(y))
    return X, y, mask,rev_map
def get_one_hot_map(to_def,corpus,n):
    """Build word<->id maps over the vocabulary of `to_def` + `corpus`.

    Ids are not sequential: each word is assigned an integer whose binary
    representation is a distinct combination of set bits over `binary_dim`
    bits, enumerated from fewest set bits upward.

    NOTE(review): relies on module-level `binary_dim` and `zero_end_tok`,
    which are defined elsewhere in this file -- confirm their values before
    reusing this function standalone.

    Returns (_map, rev_map): defaultdicts mapping word->id (unknown -> n+1)
    and id->word (unknown -> "<UNK>").
    """
    # words={}
    # for line in to_def:
    #     if line:
    #         words[line.split()[0]]=1
    # counts=defaultdict(int)
    # uniq=defaultdict(int)
    # for line in corpus:
    #     for word in line.split():
    #         if word not in words:
    #             counts[word]+=1
    # words=list(words.keys())
    words=[]
    counts=defaultdict(int)
    uniq=defaultdict(int)
    # Count raw occurrences of every whitespace-separated token.
    for line in to_def+corpus:
        for word in line.split():
            if word not in words:
                counts[word]+=1
    _map=defaultdict(lambda :n+1)
    rev_map=defaultdict(lambda:"<UNK>")
    # words=words[:25000]
    # Histogram of count values (printed below for diagnostics only).
    for i in counts.values():
        uniq[i]+=1
    print (len(words))
    # random.shuffle(words)
    # Keep the n most frequent tokens (words starts empty, so this is the
    # whole vocabulary cap).
    words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)]
    print (len(words))
    i=0
    # random.shuffle(words)
    # Enumerate bit patterns with 1,2,3,... set bits out of binary_dim and
    # hand each resulting integer out as the next word's id.
    for num_bits in range(binary_dim):
        for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1):
            bitmap=np.zeros(binary_dim)
            bitmap[np.array(bit_config)]=1
            num=bitmap*(2** np.arange(binary_dim ))
            num=np.sum(num).astype(np.uint32)
            word=words[i]
            _map[word]=num
            rev_map[num]=word
            i+=1
            if i>=len(words):
                break
        if i>=len(words):
            break
    # for word in words:
    #     i+=1
    #     _map[word]=i
    #     rev_map[i]=word
    # Reserved ids: n+1 unknown, 0 start (or '.' when zero_end_tok), n+2 end.
    rev_map[n+1]='<UNK>'
    if zero_end_tok:
        rev_map[0]='.'
    else:
        rev_map[0]='Start'
    rev_map[n+2]='End'
    print (list(reversed(sorted(uniq.items()))))
    print (len(list(uniq.items())))
    # print rev_map
    return _map,rev_map
def map_word_emb(corpus,_map):
    """Map each word in `corpus` to its id via `_map`.

    When the module-level flag `get_rand_vec` is set, also returns a second
    array of ids for randomly drawn *different* words (negative samples).

    NOTE(review): depends on module globals `get_rand_vec` and `random`,
    neither of which is defined/imported in the visible part of this file
    -- confirm they exist before calling.
    """
    ### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY)
    rtn=[]
    rtn2=[]
    for word in corpus:
        mapped=_map[word]
        rtn.append(mapped)
        if get_rand_vec:
            # Draw a different word uniformly from the vocabulary keys.
            mapped_rand=random.choice(list(_map.keys()))
            while mapped_rand==word:
                mapped_rand=random.choice(list(_map.keys()))
            mapped_rand=_map[mapped_rand]
            rtn2.append(mapped_rand)
    if get_rand_vec:
        return np.array(rtn),np.array(rtn2)
    return np.array(rtn)
def map_one_hot(corpus,_map,maxlen,n):
    """Encode `corpus` with the word->id map `_map`.

    maxlen == 1 (single-word corpus entries):
        * module flag `form2` False -> (len(corpus), n+3) one-hot matrix;
          empty entries light the last column (index n+2).
        * `form2` True -> 1-D array of integer ids; empty entries get id
          n+2 (the same "empty/end" class as the one-hot branch).
    maxlen > 1 (sentence entries):
        returns (ids, mask), both (len(corpus), maxlen+2): column 0 is the
        start slot, words fill columns 1..maxlen-1 (truncated), then an end
        token (0 when module flag `zero_end_tok` is set, else n+2). `mask`
        is 1.0 on the populated positions.

    NOTE(review): depends on module globals `form2` and `zero_end_tok`
    defined elsewhere in this file.
    """
    if maxlen==1:
        if not form2:
            total_not=0
            rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
            for l,line in enumerate(corpus):
                if len(line)==0:
                    rtn[l,-1]=1
                else:
                    mapped=_map[line]
                    if mapped==75001:
                        total_not+=1
                    rtn[l,mapped]=1
            print (total_not,len(corpus))
            return rtn
        else:
            total_not=0
            rtn=np.zeros([len(corpus)],dtype=np.float32)
            for l,line in enumerate(corpus):
                if len(line)==0:
                    # BUGFIX: rtn is 1-D here; the original `rtn[l,-1]=1`
                    # raised IndexError. Assign the id that corresponds to
                    # the one-hot branch's last column (the n+2 class).
                    rtn[l]=n+2
                else:
                    mapped=_map[line]
                    if mapped==75001:
                        total_not+=1
                    rtn[l]=mapped
            print (total_not,len(corpus))
            return rtn
    else:
        if form2:
            rtn=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
        else:
            rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
        print (rtn.shape)
        mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
        print (mask.shape)
        mask[:,1]=1.0
        # Diagnostic counters: total words, (unused) misses, <UNK> hits.
        totes=0
        nopes=0
        wtf=0
        for l,_line in enumerate(corpus):
            x=0
            line=_line.split()
            for i in range(min(len(line),maxlen-1)):
                # if line[i] not in _map:
                #     nopes+=1
                mapped=_map[line[i]]
                rtn[l,i+1]=mapped
                if mapped==n+1:
                    wtf+=1
                mask[l,i+1]=1.0
                totes+=1
                x=i+1
            # Append the end-of-sequence token after the last real word.
            to_app=n+2
            if zero_end_tok:
                to_app=0
            rtn[l,x+1]=to_app
            mask[l,x+1]=1.0
        print (nopes,totes,wtf)
        return rtn,mask
def xavier_init(fan_in, fan_out, constant=1e-4):
    """Xavier (Glorot) uniform initialization of network weights.

    Samples a (fan_in, fan_out) float32 tensor uniformly from
    [-bound, bound] where bound = constant * sqrt(6 / (fan_in + fan_out)).
    See https://stackoverflow.com/questions/33640581 for the derivation;
    note the unusually small default scaling constant.
    """
    bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform(
        (fan_in, fan_out),
        minval=-bound,
        maxval=bound,
        dtype=tf.float32)
class Caption_Generator():
def __init__(self, dim_in, dim_embed, dim_hidden, batch_size, n_lstm_steps, n_words, init_b=None,from_image=False,n_input=None,n_lstm_input=None,n_z=None):
    """Set up the caption generator's variables and (optionally) a VGG graph.

    dim_in: image feature size; dim_embed: word embedding size;
    dim_hidden: LSTM state size; n_lstm_steps: caption length;
    n_input: vocabulary size; n_z: latent embedding size;
    init_b: optional initial value for the word-encoding bias;
    from_image: when True, load the frozen VGG16 graph from vgg_path so
    raw images can be fed at test time.
    """
    self.dim_in = dim_in
    self.dim_embed = dim_embed
    self.dim_hidden = dim_hidden
    self.batch_size = batch_size
    self.n_lstm_steps = n_lstm_steps
    self.n_words = n_words
    self.n_input = n_input
    self.n_lstm_input=n_lstm_input
    self.n_z=n_z
    if from_image:
        # Load the serialized VGG16 GraphDef and wire our image placeholder
        # into it; the session is created over the merged default graph.
        with open(vgg_path,'rb') as f:
            fileContent = f.read()
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(fileContent)
        self.images = tf.placeholder("float32", [1, 224, 224, 3])
        tf.import_graph_def(graph_def, input_map={"images":self.images})
        graph = tf.get_default_graph()
        self.sess = tf.InteractiveSession(graph=graph)
    self.from_image=from_image
    # declare the variables to be used for our word embeddings
    self.word_embedding = tf.Variable(tf.random_uniform([self.n_z, self.dim_embed], -0.1, 0.1), name='word_embedding')
    self.embedding_bias = tf.Variable(tf.zeros([dim_embed]), name='embedding_bias')
    # declare the LSTM itself
    self.lstm = tf.contrib.rnn.BasicLSTMCell(dim_hidden)
    # declare the variables to be used to embed the image feature embedding to the word embedding space
    self.img_embedding = tf.Variable(tf.random_uniform([dim_in, dim_hidden], -0.1, 0.1), name='img_embedding')
    self.img_embedding_bias = tf.Variable(tf.zeros([dim_hidden]), name='img_embedding_bias')
    # declare the variables to go from an LSTM output to a word encoding output
    self.word_encoding = tf.Variable(tf.random_uniform([dim_hidden, self.n_z], -0.1, 0.1), name='word_encoding')
    # initialize this bias variable from the preProBuildWordVocab output
    # optional initialization setter for encoding bias variable
    if init_b is not None:
        self.word_encoding_bias = tf.Variable(init_b, name='word_encoding_bias')
    else:
        self.word_encoding_bias = tf.Variable(tf.zeros([self.n_input]), name='word_encoding_bias')
    # CPU-pinned embedding used with tf.nn.embedding_lookup elsewhere.
    with tf.device('/cpu:0'):
        self.embw=tf.Variable(xavier_init(self.n_input,self.n_z),name='embw')
        self.embb=tf.Variable(tf.zeros([self.n_z]),name='embb')
        self.all_encoding_weights=[self.embw,self.embb]
def build_model(self):
    """Build the training graph.

    Feeds image features through a dense layer into an unrolled LSTM and
    scores its per-step outputs against (stop-gradient) variational word
    embeddings of the caption via cosine similarity; adds the KL term from
    the variational encoder.

    Returns (total_loss, img_placeholder, caption_placeholder, mask).
    NOTE(review): `_get_word_embedding` is defined outside this chunk --
    its exact contract (returns embedding + per-token KLD) is assumed here.
    """
    # declaring the placeholders for our extracted image feature vectors, our caption, and our mask
    # (describes how long our caption is with an array of 0/1 values of length `maxlen`
    img = tf.placeholder(tf.float32, [self.batch_size, self.dim_in])
    caption_placeholder = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
    mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])
    self.output_placeholder = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
    network_weights = self._initialize_weights()
    # getting an initial LSTM embedding from our image_imbedding
    image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
    flat_caption_placeholder=tf.reshape(caption_placeholder,[-1])
    #leverage one-hot sparsity to lookup embeddings fast
    embedded_input,KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],flat_caption_placeholder,logit=True)
    # Mask out KL contributions from padded positions.
    KLD_loss=tf.multiply(KLD_loss,tf.reshape(mask,[-1,1]))
    KLD_loss=tf.reduce_sum(KLD_loss)
    # word_embeddings=tf.matmul(embedded_input,self.word_embedding)+self.embedding_bias
    # with tf.device('/cpu:0'):
    #     word_embeddings=tf.nn.embedding_lookup(self.embw,flat_caption_placeholder)
    #     word_embeddings+=self.embb
    # word_embeddings=tf.reshape(word_embeddings,[self.batch_size,self.n_lstm_steps,-1])
    # Targets are treated as constants: no gradient flows into the
    # variational embedding from the caption loss.
    embedded_input=tf.stop_gradient(embedded_input)
    embedded_input=tf.reshape(embedded_input,[self.batch_size,self.n_lstm_steps,-1])
    embedded_input=tf.nn.l2_normalize(embedded_input,dim=-1)
    #initialize lstm state
    state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
    rnn_output=[]
    total_loss=0
    with tf.variable_scope("RNN"):
        # unroll lstm
        for i in range(self.n_lstm_steps):
            if i > 0:
                # if this isn’t the first iteration of our LSTM we need to get the word_embedding corresponding
                # to the (i-1)th word in our caption
                # NOTE(review): both branches feed image_embedding -- the
                # previous-word embedding described above is never used.
                # Confirm whether this is intentional.
                current_embedding = image_embedding
            else:
                #if this is the first iteration of our LSTM we utilize the embedded image as our input
                current_embedding = image_embedding
            if i > 0:
                # allows us to reuse the LSTM tensor variable on each iteration
                tf.get_variable_scope().reuse_variables()
            out, state = self.lstm(current_embedding, state)
            if i>0:
                # Cosine similarity between the projected LSTM output and
                # the target embedding, masked to real tokens.
                out=tf.nn.l2_normalize(tf.matmul(out,self.word_encoding)+self.word_encoding_bias,dim=-1)
                total_loss+=tf.reduce_sum(tf.reduce_sum(tf.multiply(embedded_input[:,i,:],out),axis=-1)*mask[:,i])
    # #perform classification of output
    # rnn_output=tf.concat(rnn_output,axis=1)
    # rnn_output=tf.reshape(rnn_output,[self.batch_size*(self.n_lstm_steps),-1])
    # encoded_output=tf.matmul(rnn_output,self.word_encoding)+self.word_encoding_bias
    # #get loss
    # normed_embedding= tf.nn.l2_normalize(encoded_output, dim=-1)
    # normed_target=tf.nn.l2_normalize(embedded_input,dim=-1)
    # cos_sim=tf.multiply(normed_embedding,normed_target)[:,1:]
    # cos_sim=(tf.reduce_sum(cos_sim,axis=-1))
    # cos_sim=tf.reshape(cos_sim,[self.batch_size,-1])
    # cos_sim=tf.reduce_sum(cos_sim[:,1:]*mask[:,1:])
    # Average similarity over unmasked steps; negate to minimize.
    cos_sim=total_loss/tf.reduce_sum(mask[:,1:])
    # self.exp_loss=tf.reduce_sum((-cos_sim))
    # self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size)
    total_loss = tf.reduce_sum(-(cos_sim))
    # mse=tf.reduce_sum(tf.reshape(tf.square(encoded_output-embedded_input),[self.batch_size,self.n_lstm_steps,-1]),axis=-1)[:,1:]*(mask[:,1:])
    # mse=tf.reduce_sum(mse)/tf.reduce_sum(mask[:,1:])
    #average over timeseries length
    # total_loss=tf.reduce_sum(masked_xentropy)/tf.reduce_sum(mask[:,1:])
    # total_loss=mse
    # Similarity-only loss kept separately for logging.
    self.print_loss=total_loss
    total_loss+=KLD_loss/tf.reduce_sum(mask)
    return total_loss, img, caption_placeholder, mask
def build_generator(self, maxlen, batchsize=1,from_image=False):
    """Build the inference graph that greedily emits `maxlen` word ids.

    Returns (img_placeholder, all_words); also stored on self.img /
    self.all_words.

    NOTE(review): every step feeds image_embedding (the previous-word
    feedback path is commented out), so `logit`/`best_word` never influence
    the next step -- mirrors the same pattern flagged in build_model.
    """
    #same setup as `build_model` function
    img = tf.placeholder(tf.float32, [self.batch_size, self.dim_in])
    image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
    state = self.lstm.zero_state(batchsize,dtype=tf.float32)
    #declare list to hold the words of our generated captions
    all_words = []
    with tf.variable_scope("RNN"):
        # in the first iteration we have no previous word, so we directly pass in the image embedding
        # and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
        output, state = self.lstm(image_embedding, state)
        # previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
        for i in range(maxlen):
            tf.get_variable_scope().reuse_variables()
            out, state = self.lstm(image_embedding, state)
            # get a get maximum probability word and it's encoding from the output of the LSTM
            logit = tf.matmul(out, self.word_encoding) + self.word_encoding_bias
            best_word = tf.argmax(logit, 1)
            # with tf.device("/cpu:0"):
            #     # get the embedding of the best_word to use as input to the next iteration of our LSTM
            #     previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
            #     previous_word += self.embedding_bias
            all_words.append(best_word)
    self.img=img
    self.all_words=all_words
    return img, all_words
	def _initialize_weights(self):
		"""Create the (frozen) variational word-encoder weight variables.

		Builds, depending on the module-level flags `same_embedding` and
		`vanilla` (set in __main__):
		  - 'input_meaning': affine map from the latent z to the LSTM input;
		  - 'variational_encoding' / 'biases_variational_encoding': the
		    encoder mean (and, unless `vanilla`, log-sigma) parameters.

		All variables are created with trainable=False (see `trainability`),
		i.e. the encoder is frozen; they are restored from a checkpoint in
		train(). Every created variable is also appended to
		self.all_encoding_weights so that restore can target them.

		Returns: dict-of-dicts of tf.Variables keyed as described above.
		"""
		all_weights = dict()
		# encoder weights are frozen at graph-construction time
		trainability=False
		if not same_embedding:
			all_weights['input_meaning'] = {
				'affine_weight': tf.Variable(xavier_init(self.n_z, self.n_lstm_input),name='affine_weight',trainable=trainability),
				'affine_bias': tf.Variable(tf.zeros(self.n_lstm_input),name='affine_bias',trainable=trainability)}
		# embedding-style matrix lives on the CPU (used via embedding_lookup)
		with tf.device('/cpu:0'):
			om=tf.Variable(xavier_init(self.n_input, self.n_z),name='out_mean',trainable=trainability)
		if not vanilla:
			# full VAE: mean and log-variance heads
			all_weights['biases_variational_encoding'] = {
				'out_mean': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_meanb',trainable=trainability),
				'out_log_sigma': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_log_sigmab',trainable=trainability)}
			all_weights['variational_encoding'] = {
				'out_mean': om,
				'out_log_sigma': tf.Variable(xavier_init(self.n_input, self.n_z),name='out_log_sigma',trainable=trainability)}
		else:
			# vanilla (deterministic) embedding: mean head only
			all_weights['biases_variational_encoding'] = {
				'out_mean': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_meanb',trainable=trainability)}
			all_weights['variational_encoding'] = {
				'out_mean': om}
		# self.no_reload+=all_weights['input_meaning'].values()
		# self.var_embs=[]
		# if transfertype2:
		# 	self.var_embs=all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
		# self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
		# if lstm_stack>1:
		# 	self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
		# all_weights['LSTM'] = {
		# 	'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
		# 	'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
		# 	'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'),
		# 	'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'),
		# 	'lstm': self.lstm}
		# flatten all created variables so train() can build a restore Saver
		all_encoding_weights=[all_weights[x].values() for x in all_weights]
		for w in all_encoding_weights:
			self.all_encoding_weights+=w
		return all_weights
	def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
		"""Map word inputs to LSTM-input embeddings via the VAE encoder.

		Args:
			ve_weights: (weights, biases) pair for _vae_sample.
			lstm_weights: unused in this method (kept for interface parity).
			x: word logits (when logit=True) or integer word indices.
			logit: if True, `x` is treated as a dense input looked up by row.

		Returns:
			(embedding, vae_loss) where embedding = z @ word_embedding + bias
			and vae_loss is the per-sample KL divergence from _vae_sample.
		"""
		# x=tf.matmul(x,self.embw)+self.embb
		if logit:
			z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x,lookup=True)
		else:
			# `form2` is a module-level flag (set in __main__): when True the
			# indices are expanded to one-hot vectors and fed densely.
			if not form2:
				z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
			else:
				z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.n_input))
				# NOTE(review): appends to a module-level list `all_the_f_one_h`
				# not defined in this file's visible portion -- confirm it exists.
				all_the_f_one_h.append(tf.one_hot(x,depth=self.n_input))
		embedding=tf.matmul(z,self.word_embedding)+self.embedding_bias
		# embedding=z
		return embedding,vae_loss
	def _vae_sample(self, weights, biases, x, lookup=False):
		"""Sample a latent code z for input x (reparameterization trick).

		Args:
			weights/biases: dicts holding 'out_mean' (and 'out_log_sigma'
				unless the module-level `vanilla` flag is set).
			x: dense input (matmul path) or integer ids (lookup path).
			lookup: if True, use embedding_lookup on CPU instead of matmul.

		Returns:
			(z, KLD): z = mu + exp(logvar/2) * eps with eps ~ N(0, I), or
			just mu when `vanilla`; KLD is the analytic KL(q(z|x) || N(0, I))
			summed over the latent axis (scalar 0.0 when `vanilla`).
		"""
		#TODO: consider adding a linear transform layer+relu or softplus here first
		if not lookup:
			mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
			if not vanilla:
				logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
		else:
			# embedding lookups are pinned to CPU
			with tf.device('/cpu:0'):
				mu=tf.nn.embedding_lookup(weights['out_mean'],x)
				mu+=biases['out_mean']
			if not vanilla:
				with tf.device('/cpu:0'):
					logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)
					logvar+=biases['out_log_sigma']
		if not vanilla:
			# reparameterization: z = mu + sigma * eps, sigma = exp(logvar/2)
			epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
			std=tf.exp(.5*logvar)
			z=mu+tf.multiply(std,epsilon)
		else:
			z=mu
			KLD=0.0
		if not vanilla:
			# standard closed-form KL divergence against a unit Gaussian
			KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
			# NOTE(review): Python-2 debug print of static shapes at graph build
			print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
		return z,KLD
	def crop_image(self,x, target_height=227, target_width=227, as_float=True,from_path=True):
		"""Load (optionally), center-crop and resize an image.

		Args:
			x: file path (when from_path=True) or an already-loaded ndarray.
			target_height/target_width: output size in pixels.
			as_float: cast pixels to float32 before processing.
			from_path: if True, read the image from disk via cv2.imread.

		Returns: HxW (target-sized) 3-channel ndarray.

		NOTE(review): cv2.resize takes dsize as (width, height); several calls
		below mix target_height/target_width in that tuple, which is only safe
		while the two are equal (as with the 227/224 defaults used here) --
		confirm before calling with a non-square target.
		"""
		#image preprocessing to crop and resize image
		image = (x)
		if from_path==True:
			image=cv2.imread(image)
		if as_float:
			image = image.astype(np.float32)
		if len(image.shape) == 2:
			# grayscale: replicate the single channel to 3 channels
			image = np.tile(image[:,:,None], 3)
		elif len(image.shape) == 4:
			# batched input: keep the first channel slice
			image = image[:,:,:,0]
		height, width, rgb = image.shape
		if width == height:
			resized_image = cv2.resize(image, (target_height,target_width))
		elif height < width:
			# landscape: scale so height matches, then center-crop the width
			resized_image = cv2.resize(image, (int(width * float(target_height)/height), target_width))
			cropping_length = int((resized_image.shape[1] - target_height) / 2)
			resized_image = resized_image[:,cropping_length:resized_image.shape[1] - cropping_length]
		else:
			# portrait: scale so width matches, then center-crop the height
			resized_image = cv2.resize(image, (target_height, int(height * float(target_width) / width)))
			cropping_length = int((resized_image.shape[0] - target_width) / 2)
			resized_image = resized_image[cropping_length:resized_image.shape[0] - cropping_length,:]
		# final exact resize to the requested dimensions
		return cv2.resize(resized_image, (target_height, target_width))
def read_image(self,path=None):
# parses image from file path and crops/resizes
if path is None:
path=test_image_path
img = crop_image(path, target_height=224, target_width=224)
if img.shape[2] == 4:
img = img[:,:,:3]
img = img[None, ...]
return img
def get_caption(self,x=None):
#gets caption from an image by feeding it through imported VGG16 graph
if self.from_image:
feat = read_image(x)
fc7 = self.sess.run(graph.get_tensor_by_name("import/Relu_1:0"), feed_dict={self.images:feat})
else:
fc7=np.load(x,'r')
generated_word_index= self.sess.run(self.generated_words, feed_dict={self.img:fc7})
generated_word_index = np.hstack(generated_word_index)
generated_words = [ixtoword[x] for x in generated_word_index]
punctuation = np.argmax(np.array(generated_words) == '.')+1
generated_words = generated_words[:punctuation]
generated_sentence = ' '.join(generated_words)
return (generated_sentence)
def get_data(annotation_path, feature_path):
	"""Load the training/validation data.

	Args:
		annotation_path: tab-separated file with (image, caption) rows and
			no header line.
		feature_path: .npy file of precomputed image features.

	Returns:
		(features, captions): the feature array opened memory-mapped
		(read-only) and a numpy array of caption strings.
	"""
	annotation_table = pd.read_table(annotation_path, sep='\t', header=None,
	                                 names=['image', 'caption'])
	features = np.load(feature_path, 'r')
	return features, annotation_table['caption'].values
def preProBuildWordVocab(sentence_iterator, word_count_threshold=30): # function from Andre Karpathy's NeuralTalk
	"""Build the caption vocabulary and an initial output-layer bias.

	Words occurring fewer than `word_count_threshold` times are dropped.
	Index 0 is reserved: '#START#' on the encoding side, '.' (end of
	sentence) on the decoding side.

	Args:
		sentence_iterator: iterable of caption strings.
		word_count_threshold: minimum occurrences for a word to be kept.

	Returns:
		(wordtoix, ixtoword, bias_init_vector): the two index maps and a
		float32 vector of log empirical word frequencies shifted so its
		maximum is 0 (a sensible softmax-bias initialization).
	"""
	print('preprocessing %d word vocab' % (word_count_threshold, ))
	counts = {}
	sentence_total = 0
	for sentence in sentence_iterator:
		sentence_total += 1
		for token in sentence.lower().split(' '):
			counts[token] = 1 + counts.get(token, 0)
	kept = [word for word, c in counts.items() if c >= word_count_threshold]
	print('preprocessed words %d -> %d' % (len(counts), len(kept)))
	# slot 0 is the start/end sentinel; real words start at index 1
	wordtoix = {'#START#': 0}
	ixtoword = {0: '.'}
	for offset, word in enumerate(kept):
		wordtoix[word] = offset + 1
		ixtoword[offset + 1] = word
	# the end token '.' appears once per sentence by construction
	counts['.'] = sentence_total
	bias_init_vector = np.array([1.0 * counts[ixtoword[i]] for i in ixtoword])
	bias_init_vector /= np.sum(bias_init_vector)
	bias_init_vector = np.log(bias_init_vector)
	bias_init_vector -= np.max(bias_init_vector)
	return wordtoix, ixtoword, bias_init_vector.astype(np.float32)
# Model/training hyperparameters.
dim_embed = 256  # word-embedding dimensionality
dim_hidden = 256  # LSTM hidden-state size
dim_in = 4096  # image-feature dimensionality (presumably VGG16 fc7 -- confirm)
batch_size = 128
momentum = 0.9  # NOTE(review): defined but unused by the visible training code
n_epochs = 25
def train(learning_rate=0.001, continue_training=False):
	"""Train the caption generator.

	Builds the graph, restores the frozen encoder weights from
	./models/tensorflow, then runs `n_epochs` of shuffled mini-batch Adam
	updates, checkpointing and dumping the loss history after each epoch.

	Args:
		learning_rate: initial Adam learning rate.
		continue_training: restore the latest checkpoint from `model_path`
			before training.

	NOTE(review): relies on module-level names not defined in this file's
	visible portion (load_text, sequence, pkl, os, n_z, n_input,
	n_lstm_input, model_path, annotation_path, feature_path) -- confirm the
	file's imports/__main__ provide them.
	"""
	tf.reset_default_graph()
	feats, captions = get_data(annotation_path, feature_path)
	wordtoix, ixtoword, init_b = preProBuildWordVocab(captions)
	# persist the decoding map for use by the test path
	np.save('data/ixtoword', ixtoword)
	print ('num words:',len(ixtoword))
	sess = tf.InteractiveSession()
	n_words = len(wordtoix)
	maxlen = 30
	X, final_captions, mask, _map = load_text(2**19-3,captions)
	running_decay=1
	decay_rate=0.9999302192204246
	# with tf.device('/gpu:0'):
	caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, batch_size, maxlen+2, n_words, np.zeros(n_z).astype(np.float32),n_input=n_input,n_lstm_input=n_lstm_input,n_z=n_z)
	loss, image, sentence, mask = caption_generator.build_model()
	saver = tf.train.Saver(max_to_keep=100)
	train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
	tf.global_variables_initializer().run()
	# restore the pretrained (frozen) encoder weights over the fresh init
	tf.train.Saver(var_list=caption_generator.all_encoding_weights,max_to_keep=100).restore(sess,tf.train.latest_checkpoint('./models/tensorflow'))
	if continue_training:
		saver.restore(sess,tf.train.latest_checkpoint(model_path))
	losses=[]
	for epoch in range(n_epochs):
		if epoch==1:
			# NOTE(review): flipping .trainable after the optimizer was built
			# has no effect in TF1 (var lists are fixed at minimize() time) --
			# confirm whether unfreezing was actually intended here.
			for w in caption_generator.all_encoding_weights:
				w.trainable=True
		# fresh shuffled index order each epoch
		index = (np.arange(len(feats)).astype(int))
		np.random.shuffle(index)
		index=index[:]
		i=0
		for start, end in zip( range(0, len(index), batch_size), range(batch_size, len(index), batch_size)):
			#format data batch
			current_feats = feats[index[start:end]]
			current_captions = captions[index[start:end]]
			# vectorize captions, dropping unknown words and the last token
			current_caption_ind = [x for x in map(lambda cap: [wordtoix[word] for word in cap.lower().split(' ')[:-1] if word in wordtoix], current_captions)]
			current_caption_matrix = sequence.pad_sequences(current_caption_ind, padding='post', maxlen=maxlen+1)
			# prepend the start token (index 0) column
			current_caption_matrix = np.hstack( [np.full( (len(current_caption_matrix),1), 0), current_caption_matrix] )
			current_mask_matrix = np.zeros((current_caption_matrix.shape[0], current_caption_matrix.shape[1]))
			# +2 covers the start token and the end-of-sentence position
			nonzeros = np.array([x for x in map(lambda x: (x != 0).sum()+2, current_caption_matrix )])
			current_capts=final_captions[index[start:end]]
			for ind, row in enumerate(current_mask_matrix):
				row[:nonzeros[ind]] = 1
			_, loss_value,total_loss = sess.run([train_op, caption_generator.print_loss,loss], feed_dict={
				image: current_feats.astype(np.float32),
				caption_generator.output_placeholder : current_caption_matrix.astype(np.int32),
				mask : current_mask_matrix.astype(np.float32),
				sentence : current_capts.astype(np.float32)
				})
			print("Current Cost: ", loss_value, "\t Epoch {}/{}".format(epoch, n_epochs), "\t Iter {}/{}".format(start,len(feats)))
			losses.append(loss_value*running_decay)
			# if epoch<9:
			# 	if i%3==0:
			# 		running_decay*=decay_rate
			# else:
			# 	if i%8==0:
			# 		running_decay*=decay_rate
			i+=1
		# Python-2 print statement (this file predates print_function)
		print losses[-1]
		print("Saving the model from epoch: ", epoch)
		pkl.dump(losses,open('losses/loss_n2e.pkl','wb'))
		saver.save(sess, os.path.join(model_path, 'model'), global_step=epoch)
		# NOTE(review): dead code -- rebinding the Python float does not change
		# the learning rate already baked into the AdamOptimizer op above.
		learning_rate *= 0.95
def test(sess,image,generated_words,ixtoword,idx=0): # Naive greedy search
	"""Generate and print a caption for one validation image.

	Args:
		sess: active TF session holding the generator graph.
		image: the image-feature placeholder returned by build_generator.
		generated_words: list of per-step argmax word-index tensors.
		ixtoword: index -> word decoding map.
		idx: row index into the feature file to caption.

	Side effects: restores the latest checkpoint from `model_path` (unless
	the sanity_check toggle is flipped) and prints the decoded word list.
	"""
	feats, captions = get_data(annotation_path, feature_path)
	feat = np.array([feats[idx]])
	saver = tf.train.Saver()
	# flip sanity_check to run on freshly-initialized (untrained) weights
	sanity_check= False
	# sanity_check=True
	if not sanity_check:
		saved_path=tf.train.latest_checkpoint(model_path)
		saver.restore(sess, saved_path)
	else:
		tf.global_variables_initializer().run()
	generated_word_index= sess.run(generated_words, feed_dict={image:feat})
	generated_word_index = np.hstack(generated_word_index)
	generated_sentence = [ixtoword[x] for x in generated_word_index]
	print(generated_sentence)
if __name__=='__main__':
	# Paths for checkpoints, precomputed features, and Flickr30k-style
	# annotations (results_20130124.token is the Flickr30k caption file --
	# confirm).
	model_path = './models/tensorflow_n2ecs'
	feature_path = './data/feats.npy'
	annotation_path = './data/results_20130124.token'
	import sys
	feats, captions = get_data(annotation_path, feature_path)
	# Module-level model configuration flags, read by the class methods above.
	n_input=50000
	binary_dim=n_input
	n_lstm_input=256
	n_z=256
	zero_end_tok=True
	form2=True
	vanilla=True
	onehot=False
	same_embedding=False
	# NOTE(review): no argv-length check -- running without a mode argument
	# raises IndexError.
	if sys.argv[1]=='train':
		train()
	elif sys.argv[1]=='test':
		ixtoword = np.load('data/ixtoword.npy').tolist()
		n_words = len(ixtoword)
		maxlen=15
		sess = tf.InteractiveSession()
		batch_size=1
		# inference uses batch size 1
		caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, 1, maxlen+2, n_words,n_input=n_input,n_lstm_input=n_lstm_input,n_z=n_z)
		image, generated_words = caption_generator.build_generator(maxlen=maxlen)
		test(sess,image,generated_words,ixtoword,1) |
ecederstrand/django | refs/heads/master | django/template/loaders/app_directories.py | 635 | """
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
from django.template.utils import get_app_template_dirs
from .filesystem import Loader as FilesystemLoader
class Loader(FilesystemLoader):
    def get_dirs(self):
        # Override the filesystem loader's configured DIRS with each
        # installed app's "templates" subdirectory.
        return get_app_template_dirs('templates')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.