| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, ⌀ = null) |
|---|---|---|---|---|
edbgon/rpipin
|
refs/heads/master
|
test/i2ctest.py
|
1
|
import smbus
import time
#bus = smbus.SMBus(0) # Rev 1 Pi uses 0
bus = smbus.SMBus(1) # Rev 2 Pi uses 1
DEVICE = 0x27 # Device address (A0-A2)
IODIRA = 0x00 # Pin direction register
OLATA = 0x14 # Register for outputs
GPIOA = 0x12 # Register for inputs
# Set all GPA pins as outputs by setting
# all bits of IODIRA register to 0
bus.write_byte_data(DEVICE,IODIRA,0x00)
# Set all output bits to 0
bus.write_byte_data(DEVICE,OLATA,0)
for MyData in range(1,15):
    # Count from 1 to 14, which in binary counts from 0001 to 1110
    bus.write_byte_data(DEVICE,OLATA,MyData)
    time.sleep(0.25)
    bus.write_byte_data(DEVICE,OLATA,0)
    time.sleep(0.25)
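# A minimal follow-up sketch (hypothetical, not in the original test; assumes the
# same MCP23017 wiring): the GPIOA register defined above is never read, but it can
# be polled to read back the pin states as a single byte.
inputs = bus.read_byte_data(DEVICE, GPIOA)
print "GPIOA reads back 0x%02x" % inputs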
|
AdamRLukaitis/muzei
|
refs/heads/master
|
web/lib/bs4/__init__.py
|
417
|
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.3.2"
__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = u'[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, **kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""
if 'convertEntities' in kwargs:
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. You can pass in features='html' "
"or features='xml' to get a builder capable of handling "
"one or the other.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if len(kwargs) > 0:
arg = kwargs.keys().pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)
if builder is None:
if isinstance(features, basestring):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
builder = builder_class()
self.builder = builder
self.is_xml = builder.is_xml
self.builder.soup = self
self.parse_only = parse_only
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256:
# Print out warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# just in case that's what the user really wants.
if (isinstance(markup, unicode)
and not os.path.supports_unicode_filenames):
possible_filename = markup.encode("utf8")
else:
possible_filename = markup
is_file = False
try:
is_file = os.path.exists(possible_filename)
except Exception, e:
# This is almost certainly a problem involving
# characters not valid in filenames on this
# system. Just let it go.
pass
if is_file:
warnings.warn(
'"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
if markup[:5] == "http:" or markup[:6] == "https:":
# TODO: This is ugly but I couldn't get it to work in
# Python 3 otherwise.
if ((isinstance(markup, bytes) and not b' ' in markup)
or (isinstance(markup, unicode) and not u' ' in markup)):
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(markup, from_encoding)):
self.reset()
try:
self._feed()
break
except ParserRejectedMarkup:
pass
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.preserve_whitespace_tag_stack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
navigable = subclass(s)
navigable.setup()
return navigable
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = u''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Add an object to the parse tree."""
parent = parent or self.currentTag
most_recent_element = most_recent_element or self._most_recent_element
o.setup(parent, most_recent_element)
if most_recent_element is not None:
most_recent_element.next_element = o
self._most_recent_element = o
parent.contents.append(o)
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
most_recently_popped = None
stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
most_recently_popped = self.popTag()
break
most_recently_popped = self.popTag()
return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print "Start tag %s: %s" % (name, attrs)
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
self.current_data.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = u''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
pass
class FeatureNotFound(ValueError):
pass
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
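# A minimal usage sketch (illustrative only; the markup and names below are made
# up): parsing a small fragment drives the handle_starttag/handle_data/handle_endtag
# events described in the BeautifulSoup docstring above.
#
#   soup = BeautifulSoup("<p>Hello, <b>world</b>!</p>", "html.parser")
#   soup.p.b.string      # u'world'
#   soup.find('b').name  # u'b'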
|
lizardsystem/lizard-fewsunblobbed
|
refs/heads/master
|
lizard_fewsunblobbed/management/commands/fews_unblobbed_copy_data.py
|
1
|
"""
Retrieves the fews unblobbed filter tree (and stores it in the cache).
"""
from django.core.management.base import BaseCommand
from django.db import connections, transaction
from lizard_fewsunblobbed.models import Timeserie
import logging
import datetime
import traceback
import sys
logger = logging.getLogger(__name__)
class Command(BaseCommand):
args = ''
help = 'Populate fews unblobbed cache for better user experience'
@transaction.commit_manually(using='fews-unblobbed')
def handle(self, *args, **options):
logger.info('Copying data (last 2 days)...')
# Take correct cursor!
cursor = connections['fews-unblobbed'].cursor()
count = 0
errors = 0
today = datetime.datetime.now()
# Fetch data from a year ago and put it on this year.
for ts in Timeserie.objects.all():
for tsd in ts.timeseriedata.filter(
tsd_time__gte=today - datetime.timedelta(days=367),
tsd_time__lte=today - datetime.timedelta(days=365)):
timestamp = tsd.tsd_time + datetime.timedelta(days=365)
# which fields do we want to copy?
try:
cursor.execute("insert into timeseriedata (tsd_time," +
" tsd_value, tsd_flag, tsd_detection," +
" tsd_comments, tkey) values " +
"(%s, %s, %s, %s, %s, %s)",
[timestamp.isoformat(), tsd.tsd_value,
tsd.tsd_flag, tsd.tsd_detection,
('%s' % tsd.tsd_comments), ts.tkey])
transaction.commit(using='fews-unblobbed')
#transaction.commit_unless_managed(using='fews-unblobbed')
count += 1
if count % 1000 == 0:
logger.info('Copied... %d' % count)
except:
transaction.rollback(using='fews-unblobbed')
errors += 1
if errors % 1000 == 0:
traceback.print_exc(file=sys.stdout)
logger.info('Errors (duplicates?)... %d' % errors)
print "\r", count,
logger.info('Copied: %d' % count)
logger.info('Errors (duplicates?): %d' % errors)
# for timeserie in Timeserie.objects.all():
# logger.info('Timeserie: %s' % timeserie)
# timeserie.timeseriedata_set.all()
# This was supposed to work, but it doesn't.
# ValidationError: [u'Enter a valid date/time in
# YYYY-MM-DD HH:MM[:ss[.uuuuuu]] format.']
# >>> from copy import deepcopy
# >>> new_tsd = deepcopy(tsd)
# >>> new_tsd.id = None
# >>> new_tsd.tsd_time
# datetime.datetime(2010, 6, 3, 6, 15)
# >>> new_tsd.tsd_time += datetime.timedelta(days=365)
# >>> new_tsd.save()
|
MarkusH/django-steps
|
refs/heads/master
|
steps/templatetags/django_steps.py
|
1
|
from django import template
from ..steps import (
get_current_step, get_current_steps, get_current_url, get_next_step,
get_next_url, get_previous_step, get_previous_url, get_url,
)
register = template.Library()
@register.assignment_tag(takes_context=True)
def get_all_steps(context):
return get_current_steps(context['request'])
@register.assignment_tag()
def step_attribute(step, attr):
if step is not None and attr is not None:
return getattr(step, attr)
return step
@register.assignment_tag(takes_context=True)
def step_url(context, step, *args, **kwargs):
return get_url(step, context['request'], *args, **kwargs)
@register.assignment_tag(takes_context=True)
def current_step(context, attr=None):
step = get_current_step(context['request'])
return step_attribute(step, attr)
@register.assignment_tag(takes_context=True)
def next_step(context, attr=None):
step = get_next_step(context['request'])
return step_attribute(step, attr)
@register.assignment_tag(takes_context=True)
def previous_step(context, attr=None):
step = get_previous_step(context['request'])
return step_attribute(step, attr)
@register.simple_tag(takes_context=True)
def current_step_url(context, *args, **kwargs):
return get_current_url(context['request'], *args, **kwargs)
@register.simple_tag(takes_context=True)
def next_step_url(context, *args, **kwargs):
return get_next_url(context['request'], *args, **kwargs)
@register.simple_tag(takes_context=True)
def previous_step_url(context, *args, **kwargs):
return get_previous_url(context['request'], *args, **kwargs)
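# A sketch of how these tags might be used in a template (illustrative only; the
# "title" attribute and the surrounding markup are assumptions, not part of this app):
#
#   {% load django_steps %}
#   {% get_all_steps as steps %}
#   {% current_step "title" as current_title %}
#   {% next_step "title" as next_title %}
#   <a href="{% next_step_url %}">Next: {{ next_title }}</a>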
|
davidh-ssec/polar2grid
|
refs/heads/master
|
polar2grid/core/histogram.py
|
1
|
#!/usr/bin/env python3
# encoding: utf-8
"""
Functions related to histogram equalization. This is a scaling function like the
others found in rescale.py, but it has so much supporting infrastructure that it
has been moved to its own module.
:attention:
    A scaling function is not guaranteed to leave the original data array
    unchanged. In fact, it is usually faster to modify the array in place.
:author: Eva Schiffer (evas)
:author: David Hoese (davidh)
:contact: david.hoese@ssec.wisc.edu
:organization: Space Science and Engineering Center (SSEC)
:copyright: Copyright (c) 2012 University of Wisconsin SSEC. All rights reserved.
:date: Jan 2013
:license: GNU GPLv3
"""
__docformat__ = "restructuredtext en"
import sys
import logging
import numpy
log = logging.getLogger(__name__)
def histogram_equalization (data, mask_to_equalize,
number_of_bins=1000,
std_mult_cutoff=4.0,
do_zerotoone_normalization=True,
valid_data_mask=None,
# these are theoretically hooked up, but not useful with only one equalization
clip_limit=None,
slope_limit=None,
# these parameters don't do anything, they're just here to mirror those in the other call
do_log_scale=False,
log_offset=None,
local_radius_px=None,
out=None) :
"""
Perform a histogram equalization on the data selected by mask_to_equalize.
The data will be separated into number_of_bins levels for equalization and
outliers beyond +/- std_mult_cutoff*std will be ignored.
If do_zerotoone_normalization is True the data selected by mask_to_equalize
will be returned in the 0 to 1 range. Otherwise the data selected by
mask_to_equalize will be returned in the 0 to number_of_bins range.
Note: the data will be changed in place.
"""
out = out if out is not None else data.copy()
mask_to_use = mask_to_equalize if valid_data_mask is None else valid_data_mask
log.debug(" determining DNB data range for histogram equalization")
avg = numpy.mean(data[mask_to_use])
std = numpy.std (data[mask_to_use])
# limit our range to +/- std_mult_cutoff*std; with the default std_mult_cutoff of 4.0 this keeps nearly all (about 99.99%) of normally distributed data
concervative_mask = (data < (avg + std*std_mult_cutoff)) & (data > (avg - std*std_mult_cutoff)) & mask_to_use
log.debug(" running histogram equalization")
cumulative_dist_function, temp_bins = _histogram_equalization_helper (data[concervative_mask], number_of_bins, clip_limit=clip_limit, slope_limit=slope_limit)
# linearly interpolate using the distribution function to get the new values
out[mask_to_equalize] = numpy.interp(data[mask_to_equalize], temp_bins[:-1], cumulative_dist_function)
# if we were asked to, normalize our data to be between zero and one, rather than zero and number_of_bins
if do_zerotoone_normalization :
_linear_normalization_from_0to1 (out, mask_to_equalize, number_of_bins)
return out
def local_histogram_equalization (data, mask_to_equalize, valid_data_mask=None, number_of_bins=1000,
std_mult_cutoff=3.0,
do_zerotoone_normalization=True,
local_radius_px=300,
clip_limit=60.0, #20.0,
slope_limit=3.0, #0.5,
do_log_scale=True,
log_offset=0.00001, # can't take the log of zero, so the offset may be needed; pass 0.0 if your data doesn't need it
out=None
) :
"""
equalize the provided data (in the mask_to_equalize) using adaptive histogram equalization
tiles of width/height (2 * local_radius_px + 1) will be calculated and results for each pixel will be bilinearly interpolated from the nearest 4 tiles
when pixels fall near the edge of the image (there is no adjacent tile) the resultant interpolated sum from the available tiles will be multiplied to
account for the weight of any missing tiles (pixel total interpolated value = pixel available interpolated value / (1 - missing interpolation weight))
if do_zerotoone_normalization is True the data will be scaled so that all data in the mask_to_equalize falls between 0 and 1; otherwise the data
in mask_to_equalize will all fall between 0 and number_of_bins
returns the equalized data
"""
out = out if out is not None else numpy.zeros_like(data)
# if we don't have a valid mask, use the mask of what we should be equalizing
if valid_data_mask is None:
valid_data_mask = mask_to_equalize
# calculate some useful numbers for our tile math
total_rows = data.shape[0]
total_cols = data.shape[1]
tile_size = int((local_radius_px * 2.0) + 1.0)
row_tiles = int(total_rows / tile_size) if (total_rows % tile_size == 0) else int(total_rows / tile_size) + 1
col_tiles = int(total_cols / tile_size) if (total_cols % tile_size == 0) else int(total_cols / tile_size) + 1
# an array of our distribution functions for equalization
all_cumulative_dist_functions = [ [ ] ]
# an array of our bin information for equalization
all_bin_information = [ [ ] ]
# loop through our tiles and create the histogram equalizations for each one
for num_row_tile in range(row_tiles) :
# make sure we have enough rows available to store info on this next row of tiles
if len(all_cumulative_dist_functions) <= num_row_tile :
all_cumulative_dist_functions.append( [ ] )
if len(all_bin_information) <= num_row_tile :
all_bin_information .append( [ ] )
# go through each tile in this row and calculate the equalization
for num_col_tile in range(col_tiles) :
# calculate the range for this tile (min is inclusive, max is exclusive)
min_row = num_row_tile * tile_size
max_row = min_row + tile_size
min_col = num_col_tile * tile_size
max_col = min_col + tile_size
# for speed of calculation, pull out the mask of pixels that should be used to calculate the histogram
mask_valid_data_in_tile = valid_data_mask[min_row:max_row, min_col:max_col]
# if we have any valid data in this tile, calculate a histogram equalization for this tile
# (note: even if this tile does not fall in the mask_to_equalize, its histogram may be used by other tiles)
cumulative_dist_function, temp_bins = None, None
if mask_valid_data_in_tile.any():
# use all valid data in the tile, so separate sections will blend cleanly
temp_valid_data = data[min_row:max_row, min_col:max_col][mask_valid_data_in_tile]
temp_valid_data = temp_valid_data[temp_valid_data >= 0] # TEMP, testing to see if negative data is messing everything up
# limit the contrast by only considering data within a certain range of the average
if std_mult_cutoff is not None :
avg = numpy.mean(temp_valid_data)
std = numpy.std (temp_valid_data)
# limit our range to avg +/- std_mult_cutoff*std; with this function's default std_mult_cutoff of 3.0 that keeps roughly 99.7% of normally distributed data
concervative_mask = (temp_valid_data < (avg + std*std_mult_cutoff)) & (temp_valid_data > (avg - std*std_mult_cutoff))
temp_valid_data = temp_valid_data[concervative_mask]
# if we are taking the log of our data, do so now
if do_log_scale :
temp_valid_data = numpy.log(temp_valid_data + log_offset)
# do the histogram equalization and get the resulting distribution function and bin information
if temp_valid_data.size > 0 :
cumulative_dist_function, temp_bins = _histogram_equalization_helper (temp_valid_data, number_of_bins, clip_limit=clip_limit, slope_limit=slope_limit)
# hang on to our equalization related information for use later
all_cumulative_dist_functions[num_row_tile].append(cumulative_dist_function)
all_bin_information [num_row_tile].append(temp_bins)
# get the tile weight array so we can use it to interpolate our data
tile_weights = _calculate_weights(tile_size)
# now loop through our tiles and linearly interpolate the equalized versions of the data
for num_row_tile in range(row_tiles) :
for num_col_tile in range(col_tiles) :
# calculate the range for this tile (min is inclusive, max is exclusive)
min_row = num_row_tile * tile_size
max_row = min_row + tile_size
min_col = num_col_tile * tile_size
max_col = min_col + tile_size
# for convenience, pull some of these tile sized chunks out
temp_all_data = data[min_row:max_row, min_col:max_col].copy()
temp_mask_to_equalize = mask_to_equalize[min_row:max_row, min_col:max_col]
temp_all_valid_data_mask = valid_data_mask[min_row:max_row, min_col:max_col]
# if we have any data in this tile, calculate our weighted sum
if temp_mask_to_equalize.any():
if do_log_scale:
temp_all_data[temp_all_valid_data_mask] = numpy.log(temp_all_data[temp_all_valid_data_mask] + log_offset)
temp_data_to_equalize = temp_all_data[temp_mask_to_equalize]
temp_all_valid_data = temp_all_data[temp_all_valid_data_mask]
# a place to hold our weighted sum that represents the interpolated contributions
# of the histogram equalizations from the surrounding tiles
temp_sum = numpy.zeros_like(temp_data_to_equalize)
# how much weight were we unable to use because those tiles fell off the edge of the image?
unused_weight = numpy.zeros(temp_data_to_equalize.shape, dtype=tile_weights.dtype)
# loop through all the surrounding tiles and process their contributions to this tile
for weight_row in range(3):
for weight_col in range(3):
# figure out which adjacent tile we're processing (in overall tile coordinates instead of relative to our current tile)
calculated_row = num_row_tile - 1 + weight_row
calculated_col = num_col_tile - 1 + weight_col
tmp_tile_weights = tile_weights[weight_row, weight_col][numpy.where(temp_mask_to_equalize)]
# if we're inside the tile array and the tile we're processing has a histogram equalization for us to use, process it
if ( (calculated_row >= 0) and (calculated_row < row_tiles) and
(calculated_col >= 0) and (calculated_col < col_tiles) and
(all_bin_information[calculated_row][calculated_col] is not None) and
(all_cumulative_dist_functions[calculated_row][calculated_col] is not None)) :
# equalize our current tile using the histogram equalization from the tile we're processing
temp_equalized_data = numpy.interp(temp_all_valid_data,
all_bin_information[calculated_row][calculated_col][:-1],
all_cumulative_dist_functions[calculated_row][calculated_col])
temp_equalized_data = temp_equalized_data[numpy.where(temp_mask_to_equalize[temp_all_valid_data_mask])]
# add the contribution for the tile we're processing to our weighted sum
temp_sum += (temp_equalized_data * tmp_tile_weights)
else : # if the tile we're processing doesn't exist, hang onto the weight we would have used for it so we can correct that later
unused_weight -= tmp_tile_weights
# if we have unused weights, scale our values to correct for that
if unused_weight.any():
# TODO, if the mask masks everything out this will be a zero!
temp_sum /= unused_weight + 1
# now that we've calculated the weighted sum for this tile, set it in our data array
out[min_row:max_row, min_col:max_col][temp_mask_to_equalize] = temp_sum
"""
# TEMP, test without using weights
data[min_row:max_row, min_col:max_col][temp_mask_to_equalize] = numpy.interp(temp_data_to_equalize,
all_bin_information [num_row_tile ][num_col_tile][:-1],
all_cumulative_dist_functions[num_row_tile ][num_col_tile])
"""
# if we were asked to, normalize our data to be between zero and one, rather than zero and number_of_bins
if do_zerotoone_normalization :
_linear_normalization_from_0to1 (out, mask_to_equalize, number_of_bins)
return out
def _histogram_equalization_helper (valid_data, number_of_bins, clip_limit=None, slope_limit=None) :
"""
calculate the simplest possible histogram equalization, using only valid data
returns the cumulative distribution function and bin information
"""
# bucket all the selected data using numpy's histogram function
temp_histogram, temp_bins = numpy.histogram(valid_data, number_of_bins)
# if we have a clip limit and we should do our clipping before building the cumulative distribution function, clip off our histogram
if (clip_limit is not None) :
# clip our histogram and remember how much we removed
pixels_to_clip_at = int(clip_limit * (valid_data.size / float(number_of_bins)))
mask_to_clip = temp_histogram > clip_limit
num_bins_clipped = sum(mask_to_clip)
num_pixels_clipped = sum(temp_histogram[mask_to_clip]) - (num_bins_clipped * pixels_to_clip_at)
temp_histogram[mask_to_clip] = pixels_to_clip_at
# calculate the cumulative distribution function
cumulative_dist_function = temp_histogram.cumsum()
# if we have a clip limit and we should do our clipping after building the cumulative distribution function, clip off our cdf
if (slope_limit is not None) :
# clip our cdf and remember how much we removed
pixel_height_limit = int(slope_limit * (valid_data.size / float(number_of_bins)))
cumulative_excess_height = 0
num_clipped_pixels = 0
weight_metric = numpy.zeros(cumulative_dist_function.shape, dtype=float)
for pixel_index in range(1, cumulative_dist_function.size) :
current_pixel_count = cumulative_dist_function[pixel_index]
diff_from_acceptable = (current_pixel_count - cumulative_dist_function[pixel_index-1] -
pixel_height_limit - cumulative_excess_height)
if diff_from_acceptable < 0:
weight_metric[pixel_index] = abs(diff_from_acceptable)
cumulative_excess_height += max( diff_from_acceptable, 0)
cumulative_dist_function[pixel_index] = current_pixel_count - cumulative_excess_height
num_clipped_pixels = num_clipped_pixels + cumulative_excess_height
# now normalize the overall distribution function
cumulative_dist_function = (number_of_bins - 1) * cumulative_dist_function / cumulative_dist_function[-1]
# return what someone else will need in order to apply the equalization later
return cumulative_dist_function, temp_bins
def _calculate_weights (tile_size) :
"""
calculate a weight array that will be used to quickly bilinearly-interpolate the histogram equalizations
tile size should be the width and height of a tile in pixels
returns a 4D weight array, where the first 2 dimensions correspond to the grid of where the tiles are
relative to the tile being interpolated
"""
# we are essentially making a set of weight masks for an ideal center tile that has all 8 surrounding tiles available
# create our empty template tiles
template_tile = numpy.zeros((3, 3, tile_size, tile_size), dtype=numpy.float32)
"""
# TEMP FOR TESTING, create a weight tile that does no interpolation
template_tile[1,1] = template_tile[1,1] + 1.0
"""
# for ease of calculation, figure out the index of the center pixel in a tile
# and how far that pixel is from the edge of the tile (in pixel units)
center_index = int(tile_size / 2)
center_dist = tile_size / 2.0
# loop through each pixel in the tile and calculate the 9 weights for that pixel
# where weights for a pixel are 0.0 they are not set (since the template_tile
# starts out as all zeros)
for row in range(tile_size) :
for col in range(tile_size) :
vertical_dist = abs(center_dist - row) # the distance from our pixel to the center of our tile, vertically
horizontal_dist = abs(center_dist - col) # the distance from our pixel to the center of our tile, horizontally
# pre-calculate which 3 adjacent tiles will affect our tile
# (note: these calculations aren't quite right if center_index equals the row or col)
horizontal_index = 0 if col < center_index else 2
vertical_index = 0 if row < center_index else 2
# if this is the center pixel, we only need to use its own tile for it
if (row == center_index) and (col == center_index) :
# all of the weight for this pixel comes from its own tile
template_tile[1,1][row, col] = 1.0
# if this pixel is in the center row, but is not the center pixel
# we're going to need to linearly interpolate its tile and the
# tile that is horizontally nearest to it
elif (row == center_index) and (col != center_index) :
# linear interp horizontally
beside_weight = horizontal_dist / tile_size # the weight from the adjacent tile
local_weight = (tile_size - horizontal_dist) / tile_size # the weight from this tile
# set the weights for the two relevant tiles
template_tile[1, 1] [row, col] = local_weight
template_tile[1, horizontal_index][row, col] = beside_weight
# if this pixel is in the center column, but is not the center pixel
# we're going to need to linearly interpolate its tile and the
# tile that is vertically nearest to it
elif (row != center_index) and (col == center_index) :
# linear interp vertical
beside_weight = vertical_dist / tile_size # the weight from the adjacent tile
local_weight = (tile_size - vertical_dist) / tile_size # the weight from this tile
# set the weights for the two relevant tiles
template_tile[1, 1][row, col] = local_weight
template_tile[vertical_index, 1][row, col] = beside_weight
# if the pixel is in one of the four quadrants that are above or below the center
# row and column, we need to bilinearly interpolate it between the nearest four tiles
else:
# bilinear interpolation
local_weight = ((tile_size - vertical_dist) / tile_size) * ((tile_size - horizontal_dist) / tile_size) # the weight from this tile
vertical_weight = (( vertical_dist) / tile_size) * ((tile_size - horizontal_dist) / tile_size) # the weight from the vertically adjacent tile
horizontal_weight = ((tile_size - vertical_dist) / tile_size) * (( horizontal_dist) / tile_size) # the weight from the horizontally adjacent tile
diagonal_weight = (( vertical_dist) / tile_size) * (( horizontal_dist) / tile_size) # the weight from the diagonally adjacent tile
# set the weights for the four relevant tiles
template_tile[1, 1, row, col] = local_weight
template_tile[vertical_index, 1, row, col] = vertical_weight
template_tile[1, horizontal_index, row, col] = horizontal_weight
template_tile[vertical_index, horizontal_index, row, col] = diagonal_weight
# return the weights for an ideal center tile
return template_tile
def _linear_normalization_from_0to1 (data, mask, theoretical_max, theoretical_min=0, message=" normalizing equalized data to fit in 0 to 1 range") :
#" normalizing DNB data into 0 to 1 range") :
"""
do a linear normalization so all data is in the 0 to 1 range. This is a sloppy but fast calculation that relies on parameters
giving it the correct theoretical current max and min so it can scale the data accordingly.
"""
log.debug(message)
if (theoretical_min != 0) :
data[mask] = data[mask] - theoretical_min
theoretical_max = theoretical_max - theoretical_min
data[mask] = data[mask] / theoretical_max
def main():
print("Command line interface not implemented. If you wish to rescale from the command line use rescale.py.")
if __name__ == "__main__":
sys.exit(main())
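# A minimal usage sketch (illustrative only; the array, mask, and radius values
# below are made up and not part of this module):
#
#   import numpy
#   img = numpy.random.normal(100.0, 15.0, (512, 512)).astype(numpy.float32)
#   mask = numpy.isfinite(img)
#   flat = histogram_equalization(img.copy(), mask)                              # values end up in 0..1
#   tiled = local_histogram_equalization(img.copy(), mask, local_radius_px=128)  # adaptive, tiled version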
|
onyxfish/agate-sql
|
refs/heads/master
|
docs/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# flake8: noqa
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
autodoc_member_order = 'bysource'
intersphinx_mapping = {
'python': ('http://docs.python.org/3.5/', None),
'agate': ('http://agate.readthedocs.org/en/latest/', None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'agate-sql'
copyright = u'2017, Christopher Groskopf'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5.5'
# The full version, including alpha/beta/rc tags.
release = '0.5.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'agatesqldoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'agate-sql.tex', u'agate-sql Documentation',
u'Christopher Groskopf', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
]
|
tbyehl/boxstarter
|
refs/heads/master
|
BuildPackages/example-light/tools/sublime/Packages/PowershellUtils/tests/test_sublimepath.py
|
20
|
import _setuptestenv
import sys
try:
import mock
except ImportError:
print "ERROR: Cannot find mock module in your SYSTEM'S Python library."
sys.exit(1)
import unittest
import sublime
import sublimeplugin
#===============================================================================
# Add your tests below here.
#===============================================================================
import sublimepath
class RootAtPackagesDirFunction(unittest.TestCase):
def testCallWithoutArguments(self):
actual = sublimepath.rootAtPackagesDir()
self.assertEquals(actual, "?????")
def testCallWithOneArgument(self):
actual = sublimepath.rootAtPackagesDir("XXX")
self.assertEquals(actual, "?????\\XXX")
def testCallWithMultipleArguments(self):
actual = sublimepath.rootAtPackagesDir("XXX", "YYY")
self.assertEquals(actual, "?????\\XXX\\YYY")
if __name__ == "__main__":
unittest.main()
|
Grumbel/rfactorlcd
|
refs/heads/master
|
rfactorlcd/dashlet_selection.py
|
1
|
# rFactor Remote LCD
# Copyright (C) 2014 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
import rfactorlcd
class DragMode:
Move = 0
ResizeLeft = 1 << 0
ResizeRight = 1 << 1
ResizeTop = 1 << 2
ResizeBottom = 1 << 3
class ControlPoint:
def __init__(self, x, y, w, h, mode):
self.x = x
self.y = y
self.w = w
self.h = h
self.mode = mode
def render(self, cr):
cr.rectangle(self.x - self.w/2,
self.y - self.h/2,
self.w, self.h)
cr.set_line_width(4.0)
cr.set_source_rgb(1, 0, 0)
cr.fill_preserve()
cr.set_source_rgb(0.75, 0, 0)
cr.set_line_width(1.0)
cr.stroke()
def contains(self, x, y):
return (abs(self.x - x) <= self.w and
abs(self.y - y) <= self.h)
class DashletSelection(object):
def __init__(self, style, workspace):
self.lcd_style = style
self.workspace = workspace
self.dashlets = []
self.dashlet_origins = []
self.drag_start = None
self.drag_mode = None
self.control_points = []
def on_button_press(self, widget, event):
# print "press", event.x, event.y, event.button
self.dashlet_selection.on_button_press(self, widget, event)
def is_active(self):
return bool(self.dashlets)
def add(self, dashlet):
self.dashlets.append(dashlet)
self.update()
def remove(self, dashlet):
self.dashlets.remove(dashlet)
self.update()
def set(self, dashlet):
self.dashlets = [dashlet]
self.update()
def clear(self):
self.dashlets = []
self.update()
def update(self):
bb = self.bounding_box = self.calc_bounding_box()
self.control_points = [
ControlPoint(bb.x1 - 8, bb.y1 - 8, 16, 16, DragMode.ResizeLeft | DragMode.ResizeTop),
ControlPoint(bb.x1 - 8, bb.cy, 16, 16, DragMode.ResizeLeft),
ControlPoint(bb.x1 - 8, bb.y2 + 8, 16, 16, DragMode.ResizeLeft | DragMode.ResizeBottom),
ControlPoint(bb.cx, bb.y1 - 8, 16, 16, DragMode.ResizeTop),
ControlPoint(bb.cx, bb.y2 + 8, 16, 16, DragMode.ResizeBottom),
ControlPoint(bb.x2 + 8, bb.y1 - 8, 16, 16, DragMode.ResizeRight | DragMode.ResizeTop),
ControlPoint(bb.x2 + 8, bb.cy, 16, 16, DragMode.ResizeRight),
ControlPoint(bb.x2 + 8, bb.y2 + 8, 16, 16, DragMode.ResizeRight | DragMode.ResizeBottom),
]
def contains(self, dashlet):
return dashlet in self.dashlets
def is_empty(self):
return self.dashlets == []
def calc_bounding_box(self):
if self.dashlets == []:
return None
else:
x1 = self.dashlets[0].x
y1 = self.dashlets[0].y
x2 = self.dashlets[0].x2
y2 = self.dashlets[0].y2
for dashlet in self.dashlets[1:]:
x1 = min(dashlet.x, x1)
y1 = min(dashlet.y, y1)
x2 = max(dashlet.x2, x2)
y2 = max(dashlet.y2, y2)
return rfactorlcd.Rect(x1, y1, x2 - x1, y2 - y1)
def find_control_point(self, x, y):
for cp in self.control_points:
if cp.contains(x, y):
return cp
return None
def on_button_press(self, event):
control_point = self.find_control_point(event.x, event.y)
if control_point is not None:
self.control_points = []
self.drag_start = (event.x, event.y)
self.drag_mode = control_point.mode
self.dashlet_origins = [(dashlet.x, dashlet.y, dashlet.w, dashlet.h)
for dashlet in self.dashlets]
else:
dashlet = self.workspace.find_dashlet_at(event.x, event.y)
if dashlet is None:
self.clear()
else:
if event.state & gtk.gdk.SHIFT_MASK:
if self.contains(dashlet):
self.remove(dashlet)
else:
self.add(dashlet)
else:
if not self.contains(dashlet):
self.set(dashlet)
if not self.is_empty():
self.control_points = []
self.drag_start = (event.x, event.y)
self.drag_mode = DragMode.Move
self.dashlet_origins = [(d.x, d.y, d.w, d.h)
for d in self.dashlets]
def on_button_release(self, event):
self.drag_start = None
self.drag_mode = None
self.update()
def on_move(self, event):
if self.drag_start is not None:
x = event.x - self.drag_start[0]
y = event.y - self.drag_start[1]
for i, dashlet in enumerate(self.dashlets):
origins = self.dashlet_origins[i]
if self.drag_mode == DragMode.Move:
dashlet.set_geometry(origins[0] + x, origins[1] + y)
else:
rect = rfactorlcd.Rect.copy(self.bounding_box)
if self.drag_mode & DragMode.ResizeLeft:
rect.x += x
rect.w -= x
if self.drag_mode & DragMode.ResizeRight:
rect.w += x
if self.drag_mode & DragMode.ResizeTop:
rect.y += y
rect.h -= y
if self.drag_mode & DragMode.ResizeBottom:
rect.h += y
scale_x = rect.w / self.bounding_box.w
scale_y = rect.h / self.bounding_box.h
dashlet.set_geometry(x=rect.x + (origins[0] - self.bounding_box.x) * scale_x,
y=rect.y + (origins[1] - self.bounding_box.y) * scale_y,
w=origins[2] * scale_x,
h=origins[3] * scale_y)
def render(self, cr):
if self.dashlets == []:
return
else:
cr.set_line_width(2.0)
cr.set_source_rgb(*self.lcd_style.select_color)
for dashlet in self.dashlets:
cr.rectangle(dashlet.x, dashlet.y,
dashlet.w, dashlet.h)
cr.stroke()
if self.control_points != []:
cr.set_line_width(1.0)
cr.set_dash([4.0, 4.0])
cr.rectangle(self.bounding_box.x, self.bounding_box.y,
self.bounding_box.w, self.bounding_box.h)
cr.stroke()
cr.set_dash([])
for cp in self.control_points:
cp.render(cr)
# EOF #
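# A small sketch (illustrative; the coordinates are made up): a corner control
# point combines two resize flags, which on_move() tests with bitwise AND.
#
#   cp = ControlPoint(10, 10, 16, 16, DragMode.ResizeLeft | DragMode.ResizeTop)
#   cp.contains(14, 6)                    # True, point lies within the handle
#   bool(cp.mode & DragMode.ResizeLeft)   # True
#   bool(cp.mode & DragMode.ResizeBottom) # False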
|
justajeffy/arsenalsuite
|
refs/heads/master
|
cpp/apps/burner/plugins/houdinisim.py
|
11
|
from blur.Stone import *
from blur.Classes import *
from blur.Burner import *
from PyQt4.QtCore import *
from PyQt4.QtSql import *
import traceback, os
class HoudiniSimBurner(JobBurner):
def __init__(self,jobAss,slave):
JobBurner.__init__(self, jobAss, slave)
self.Job = jobAss.job()
self.CurrentFrame = None
self.frameList = []
self.StartFrame = None
self.EndFrame = None
self.jobDone = QRegExp("^baztime:")
self.errors = []
self.errors.append(QRegExp("cannot open output file"))
self.errors.append(QRegExp("3DL SEVERE ERROR L2033"))
self.errors.append(QRegExp("Command exited with non-zero status"))
def __del__(self):
# Nothing is required
# self.cleanup() is explicitly called by the slave
pass
# Names of processes to kill after burn is finished
def processNames(self):
return QStringList()
def environment(self):
env = self.Job.environment().environment()
Log( "HoudiniSimBurner::environment(): %s" % env )
return env.split("\n")
def buildCmdArgs(self):
return QStringList()
def executable(self):
timeCmd = "/usr/bin/time --format=baztime:real:%e:user:%U:sys:%S:iowait:%w ";
cmd = timeCmd + "/bin/su %s -c \"exec hython /drd/software/int/farm/hbatchrendersim.py " % self.Job.user().name()
args = QStringList()
args << str(self.Job.fileName())
args << str(self.Job.nodeName())
args << str(self.Job.frameStart())
args << str(self.Job.frameEnd())
args << str(self.Job.packetSize())
args << str(self.assignedTasks())
#args << str(self.tracker())
cmd = cmd + args.join(" ") + "\""
return cmd
def startProcess(self):
Log( "HoudiniSimBurner::startBurn() called" )
JobBurner.startProcess(self)
Log( "HoudiniSimBurner::startBurn() done" )
def cleanup(self):
Log( "HoudiniSimBurner::cleanup() called" )
mProcessId = self.process().pid()
Log( "HoudiniSimBurner::cleanup() Getting pid: %s" % mProcessId )
# Need to find the correct PID space ..
if mProcessId > 6000:
descendants = processChildrenIds( mProcessId, True )
for processId in descendants:
Log( "HoudiniSimBurner::cleanup() Killing pid: %s" % processId )
killProcess( processId )
JobBurner.cleanup(self)
Log( "HoudiniSimBurner::cleaup() done" )
def slotProcessOutputLine(self,line,channel):
JobBurner.slotProcessOutputLine(self,line,channel)
class HoudiniSimBurnerPlugin(JobBurnerPlugin):
def __init__(self):
JobBurnerPlugin.__init__(self)
def jobTypes(self):
return QStringList('HoudiniSim10')
def createBurner(self,jobAss,slave):
Log( "HoudiniSimBurnerPlugin::createBurner() called" )
if jobAss.job().jobType().name() == 'HoudiniSim10':
return HoudiniSimBurner(jobAss,slave)
JobBurnerFactory.registerPlugin( HoudiniSimBurnerPlugin() )
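# For reference, a sketch of the command line that executable() above assembles
# (the <...> values are placeholders for the job fields, not real data):
#
#   /usr/bin/time --format=baztime:real:%e:user:%U:sys:%S:iowait:%w \
#       /bin/su <user> -c "exec hython /drd/software/int/farm/hbatchrendersim.py \
#       <fileName> <nodeName> <frameStart> <frameEnd> <packetSize> <assignedTasks>"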
|
8u1a/plaso
|
refs/heads/master
|
tests/serializer/json_serializer.py
|
3
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the serializer object implementation using JSON."""
import collections
import json
import unittest
from plaso.lib import event
from plaso.serializer import json_serializer
from plaso.storage import collection
import pytz
class JSONSerializerTestCase(unittest.TestCase):
"""Tests for a JSON serializer object."""
# Show full diff results, part of TestCase so does not follow our naming
# conventions.
maxDiff = None
def _TestReadSerialized(self, serializer_object, json_dict):
"""Tests the ReadSerialized function.
Args:
serializer_object: the JSON serializer object.
json_dict: the JSON dict.
Returns:
The unserialized object.
"""
# We use json.dumps to make sure the dict does not serialize into
# an invalid JSON string e.g. one that contains string prefixes
# like b'' or u''.
json_string = json.dumps(json_dict)
unserialized_object = serializer_object.ReadSerialized(json_string)
self.assertNotEqual(unserialized_object, None)
return unserialized_object
def _TestWriteSerialized(
self, serializer_object, unserialized_object, expected_json_dict):
"""Tests the WriteSerialized function.
Args:
serializer_object: the JSON serializer object.
unserialized_object: the unserialized object.
expected_json_dict: the expected JSON dict.
Returns:
The serialized JSON string.
"""
json_string = serializer_object.WriteSerialized(unserialized_object)
# We use json.loads here to compare dicts since we cannot pre-determine
# the actual order of values in the JSON string.
json_dict = json.loads(json_string)
self.assertEqual(json_dict, expected_json_dict)
return json_string
class JSONAnalysisReportSerializerTest(JSONSerializerTestCase):
"""Tests for the JSON analysis report serializer object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
# TODO: preserve the tuples in the report dict.
self._report_dict = {
u'dude': [
[u'Google Keep - notes and lists',
u'hmjkmjkepdijhoojdojkdfohbdgmmhki']
],
u'frank': [
[u'YouTube', u'blpcfgokakmgnkcojhhkbfbldkacnbeo'],
[u'Google Play Music', u'icppfcnhkcmnfdhfhphakoifcfokfdhg']
]
}
self._report_text = (
u' == USER: dude ==\n'
u' Google Keep - notes and lists [hmjkmjkepdijhoojdojkdfohbdgmmhki]\n'
u'\n'
u' == USER: frank ==\n'
u' Google Play Music [icppfcnhkcmnfdhfhphakoifcfokfdhg]\n'
u' YouTube [blpcfgokakmgnkcojhhkbfbldkacnbeo]\n'
u'\n')
# TODO: add report_array and _anomalies tests.
self._event_tag_json_string = (
u'[{"comment": "This is a test event tag.", '
u'"event_uuid": "403818f93dce467bac497ef0f263fde8", '
u'"__type__": "EventTag", '
u'"tags": ["This is a test.", "Also a test."]}]')
self._json_dict = {
u'__type__': u'AnalysisReport',
u'_anomalies': [],
u'_tags': self._event_tag_json_string,
u'plugin_name': u'chrome_extension_test',
u'report_dict': self._report_dict,
u'text': self._report_text,
u'time_compiled': 1431978243000000}
self._serializer = json_serializer.JSONAnalysisReportSerializer
def testReadSerialized(self):
"""Tests the ReadSerialized function."""
self._TestReadSerialized(self._serializer, self._json_dict)
def testWriteSerialized(self):
"""Tests the WriteSerialized function."""
event_tag = event.EventTag()
event_tag.event_uuid = u'403818f93dce467bac497ef0f263fde8'
event_tag.comment = u'This is a test event tag.'
event_tag.tags = [u'This is a test.', u'Also a test.']
self.assertTrue(event_tag.IsValidForSerialization())
analysis_report = event.AnalysisReport(u'chrome_extension_test')
analysis_report.report_dict = self._report_dict
analysis_report.text = self._report_text
analysis_report.time_compiled = 1431978243000000
analysis_report.SetTags([event_tag])
self._TestWriteSerialized(
self._serializer, analysis_report, self._json_dict)
class JSONEventObjectSerializerTest(JSONSerializerTestCase):
"""Tests for the JSON event object serializer object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._json_dict = {
u'__type__': u'EventObject',
u'a_tuple': [
u'some item', [234, 52, 15], {u'a': u'not a', u'b': u'not b'}, 35],
u'binary_string': {
u'__type__': u'bytes',
u'stream': u'=C0=90=90binary'},
u'data_type': u'test:event2',
u'empty_string': u'',
u'integer': 34,
u'my_dict': {
u'a': u'not b',
u'an': [234, 32],
u'c': 34,
u'list': [u'sf', 234]
},
u'my_list': [u'asf', 4234, 2, 54, u'asf'],
u'string': u'Normal string',
u'timestamp_desc': u'Written',
u'timestamp': 1234124,
u'uuid': u'5a78777006de4ddb8d7bbe12ab92ccf8',
u'unicode_string': u'And I am a unicorn.',
u'zero_integer': 0
}
self._serializer = json_serializer.JSONEventObjectSerializer
def testReadSerialized(self):
"""Tests the ReadSerialized function."""
event_object = self._TestReadSerialized(self._serializer, self._json_dict)
# An integer value containing 0 should get stored.
self.assertTrue(hasattr(event_object, u'zero_integer'))
attribute_value = getattr(event_object, u'integer', 0)
self.assertEqual(attribute_value, 34)
attribute_value = getattr(event_object, u'my_list', [])
self.assertEqual(len(attribute_value), 5)
attribute_value = getattr(event_object, u'string', u'')
self.assertEqual(attribute_value, u'Normal string')
attribute_value = getattr(event_object, u'unicode_string', u'')
self.assertEqual(attribute_value, u'And I am a unicorn.')
attribute_value = getattr(event_object, u'a_tuple', ())
self.assertEqual(len(attribute_value), 4)
def testWriteSerialized(self):
"""Tests the WriteSerialized function."""
event_object = event.EventObject()
event_object.data_type = u'test:event2'
event_object.timestamp = 1234124
event_object.timestamp_desc = u'Written'
# Prevent the event object from generating its own UUID.
event_object.uuid = u'5a78777006de4ddb8d7bbe12ab92ccf8'
event_object.binary_string = b'\xc0\x90\x90binary'
event_object.empty_string = u''
event_object.zero_integer = 0
event_object.integer = 34
event_object.string = u'Normal string'
event_object.unicode_string = u'And I am a unicorn.'
event_object.my_list = [u'asf', 4234, 2, 54, u'asf']
event_object.my_dict = {
u'a': u'not b', u'c': 34, u'list': [u'sf', 234], u'an': [234, 32]}
event_object.a_tuple = (
u'some item', [234, 52, 15], {u'a': u'not a', u'b': u'not b'}, 35)
event_object.null_value = None
json_string = self._TestWriteSerialized(
self._serializer, event_object, self._json_dict)
event_object = self._serializer.ReadSerialized(json_string)
# TODO: fix this.
# An empty string should not get stored.
# self.assertFalse(hasattr(event_object, u'empty_string'))
# A None (or Null) value should not get stored.
# self.assertFalse(hasattr(event_object, u'null_value'))
class JSONEventTagSerializerTest(JSONSerializerTestCase):
"""Test for the JSON Event Tag serializer object."""
def setUp(self):
"""Set up the necessary objects."""
self._event_uuid = u'403818f93dce467bac497ef0f263fde8'
self._json_dict = {
u'event_uuid': self._event_uuid,
u'comment': u'This is a test event tag.',
u'tags': [u'This is a test.', u'Also a test.'],
}
self._serializer = json_serializer.JSONEventTagSerializer
def testReadSerialized(self):
"""Tests the ReadSerialized function."""
self._TestReadSerialized(self._serializer, self._json_dict)
def testWriteSerialized(self):
"""Tests the WriteSerialized function."""
event_tag = event.EventTag()
event_tag.event_uuid = self._event_uuid
event_tag.comment = u'This is a test event tag.'
event_tag.tags = [u'This is a test.', u'Also a test.']
self.assertTrue(event_tag.IsValidForSerialization())
self._TestWriteSerialized(self._serializer, event_tag, self._json_dict)
class JSONPreprocessObjectSerializerTest(JSONSerializerTestCase):
"""Tests for the JSON preprocessing object serializer object."""
def setUp(self):
"""Set up the necessary objects."""
self._parsers = [
u'esedb', u'chrome_preferences', u'winfirewall', u'android_app_usage',
u'selinux', u'recycle_bin', u'pls_recall', u'filestat', u'sqlite',
u'cups_ipp', u'winiis', u'lnk', u'rplog', u'symantec_scanlog',
u'recycle_bin_info2', u'winevtx', u'plist', u'bsm_log', u'mac_keychain',
u'pcap', u'mac_securityd', u'utmp', u'pe', u'asl_log', u'opera_global',
u'custom_destinations', u'chrome_cache', u'popularity_contest',
u'prefetch', u'winreg', u'msiecf', u'bencode', u'skydrive_log',
u'openxml', u'xchatscrollback', u'utmpx', u'binary_cookies', u'syslog',
u'hachoir', u'opera_typed_history', u'winevt', u'mac_appfirewall_log',
u'winjob', u'olecf', u'xchatlog', u'macwifi', u'mactime', u'java_idx',
u'firefox_cache', u'mcafee_protection', u'skydrive_log_error']
self._stores = {
u'Number': 1,
u'Store 1': {
u'count': 3,
u'data_type': [u'fs:stat'],
u'parsers': [u'filestat'],
u'range': [1387891912000000, 1387891912000000],
u'type_count': [[u'fs:stat', 3]],
u'version': 1
}
}
self._json_dict = {
u'__type__': u'PreprocessObject',
u'collection_information': {
u'cmd_line': (
u'/usr/bin/log2timeline.py pinfo_test.out '
u'tsk_volume_system.raw'),
u'configured_zone': {
u'__type__': u'timezone',
u'zone': u'UTC'
},
u'debug': False,
u'file_processed': u'/tmp/tsk_volume_system.raw',
u'image_offset': 180224,
u'method': u'imaged processed',
u'os_detected': u'N/A',
u'output_file': u'pinfo_test.out',
u'parser_selection': u'(no list set)',
u'parsers': self._parsers,
u'preferred_encoding': u'utf-8',
u'preprocess': True,
u'protobuf_size': 0,
u'recursive': False,
u'runtime': u'multi process mode',
u'time_of_run': 1430290411000000,
u'version': u'1.2.1_20150424',
u'vss parsing': False,
u'workers': 0
},
u'counter': {
u'__type__': u'collections.Counter',
u'filestat': 3,
u'total': 3
},
u'guessed_os': u'None',
u'plugin_counter': {
u'__type__': u'collections.Counter',
},
u'store_range': {
u'__type__': u'range',
u'end': 1,
u'start': 1
},
u'stores': self._stores,
u'zone': {
u'__type__': u'timezone',
u'zone': u'UTC'
}
}
self._counter = collections.Counter()
self._counter[u'filestat'] = 3
self._counter[u'total'] = 3
self._plugin_counter = collections.Counter()
self._serializer = json_serializer.JSONPreprocessObjectSerializer
def testReadSerialized(self):
"""Tests the ReadSerialized function."""
pre_obj = self._TestReadSerialized(self._serializer, self._json_dict)
counter = pre_obj.counter
for key, value in iter(counter.items()):
self.assertEqual(self._counter[key], value)
def testWriteSerialized(self):
"""Tests the WriteSerialized function."""
preprocess_object = event.PreprocessObject()
preprocess_object.collection_information = {
u'cmd_line': (
u'/usr/bin/log2timeline.py pinfo_test.out tsk_volume_system.raw'),
u'configured_zone': pytz.UTC,
u'debug': False,
u'file_processed': u'/tmp/tsk_volume_system.raw',
u'image_offset': 180224,
u'method': u'imaged processed',
u'os_detected': u'N/A',
u'output_file': u'pinfo_test.out',
u'parser_selection': u'(no list set)',
u'parsers': self._parsers,
u'preferred_encoding': u'utf-8',
u'preprocess': True,
u'protobuf_size': 0,
u'recursive': False,
u'runtime': u'multi process mode',
u'time_of_run': 1430290411000000,
u'version': u'1.2.1_20150424',
u'vss parsing': False,
u'workers': 0
}
preprocess_object.counter = self._counter
preprocess_object.guessed_os = u'None'
preprocess_object.plugin_counter = self._plugin_counter
preprocess_object.store_range = (1, 1)
preprocess_object.stores = self._stores
preprocess_object.zone = pytz.UTC
self._TestWriteSerialized(
self._serializer, preprocess_object, self._json_dict)
class JSONCollectionInformationSerializerTest(JSONSerializerTestCase):
"""Tests for the JSON preprocessing collection information object."""
def setUp(self):
"""Set up the necessary objects."""
self._json_dict = {
u'__COUNTERS__': {
u'foobar': {
u'stuff': 1245
}
},
u'foo': u'bar',
u'foo2': u'randombar'
}
self._collection_information_object = collection.CollectionInformation()
self._collection_information_object.AddCounter(u'foobar')
self._collection_information_object.IncrementCounter(
u'foobar', u'stuff', value=1245)
self._collection_information_object.SetValue(u'foo', u'bar')
self._collection_information_object.SetValue(u'foo2', u'randombar')
self._serializer = json_serializer.JSONCollectionInformationObjectSerializer
def testReadSerialized(self):
"""Tests the ReadSerialized function."""
collection_object = self._TestReadSerialized(
self._serializer, self._json_dict)
for key, value in collection_object.GetValueDict().iteritems():
self.assertEqual(
value, self._collection_information_object.GetValue(key))
for identifier, counter in collection_object.GetCounters():
compare_counter = self._collection_information_object.GetCounter(
identifier)
for key, value in counter.iteritems():
self.assertEqual(value, compare_counter[key])
def testWriteSerialized(self):
"""Tests the WriteSerialized function."""
self._TestWriteSerialized(
self._serializer, self._collection_information_object, self._json_dict)
if __name__ == '__main__':
unittest.main()
|
kinow-io/kinow-python-sdk
|
refs/heads/master
|
kinow_client/models/customer_group_video_stats_1.py
|
1
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.41
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CustomerGroupVideoStats1(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, pagination=None, data=None):
"""
CustomerGroupVideoStats1 - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'pagination': 'Pagination',
'data': 'list[CustomerGroupVideoStats]'
}
self.attribute_map = {
'pagination': 'pagination',
'data': 'data'
}
self._pagination = pagination
self._data = data
@property
def pagination(self):
"""
Gets the pagination of this CustomerGroupVideoStats1.
:return: The pagination of this CustomerGroupVideoStats1.
:rtype: Pagination
"""
return self._pagination
@pagination.setter
def pagination(self, pagination):
"""
Sets the pagination of this CustomerGroupVideoStats1.
:param pagination: The pagination of this CustomerGroupVideoStats1.
:type: Pagination
"""
self._pagination = pagination
@property
def data(self):
"""
Gets the data of this CustomerGroupVideoStats1.
:return: The data of this CustomerGroupVideoStats1.
:rtype: list[CustomerGroupVideoStats]
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this CustomerGroupVideoStats1.
:param data: The data of this CustomerGroupVideoStats1.
:type: list[CustomerGroupVideoStats]
"""
self._data = data
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
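# Hedged usage sketch (illustrative only, not part of the swagger-codegen output):
# builds an empty stats container and exercises the helpers defined above.
if __name__ == "__main__":
    stats = CustomerGroupVideoStats1(pagination=None, data=[])
    print(stats.to_dict())                              # -> {'pagination': None, 'data': []}
    print(stats == CustomerGroupVideoStats1(data=[]))   # -> True, __eq__ compares __dict__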
|
Curso-OpenShift/Formulario
|
refs/heads/master
|
OverFlow/ProjectFormulario/env/lib/python2.7/site-packages/django/conf/locale/el/__init__.py
|
12133432
| |
rven/odoo
|
refs/heads/14.0-fix-partner-merge-mail-activity
|
addons/iap_mail/__manifest__.py
|
3
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': "IAP / Mail",
'summary': """Bridge between IAP and mail""",
'description': """Bridge between IAP and mail""",
'category': 'Hidden/Tools',
'version': '1.0',
'depends': [
'iap',
'mail',
],
'application': False,
'installable': True,
'auto_install': True,
'data': [
'views/mail_templates.xml',
],
}
|
giftman/Gifts
|
refs/heads/master
|
Python/moveit/chapter2/setup.py
|
1
|
from distutils.core import setup
setup(
name = 'giftman_nester',
version = '1.2.0',
py_modules = ['giftman_nester'],
author = 'giftman',
author_email = '121552591@qq.com',
url = 'www.baidu.com',
description = 'A S',
)
|
rbtcollins/pip
|
refs/heads/develop
|
tests/data/packages/symlinks/setup.py
|
68
|
from setuptools import setup
version = '0.1'
setup(name='symlinks',
version=version,
packages=["symlinks"],
)
|
itucsdb1522/itucsdb1522
|
refs/heads/master
|
team.py
|
1
|
class team:
    """Simple data holder for a team record (id, name, funddate, stars, nation)."""
    def __init__(self, i, nm, fd, stars, n):
        self.id = i
        self.name = nm
        self.funddate = fd
        self.stars = stars  # parameter renamed from 'str' to avoid shadowing the builtin
        self.nation = n
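# Hedged usage sketch; the field values below are invented for illustration only.
if __name__ == "__main__":
    t = team(1, "Example FC", "1905-01-01", 5, "Turkey")
    print(t.name)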
|
vFense/vFenseAgent-nix
|
refs/heads/development
|
agent/deps/mac/Python-2.7.5/lib/python2.7/lib2to3/fixes/fix_except.py
|
326
|
"""Fixer for except statements with named exceptions.
The following cases will be converted:
- "except E, T:" where T is a name:
except E as T:
- "except E, T:" where T is not a name, tuple or list:
except E as t:
T = t
This is done because the target of an "except" clause must be a
name.
- "except E, T:" where T is a tuple or list literal:
except E as t:
T = t.args
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms
def find_excepts(nodes):
for i, n in enumerate(nodes):
if n.type == syms.except_clause:
if n.children[0].value == u'except':
yield (n, nodes[i+2])
class FixExcept(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
try_stmt< 'try' ':' (simple_stmt | suite)
cleanup=(except_clause ':' (simple_stmt | suite))+
tail=(['except' ':' (simple_stmt | suite)]
['else' ':' (simple_stmt | suite)]
['finally' ':' (simple_stmt | suite)]) >
"""
def transform(self, node, results):
syms = self.syms
tail = [n.clone() for n in results["tail"]]
try_cleanup = [ch.clone() for ch in results["cleanup"]]
for except_clause, e_suite in find_excepts(try_cleanup):
if len(except_clause.children) == 4:
(E, comma, N) = except_clause.children[1:4]
comma.replace(Name(u"as", prefix=u" "))
if N.type != token.NAME:
# Generate a new N for the except clause
new_N = Name(self.new_name(), prefix=u" ")
target = N.clone()
target.prefix = u""
N.replace(new_N)
new_N = new_N.clone()
# Insert "old_N = new_N" as the first statement in
# the except body. This loop skips leading whitespace
# and indents
#TODO(cwinter) suite-cleanup
suite_stmts = e_suite.children
for i, stmt in enumerate(suite_stmts):
if isinstance(stmt, pytree.Node):
break
# The assignment is different if old_N is a tuple or list
# In that case, the assignment is old_N = new_N.args
if is_tuple(N) or is_list(N):
assign = Assign(target, Attr(new_N, Name(u'args')))
else:
assign = Assign(target, new_N)
#TODO(cwinter) stopgap until children becomes a smart list
for child in reversed(suite_stmts[:i]):
e_suite.insert_child(0, child)
e_suite.insert_child(i, assign)
elif N.prefix == u"":
# No space after a comma is legal; no space after "as",
# not so much.
N.prefix = u" "
#TODO(cwinter) fix this when children becomes a smart list
children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
return pytree.Node(node.type, children)
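# Hedged, self-contained sketch (not part of the original fixer): drives the fixer
# through lib2to3's public refactoring entry point to demonstrate the rewrite
# described in the module docstring.
if __name__ == "__main__":
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(["lib2to3.fixes.fix_except"])
    src = "try:\n    pass\nexcept KeyError, err:\n    pass\n"
    print(tool.refactor_string(src, "<example>"))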
|
Belxjander/Kirito
|
refs/heads/master
|
Python-3.5.0-Amiga/Lib/ctypes/test/test_unicode.py
|
102
|
import unittest
import ctypes
from ctypes.test import need_symbol
import _ctypes_test
@need_symbol('c_wchar')
class UnicodeTestCase(unittest.TestCase):
def test_wcslen(self):
dll = ctypes.CDLL(_ctypes_test.__file__)
wcslen = dll.my_wcslen
wcslen.argtypes = [ctypes.c_wchar_p]
self.assertEqual(wcslen("abc"), 3)
self.assertEqual(wcslen("ab\u2070"), 3)
self.assertRaises(ctypes.ArgumentError, wcslen, b"ab\xe4")
def test_buffers(self):
buf = ctypes.create_unicode_buffer("abc")
self.assertEqual(len(buf), 3+1)
buf = ctypes.create_unicode_buffer("ab\xe4\xf6\xfc")
self.assertEqual(buf[:], "ab\xe4\xf6\xfc\0")
self.assertEqual(buf[::], "ab\xe4\xf6\xfc\0")
self.assertEqual(buf[::-1], '\x00\xfc\xf6\xe4ba')
self.assertEqual(buf[::2], 'a\xe4\xfc')
self.assertEqual(buf[6:5:-1], "")
func = ctypes.CDLL(_ctypes_test.__file__)._testfunc_p_p
class StringTestCase(UnicodeTestCase):
def setUp(self):
func.argtypes = [ctypes.c_char_p]
func.restype = ctypes.c_char_p
def tearDown(self):
func.argtypes = None
func.restype = ctypes.c_int
def test_func(self):
self.assertEqual(func(b"abc\xe4"), b"abc\xe4")
def test_buffers(self):
buf = ctypes.create_string_buffer(b"abc")
self.assertEqual(len(buf), 3+1)
buf = ctypes.create_string_buffer(b"ab\xe4\xf6\xfc")
self.assertEqual(buf[:], b"ab\xe4\xf6\xfc\0")
self.assertEqual(buf[::], b"ab\xe4\xf6\xfc\0")
self.assertEqual(buf[::-1], b'\x00\xfc\xf6\xe4ba')
self.assertEqual(buf[::2], b'a\xe4\xfc')
self.assertEqual(buf[6:5:-1], b"")
if __name__ == '__main__':
unittest.main()
|
phalax4/CarnotKE
|
refs/heads/master
|
jyhton/lib-python/2.7/test/test_list.py
|
84
|
import sys
from test import test_support, list_tests
class ListTest(list_tests.CommonTest):
type2test = list
def test_basic(self):
self.assertEqual(list([]), [])
l0_3 = [0, 1, 2, 3]
l0_3_bis = list(l0_3)
self.assertEqual(l0_3, l0_3_bis)
self.assertTrue(l0_3 is not l0_3_bis)
self.assertEqual(list(()), [])
self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3])
self.assertEqual(list(''), [])
self.assertEqual(list('spam'), ['s', 'p', 'a', 'm'])
if sys.maxsize == 0x7fffffff:
# This test can currently only work on 32-bit machines.
# XXX If/when PySequence_Length() returns a ssize_t, it should be
# XXX re-enabled.
# Verify clearing of bug #556025.
# This assumes that the max data size (sys.maxint) == max
# address size; it also assumes that the address size is at
# least 4 bytes. With 8 byte addresses, the bug is not well
# tested.
#
# Note: This test is expected to SEGV under Cygwin 1.3.12 or
# earlier due to a newlib bug. See the following mailing list
# thread for the details:
# http://sources.redhat.com/ml/newlib/2002/msg00369.html
self.assertRaises(MemoryError, list, xrange(sys.maxint // 2))
# This code used to segfault in Py2.4a3
x = []
x.extend(-y for y in x)
self.assertEqual(x, [])
def test_truth(self):
super(ListTest, self).test_truth()
self.assertTrue(not [])
self.assertTrue([42])
def test_identity(self):
self.assertTrue([] is not [])
def test_len(self):
super(ListTest, self).test_len()
self.assertEqual(len([]), 0)
self.assertEqual(len([0]), 1)
self.assertEqual(len([0, 1, 2]), 3)
def test_overflow(self):
lst = [4, 5, 6, 7]
n = int((sys.maxint*2+2) // len(lst))
def mul(a, b): return a * b
def imul(a, b): a *= b
self.assertRaises((MemoryError, OverflowError), mul, lst, n)
self.assertRaises((MemoryError, OverflowError), imul, lst, n)
def test_main(verbose=None):
test_support.run_unittest(ListTest)
# verify reference counting
import sys
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(ListTest)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
|
huor/incubator-hawq
|
refs/heads/master
|
tools/bin/pythonSrc/pychecker-0.8.18/test_input/test55.py
|
11
|
'd'
def x():
try :
print "howdy, this ain't right"
except KeyError, RuntimeError :
pass
def y():
try :
print "ok, " + "this func %s should be fine" % y.__name__
except (KeyError, RuntimeError) :
pass
def z():
try :
pass
except (KeyError, RuntimeError, IndexError) :
pass
def a():
try :
pass
except (KeyError, RuntimeError, IndexError), a :
print a
try :
pass
except KeyError, RuntimeError :
pass
try :
pass
except (KeyError, RuntimeError) :
pass
def b():
try :
print "ok, " + "this func %s should be fine" % y.__name__
except (KeyError, RuntimeError), msg :
print msg
def c():
try :
print "ok, " + "this func %s should be fine" % y.__name__
except KeyError, detail :
print detail
|
etuna-SBF-kog/Stadsparken
|
refs/heads/master
|
env/lib/python2.7/site-packages/south/tests/otherfakeapp/__init__.py
|
12133432
| |
Suninus/NewsBlur
|
refs/heads/master
|
vendor/paypal/standard/ipn/django_migrations/__init__.py
|
12133432
| |
achang97/YouTunes
|
refs/heads/master
|
lib/python2.7/encodings/shift_jis_2004.py
|
816
|
#
# shift_jis_2004.py: Python Unicode Codec for SHIFT_JIS_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jis_2004')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='shift_jis_2004',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
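# Hedged usage note (illustrative, not part of the codec module itself): once the
# encodings package registers this codec, it is reachable by name through the
# normal codec machinery.
if __name__ == "__main__":
    encoded = u'\u3042'.encode('shift_jis_2004')   # HIRAGANA LETTER A -> bytes 0x82 0xA0
    assert encoded.decode('shift_jis_2004') == u'\u3042'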
|
rancherio/rancher
|
refs/heads/master
|
tests/validation/tests/v3_api/test_istio.py
|
2
|
import copy
import os
import re
import pytest
import time
from subprocess import CalledProcessError
from rancher import ApiError
from .test_auth import enable_ad, load_setup_data
from .common import add_role_to_user
from .common import auth_get_user_token
from .common import auth_resource_cleanup
from .common import AUTH_PROVIDER
from .common import AUTH_USER_PASSWORD
from .common import apply_crd
from .common import check_condition
from .common import compare_versions
from .common import CLUSTER_MEMBER
from .common import CLUSTER_OWNER
from .common import create_kubeconfig
from .common import create_project_and_ns
from .common import create_ns
from .common import DEFAULT_TIMEOUT
from .common import delete_crd
from .common import execute_kubectl_cmd
from .common import get_a_group_and_a_user_not_in_it
from .common import get_admin_client
from .common import get_client_for_token
from .common import get_cluster_client_for_token
from .common import get_crd
from .common import get_group_principal_id
from .common import get_project_client_for_token
from .common import get_user_by_group
from .common import get_user_client
from .common import get_user_client_and_cluster
from .common import if_test_group_rbac
from .common import if_test_rbac
from .common import login_as_auth_user
from .common import NESTED_GROUP_ENABLED
from .common import PROJECT_MEMBER
from .common import PROJECT_OWNER
from .common import PROJECT_READ_ONLY
from .common import random_test_name
from .common import rbac_get_kubeconfig_by_role
from .common import rbac_get_namespace
from .common import rbac_get_user_token_by_role
from .common import requests
from .common import run_command as run_command_common
from .common import ADMIN_TOKEN
from .common import USER_TOKEN
from .common import validate_all_workload_image_from_rancher
from .common import validate_app_deletion
from .common import wait_for_condition
from .common import wait_for_pod_to_running
from .common import wait_for_pods_in_workload
from .common import wait_for_wl_to_active
from .test_monitoring import C_MONITORING_ANSWERS
ISTIO_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "resource/istio")
ISTIO_CRD_PATH = os.path.join(ISTIO_PATH, "crds")
ISTIO_TEMPLATE_ID = "cattle-global-data:system-library-rancher-istio"
ISTIO_VERSION = os.environ.get('RANCHER_ISTIO_VERSION', "")
ISTIO_INGRESSGATEWAY_NODEPORT = os.environ.get(
'RANCHER_ISTIO_INGRESSGATEWAY_NODEPORT', 31380)
ISTIO_BOOKINFO_QUERY_RESULT = "<title>Simple Bookstore App</title>"
ISTIO_EXTERNAL_ID = "catalog://?catalog=system-library" \
"&template=rancher-istio&version="
DEFAULT_ANSWERS = {
"enableCRDs": "true",
"gateways.enabled": "true",
"gateways.istio-ingressgateway.type": "NodePort",
"gateways.istio-ingressgateway.ports[0].nodePort":
ISTIO_INGRESSGATEWAY_NODEPORT,
"gateways.istio-ingressgateway.ports[0].port": 80,
"gateways.istio-ingressgateway.ports[0].targetPort": 80,
"gateways.istio-ingressgateway.ports[0].name": "http2",
"global.monitoring.type": "cluster-monitoring"}
namespace = {"app_client": None, "app_ns": None, "gateway_url": None,
"system_ns": None, "system_project": None,
"istio_version": None, "istio_app": None}
crd_test_data = [
("policy.authentication.istio.io", "authenticationpolicy.yaml"),
# ("adapter.config.istio.io", "adapter.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("attributemanifest.config.istio.io", "attributemanifest.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
("handler.config.istio.io", "handler.yaml"),
# ("httpapispecbinding.config.istio.io", "httpapispecbinding.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("httpapispec.config.istio.io", "httpapispec.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("instance.config.istio.io", "instance.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
("quotaspecbinding.config.istio.io", "quotaspecbinding.yaml"),
("quotaspec.config.istio.io", "quotaspec.yaml"),
("rule.config.istio.io", "rule.yaml"),
# ("template.config.istio.io", "template.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
("destinationrule.networking.istio.io", "destinationrule.yaml"),
("envoyfilter.networking.istio.io", "envoyfilter.yaml"),
("gateway.networking.istio.io", "gateway.yaml"),
("serviceentry.networking.istio.io", "serviceentry.yaml"),
("sidecar.networking.istio.io", "sidecar.yaml"),
("virtualservice.networking.istio.io", "virtualservice.yaml"),
("rbacconfig.rbac.istio.io", "rbacconfig.yaml"),
("servicerolebinding.rbac.istio.io", "servicerolebinding.yaml"),
("servicerole.rbac.istio.io", "servicerole.yaml"),
("authorizationpolicy.security.istio.io", "authorizationpolicy.yaml"),
# ("certificate.certmanager.k8s.io", "certificate.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("challenge.certmanager.k8s.io", "challenge.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("clusterissuer.certmanager.k8s.io", "clusterissuer.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("issuer.certmanager.k8s.io", "issuer.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
# ("order.certmanager.k8s.io", "order.yaml"),
# ABOVE FAILS in current state: Rancher v2.3.5
]
def test_istio_resources():
app_client = namespace["app_client"]
app_ns = namespace["app_ns"]
gateway_url = namespace["gateway_url"]
create_and_test_bookinfo_services(app_client, app_ns)
create_bookinfo_virtual_service(app_client, app_ns)
create_and_test_bookinfo_gateway(app_client, app_ns, gateway_url)
create_and_test_bookinfo_routing(app_client, app_ns, gateway_url)
def test_istio_deployment_options():
file_path = ISTIO_PATH + '/nginx-custom-sidecar.yaml'
expected_image = "rancher/istio-proxyv2:1.4.3"
p_client = namespace["app_client"]
ns = namespace["app_ns"]
execute_kubectl_cmd('apply -f ' + file_path + ' -n ' + ns.name, False)
result = execute_kubectl_cmd('get deployment -n ' + ns.name, True)
for deployment in result['items']:
wl = p_client.list_workload(id='deployment:'
+ deployment['metadata']['namespace']
+ ':'
+ deployment['metadata']['name']).data[
0]
wl = wait_for_wl_to_active(p_client, wl, 60)
wl_pods = wait_for_pods_in_workload(p_client, wl, 1)
wait_for_pod_to_running(p_client, wl_pods[0])
workload = p_client.list_workload(name="nginx-v1",
namespaceId=ns.id).data[0]
pod = p_client.list_pod(workloadId=workload.id).data[0]
try:
assert any(container.image == expected_image
for container in pod.containers)
except AssertionError as e:
retrieved_images = ""
for container in pod.containers:
retrieved_images += container.image + " "
retrieved_images = retrieved_images.strip().split(" ")
raise AssertionError("None of {} matches '{}'".format(
retrieved_images, expected_image))
# Enables all possible istio custom answers with the exception of certmanager
def test_istio_custom_answers(skipif_unsupported_istio_version,
enable_all_options_except_certmanager):
expected_deployments = [
"grafana", "istio-citadel", "istio-egressgateway", "istio-galley",
"istio-ilbgateway", "istio-ingressgateway", "istio-pilot",
"istio-policy", "istio-sidecar-injector", "istio-telemetry",
"istio-tracing", "istiocoredns", "kiali", "prometheus"
]
expected_daemonsets = ["istio-nodeagent"]
expected_job_list = ["istio-onefive-migration" if int(namespace["istio_version"].split(".")[1]) >= 5 else None]
validate_all_workload_image_from_rancher(
get_system_client(USER_TOKEN), namespace["system_ns"],
ignore_pod_count=True, deployment_list=expected_deployments,
daemonset_list=expected_daemonsets, job_list=expected_job_list)
# This is split out separately from test_istio_custom_answers because
# certmanager creates its own crds outside of istio
@pytest.mark.skip(reason="To be removed, no support from 1.7.000")
def test_istio_certmanager_enables(skipif_unsupported_istio_version,
enable_certmanager):
expected_deployments = [
"certmanager", "istio-citadel", "istio-galley", "istio-ingressgateway",
"istio-pilot", "istio-policy", "istio-sidecar-injector",
"istio-telemetry", "istio-tracing", "kiali"
]
validate_all_workload_image_from_rancher(
get_system_client(USER_TOKEN), namespace["system_ns"],
ignore_pod_count=True, deployment_list=expected_deployments)
@if_test_rbac
def test_rbac_istio_metrics_allow_all_cluster_owner(allow_all_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_owner = rbac_get_user_token_by_role(CLUSTER_OWNER)
validate_access(kiali_url, cluster_owner)
validate_access(tracing_url, cluster_owner)
@if_test_rbac
def test_rbac_istio_monitoring_allow_all_cluster_owner(allow_all_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_owner = rbac_get_user_token_by_role(CLUSTER_OWNER)
validate_access(grafana_url, cluster_owner)
validate_access(prometheus_url, cluster_owner)
@if_test_rbac
def test_rbac_istio_metrics_allow_all_cluster_member(allow_all_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_member = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_access(kiali_url, cluster_member)
validate_access(tracing_url, cluster_member)
@if_test_rbac
def test_rbac_istio_monitoring_allow_all_cluster_member(allow_all_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_member = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_no_access(grafana_url, cluster_member)
validate_no_access(prometheus_url, cluster_member)
@if_test_rbac
def test_rbac_istio_metrics_allow_all_project_owner(allow_all_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_member = rbac_get_user_token_by_role(PROJECT_OWNER)
validate_access(kiali_url, cluster_member)
validate_access(tracing_url, cluster_member)
@if_test_rbac
def test_rbac_istio_monitoring_allow_all_project_owner(allow_all_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_member = rbac_get_user_token_by_role(PROJECT_OWNER)
validate_no_access(grafana_url, cluster_member)
validate_no_access(prometheus_url, cluster_member)
@if_test_rbac
def test_rbac_istio_metrics_allow_all_project_member(allow_all_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_member = rbac_get_user_token_by_role(PROJECT_MEMBER)
validate_access(kiali_url, cluster_member)
validate_access(tracing_url, cluster_member)
@if_test_rbac
def test_rbac_istio_monitoring_allow_all_project_member(allow_all_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_member = rbac_get_user_token_by_role(PROJECT_MEMBER)
validate_no_access(grafana_url, cluster_member)
validate_no_access(prometheus_url, cluster_member)
@if_test_rbac
def test_rbac_istio_metrics_allow_all_project_read(allow_all_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_member = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
validate_access(kiali_url, cluster_member)
validate_access(tracing_url, cluster_member)
@if_test_rbac
def test_rbac_istio_monitoring_allow_all_project_read(allow_all_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_member = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
validate_no_access(grafana_url, cluster_member)
validate_no_access(prometheus_url, cluster_member)
@if_test_rbac
def test_rbac_istio_metrics_allow_none_cluster_owner(default_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_owner = rbac_get_user_token_by_role(CLUSTER_OWNER)
validate_access(kiali_url, cluster_owner)
validate_access(tracing_url, cluster_owner)
@if_test_rbac
def test_rbac_istio_monitoring_allow_none_cluster_owner(default_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_owner = rbac_get_user_token_by_role(CLUSTER_OWNER)
validate_access(grafana_url, cluster_owner)
validate_access(prometheus_url, cluster_owner)
@if_test_rbac
def test_rbac_istio_metrics_allow_none_cluster_member(default_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_member = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_no_access(kiali_url, cluster_member)
validate_no_access(tracing_url, cluster_member)
@if_test_rbac
def test_rbac_istio_monitoring_allow_none_cluster_member(default_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_member = rbac_get_user_token_by_role(CLUSTER_MEMBER)
validate_no_access(grafana_url, cluster_member)
validate_no_access(prometheus_url, cluster_member)
@if_test_rbac
def test_rbac_istio_metrics_allow_none_project_owner(default_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_member = rbac_get_user_token_by_role(PROJECT_OWNER)
validate_no_access(kiali_url, cluster_member)
validate_no_access(tracing_url, cluster_member)
@if_test_rbac
def test_rbac_istio_monitoring_allow_none_project_owner(default_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_member = rbac_get_user_token_by_role(PROJECT_OWNER)
validate_no_access(grafana_url, cluster_member)
validate_no_access(prometheus_url, cluster_member)
@if_test_rbac
def test_rbac_istio_metrics_allow_none_project_member(default_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_member = rbac_get_user_token_by_role(PROJECT_MEMBER)
validate_no_access(kiali_url, cluster_member)
validate_no_access(tracing_url, cluster_member)
@if_test_rbac
def test_rbac_istio_monitoring_allow_none_project_member(default_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_member = rbac_get_user_token_by_role(PROJECT_MEMBER)
validate_no_access(grafana_url, cluster_member)
validate_no_access(prometheus_url, cluster_member)
@if_test_rbac
def test_rbac_istio_metrics_allow_none_project_read(default_access):
kiali_url, tracing_url, _, _ = get_urls()
cluster_member = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
validate_no_access(kiali_url, cluster_member)
validate_no_access(tracing_url, cluster_member)
@if_test_rbac
def test_rbac_istio_monitoring_allow_none_project_read(default_access):
_, _, grafana_url, prometheus_url = get_urls()
cluster_member = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
validate_no_access(grafana_url, cluster_member)
validate_no_access(prometheus_url, cluster_member)
@if_test_rbac
def test_rbac_istio_update_cluster_member():
user = rbac_get_user_token_by_role(CLUSTER_MEMBER)
with pytest.raises(ApiError) as e:
update_istio_app({"FOO": "BAR"}, user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_disable_cluster_member():
user = rbac_get_user_token_by_role(CLUSTER_MEMBER)
with pytest.raises(ApiError) as e:
delete_istio_app(user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_update_project_owner():
user = rbac_get_user_token_by_role(PROJECT_OWNER)
with pytest.raises(ApiError) as e:
update_istio_app({"FOO": "BAR"}, user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_disable_project_owner():
user = rbac_get_user_token_by_role(PROJECT_OWNER)
with pytest.raises(ApiError) as e:
delete_istio_app(user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_update_project_member():
user = rbac_get_user_token_by_role(PROJECT_MEMBER)
with pytest.raises(ApiError) as e:
update_istio_app({"FOO": "BAR"}, user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_disable_project_member():
user = rbac_get_user_token_by_role(PROJECT_MEMBER)
with pytest.raises(ApiError) as e:
delete_istio_app(user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_update_project_read():
user = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
with pytest.raises(ApiError) as e:
update_istio_app({"FOO": "BAR"}, user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_istio_disable_project_read():
user = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
with pytest.raises(ApiError) as e:
delete_istio_app(user)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("crd,manifest", crd_test_data)
def test_rbac_istio_crds_project_owner(skipif_unsupported_istio_version,
update_answers, crd, manifest):
if "certmanager" in crd:
update_answers("enable_certmanager")
else:
update_answers("default_access")
kubectl_context = rbac_get_kubeconfig_by_role(PROJECT_OWNER)
file = ISTIO_CRD_PATH + '/' + manifest
ns = rbac_get_namespace()
assert re.match("{}.* created".format(crd),
apply_crd(ns, file, kubectl_context))
assert "Forbidden" not in get_crd(ns, crd, kubectl_context)
assert re.match("{}.* deleted".format(crd),
delete_crd(ns, file, kubectl_context))
@if_test_rbac
@pytest.mark.parametrize("crd,manifest", crd_test_data)
def test_rbac_istio_crds_project_member(skipif_unsupported_istio_version,
update_answers, crd, manifest):
if "certmanager" in crd:
update_answers("enable_certmanager")
else:
update_answers("default_access")
kubectl_context = rbac_get_kubeconfig_by_role(PROJECT_MEMBER)
file = ISTIO_CRD_PATH + '/' + manifest
ns = rbac_get_namespace()
assert re.match("{}.* created".format(crd),
apply_crd(ns, file, kubectl_context))
assert "Forbidden" not in get_crd(ns, crd, kubectl_context)
assert re.match("{}.* deleted".format(crd),
delete_crd(ns, file, kubectl_context))
@if_test_rbac
@pytest.mark.parametrize("crd,manifest", crd_test_data)
def test_rbac_istio_crds_project_read(skipif_unsupported_istio_version,
update_answers, crd, manifest):
if "certmanager" in crd:
update_answers("enable_certmanager")
else:
update_answers("default_access")
kubectl_context = rbac_get_kubeconfig_by_role(PROJECT_READ_ONLY)
file = ISTIO_CRD_PATH + '/' + manifest
ns = rbac_get_namespace()
assert str(apply_crd(ns, file, kubectl_context)).startswith(
"Error from server (Forbidden)")
assert "Forbidden" not in get_crd(ns, crd, kubectl_context)
assert str(delete_crd(ns, file, kubectl_context)).startswith(
"Error from server (Forbidden)")
@if_test_group_rbac
def test_rbac_istio_group_access(auth_cluster_access, update_answers):
group, users, noauth_user = auth_cluster_access
update_answers("allow_group_access", group=group)
kiali_url, tracing_url, grafana_url, prometheus_url = get_urls()
for user in users:
user_token = auth_get_user_token(user)
print("Validating {} has access.".format(user))
validate_access(kiali_url, user_token)
validate_access(tracing_url, user_token)
validate_no_access(grafana_url, user_token)
validate_no_access(prometheus_url, user_token)
print("Validating {} does not have access.".format(noauth_user))
noauth_token = auth_get_user_token(noauth_user)
validate_no_access(kiali_url, noauth_token)
validate_no_access(tracing_url, noauth_token)
validate_no_access(grafana_url, noauth_token)
validate_no_access(prometheus_url, noauth_token)
def validate_access(url, user):
headers = {'Authorization': 'Bearer ' + user}
response = requests.get(headers=headers, url=url, verify=False)
assert response.ok
return response
def validate_no_access(url, user):
headers = {'Authorization': 'Bearer ' + user}
response = requests.get(headers=headers, url=url, verify=False)
assert not response.ok
return response
def update_istio_app(answers, user, app=None, ns=None, project=None):
if app is None:
app = namespace["istio_app"]
if ns is None:
ns = namespace["system_ns"]
if project is None:
project = namespace["system_project"]
p_client = get_system_client(user)
updated_answers = copy.deepcopy(DEFAULT_ANSWERS)
updated_answers.update(answers)
namespace["istio_app"] = p_client.update(
obj=app,
externalId=ISTIO_EXTERNAL_ID,
targetNamespace=ns.name,
projectId=project.id,
answers=updated_answers)
verify_istio_app_ready(p_client, namespace["istio_app"], 120, 120)
def create_and_verify_istio_app(p_client, ns, project):
print("creating istio catalog app")
app = p_client.create_app(
name="cluster-istio",
externalId=ISTIO_EXTERNAL_ID,
targetNamespace=ns.name,
projectId=project.id,
answers=DEFAULT_ANSWERS
)
verify_istio_app_ready(p_client, app, 120, 600)
return app
def delete_istio_app(user):
p_client = get_system_client(user)
p_client.delete(namespace["istio_app"])
def verify_istio_app_ready(p_client, app, install_timeout, deploy_timeout,
initial_run=True):
if initial_run:
print("Verify Istio App has installed and deployed properly")
if install_timeout <= 0 or deploy_timeout <= 0:
raise TimeoutError("Timeout waiting for istio to be properly "
"installed and deployed.") from None
elif 'conditions' in app and not initial_run:
for cond in app['conditions']:
if "False" in cond['status'] and 'message' in cond \
and "failed" in cond['message']:
raise AssertionError(
"Failed to properly install/deploy app. Reason: {}".format(
cond['message'])) from None
try:
wait_for_condition(p_client, app, check_condition('Installed', 'True'),
timeout=2)
except (Exception, TypeError):
verify_istio_app_ready(p_client, p_client.list_app(
name='cluster-istio').data[0], install_timeout-2, deploy_timeout,
initial_run=False)
try:
wait_for_condition(p_client, app, check_condition('Deployed', 'True'),
timeout=2)
except (Exception, TypeError):
verify_istio_app_ready(p_client, p_client.list_app(
name='cluster-istio').data[0], 2, deploy_timeout-2,
initial_run=False)
def get_urls():
_, cluster = get_user_client_and_cluster()
if namespace["istio_version"] == "0.1.0" \
or namespace["istio_version"] == "0.1.1":
kiali_url = os.environ.get('CATTLE_TEST_URL', "") + \
"/k8s/clusters/" + cluster.id + \
"/api/v1/namespaces/istio-system/services/" \
"http:kiali-http:80/proxy/"
else:
kiali_url = os.environ.get('CATTLE_TEST_URL', "") + \
"/k8s/clusters/" + cluster.id + \
"/api/v1/namespaces/istio-system/services/" \
"http:kiali:20001/proxy/"
tracing_url = os.environ.get('CATTLE_TEST_URL', "") + \
"/k8s/clusters/" + cluster.id + \
"/api/v1/namespaces/istio-system/services/" \
"http:tracing:80/proxy/jaeger/search"
grafana_url = os.environ.get('CATTLE_TEST_URL', "") + \
"/k8s/clusters/" + cluster.id + \
"/api/v1/namespaces/cattle-prometheus/services/" \
"http:access-grafana:80/proxy/dashboards/"
prometheus_url = os.environ.get('CATTLE_TEST_URL', "") + \
"/k8s/clusters/" + cluster.id + \
"/api/v1/namespaces/cattle-prometheus/services/" \
"http:access-prometheus:80/proxy/"
return kiali_url, tracing_url, grafana_url, prometheus_url
def verify_admission_webhook():
has_admission_webhook = execute_kubectl_cmd(
'api-versions | grep admissionregistration', False)
if len(has_admission_webhook) == 0:
raise AssertionError(
"MutatingAdmissionWebhook and ValidatingAdmissionWebhook plugins "
"are not listed in the kube-apiserver --enable-admission-plugins")
def add_istio_label_to_ns(c_client, ns):
labels = {
"istio-injection": "enabled"
}
ns = c_client.update_by_id_namespace(ns.id, labels=labels)
return ns
def create_and_test_bookinfo_services(p_client, ns, timeout=DEFAULT_TIMEOUT):
book_info_file_path = ISTIO_PATH + '/bookinfo.yaml'
execute_kubectl_cmd('apply -f ' + book_info_file_path + ' -n '
+ ns.name, False)
result = execute_kubectl_cmd('get deployment -n ' + ns.name, True)
for deployment in result['items']:
wl = p_client.list_workload(id='deployment:'
+ deployment['metadata']['namespace']
+ ':'
+ deployment['metadata']['name']).data[0]
wl = wait_for_wl_to_active(p_client, wl, 60)
wl_pods = wait_for_pods_in_workload(p_client, wl, 1)
wait_for_pod_to_running(p_client, wl_pods[0])
rating_pod = execute_kubectl_cmd('get pod -l app=ratings -n' + ns.name)
assert len(rating_pod['items']) == 1
rating_pod_name = rating_pod['items'][0]['metadata']['name']
try:
result = execute_kubectl_cmd(
'exec -it -n ' + ns.name + ' ' + rating_pod_name
+ ' -c ratings -- curl productpage:9080/productpage'
+ ' | grep -o "<title>.*</title>"', False)
except CalledProcessError:
result = None
start = time.time()
while result is None or result.rstrip() != ISTIO_BOOKINFO_QUERY_RESULT:
if time.time() - start > timeout:
raise AssertionError(
"Timed out and failed to get bookinfo service ready")
time.sleep(.5)
try:
result = execute_kubectl_cmd(
'exec -it -n ' + ns.name + ' ' + rating_pod_name
+ ' -c ratings -- curl productpage:9080/productpage'
+ ' | grep -o "<title>.*</title>"', False)
except CalledProcessError:
result = None
assert result.rstrip() == ISTIO_BOOKINFO_QUERY_RESULT
return result
def create_and_test_bookinfo_gateway(app_client, namespace,
gateway_url, timeout=DEFAULT_TIMEOUT):
servers = [{
"hosts": ["*"],
"port": {
"number": "80",
"protocol": "HTTP",
"name": "http"
}
}]
selector = {"istio": "ingressgateway"}
app_client.create_gateway(name="bookinfo-gateway",
namespaceId=namespace.id,
selector=selector,
servers=servers)
gateways = execute_kubectl_cmd('get gateway -n' + namespace.name, True)
assert len(gateways['items']) == 1
curl_cmd = 'curl -s http://' + gateway_url \
+ '/productpage | grep -o "<title>.*</title>"'
result = run_command(curl_cmd)
start = time.time()
while result is None or result.rstrip() != ISTIO_BOOKINFO_QUERY_RESULT:
if time.time() - start > timeout:
raise AssertionError(
"Timed out and failed to get bookinfo gateway ready")
time.sleep(.5)
result = run_command(curl_cmd)
assert result.rstrip() == ISTIO_BOOKINFO_QUERY_RESULT
return result
def create_bookinfo_virtual_service(app_client, namespace):
http = [{
"route": [{
"destination": {
"host": "productpage",
"port": {"number": 9080}
},
"weight": 100,
"portNumberOrName": "9080"
}],
"match": [
{"uri": {"exact": "/productpage"}},
{"uri": {"exact": "/login"}},
{"uri": {"exact": "/logout"}},
{"uri": {"prefix": "/api/v1/products"}}
]
}]
app_client.create_virtual_service(name="bookinfo",
namespaceId=namespace.id,
gateways=["bookinfo-gateway"],
http=http,
hosts=["*"])
def create_bookinfo_destination_rules(app_client, namespace):
subsets = [
{
"name": "v1",
"labels": {
"version": "v1"
}
},
{
"name": "v2",
"labels": {
"version": "v2"
}
},
{
"name": "v3",
"labels": {
"version": "v3"
}
}
]
app_client.create_destination_rule(namespaceId=namespace.id,
name="reviews",
host="reviews",
subsets=subsets)
def create_and_test_bookinfo_routing(app_client, namespace,
gateway_url, timeout=30):
http = [{
"route": [{
"destination": {
"subset": "v3",
"host": "reviews",
"port": {"number": 9080}
},
"weight": 100,
"portNumberOrName": "9080"
}]
}]
create_bookinfo_destination_rules(app_client, namespace)
app_client.create_virtual_service(name="reviews",
namespaceId=namespace.id,
http=http,
hosts=["reviews"])
curl_cmd = 'curl -s http://' + gateway_url \
+ '/productpage | grep -o "glyphicon-star"'
result = run_command(curl_cmd)
start = time.time()
while result is None or "glyphicon-star" not in result:
if time.time() - start > timeout:
raise AssertionError(
"Timed out and failed to get correct reviews version")
time.sleep(.5)
result = run_command(curl_cmd)
assert "glyphicon-star" in result
return result
# if grep returns no output, subprocess.check_output raises CalledProcessError
def run_command(command):
try:
return run_command_common(command)
except CalledProcessError:
return None
def get_system_client(user):
# Gets client and cluster using USER_TOKEN, who is a CLUSTER_OWNER
client, cluster = get_user_client_and_cluster()
projects = client.list_project(name='System', clusterId=cluster.id)
if len(projects.data) == 0:
raise AssertionError(
"System project not found in the cluster " + cluster.Name)
p = projects.data[0]
return get_project_client_for_token(p, user)
def add_user_to_cluster(username):
class User(object):
def __init__(self, u_name, user_id, token):
self.username = u_name
self.id = user_id
self.token = token
user_data = login_as_auth_user(username, AUTH_USER_PASSWORD)
u_id = user_data['userId']
u_token = user_data['token']
user_obj = User(username, u_id, u_token)
add_role_to_user(user_obj, CLUSTER_MEMBER)
# Enable one of the two options below to get around Issue #25365
get_client_for_token(u_token)
# headers = {'Authorization': 'Bearer ' + u_token}
# url = os.environ.get('CATTLE_TEST_URL', "") + "/v3/users?me=true"
# response = requests.get(headers=headers, url=url, verify=False)
@pytest.fixture()
def update_answers():
def _update_answers(answer_type, group=None):
answers = {
"kiali.enabled": "true",
"tracing.enabled": "true",
}
if answer_type == "allow_all_access":
additional_answers = {
"global.members[0].kind": "Group",
"global.members[0].name": "system:authenticated",
}
answers.update(additional_answers)
elif answer_type == "allow_group_access":
auth_admin = login_as_auth_user(load_setup_data()["admin_user"],
AUTH_USER_PASSWORD)
group_id = get_group_principal_id(group, token=auth_admin['token'])
additional_answers = {
"global.members[0].kind": "Group",
"global.members[0].name": group_id,
}
answers.update(additional_answers)
elif answer_type == "enable_certmanager":
additional_answers = {"certmanager.enabled": "true"}
answers.update(additional_answers)
elif answer_type == "enable_all_options_except_certmanager":
additional_answers = {
"gateways.istio-egressgateway.enabled": "true",
"gateways.istio-ilbgateway.enabled": "true",
"gateways.istio-ingressgateway.sds.enabled": "true",
"global.proxy.accessLogFile": "/dev/stdout",
"grafana.enabled": "true",
"istiocoredns.enabled": "true",
"kiali.dashboard.grafanaURL": "",
"kiali.prometheusAddr": "http://prometheus:9090",
"nodeagent.enabled": "true",
"nodeagent.env.CA_ADDR": "istio-citadel:8060",
"nodeagent.env.CA_PROVIDER": "Citadel",
"prometheus.enabled": "true",
}
answers.update(additional_answers)
update_istio_app(answers, USER_TOKEN)
return _update_answers
@pytest.fixture()
def default_access(update_answers):
update_answers("default_access")
@pytest.fixture()
def allow_all_access(update_answers):
update_answers("allow_all_access")
@pytest.fixture()
def enable_certmanager(update_answers):
update_answers("enable_certmanager")
@pytest.fixture()
def enable_all_options_except_certmanager(update_answers):
update_answers("enable_all_options_except_certmanager")
@pytest.fixture(scope='function')
def skipif_unsupported_istio_version(request):
if ISTIO_VERSION != "":
istio_version = ISTIO_VERSION
else:
client, _ = get_user_client_and_cluster()
istio_versions = list(client.list_template(
id=ISTIO_TEMPLATE_ID).data[0].versionLinks.keys())
istio_version = istio_versions[len(istio_versions) - 1]
if compare_versions(istio_version, "1.4.3") < 0:
pytest.skip("This test is not supported for older Istio versions")
@pytest.fixture(scope='function')
def auth_cluster_access(request):
group, noauth_user = get_a_group_and_a_user_not_in_it(
NESTED_GROUP_ENABLED)
users = get_user_by_group(group, NESTED_GROUP_ENABLED)
for user in users:
add_user_to_cluster(user)
add_user_to_cluster(noauth_user)
def fin():
auth_resource_cleanup()
request.addfinalizer(fin)
return group, users, noauth_user
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
global DEFAULT_ANSWERS
global ISTIO_EXTERNAL_ID
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
admin_client = get_admin_client()
ad_enabled = admin_client.by_id_auth_config("activedirectory").enabled
if AUTH_PROVIDER == "activeDirectory" and not ad_enabled:
enable_ad(load_setup_data()["admin_user"], ADMIN_TOKEN,
password=AUTH_USER_PASSWORD, nested=NESTED_GROUP_ENABLED)
projects = client.list_project(name='System', clusterId=cluster.id)
if len(projects.data) == 0:
raise AssertionError(
"System project not found in the cluster " + cluster.name)
p = projects.data[0]
p_client = get_project_client_for_token(p, USER_TOKEN)
c_client = get_cluster_client_for_token(cluster, USER_TOKEN)
istio_versions = list(client.list_template(
id=ISTIO_TEMPLATE_ID).data[0].versionLinks.keys())
istio_version = istio_versions[len(istio_versions) - 1]
if ISTIO_VERSION != "":
istio_version = ISTIO_VERSION
ISTIO_EXTERNAL_ID += istio_version
answers = {"global.rancher.clusterId": p.clusterId}
DEFAULT_ANSWERS.update(answers)
monitoring_answers = copy.deepcopy(C_MONITORING_ANSWERS)
monitoring_answers["prometheus.persistence.enabled"] = "false"
monitoring_answers["grafana.persistence.enabled"] = "false"
if cluster["enableClusterMonitoring"] is False:
client.action(cluster, "enableMonitoring",
answers=monitoring_answers)
if cluster["istioEnabled"] is False:
verify_admission_webhook()
ns = create_ns(c_client, cluster, p, 'istio-system')
app = create_and_verify_istio_app(p_client, ns, p)
else:
app = p_client.list_app(name='cluster-istio').data[0]
ns = c_client.list_namespace(name='istio-system').data[0]
update_istio_app(DEFAULT_ANSWERS, USER_TOKEN,
app=app, ns=ns, project=p)
istio_project, app_ns = create_project_and_ns(
USER_TOKEN, cluster,
random_test_name("istio-app"),
random_test_name("istio-app-ns"))
add_istio_label_to_ns(c_client, app_ns)
app_client = get_project_client_for_token(istio_project, USER_TOKEN)
istio_gateway_wl = p_client.by_id_workload('deployment:' +
ns.name +
':istio-ingressgateway')
assert istio_gateway_wl is not None
endpoints = istio_gateway_wl['publicEndpoints'][0]
gateway_url = endpoints['addresses'][0] + ':' + str(endpoints['port'])
namespace["gateway_url"] = gateway_url
namespace["app_ns"] = app_ns
namespace["app_client"] = app_client
namespace["system_ns"] = ns
namespace["system_project"] = p
namespace["istio_version"] = istio_version
namespace["istio_app"] = app
def fin():
client = get_user_client()
# delete the istio app
app = p_client.delete(namespace["istio_app"])
validate_app_deletion(p_client, app.id)
# delete the istio ns
p_client.delete(namespace["system_ns"])
# disable the cluster monitoring
c = client.reload(cluster)
if c["enableClusterMonitoring"] is True:
client.action(c, "disableMonitoring")
# delete the istio testing project
client.delete(istio_project)
request.addfinalizer(fin)
|
timoguic/sp_hub
|
refs/heads/master
|
drf_sp_hub/sp_app/lib/html_importer.py
|
1
|
import logging
import csv
import re
import json
from lxml import etree
from django.utils.html import strip_tags
from sp_app.models import Article
from sp_app.models import SPCategory
from sp_app.models import SPKeyword
logger = logging.getLogger(__name__)
class HTMLImporter():
def __init__(self, obj):
self.parser = etree.HTMLParser()
self.tree = etree.parse(obj.html_file, self.parser)
self.instance = obj
def process_file(self):
if self.instance.html_file:
self.update_authors_field()
self.associate_editor_keywords()
self.associate_author_keywords()
def update_authors_field(self):
authors = self.tree.xpath("//div[@vocab='http://xmlns.com/foaf/0.1/' and @typeof='Person' and @class='foaf-author']")
author_dict = dict()
for a in authors:
orcid = a.xpath("span[@property='openid']")
# No ORCID? Skip to the next author (example: SP1282.html)
if not orcid:
continue
else:
if not orcid[0].text:
continue
orcid = orcid[0].text
nom = a.xpath("span[@property='familyName']")
if nom:
nom = nom[0].text
prenom = a.xpath("span[@property='firstName']")
if prenom:
prenom = prenom[0].text
author_dict[orcid] = nom + ' ' + prenom
Article.objects.filter(pk=self.instance.pk).update(authors=json.dumps(author_dict))
def associate_editor_keywords(self):
""" Associates editor keywords with articles upon save """
# https://github.com/timoguic/sp_hub/issues/31
editor_keywords = self.tree.xpath("//div[@class='keywords']/div")
for elem in editor_keywords:
# <span property="subject" class="label">Imaginaire</span>
kw_label = elem.xpath("span[@property='subject' and @class='label']")
if not kw_label:
continue
kw_label = kw_label[0].text
logger.info('Found editor keyword: ' + kw_label)
kw_data = {}
aligned_fields = ['uriRameau', 'idRameau', 'wikidata']
for field in aligned_fields:
xpath_query = "span[@property='subject' and @class='{}']"
alignment = elem.xpath(xpath_query.format(field))
if alignment:
# We found an aligned field
if alignment[0].text:
kw_data[field] = alignment[0].text
my_kw = create_update_editor_kw(kw_label, kw_data=kw_data, lang='fr')
logger.info('Associating keyword ' + kw_label + ' to ' + self.instance.title)
self.instance.keywords.add(my_kw)
def associate_author_keywords(self):
# <meta name="keywords" xml:lang="fr" lang="fr" content="Facebook, éditorialisation, algorithmes, connectivité, public, médias, globalisation, opinion, bulle de filtre, segmentation." />
author_keywords = self.tree.xpath("//meta[@name='keywords']")
# TODO import keywords from other languages too
for kw in author_keywords:
lang = kw.get('lang')
if not lang:
lang = 'fr'
label = kw.get('content')
# We get rid of the possible ending '.'
if label[len(label)-1] == '.':
label = label[:-1]
# Let's split on ;
word_list = label.split(';')
# No luck? We split on ,
if len(word_list) == 1:
word_list = ''.join(word_list).split(',')
# Strip the words and their HTML tags
word_list = [ strip_tags(w.strip()) for w in word_list ]
for word in word_list:
possible_kw = SPKeyword.objects.filter(name__iexact=word, language=lang, aligned=False)
if possible_kw:
for kw in possible_kw:
logger.info('Linking ' + kw.name + ' to ' + str(self.instance.pk))
self.instance.keywords.add(kw)
else:
logger.info('Creating ' + word)
my_kw = SPKeyword.objects.create(name=word, language=lang, aligned=False)
my_kw.save()
self.instance.keywords.add(my_kw)
class SpipKeywords():
def __init__(self, file_path):
csvfile = open(file_path, encoding='utf-8')
# Skip first line
csvfile.readline()
self.csvreader = csv.reader(csvfile, delimiter=';')
    def import_all(self):
        logger.info('========== IMPORT SPIP =========')
        for row in self.csvreader:
            label = row[1]
            parent_name = row[5]
            my_cat = None
            if parent_name:
                logger.info('Category ' + parent_name)
                my_cat = SPCategory.objects.get_or_create(name=parent_name)
            if label:
                my_keywords = create_update_keywords_from_multi_string(label)
                # Associate parents (only when both a category and keywords exist)
                if my_cat and my_keywords:
                    for k in my_keywords:
                        logger.info('Update keyword ' + k.name + ' with parent ' + my_cat[0].name)
                        k.category = my_cat[0]
                        k.save()
class XMLKeywords():
# Class variables (namespaces)
sp_ns = 'http://sens-public.org/sp/'
namespaces = { 'ns': sp_ns }
def __init__(self, file_path):
# Init XML parser
parser = etree.XMLParser()
tree = etree.parse(file_path, parser)
# Get <entry> elements
self.results = tree.xpath('//ns:entry', namespaces=self.namespaces)
def import_all(self):
logger.info('========== IMPORT XML =========')
# For multilingual entries (<multi>[fr]Bla[en]Blah</multi>)
multi_with_ns = '{' + self.sp_ns + '}' + 'multi'
id_with_ns = '{' + self.sp_ns + '}' + 'id'
url_with_ns = '{' + self.sp_ns + '}' + 'url'
for r in self.results:
# Find ID Rameau of the keyword
kw_id = r.find(id_with_ns)
# Find URL rameau of the keyword
kw_url = r.find(url_with_ns)
if kw_id is None or kw_url is None:
continue
# Look for a label
label = r.xpath('ns:label', namespaces=self.namespaces)
if len(label) == 0:
# This is not good. Probably better to skip
continue
label_dict = {}
if isinstance(label, list) and len(label) > 0:
children = label[0].find(multi_with_ns)
# We found a <multi> element - retrieve its text content
if children is not None:
label = children.text
else:
# No multi - get the text of the label element
label = label[0].text
else:
# In the worst case, the text could be there already
label = label.text
kw_data = {}
kw_data['idrameau'] = kw_id.text
kw_data['urlrameau'] = kw_url.text
create_update_keywords_from_multi_string(label, kw_data)
""" HELPER FUNCTIONS"""
def create_update_keywords_from_multi_string(label, kw_data=None):
# Do nothing if the string is empty
if not label:
return False
created_keywords = []
label_dict = split_multi_spstring(label)
# Some keywords are multilingual but don't have French versions
# So we check that first
if 'fr' in label_dict:
# Remove the french item from the dict
kw_label = label_dict.pop('fr')
# And create/update the corresponding keyword
kw_fr = create_update_editor_kw(kw_label, kw_data=kw_data, lang='fr')
created_keywords.append(kw_fr)
# Then, look at other languages
for lang in label_dict:
new_kw = create_update_editor_kw(
label_dict[lang],
                # We only have an alignment for the French version, so pass None here
kw_data=None,
lang=lang,
translation_of=kw_fr,
)
created_keywords.append(new_kw)
return created_keywords
else:
        logger.warning("Cannot find 'fr' language for " + next(iter(label_dict.values())))
return False
def create_update_editor_kw(label, kw_data=None, lang='fr', translation_of=None):
# Look for an existing keyword by the same name
existing_kw = SPKeyword.objects.filter(name=label, language=lang, is_editor=True)
my_args = { 'is_editor': True,
'language': lang,
'is_translation': translation_of,
}
# If we have alignment info, then the keyword is aligned
if kw_data:
my_args['data'] = kw_data
my_args['aligned'] = True
# Update
if existing_kw:
logger.info('Update existing keyword: ' + label + ' (' + lang + ')')
existing_kw.update(**my_args)
return existing_kw.get()
# Create
else:
logger.info('Create editor keyword ' + label + ' (' + lang + ')')
my_kw = SPKeyword.objects.create(name=label, **my_args)
my_kw.save()
return my_kw
def split_multi_spstring(s):
# Look for multilingual patterns
matches = re.findall('\[(..)\]([^\[<]+)', s)
# Looks like it's not multilingual
if not matches:
return { 'fr': s }
else:
return { m[0]: m[1] for m in matches }
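# Illustrative sketch (not part of the original module): SPIP multilingual labels
# look like '[fr]Imaginaire[en]Imaginary'; split_multi_spstring() turns them into a
# dict keyed by language code, e.g.
#
#   split_multi_spstring('[fr]Imaginaire[en]Imaginary')
#   # -> {'fr': 'Imaginaire', 'en': 'Imaginary'}
#   split_multi_spstring('Imaginaire')
#   # -> {'fr': 'Imaginaire'}  (strings without language markers default to French)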
|
halvertoluke/edx-platform
|
refs/heads/default_branch
|
common/djangoapps/third_party_auth/tests/specs/test_testshib.py
|
46
|
"""
Third_party_auth integration tests using a mock version of the TestShib provider
"""
import unittest
import httpretty
from mock import patch
from third_party_auth.tasks import fetch_saml_metadata
from third_party_auth.tests import testutil
from .base import IntegrationTestMixin
TESTSHIB_ENTITY_ID = 'https://idp.testshib.org/idp/shibboleth'
TESTSHIB_METADATA_URL = 'https://mock.testshib.org/metadata/testshib-providers.xml'
TESTSHIB_SSO_URL = 'https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO'
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, 'third_party_auth not enabled')
class TestShibIntegrationTest(IntegrationTestMixin, testutil.SAMLTestCase):
"""
TestShib provider Integration Test, to test SAML functionality
"""
PROVIDER_ID = "saml-testshib"
PROVIDER_NAME = "TestShib"
PROVIDER_BACKEND = "tpa-saml"
USER_EMAIL = "myself@testshib.org"
USER_NAME = "Me Myself And I"
USER_USERNAME = "myself"
def setUp(self):
super(TestShibIntegrationTest, self).setUp()
self.enable_saml(
private_key=self._get_private_key(),
public_key=self._get_public_key(),
entity_id="https://saml.example.none",
)
# Mock out HTTP requests that may be made to TestShib:
httpretty.enable()
def metadata_callback(_request, _uri, headers):
""" Return a cached copy of TestShib's metadata by reading it from disk """
return (200, headers, self.read_data_file('testshib_metadata.xml'))
httpretty.register_uri(httpretty.GET, TESTSHIB_METADATA_URL, content_type='text/xml', body=metadata_callback)
self.addCleanup(httpretty.disable)
self.addCleanup(httpretty.reset)
# Configure the SAML library to use the same request ID for every request.
# Doing this and freezing the time allows us to play back recorded request/response pairs
uid_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.generate_unique_id', return_value='TESTID')
uid_patch.start()
self.addCleanup(uid_patch.stop)
self._freeze_time(timestamp=1434326820) # This is the time when the saved request/response was recorded.
def test_login_before_metadata_fetched(self):
self._configure_testshib_provider(fetch_metadata=False)
# The user goes to the login page, and sees a button to login with TestShib:
testshib_login_url = self._check_login_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(testshib_login_url)
# The user should be redirected to back to the login page:
self.assertEqual(try_login_response.status_code, 302)
self.assertEqual(try_login_response['Location'], self.url_prefix + self.login_page_url)
# When loading the login page, the user will see an error message:
response = self.client.get(self.login_page_url)
self.assertEqual(response.status_code, 200)
self.assertIn('Authentication with TestShib is currently unavailable.', response.content)
def test_login(self):
""" Configure TestShib before running the login test """
self._configure_testshib_provider()
super(TestShibIntegrationTest, self).test_login()
def test_register(self):
""" Configure TestShib before running the register test """
self._configure_testshib_provider()
super(TestShibIntegrationTest, self).test_register()
def _freeze_time(self, timestamp):
""" Mock the current time for SAML, so we can replay canned requests/responses """
now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp)
now_patch.start()
self.addCleanup(now_patch.stop)
def _configure_testshib_provider(self, **kwargs):
""" Enable and configure the TestShib SAML IdP as a third_party_auth provider """
fetch_metadata = kwargs.pop('fetch_metadata', True)
kwargs.setdefault('name', 'TestShib')
kwargs.setdefault('enabled', True)
kwargs.setdefault('idp_slug', 'testshib')
kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)
kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)
kwargs.setdefault('icon_class', 'fa-university')
kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName
self.configure_saml_provider(**kwargs)
if fetch_metadata:
self.assertTrue(httpretty.is_enabled())
num_changed, num_failed, num_total = fetch_saml_metadata()
self.assertEqual(num_failed, 0)
self.assertEqual(num_changed, 1)
self.assertEqual(num_total, 1)
def do_provider_login(self, provider_redirect_url):
""" Mocked: the user logs in to TestShib and then gets redirected back """
# The SAML provider (TestShib) will authenticate the user, then get the browser to POST a response:
self.assertTrue(provider_redirect_url.startswith(TESTSHIB_SSO_URL))
return self.client.post(
self.complete_url,
content_type='application/x-www-form-urlencoded',
data=self.read_data_file('testshib_response.txt'),
)
|
popazerty/openblackhole-SH4
|
refs/heads/master
|
lib/python/Plugins/Extensions/PluginHider/plugin.py
|
11
|
from __future__ import print_function
from . import _
# Plugin definition
from Plugins.Plugin import PluginDescriptor
from Components.PluginComponent import PluginComponent
from Components.config import config, ConfigSubsection, ConfigSet
from PluginHiderSetup import PluginHiderSetup
from operator import attrgetter
config.plugins.pluginhider = ConfigSubsection()
config.plugins.pluginhider.hideextensions = ConfigSet(choices=[])
config.plugins.pluginhider.hideplugins = ConfigSet(choices=[])
config.plugins.pluginhider.hideeventinfo = ConfigSet(choices=[])
hasPluginWeight = True
def hidePlugin(plugin):
"""Convenience function for external code to hide a plugin."""
hide = config.plugins.pluginhider.hideplugins.value
if not plugin.name in hide:
hide.append(plugin.name)
config.plugins.pluginhider.hideplugins.save()
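# Illustrative usage (not part of the original plugin): other plugins can hide
# themselves via the convenience function above, assuming `descriptor` is the
# PluginDescriptor they registered elsewhere:
#
#   from Plugins.Extensions.PluginHider.plugin import hidePlugin
#   hidePlugin(descriptor)  # appends descriptor.name to hideplugins and saves the config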
def PluginComponent_getPlugins(self, where):
if not isinstance(where, list):
where = [ where ]
res = []
if PluginDescriptor.WHERE_EXTENSIONSMENU in where:
hide = config.plugins.pluginhider.hideextensions.value
res.extend((x for x in self.plugins.get(PluginDescriptor.WHERE_EXTENSIONSMENU, []) if x.name not in hide))
where.remove(PluginDescriptor.WHERE_EXTENSIONSMENU)
if PluginDescriptor.WHERE_PLUGINMENU in where:
hide = config.plugins.pluginhider.hideplugins.value
res.extend((x for x in self.plugins.get(PluginDescriptor.WHERE_PLUGINMENU, []) if x.name not in hide))
where.remove(PluginDescriptor.WHERE_PLUGINMENU)
if PluginDescriptor.WHERE_EVENTINFO in where:
hide = config.plugins.pluginhider.hideeventinfo.value
res.extend((x for x in self.plugins.get(PluginDescriptor.WHERE_EVENTINFO , []) if x.name not in hide))
where.remove(PluginDescriptor.WHERE_EVENTINFO)
if where:
res.extend(PluginComponent.pluginHider_baseGetPlugins(self, where))
if hasPluginWeight:
res.sort(key=attrgetter('weight'))
return res
def autostart(reason, *args, **kwargs):
if reason == 0:
if hasattr(PluginComponent, 'pluginHider_baseGetPlugins'):
print("[PluginHider] Something went wrong as our autostart handler was called multiple times for startup, printing traceback and ignoring.")
import traceback, sys
traceback.print_stack(limit=5, file=sys.stdout)
else:
PluginComponent.pluginHider_baseGetPlugins = PluginComponent.getPlugins
PluginComponent.getPlugins = PluginComponent_getPlugins
else:
if hasattr(PluginComponent, 'pluginHider_baseGetPlugins'):
PluginComponent.getPlugins = PluginComponent.pluginHider_baseGetPlugins
del PluginComponent.pluginHider_baseGetPlugins
else:
print("[PluginHider] Something went wrong as our autostart handler was called multiple times for shutdown, printing traceback and ignoring.")
import traceback, sys
traceback.print_stack(limit=5, file=sys.stdout)
def main(session, *args, **kwargs):
session.open(PluginHiderSetup)
def menu(menuid):
if menuid != "system":
return []
return [(_("Hide Plugins"), main, "pluginhider_setup", None)]
def Plugins(**kwargs):
pd = PluginDescriptor(
where=PluginDescriptor.WHERE_AUTOSTART,
fnc=autostart,
needsRestart=False,
)
if not hasattr(pd, 'weight'):
global hasPluginWeight
hasPluginWeight = False
return [
pd,
PluginDescriptor(
where=PluginDescriptor.WHERE_MENU,
fnc=menu,
needsRestart=False,
),
]
|
massimiliano-della-rovere/aiohttp_debugtoolbar
|
refs/heads/master
|
aiohttp_debugtoolbar/panels/settings.py
|
6
|
from operator import itemgetter
from .base import DebugPanel
from ..utils import APP_KEY
__all__ = ['SettingsDebugPanel']
class SettingsDebugPanel(DebugPanel):
"""
A panel to display debug toolbar setting for now.
"""
name = 'Settings'
has_content = True
template = 'settings.jinja2'
title = 'Settings'
nav_title = title
def __init__(self, request):
super().__init__(request)
# TODO: show application setting here
# always repr this stuff before it's sent to the template to appease
# dumbass stuff like MongoDB's __getattr__ that always returns a
# Collection, which fails when Jinja tries to look up __html__ on it.
settings = request.app[APP_KEY]['settings']
        # repr() every value and sort the settings by key for a stable display
reprs = [(k, repr(v)) for k, v in settings.items()]
self.data = {'settings': sorted(reprs, key=itemgetter(0))}
|
sysbot/CouchPotatoServer
|
refs/heads/master
|
libs/rtorrent/group.py
|
179
|
# Copyright (c) 2013 Dean Gardiner, <gardiner91@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import rtorrent.rpc
Method = rtorrent.rpc.Method
class Group:
__name__ = 'Group'
def __init__(self, _rt_obj, name):
self._rt_obj = _rt_obj
self.name = name
self.methods = [
# RETRIEVERS
Method(Group, 'get_max', 'group.' + self.name + '.ratio.max', varname='max'),
Method(Group, 'get_min', 'group.' + self.name + '.ratio.min', varname='min'),
Method(Group, 'get_upload', 'group.' + self.name + '.ratio.upload', varname='upload'),
# MODIFIERS
Method(Group, 'set_max', 'group.' + self.name + '.ratio.max.set', varname='max'),
Method(Group, 'set_min', 'group.' + self.name + '.ratio.min.set', varname='min'),
Method(Group, 'set_upload', 'group.' + self.name + '.ratio.upload.set', varname='upload')
]
rtorrent.rpc._build_rpc_methods(self, self.methods)
# Setup multicall_add method
caller = lambda multicall, method, *args: \
multicall.add(method, *args)
setattr(self, "multicall_add", caller)
def _get_prefix(self):
return 'group.' + self.name + '.ratio.'
def update(self):
multicall = rtorrent.rpc.Multicall(self)
retriever_methods = [m for m in self.methods
if m.is_retriever() and m.is_available(self._rt_obj)]
for method in retriever_methods:
multicall.add(method)
multicall.call()
def enable(self):
p = self._rt_obj._get_conn()
return getattr(p, self._get_prefix() + 'enable')()
def disable(self):
p = self._rt_obj._get_conn()
return getattr(p, self._get_prefix() + 'disable')()
def set_command(self, *methods):
methods = [m + '=' for m in methods]
m = rtorrent.rpc.Multicall(self)
self.multicall_add(
m, 'system.method.set',
self._get_prefix() + 'command',
*methods
)
        return m.call()[-1]
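# Illustrative sketch (not part of the original module), assuming `rt` is an
# already-connected rtorrent client object exposing the XML-RPC connection that
# the methods above rely on:
#
#   g = Group(rt, 'seeding')
#   g.update()     # refreshes the group's ratio settings via a single multicall
#   g.enable()     # calls group.seeding.ratio.enable on the server
#   g.disable()    # calls group.seeding.ratio.disable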
|
impromptuartist/impromptuartist.github.io
|
refs/heads/master
|
node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/fruity.py
|
364
|
# -*- coding: utf-8 -*-
"""
pygments.styles.fruity
~~~~~~~~~~~~~~~~~~~~~~
pygments version of my "fruity" vim theme.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Token, Comment, Name, Keyword, \
Generic, Number, String, Whitespace
class FruityStyle(Style):
"""
    Pygments version of the "fruity" vim theme.
"""
background_color = '#111111'
highlight_color = '#333333'
styles = {
Whitespace: '#888888',
Token: '#ffffff',
Generic.Output: '#444444 bg:#222222',
Keyword: '#fb660a bold',
Keyword.Pseudo: 'nobold',
Number: '#0086f7 bold',
Name.Tag: '#fb660a bold',
Name.Variable: '#fb660a',
Comment: '#008800 bg:#0f140f italic',
Name.Attribute: '#ff0086 bold',
String: '#0086d2',
Name.Function: '#ff0086 bold',
Generic.Heading: '#ffffff bold',
Keyword.Type: '#cdcaa9 bold',
Generic.Subheading: '#ffffff bold',
Name.Constant: '#0086d2',
Comment.Preproc: '#ff0007 bold'
}
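# Illustrative usage (not part of the original style module): the style is normally
# selected by name through a formatter, e.g.
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#   print(highlight('print(1)', PythonLexer(), HtmlFormatter(style='fruity')))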
|
js0701/chromium-crosswalk
|
refs/heads/master
|
components/data_reduction_proxy/PRESUBMIT.py
|
38
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for the data_reduction_proxy component.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def CheckChangeOnUpload(input_api, output_api):
return input_api.canned_checks.CheckPatchFormatted(input_api, output_api)
|
fschill/mavue
|
refs/heads/master
|
pymavlink/fgFDM.py
|
46
|
#!/usr/bin/env python
# parse and construct FlightGear NET FDM packets
# Andrew Tridgell, November 2011
# released under GNU GPL version 2 or later
import struct, math
class fgFDMError(Exception):
'''fgFDM error class'''
def __init__(self, msg):
Exception.__init__(self, msg)
self.message = 'fgFDMError: ' + msg
class fgFDMVariable(object):
'''represent a single fgFDM variable'''
def __init__(self, index, arraylength, units):
self.index = index
self.arraylength = arraylength
self.units = units
class fgFDMVariableList(object):
'''represent a list of fgFDM variable'''
def __init__(self):
self.vars = {}
self._nextidx = 0
def add(self, varname, arraylength=1, units=None):
self.vars[varname] = fgFDMVariable(self._nextidx, arraylength, units=units)
self._nextidx += arraylength
class fgFDM(object):
'''a flightgear native FDM parser/generator'''
def __init__(self):
'''init a fgFDM object'''
self.FG_NET_FDM_VERSION = 24
self.pack_string = '>I 4x 3d 6f 11f 3f 2f I 4I 4f 4f 4f 4f 4f 4f 4f 4f 4f I 4f I 3I 3f 3f 3f I i f 10f'
self.values = [0]*98
self.FG_MAX_ENGINES = 4
self.FG_MAX_WHEELS = 3
self.FG_MAX_TANKS = 4
# supported unit mappings
self.unitmap = {
('radians', 'degrees') : math.degrees(1),
('rps', 'dps') : math.degrees(1),
('feet', 'meters') : 0.3048,
('fps', 'mps') : 0.3048,
('knots', 'mps') : 0.514444444,
('knots', 'fps') : 0.514444444/0.3048,
('fpss', 'mpss') : 0.3048,
('seconds', 'minutes') : 60,
('seconds', 'hours') : 3600,
}
# build a mapping between variable name and index in the values array
# note that the order of this initialisation is critical - it must
# match the wire structure
self.mapping = fgFDMVariableList()
self.mapping.add('version')
# position
self.mapping.add('longitude', units='radians') # geodetic (radians)
self.mapping.add('latitude', units='radians') # geodetic (radians)
self.mapping.add('altitude', units='meters') # above sea level (meters)
self.mapping.add('agl', units='meters') # above ground level (meters)
# attitude
self.mapping.add('phi', units='radians') # roll (radians)
self.mapping.add('theta', units='radians') # pitch (radians)
self.mapping.add('psi', units='radians') # yaw or true heading (radians)
self.mapping.add('alpha', units='radians') # angle of attack (radians)
self.mapping.add('beta', units='radians') # side slip angle (radians)
# Velocities
self.mapping.add('phidot', units='rps') # roll rate (radians/sec)
self.mapping.add('thetadot', units='rps') # pitch rate (radians/sec)
self.mapping.add('psidot', units='rps') # yaw rate (radians/sec)
self.mapping.add('vcas', units='fps') # calibrated airspeed
self.mapping.add('climb_rate', units='fps') # feet per second
self.mapping.add('v_north', units='fps') # north velocity in local/body frame, fps
self.mapping.add('v_east', units='fps') # east velocity in local/body frame, fps
self.mapping.add('v_down', units='fps') # down/vertical velocity in local/body frame, fps
self.mapping.add('v_wind_body_north', units='fps') # north velocity in local/body frame
self.mapping.add('v_wind_body_east', units='fps') # east velocity in local/body frame
self.mapping.add('v_wind_body_down', units='fps') # down/vertical velocity in local/body
# Accelerations
self.mapping.add('A_X_pilot', units='fpss') # X accel in body frame ft/sec^2
self.mapping.add('A_Y_pilot', units='fpss') # Y accel in body frame ft/sec^2
self.mapping.add('A_Z_pilot', units='fpss') # Z accel in body frame ft/sec^2
# Stall
self.mapping.add('stall_warning') # 0.0 - 1.0 indicating the amount of stall
self.mapping.add('slip_deg', units='degrees') # slip ball deflection
# Engine status
self.mapping.add('num_engines') # Number of valid engines
self.mapping.add('eng_state', self.FG_MAX_ENGINES) # Engine state (off, cranking, running)
self.mapping.add('rpm', self.FG_MAX_ENGINES) # Engine RPM rev/min
self.mapping.add('fuel_flow', self.FG_MAX_ENGINES) # Fuel flow gallons/hr
self.mapping.add('fuel_px', self.FG_MAX_ENGINES) # Fuel pressure psi
        self.mapping.add('egt', self.FG_MAX_ENGINES) # Exhaust gas temp deg F
self.mapping.add('cht', self.FG_MAX_ENGINES) # Cylinder head temp deg F
self.mapping.add('mp_osi', self.FG_MAX_ENGINES) # Manifold pressure
self.mapping.add('tit', self.FG_MAX_ENGINES) # Turbine Inlet Temperature
self.mapping.add('oil_temp', self.FG_MAX_ENGINES) # Oil temp deg F
self.mapping.add('oil_px', self.FG_MAX_ENGINES) # Oil pressure psi
# Consumables
self.mapping.add('num_tanks') # Max number of fuel tanks
self.mapping.add('fuel_quantity', self.FG_MAX_TANKS)
# Gear status
self.mapping.add('num_wheels')
self.mapping.add('wow', self.FG_MAX_WHEELS)
self.mapping.add('gear_pos', self.FG_MAX_WHEELS)
self.mapping.add('gear_steer', self.FG_MAX_WHEELS)
self.mapping.add('gear_compression', self.FG_MAX_WHEELS)
# Environment
self.mapping.add('cur_time', units='seconds') # current unix time
self.mapping.add('warp', units='seconds') # offset in seconds to unix time
self.mapping.add('visibility', units='meters') # visibility in meters (for env. effects)
# Control surface positions (normalized values)
self.mapping.add('elevator')
self.mapping.add('elevator_trim_tab')
self.mapping.add('left_flap')
self.mapping.add('right_flap')
self.mapping.add('left_aileron')
self.mapping.add('right_aileron')
self.mapping.add('rudder')
self.mapping.add('nose_wheel')
self.mapping.add('speedbrake')
self.mapping.add('spoilers')
self._packet_size = struct.calcsize(self.pack_string)
self.set('version', self.FG_NET_FDM_VERSION)
if len(self.values) != self.mapping._nextidx:
raise fgFDMError('Invalid variable list in initialisation')
def packet_size(self):
'''return expected size of FG FDM packets'''
return self._packet_size
def convert(self, value, fromunits, tounits):
'''convert a value from one set of units to another'''
if fromunits == tounits:
return value
if (fromunits,tounits) in self.unitmap:
return value * self.unitmap[(fromunits,tounits)]
if (tounits,fromunits) in self.unitmap:
return value / self.unitmap[(tounits,fromunits)]
raise fgFDMError("unknown unit mapping (%s,%s)" % (fromunits, tounits))
def units(self, varname):
'''return the default units of a variable'''
if not varname in self.mapping.vars:
raise fgFDMError('Unknown variable %s' % varname)
return self.mapping.vars[varname].units
def variables(self):
'''return a list of available variables'''
return sorted(list(self.mapping.vars.keys()),
key = lambda v : self.mapping.vars[v].index)
def get(self, varname, idx=0, units=None):
'''get a variable value'''
if not varname in self.mapping.vars:
raise fgFDMError('Unknown variable %s' % varname)
if idx >= self.mapping.vars[varname].arraylength:
raise fgFDMError('index of %s beyond end of array idx=%u arraylength=%u' % (
varname, idx, self.mapping.vars[varname].arraylength))
value = self.values[self.mapping.vars[varname].index + idx]
if units:
value = self.convert(value, self.mapping.vars[varname].units, units)
return value
def set(self, varname, value, idx=0, units=None):
'''set a variable value'''
if not varname in self.mapping.vars:
raise fgFDMError('Unknown variable %s' % varname)
if idx >= self.mapping.vars[varname].arraylength:
raise fgFDMError('index of %s beyond end of array idx=%u arraylength=%u' % (
varname, idx, self.mapping.vars[varname].arraylength))
if units:
value = self.convert(value, units, self.mapping.vars[varname].units)
# avoid range errors when packing into 4 byte floats
if math.isinf(value) or math.isnan(value) or math.fabs(value) > 3.4e38:
value = 0
self.values[self.mapping.vars[varname].index + idx] = value
def parse(self, buf):
        '''parse an FG FDM buffer'''
try:
t = struct.unpack(self.pack_string, buf)
except struct.error as msg:
raise fgFDMError('unable to parse - %s' % msg)
self.values = list(t)
def pack(self):
        '''pack an FG FDM buffer from current values'''
for i in range(len(self.values)):
if math.isnan(self.values[i]):
self.values[i] = 0
return struct.pack(self.pack_string, *self.values)
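# Minimal usage sketch (illustrative, not part of the original module): round-trip a
# packet through pack()/parse() and read converted values back out.
if __name__ == '__main__':
    fdm = fgFDM()
    fdm.set('altitude', 1000, units='meters')   # stored natively in meters
    fdm.set('phi', 45, units='degrees')         # converted to radians internally
    buf = fdm.pack()                            # binary FG_NET_FDM_VERSION packet
    fdm2 = fgFDM()
    fdm2.parse(buf)
    print('packet size: %u bytes' % fdm2.packet_size())
    print('altitude: %.1f m' % fdm2.get('altitude', units='meters'))
    print('roll: %.1f deg' % fdm2.get('phi', units='degrees'))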
|
uwdata/termite-data-server
|
refs/heads/master
|
web2py/gluon/dal.py
|
10
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Thanks to
* Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support
* Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support
* Denes
* Chris Clark
* clach05
* Denes Lengyel
* and many others who have contributed to current and previous versions
This file contains the DAL support for many relational databases,
including:
- SQLite & SpatiaLite
- MySQL
- Postgres
- Firebird
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- Informix (9+ and SE)
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql
- Teradata
- IMAP (experimental)
Example of usage:
>>> # from dal import DAL, Field
### create DAL connection (and create DB if it doesn't exist)
>>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'),
... folder=None)
### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person',Field('name','string'))
### insert a record
>>> id = person.insert(name='James')
### retrieve it by id
>>> james = person(id)
### retrieve it by name
>>> james = person(name='James')
### retrieve it by arbitrary query
>>> query = (person.name=='James') & (person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]
### update one record
>>> james.update_record(name='Jim')
<Row {'id': 1, 'name': 'Jim'}>
### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1
### delete records by query
>>> db(person.name.lower() == 'jim').delete()
0
### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name,
... groupby=person.name, limitby=(0,100))
### further filter them
>>> james = people.find(lambda row: row.name == 'James').first()
>>> print james.id, james.name
1 James
### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1
### delete one record
>>> james.delete_record()
1
### delete (drop) entire database table
>>> person.drop()
Supported field types:
id string text boolean integer double decimal password upload
blob time date datetime
Supported DAL URI strings:
'sqlite://test.db'
'spatialite://test.db'
'sqlite:memory'
'spatialite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:password@localhost/test'
'postgres:psycopg2://mdipierro:password@localhost/test'
'postgres:pg8000://mdipierro:password@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2://DSN=dsn;UID=user;PWD=pass'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'ingres://database' # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name'
'google:datastore' # for google app engine datastore
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
'imap://user:password@server:port' # experimental
'mongodb://user:password@server:port/database' # experimental
For more info:
help(DAL)
help(Field)
"""
###################################################################################
# this file only exposes DAL and Field
###################################################################################
__all__ = ['DAL', 'Field']
DEFAULTLENGTH = {'string':512,
'password':512,
'upload':512,
'text':2**15,
'blob':2**31}
TIMINGSSIZE = 100
SPATIALLIBS = {
'Windows':'libspatialite',
'Linux':'libspatialite.so',
'Darwin':'libspatialite.dylib'
}
DEFAULT_URI = 'sqlite://dummy.db'
import re
import sys
import locale
import os
import types
import datetime
import threading
import time
import csv
import cgi
import copy
import socket
import logging
import base64
import shutil
import marshal
import decimal
import struct
import urllib
import hashlib
import uuid
import glob
import traceback
import platform
PYTHON_VERSION = sys.version_info[:3]
if PYTHON_VERSION[0] == 2:
import cPickle as pickle
import cStringIO as StringIO
import copy_reg as copyreg
hashlib_md5 = hashlib.md5
bytes, unicode = str, unicode
else:
import pickle
from io import StringIO as StringIO
import copyreg
long = int
hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8'))
bytes, unicode = bytes, str
if PYTHON_VERSION[:2] < (2, 7):
from gluon.contrib.ordereddict import OrderedDict
else:
from collections import OrderedDict
CALLABLETYPES = (types.LambdaType, types.FunctionType,
types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
TABLE_ARGS = set(
('migrate','primarykey','fake_migrate','format','redefine',
'singular','plural','trigger_name','sequence_name','fields',
'common_filter','polymodel','table_class','on_define','rname'))
SELECT_ARGS = set(
('orderby', 'groupby', 'limitby','required', 'cache', 'left',
'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby'))
ogetattr = object.__getattribute__
osetattr = object.__setattr__
exists = os.path.exists
pjoin = os.path.join
###################################################################################
# following checks allow the use of dal without web2py, as a standalone module
###################################################################################
try:
from gluon.utils import web2py_uuid
except (ImportError, SystemError):
import uuid
def web2py_uuid(): return str(uuid.uuid4())
try:
import portalocker
have_portalocker = True
except ImportError:
have_portalocker = False
try:
from gluon import serializers
have_serializers = True
except ImportError:
have_serializers = False
try:
import json as simplejson
except ImportError:
try:
import gluon.contrib.simplejson as simplejson
except ImportError:
simplejson = None
LOGGER = logging.getLogger("web2py.dal")
DEFAULT = lambda:0
GLOBAL_LOCKER = threading.RLock()
THREAD_LOCAL = threading.local()
# internal representation of tables with field
# <table>.<field>, tables and fields may only be [a-zA-Z0-9_]
REGEX_TYPE = re.compile('^([\w\_\:]+)')
REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*')
REGEX_W = re.compile('^\w+$')
REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.([^.]+)$')
REGEX_NO_GREEDY_ENTITY_NAME = r'(.+?)'
REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)(\.(?P<name>\w+))?\.\w+$')
REGEX_CLEANUP_FN = re.compile('[\'"\s;]+')
REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)')
REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')
REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)")
REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')')
REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$')
REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$')
REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$')
REGEX_QUOTES = re.compile("'[^']*'")
REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$')
REGEX_PASSWORD = re.compile('\://([^:@]*)\:')
REGEX_NOPASSWD = re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)'
# list of drivers will be built on the fly
# and lists only what is available
DRIVERS = []
try:
from new import classobj
from google.appengine.ext import db as gae
from google.appengine.ext import ndb
from google.appengine.api import namespace_manager, rdbms
from google.appengine.api.datastore_types import Key ### for belongs on ID
from google.appengine.ext.db.polymodel import PolyModel
from google.appengine.ext.ndb.polymodel import PolyModel as NDBPolyModel
DRIVERS.append('google')
except ImportError:
pass
if not 'google' in DRIVERS:
try:
from pysqlite2 import dbapi2 as sqlite2
DRIVERS.append('SQLite(sqlite2)')
except ImportError:
LOGGER.debug('no SQLite drivers pysqlite2.dbapi2')
try:
from sqlite3 import dbapi2 as sqlite3
DRIVERS.append('SQLite(sqlite3)')
except ImportError:
LOGGER.debug('no SQLite drivers sqlite3')
try:
# first try contrib driver, then from site-packages (if installed)
try:
import gluon.contrib.pymysql as pymysql
# monkeypatch pymysql because they havent fixed the bug:
# https://github.com/petehunt/PyMySQL/issues/86
pymysql.ESCAPE_REGEX = re.compile("'")
pymysql.ESCAPE_MAP = {"'": "''"}
# end monkeypatch
except ImportError:
import pymysql
DRIVERS.append('MySQL(pymysql)')
except ImportError:
LOGGER.debug('no MySQL driver pymysql')
try:
import MySQLdb
DRIVERS.append('MySQL(MySQLdb)')
except ImportError:
LOGGER.debug('no MySQL driver MySQLDB')
try:
import mysql.connector as mysqlconnector
DRIVERS.append("MySQL(mysqlconnector)")
except ImportError:
LOGGER.debug("no driver mysql.connector")
try:
import psycopg2
from psycopg2.extensions import adapt as psycopg2_adapt
DRIVERS.append('PostgreSQL(psycopg2)')
except ImportError:
LOGGER.debug('no PostgreSQL driver psycopg2')
try:
# first try contrib driver, then from site-packages (if installed)
try:
import gluon.contrib.pg8000.dbapi as pg8000
except ImportError:
import pg8000.dbapi as pg8000
DRIVERS.append('PostgreSQL(pg8000)')
except ImportError:
LOGGER.debug('no PostgreSQL driver pg8000')
try:
import cx_Oracle
DRIVERS.append('Oracle(cx_Oracle)')
except ImportError:
LOGGER.debug('no Oracle driver cx_Oracle')
try:
try:
import pyodbc
except ImportError:
try:
import gluon.contrib.pypyodbc as pyodbc
except Exception, e:
raise ImportError(str(e))
DRIVERS.append('MSSQL(pyodbc)')
DRIVERS.append('DB2(pyodbc)')
DRIVERS.append('Teradata(pyodbc)')
DRIVERS.append('Ingres(pyodbc)')
except ImportError:
LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc')
try:
import Sybase
DRIVERS.append('Sybase(Sybase)')
except ImportError:
LOGGER.debug('no Sybase driver')
try:
import kinterbasdb
DRIVERS.append('Interbase(kinterbasdb)')
DRIVERS.append('Firebird(kinterbasdb)')
except ImportError:
LOGGER.debug('no Firebird/Interbase driver kinterbasdb')
try:
import fdb
DRIVERS.append('Firebird(fdb)')
except ImportError:
LOGGER.debug('no Firebird driver fdb')
#####
try:
import firebirdsql
DRIVERS.append('Firebird(firebirdsql)')
except ImportError:
LOGGER.debug('no Firebird driver firebirdsql')
try:
import informixdb
DRIVERS.append('Informix(informixdb)')
LOGGER.warning('Informix support is experimental')
except ImportError:
LOGGER.debug('no Informix driver informixdb')
try:
import sapdb
DRIVERS.append('SQL(sapdb)')
LOGGER.warning('SAPDB support is experimental')
except ImportError:
LOGGER.debug('no SAP driver sapdb')
try:
import cubriddb
DRIVERS.append('Cubrid(cubriddb)')
LOGGER.warning('Cubrid support is experimental')
except ImportError:
LOGGER.debug('no Cubrid driver cubriddb')
try:
from com.ziclix.python.sql import zxJDBC
import java.sql
# Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/
from org.sqlite import JDBC # required by java.sql; ensure we have it
zxJDBC_sqlite = java.sql.DriverManager
DRIVERS.append('PostgreSQL(zxJDBC)')
DRIVERS.append('SQLite(zxJDBC)')
LOGGER.warning('zxJDBC support is experimental')
is_jdbc = True
except ImportError:
LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC')
is_jdbc = False
try:
import couchdb
DRIVERS.append('CouchDB(couchdb)')
except ImportError:
LOGGER.debug('no Couchdb driver couchdb')
try:
import pymongo
DRIVERS.append('MongoDB(pymongo)')
except:
LOGGER.debug('no MongoDB driver pymongo')
try:
import imaplib
DRIVERS.append('IMAP(imaplib)')
except:
LOGGER.debug('no IMAP driver imaplib')
PLURALIZE_RULES = [
(re.compile('child$'), re.compile('child$'), 'children'),
(re.compile('oot$'), re.compile('oot$'), 'eet'),
(re.compile('ooth$'), re.compile('ooth$'), 'eeth'),
(re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'),
(re.compile('sis$'), re.compile('sis$'), 'ses'),
(re.compile('man$'), re.compile('man$'), 'men'),
(re.compile('ife$'), re.compile('ife$'), 'ives'),
(re.compile('eau$'), re.compile('eau$'), 'eaux'),
(re.compile('lf$'), re.compile('lf$'), 'lves'),
(re.compile('[sxz]$'), re.compile('$'), 'es'),
(re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'),
(re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'),
(re.compile('$'), re.compile('$'), 's'),
]
def pluralize(singular, rules=PLURALIZE_RULES):
for line in rules:
re_search, re_sub, replace = line
plural = re_search.search(singular) and re_sub.sub(replace, singular)
if plural: return plural
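# Illustrative examples (not part of the original): pluralize('child') -> 'children',
# pluralize('box') -> 'boxes', pluralize('person') -> 'persons' (generic trailing-'s' rule).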
def hide_password(uri):
if isinstance(uri,(list,tuple)):
return [hide_password(item) for item in uri]
return REGEX_NOPASSWD.sub('******',uri)
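# Illustrative example (not part of the original): hide_password masks the credential
# portion of a DAL URI before it is logged, e.g.
#   hide_password('mysql://user:secret@localhost/test')  # -> roughly 'mysql:******@localhost/test'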
def OR(a,b):
return a|b
def AND(a,b):
return a&b
def IDENTITY(x): return x
def varquote_aux(name,quotestr='%s'):
return name if REGEX_W.match(name) else quotestr % name
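# Illustrative examples (not part of the original): varquote_aux leaves plain
# identifiers untouched and applies the quote template otherwise, e.g.
#   varquote_aux('mytable')           # -> 'mytable'
#   varquote_aux('my table', '"%s"')  # -> '"my table"'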
def quote_keyword(a,keyword='timestamp'):
regex = re.compile('\.keyword(?=\w)')
a = regex.sub('."%s"' % keyword,a)
return a
if 'google' in DRIVERS:
is_jdbc = False
class GAEDecimalProperty(gae.Property):
"""
GAE decimal implementation
"""
data_type = decimal.Decimal
def __init__(self, precision, scale, **kwargs):
            super(GAEDecimalProperty, self).__init__(**kwargs)
d = '1.'
for x in range(scale):
d += '0'
self.round = decimal.Decimal(d)
def get_value_for_datastore(self, model_instance):
value = super(GAEDecimalProperty, self)\
.get_value_for_datastore(model_instance)
if value is None or value == '':
return None
else:
return str(value)
def make_value_from_datastore(self, value):
if value is None or value == '':
return None
else:
return decimal.Decimal(value).quantize(self.round)
def validate(self, value):
value = super(GAEDecimalProperty, self).validate(value)
if value is None or isinstance(value, decimal.Decimal):
return value
elif isinstance(value, basestring):
return decimal.Decimal(value)
raise gae.BadValueError("Property %s must be a Decimal or string."\
% self.name)
#TODO Needs more testing
class NDBDecimalProperty(ndb.StringProperty):
"""
NDB decimal implementation
"""
data_type = decimal.Decimal
def __init__(self, precision, scale, **kwargs):
d = '1.'
for x in range(scale):
d += '0'
self.round = decimal.Decimal(d)
def _to_base_type(self, value):
if value is None or value == '':
return None
else:
return str(value)
def _from_base_type(self, value):
if value is None or value == '':
return None
else:
return decimal.Decimal(value).quantize(self.round)
def _validate(self, value):
if value is None or isinstance(value, decimal.Decimal):
return value
elif isinstance(value, basestring):
return decimal.Decimal(value)
raise TypeError("Property %s must be a Decimal or string."\
% self._name)
###################################################################################
# class that handles connection pooling (all adapters are derived from this one)
###################################################################################
class ConnectionPool(object):
POOLS = {}
check_active_connection = True
@staticmethod
def set_folder(folder):
THREAD_LOCAL.folder = folder
# ## this allows gluon to commit/rollback all dbs in this thread
def close(self,action='commit',really=True):
if action:
if callable(action):
action(self)
else:
getattr(self, action)()
# ## if you want pools, recycle this connection
if self.pool_size:
GLOBAL_LOCKER.acquire()
pool = ConnectionPool.POOLS[self.uri]
if len(pool) < self.pool_size:
pool.append(self.connection)
really = False
GLOBAL_LOCKER.release()
if really:
self.close_connection()
self.connection = None
@staticmethod
def close_all_instances(action):
""" to close cleanly databases in a multithreaded environment """
dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
for db_uid, db_group in dbs:
for db in db_group:
if hasattr(db,'_adapter'):
db._adapter.close(action)
getattr(THREAD_LOCAL,'db_instances',{}).clear()
getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear()
if callable(action):
action(None)
return
def find_or_make_work_folder(self):
""" this actually does not make the folder. it has to be there """
self.folder = getattr(THREAD_LOCAL,'folder','')
if (os.path.isabs(self.folder) and
isinstance(self, UseDatabaseStoredFile) and
self.folder.startswith(os.getcwd())):
self.folder = os.path.relpath(self.folder, os.getcwd())
# Creating the folder if it does not exist
if False and self.folder and not exists(self.folder):
os.mkdir(self.folder)
def after_connection_hook(self):
"""hook for the after_connection parameter"""
if callable(self._after_connection):
self._after_connection(self)
self.after_connection()
def after_connection(self):
""" this it is supposed to be overloaded by adapters"""
pass
def reconnect(self, f=None, cursor=True):
"""
this function defines: self.connection and self.cursor
(iff cursor is True)
        if self.pool_size>0 it will try to pull the connection from the pool
        if the connection is not active (closed by the db server) it will loop
        if not self.pool_size, or no active connection is in the pool, it makes a new one
"""
if getattr(self,'connection', None) != None:
return
if f is None:
f = self.connector
# if not hasattr(self, "driver") or self.driver is None:
# LOGGER.debug("Skipping connection since there's no driver")
# return
if not self.pool_size:
self.connection = f()
self.cursor = cursor and self.connection.cursor()
else:
uri = self.uri
POOLS = ConnectionPool.POOLS
while True:
GLOBAL_LOCKER.acquire()
if not uri in POOLS:
POOLS[uri] = []
if POOLS[uri]:
self.connection = POOLS[uri].pop()
GLOBAL_LOCKER.release()
self.cursor = cursor and self.connection.cursor()
try:
if self.cursor and self.check_active_connection:
self.execute('SELECT 1;')
break
except:
pass
else:
GLOBAL_LOCKER.release()
self.connection = f()
self.cursor = cursor and self.connection.cursor()
break
self.after_connection_hook()
###################################################################################
# metaclass to prepare adapter classes static values
###################################################################################
class AdapterMeta(type):
"""Metaclass to support manipulation of adapter classes.
At the moment is used to intercept entity_quoting argument passed to DAL.
"""
def __call__(cls, *args, **kwargs):
entity_quoting = kwargs.get('entity_quoting', False)
if 'entity_quoting' in kwargs:
del kwargs['entity_quoting']
obj = super(AdapterMeta, cls).__call__(*args, **kwargs)
if not entity_quoting:
quot = obj.QUOTE_TEMPLATE = '%s'
regex_ent = r'(\w+)'
else:
quot = obj.QUOTE_TEMPLATE
regex_ent = REGEX_NO_GREEDY_ENTITY_NAME
obj.REGEX_TABLE_DOT_FIELD = re.compile(r'^' + \
quot % regex_ent + \
r'\.' + \
quot % regex_ent + \
r'$')
return obj
###################################################################################
# this is a generic adapter that does nothing; all others are derived from this one
###################################################################################
class BaseAdapter(ConnectionPool):
__metaclass__ = AdapterMeta
native_json = False
driver = None
driver_name = None
drivers = () # list of drivers from which to pick
connection = None
commit_on_alter_table = False
support_distributed_transaction = False
uploads_in_blob = False
can_select_for_update = True
dbpath = None
folder = None
connector = lambda *args, **kwargs: None # __init__ should override this
TRUE = 'T'
FALSE = 'F'
T_SEP = ' '
QUOTE_TEMPLATE = '"%s"'
types = {
'boolean': 'CHAR(1)',
'string': 'CHAR(%(length)s)',
'text': 'TEXT',
'json': 'TEXT',
'password': 'CHAR(%(length)s)',
'blob': 'BLOB',
'upload': 'CHAR(%(length)s)',
'integer': 'INTEGER',
'bigint': 'INTEGER',
'float':'DOUBLE',
'double': 'DOUBLE',
'decimal': 'DOUBLE',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'TEXT',
'list:string': 'TEXT',
'list:reference': 'TEXT',
# the two below are only used when DAL(...bigint_id=True) and replace 'id','reference'
'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT',
'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
def isOperationalError(self,exception):
if not hasattr(self.driver, "OperationalError"):
return None
return isinstance(exception, self.driver.OperationalError)
def isProgrammingError(self,exception):
if not hasattr(self.driver, "ProgrammingError"):
return None
return isinstance(exception, self.driver.ProgrammingError)
def id_query(self, table):
pkeys = getattr(table,'_primarykey',None)
if pkeys:
return table[pkeys[0]] != None
else:
return table._id != None
def adapt(self, obj):
return "'%s'" % obj.replace("'", "''")
def smart_adapt(self, obj):
if isinstance(obj,(int,float)):
return str(obj)
return self.adapt(str(obj))
def file_exists(self, filename):
"""
to be used ONLY for files that on GAE may not be on filesystem
"""
return exists(filename)
def file_open(self, filename, mode='rb', lock=True):
"""
to be used ONLY for files that on GAE may not be on filesystem
"""
if have_portalocker and lock:
fileobj = portalocker.LockedFile(filename,mode)
else:
fileobj = open(filename,mode)
return fileobj
def file_close(self, fileobj):
"""
to be used ONLY for files that on GAE may not be on filesystem
"""
if fileobj:
fileobj.close()
def file_delete(self, filename):
os.unlink(filename)
def find_driver(self,adapter_args,uri=None):
self.adapter_args = adapter_args
if getattr(self,'driver',None) != None:
return
drivers_available = [driver for driver in self.drivers
if driver in globals()]
if uri:
items = uri.split('://',1)[0].split(':')
request_driver = items[1] if len(items)>1 else None
else:
request_driver = None
request_driver = request_driver or adapter_args.get('driver')
if request_driver:
if request_driver in drivers_available:
self.driver_name = request_driver
self.driver = globals().get(request_driver)
else:
raise RuntimeError("driver %s not available" % request_driver)
elif drivers_available:
self.driver_name = drivers_available[0]
self.driver = globals().get(self.driver_name)
else:
raise RuntimeError("no driver available %s" % str(self.drivers))
def log(self, message, table=None):
""" Logs migrations
It will not log changes if logfile is not specified. Defaults
to sql.log
"""
isabs = None
logfilename = self.adapter_args.get('logfile','sql.log')
writelog = bool(logfilename)
if writelog:
isabs = os.path.isabs(logfilename)
if table and table._dbt and writelog and self.folder:
if isabs:
table._loggername = logfilename
else:
table._loggername = pjoin(self.folder, logfilename)
logfile = self.file_open(table._loggername, 'a')
logfile.write(message)
self.file_close(logfile)
def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={},do_connect=True, after_connection=None):
self.db = db
self.dbengine = "None"
self.uri = uri
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
class Dummy(object):
lastrowid = 1
def __getattr__(self, value):
return lambda *a, **b: []
self.connection = Dummy()
self.cursor = Dummy()
def sequence_name(self,tablename):
return self.QUOTE_TEMPLATE % ('%s_sequence' % tablename)
def trigger_name(self,tablename):
return '%s_sequence' % tablename
def varquote(self,name):
return name
def create_table(self, table,
migrate=True,
fake_migrate=False,
polymodel=None):
db = table._db
fields = []
# PostGIS geo fields are added after the table has been created
postcreation_fields = []
sql_fields = {}
sql_fields_aux = {}
TFK = {}
tablename = table._tablename
sortable = 0
types = self.types
for field in table:
sortable += 1
field_name = field.name
field_type = field.type
if isinstance(field_type,SQLCustomType):
ftype = field_type.native or field_type.type
elif field_type.startswith('reference'):
referenced = field_type[10:].strip()
if referenced == '.':
referenced = tablename
constraint_name = self.constraint_name(tablename, field_name)
# if not '.' in referenced \
# and referenced != tablename \
# and hasattr(table,'_primarykey'):
# ftype = types['integer']
#else:
try:
rtable = db[referenced]
rfield = rtable._id
rfieldname = rfield.name
rtablename = referenced
except (KeyError, ValueError, AttributeError), e:
LOGGER.debug('Error: %s' % e)
try:
rtablename,rfieldname = referenced.split('.')
rtable = db[rtablename]
rfield = rtable[rfieldname]
except Exception, e:
LOGGER.debug('Error: %s' %e)
raise KeyError('Cannot resolve reference %s in %s definition' % (referenced, table._tablename))
# must be PK reference or unique
if getattr(rtable, '_primarykey', None) and rfieldname in rtable._primarykey or \
rfield.unique:
ftype = types[rfield.type[:9]] % \
dict(length=rfield.length)
# multicolumn primary key reference?
if not rfield.unique and len(rtable._primarykey)>1:
# then it has to be a table level FK
if rtablename not in TFK:
TFK[rtablename] = {}
TFK[rtablename][rfieldname] = field_name
else:
ftype = ftype + \
types['reference FK'] % dict(
constraint_name = constraint_name, # should be quoted
foreign_key = rtable.sqlsafe + ' (' + rfield.sqlsafe_name + ')',
table_name = table.sqlsafe,
field_name = field.sqlsafe_name,
on_delete_action=field.ondelete)
else:
# make a guess here for circular references
if referenced in db:
id_fieldname = db[referenced]._id.sqlsafe_name
elif referenced == tablename:
id_fieldname = table._id.sqlsafe_name
else: #make a guess
id_fieldname = self.QUOTE_TEMPLATE % 'id'
#gotcha: the referenced table must be defined before
#the referencing one to be able to create the table
#Also if it's not recommended, we can still support
#references to tablenames without rname to make
#migrations and model relationship work also if tables
#are not defined in order
if referenced == tablename:
real_referenced = db[referenced].sqlsafe
else:
real_referenced = (referenced in db
and db[referenced].sqlsafe
or referenced)
rfield = db[referenced]._id
ftype = types[field_type[:9]] % dict(
index_name = self.QUOTE_TEMPLATE % (field_name+'__idx'),
field_name = field.sqlsafe_name,
constraint_name = self.QUOTE_TEMPLATE % constraint_name,
foreign_key = '%s (%s)' % (real_referenced, rfield.sqlsafe_name),
on_delete_action=field.ondelete)
elif field_type.startswith('list:reference'):
ftype = types[field_type[:14]]
elif field_type.startswith('decimal'):
precision, scale = map(int,field_type[8:-1].split(','))
ftype = types[field_type[:7]] % \
dict(precision=precision,scale=scale)
elif field_type.startswith('geo'):
if not hasattr(self,'srid'):
raise RuntimeError('Adapter does not support geometry')
srid = self.srid
geotype, parms = field_type[:-1].split('(')
if not geotype in types:
raise SyntaxError(
'Field: unknown field type: %s for %s' \
% (field_type, field_name))
ftype = types[geotype]
if self.dbengine == 'postgres' and geotype == 'geometry':
# parameters: schema, srid, dimension
dimension = 2 # GIS.dimension ???
parms = parms.split(',')
if len(parms) == 3:
schema, srid, dimension = parms
elif len(parms) == 2:
schema, srid = parms
else:
schema = parms[0]
ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
ftype = ftype % dict(schema=schema,
tablename=tablename,
fieldname=field_name, srid=srid,
dimension=dimension)
postcreation_fields.append(ftype)
elif not field_type in types:
raise SyntaxError('Field: unknown field type: %s for %s' % \
(field_type, field_name))
else:
ftype = types[field_type]\
% dict(length=field.length)
if not field_type.startswith('id') and \
not field_type.startswith('reference'):
if field.notnull:
ftype += ' NOT NULL'
else:
ftype += self.ALLOW_NULL()
if field.unique:
ftype += ' UNIQUE'
if field.custom_qualifier:
ftype += ' %s' % field.custom_qualifier
# add to list of fields
sql_fields[field_name] = dict(
length=field.length,
unique=field.unique,
notnull=field.notnull,
sortable=sortable,
type=str(field_type),
sql=ftype)
if field.notnull and not field.default is None:
# Caveat: sql_fields and sql_fields_aux
# differ for default values.
# sql_fields is used to trigger migrations and sql_fields_aux
# is used for create tables.
# The reason is that we do not want to trigger
# a migration simply because a default value changes.
not_null = self.NOT_NULL(field.default, field_type)
ftype = ftype.replace('NOT NULL', not_null)
sql_fields_aux[field_name] = dict(sql=ftype)
# Postgres - PostGIS:
# geometry fields are added after the table has been created, not now
if not (self.dbengine == 'postgres' and \
field_type.startswith('geom')):
fields.append('%s %s' % (field.sqlsafe_name, ftype))
other = ';'
# backend-specific extensions to fields
if self.dbengine == 'mysql':
if not hasattr(table, "_primarykey"):
fields.append('PRIMARY KEY (%s)' % (self.QUOTE_TEMPLATE % table._id.name))
engine = self.adapter_args.get('engine','InnoDB')
other = ' ENGINE=%s CHARACTER SET utf8;' % engine
fields = ',\n '.join(fields)
for rtablename in TFK:
rfields = TFK[rtablename]
pkeys = [self.QUOTE_TEMPLATE % pk for pk in db[rtablename]._primarykey]
fkeys = [self.QUOTE_TEMPLATE % rfields[k].name for k in pkeys ]
fields = fields + ',\n ' + \
types['reference TFK'] % dict(
table_name = table.sqlsafe,
field_name=', '.join(fkeys),
foreign_table = table.sqlsafe,
foreign_key = ', '.join(pkeys),
on_delete_action = field.ondelete)
table_rname = table.sqlsafe
if getattr(table,'_primarykey',None):
query = "CREATE TABLE %s(\n %s,\n %s) %s" % \
(table.sqlsafe, fields,
self.PRIMARY_KEY(', '.join([self.QUOTE_TEMPLATE % pk for pk in table._primarykey])),other)
else:
query = "CREATE TABLE %s(\n %s\n)%s" % \
(table.sqlsafe, fields, other)
if self.uri.startswith('sqlite:///') \
or self.uri.startswith('spatialite:///'):
path_encoding = sys.getfilesystemencoding() \
or locale.getdefaultlocale()[1] or 'utf8'
dbpath = self.uri[9:self.uri.rfind('/')]\
.decode('utf8').encode(path_encoding)
else:
dbpath = self.folder
if not migrate:
return query
elif self.uri.startswith('sqlite:memory')\
or self.uri.startswith('spatialite:memory'):
table._dbt = None
elif isinstance(migrate, str):
table._dbt = pjoin(dbpath, migrate)
else:
table._dbt = pjoin(
dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))
if not table._dbt or not self.file_exists(table._dbt):
if table._dbt:
self.log('timestamp: %s\n%s\n'
% (datetime.datetime.today().isoformat(),
query), table)
if not fake_migrate:
self.create_sequence_and_triggers(query,table)
table._db.commit()
# Postgres geom fields are added now,
# after the table has been created
for query in postcreation_fields:
self.execute(query)
table._db.commit()
if table._dbt:
tfile = self.file_open(table._dbt, 'w')
pickle.dump(sql_fields, tfile)
self.file_close(tfile)
if fake_migrate:
self.log('faked!\n', table)
else:
self.log('success!\n', table)
else:
tfile = self.file_open(table._dbt, 'r')
try:
sql_fields_old = pickle.load(tfile)
except EOFError:
self.file_close(tfile)
raise RuntimeError('File %s appears corrupted' % table._dbt)
self.file_close(tfile)
if sql_fields != sql_fields_old:
self.migrate_table(
table,
sql_fields, sql_fields_old,
sql_fields_aux, None,
fake_migrate=fake_migrate
)
return query
def migrate_table(
self,
table,
sql_fields,
sql_fields_old,
sql_fields_aux,
logfile,
fake_migrate=False,
):
# logfile is deprecated (moved to adapter.log method)
db = table._db
db._migrated.append(table._tablename)
tablename = table._tablename
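        # Migration overview: compare the field metadata pickled in the
        # .table file (sql_fields_old) against the freshly computed metadata
        # (sql_fields). Fields present only in the new dict are ADDed,
        # fields present only in the old dict are DROPped, and fields whose
        # SQL definition changed are rebuilt through a temporary column
        # (add tmp, copy data, drop original, re-add, copy back, drop tmp).
        # Every executed statement is logged and the .table file is
        # rewritten once the changes have been applied.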
def fix(item):
k,v=item
if not isinstance(v,dict):
v=dict(type='unknown',sql=v)
if self.ignore_field_case is not True: return k, v
return k.lower(),v
        # make sure all field names are lower case to avoid
        # triggering migrations merely because of a change of case
sql_fields = dict(map(fix,sql_fields.iteritems()))
sql_fields_old = dict(map(fix,sql_fields_old.iteritems()))
sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems()))
if db._debug:
logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields))
keys = sql_fields.keys()
for key in sql_fields_old:
if not key in keys:
keys.append(key)
new_add = self.concat_add(tablename)
metadata_change = False
sql_fields_current = copy.copy(sql_fields_old)
for key in keys:
query = None
if not key in sql_fields_old:
sql_fields_current[key] = sql_fields[key]
if self.dbengine in ('postgres',) and \
sql_fields[key]['type'].startswith('geometry'):
# 'sql' == ftype in sql
query = [ sql_fields[key]['sql'] ]
else:
query = ['ALTER TABLE %s ADD %s %s;' % \
(table.sqlsafe, key,
sql_fields_aux[key]['sql'].replace(', ', new_add))]
metadata_change = True
elif self.dbengine in ('sqlite', 'spatialite'):
if key in sql_fields:
sql_fields_current[key] = sql_fields[key]
metadata_change = True
elif not key in sql_fields:
del sql_fields_current[key]
ftype = sql_fields_old[key]['type']
if (self.dbengine in ('postgres',) and
ftype.startswith('geometry')):
geotype, parms = ftype[:-1].split('(')
schema = parms.split(',')[0]
query = [ "SELECT DropGeometryColumn ('%(schema)s', "+
"'%(table)s', '%(field)s');" %
dict(schema=schema, table=tablename, field=key,) ]
elif self.dbengine in ('firebird',):
query = ['ALTER TABLE %s DROP %s;' %
(self.QUOTE_TEMPLATE % tablename, self.QUOTE_TEMPLATE % key)]
else:
query = ['ALTER TABLE %s DROP COLUMN %s;' %
(self.QUOTE_TEMPLATE % tablename, self.QUOTE_TEMPLATE % key)]
metadata_change = True
elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
and not (key in table.fields and
isinstance(table[key].type, SQLCustomType)) \
and not sql_fields[key]['type'].startswith('reference')\
and not sql_fields[key]['type'].startswith('double')\
and not sql_fields[key]['type'].startswith('id'):
sql_fields_current[key] = sql_fields[key]
t = tablename
tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
if self.dbengine in ('firebird',):
drop_expr = 'ALTER TABLE %s DROP %s;'
else:
drop_expr = 'ALTER TABLE %s DROP COLUMN %s;'
key_tmp = key + '__tmp'
query = ['ALTER TABLE %s ADD %s %s;' % (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key_tmp, tt),
'UPDATE %s SET %s=%s;' %
(self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key_tmp, self.QUOTE_TEMPLATE % key),
drop_expr % (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key),
'ALTER TABLE %s ADD %s %s;' %
(self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key, tt),
'UPDATE %s SET %s=%s;' %
(self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key, self.QUOTE_TEMPLATE % key_tmp),
drop_expr % (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key_tmp)]
metadata_change = True
elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
sql_fields_current[key] = sql_fields[key]
metadata_change = True
if query:
self.log('timestamp: %s\n'
% datetime.datetime.today().isoformat(), table)
db['_lastsql'] = '\n'.join(query)
for sub_query in query:
self.log(sub_query + '\n', table)
if fake_migrate:
if db._adapter.commit_on_alter_table:
self.save_dbt(table,sql_fields_current)
self.log('faked!\n', table)
else:
self.execute(sub_query)
# Caveat: mysql, oracle and firebird
# do not allow multiple alter table
# in one transaction so we must commit
# partial transactions and
# update table._dbt after alter table.
if db._adapter.commit_on_alter_table:
db.commit()
self.save_dbt(table,sql_fields_current)
self.log('success!\n', table)
elif metadata_change:
self.save_dbt(table,sql_fields_current)
if metadata_change and not (query and db._adapter.commit_on_alter_table):
db.commit()
self.save_dbt(table,sql_fields_current)
self.log('success!\n', table)
def save_dbt(self,table, sql_fields_current):
tfile = self.file_open(table._dbt, 'w')
pickle.dump(sql_fields_current, tfile)
self.file_close(tfile)
def LOWER(self, first):
return 'LOWER(%s)' % self.expand(first)
def UPPER(self, first):
return 'UPPER(%s)' % self.expand(first)
def COUNT(self, first, distinct=None):
return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \
% self.expand(first)
def EXTRACT(self, first, what):
return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
def EPOCH(self, first):
return self.EXTRACT(first, 'epoch')
def LENGTH(self, first):
return "LENGTH(%s)" % self.expand(first)
def AGGREGATE(self, first, what):
return "%s(%s)" % (what, self.expand(first))
def JOIN(self):
return 'JOIN'
def LEFT_JOIN(self):
return 'LEFT JOIN'
def RANDOM(self):
return 'Random()'
def NOT_NULL(self, default, field_type):
return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
def COALESCE(self, first, second):
expressions = [self.expand(first)]+[self.expand(e) for e in second]
return 'COALESCE(%s)' % ','.join(expressions)
def COALESCE_ZERO(self, first):
return 'COALESCE(%s,0)' % self.expand(first)
def RAW(self, first):
return first
def ALLOW_NULL(self):
return ''
def SUBSTRING(self, field, parameters):
return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
def PRIMARY_KEY(self, key):
return 'PRIMARY KEY(%s)' % key
def _drop(self, table, mode):
return ['DROP TABLE %s;' % table.sqlsafe]
def drop(self, table, mode=''):
db = table._db
queries = self._drop(table, mode)
for query in queries:
if table._dbt:
self.log(query + '\n', table)
self.execute(query)
db.commit()
del db[table._tablename]
del db.tables[db.tables.index(table._tablename)]
db._remove_references_to(table)
if table._dbt:
self.file_delete(table._dbt)
self.log('success!\n', table)
def _insert(self, table, fields):
table_rname = table.sqlsafe
if fields:
keys = ','.join(f.sqlsafe_name for f, v in fields)
values = ','.join(self.expand(v, f.type) for f, v in fields)
return 'INSERT INTO %s(%s) VALUES (%s);' % (table_rname, keys, values)
else:
return self._insert_empty(table)
def _insert_empty(self, table):
return 'INSERT INTO %s DEFAULT VALUES;' % (table.sqlsafe)
def insert(self, table, fields):
query = self._insert(table,fields)
try:
self.execute(query)
except Exception:
e = sys.exc_info()[1]
if hasattr(table,'_on_insert_error'):
return table._on_insert_error(table,fields,e)
raise e
if hasattr(table, '_primarykey'):
mydict = dict([(k[0].name, k[1]) for k in fields if k[0].name in table._primarykey])
if mydict != {}:
return mydict
id = self.lastrowid(table)
if hasattr(table, '_primarykey') and len(table._primarykey) == 1:
id = {table._primarykey[0]: id}
if not isinstance(id, (int, long)):
return id
rid = Reference(id)
(rid._table, rid._record) = (table, None)
return rid
def bulk_insert(self, table, items):
return [self.insert(table,item) for item in items]
def NOT(self, first):
return '(NOT %s)' % self.expand(first)
def AND(self, first, second):
return '(%s AND %s)' % (self.expand(first), self.expand(second))
def OR(self, first, second):
return '(%s OR %s)' % (self.expand(first), self.expand(second))
def BELONGS(self, first, second):
if isinstance(second, str):
return '(%s IN (%s))' % (self.expand(first), second[:-1])
if not second:
return '(1=0)'
items = ','.join(self.expand(item, first.type) for item in second)
return '(%s IN (%s))' % (self.expand(first), items)
def REGEXP(self, first, second):
"regular expression operator"
raise NotImplementedError
def LIKE(self, first, second):
"case sensitive like operator"
raise NotImplementedError
def ILIKE(self, first, second):
"case in-sensitive like operator"
return '(%s LIKE %s)' % (self.expand(first),
self.expand(second, 'string'))
def STARTSWITH(self, first, second):
return '(%s LIKE %s)' % (self.expand(first),
self.expand(second+'%', 'string'))
def ENDSWITH(self, first, second):
return '(%s LIKE %s)' % (self.expand(first),
self.expand('%'+second, 'string'))
def CONTAINS(self,first,second,case_sensitive=False):
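        # For string/text/json columns this builds a plain '%<value>%' LIKE
        # pattern. For list:* columns values are stored bar-encoded as
        # '|v1|v2|...|', so the pattern becomes '%|<value>|%'; literal '%'
        # is escaped as '%%' and '|' as '||' to match that encoding.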
if first.type in ('string','text', 'json'):
if isinstance(second,Expression):
second = Expression(None,self.CONCAT('%',Expression(
None,self.REPLACE(second,('%','%%'))),'%'))
else:
second = '%'+str(second).replace('%','%%')+'%'
elif first.type.startswith('list:'):
if isinstance(second,Expression):
second = Expression(None,self.CONCAT(
'%|',Expression(None,self.REPLACE(
Expression(None,self.REPLACE(
second,('%','%%'))),('|','||'))),'|%'))
else:
second = '%|'+str(second).replace('%','%%')\
.replace('|','||')+'|%'
op = case_sensitive and self.LIKE or self.ILIKE
return op(first,second)
def EQ(self, first, second=None):
if second is None:
return '(%s IS NULL)' % self.expand(first)
return '(%s = %s)' % (self.expand(first),
self.expand(second, first.type))
def NE(self, first, second=None):
if second is None:
return '(%s IS NOT NULL)' % self.expand(first)
return '(%s <> %s)' % (self.expand(first),
self.expand(second, first.type))
def LT(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s < None" % first)
return '(%s < %s)' % (self.expand(first),
self.expand(second,first.type))
def LE(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s <= None" % first)
return '(%s <= %s)' % (self.expand(first),
self.expand(second,first.type))
def GT(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s > None" % first)
return '(%s > %s)' % (self.expand(first),
self.expand(second,first.type))
def GE(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s >= None" % first)
return '(%s >= %s)' % (self.expand(first),
self.expand(second,first.type))
def is_numerical_type(self, ftype):
return ftype in ('integer','boolean','double','bigint') or \
ftype.startswith('decimal')
def REPLACE(self, first, (second, third)):
return 'REPLACE(%s,%s,%s)' % (self.expand(first,'string'),
self.expand(second,'string'),
self.expand(third,'string'))
def CONCAT(self, *items):
return '(%s)' % ' || '.join(self.expand(x,'string') for x in items)
def ADD(self, first, second):
if self.is_numerical_type(first.type) or isinstance(first.type, Field):
return '(%s + %s)' % (self.expand(first),
self.expand(second, first.type))
else:
return self.CONCAT(first, second)
def SUB(self, first, second):
return '(%s - %s)' % (self.expand(first),
self.expand(second, first.type))
def MUL(self, first, second):
return '(%s * %s)' % (self.expand(first),
self.expand(second, first.type))
def DIV(self, first, second):
return '(%s / %s)' % (self.expand(first),
self.expand(second, first.type))
def MOD(self, first, second):
return '(%s %% %s)' % (self.expand(first),
self.expand(second, first.type))
def AS(self, first, second):
return '%s AS %s' % (self.expand(first), second)
def ON(self, first, second):
table_rname = self.table_alias(first)
if use_common_filters(second):
second = self.common_filter(second,[first._tablename])
return ('%s ON %s') % (self.expand(table_rname), self.expand(second))
def INVERT(self, first):
return '%s DESC' % self.expand(first)
def COMMA(self, first, second):
return '%s, %s' % (self.expand(first), self.expand(second))
def CAST(self, first, second):
return 'CAST(%s AS %s)' % (first, second)
def expand(self, expression, field_type=None, colnames=False):
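        # Dispatch: a Field expands to its quoted "table"."column" (with an
        # optional CAST to text when a string is expected); an Expression or
        # Query recurses through its stored op/first/second; bare values go
        # through represent() for the requested field_type; lists and tuples
        # become comma-joined representations.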
if isinstance(expression, Field):
et = expression.table
if not colnames:
table_rname = et._ot and self.QUOTE_TEMPLATE % et._tablename or et._rname or self.QUOTE_TEMPLATE % et._tablename
out = '%s.%s' % (table_rname, expression._rname or (self.QUOTE_TEMPLATE % (expression.name)))
else:
out = '%s.%s' % (self.QUOTE_TEMPLATE % et._tablename, self.QUOTE_TEMPLATE % expression.name)
if field_type == 'string' and not expression.type in (
'string','text','json','password'):
out = self.CAST(out, self.types['text'])
return out
elif isinstance(expression, (Expression, Query)):
first = expression.first
second = expression.second
op = expression.op
optional_args = expression.optional_args or {}
if not second is None:
out = op(first, second, **optional_args)
elif not first is None:
out = op(first,**optional_args)
elif isinstance(op, str):
if op.endswith(';'):
op=op[:-1]
out = '(%s)' % op
else:
out = op()
return out
elif field_type:
return str(self.represent(expression,field_type))
elif isinstance(expression,(list,tuple)):
return ','.join(self.represent(item,field_type) \
for item in expression)
elif isinstance(expression, bool):
return '1' if expression else '0'
else:
return str(expression)
def table_alias(self, tbl):
if not isinstance(tbl, Table):
tbl = self.db[tbl]
return tbl.sqlsafe_alias
def alias(self, table, alias):
"""
        Given a table object, returns a new table object
        with the given alias name.
"""
other = copy.copy(table)
other['_ot'] = other._ot or other.sqlsafe
other['ALL'] = SQLALL(other)
other['_tablename'] = alias
for fieldname in other.fields:
other[fieldname] = copy.copy(other[fieldname])
other[fieldname]._tablename = alias
other[fieldname].tablename = alias
other[fieldname].table = other
table._db[alias] = other
return other
def _truncate(self, table, mode=''):
return ['TRUNCATE TABLE %s %s;' % (table.sqlsafe, mode or '')]
def truncate(self, table, mode= ' '):
        # log each truncate statement via self.log and execute it
try:
queries = table._db._adapter._truncate(table, mode)
for query in queries:
self.log(query + '\n', table)
self.execute(query)
self.log('success!\n', table)
finally:
pass
def _update(self, tablename, query, fields):
if query:
if use_common_filters(query):
query = self.common_filter(query, [tablename])
sql_w = ' WHERE ' + self.expand(query)
else:
sql_w = ''
sql_v = ','.join(['%s=%s' % (field.sqlsafe_name,
self.expand(value, field.type)) \
for (field, value) in fields])
tablename = self.db[tablename].sqlsafe
return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
def update(self, tablename, query, fields):
sql = self._update(tablename, query, fields)
try:
self.execute(sql)
except Exception:
e = sys.exc_info()[1]
table = self.db[tablename]
if hasattr(table,'_on_update_error'):
return table._on_update_error(table,query,fields,e)
raise e
try:
return self.cursor.rowcount
except:
return None
def _delete(self, tablename, query):
if query:
if use_common_filters(query):
query = self.common_filter(query, [tablename])
sql_w = ' WHERE ' + self.expand(query)
else:
sql_w = ''
tablename = self.db[tablename].sqlsafe
return 'DELETE FROM %s%s;' % (tablename, sql_w)
def delete(self, tablename, query):
sql = self._delete(tablename, query)
### special code to handle CASCADE in SQLite & SpatiaLite
db = self.db
table = db[tablename]
if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by:
deleted = [x[table._id.name] for x in db(query).select(table._id)]
### end special code to handle CASCADE in SQLite & SpatiaLite
self.execute(sql)
try:
counter = self.cursor.rowcount
except:
counter = None
### special code to handle CASCADE in SQLite & SpatiaLite
if self.dbengine in ('sqlite', 'spatialite') and counter:
for field in table._referenced_by:
if field.type=='reference '+table._tablename \
and field.ondelete=='CASCADE':
db(field.belongs(deleted)).delete()
### end special code to handle CASCADE in SQLite & SpatiaLite
return counter
def get_table(self, query):
tablenames = self.tables(query)
if len(tablenames)==1:
return tablenames[0]
elif len(tablenames)<1:
raise RuntimeError("No table selected")
else:
raise RuntimeError("Too many tables selected")
def expand_all(self, fields, tablenames):
db = self.db
new_fields = []
append = new_fields.append
for item in fields:
if isinstance(item,SQLALL):
new_fields += item._table
elif isinstance(item,str):
m = self.REGEX_TABLE_DOT_FIELD.match(item)
if m:
tablename,fieldname = m.groups()
append(db[tablename][fieldname])
else:
append(Expression(db,lambda item=item:item))
else:
append(item)
# ## if no fields specified take them all from the requested tables
if not new_fields:
for table in tablenames:
for field in db[table]:
append(field)
return new_fields
def _select(self, query, fields, attributes):
tables = self.tables
for key in set(attributes.keys())-SELECT_ARGS:
raise SyntaxError('invalid select attribute: %s' % key)
args_get = attributes.get
tablenames = tables(query)
tablenames_for_common_filters = tablenames
for field in fields:
if isinstance(field, basestring):
m = self.REGEX_TABLE_DOT_FIELD.match(field)
if m:
tn,fn = m.groups()
field = self.db[tn][fn]
for tablename in tables(field):
if not tablename in tablenames:
tablenames.append(tablename)
if len(tablenames) < 1:
raise SyntaxError('Set: no tables selected')
def colexpand(field):
return self.expand(field, colnames=True)
self._colnames = map(colexpand, fields)
def geoexpand(field):
if isinstance(field.type,str) and field.type.startswith('geometry') and isinstance(field, Field):
field = field.st_astext()
return self.expand(field)
sql_f = ', '.join(map(geoexpand, fields))
sql_o = ''
sql_s = ''
left = args_get('left', False)
inner_join = args_get('join', False)
distinct = args_get('distinct', False)
groupby = args_get('groupby', False)
orderby = args_get('orderby', False)
having = args_get('having', False)
limitby = args_get('limitby', False)
orderby_on_limitby = args_get('orderby_on_limitby', True)
for_update = args_get('for_update', False)
if self.can_select_for_update is False and for_update is True:
raise SyntaxError('invalid select attribute: for_update')
if distinct is True:
sql_s += 'DISTINCT'
elif distinct:
sql_s += 'DISTINCT ON (%s)' % distinct
if inner_join:
icommand = self.JOIN()
if not isinstance(inner_join, (tuple, list)):
inner_join = [inner_join]
ijoint = [t._tablename for t in inner_join
if not isinstance(t,Expression)]
ijoinon = [t for t in inner_join if isinstance(t, Expression)]
itables_to_merge={} #issue 490
[itables_to_merge.update(
dict.fromkeys(tables(t))) for t in ijoinon]
ijoinont = [t.first._tablename for t in ijoinon]
[itables_to_merge.pop(t) for t in ijoinont
if t in itables_to_merge] #issue 490
iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
iexcluded = [t for t in tablenames
if not t in iimportant_tablenames]
if left:
join = attributes['left']
command = self.LEFT_JOIN()
if not isinstance(join, (tuple, list)):
join = [join]
joint = [t._tablename for t in join
if not isinstance(t, Expression)]
joinon = [t for t in join if isinstance(t, Expression)]
#patch join+left patch (solves problem with ordering in left joins)
tables_to_merge={}
[tables_to_merge.update(
dict.fromkeys(tables(t))) for t in joinon]
joinont = [t.first._tablename for t in joinon]
[tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
tablenames_for_common_filters = [t for t in tablenames
if not t in joinont ]
important_tablenames = joint + joinont + tables_to_merge.keys()
excluded = [t for t in tablenames
if not t in important_tablenames ]
else:
excluded = tablenames
if use_common_filters(query):
query = self.common_filter(query,tablenames_for_common_filters)
sql_w = ' WHERE ' + self.expand(query) if query else ''
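        # Assemble the FROM clause: tables joined explicitly (JOIN ... ON or
        # LEFT JOIN ... ON) are appended together with their join
        # expressions, the remaining tables are listed comma-separated, and
        # when inner and left joins are combined, tables already covered by
        # an ON clause are excluded from the comma-separated list.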
if inner_join and not left:
sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \
itables_to_merge.keys()])
for t in ijoinon:
sql_t += ' %s %s' % (icommand, t)
elif not inner_join and left:
sql_t = ', '.join([self.table_alias(t) for t in excluded + \
tables_to_merge.keys()])
if joint:
sql_t += ' %s %s' % (command,
','.join([t for t in joint]))
for t in joinon:
sql_t += ' %s %s' % (command, t)
elif inner_join and left:
all_tables_in_query = set(important_tablenames + \
iimportant_tablenames + \
tablenames)
tables_in_joinon = set(joinont + ijoinont)
tables_not_in_joinon = \
all_tables_in_query.difference(tables_in_joinon)
sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon])
for t in ijoinon:
sql_t += ' %s %s' % (icommand, t)
if joint:
sql_t += ' %s %s' % (command,
','.join([t for t in joint]))
for t in joinon:
sql_t += ' %s %s' % (command, t)
else:
sql_t = ', '.join(self.table_alias(t) for t in tablenames)
if groupby:
if isinstance(groupby, (list, tuple)):
groupby = xorify(groupby)
sql_o += ' GROUP BY %s' % self.expand(groupby)
if having:
sql_o += ' HAVING %s' % attributes['having']
if orderby:
if isinstance(orderby, (list, tuple)):
orderby = xorify(orderby)
if str(orderby) == '<random>':
sql_o += ' ORDER BY %s' % self.RANDOM()
else:
sql_o += ' ORDER BY %s' % self.expand(orderby)
if (limitby and not groupby and tablenames and orderby_on_limitby and not orderby):
sql_o += ' ORDER BY %s' % ', '.join(
[self.db[t].sqlsafe + '.' + self.db[t][x].sqlsafe_name for t in tablenames for x in (
hasattr(self.db[t], '_primarykey') and self.db[t]._primarykey
or ['_id']
)
]
)
        # limitby is applied by select_limitby, which backends such as Oracle override
sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
if for_update and self.can_select_for_update is True:
sql = sql.rstrip(';') + ' FOR UPDATE;'
return sql
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
return 'SELECT %s %s FROM %s%s%s;' % \
(sql_s, sql_f, sql_t, sql_w, sql_o)
def _fetchall(self):
return self.cursor.fetchall()
def _select_aux(self,sql,fields,attributes):
args_get = attributes.get
cache = args_get('cache',None)
if not cache:
self.execute(sql)
rows = self._fetchall()
else:
(cache_model, time_expire) = cache
key = self.uri + '/' + sql + '/rows'
if len(key)>200: key = hashlib_md5(key).hexdigest()
def _select_aux2():
self.execute(sql)
return self._fetchall()
rows = cache_model(key,_select_aux2,time_expire)
if isinstance(rows,tuple):
rows = list(rows)
limitby = args_get('limitby', None) or (0,)
rows = self.rowslice(rows,limitby[0],None)
processor = args_get('processor',self.parse)
cacheable = args_get('cacheable',False)
return processor(rows,fields,self._colnames,cacheable=cacheable)
def select(self, query, fields, attributes):
"""
Always returns a Rows object, possibly empty.
"""
sql = self._select(query, fields, attributes)
cache = attributes.get('cache', None)
if cache and attributes.get('cacheable',False):
del attributes['cache']
(cache_model, time_expire) = cache
key = self.uri + '/' + sql
if len(key)>200: key = hashlib_md5(key).hexdigest()
args = (sql,fields,attributes)
return cache_model(
key,
lambda self=self,args=args:self._select_aux(*args),
time_expire)
else:
return self._select_aux(sql,fields,attributes)
def _count(self, query, distinct=None):
tablenames = self.tables(query)
if query:
if use_common_filters(query):
query = self.common_filter(query, tablenames)
sql_w = ' WHERE ' + self.expand(query)
else:
sql_w = ''
sql_t = ','.join(self.table_alias(t) for t in tablenames)
if distinct:
if isinstance(distinct,(list, tuple)):
distinct = xorify(distinct)
sql_d = self.expand(distinct)
return 'SELECT count(DISTINCT %s) FROM %s%s;' % \
(sql_d, sql_t, sql_w)
return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
def count(self, query, distinct=None):
self.execute(self._count(query, distinct))
return self.cursor.fetchone()[0]
def tables(self, *queries):
tables = set()
for query in queries:
if isinstance(query, Field):
tables.add(query.tablename)
elif isinstance(query, (Expression, Query)):
if not query.first is None:
tables = tables.union(self.tables(query.first))
if not query.second is None:
tables = tables.union(self.tables(query.second))
return list(tables)
def commit(self):
if self.connection:
return self.connection.commit()
def rollback(self):
if self.connection:
return self.connection.rollback()
def close_connection(self):
if self.connection:
r = self.connection.close()
self.connection = None
return r
def distributed_transaction_begin(self, key):
return
def prepare(self, key):
if self.connection: self.connection.prepare()
def commit_prepared(self, key):
if self.connection: self.connection.commit()
def rollback_prepared(self, key):
if self.connection: self.connection.rollback()
def concat_add(self, tablename):
return ', ADD '
def constraint_name(self, table, fieldname):
return '%s_%s__constraint' % (table,fieldname)
def create_sequence_and_triggers(self, query, table, **args):
self.execute(query)
def log_execute(self, *a, **b):
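        # Optionally rewrite the SQL via the filter_sql_command hook, record
        # it as db._lastsql, execute it on the shared cursor and append the
        # timing to db._timings (trimmed to the last TIMINGSSIZE entries).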
if not self.connection: raise ValueError(a[0])
if not self.connection: return None
command = a[0]
if hasattr(self,'filter_sql_command'):
command = self.filter_sql_command(command)
if self.db._debug:
LOGGER.debug('SQL: %s' % command)
self.db._lastsql = command
t0 = time.time()
ret = self.cursor.execute(command, *a[1:], **b)
self.db._timings.append((command,time.time()-t0))
del self.db._timings[:-TIMINGSSIZE]
return ret
def execute(self, *a, **b):
return self.log_execute(*a, **b)
def represent(self, obj, fieldtype):
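        # Turn a Python value into its SQL literal for the given field type:
        # callables are called first, SQLCustomType uses its encoder, list:*
        # values are bar-encoded, and dates/times/json/blobs are normalised
        # to strings before being adapted (quoted and escaped).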
field_is_type = fieldtype.startswith
if isinstance(obj, CALLABLETYPES):
obj = obj()
if isinstance(fieldtype, SQLCustomType):
value = fieldtype.encoder(obj)
if fieldtype.type in ('string','text', 'json'):
return self.adapt(value)
return value
if isinstance(obj, (Expression, Field)):
return str(obj)
if field_is_type('list:'):
if not obj:
obj = []
elif not isinstance(obj, (list, tuple)):
obj = [obj]
if field_is_type('list:string'):
obj = map(str,obj)
else:
obj = map(int,[o for o in obj if o != ''])
# we don't want to bar_encode json objects
if isinstance(obj, (list, tuple)) and (not fieldtype == "json"):
obj = bar_encode(obj)
if obj is None:
return 'NULL'
if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']:
return 'NULL'
r = self.represent_exceptions(obj, fieldtype)
if not r is None:
return r
if fieldtype == 'boolean':
if obj and not str(obj)[:1].upper() in '0F':
return self.smart_adapt(self.TRUE)
else:
return self.smart_adapt(self.FALSE)
if fieldtype == 'id' or fieldtype == 'integer':
return str(long(obj))
if field_is_type('decimal'):
return str(obj)
elif field_is_type('reference'): # reference
# check for tablename first
referenced = fieldtype[9:].strip()
if referenced in self.db.tables:
return str(long(obj))
p = referenced.partition('.')
if p[2] != '':
try:
ftype = self.db[p[0]][p[2]].type
return self.represent(obj, ftype)
except (ValueError, KeyError):
return repr(obj)
elif isinstance(obj, (Row, Reference)):
return str(obj['id'])
return str(long(obj))
elif fieldtype == 'double':
return repr(float(obj))
if isinstance(obj, unicode):
obj = obj.encode(self.db_codec)
if fieldtype == 'blob':
obj = base64.b64encode(str(obj))
elif fieldtype == 'date':
if isinstance(obj, (datetime.date, datetime.datetime)):
obj = obj.isoformat()[:10]
else:
obj = str(obj)
elif fieldtype == 'datetime':
if isinstance(obj, datetime.datetime):
obj = obj.isoformat(self.T_SEP)[:19]
elif isinstance(obj, datetime.date):
obj = obj.isoformat()[:10]+self.T_SEP+'00:00:00'
else:
obj = str(obj)
elif fieldtype == 'time':
if isinstance(obj, datetime.time):
obj = obj.isoformat()[:10]
else:
obj = str(obj)
elif fieldtype == 'json':
if not self.native_json:
if have_serializers:
obj = serializers.json(obj)
elif simplejson:
obj = simplejson.dumps(obj)
else:
raise RuntimeError("missing simplejson")
if not isinstance(obj,bytes):
obj = bytes(obj)
try:
obj.decode(self.db_codec)
except:
obj = obj.decode('latin1').encode(self.db_codec)
return self.adapt(obj)
def represent_exceptions(self, obj, fieldtype):
return None
def lastrowid(self, table):
return None
def rowslice(self, rows, minimum=0, maximum=None):
"""
By default this function does nothing;
overload when db does not do slicing.
"""
return rows
def parse_value(self, value, field_type, blob_decode=True):
if field_type != 'blob' and isinstance(value, str):
try:
value = value.decode(self.db._db_codec)
except Exception:
pass
if isinstance(value, unicode):
value = value.encode('utf-8')
if isinstance(field_type, SQLCustomType):
value = field_type.decoder(value)
if not isinstance(field_type, str) or value is None:
return value
elif field_type in ('string', 'text', 'password', 'upload', 'dict'):
return value
elif field_type.startswith('geo'):
return value
elif field_type == 'blob' and not blob_decode:
return value
else:
key = REGEX_TYPE.match(field_type).group(0)
return self.parsemap[key](value,field_type)
def parse_reference(self, value, field_type):
referee = field_type[10:].strip()
if not '.' in referee:
value = Reference(value)
value._table, value._record = self.db[referee], None
return value
def parse_boolean(self, value, field_type):
return value == self.TRUE or str(value)[:1].lower() == 't'
def parse_date(self, value, field_type):
if isinstance(value, datetime.datetime):
return value.date()
if not isinstance(value, (datetime.date,datetime.datetime)):
(y, m, d) = map(int, str(value)[:10].strip().split('-'))
value = datetime.date(y, m, d)
return value
def parse_time(self, value, field_type):
if not isinstance(value, datetime.time):
time_items = map(int,str(value)[:8].strip().split(':')[:3])
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
value = datetime.time(h, mi, s)
return value
def parse_datetime(self, value, field_type):
if not isinstance(value, datetime.datetime):
value = str(value)
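            # Manual parse of 'YYYY-MM-DD HH:MM:SS[.ffffff][+HH:MM]': take
            # the date, time and offset parts by position, then add (for
            # '+') or subtract (for '-') the offset to the naive datetime.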
date_part,time_part,timezone = value[:10],value[11:19],value[19:]
if '+' in timezone:
ms,tz = timezone.split('+')
h,m = tz.split(':')
dt = datetime.timedelta(seconds=3600*int(h)+60*int(m))
elif '-' in timezone:
ms,tz = timezone.split('-')
h,m = tz.split(':')
dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m))
else:
dt = None
(y, m, d) = map(int,date_part.split('-'))
time_parts = time_part and time_part.split(':')[:3] or (0,0,0)
while len(time_parts)<3: time_parts.append(0)
time_items = map(int,time_parts)
(h, mi, s) = time_items
value = datetime.datetime(y, m, d, h, mi, s)
if dt:
value = value + dt
return value
def parse_blob(self, value, field_type):
return base64.b64decode(str(value))
def parse_decimal(self, value, field_type):
decimals = int(field_type[8:-1].split(',')[-1])
if self.dbengine in ('sqlite', 'spatialite'):
value = ('%.' + str(decimals) + 'f') % value
if not isinstance(value, decimal.Decimal):
value = decimal.Decimal(str(value))
return value
def parse_list_integers(self, value, field_type):
if not isinstance(self, NoSQLAdapter):
value = bar_decode_integer(value)
return value
def parse_list_references(self, value, field_type):
if not isinstance(self, NoSQLAdapter):
value = bar_decode_integer(value)
return [self.parse_reference(r, field_type[5:]) for r in value]
def parse_list_strings(self, value, field_type):
if not isinstance(self, NoSQLAdapter):
value = bar_decode_string(value)
return value
def parse_id(self, value, field_type):
return long(value)
def parse_integer(self, value, field_type):
return long(value)
def parse_double(self, value, field_type):
return float(value)
def parse_json(self, value, field_type):
if not self.native_json:
if not isinstance(value, basestring):
raise RuntimeError('json data not a string')
if isinstance(value, unicode):
value = value.encode('utf-8')
if have_serializers:
value = serializers.loads_json(value)
elif simplejson:
value = simplejson.loads(value)
else:
raise RuntimeError("missing simplejson")
return value
def build_parsemap(self):
self.parsemap = {
'id':self.parse_id,
'integer':self.parse_integer,
'bigint':self.parse_integer,
'float':self.parse_double,
'double':self.parse_double,
'reference':self.parse_reference,
'boolean':self.parse_boolean,
'date':self.parse_date,
'time':self.parse_time,
'datetime':self.parse_datetime,
'blob':self.parse_blob,
'decimal':self.parse_decimal,
'json':self.parse_json,
'list:integer':self.parse_list_integers,
'list:reference':self.parse_list_references,
'list:string':self.parse_list_strings,
}
def parse(self, rows, fields, colnames, blob_decode=True,
cacheable = False):
db = self.db
virtualtables = []
new_rows = []
tmps = []
for colname in colnames:
col_m = self.REGEX_TABLE_DOT_FIELD.match(colname)
if not col_m:
tmps.append(None)
else:
tablename, fieldname = col_m.groups()
table = db[tablename]
field = table[fieldname]
ft = field.type
tmps.append((tablename, fieldname, table, field, ft))
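        # Second pass: turn each DB row into a Row of per-table Rows.
        # Columns matching table.field are parsed with parse_value and
        # grouped under their table name; 'id' columns additionally get
        # update_record / delete_record helpers and lazy sets for the
        # referencing tables; columns with no table.field match (e.g.
        # expressions selected with AS) are collected under '_extra'.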
for (i,row) in enumerate(rows):
new_row = Row()
for (j,colname) in enumerate(colnames):
value = row[j]
tmp = tmps[j]
if tmp:
(tablename,fieldname,table,field,ft) = tmp
colset = new_row.get(tablename, None)
if colset is None:
colset = new_row[tablename] = Row()
if tablename not in virtualtables:
virtualtables.append(tablename)
value = self.parse_value(value,ft,blob_decode)
if field.filter_out:
value = field.filter_out(value)
colset[fieldname] = value
# for backward compatibility
if ft=='id' and fieldname!='id' and \
not 'id' in table.fields:
colset['id'] = value
if ft == 'id' and not cacheable:
# temporary hack to deal with
# GoogleDatastoreAdapter
# references
if isinstance(self, GoogleDatastoreAdapter):
id = value.key.id() if self.use_ndb else value.key().id_or_name()
colset[fieldname] = id
colset.gae_item = value
else:
id = value
colset.update_record = RecordUpdater(colset,table,id)
colset.delete_record = RecordDeleter(table,id)
if table._db._lazy_tables:
colset['__get_lazy_reference__'] = LazyReferenceGetter(table, id)
for rfield in table._referenced_by:
referee_link = db._referee_name and \
db._referee_name % dict(
table=rfield.tablename,field=rfield.name)
if referee_link and not referee_link in colset:
colset[referee_link] = LazySet(rfield,id)
else:
if not '_extra' in new_row:
new_row['_extra'] = Row()
new_row['_extra'][colname] = \
self.parse_value(value,
fields[j].type,blob_decode)
new_column_name = \
REGEX_SELECT_AS_PARSER.search(colname)
if not new_column_name is None:
column_name = new_column_name.groups(0)
setattr(new_row,column_name[0],value)
new_rows.append(new_row)
rowsobj = Rows(db, new_rows, colnames, rawrows=rows)
for tablename in virtualtables:
table = db[tablename]
fields_virtual = [(f,v) for (f,v) in table.iteritems()
if isinstance(v,FieldVirtual)]
fields_lazy = [(f,v) for (f,v) in table.iteritems()
if isinstance(v,FieldMethod)]
if fields_virtual or fields_lazy:
for row in rowsobj.records:
box = row[tablename]
for f,v in fields_virtual:
try:
box[f] = v.f(row)
except AttributeError:
pass # not enough fields to define virtual field
for f,v in fields_lazy:
try:
box[f] = (v.handler or VirtualCommand)(v.f,row)
except AttributeError:
pass # not enough fields to define virtual field
### old style virtual fields
for item in table.virtualfields:
try:
rowsobj = rowsobj.setvirtualfields(**{tablename:item})
except (KeyError, AttributeError):
# to avoid breaking virtualfields when partial select
pass
return rowsobj
def common_filter(self, query, tablenames):
tenant_fieldname = self.db._request_tenant
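        # For every table involved, AND into the query: (a) the table's
        # user-provided _common_filter callable, if any, and (b) a
        # multi-tenant filter requiring the request-tenant field to equal
        # its default value.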
for tablename in tablenames:
table = self.db[tablename]
# deal with user provided filters
if table._common_filter != None:
query = query & table._common_filter(query)
# deal with multi_tenant filters
if tenant_fieldname in table:
default = table[tenant_fieldname].default
if not default is None:
newquery = table[tenant_fieldname] == default
if query is None:
query = newquery
else:
query = query & newquery
return query
def CASE(self,query,t,f):
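        # Builds a raw 'CASE WHEN <query> THEN <t> ELSE <f> END' Expression;
        # t and f are represented with an inferred type (boolean, integer or
        # double when recognised, string otherwise).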
def represent(x):
types = {type(True):'boolean',type(0):'integer',type(1.0):'double'}
if x is None: return 'NULL'
elif isinstance(x,Expression): return str(x)
else: return self.represent(x,types.get(type(x),'string'))
return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \
(self.expand(query),represent(t),represent(f)))
def sqlsafe_table(self, tablename, ot=None):
if ot is not None:
return ('%s AS ' + self.QUOTE_TEMPLATE) % (ot, tablename)
return self.QUOTE_TEMPLATE % tablename
def sqlsafe_field(self, fieldname):
return self.QUOTE_TEMPLATE % fieldname
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################
class SQLiteAdapter(BaseAdapter):
drivers = ('sqlite2','sqlite3')
can_select_for_update = None # support ourselves with BEGIN TRANSACTION
def EXTRACT(self,field,what):
return "web2py_extract('%s',%s)" % (what, self.expand(field))
@staticmethod
def web2py_extract(lookup, s):
table = {
'year': (0, 4),
'month': (5, 7),
'day': (8, 10),
'hour': (11, 13),
'minute': (14, 16),
'second': (17, 19),
}
try:
if lookup != 'epoch':
(i, j) = table[lookup]
return int(s[i:j])
else:
return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple())
except:
return None
@staticmethod
def web2py_regexp(expression, item):
return re.compile(expression).search(item) is not None
def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "sqlite"
self.uri = uri
self.adapter_args = adapter_args
if do_connect: self.find_driver(adapter_args)
self.pool_size = 0
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
path_encoding = sys.getfilesystemencoding() \
or locale.getdefaultlocale()[1] or 'utf8'
if uri.startswith('sqlite:memory'):
self.dbpath = ':memory:'
else:
self.dbpath = uri.split('://',1)[1]
if self.dbpath[0] != '/':
if PYTHON_VERSION[0] == 2:
self.dbpath = pjoin(
self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
else:
self.dbpath = pjoin(self.folder, self.dbpath)
if not 'check_same_thread' in driver_args:
driver_args['check_same_thread'] = False
if not 'detect_types' in driver_args and do_connect:
driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
def connector(dbpath=self.dbpath, driver_args=driver_args):
return self.driver.Connection(dbpath, **driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.connection.create_function('web2py_extract', 2,
SQLiteAdapter.web2py_extract)
self.connection.create_function("REGEXP", 2,
SQLiteAdapter.web2py_regexp)
if self.adapter_args.get('foreign_keys',True):
self.execute('PRAGMA foreign_keys=ON;')
def _truncate(self, table, mode=''):
tablename = table._tablename
return ['DELETE FROM %s;' % tablename,
"DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]
def lastrowid(self, table):
return self.cursor.lastrowid
def REGEXP(self,first,second):
return '(%s REGEXP %s)' % (self.expand(first),
self.expand(second,'string'))
def select(self, query, fields, attributes):
"""
Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
Note that the entire database, rather than one record, is locked
(it will be locked eventually anyway by the following UPDATE).
"""
if attributes.get('for_update', False) and not 'cache' in attributes:
self.execute('BEGIN IMMEDIATE TRANSACTION;')
return super(SQLiteAdapter, self).select(query, fields, attributes)
class SpatiaLiteAdapter(SQLiteAdapter):
drivers = ('sqlite3','sqlite2')
types = copy.copy(BaseAdapter.types)
types.update(geometry='GEOMETRY')
def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326, after_connection=None):
self.db = db
self.dbengine = "spatialite"
self.uri = uri
if do_connect: self.find_driver(adapter_args)
self.pool_size = 0
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
self.srid = srid
path_encoding = sys.getfilesystemencoding() \
or locale.getdefaultlocale()[1] or 'utf8'
if uri.startswith('spatialite:memory'):
self.dbpath = ':memory:'
else:
self.dbpath = uri.split('://',1)[1]
if self.dbpath[0] != '/':
self.dbpath = pjoin(
self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
if not 'check_same_thread' in driver_args:
driver_args['check_same_thread'] = False
if not 'detect_types' in driver_args and do_connect:
driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
def connector(dbpath=self.dbpath, driver_args=driver_args):
return self.driver.Connection(dbpath, **driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.connection.enable_load_extension(True)
# for Windows, rename libspatialite-2.dll to libspatialite.dll
# Linux uses libspatialite.so
# Mac OS X uses libspatialite.dylib
libspatialite = SPATIALLIBS[platform.system()]
self.execute(r'SELECT load_extension("%s");' % libspatialite)
self.connection.create_function('web2py_extract', 2,
SQLiteAdapter.web2py_extract)
self.connection.create_function("REGEXP", 2,
SQLiteAdapter.web2py_regexp)
# GIS functions
def ST_ASGEOJSON(self, first, second):
return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first),
second['precision'], second['options'])
def ST_ASTEXT(self, first):
return 'AsText(%s)' %(self.expand(first))
def ST_CONTAINS(self, first, second):
return 'Contains(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def ST_DISTANCE(self, first, second):
return 'Distance(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def ST_EQUALS(self, first, second):
return 'Equals(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def ST_INTERSECTS(self, first, second):
return 'Intersects(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def ST_OVERLAPS(self, first, second):
return 'Overlaps(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def ST_SIMPLIFY(self, first, second):
return 'Simplify(%s,%s)' %(self.expand(first),
self.expand(second, 'double'))
def ST_TOUCHES(self, first, second):
return 'Touches(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def ST_WITHIN(self, first, second):
return 'Within(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def represent(self, obj, fieldtype):
field_is_type = fieldtype.startswith
if field_is_type('geo'):
srid = 4326 # Spatialite default srid for geometry
geotype, parms = fieldtype[:-1].split('(')
parms = parms.split(',')
if len(parms) >= 2:
schema, srid = parms[:2]
# if field_is_type('geometry'):
value = "ST_GeomFromText('%s',%s)" %(obj, srid)
# elif field_is_type('geography'):
# value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
# else:
# raise SyntaxError, 'Invalid field type %s' %fieldtype
return value
return BaseAdapter.represent(self, obj, fieldtype)
class JDBCSQLiteAdapter(SQLiteAdapter):
drivers = ('zxJDBC_sqlite',)
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "sqlite"
self.uri = uri
if do_connect: self.find_driver(adapter_args)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
path_encoding = sys.getfilesystemencoding() \
or locale.getdefaultlocale()[1] or 'utf8'
if uri.startswith('sqlite:memory'):
self.dbpath = ':memory:'
else:
self.dbpath = uri.split('://',1)[1]
if self.dbpath[0] != '/':
self.dbpath = pjoin(
self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
def connector(dbpath=self.dbpath,driver_args=driver_args):
return self.driver.connect(
self.driver.getConnection('jdbc:sqlite:'+dbpath),
**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
# FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
self.connection.create_function('web2py_extract', 2,
SQLiteAdapter.web2py_extract)
def execute(self, a):
return self.log_execute(a)
class MySQLAdapter(BaseAdapter):
drivers = ('MySQLdb','pymysql', 'mysqlconnector')
commit_on_alter_table = True
support_distributed_transaction = True
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'LONGTEXT',
'json': 'LONGTEXT',
'password': 'VARCHAR(%(length)s)',
'blob': 'LONGBLOB',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'DATETIME',
'id': 'INT AUTO_INCREMENT NOT NULL',
'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'LONGTEXT',
'list:string': 'LONGTEXT',
'list:reference': 'LONGTEXT',
'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT `FK_%(constraint_name)s` FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
QUOTE_TEMPLATE = "`%s`"
def varquote(self,name):
return varquote_aux(name,'`%s`')
def RANDOM(self):
return 'RAND()'
def SUBSTRING(self,field,parameters):
return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
parameters[0], parameters[1])
def EPOCH(self, first):
return "UNIX_TIMESTAMP(%s)" % self.expand(first)
def CONCAT(self, *items):
return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)
def REGEXP(self,first,second):
return '(%s REGEXP %s)' % (self.expand(first),
self.expand(second,'string'))
def _drop(self,table,mode):
        # temporarily disable foreign key checks; otherwise MySQL refuses to drop referenced tables
table_rname = table.sqlsafe
return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table_rname,
'SET FOREIGN_KEY_CHECKS=1;']
def _insert_empty(self, table):
return 'INSERT INTO %s VALUES (DEFAULT);' % (table.sqlsafe)
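    # Two-phase commit hooks mapped onto MySQL XA statements
    # (XA START / END / PREPARE / COMMIT / ROLLBACK).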
def distributed_transaction_begin(self,key):
self.execute('XA START;')
def prepare(self,key):
self.execute("XA END;")
self.execute("XA PREPARE;")
    def commit_prepared(self,key):
self.execute("XA COMMIT;")
def rollback_prepared(self,key):
self.execute("XA ROLLBACK;")
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')
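    # Matches connection URIs of the form
    #   mysql://user:password@host:port/dbname?set_encoding=utf8
    # (password, port and charset are optional).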
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "mysql"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = int(m.group('port') or '3306')
charset = m.group('charset') or 'utf8'
driver_args.update(db=db,
user=credential_decoder(user),
passwd=credential_decoder(password),
host=host,
port=port,
charset=charset)
def connector(driver_args=driver_args):
return self.driver.connect(**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.execute('SET FOREIGN_KEY_CHECKS=1;')
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
def lastrowid(self,table):
self.execute('select last_insert_id();')
return int(self.cursor.fetchone()[0])
class PostgreSQLAdapter(BaseAdapter):
drivers = ('psycopg2','pg8000')
QUOTE_TEMPLATE = '"%s"'
support_distributed_transaction = True
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'TEXT',
'json': 'TEXT',
'password': 'VARCHAR(%(length)s)',
'blob': 'BYTEA',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INTEGER',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT8',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'SERIAL PRIMARY KEY',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'TEXT',
'list:string': 'TEXT',
'list:reference': 'TEXT',
'geometry': 'GEOMETRY',
'geography': 'GEOGRAPHY',
'big-id': 'BIGSERIAL PRIMARY KEY',
'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT "FK_%(foreign_table)s_PK" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def varquote(self,name):
return varquote_aux(name,'"%s"')
def adapt(self,obj):
if self.driver_name == 'psycopg2':
return psycopg2_adapt(obj).getquoted()
elif self.driver_name == 'pg8000':
return "'%s'" % str(obj).replace("%","%%").replace("'","''")
else:
return "'%s'" % str(obj).replace("'","''")
def sequence_name(self,table):
return self.QUOTE_TEMPLATE % (table + '_id_seq')
def RANDOM(self):
return 'RANDOM()'
def ADD(self, first, second):
t = first.type
if t in ('text','string','password', 'json', 'upload','blob'):
return '(%s || %s)' % (self.expand(first), self.expand(second, t))
else:
return '(%s + %s)' % (self.expand(first), self.expand(second, t))
def distributed_transaction_begin(self,key):
return
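    # Two-phase commit via PostgreSQL prepared transactions, keyed by the
    # distributed-transaction key supplied by the caller.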
def prepare(self,key):
self.execute("PREPARE TRANSACTION '%s';" % key)
def commit_prepared(self,key):
self.execute("COMMIT PREPARED '%s';" % key)
def rollback_prepared(self,key):
self.execute("ROLLBACK PREPARED '%s';" % key)
def create_sequence_and_triggers(self, query, table, **args):
# following lines should only be executed if table._sequence_name does not exist
# self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
# self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
# % (table._tablename, table._fieldname, table._sequence_name))
self.execute(query)
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')
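    # Matches connection URIs of the form
    #   postgres://user:password@host:port/dbname?sslmode=require
    # (password, port and sslmode are optional).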
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.db = db
self.dbengine = "postgres"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.srid = srid
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = m.group('port') or '5432'
sslmode = m.group('sslmode')
if sslmode:
msg = ("dbname='%s' user='%s' host='%s' "
"port=%s password='%s' sslmode='%s'") \
% (db, user, host, port, password, sslmode)
else:
msg = ("dbname='%s' user='%s' host='%s' "
"port=%s password='%s'") \
% (db, user, host, port, password)
        # choose the driver according to the uri
if self.driver:
self.__version__ = "%s %s" % (self.driver.__name__,
self.driver.__version__)
else:
self.__version__ = None
def connector(msg=msg,driver_args=driver_args):
return self.driver.connect(msg,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.connection.set_client_encoding('UTF8')
self.execute("SET standard_conforming_strings=on;")
self.try_json()
def lastrowid(self,table = None):
self.execute("select lastval()")
return int(self.cursor.fetchone()[0])
def try_json(self):
# check JSON data type support
# (to be added to after_connection)
if self.driver_name == "pg8000":
supports_json = self.connection.server_version >= "9.2.0"
elif (self.driver_name == "psycopg2") and \
(self.driver.__version__ >= "2.0.12"):
supports_json = self.connection.server_version >= 90200
elif self.driver_name == "zxJDBC":
supports_json = self.connection.dbversion >= "9.2.0"
else: supports_json = None
if supports_json:
self.types["json"] = "JSON"
self.native_json = True
else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")
def LIKE(self,first,second):
args = (self.expand(first), self.expand(second,'string'))
if not first.type in ('string', 'text', 'json'):
return '(%s LIKE %s)' % (
self.CAST(args[0], 'CHAR(%s)' % first.length), args[1])
else:
return '(%s LIKE %s)' % args
def ILIKE(self,first,second):
args = (self.expand(first), self.expand(second,'string'))
if not first.type in ('string', 'text', 'json'):
return '(%s LIKE %s)' % (
self.CAST(args[0], 'CHAR(%s)' % first.length), args[1])
else:
return '(%s ILIKE %s)' % args
def REGEXP(self,first,second):
return '(%s ~ %s)' % (self.expand(first),
self.expand(second,'string'))
def STARTSWITH(self,first,second):
return '(%s ILIKE %s)' % (self.expand(first),
self.expand(second+'%','string'))
def ENDSWITH(self,first,second):
return '(%s ILIKE %s)' % (self.expand(first),
self.expand('%'+second,'string'))
# GIS functions
def ST_ASGEOJSON(self, first, second):
"""
http://postgis.org/docs/ST_AsGeoJSON.html
"""
return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'],
self.expand(first), second['precision'], second['options'])
def ST_ASTEXT(self, first):
"""
http://postgis.org/docs/ST_AsText.html
"""
return 'ST_AsText(%s)' %(self.expand(first))
def ST_X(self, first):
"""
http://postgis.org/docs/ST_X.html
"""
return 'ST_X(%s)' %(self.expand(first))
def ST_Y(self, first):
"""
http://postgis.org/docs/ST_Y.html
"""
return 'ST_Y(%s)' %(self.expand(first))
def ST_CONTAINS(self, first, second):
"""
http://postgis.org/docs/ST_Contains.html
"""
return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_DISTANCE(self, first, second):
"""
http://postgis.org/docs/ST_Distance.html
"""
return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_EQUALS(self, first, second):
"""
http://postgis.org/docs/ST_Equals.html
"""
return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_INTERSECTS(self, first, second):
"""
http://postgis.org/docs/ST_Intersects.html
"""
return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_OVERLAPS(self, first, second):
"""
http://postgis.org/docs/ST_Overlaps.html
"""
return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_SIMPLIFY(self, first, second):
"""
http://postgis.org/docs/ST_Simplify.html
"""
return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))
def ST_TOUCHES(self, first, second):
"""
http://postgis.org/docs/ST_Touches.html
"""
return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_WITHIN(self, first, second):
"""
http://postgis.org/docs/ST_Within.html
"""
return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_DWITHIN(self, first, (second, third)):
"""
http://postgis.org/docs/ST_DWithin.html
"""
return 'ST_DWithin(%s,%s,%s)' %(self.expand(first),
self.expand(second, first.type),
self.expand(third, 'double'))
def represent(self, obj, fieldtype):
field_is_type = fieldtype.startswith
if field_is_type('geo'):
srid = 4326 # postGIS default srid for geometry
geotype, parms = fieldtype[:-1].split('(')
parms = parms.split(',')
if len(parms) >= 2:
schema, srid = parms[:2]
if field_is_type('geometry'):
value = "ST_GeomFromText('%s',%s)" %(obj, srid)
elif field_is_type('geography'):
value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
# else:
# raise SyntaxError('Invalid field type %s' %fieldtype)
return value
return BaseAdapter.represent(self, obj, fieldtype)
def _drop(self, table, mode='restrict'):
if mode not in ['restrict', 'cascade', '']:
raise ValueError('Invalid mode: %s' % mode)
return ['DROP TABLE ' + table.sqlsafe + ' ' + str(mode) + ';']
class NewPostgreSQLAdapter(PostgreSQLAdapter):
drivers = ('psycopg2','pg8000')
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'TEXT',
'json': 'TEXT',
'password': 'VARCHAR(%(length)s)',
'blob': 'BYTEA',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INTEGER',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT8',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'SERIAL PRIMARY KEY',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'BIGINT[]',
'list:string': 'TEXT[]',
'list:reference': 'BIGINT[]',
'geometry': 'GEOMETRY',
'geography': 'GEOGRAPHY',
'big-id': 'BIGSERIAL PRIMARY KEY',
'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
def parse_list_integers(self, value, field_type):
return value
def parse_list_references(self, value, field_type):
return [self.parse_reference(r, field_type[5:]) for r in value]
def parse_list_strings(self, value, field_type):
return value
def represent(self, obj, fieldtype):
field_is_type = fieldtype.startswith
if field_is_type('list:'):
if not obj:
obj = []
elif not isinstance(obj, (list, tuple)):
obj = [obj]
if field_is_type('list:string'):
obj = map(str,obj)
else:
obj = map(int,obj)
return 'ARRAY[%s]' % ','.join(repr(item) for item in obj)
return BaseAdapter.represent(self, obj, fieldtype)
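# Illustrative sketch (added comment, not from the original source): with
# this adapter the list:* field types are stored as native PostgreSQL arrays
# instead of the bar-delimited strings used by the base adapter.  For example:
#
#   adapter.represent([1, 2, 3], 'list:integer')  ->  "ARRAY[1,2,3]"
#   adapter.represent(['a', 'b'], 'list:string')  ->  "ARRAY['a','b']"
#
# (the second case relies on repr() quoting each string element).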
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
drivers = ('zxJDBC',)
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None ):
self.db = db
self.dbengine = "postgres"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = m.group('port') or '5432'
msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
def connector(msg=msg,driver_args=driver_args):
return self.driver.connect(*msg,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.connection.set_client_encoding('UTF8')
self.execute('BEGIN;')
self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
self.try_json()
class OracleAdapter(BaseAdapter):
drivers = ('cx_Oracle',)
commit_on_alter_table = False
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR2(%(length)s)',
'text': 'CLOB',
'json': 'CLOB',
'password': 'VARCHAR2(%(length)s)',
'blob': 'CLOB',
'upload': 'VARCHAR2(%(length)s)',
'integer': 'INT',
'bigint': 'NUMBER',
'float': 'FLOAT',
'double': 'BINARY_DOUBLE',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'CHAR(8)',
'datetime': 'DATE',
'id': 'NUMBER PRIMARY KEY',
'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'CLOB',
'list:string': 'CLOB',
'list:reference': 'CLOB',
'big-id': 'NUMBER PRIMARY KEY',
'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def trigger_name(self,tablename):
return '%s_trigger' % tablename
def LEFT_JOIN(self):
return 'LEFT OUTER JOIN'
def RANDOM(self):
return 'dbms_random.value'
def NOT_NULL(self,default,field_type):
return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
def _drop(self,table,mode):
sequence_name = table._sequence_name
return ['DROP TABLE %s %s;' % (table.sqlsafe, mode), 'DROP SEQUENCE %s;' % sequence_name]
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
if len(sql_w) > 1:
sql_w_row = sql_w + ' AND w_row > %i' % lmin
else:
sql_w_row = 'WHERE w_row > %i' % lmin
return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
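# Illustrative note (added comment): Oracle versions targeted here have no
# LIMIT/OFFSET, so limitby=(lmin, lmax) is emulated with a nested query: the
# inner SELECT is wrapped, ROWNUM is exposed as w_row, rows with
# ROWNUM <= lmax are kept, and the outer WHERE discards w_row <= lmin.
# A limitby of (10, 20) therefore produces SQL shaped roughly like:
#
#   SELECT ... FROM (SELECT w_tmp.*, ROWNUM w_row FROM (...) w_tmp
#       WHERE ROWNUM<=20) <table> WHERE w_row > 10 ...;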
def constraint_name(self, tablename, fieldname):
constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
if len(constraint_name)>30:
constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
return constraint_name
def represent_exceptions(self, obj, fieldtype):
if fieldtype == 'blob':
obj = base64.b64encode(str(obj))
return ":CLOB('%s')" % obj
elif fieldtype == 'date':
if isinstance(obj, (datetime.date, datetime.datetime)):
obj = obj.isoformat()[:10]
else:
obj = str(obj)
return "to_date('%s','yyyy-mm-dd')" % obj
elif fieldtype == 'datetime':
if isinstance(obj, datetime.datetime):
obj = obj.isoformat()[:19].replace('T',' ')
elif isinstance(obj, datetime.date):
obj = obj.isoformat()[:10]+' 00:00:00'
else:
obj = str(obj)
return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
return None
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "oracle"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
if not 'threaded' in driver_args:
driver_args['threaded']=True
def connector(uri=ruri,driver_args=driver_args):
return self.driver.connect(uri,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")
def execute(self, command, args=None):
args = args or []
i = 1
while True:
m = self.oracle_fix.match(command)
if not m:
break
command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
args.append(m.group('clob')[6:-2].replace("''", "'"))
i += 1
if command[-1:]==';':
command = command[:-1]
return self.log_execute(command, args)
def create_sequence_and_triggers(self, query, table, **args):
tablename = table._tablename
id_name = table._id.name
sequence_name = table._sequence_name
trigger_name = table._trigger_name
self.execute(query)
self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
self.execute("""
CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
DECLARE
curr_val NUMBER;
diff_val NUMBER;
PRAGMA autonomous_transaction;
BEGIN
IF :NEW.%(id)s IS NOT NULL THEN
EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
diff_val := :NEW.%(id)s - curr_val - 1;
IF diff_val != 0 THEN
EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
END IF;
END IF;
SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL;
END;
""" % dict(trigger_name=trigger_name, tablename=tablename,
sequence_name=sequence_name,id=id_name))
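# Descriptive note (added, not in the original source): the trigger above
# emulates an auto-increment id.  When the caller supplies an explicit id on
# INSERT, the autonomous block temporarily changes the sequence increment so
# that the following NEXTVAL lands exactly on the supplied value; in every
# case the final SELECT assigns the sequence NEXTVAL to :NEW.<id>, keeping
# the sequence in step with the ids actually stored.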
def lastrowid(self,table):
sequence_name = table._sequence_name
self.execute('SELECT %s.currval FROM dual;' % sequence_name)
return long(self.cursor.fetchone()[0])
#def parse_value(self, value, field_type, blob_decode=True):
# if blob_decode and isinstance(value, cx_Oracle.LOB):
# try:
# value = value.read()
# except self.driver.ProgrammingError:
# # After a subsequent fetch the LOB value is not valid anymore
# pass
# return BaseAdapter.parse_value(self, value, field_type, blob_decode)
def _fetchall(self):
if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description):
return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
for c in r]) for r in self.cursor]
else:
return self.cursor.fetchall()
def sqlsafe_table(self, tablename, ot=None):
if ot is not None:
return (self.QUOTE_TEMPLATE + ' ' \
+ self.QUOTE_TEMPLATE) % (ot, tablename)
return self.QUOTE_TEMPLATE % tablename
class MSSQLAdapter(BaseAdapter):
drivers = ('pyodbc',)
T_SEP = 'T'
QUOTE_TEMPLATE = '"%s"'
types = {
'boolean': 'BIT',
'string': 'VARCHAR(%(length)s)',
'text': 'TEXT',
'json': 'TEXT',
'password': 'VARCHAR(%(length)s)',
'blob': 'IMAGE',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATETIME',
'time': 'CHAR(8)',
'datetime': 'DATETIME',
'id': 'INT IDENTITY PRIMARY KEY',
'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'TEXT',
'list:string': 'TEXT',
'list:reference': 'TEXT',
'geometry': 'geometry',
'geography': 'geography',
'big-id': 'BIGINT IDENTITY PRIMARY KEY',
'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def concat_add(self,tablename):
return '; ALTER TABLE %s ADD ' % tablename
def varquote(self,name):
return varquote_aux(name,'[%s]')
def EXTRACT(self,field,what):
return "DATEPART(%s,%s)" % (what, self.expand(field))
def LEFT_JOIN(self):
return 'LEFT OUTER JOIN'
def RANDOM(self):
return 'NEWID()'
def ALLOW_NULL(self):
return ' NULL'
def CAST(self, first, second):
return first # apparently no cast necessary in MSSQL
def SUBSTRING(self,field,parameters):
return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
def PRIMARY_KEY(self,key):
return 'PRIMARY KEY CLUSTERED (%s)' % key
def AGGREGATE(self, first, what):
if what == 'LENGTH':
what = 'LEN'
return "%s(%s)" % (what, self.expand(first))
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_s += ' TOP %i' % lmax
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
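# Note (added comment): T-SQL's TOP can only cap the number of rows, so only
# lmax is pushed into the SQL here; the lmin offset of limitby is expected to
# be applied client-side by rowslice() (defined further below), which slices
# the already-fetched rows.  MSSQL3Adapter and MSSQL4Adapter override this
# with ROW_NUMBER() and OFFSET ... FETCH based pagination respectively.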
TRUE = 1
FALSE = 0
REGEX_DSN = re.compile('^(?P<dsn>.+)$')
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$')
REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.db = db
self.dbengine = "mssql"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.srid = srid
self.find_or_make_work_folder()
# ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
ruri = uri.split('://',1)[1]
if '@' not in ruri:
try:
m = self.REGEX_DSN.match(ruri)
if not m:
raise SyntaxError(
'Parsing uri string(%s) has no result' % self.uri)
dsn = m.group('dsn')
if not dsn:
raise SyntaxError('DSN required')
except SyntaxError:
e = sys.exc_info()[1]
LOGGER.error('Invalid DSN connection string')
raise e
# was cnxn = 'DSN=%s' % dsn
cnxn = dsn
else:
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = m.group('port') or '1433'
# Parse the optional url name-value arg pairs after the '?'
# (in the form of arg1=value1&arg2=value2&...)
# Default values (drivers like FreeTDS insist on uppercase parameter keys)
argsdict = { 'DRIVER':'{SQL Server}' }
urlargs = m.group('urlargs') or ''
for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()])
cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
% (host, port, db, user, password, urlargs)
def connector(cnxn=cnxn,driver_args=driver_args):
return self.driver.connect(cnxn,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def lastrowid(self,table):
#self.execute('SELECT @@IDENTITY;')
self.execute('SELECT SCOPE_IDENTITY();')
return long(self.cursor.fetchone()[0])
def rowslice(self,rows,minimum=0,maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
def EPOCH(self, first):
return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
def CONCAT(self, *items):
return '(%s)' % ' + '.join(self.expand(x,'string') for x in items)
# GIS Spatial Extensions
# No STAsGeoJSON in MSSQL
def ST_ASTEXT(self, first):
return '%s.STAsText()' %(self.expand(first))
def ST_CONTAINS(self, first, second):
return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))
def ST_DISTANCE(self, first, second):
return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))
def ST_EQUALS(self, first, second):
return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))
def ST_INTERSECTS(self, first, second):
return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))
def ST_OVERLAPS(self, first, second):
return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))
# no STSimplify in MSSQL
def ST_TOUCHES(self, first, second):
return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))
def ST_WITHIN(self, first, second):
return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))
def represent(self, obj, fieldtype):
field_is_type = fieldtype.startswith
if field_is_type('geometry'):
srid = 0 # MS SQL default srid for geometry
geotype, parms = fieldtype[:-1].split('(')
if parms:
srid = parms
return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
elif fieldtype == 'geography':
srid = 4326 # MS SQL default srid for geography
geotype, parms = fieldtype[:-1].split('(')
if parms:
srid = parms
return "geography::STGeomFromText('%s',%s)" %(obj, srid)
# else:
# raise SyntaxError('Invalid field type %s' %fieldtype)
return BaseAdapter.represent(self, obj, fieldtype)
class MSSQL3Adapter(MSSQLAdapter):
""" experimental support for pagination in MSSQL"""
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
if lmin == 0:
sql_s += ' TOP %i' % lmax
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
lmin += 1
sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:]
sql_g_inner = sql_o[:sql_o.find('ORDER BY ')]
sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))]
sql_f_inner = [f for f in sql_f.split(',')]
sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)]
sql_f_iproxy = ', '.join(sql_f_iproxy)
sql_f_oproxy = ', '.join(sql_f_outer)
return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax)
return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)
def rowslice(self,rows,minimum=0,maximum=None):
return rows
class MSSQL4Adapter(MSSQLAdapter):
""" support for true pagination in MSSQL >= 2012"""
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
if lmin == 0:
#TOP is still slightly faster here, especially because
#web2py's default behaviour when fetching references is to not
#specify an ORDER BY clause
sql_s += ' TOP %i' % lmax
else:
if not sql_o:
#OFFSET/FETCH requires an ORDER BY clause; if the caller did not
#provide one, fall back to an arbitrary (random) ordering
sql_o += ' ORDER BY %s' % self.RANDOM()
sql_o += ' OFFSET %i ROWS FETCH NEXT %i ROWS ONLY' % (lmin, lmax - lmin)
return 'SELECT %s %s FROM %s%s%s;' % \
(sql_s, sql_f, sql_t, sql_w, sql_o)
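# Illustrative example (added comment): with SQL Server 2012+ a limitby of
# (10, 30) together with an explicit orderby produces SQL roughly like
#
#   SELECT ... FROM ... ORDER BY <field>
#       OFFSET 10 ROWS FETCH NEXT 20 ROWS ONLY;
#
# whereas limitby=(0, 30) keeps using TOP 30 as in the parent adapter.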
def rowslice(self,rows,minimum=0,maximum=None):
return rows
class MSSQL2Adapter(MSSQLAdapter):
drivers = ('pyodbc',)
types = {
'boolean': 'CHAR(1)',
'string': 'NVARCHAR(%(length)s)',
'text': 'NTEXT',
'json': 'NTEXT',
'password': 'NVARCHAR(%(length)s)',
'blob': 'IMAGE',
'upload': 'NVARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATETIME',
'time': 'CHAR(8)',
'datetime': 'DATETIME',
'id': 'INT IDENTITY PRIMARY KEY',
'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'NTEXT',
'list:string': 'NTEXT',
'list:reference': 'NTEXT',
'big-id': 'BIGINT IDENTITY PRIMARY KEY',
'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def represent(self, obj, fieldtype):
value = BaseAdapter.represent(self, obj, fieldtype)
if fieldtype in ('string','text', 'json') and value[:1]=="'":
value = 'N'+value
return value
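# Illustrative example (added comment): the N prefix marks the literal as a
# Unicode (NVARCHAR/NTEXT) constant, e.g.
#
#   adapter.represent('hello', 'string')  ->  "N'hello'"
#
# assuming BaseAdapter.represent returns the plain quoted literal "'hello'".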
def execute(self,a):
return self.log_execute(a.decode('utf8'))
class VerticaAdapter(MSSQLAdapter):
drivers = ('pyodbc',)
T_SEP = ' '
types = {
'boolean': 'BOOLEAN',
'string': 'VARCHAR(%(length)s)',
'text': 'BYTEA',
'json': 'VARCHAR(%(length)s)',
'password': 'VARCHAR(%(length)s)',
'blob': 'BYTEA',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE PRECISION',
'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'DATETIME',
'id': 'IDENTITY',
'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'BYTEA',
'list:string': 'BYTEA',
'list:reference': 'BYTEA',
'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
def EXTRACT(self, first, what):
return "DATE_PART('%s', TIMESTAMP %s)" % (what, self.expand(first))
def _truncate(self, table, mode=''):
tablename = table._tablename
return ['TRUNCATE %s %s;' % (tablename, mode or '')]
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
return 'SELECT %s %s FROM %s%s%s;' % \
(sql_s, sql_f, sql_t, sql_w, sql_o)
def lastrowid(self,table):
self.execute('SELECT LAST_INSERT_ID();')
return long(self.cursor.fetchone()[0])
def execute(self, a):
return self.log_execute(a)
class SybaseAdapter(MSSQLAdapter):
drivers = ('Sybase',)
types = {
'boolean': 'BIT',
'string': 'CHAR VARYING(%(length)s)',
'text': 'TEXT',
'json': 'TEXT',
'password': 'CHAR VARYING(%(length)s)',
'blob': 'IMAGE',
'upload': 'CHAR VARYING(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATETIME',
'time': 'CHAR(8)',
'datetime': 'DATETIME',
'id': 'INT IDENTITY PRIMARY KEY',
'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'TEXT',
'list:string': 'TEXT',
'list:reference': 'TEXT',
'geometry': 'geometry',
'geography': 'geography',
'big-id': 'BIGINT IDENTITY PRIMARY KEY',
'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.db = db
self.dbengine = "sybase"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.srid = srid
self.find_or_make_work_folder()
# ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
ruri = uri.split('://',1)[1]
if '@' not in ruri:
try:
m = self.REGEX_DSN.match(ruri)
if not m:
raise SyntaxError(
'Parsing uri string(%s) has no result' % self.uri)
dsn = m.group('dsn')
if not dsn:
raise SyntaxError('DSN required')
except SyntaxError:
e = sys.exc_info()[1]
LOGGER.error('Invalid DSN connection string')
raise e
else:
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = m.group('port') or '1433'
dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db)
driver_args.update(user = credential_decoder(user),
password = credential_decoder(password))
def connector(dsn=dsn,driver_args=driver_args):
return self.driver.connect(dsn,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
class FireBirdAdapter(BaseAdapter):
drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')
commit_on_alter_table = False
support_distributed_transaction = True
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'BLOB SUB_TYPE 1',
'json': 'BLOB SUB_TYPE 1',
'password': 'VARCHAR(%(length)s)',
'blob': 'BLOB SUB_TYPE 0',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INTEGER',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE PRECISION',
'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'INTEGER PRIMARY KEY',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'BLOB SUB_TYPE 1',
'list:string': 'BLOB SUB_TYPE 1',
'list:reference': 'BLOB SUB_TYPE 1',
'big-id': 'BIGINT PRIMARY KEY',
'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
def sequence_name(self,tablename):
return ('genid_' + self.QUOTE_TEMPLATE) % tablename
def trigger_name(self,tablename):
return 'trg_id_%s' % tablename
def RANDOM(self):
return 'RAND()'
def EPOCH(self, first):
return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
def NOT_NULL(self,default,field_type):
return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
def SUBSTRING(self,field,parameters):
return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])
def LENGTH(self, first):
return "CHAR_LENGTH(%s)" % self.expand(first)
def CONTAINS(self,first,second,case_sensitive=False):
if first.type.startswith('list:'):
second = Expression(None,self.CONCAT('|',Expression(
None,self.REPLACE(second,('|','||'))),'|'))
return '(%s CONTAINING %s)' % (self.expand(first),
self.expand(second, 'string'))
def _drop(self,table,mode):
sequence_name = table._sequence_name
return ['DROP TABLE %s %s;' % (table.sqlsafe, mode), 'DROP GENERATOR %s;' % sequence_name]
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s)
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
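# Illustrative example (added comment): Firebird puts its pagination keywords
# right after SELECT, so limitby=(10, 30) is prepended to sql_s and yields
# SQL of the form
#
#   SELECT FIRST 20 SKIP 10 ... FROM ...;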
def _truncate(self,table,mode = ''):
return ['DELETE FROM %s;' % table._tablename,
'SET GENERATOR %s TO 0;' % table._sequence_name]
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "firebird"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
port = int(m.group('port') or 3050)
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
charset = m.group('charset') or 'UTF8'
driver_args.update(dsn='%s/%s:%s' % (host,port,db),
user = credential_decoder(user),
password = credential_decoder(password),
charset = charset)
def connector(driver_args=driver_args):
return self.driver.connect(**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def create_sequence_and_triggers(self, query, table, **args):
tablename = table._tablename
sequence_name = table._sequence_name
trigger_name = table._trigger_name
self.execute(query)
self.execute('create generator %s;' % sequence_name)
self.execute('set generator %s to 0;' % sequence_name)
self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))
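# Descriptive note (added, not in the original source): Firebird has no
# auto-increment column type in the versions targeted here, so the id field
# is emulated with a generator (sequence) plus a BEFORE INSERT trigger that
# fills new.id from gen_id() whenever the caller did not supply one;
# lastrowid() below then reads the generator's current value back.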
def lastrowid(self,table):
sequence_name = table._sequence_name
self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
return long(self.cursor.fetchone()[0])
class FireBirdEmbeddedAdapter(FireBirdAdapter):
drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "firebird"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
pathdb = m.group('path')
if not pathdb:
raise SyntaxError('Path required')
charset = m.group('charset')
if not charset:
charset = 'UTF8'
host = ''
driver_args.update(host=host,
database=pathdb,
user=credential_decoder(user),
password=credential_decoder(password),
charset=charset)
def connector(driver_args=driver_args):
return self.driver.connect(**driver_args)
self.connector = connector
if do_connect: self.reconnect()
class InformixAdapter(BaseAdapter):
drivers = ('informixdb',)
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'BLOB SUB_TYPE 1',
'json': 'BLOB SUB_TYPE 1',
'password': 'VARCHAR(%(length)s)',
'blob': 'BLOB SUB_TYPE 0',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INTEGER',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE PRECISION',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'CHAR(8)',
'datetime': 'DATETIME',
'id': 'SERIAL',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'BLOB SUB_TYPE 1',
'list:string': 'BLOB SUB_TYPE 1',
'list:reference': 'BLOB SUB_TYPE 1',
'big-id': 'BIGSERIAL',
'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
}
def RANDOM(self):
return 'Random()'
def NOT_NULL(self,default,field_type):
return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
fetch_amt = lmax - lmin
dbms_version = int(self.connection.dbms_version.split('.')[0])
if lmin and (dbms_version >= 10):
# Requires Informix 10.0+
sql_s += ' SKIP %d' % (lmin, )
if fetch_amt and (dbms_version >= 9):
# Requires Informix 9.0+
sql_s += ' FIRST %d' % (fetch_amt, )
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
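# Illustrative example (added comment): on a sufficiently recent engine,
# limitby=(10, 30) is rendered with Informix's SKIP/FIRST modifiers:
#
#   SELECT <modifiers> SKIP 10 FIRST 20 <fields> FROM ...;
#
# On older engines the SKIP (and possibly FIRST) keyword is silently omitted,
# because the dbms_version checks above gate each clause.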
def represent_exceptions(self, obj, fieldtype):
if fieldtype == 'date':
if isinstance(obj, (datetime.date, datetime.datetime)):
obj = obj.isoformat()[:10]
else:
obj = str(obj)
return "to_date('%s','%%Y-%%m-%%d')" % obj
elif fieldtype == 'datetime':
if isinstance(obj, datetime.datetime):
obj = obj.isoformat()[:19].replace('T',' ')
elif isinstance(obj, datetime.date):
obj = obj.isoformat()[:10]+' 00:00:00'
else:
obj = str(obj)
return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
return None
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "informix"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
user = credential_decoder(user)
password = credential_decoder(password)
dsn = '%s@%s' % (db,host)
driver_args.update(user=user,password=password,autocommit=True)
def connector(dsn=dsn,driver_args=driver_args):
return self.driver.connect(dsn,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def execute(self,command):
if command[-1:]==';':
command = command[:-1]
return self.log_execute(command)
def lastrowid(self,table):
return self.cursor.sqlerrd[1]
class InformixSEAdapter(InformixAdapter):
""" work in progress """
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
return 'SELECT %s %s FROM %s%s%s;' % \
(sql_s, sql_f, sql_t, sql_w, sql_o)
def rowslice(self,rows,minimum=0,maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
class DB2Adapter(BaseAdapter):
drivers = ('pyodbc',)
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'CLOB',
'json': 'CLOB',
'password': 'VARCHAR(%(length)s)',
'blob': 'BLOB',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'REAL',
'double': 'DOUBLE',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'CLOB',
'list:string': 'CLOB',
'list:reference': 'CLOB',
'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def LEFT_JOIN(self):
return 'LEFT OUTER JOIN'
def RANDOM(self):
return 'RAND()'
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
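# Note (added comment): DB2's FETCH FIRST n ROWS ONLY caps the result at lmax
# but cannot skip rows, so the lmin offset is expected to be applied
# client-side by rowslice() below, which trims the fetched rows in Python.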
def represent_exceptions(self, obj, fieldtype):
if fieldtype == 'blob':
obj = base64.b64encode(str(obj))
return "BLOB('%s')" % obj
elif fieldtype == 'datetime':
if isinstance(obj, datetime.datetime):
obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
elif isinstance(obj, datetime.date):
obj = obj.isoformat()[:10]+'-00.00.00'
return "'%s'" % obj
return None
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "db2"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://', 1)[1]
def connector(cnxn=ruri,driver_args=driver_args):
return self.driver.connect(cnxn,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def execute(self,command):
if command[-1:]==';':
command = command[:-1]
return self.log_execute(command)
def lastrowid(self,table):
self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
return long(self.cursor.fetchone()[0])
def rowslice(self,rows,minimum=0,maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
class TeradataAdapter(BaseAdapter):
drivers = ('pyodbc',)
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'VARCHAR(2000)',
'json': 'VARCHAR(4000)',
'password': 'VARCHAR(%(length)s)',
'blob': 'BLOB',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'REAL',
'double': 'DOUBLE',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
# Modified Constraint syntax for Teradata.
# Teradata does not support ON DELETE.
'id': 'INT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
'reference': 'INT',
'list:integer': 'VARCHAR(4000)',
'list:string': 'VARCHAR(4000)',
'list:reference': 'VARCHAR(4000)',
'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
'big-reference': 'BIGINT',
'reference FK': ' REFERENCES %(foreign_key)s',
'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
}
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "teradata"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://', 1)[1]
def connector(cnxn=ruri,driver_args=driver_args):
return self.driver.connect(cnxn,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def close(self,action='commit',really=True):
# Teradata does not implicitly close off the cursor
# leading to SQL_ACTIVE_STATEMENTS limit errors
self.cursor.close()
ConnectionPool.close(self, action, really)
def LEFT_JOIN(self):
return 'LEFT OUTER JOIN'
# Similar to MSSQL, Teradata cannot specify an offset range for pagination (limitby); only TOP is supported
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_s += ' TOP %i' % lmax
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
def _truncate(self, table, mode=''):
tablename = table._tablename
return ['DELETE FROM %s ALL;' % (tablename)]
INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
# (ANSI-SQL wants this form of name
# to be a delimited identifier)
class IngresAdapter(BaseAdapter):
drivers = ('pyodbc',)
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'CLOB',
'json': 'CLOB',
'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
'blob': 'BLOB',
'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
'integer': 'INTEGER4', # or int8...
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT8',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'ANSIDATE',
'time': 'TIME WITHOUT TIME ZONE',
'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'CLOB',
'list:string': 'CLOB',
'list:reference': 'CLOB',
'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
}
def LEFT_JOIN(self):
return 'LEFT OUTER JOIN'
def RANDOM(self):
return 'RANDOM()'
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
fetch_amt = lmax - lmin
if fetch_amt:
sql_s += ' FIRST %d ' % (fetch_amt, )
if lmin:
# Requires Ingres 9.2+
sql_o += ' OFFSET %d' % (lmin, )
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
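# Illustrative example (added comment): limitby=(10, 30) combines both
# keywords, giving SQL roughly of the form
#
#   SELECT FIRST 20 <fields> FROM ... OFFSET 10;
#
# (the OFFSET clause requires Ingres 9.2 or later, as noted above).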
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "ingres"
self._driver = pyodbc
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
connstr = uri.split(':', 1)[1]
# Simple URI processing
connstr = connstr.lstrip()
while connstr.startswith('/'):
connstr = connstr[1:]
if '=' in connstr:
# Assume we have a regular ODBC connection string and just use it
ruri = connstr
else:
# Assume only (local) dbname is passed in with OS auth
database_name = connstr
default_driver_name = 'Ingres'
vnode = '(local)'
servertype = 'ingres'
ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name)
def connector(cnxn=ruri,driver_args=driver_args):
return self.driver.connect(cnxn,**driver_args)
self.connector = connector
# TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns
if do_connect: self.reconnect()
def create_sequence_and_triggers(self, query, table, **args):
# Post-CREATE TABLE auto-increment setup (if needed),
# then modify the table to btree structure for performance.
# Older Ingres releases could use a rule/trigger like Oracle above.
if hasattr(table,'_primarykey'):
modify_tbl_sql = 'modify %s to btree unique on %s' % \
(table._tablename,
', '.join(["'%s'" % x for x in table.primarykey]))
self.execute(modify_tbl_sql)
else:
tmp_seqname='%s_iisq' % table._tablename
query=query.replace(INGRES_SEQNAME, tmp_seqname)
self.execute('create sequence %s' % tmp_seqname)
self.execute(query)
self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))
def lastrowid(self,table):
tmp_seqname='%s_iisq' % table
self.execute('select current value for %s' % tmp_seqname)
return long(self.cursor.fetchone()[0]) # don't really need int type cast here...
class IngresUnicodeAdapter(IngresAdapter):
drivers = ('pyodbc',)
types = {
'boolean': 'CHAR(1)',
'string': 'NVARCHAR(%(length)s)',
'text': 'NCLOB',
'json': 'NCLOB',
'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
'blob': 'BLOB',
'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
'integer': 'INTEGER4', # or int8...
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT8',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'ANSIDATE',
'time': 'TIME WITHOUT TIME ZONE',
'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'NCLOB',
'list:string': 'NCLOB',
'list:reference': 'NCLOB',
'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
}
class SAPDBAdapter(BaseAdapter):
drivers = ('sapdb',)
support_distributed_transaction = False
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'LONG',
'json': 'LONG',
'password': 'VARCHAR(%(length)s)',
'blob': 'LONG',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE PRECISION',
'decimal': 'FIXED(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'INT PRIMARY KEY',
'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'LONG',
'list:string': 'LONG',
'list:reference': 'LONG',
'big-id': 'BIGINT PRIMARY KEY',
'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
def sequence_name(self,table):
return (self.QUOTE_TEMPLATE + '_id_Seq') % table
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
if len(sql_w) > 1:
sql_w_row = sql_w + ' AND w_row > %i' % lmin
else:
sql_w_row = 'WHERE w_row > %i' % lmin
return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
def create_sequence_and_triggers(self, query, table, **args):
# following lines should only be executed if table._sequence_name does not exist
self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
% (table._tablename, table._id.name, table._sequence_name))
self.execute(query)
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "sapdb"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
def connector(user=user, password=password, database=db,
host=host, driver_args=driver_args):
return self.driver.Connection(user, password, database,
host, **driver_args)
self.connector = connector
if do_connect: self.reconnect()
def lastrowid(self,table):
self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
return long(self.cursor.fetchone()[0])
class CubridAdapter(MySQLAdapter):
drivers = ('cubriddb',)
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "cubrid"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = int(m.group('port') or '30000')
charset = m.group('charset') or 'utf8'
user = credential_decoder(user)
passwd = credential_decoder(password)
def connector(host=host,port=port,db=db,
user=user,passwd=passwd,driver_args=driver_args):
return self.driver.connect(host,port,db,user,passwd,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.execute('SET FOREIGN_KEY_CHECKS=1;')
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
######## GAE MySQL ##########
class DatabaseStoredFile:
web2py_filesystem = False
def escape(self,obj):
return self.db._adapter.escape(obj)
def __init__(self,db,filename,mode):
if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'):
raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now")
self.db = db
self.filename = filename
self.mode = mode
if not self.web2py_filesystem:
if db._adapter.dbengine == 'mysql':
sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
elif db._adapter.dbengine in ('postgres', 'sqlite'):
sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
self.db.executesql(sql)
DatabaseStoredFile.web2py_filesystem = True
self.p=0
self.data = ''
if mode in ('r','rw','a'):
query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
% filename
rows = self.db.executesql(query)
if rows:
self.data = rows[0][0]
elif exists(filename):
datafile = open(filename, 'r')
try:
self.data = datafile.read()
finally:
datafile.close()
elif mode in ('r','rw'):
raise RuntimeError("File %s does not exist" % filename)
def read(self, bytes):
data = self.data[self.p:self.p+bytes]
self.p += len(data)
return data
def readline(self):
i = self.data.find('\n',self.p)+1
if i>0:
data, self.p = self.data[self.p:i], i
else:
data, self.p = self.data[self.p:], len(self.data)
return data
def write(self,data):
self.data += data
def close_connection(self):
if self.db is not None:
self.db.executesql(
"DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
% (self.filename, self.data.replace("'","''"))
self.db.executesql(query)
self.db.commit()
self.db = None
def close(self):
self.close_connection()
@staticmethod
def exists(db, filename):
if exists(filename):
return True
query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
try:
if db.executesql(query):
return True
except Exception, e:
if not (db._adapter.isOperationalError(e) or
db._adapter.isProgrammingError(e)):
raise
# no web2py_filesystem found?
tb = traceback.format_exc()
LOGGER.error("Could not retrieve %s\n%s" % (filename, tb))
return False
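# Descriptive note (added, not in the original source): DatabaseStoredFile is
# a small file-like shim used by the Google Cloud SQL adapter below (via
# UseDatabaseStoredFile) so that web2py's migration metadata (the .table
# files) can live in a web2py_filesystem table instead of on local disk,
# which App Engine does not provide.  Reads pull the whole content column
# into memory; writes are buffered in self.data and flushed back with an
# INSERT when close_connection() is called.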
class UseDatabaseStoredFile:
def file_exists(self, filename):
return DatabaseStoredFile.exists(self.db,filename)
def file_open(self, filename, mode='rb', lock=True):
return DatabaseStoredFile(self.db,filename,mode)
def file_close(self, fileobj):
fileobj.close_connection()
def file_delete(self,filename):
query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
self.db.executesql(query)
self.db.commit()
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
uploads_in_blob = True
REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')
def __init__(self, db, uri='google:sql://realm:domain/database',
pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "mysql"
self.uri = uri
self.pool_size = pool_size
self.db_codec = db_codec
self._after_connection = after_connection
if do_connect: self.find_driver(adapter_args, uri)
self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
os.sep+'applications'+os.sep,1)[1])
ruri = uri.split("://")[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
instance = credential_decoder(m.group('instance'))
self.dbstring = db = credential_decoder(m.group('db'))
driver_args['instance'] = instance
if not 'charset' in driver_args:
driver_args['charset'] = 'utf8'
self.createdb = createdb = adapter_args.get('createdb',True)
if not createdb:
driver_args['database'] = db
def connector(driver_args=driver_args):
return rdbms.connect(**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
if self.createdb:
# self.execute('DROP DATABASE %s' % self.dbstring)
self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
self.execute('USE %s' % self.dbstring)
self.execute("SET FOREIGN_KEY_CHECKS=1;")
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
def execute(self, command, *a, **b):
return self.log_execute(command.decode('utf8'), *a, **b)
def find_driver(self,adapter_args,uri=None):
self.adapter_args = adapter_args
self.driver = "google"
class NoSQLAdapter(BaseAdapter):
can_select_for_update = False
QUOTE_TEMPLATE = '%s'
@staticmethod
def to_unicode(obj):
if isinstance(obj, str):
return obj.decode('utf8')
elif not isinstance(obj, unicode):
return unicode(obj)
return obj
def id_query(self, table):
return table._id > 0
def represent(self, obj, fieldtype):
field_is_type = fieldtype.startswith
if isinstance(obj, CALLABLETYPES):
obj = obj()
if isinstance(fieldtype, SQLCustomType):
return fieldtype.encoder(obj)
if isinstance(obj, (Expression, Field)):
raise SyntaxError("non supported on GAE")
if self.dbengine == 'google:datastore':
if isinstance(fieldtype, gae.Property):
return obj
is_string = isinstance(fieldtype,str)
is_list = is_string and field_is_type('list:')
if is_list:
if not obj:
obj = []
if not isinstance(obj, (list, tuple)):
obj = [obj]
if obj == '' and not \
(is_string and fieldtype[:2] in ['st','te', 'pa','up']):
return None
if not obj is None:
if isinstance(obj, list) and not is_list:
obj = [self.represent(o, fieldtype) for o in obj]
elif fieldtype in ('integer','bigint','id'):
obj = long(obj)
elif fieldtype == 'double':
obj = float(obj)
elif is_string and field_is_type('reference'):
if isinstance(obj, (Row, Reference)):
obj = obj['id']
obj = long(obj)
elif fieldtype == 'boolean':
if obj and not str(obj)[0].upper() in '0F':
obj = True
else:
obj = False
elif fieldtype == 'date':
if not isinstance(obj, datetime.date):
(y, m, d) = map(int,str(obj).strip().split('-'))
obj = datetime.date(y, m, d)
elif isinstance(obj,datetime.datetime):
(y, m, d) = (obj.year, obj.month, obj.day)
obj = datetime.date(y, m, d)
elif fieldtype == 'time':
if not isinstance(obj, datetime.time):
time_items = map(int,str(obj).strip().split(':')[:3])
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
obj = datetime.time(h, mi, s)
elif fieldtype == 'datetime':
if not isinstance(obj, datetime.datetime):
(y, m, d) = map(int,str(obj)[:10].strip().split('-'))
time_items = map(int,str(obj)[11:].strip().split(':')[:3])
while len(time_items)<3:
time_items.append(0)
(h, mi, s) = time_items
obj = datetime.datetime(y, m, d, h, mi, s)
elif fieldtype == 'blob':
pass
elif fieldtype == 'json':
if isinstance(obj, basestring):
obj = self.to_unicode(obj)
if have_serializers:
obj = serializers.loads_json(obj)
elif simplejson:
obj = simplejson.loads(obj)
else:
raise RuntimeError("missing simplejson")
elif is_string and field_is_type('list:string'):
return map(self.to_unicode,obj)
elif is_list:
return map(int,obj)
else:
obj = self.to_unicode(obj)
return obj
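# Illustrative example (added comment): unlike the SQL adapters, represent()
# here returns native Python values for the NoSQL drivers (e.g. the Google
# Datastore adapter below) rather than SQL literals:
#
#   adapter.represent('2014-01-31', 'date')      ->  datetime.date(2014, 1, 31)
#   adapter.represent('7', 'integer')            ->  7L
#   adapter.represent(['a', 'b'], 'list:string') ->  [u'a', u'b']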
def _insert(self,table,fields):
return 'insert %s in %s' % (fields, table)
def _count(self,query,distinct=None):
return 'count %s' % repr(query)
def _select(self,query,fields,attributes):
return 'select %s where %s' % (repr(fields), repr(query))
def _delete(self,tablename, query):
return 'delete %s where %s' % (repr(tablename),repr(query))
def _update(self,tablename,query,fields):
return 'update %s (%s) where %s' % (repr(tablename),
repr(fields),repr(query))
def commit(self):
"""
remember: no transactions on many NoSQL
"""
pass
def rollback(self):
"""
remember: no transactions on many NoSQL
"""
pass
def close_connection(self):
"""
remember: no transactions on many NoSQL
"""
pass
# these functions should never be called!
def OR(self,first,second): raise SyntaxError("Not supported")
def AND(self,first,second): raise SyntaxError("Not supported")
def AS(self,first,second): raise SyntaxError("Not supported")
def ON(self,first,second): raise SyntaxError("Not supported")
def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
def ADD(self,first,second): raise SyntaxError("Not supported")
def SUB(self,first,second): raise SyntaxError("Not supported")
def MUL(self,first,second): raise SyntaxError("Not supported")
def DIV(self,first,second): raise SyntaxError("Not supported")
def LOWER(self,first): raise SyntaxError("Not supported")
def UPPER(self,first): raise SyntaxError("Not supported")
def EXTRACT(self,first,what): raise SyntaxError("Not supported")
def LENGTH(self, first): raise SyntaxError("Not supported")
def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
def LEFT_JOIN(self): raise SyntaxError("Not supported")
def RANDOM(self): raise SyntaxError("Not supported")
def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
def ILIKE(self,first,second): raise SyntaxError("Not supported")
def drop(self,table,mode): raise SyntaxError("Not supported")
def alias(self,table,alias): raise SyntaxError("Not supported")
def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
def prepare(self,key): raise SyntaxError("Not supported")
def commit_prepared(self,key): raise SyntaxError("Not supported")
def rollback_prepared(self,key): raise SyntaxError("Not supported")
def concat_add(self,table): raise SyntaxError("Not supported")
def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
def create_sequence_and_triggers(self, query, table, **args): pass
def log_execute(self,*a,**b): raise SyntaxError("Not supported")
def execute(self,*a,**b): raise SyntaxError("Not supported")
def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
def lastrowid(self,table): raise SyntaxError("Not supported")
def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
class GAEF(object):
def __init__(self,name,op,value,apply):
self.name=name=='id' and '__key__' or name
self.op=op
self.value=value
self.apply=apply
def __repr__(self):
return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
class GoogleDatastoreAdapter(NoSQLAdapter):
"""
NDB:
You can enable NDB by using adapter_args:
db = DAL('google:datastore', adapter_args={'ndb_settings':ndb_settings, 'use_ndb':True})
ndb_settings is optional and can be used for per model caching settings.
It must be a dict in this form:
ndb_settings = {<table_name>:{<variable_name>:<variable_value>}}
See: https://developers.google.com/appengine/docs/python/ndb/cache
"""
uploads_in_blob = True
types = {}
# reconnect is not required for Datastore dbs
reconnect = lambda *args, **kwargs: None
def file_exists(self, filename): pass
def file_open(self, filename, mode='rb', lock=True): pass
def file_close(self, fileobj): pass
REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.use_ndb = ('use_ndb' in adapter_args) and adapter_args['use_ndb']
if self.use_ndb is True:
self.types.update({
'boolean': ndb.BooleanProperty,
'string': (lambda **kwargs: ndb.StringProperty(**kwargs)),
'text': ndb.TextProperty,
'json': ndb.TextProperty,
'password': ndb.StringProperty,
'blob': ndb.BlobProperty,
'upload': ndb.StringProperty,
'integer': ndb.IntegerProperty,
'bigint': ndb.IntegerProperty,
'float': ndb.FloatProperty,
'double': ndb.FloatProperty,
'decimal': NDBDecimalProperty,
'date': ndb.DateProperty,
'time': ndb.TimeProperty,
'datetime': ndb.DateTimeProperty,
'id': None,
'reference': ndb.IntegerProperty,
'list:string': (lambda **kwargs: ndb.StringProperty(repeated=True,default=None, **kwargs)),
'list:integer': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
'list:reference': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
})
else:
self.types.update({
'boolean': gae.BooleanProperty,
'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
'text': gae.TextProperty,
'json': gae.TextProperty,
'password': gae.StringProperty,
'blob': gae.BlobProperty,
'upload': gae.StringProperty,
'integer': gae.IntegerProperty,
'bigint': gae.IntegerProperty,
'float': gae.FloatProperty,
'double': gae.FloatProperty,
'decimal': GAEDecimalProperty,
'date': gae.DateProperty,
'time': gae.TimeProperty,
'datetime': gae.DateTimeProperty,
'id': None,
'reference': gae.IntegerProperty,
'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
})
self.db = db
self.uri = uri
self.dbengine = 'google:datastore'
self.folder = folder
db['_lastsql'] = ''
self.db_codec = 'UTF-8'
self._after_connection = after_connection
self.pool_size = 0
match = self.REGEX_NAMESPACE.match(uri)
if match:
namespace_manager.set_namespace(match.group('namespace'))
self.keyfunc = (self.use_ndb and ndb.Key) or Key.from_path
self.ndb_settings = None
if 'ndb_settings' in adapter_args:
self.ndb_settings = adapter_args['ndb_settings']
def parse_id(self, value, field_type):
return value
def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
myfields = {}
for field in table:
if isinstance(polymodel,Table) and field.name in polymodel.fields():
continue
attr = {}
if isinstance(field.custom_qualifier, dict):
                # these are custom properties to add to the GAE field declaration
attr = field.custom_qualifier
field_type = field.type
if isinstance(field_type, SQLCustomType):
ftype = self.types[field_type.native or field_type.type](**attr)
elif isinstance(field_type, ((self.use_ndb and ndb.Property) or gae.Property)):
ftype = field_type
elif field_type.startswith('id'):
continue
elif field_type.startswith('decimal'):
precision, scale = field_type[7:].strip('()').split(',')
precision = int(precision)
scale = int(scale)
dec_cls = (self.use_ndb and NDBDecimalProperty) or GAEDecimalProperty
ftype = dec_cls(precision, scale, **attr)
elif field_type.startswith('reference'):
if field.notnull:
attr = dict(required=True)
ftype = self.types[field_type[:9]](**attr)
elif field_type.startswith('list:reference'):
if field.notnull:
attr['required'] = True
ftype = self.types[field_type[:14]](**attr)
elif field_type.startswith('list:'):
ftype = self.types[field_type](**attr)
elif not field_type in self.types\
or not self.types[field_type]:
raise SyntaxError('Field: unknown field type: %s' % field_type)
else:
ftype = self.types[field_type](**attr)
myfields[field.name] = ftype
if not polymodel:
model_cls = (self.use_ndb and ndb.Model) or gae.Model
table._tableobj = classobj(table._tablename, (model_cls, ), myfields)
if self.use_ndb:
# Set NDB caching variables
if self.ndb_settings and (table._tablename in self.ndb_settings):
                    for k, v in self.ndb_settings[table._tablename].iteritems():
setattr(table._tableobj, k, v)
elif polymodel==True:
pm_cls = (self.use_ndb and NDBPolyModel) or PolyModel
table._tableobj = classobj(table._tablename, (pm_cls, ), myfields)
elif isinstance(polymodel,Table):
table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
else:
raise SyntaxError("polymodel must be None, True, a table or a tablename")
return None
def expand(self,expression,field_type=None):
if isinstance(expression,Field):
if expression.type in ('text', 'blob', 'json'):
raise SyntaxError('AppEngine does not index by: %s' % expression.type)
return expression.name
elif isinstance(expression, (Expression, Query)):
if not expression.second is None:
return expression.op(expression.first, expression.second)
elif not expression.first is None:
return expression.op(expression.first)
else:
return expression.op()
elif field_type:
return self.represent(expression,field_type)
elif isinstance(expression,(list,tuple)):
return ','.join([self.represent(item,field_type) for item in expression])
else:
return str(expression)
### TODO from gql.py Expression
def AND(self,first,second):
a = self.expand(first)
b = self.expand(second)
if b[0].name=='__key__' and a[0].name!='__key__':
return b+a
return a+b
def EQ(self,first,second=None):
if isinstance(second, Key):
return [GAEF(first.name,'=',second,lambda a,b:a==b)]
return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
def NE(self,first,second=None):
if first.type != 'id':
return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)]
else:
if not second is None:
second = Key.from_path(first._tablename, long(second))
return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
def LT(self,first,second=None):
if first.type != 'id':
return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)]
else:
second = Key.from_path(first._tablename, long(second))
return [GAEF(first.name,'<',second,lambda a,b:a<b)]
def LE(self,first,second=None):
if first.type != 'id':
return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)]
else:
second = Key.from_path(first._tablename, long(second))
return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
def GT(self,first,second=None):
if first.type != 'id' or second==0 or second == '0':
return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)]
else:
second = Key.from_path(first._tablename, long(second))
return [GAEF(first.name,'>',second,lambda a,b:a>b)]
def GE(self,first,second=None):
if first.type != 'id':
return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)]
else:
second = Key.from_path(first._tablename, long(second))
return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
def INVERT(self,first):
return '-%s' % first.name
def COMMA(self,first,second):
return '%s, %s' % (self.expand(first),self.expand(second))
def BELONGS(self,first,second=None):
if not isinstance(second,(list, tuple, set)):
raise SyntaxError("Not supported")
if not self.use_ndb:
if isinstance(second,set):
second = list(second)
if first.type == 'id':
second = [Key.from_path(first._tablename, int(i)) for i in second]
return [GAEF(first.name,'in',second,lambda a,b:a in b)]
def CONTAINS(self,first,second,case_sensitive=False):
# silently ignoring: GAE can only do case sensitive matches!
if not first.type.startswith('list:'):
raise SyntaxError("Not supported")
return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
def NOT(self,first):
nops = { self.EQ: self.NE,
self.NE: self.EQ,
self.LT: self.GE,
self.GT: self.LE,
self.LE: self.GT,
self.GE: self.LT}
if not isinstance(first,Query):
raise SyntaxError("Not suported")
nop = nops.get(first.op,None)
if not nop:
raise SyntaxError("Not suported %s" % first.op.__name__)
first.op = nop
return self.expand(first)
def truncate(self,table,mode):
self.db(self.db._adapter.id_query(table)).delete()
GAE_FILTER_OPTIONS = {
'=': lambda q, t, p, v: q.filter(getattr(t,p) == v),
'>': lambda q, t, p, v: q.filter(getattr(t,p) > v),
'<': lambda q, t, p, v: q.filter(getattr(t,p) < v),
'<=': lambda q, t, p, v: q.filter(getattr(t,p) <= v),
'>=': lambda q, t, p, v: q.filter(getattr(t,p) >= v),
'!=': lambda q, t, p, v: q.filter(getattr(t,p) != v),
'in': lambda q, t, p, v: q.filter(getattr(t,p).IN(v)),
}
def filter(self, query, tableobj, prop, op, value):
return self.GAE_FILTER_OPTIONS[op](query, tableobj, prop, value)
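    # Illustrative sketch (hypothetical model and values, not from this
    # module): with NDB enabled,
    #     self.filter(qry, Person, 'age', '>', 18)
    # dispatches through GAE_FILTER_OPTIONS['>'] and is roughly equivalent
    # to qry.filter(Person.age > 18).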
def select_raw(self,query,fields=None,attributes=None):
db = self.db
fields = fields or []
attributes = attributes or {}
args_get = attributes.get
new_fields = []
for item in fields:
if isinstance(item,SQLALL):
new_fields += item._table
else:
new_fields.append(item)
fields = new_fields
if query:
tablename = self.get_table(query)
elif fields:
tablename = fields[0].tablename
query = db._adapter.id_query(fields[0].table)
else:
raise SyntaxError("Unable to determine a tablename")
if query:
if use_common_filters(query):
query = self.common_filter(query,[tablename])
#tableobj is a GAE/NDB Model class (or subclass)
tableobj = db[tablename]._tableobj
filters = self.expand(query)
projection = None
if len(db[tablename].fields) == len(fields):
#getting all fields, not a projection query
projection = None
elif args_get('projection') == True:
projection = []
for f in fields:
if f.type in ['text', 'blob', 'json']:
raise SyntaxError(
"text and blob field types not allowed in projection queries")
else:
projection.append(f.name)
elif args_get('filterfields') == True:
projection = []
for f in fields:
projection.append(f.name)
        # real projections can't include 'id';
        # it will be added to the result later
query_projection = [
p for p in projection if \
p != db[tablename]._id.name] if projection and \
args_get('projection') == True\
else None
cursor = None
if isinstance(args_get('reusecursor'), str):
cursor = args_get('reusecursor')
if self.use_ndb:
qo = ndb.QueryOptions(projection=query_projection, cursor=cursor)
items = tableobj.query(default_options=qo)
else:
items = gae.Query(tableobj, projection=query_projection,
cursor=cursor)
for filter in filters:
if args_get('projection') == True and \
filter.name in query_projection and \
filter.op in ['=', '<=', '>=']:
raise SyntaxError(
"projection fields cannot have equality filters")
if filter.name=='__key__' and filter.op=='>' and filter.value==0:
continue
elif filter.name=='__key__' and filter.op=='=':
if filter.value==0:
items = []
elif isinstance(filter.value, (self.use_ndb and ndb.Key) or Key):
                        # key queries return a class instance,
# can't use projection
# extra values will be ignored in post-processing later
item = filter.value.get() if self.use_ndb else tableobj.get(filter.value)
items = (item and [item]) or []
else:
                        # key queries return a class instance,
# can't use projection
# extra values will be ignored in post-processing later
item = tableobj.get_by_id(filter.value)
items = (item and [item]) or []
elif isinstance(items,list): # i.e. there is a single record!
                items = [i for i in items if filter.apply(
                    getattr(i,filter.name),filter.value)]
else:
if filter.name=='__key__' and filter.op != 'in':
if self.use_ndb:
items.order(tableobj._key)
else:
items.order('__key__')
items = self.filter(items, tableobj, filter.name,
filter.op, filter.value) \
if self.use_ndb else \
items.filter('%s %s' % (filter.name,filter.op),
filter.value)
if not isinstance(items,list):
if args_get('left', None):
raise SyntaxError('Set: no left join in appengine')
if args_get('groupby', None):
raise SyntaxError('Set: no groupby in appengine')
orderby = args_get('orderby', False)
if orderby:
### THIS REALLY NEEDS IMPROVEMENT !!!
if isinstance(orderby, (list, tuple)):
orderby = xorify(orderby)
if isinstance(orderby,Expression):
orderby = self.expand(orderby)
orders = orderby.split(', ')
for order in orders:
if self.use_ndb:
#TODO There must be a better way
def make_order(o):
s = str(o)
desc = s[0] == '-'
s = (desc and s[1:]) or s
return (desc and -getattr(tableobj, s)) or getattr(tableobj, s)
_order = {'-id':-tableobj._key,'id':tableobj._key}.get(order)
if _order is None:
_order = make_order(order)
items = items.order(_order)
else:
order={'-id':'-__key__','id':'__key__'}.get(order,order)
items = items.order(order)
if args_get('limitby', None):
(lmin, lmax) = attributes['limitby']
(limit, offset) = (lmax - lmin, lmin)
if self.use_ndb:
rows, cursor, more = items.fetch_page(limit,offset=offset)
else:
rows = items.fetch(limit,offset=offset)
#cursor is only useful if there was a limit and we didn't return
# all results
if args_get('reusecursor'):
db['_lastcursor'] = cursor if self.use_ndb else items.cursor()
items = rows
return (items, tablename, projection or db[tablename].fields)
def select(self,query,fields,attributes):
"""
        This is the GAE version of select. Some notes to consider:
         - db['_lastsql'] is not set because there is no SQL statement string
           for a GAE query
- 'nativeRef' is a magical fieldname used for self references on GAE
- optional attribute 'projection' when set to True will trigger
use of the GAE projection queries. note that there are rules for
what is accepted imposed by GAE: each field must be indexed,
projection queries cannot contain blob or text fields, and you
cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
- optional attribute 'filterfields' when set to True web2py will only
parse the explicitly listed fields into the Rows object, even though
all fields are returned in the query. This can be used to reduce
memory usage in cases where true projection queries are not
usable.
- optional attribute 'reusecursor' allows use of cursor with queries
that have the limitby attribute. Set the attribute to True for the
first query, set it to the value of db['_lastcursor'] to continue
a previous query. The user must save the cursor value between
requests, and the filters must be identical. It is up to the user
to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
"""
(items, tablename, fields) = self.select_raw(query,fields,attributes)
# self.db['_lastsql'] = self._select(query,fields,attributes)
rows = [[(t==self.db[tablename]._id.name and item) or \
(t=='nativeRef' and item) or getattr(item, t) \
for t in fields] for item in items]
colnames = ['%s.%s' % (tablename, t) for t in fields]
processor = attributes.get('processor',self.parse)
return processor(rows,fields,colnames,False)
def parse_list_integers(self, value, field_type):
return value[:] if self.use_ndb else value
def parse_list_strings(self, value, field_type):
return value[:] if self.use_ndb else value
def count(self,query,distinct=None,limit=None):
if distinct:
raise RuntimeError("COUNT DISTINCT not supported")
(items, tablename, fields) = self.select_raw(query)
# self.db['_lastsql'] = self._count(query)
try:
return len(items)
except TypeError:
return items.count(limit=limit)
def delete(self,tablename, query):
"""
This function was changed on 2010-05-04 because according to
http://code.google.com/p/googleappengine/issues/detail?id=3119
GAE no longer supports deleting more than 1000 records.
"""
# self.db['_lastsql'] = self._delete(tablename,query)
(items, tablename, fields) = self.select_raw(query)
# items can be one item or a query
if not isinstance(items,list):
            # use a keys_only query to ensure that this runs as a datastore
            # small operation
leftitems = items.fetch(1000, keys_only=True)
counter = 0
while len(leftitems):
counter += len(leftitems)
if self.use_ndb:
ndb.delete_multi(leftitems)
else:
gae.delete(leftitems)
leftitems = items.fetch(1000, keys_only=True)
else:
counter = len(items)
if self.use_ndb:
ndb.delete_multi([item.key for item in items])
else:
gae.delete(items)
return counter
def update(self,tablename,query,update_fields):
# self.db['_lastsql'] = self._update(tablename,query,update_fields)
(items, tablename, fields) = self.select_raw(query)
counter = 0
for item in items:
for field, value in update_fields:
setattr(item, field.name, self.represent(value,field.type))
item.put()
counter += 1
LOGGER.info(str(counter))
return counter
def insert(self,table,fields):
dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
# table._db['_lastsql'] = self._insert(table,fields)
tmp = table._tableobj(**dfields)
tmp.put()
key = tmp.key if self.use_ndb else tmp.key()
rid = Reference(key.id())
(rid._table, rid._record, rid._gaekey) = (table, None, key)
return rid
def bulk_insert(self,table,items):
parsed_items = []
for item in items:
dfields=dict((f.name,self.represent(v,f.type)) for f,v in item)
parsed_items.append(table._tableobj(**dfields))
if self.use_ndb:
ndb.put_multi(parsed_items)
else:
gae.put(parsed_items)
return True
def uuid2int(uuidv):
return uuid.UUID(uuidv).int
def int2uuid(n):
return str(uuid.UUID(int=n))
class CouchDBAdapter(NoSQLAdapter):
drivers = ('couchdb',)
uploads_in_blob = True
types = {
'boolean': bool,
'string': str,
'text': str,
'json': str,
'password': str,
'blob': str,
'upload': str,
'integer': long,
'bigint': long,
'float': float,
'double': float,
'date': datetime.date,
'time': datetime.time,
'datetime': datetime.datetime,
'id': long,
'reference': long,
'list:string': list,
'list:integer': list,
'list:reference': list,
}
def file_exists(self, filename): pass
def file_open(self, filename, mode='rb', lock=True): pass
def file_close(self, fileobj): pass
def expand(self,expression,field_type=None):
if isinstance(expression,Field):
if expression.type=='id':
return "%s._id" % expression.tablename
return BaseAdapter.expand(self,expression,field_type)
def AND(self,first,second):
return '(%s && %s)' % (self.expand(first),self.expand(second))
def OR(self,first,second):
return '(%s || %s)' % (self.expand(first),self.expand(second))
def EQ(self,first,second):
if second is None:
return '(%s == null)' % self.expand(first)
return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
def NE(self,first,second):
if second is None:
return '(%s != null)' % self.expand(first)
return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
def COMMA(self,first,second):
return '%s + %s' % (self.expand(first),self.expand(second))
def represent(self, obj, fieldtype):
value = NoSQLAdapter.represent(self, obj, fieldtype)
if fieldtype=='id':
return repr(str(long(value)))
elif fieldtype in ('date','time','datetime','boolean'):
return serializers.json(value)
return repr(not isinstance(value,unicode) and value \
or value and value.encode('utf8'))
def __init__(self,db,uri='couchdb://127.0.0.1:5984',
pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.uri = uri
if do_connect: self.find_driver(adapter_args)
self.dbengine = 'couchdb'
self.folder = folder
db['_lastsql'] = ''
self.db_codec = 'UTF-8'
self._after_connection = after_connection
self.pool_size = pool_size
url='http://'+uri[10:]
def connector(url=url,driver_args=driver_args):
return self.driver.Server(url,**driver_args)
self.reconnect(connector,cursor=False)
def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
if migrate:
try:
self.connection.create(table._tablename)
except:
pass
def insert(self,table,fields):
id = uuid2int(web2py_uuid())
ctable = self.connection[table._tablename]
values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
values['_id'] = str(id)
ctable.save(values)
return id
def _select(self,query,fields,attributes):
if not isinstance(query,Query):
raise SyntaxError("Not Supported")
for key in set(attributes.keys())-SELECT_ARGS:
raise SyntaxError('invalid select attribute: %s' % key)
new_fields=[]
for item in fields:
if isinstance(item,SQLALL):
new_fields += item._table
else:
new_fields.append(item)
def uid(fd):
return fd=='id' and '_id' or fd
def get(row,fd):
return fd=='id' and long(row['_id']) or row.get(fd,None)
fields = new_fields
tablename = self.get_table(query)
fieldnames = [f.name for f in (fields or self.db[tablename])]
colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
dict(t=tablename,
query=self.expand(query),
order='%s._id' % tablename,
fields=fields)
return fn, colnames
def select(self,query,fields,attributes):
if not isinstance(query,Query):
raise SyntaxError("Not Supported")
fn, colnames = self._select(query,fields,attributes)
tablename = colnames[0].split('.')[0]
ctable = self.connection[tablename]
rows = [cols['value'] for cols in ctable.query(fn)]
processor = attributes.get('processor',self.parse)
return processor(rows,fields,colnames,False)
def delete(self,tablename,query):
if not isinstance(query,Query):
raise SyntaxError("Not Supported")
if query.first.type=='id' and query.op==self.EQ:
id = query.second
tablename = query.first.tablename
assert(tablename == query.first.tablename)
ctable = self.connection[tablename]
try:
del ctable[str(id)]
return 1
except couchdb.http.ResourceNotFound:
return 0
else:
tablename = self.get_table(query)
rows = self.select(query,[self.db[tablename]._id],{})
ctable = self.connection[tablename]
for row in rows:
del ctable[str(row.id)]
return len(rows)
def update(self,tablename,query,fields):
if not isinstance(query,Query):
raise SyntaxError("Not Supported")
if query.first.type=='id' and query.op==self.EQ:
id = query.second
tablename = query.first.tablename
ctable = self.connection[tablename]
try:
doc = ctable[str(id)]
for key,value in fields:
doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
ctable.save(doc)
return 1
except couchdb.http.ResourceNotFound:
return 0
else:
tablename = self.get_table(query)
rows = self.select(query,[self.db[tablename]._id],{})
ctable = self.connection[tablename]
table = self.db[tablename]
for row in rows:
doc = ctable[str(row.id)]
for key,value in fields:
doc[key.name] = self.represent(value,table[key.name].type)
ctable.save(doc)
return len(rows)
def count(self,query,distinct=None):
if distinct:
raise RuntimeError("COUNT DISTINCT not supported")
if not isinstance(query,Query):
raise SyntaxError("Not Supported")
tablename = self.get_table(query)
rows = self.select(query,[self.db[tablename]._id],{})
return len(rows)
def cleanup(text):
"""
validates that the given text is clean: only contains [0-9a-zA-Z_]
"""
#if not REGEX_ALPHANUMERIC.match(text):
# raise SyntaxError('invalid table or field name: %s' % text)
return text
class MongoDBAdapter(NoSQLAdapter):
native_json = True
drivers = ('pymongo',)
uploads_in_blob = False
types = {
'boolean': bool,
'string': str,
'text': str,
'json': str,
'password': str,
'blob': str,
'upload': str,
'integer': long,
'bigint': long,
'float': float,
'double': float,
'date': datetime.date,
'time': datetime.time,
'datetime': datetime.datetime,
'id': long,
'reference': long,
'list:string': list,
'list:integer': list,
'list:reference': list,
}
error_messages = {"javascript_needed": "This must yet be replaced" +
" with javascript in order to work."}
def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
pool_size=0, folder=None, db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.uri = uri
if do_connect: self.find_driver(adapter_args)
import random
from bson.objectid import ObjectId
from bson.son import SON
import pymongo.uri_parser
m = pymongo.uri_parser.parse_uri(uri)
self.SON = SON
self.ObjectId = ObjectId
self.random = random
self.dbengine = 'mongodb'
self.folder = folder
db['_lastsql'] = ''
self.db_codec = 'UTF-8'
self._after_connection = after_connection
self.pool_size = pool_size
#this is the minimum amount of replicates that it should wait
# for on insert/update
self.minimumreplication = adapter_args.get('minimumreplication',0)
        # by default all inserts and selects were performed asynchronously,
        # but now the default is synchronous, except when overruled by
        # this adapter argument or a function parameter
self.safe = adapter_args.get('safe',True)
# load user setting for uploads in blob storage
self.uploads_in_blob = adapter_args.get('uploads_in_blob', False)
if isinstance(m,tuple):
m = {"database" : m[1]}
if m.get('database')==None:
raise SyntaxError("Database is required!")
def connector(uri=self.uri,m=m):
# Connection() is deprecated
if hasattr(self.driver, "MongoClient"):
Connection = self.driver.MongoClient
else:
Connection = self.driver.Connection
return Connection(uri)[m.get('database')]
self.reconnect(connector,cursor=False)
def object_id(self, arg=None):
""" Convert input to a valid Mongodb ObjectId instance
self.object_id("<random>") -> ObjectId (not unique) instance """
if not arg:
arg = 0
if isinstance(arg, basestring):
# we assume an integer as default input
rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
if arg.isdigit() and (not rawhex):
arg = int(arg)
elif arg == "<random>":
arg = int("0x%sL" % \
"".join([self.random.choice("0123456789abcdef") \
for x in range(24)]), 0)
elif arg.isalnum():
if not arg.startswith("0x"):
arg = "0x%s" % arg
try:
arg = int(arg, 0)
except ValueError, e:
raise ValueError(
"invalid objectid argument string: %s" % e)
else:
raise ValueError("Invalid objectid argument string. " +
"Requires an integer or base 16 value")
elif isinstance(arg, self.ObjectId):
return arg
if not isinstance(arg, (int, long)):
raise TypeError("object_id argument must be of type " +
"ObjectId or an objectid representable integer")
hexvalue = hex(arg)[2:].rstrip('L').zfill(24)
return self.ObjectId(hexvalue)
def parse_reference(self, value, field_type):
# here we have to check for ObjectID before base parse
if isinstance(value, self.ObjectId):
value = long(str(value), 16)
return super(MongoDBAdapter,
self).parse_reference(value, field_type)
def parse_id(self, value, field_type):
if isinstance(value, self.ObjectId):
value = long(str(value), 16)
return super(MongoDBAdapter,
self).parse_id(value, field_type)
def represent(self, obj, fieldtype):
        # the base adapter does not support MongoDB ObjectId
if isinstance(obj, self.ObjectId):
value = obj
else:
value = NoSQLAdapter.represent(self, obj, fieldtype)
        # reference types must be converted to ObjectId
if fieldtype =='date':
if value == None:
return value
# this piece of data can be stripped off based on the fieldtype
t = datetime.time(0, 0, 0)
            # mongodb doesn't have a date type, so it must be stored as a
            # datetime, string or integer
return datetime.datetime.combine(value, t)
elif fieldtype == 'time':
if value == None:
return value
            # this piece of data can be stripped off based on the fieldtype
d = datetime.date(2000, 1, 1)
            # mongodb doesn't have a time type, so it must be stored as a
            # datetime, string or integer
return datetime.datetime.combine(d, value)
elif fieldtype == "blob":
if value== None:
return value
from bson import Binary
if not isinstance(value, Binary):
if not isinstance(value, basestring):
return Binary(str(value))
return Binary(value)
return value
elif (isinstance(fieldtype, basestring) and
fieldtype.startswith('list:')):
if fieldtype.startswith('list:reference'):
newval = []
for v in value:
newval.append(self.object_id(v))
return newval
return value
elif ((isinstance(fieldtype, basestring) and
fieldtype.startswith("reference")) or
(isinstance(fieldtype, Table)) or fieldtype=="id"):
value = self.object_id(value)
return value
def create_table(self, table, migrate=True, fake_migrate=False,
polymodel=None, isCapped=False):
if isCapped:
raise RuntimeError("Not implemented")
def count(self, query, distinct=None, snapshot=True):
if distinct:
raise RuntimeError("COUNT DISTINCT not supported")
if not isinstance(query,Query):
raise SyntaxError("Not Supported")
tablename = self.get_table(query)
return long(self.select(query,[self.db[tablename]._id], {},
count=True,snapshot=snapshot)['count'])
        # Maybe it would be faster to call pymongo's .count() directly,
        # i.e. connection[table].find(query).count(), since that would
        # probably reduce the returned set.
def expand(self, expression, field_type=None):
if isinstance(expression, Query):
            # for any query using 'id':
            #        set name as _id (as per pymongo/mongodb primary key)
            #        convert second arg to an objectid field
            #        (if it is not already)
            #        if second arg is 0 convert to objectid
if isinstance(expression.first,Field) and \
((expression.first.type == 'id') or \
("reference" in expression.first.type)):
if expression.first.type == 'id':
expression.first.name = '_id'
# cast to Mongo ObjectId
if isinstance(expression.second, (tuple, list, set)):
expression.second = [self.object_id(item) for
item in expression.second]
else:
expression.second = self.object_id(expression.second)
result = expression.op(expression.first, expression.second)
if isinstance(expression, Field):
if expression.type=='id':
result = "_id"
else:
result = expression.name
elif isinstance(expression, (Expression, Query)):
if not expression.second is None:
result = expression.op(expression.first, expression.second)
elif not expression.first is None:
result = expression.op(expression.first)
elif not isinstance(expression.op, str):
result = expression.op()
else:
result = expression.op
elif field_type:
result = self.represent(expression,field_type)
elif isinstance(expression,(list,tuple)):
result = ','.join(self.represent(item,field_type) for
item in expression)
else:
result = expression
return result
def drop(self, table, mode=''):
ctable = self.connection[table._tablename]
ctable.drop()
def truncate(self, table, mode, safe=None):
if safe == None:
safe=self.safe
ctable = self.connection[table._tablename]
ctable.remove(None, safe=True)
def select(self, query, fields, attributes, count=False,
snapshot=False):
mongofields_dict = self.SON()
mongoqry_dict = {}
new_fields=[]
mongosort_list = []
# try an orderby attribute
orderby = attributes.get('orderby', False)
limitby = attributes.get('limitby', False)
# distinct = attributes.get('distinct', False)
if 'for_update' in attributes:
logging.warn('mongodb does not support for_update')
for key in set(attributes.keys())-set(('limitby',
'orderby','for_update')):
if attributes[key]!=None:
logging.warn('select attribute not implemented: %s' % key)
if limitby:
limitby_skip, limitby_limit = limitby[0], int(limitby[1])
else:
limitby_skip = limitby_limit = 0
if orderby:
if isinstance(orderby, (list, tuple)):
orderby = xorify(orderby)
# !!!! need to add 'random'
for f in self.expand(orderby).split(','):
if f.startswith('-'):
mongosort_list.append((f[1:], -1))
else:
mongosort_list.append((f, 1))
for item in fields:
if isinstance(item, SQLALL):
new_fields += item._table
else:
new_fields.append(item)
fields = new_fields
if isinstance(query,Query):
tablename = self.get_table(query)
elif len(fields) != 0:
tablename = fields[0].tablename
else:
raise SyntaxError("The table name could not be found in " +
"the query nor from the select statement.")
mongoqry_dict = self.expand(query)
fields = fields or self.db[tablename]
for field in fields:
mongofields_dict[field.name] = 1
ctable = self.connection[tablename]
if count:
return {'count' : ctable.find(
mongoqry_dict, mongofields_dict,
skip=limitby_skip, limit=limitby_limit,
sort=mongosort_list, snapshot=snapshot).count()}
else:
# pymongo cursor object
mongo_list_dicts = ctable.find(mongoqry_dict,
mongofields_dict, skip=limitby_skip,
limit=limitby_limit, sort=mongosort_list,
snapshot=snapshot)
rows = []
# populate row in proper order
# Here we replace ._id with .id to follow the standard naming
colnames = []
newnames = []
for field in fields:
colname = str(field)
colnames.append(colname)
tablename, fieldname = colname.split(".")
if fieldname == "_id":
# Mongodb reserved uuid key
field.name = "id"
newnames.append(".".join((tablename, field.name)))
for record in mongo_list_dicts:
row=[]
for colname in colnames:
tablename, fieldname = colname.split(".")
# switch to Mongo _id uuids for retrieving
# record id's
if fieldname == "id": fieldname = "_id"
if fieldname in record:
value = record[fieldname]
else:
value = None
row.append(value)
rows.append(row)
processor = attributes.get('processor', self.parse)
result = processor(rows, fields, newnames, False)
return result
def insert(self, table, fields, safe=None):
"""Safe determines whether a asynchronious request is done or a
synchronious action is done
For safety, we use by default synchronous requests"""
values = dict()
if safe==None:
safe = self.safe
ctable = self.connection[table._tablename]
for k, v in fields:
if not k.name in ["id", "safe"]:
fieldname = k.name
fieldtype = table[k.name].type
values[fieldname] = self.represent(v, fieldtype)
ctable.insert(values, safe=safe)
return long(str(values['_id']), 16)
def update(self, tablename, query, fields, safe=None):
if safe == None:
safe = self.safe
        # return the number of adjusted rows or zero, but raise no
        # exceptions related to not finding the result
if not isinstance(query, Query):
raise RuntimeError("Not implemented")
amount = self.count(query, False)
if not isinstance(query, Query):
raise SyntaxError("Not Supported")
filter = None
if query:
filter = self.expand(query)
# do not try to update id fields to avoid backend errors
modify = {'$set': dict((k.name, self.represent(v, k.type)) for
k, v in fields if (not k.name in ("_id", "id")))}
try:
result = self.connection[tablename].update(filter,
modify, multi=True, safe=safe)
if safe:
try:
# if result count is available fetch it
return result["n"]
except (KeyError, AttributeError, TypeError):
return amount
else:
return amount
except Exception, e:
            # TODO Reverse update query to verify that the query succeeded
raise RuntimeError("uncaught exception when updating rows: %s" % e)
def delete(self, tablename, query, safe=None):
if safe is None:
safe = self.safe
amount = 0
amount = self.count(query, False)
if not isinstance(query, Query):
raise RuntimeError("query type %s is not supported" % \
type(query))
filter = self.expand(query)
self.connection[tablename].remove(filter, safe=safe)
return amount
def bulk_insert(self, table, items):
return [self.insert(table,item) for item in items]
## OPERATORS
def INVERT(self, first):
#print "in invert first=%s" % first
return '-%s' % self.expand(first)
# TODO This will probably not work:(
def NOT(self, first):
return {'$not': self.expand(first)}
def AND(self,first,second):
# pymongo expects: .find({'$and': [{'x':'1'}, {'y':'2'}]})
return {'$and': [self.expand(first),self.expand(second)]}
def OR(self,first,second):
# pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]})
return {'$or': [self.expand(first),self.expand(second)]}
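    # Illustrative sketch (assumed field names, not from this module):
    # a DAL query such as
    #     (db.person.name == 'Max') & (db.person.age > 18)
    # expands into a pymongo filter document roughly like
    #     {'$and': [{'name': 'Max'}, {'age': {'$gt': 18}}]}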
def BELONGS(self, first, second):
if isinstance(second, str):
return {self.expand(first) : {"$in" : [ second[:-1]]} }
elif second==[] or second==() or second==set():
return {1:0}
items = [self.expand(item, first.type) for item in second]
return {self.expand(first) : {"$in" : items} }
def EQ(self,first,second=None):
result = {}
result[self.expand(first)] = self.expand(second)
return result
def NE(self, first, second=None):
result = {}
result[self.expand(first)] = {'$ne': self.expand(second)}
return result
def LT(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s < None" % first)
result = {}
result[self.expand(first)] = {'$lt': self.expand(second)}
return result
def LE(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s <= None" % first)
result = {}
result[self.expand(first)] = {'$lte': self.expand(second)}
return result
def GT(self,first,second):
result = {}
result[self.expand(first)] = {'$gt': self.expand(second)}
return result
def GE(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s >= None" % first)
result = {}
result[self.expand(first)] = {'$gte': self.expand(second)}
return result
def ADD(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '%s + %s' % (self.expand(first),
self.expand(second, first.type))
def SUB(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '(%s - %s)' % (self.expand(first),
self.expand(second, first.type))
def MUL(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '(%s * %s)' % (self.expand(first),
self.expand(second, first.type))
def DIV(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '(%s / %s)' % (self.expand(first),
self.expand(second, first.type))
def MOD(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '(%s %% %s)' % (self.expand(first),
self.expand(second, first.type))
def AS(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '%s AS %s' % (self.expand(first), second)
# We could implement an option that simulates a full featured SQL
# database. But I think the option should be set explicit or
# implemented as another library.
def ON(self, first, second):
raise NotImplementedError("This is not possible in NoSQL" +
" but can be simulated with a wrapper.")
return '%s ON %s' % (self.expand(first), self.expand(second))
    # BELOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCTIONS
    # WHICH ONE IS BEST?
def COMMA(self, first, second):
return '%s, %s' % (self.expand(first), self.expand(second))
def LIKE(self, first, second):
#escaping regex operators?
return {self.expand(first): ('%s' % \
self.expand(second, 'string').replace('%','/'))}
def ILIKE(self, first, second):
val = second if isinstance(second,self.ObjectId) else {
'$regex': second.replace('%', ''), '$options': 'i'}
return {self.expand(first): val}
def STARTSWITH(self, first, second):
#escaping regex operators?
return {self.expand(first): ('/^%s/' % \
self.expand(second, 'string'))}
def ENDSWITH(self, first, second):
#escaping regex operators?
        return {self.expand(first): ('/%s$/' % \
self.expand(second, 'string'))}
def CONTAINS(self, first, second, case_sensitive=False):
        # case_sensitive is silently ignored; matches are always case
        # sensitive. There is a technical difference, but mongodb doesn't
        # support it and the result will be the same
val = second if isinstance(second,self.ObjectId) else \
{'$regex':".*" + re.escape(self.expand(second, 'string')) + ".*"}
return {self.expand(first) : val}
def LIKE(self, first, second):
import re
return {self.expand(first): {'$regex': \
re.escape(self.expand(second,
'string')).replace('%','.*')}}
    #TODO verify full compatibility with official SQL Like operator
def STARTSWITH(self, first, second):
#TODO Solve almost the same problem as with endswith
import re
return {self.expand(first): {'$regex' : '^' +
re.escape(self.expand(second,
'string'))}}
    #TODO verify full compatibility with official SQL Like operator
def ENDSWITH(self, first, second):
#escaping regex operators?
        #TODO if we search for a name like zsa_corbitt and the function
        # is endswith('a') then that name is also returned,
        # although it ends with a t
import re
return {self.expand(first): {'$regex': \
re.escape(self.expand(second, 'string')) + '$'}}
    #TODO verify full compatibility with the official Oracle CONTAINS operator
def CONTAINS(self, first, second, case_sensitive=False):
        # case_sensitive is silently ignored; matches are always case
        # sensitive. There is a technical difference, but mongodb doesn't
        # support it and the result will be the same
        #TODO contains operators need to be transformed to Regex
return {self.expand(first) : {'$regex': \
".*" + re.escape(self.expand(second, 'string')) + ".*"}}
class IMAPAdapter(NoSQLAdapter):
drivers = ('imaplib',)
""" IMAP server adapter
This class is intended as an interface with
email IMAP servers to perform simple queries in the
web2py DAL query syntax, so email read, search and
    other related IMAP mail services (such as those implemented
    by brands like Google(r) and Yahoo!(r))
    can be managed from web2py applications.
The code uses examples by Yuji Tomita on this post:
http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137
    and is based on the docs for Python imaplib, Python email
    and the email IETF RFCs (i.e. RFC2060 and RFC3501)
    This adapter was tested with a small set of operations with Gmail(r).
    Requests to other services could raise command syntax and response data issues.
It creates its table and field names "statically",
meaning that the developer should leave the table and field
definitions to the DAL instance by calling the adapter's
.define_tables() method. The tables are defined with the
IMAP server mailbox list information.
.define_tables() returns a dictionary mapping dal tablenames
to the server mailbox names with the following structure:
{<tablename>: str <server mailbox name>}
Here is a list of supported fields:
Field Type Description
################################################################
uid string
answered boolean Flag
created date
content list:string A list of dict text or html parts
to string
cc string
bcc string
size integer the amount of octets of the message*
deleted boolean Flag
draft boolean Flag
flagged boolean Flag
sender string
recent boolean Flag
seen boolean Flag
subject string
mime string The mime header declaration
email string The complete RFC822 message**
attachments <type list> Each non text part as dict
encoding string The main detected encoding
*At the application side it is measured as the length of the RFC822
message string
WARNING: As row id's are mapped to email sequence numbers,
make sure your imap client web2py app does not delete messages
during select or update actions, to prevent
updating or deleting different messages.
Sequence numbers change whenever the mailbox is updated.
    To avoid these sequence number issues, the use of uid fields
    in query references is recommended (although the update and delete
in separate actions rule still applies).
# This is the code recommended to start imap support
# at the app's model:
imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl
imapdb.define_tables()
Here is an (incomplete) list of possible imap commands:
# Count today's unseen messages
# smaller than 6000 octets from the
# inbox mailbox
q = imapdb.INBOX.seen == False
q &= imapdb.INBOX.created == datetime.date.today()
q &= imapdb.INBOX.size < 6000
unread = imapdb(q).count()
# Fetch last query messages
rows = imapdb(q).select()
# it is also possible to filter query select results with limitby and
# sequences of mailbox fields
set.select(<fields sequence>, limitby=(<int>, <int>))
# Mark last query messages as seen
messages = [row.uid for row in rows]
seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True)
# Delete messages in the imap database that have mails from mr. Gumby
deleted = 0
    for mailbox in imapdb.tables:
deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete()
    # It is also possible to mark messages for deletion instead of erasing them
# directly with set.update(deleted=True)
    # This object gives access
# to the adapter auto mailbox
# mapped names (which native
# mailbox has what table name)
imapdb.mailboxes <dict> # tablename, server native name pairs
# To retrieve a table native mailbox name use:
imapdb.<table>.mailbox
### New features v2.4.1:
# Declare mailboxes statically with tablename, name pairs
# This avoids the extra server names retrieval
imapdb.define_tables({"inbox": "INBOX"})
# Selects without content/attachments/email columns will only
# fetch header and flags
imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject)
"""
types = {
'string': str,
'text': str,
'date': datetime.date,
'datetime': datetime.datetime,
'id': long,
'boolean': bool,
'integer': int,
'bigint': long,
'blob': str,
'list:string': str,
}
dbengine = 'imap'
REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$')
def __init__(self,
db,
uri,
pool_size=0,
folder=None,
db_codec ='UTF-8',
credential_decoder=IDENTITY,
driver_args={},
adapter_args={},
do_connect=True,
after_connection=None):
# db uri: user@example.com:password@imap.server.com:123
# TODO: max size adapter argument for preventing large mail transfers
self.db = db
self.uri = uri
if do_connect: self.find_driver(adapter_args)
self.pool_size=pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.credential_decoder = credential_decoder
self.driver_args = driver_args
self.adapter_args = adapter_args
self.mailbox_size = None
self.static_names = None
self.charset = sys.getfilesystemencoding()
# imap class
self.imap4 = None
uri = uri.split("://")[1]
""" MESSAGE is an identifier for sequence number"""
self.flags = {'deleted': '\\Deleted', 'draft': '\\Draft',
'flagged': '\\Flagged', 'recent': '\\Recent',
'seen': '\\Seen', 'answered': '\\Answered'}
self.search_fields = {
'id': 'MESSAGE', 'created': 'DATE',
'uid': 'UID', 'sender': 'FROM',
'to': 'TO', 'cc': 'CC',
'bcc': 'BCC', 'content': 'TEXT',
'size': 'SIZE', 'deleted': '\\Deleted',
'draft': '\\Draft', 'flagged': '\\Flagged',
'recent': '\\Recent', 'seen': '\\Seen',
'subject': 'SUBJECT', 'answered': '\\Answered',
'mime': None, 'email': None,
'attachments': None
}
db['_lastsql'] = ''
m = self.REGEX_URI.match(uri)
user = m.group('user')
password = m.group('password')
host = m.group('host')
port = int(m.group('port'))
over_ssl = False
if port==993:
over_ssl = True
driver_args.update(host=host,port=port, password=password, user=user)
def connector(driver_args=driver_args):
            # it is assumed that authentication always succeeds
# TODO: support direct connection and login tests
if over_ssl:
self.imap4 = self.driver.IMAP4_SSL
else:
self.imap4 = self.driver.IMAP4
connection = self.imap4(driver_args["host"], driver_args["port"])
data = connection.login(driver_args["user"], driver_args["password"])
# static mailbox list
connection.mailbox_names = None
# dummy cursor function
connection.cursor = lambda : True
return connection
self.db.define_tables = self.define_tables
self.connector = connector
if do_connect: self.reconnect()
def reconnect(self, f=None, cursor=True):
"""
        IMAP4 pool connection method
        The imap connection lacks a cursor command, so a custom
        command is provided as a replacement for connection pooling
        to prevent uncaught remote session closing
"""
if getattr(self,'connection',None) != None:
return
if f is None:
f = self.connector
if not self.pool_size:
self.connection = f()
self.cursor = cursor and self.connection.cursor()
else:
POOLS = ConnectionPool.POOLS
uri = self.uri
while True:
GLOBAL_LOCKER.acquire()
if not uri in POOLS:
POOLS[uri] = []
if POOLS[uri]:
self.connection = POOLS[uri].pop()
GLOBAL_LOCKER.release()
self.cursor = cursor and self.connection.cursor()
if self.cursor and self.check_active_connection:
try:
# check if connection is alive or close it
result, data = self.connection.list()
except:
# Possible connection reset error
# TODO: read exception class
self.connection = f()
break
else:
GLOBAL_LOCKER.release()
self.connection = f()
self.cursor = cursor and self.connection.cursor()
break
self.after_connection_hook()
def get_last_message(self, tablename):
last_message = None
# request mailbox list to the server if needed.
if not isinstance(self.connection.mailbox_names, dict):
self.get_mailboxes()
try:
result = self.connection.select(
self.connection.mailbox_names[tablename])
last_message = int(result[1][0])
# Last message must be a positive integer
if last_message == 0:
last_message = 1
except (IndexError, ValueError, TypeError, KeyError):
e = sys.exc_info()[1]
LOGGER.debug("Error retrieving the last mailbox" +
" sequence number. %s" % str(e))
return last_message
def get_uid_bounds(self, tablename):
if not isinstance(self.connection.mailbox_names, dict):
self.get_mailboxes()
# fetch first and last messages
# return (first, last) messages uid's
last_message = self.get_last_message(tablename)
result, data = self.connection.uid("search", None, "(ALL)")
uid_list = data[0].strip().split()
if len(uid_list) <= 0:
return None
else:
return (uid_list[0], uid_list[-1])
    def convert_date(self, date, add=None, imf=False):
        """ Convert a date object to a string with d-Mon-Y style
            for IMAP, or the inverse.
            add <timedelta> is added to the date object
        """
        if add is None:
            add = datetime.timedelta()
months = [None, "JAN","FEB","MAR","APR","MAY","JUN",
"JUL", "AUG","SEP","OCT","NOV","DEC"]
if isinstance(date, basestring):
# Prevent unexpected date response format
try:
if "," in date:
dayname, datestring = date.split(",")
else:
dayname, datestring = None, date
date_list = datestring.strip().split()
year = int(date_list[2])
month = months.index(date_list[1].upper())
day = int(date_list[0])
hms = map(int, date_list[3].split(":"))
return datetime.datetime(year, month, day,
hms[0], hms[1], hms[2]) + add
except (ValueError, AttributeError, IndexError), e:
LOGGER.error("Could not parse date text: %s. %s" %
(date, e))
return None
elif isinstance(date, (datetime.date, datetime.datetime)):
if imf: date_format = "%a, %d %b %Y %H:%M:%S %z"
else: date_format = "%d-%b-%Y"
return (date + add).strftime(date_format)
else:
return None
@staticmethod
def header_represent(f, r):
from email.header import decode_header
text, encoding = decode_header(f)[0]
if encoding:
text = text.decode(encoding).encode('utf-8')
return text
def encode_text(self, text, charset, errors="replace"):
""" convert text for mail to unicode"""
if text is None:
text = ""
else:
if isinstance(text, str):
if charset is None:
text = unicode(text, "utf-8", errors)
else:
text = unicode(text, charset, errors)
else:
raise Exception("Unsupported mail text type %s" % type(text))
return text.encode("utf-8")
def get_charset(self, message):
charset = message.get_content_charset()
return charset
def get_mailboxes(self):
""" Query the mail database for mailbox names """
if self.static_names:
# statically defined mailbox names
self.connection.mailbox_names = self.static_names
return self.static_names.keys()
mailboxes_list = self.connection.list()
self.connection.mailbox_names = dict()
mailboxes = list()
x = 0
for item in mailboxes_list[1]:
x = x + 1
item = item.strip()
if not "NOSELECT" in item.upper():
sub_items = item.split("\"")
sub_items = [sub_item for sub_item in sub_items \
if len(sub_item.strip()) > 0]
# mailbox = sub_items[len(sub_items) -1]
mailbox = sub_items[-1].strip()
# remove unwanted characters and store original names
# Don't allow leading non alphabetic characters
mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox)))
mailboxes.append(mailbox_name)
self.connection.mailbox_names[mailbox_name] = mailbox
return mailboxes
def get_query_mailbox(self, query):
nofield = True
tablename = None
attr = query
while nofield:
if hasattr(attr, "first"):
attr = attr.first
if isinstance(attr, Field):
return attr.tablename
elif isinstance(attr, Query):
pass
else:
return None
else:
return None
return tablename
def is_flag(self, flag):
if self.search_fields.get(flag, None) in self.flags.values():
return True
else:
return False
def define_tables(self, mailbox_names=None):
"""
        Auto create common IMAP fields
        This function creates field definitions "statically",
        meaning that custom fields as in other adapters are
        not supported and definitions are handled on a service/mode
        basis (local syntax for Gmail(r), Ymail(r)).
Returns a dictionary with tablename, server native mailbox name
pairs.
"""
if mailbox_names:
# optional statically declared mailboxes
self.static_names = mailbox_names
else:
self.static_names = None
if not isinstance(self.connection.mailbox_names, dict):
self.get_mailboxes()
names = self.connection.mailbox_names.keys()
for name in names:
self.db.define_table("%s" % name,
Field("uid", writable=False),
Field("created", "datetime", writable=False),
Field("content", "text", writable=False),
Field("to", writable=False),
Field("cc", writable=False),
Field("bcc", writable=False),
Field("sender", writable=False),
Field("size", "integer", writable=False),
Field("subject", writable=False),
Field("mime", writable=False),
Field("email", "text", writable=False, readable=False),
Field("attachments", "text", writable=False, readable=False),
Field("encoding", writable=False),
Field("answered", "boolean"),
Field("deleted", "boolean"),
Field("draft", "boolean"),
Field("flagged", "boolean"),
Field("recent", "boolean", writable=False),
Field("seen", "boolean")
)
# Set a special _mailbox attribute for storing
# native mailbox names
self.db[name].mailbox = \
self.connection.mailbox_names[name]
# decode quoted printable
self.db[name].to.represent = self.db[name].cc.represent = \
self.db[name].bcc.represent = self.db[name].sender.represent = \
self.db[name].subject.represent = self.header_represent
# Set the db instance mailbox collections
self.db.mailboxes = self.connection.mailbox_names
return self.db.mailboxes
def create_table(self, *args, **kwargs):
# not implemented
# but required by DAL
pass
def select(self, query, fields, attributes):
""" Search and Fetch records and return web2py rows
"""
# move this statement elsewhere (upper-level)
if use_common_filters(query):
query = self.common_filter(query, [self.get_query_mailbox(query),])
import email
# get records from imap server with search + fetch
# convert results to a dictionary
tablename = None
fetch_results = list()
if isinstance(query, Query):
tablename = self.get_table(query)
mailbox = self.connection.mailbox_names.get(tablename, None)
if mailbox is None:
raise ValueError("Mailbox name not found: %s" % mailbox)
else:
# select with readonly
result, selected = self.connection.select(mailbox, True)
if result != "OK":
raise Exception("IMAP error: %s" % selected)
self.mailbox_size = int(selected[0])
search_query = "(%s)" % str(query).strip()
search_result = self.connection.uid("search", None, search_query)
# Normal IMAP response OK is assumed (change this)
if search_result[0] == "OK":
# For "light" remote server responses just get the first
# ten records (change for non-experimental implementation)
# However, light responses are not guaranteed with this
# approach, just fewer messages.
limitby = attributes.get('limitby', None)
messages_set = search_result[1][0].split()
# descending order
messages_set.reverse()
if limitby is not None:
# TODO: orderby, asc/desc, limitby from complete message set
messages_set = messages_set[int(limitby[0]):int(limitby[1])]
# keep the requests small for header/flags
if any([(field.name in ["content", "size",
"attachments", "email"]) for
field in fields]):
imap_fields = "(RFC822 FLAGS)"
else:
imap_fields = "(RFC822.HEADER FLAGS)"
if len(messages_set) > 0:
# create fetch results object list
                    # fetch each remote message and store it in memory
# (change to multi-fetch command syntax for faster
# transactions)
for uid in messages_set:
# fetch the RFC822 message body
typ, data = self.connection.uid("fetch", uid, imap_fields)
if typ == "OK":
fr = {"message": int(data[0][0].split()[0]),
"uid": long(uid),
"email": email.message_from_string(data[0][1]),
"raw_message": data[0][1]}
fr["multipart"] = fr["email"].is_multipart()
# fetch flags for the message
fr["flags"] = self.driver.ParseFlags(data[1])
fetch_results.append(fr)
else:
# error retrieving the message body
raise Exception("IMAP error retrieving the body: %s" % data)
else:
raise Exception("IMAP search error: %s" % search_result[1])
elif isinstance(query, (Expression, basestring)):
raise NotImplementedError()
else:
raise TypeError("Unexpected query type")
imapqry_dict = {}
imapfields_dict = {}
if len(fields) == 1 and isinstance(fields[0], SQLALL):
allfields = True
elif len(fields) == 0:
allfields = True
else:
allfields = False
if allfields:
colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
else:
colnames = ["%s.%s" % (tablename, field.name) for field in fields]
for k in colnames:
imapfields_dict[k] = k
imapqry_list = list()
imapqry_array = list()
for fr in fetch_results:
attachments = []
content = []
size = 0
n = int(fr["message"])
item_dict = dict()
message = fr["email"]
uid = fr["uid"]
charset = self.get_charset(message)
flags = fr["flags"]
raw_message = fr["raw_message"]
# Return messages data mapping static fields
# and fetched results. Mapping should be made
# outside the select function (with auxiliary
# instance methods)
            # pending: search flag states through the email message
# instances for correct output
# preserve subject encoding (ASCII/quoted printable)
if "%s.id" % tablename in colnames:
item_dict["%s.id" % tablename] = n
if "%s.created" % tablename in colnames:
item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
if "%s.uid" % tablename in colnames:
item_dict["%s.uid" % tablename] = uid
if "%s.sender" % tablename in colnames:
# If there is no encoding found in the message header
# force utf-8 replacing characters (change this to
# module's defaults). Applies to .sender, .to, .cc and .bcc fields
item_dict["%s.sender" % tablename] = message["From"]
if "%s.to" % tablename in colnames:
item_dict["%s.to" % tablename] = message["To"]
if "%s.cc" % tablename in colnames:
if "Cc" in message.keys():
item_dict["%s.cc" % tablename] = message["Cc"]
else:
item_dict["%s.cc" % tablename] = ""
if "%s.bcc" % tablename in colnames:
if "Bcc" in message.keys():
item_dict["%s.bcc" % tablename] = message["Bcc"]
else:
item_dict["%s.bcc" % tablename] = ""
if "%s.deleted" % tablename in colnames:
item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
if "%s.draft" % tablename in colnames:
item_dict["%s.draft" % tablename] = "\\Draft" in flags
if "%s.flagged" % tablename in colnames:
item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
if "%s.recent" % tablename in colnames:
item_dict["%s.recent" % tablename] = "\\Recent" in flags
if "%s.seen" % tablename in colnames:
item_dict["%s.seen" % tablename] = "\\Seen" in flags
if "%s.subject" % tablename in colnames:
item_dict["%s.subject" % tablename] = message["Subject"]
if "%s.answered" % tablename in colnames:
item_dict["%s.answered" % tablename] = "\\Answered" in flags
if "%s.mime" % tablename in colnames:
item_dict["%s.mime" % tablename] = message.get_content_type()
if "%s.encoding" % tablename in colnames:
item_dict["%s.encoding" % tablename] = charset
# Here goes the whole RFC822 body as an email instance
# for controller side custom processing
# The message is stored as a raw string
# >> email.message_from_string(raw string)
# returns a Message object for enhanced object processing
if "%s.email" % tablename in colnames:
# WARNING: no encoding performed (raw message)
item_dict["%s.email" % tablename] = raw_message
# Size measure as suggested in a Velocity Reviews post
# by Tim Williams: "how to get size of email attachment"
            # Note: len() and the server's RFC822.SIZE report don't match
# To retrieve the server size for representation would add a new
# fetch transaction to the process
for part in message.walk():
maintype = part.get_content_maintype()
if ("%s.attachments" % tablename in colnames) or \
("%s.content" % tablename in colnames):
payload = part.get_payload(decode=True)
if payload:
filename = part.get_filename()
values = {"mime": part.get_content_type()}
if ((filename or not "text" in maintype) and
("%s.attachments" % tablename in colnames)):
values.update({"payload": payload,
"filename": filename,
"encoding": part.get_content_charset(),
"disposition": part["Content-Disposition"]})
attachments.append(values)
elif (("text" in maintype) and
("%s.content" % tablename in colnames)):
values.update({"text": self.encode_text(payload,
self.get_charset(part))})
content.append(values)
if "%s.size" % tablename in colnames:
if part is not None:
size += len(str(part))
item_dict["%s.content" % tablename] = content
item_dict["%s.attachments" % tablename] = attachments
item_dict["%s.size" % tablename] = size
imapqry_list.append(item_dict)
# extra object mapping for the sake of rows object
# creation (sends an array or lists)
for item_dict in imapqry_list:
imapqry_array_item = list()
for fieldname in colnames:
imapqry_array_item.append(item_dict[fieldname])
imapqry_array.append(imapqry_array_item)
# parse result and return a rows object
processor = attributes.get('processor',self.parse)
return processor(imapqry_array, fields, colnames)
def insert(self, table, fields):
def add_payload(message, obj):
payload = Message()
encoding = obj.get("encoding", "utf-8")
if encoding and (encoding.upper() in
("BASE64", "7BIT", "8BIT", "BINARY")):
payload.add_header("Content-Transfer-Encoding", encoding)
else:
payload.set_charset(encoding)
mime = obj.get("mime", None)
if mime:
payload.set_type(mime)
if "text" in obj:
payload.set_payload(obj["text"])
elif "payload" in obj:
payload.set_payload(obj["payload"])
if "filename" in obj and obj["filename"]:
payload.add_header("Content-Disposition",
"attachment", filename=obj["filename"])
message.attach(payload)
mailbox = table.mailbox
d = dict(((k.name, v) for k, v in fields))
date_time = d.get("created") or datetime.datetime.now()
struct_time = date_time.timetuple()
if len(d) > 0:
message = d.get("email", None)
attachments = d.get("attachments", [])
content = d.get("content", [])
flags = " ".join(["\\%s" % flag.capitalize() for flag in
("answered", "deleted", "draft", "flagged",
"recent", "seen") if d.get(flag, False)])
if not message:
from email.message import Message
mime = d.get("mime", None)
charset = d.get("encoding", None)
message = Message()
message["from"] = d.get("sender", "")
message["subject"] = d.get("subject", "")
message["date"] = self.convert_date(date_time, imf=True)
if mime:
message.set_type(mime)
if charset:
message.set_charset(charset)
for item in ("to", "cc", "bcc"):
value = d.get(item, "")
if isinstance(value, basestring):
message[item] = value
else:
message[item] = ";".join([i for i in
value])
if (not message.is_multipart() and
(not message.get_content_type().startswith(
"multipart"))):
if isinstance(content, basestring):
message.set_payload(content)
elif len(content) > 0:
message.set_payload(content[0]["text"])
else:
[add_payload(message, c) for c in content]
[add_payload(message, a) for a in attachments]
message = message.as_string()
result, data = self.connection.append(mailbox, flags, struct_time, message)
if result == "OK":
uid = int(re.findall("\d+", str(data))[-1])
return self.db(table.uid==uid).select(table.id).first().id
else:
raise Exception("IMAP message append failed: %s" % data)
else:
raise NotImplementedError("IMAP empty insert is not implemented")
def update(self, tablename, query, fields):
# TODO: the adapter should implement an .expand method
commands = list()
rowcount = 0
if use_common_filters(query):
query = self.common_filter(query, [tablename,])
mark = []
unmark = []
if query:
for item in fields:
field = item[0]
name = field.name
value = item[1]
if self.is_flag(name):
flag = self.search_fields[name]
if (value is not None) and (flag != "\\Recent"):
if value:
mark.append(flag)
else:
unmark.append(flag)
result, data = self.connection.select(
self.connection.mailbox_names[tablename])
string_query = "(%s)" % query
result, data = self.connection.search(None, string_query)
store_list = [item.strip() for item in data[0].split()
if item.strip().isdigit()]
# build commands for marked flags
for number in store_list:
result = None
if len(mark) > 0:
commands.append((number, "+FLAGS", "(%s)" % " ".join(mark)))
if len(unmark) > 0:
commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark)))
for command in commands:
result, data = self.connection.store(*command)
if result == "OK":
rowcount += 1
else:
raise Exception("IMAP storing error: %s" % data)
return rowcount
def count(self,query,distinct=None):
counter = 0
tablename = self.get_query_mailbox(query)
if query and tablename is not None:
if use_common_filters(query):
query = self.common_filter(query, [tablename,])
result, data = self.connection.select(self.connection.mailbox_names[tablename])
string_query = "(%s)" % query
result, data = self.connection.search(None, string_query)
store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
counter = len(store_list)
return counter
def delete(self, tablename, query):
counter = 0
if query:
if use_common_filters(query):
query = self.common_filter(query, [tablename,])
result, data = self.connection.select(self.connection.mailbox_names[tablename])
string_query = "(%s)" % query
result, data = self.connection.search(None, string_query)
store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
for number in store_list:
result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)")
if result == "OK":
counter += 1
else:
raise Exception("IMAP store error: %s" % data)
if counter > 0:
result, data = self.connection.expunge()
return counter
def BELONGS(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
values = [str(val) for val in second if str(val).isdigit()]
result = "%s" % ",".join(values).strip()
elif name == "UID":
values = [str(val) for val in second if str(val).isdigit()]
result = "UID %s" % ",".join(values).strip()
else:
raise Exception("Operation not supported")
# result = "(%s %s)" % (self.expand(first), self.expand(second))
return result
def CONTAINS(self, first, second, case_sensitive=False):
        # case_sensitive is silently ignored: IMAP SEARCH matching is not case sensitive
result = None
name = self.search_fields[first.name]
if name in ("FROM", "TO", "SUBJECT", "TEXT"):
result = "%s \"%s\"" % (name, self.expand(second))
else:
if first.name in ("cc", "bcc"):
result = "%s \"%s\"" % (first.name.upper(), self.expand(second))
elif first.name == "mime":
result = "HEADER Content-Type \"%s\"" % self.expand(second)
else:
raise Exception("Operation not supported")
return result
def GT(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
last_message = self.get_last_message(first.tablename)
result = "%d:%d" % (int(self.expand(second)) + 1, last_message)
elif name == "UID":
# GT and LT may not return
# expected sets depending on
# the uid format implemented
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError:
e = sys.exc_info()[1]
LOGGER.debug("Error requesting uid bounds: %s", str(e))
return ""
try:
lower_limit = int(self.expand(second)) + 1
except (ValueError, TypeError):
e = sys.exc_info()[1]
raise Exception("Operation not supported (non integer UID)")
result = "UID %s:%s" % (lower_limit, threshold)
elif name == "DATE":
result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1))
elif name == "SIZE":
result = "LARGER %s" % self.expand(second)
else:
raise Exception("Operation not supported")
return result
def GE(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
last_message = self.get_last_message(first.tablename)
result = "%s:%s" % (self.expand(second), last_message)
elif name == "UID":
# GT and LT may not return
# expected sets depending on
# the uid format implemented
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError:
e = sys.exc_info()[1]
LOGGER.debug("Error requesting uid bounds: %s", str(e))
return ""
lower_limit = self.expand(second)
result = "UID %s:%s" % (lower_limit, threshold)
elif name == "DATE":
result = "SINCE %s" % self.convert_date(second)
else:
raise Exception("Operation not supported")
return result
def LT(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
result = "%s:%s" % (1, int(self.expand(second)) - 1)
elif name == "UID":
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError:
e = sys.exc_info()[1]
LOGGER.debug("Error requesting uid bounds: %s", str(e))
return ""
try:
upper_limit = int(self.expand(second)) - 1
except (ValueError, TypeError):
e = sys.exc_info()[1]
raise Exception("Operation not supported (non integer UID)")
result = "UID %s:%s" % (pedestal, upper_limit)
elif name == "DATE":
result = "BEFORE %s" % self.convert_date(second)
elif name == "SIZE":
result = "SMALLER %s" % self.expand(second)
else:
raise Exception("Operation not supported")
return result
def LE(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
result = "%s:%s" % (1, self.expand(second))
elif name == "UID":
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError:
e = sys.exc_info()[1]
LOGGER.debug("Error requesting uid bounds: %s", str(e))
return ""
upper_limit = int(self.expand(second))
result = "UID %s:%s" % (pedestal, upper_limit)
elif name == "DATE":
result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1))
else:
raise Exception("Operation not supported")
return result
def NE(self, first, second=None):
if (second is None) and isinstance(first, Field):
# All records special table query
if first.type == "id":
return self.GE(first, 1)
result = self.NOT(self.EQ(first, second))
result = result.replace("NOT NOT", "").strip()
return result
def EQ(self,first,second):
name = self.search_fields[first.name]
result = None
if name is not None:
if name == "MESSAGE":
# query by message sequence number
result = "%s" % self.expand(second)
elif name == "UID":
result = "UID %s" % self.expand(second)
elif name == "DATE":
result = "ON %s" % self.convert_date(second)
elif name in self.flags.values():
if second:
result = "%s" % (name.upper()[1:])
else:
result = "NOT %s" % (name.upper()[1:])
else:
raise Exception("Operation not supported")
else:
raise Exception("Operation not supported")
return result
def AND(self, first, second):
result = "%s %s" % (self.expand(first), self.expand(second))
return result
def OR(self, first, second):
result = "OR %s %s" % (self.expand(first), self.expand(second))
return "%s" % result.replace("OR OR", "OR")
def NOT(self, first):
result = "NOT %s" % self.expand(first)
return result
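# Illustrative sketch (not executed): the operator methods above translate DAL
# queries into IMAP SEARCH criteria. Assuming an IMAP connection whose INBOX
# mailbox has been mapped to a table named INBOX, a query such as
#
#     imapdb(imapdb.INBOX.seen == False).select(limitby=(0, 10))
#
# is expanded by EQ/AND into a criterion like "(NOT SEEN)", which select()
# sends to the server with UID SEARCH before fetching headers and flags for
# the first ten matching messages.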
########################################################################
# end of adapters
########################################################################
ADAPTERS = {
'sqlite': SQLiteAdapter,
'spatialite': SpatiaLiteAdapter,
'sqlite:memory': SQLiteAdapter,
'spatialite:memory': SpatiaLiteAdapter,
'mysql': MySQLAdapter,
'postgres': PostgreSQLAdapter,
'postgres:psycopg2': PostgreSQLAdapter,
'postgres:pg8000': PostgreSQLAdapter,
'postgres2:psycopg2': NewPostgreSQLAdapter,
'postgres2:pg8000': NewPostgreSQLAdapter,
'oracle': OracleAdapter,
'mssql': MSSQLAdapter,
'mssql2': MSSQL2Adapter,
'mssql3': MSSQL3Adapter,
'mssql4' : MSSQL4Adapter,
'vertica': VerticaAdapter,
'sybase': SybaseAdapter,
'db2': DB2Adapter,
'teradata': TeradataAdapter,
'informix': InformixAdapter,
'informix-se': InformixSEAdapter,
'firebird': FireBirdAdapter,
'firebird_embedded': FireBirdAdapter,
'ingres': IngresAdapter,
'ingresu': IngresUnicodeAdapter,
'sapdb': SAPDBAdapter,
'cubrid': CubridAdapter,
'jdbc:sqlite': JDBCSQLiteAdapter,
'jdbc:sqlite:memory': JDBCSQLiteAdapter,
'jdbc:postgres': JDBCPostgreSQLAdapter,
'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
'google:datastore': GoogleDatastoreAdapter,
'google:sql': GoogleSQLAdapter,
'couchdb': CouchDBAdapter,
'mongodb': MongoDBAdapter,
'imap': IMAPAdapter
}
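# Illustrative sketch (not executed): DAL.__init__ below matches the scheme
# part of the connection URI against the keys of ADAPTERS and instantiates
# the corresponding adapter. Host names and credentials are placeholders:
#
#     DAL('sqlite://storage.sqlite')                        # SQLiteAdapter
#     DAL('postgres:psycopg2://user:pass@localhost/mydb')   # PostgreSQLAdapter
#     DAL('imap://user:password@mail.example.com:993')      # IMAPAdapter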
def sqlhtml_validators(field):
"""
Field type validation, using web2py's validators mechanism.
makes sure the content of a field is in line with the declared
fieldtype
"""
db = field.db
try:
from gluon import validators
except ImportError:
return []
field_type, field_length = field.type, field.length
if isinstance(field_type, SQLCustomType):
if hasattr(field_type, 'validator'):
return field_type.validator
else:
field_type = field_type.type
elif not isinstance(field_type,str):
return []
requires=[]
def ff(r,id):
row=r(id)
if not row:
return id
elif hasattr(r, '_format') and isinstance(r._format,str):
return r._format % row
elif hasattr(r, '_format') and callable(r._format):
return r._format(row)
else:
return id
if field_type in (('string', 'text', 'password')):
requires.append(validators.IS_LENGTH(field_length))
elif field_type == 'json':
requires.append(validators.IS_EMPTY_OR(validators.IS_JSON(native_json=field.db._adapter.native_json)))
elif field_type == 'double' or field_type == 'float':
requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
elif field_type == 'integer':
requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31))
elif field_type == 'bigint':
requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63))
elif field_type.startswith('decimal'):
requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
elif field_type == 'date':
requires.append(validators.IS_DATE())
elif field_type == 'time':
requires.append(validators.IS_TIME())
elif field_type == 'datetime':
requires.append(validators.IS_DATETIME())
elif db and field_type.startswith('reference') and \
field_type.find('.') < 0 and \
field_type[10:] in db.tables:
referenced = db[field_type[10:]]
def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
field.represent = field.represent or repr_ref
if hasattr(referenced, '_format') and referenced._format:
requires = validators.IS_IN_DB(db,referenced._id,
referenced._format)
if field.unique:
requires._and = validators.IS_NOT_IN_DB(db,field)
if field.tablename == field_type[10:]:
return validators.IS_EMPTY_OR(requires)
return requires
elif db and field_type.startswith('list:reference') and \
field_type.find('.') < 0 and \
field_type[15:] in db.tables:
referenced = db[field_type[15:]]
def list_ref_repr(ids, row=None, r=referenced, f=ff):
if not ids:
return None
refs = None
db, id = r._db, r._id
if isinstance(db._adapter, GoogleDatastoreAdapter):
def count(values): return db(id.belongs(values)).select(id)
rx = range(0, len(ids), 30)
refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
else:
refs = db(id.belongs(ids)).select(id)
return (refs and ', '.join(f(r,x.id) for x in refs) or '')
field.represent = field.represent or list_ref_repr
if hasattr(referenced, '_format') and referenced._format:
requires = validators.IS_IN_DB(db,referenced._id,
referenced._format,multiple=True)
else:
requires = validators.IS_IN_DB(db,referenced._id,
multiple=True)
if field.unique:
requires._and = validators.IS_NOT_IN_DB(db,field)
if not field.notnull:
requires = validators.IS_EMPTY_OR(requires)
return requires
elif field_type.startswith('list:'):
        def repr_list(values, row=None): return ', '.join(str(v) for v in (values or []))
field.represent = field.represent or repr_list
if field.unique:
requires.insert(0,validators.IS_NOT_IN_DB(db,field))
sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
if field.notnull and not field_type[:2] in sff:
requires.insert(0, validators.IS_NOT_EMPTY())
elif not field.notnull and field_type[:2] in sff and requires:
requires[-1] = validators.IS_EMPTY_OR(requires[-1])
return requires
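# Illustrative sketch (not executed) of the defaults produced by
# sqlhtml_validators above, assuming gluon.validators is importable:
#
#     Field('name', 'string', length=32, notnull=True)
#         -> [IS_NOT_EMPTY(), IS_LENGTH(32)]
#     Field('age', 'integer')
#         -> [IS_EMPTY_OR(IS_INT_IN_RANGE(-2**31, 2**31))]
#
# Reference fields additionally receive an IS_IN_DB validator when the
# referenced table defines a _format.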
def bar_escape(item):
return str(item).replace('|', '||')
def bar_encode(items):
return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip())
def bar_decode_integer(value):
if not hasattr(value,'split') and hasattr(value,'read'):
value = value.read()
return [long(x) for x in value.split('|') if x.strip()]
def bar_decode_string(value):
return [x.replace('||', '|') for x in
REGEX_UNPACK.split(value[1:-1]) if x.strip()]
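# Illustrative round trip (not executed) for the bar-encoding helpers above,
# which pack list: values into a single text column (REGEX_UNPACK, defined
# earlier in this module, splits on single, non-doubled bars):
#
#     bar_encode([1, 2, 'a|b'])       # -> '|1|2|a||b|'
#     bar_decode_string('|1|2|a||b|') # -> ['1', '2', 'a|b']
#     bar_decode_integer('|1|2|')     # -> [1L, 2L]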
class Row(object):
"""
a dictionary that lets you do d['a'] as well as d.a
this is only used to store a Row
"""
__init__ = lambda self,*args,**kwargs: self.__dict__.update(*args,**kwargs)
def __getitem__(self, k):
if isinstance(k, Table):
try:
return ogetattr(self, k._tablename)
except (KeyError,AttributeError,TypeError):
pass
elif isinstance(k, Field):
try:
return ogetattr(self, k.name)
except (KeyError,AttributeError,TypeError):
pass
try:
return ogetattr(ogetattr(self, k.tablename), k.name)
except (KeyError,AttributeError,TypeError):
pass
key=str(k)
_extra = ogetattr(self, '__dict__').get('_extra', None)
if _extra is not None:
v = _extra.get(key, DEFAULT)
if v != DEFAULT:
return v
try:
return ogetattr(self, key)
except (KeyError,AttributeError,TypeError):
pass
m = REGEX_TABLE_DOT_FIELD.match(key)
if m:
try:
return ogetattr(self, m.group(1))[m.group(2)]
except (KeyError,AttributeError,TypeError):
key = m.group(2)
try:
return ogetattr(self, key)
except (KeyError,AttributeError,TypeError), ae:
try:
self[key] = ogetattr(self,'__get_lazy_reference__')(key)
return self[key]
except:
raise ae
__setitem__ = lambda self, key, value: setattr(self, str(key), value)
__delitem__ = object.__delattr__
__copy__ = lambda self: Row(self)
__call__ = __getitem__
def get(self, key, default=None):
try:
return self.__getitem__(key)
except(KeyError, AttributeError, TypeError):
return self.__dict__.get(key,default)
has_key = __contains__ = lambda self, key: key in self.__dict__
__nonzero__ = lambda self: len(self.__dict__)>0
update = lambda self, *args, **kwargs: self.__dict__.update(*args, **kwargs)
keys = lambda self: self.__dict__.keys()
items = lambda self: self.__dict__.items()
values = lambda self: self.__dict__.values()
__iter__ = lambda self: self.__dict__.__iter__()
iteritems = lambda self: self.__dict__.iteritems()
__str__ = __repr__ = lambda self: '<Row %s>' % self.as_dict()
__int__ = lambda self: object.__getattribute__(self,'id')
__long__ = lambda self: long(object.__getattribute__(self,'id'))
__getattr__ = __getitem__
# def __getattribute__(self, key):
# try:
# return object.__getattribute__(self, key)
# except AttributeError, ae:
# try:
# return self.__get_lazy_reference__(key)
# except:
# raise ae
def __eq__(self,other):
try:
return self.as_dict() == other.as_dict()
except AttributeError:
return False
def __ne__(self,other):
return not (self == other)
def __copy__(self):
return Row(dict(self))
def as_dict(self, datetime_to_str=False, custom_types=None):
SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict]
if isinstance(custom_types,(list,tuple,set)):
SERIALIZABLE_TYPES += custom_types
elif custom_types:
SERIALIZABLE_TYPES.append(custom_types)
d = dict(self)
for k in copy.copy(d.keys()):
v=d[k]
if d[k] is None:
continue
elif isinstance(v,Row):
d[k]=v.as_dict()
elif isinstance(v,Reference):
d[k]=long(v)
elif isinstance(v,decimal.Decimal):
d[k]=float(v)
elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
if datetime_to_str:
d[k] = v.isoformat().replace('T',' ')[:19]
elif not isinstance(v,tuple(SERIALIZABLE_TYPES)):
del d[k]
return d
def as_xml(self, row_name="row", colnames=None, indent=' '):
def f(row,field,indent=' '):
if isinstance(row,Row):
spc = indent+' \n'
items = [f(row[x],x,indent+' ') for x in row]
return '%s<%s>\n%s\n%s</%s>' % (
indent,
field,
spc.join(item for item in items if item),
indent,
field)
elif not callable(row):
if REGEX_ALPHANUMERIC.match(field):
return '%s<%s>%s</%s>' % (indent,field,row,field)
else:
return '%s<extra name="%s">%s</extra>' % \
(indent,field,row)
else:
return None
return f(self, row_name, indent=indent)
def as_json(self, mode="object", default=None, colnames=None,
serialize=True, **kwargs):
"""
serializes the row to a JSON object
kwargs are passed to .as_dict method
only "object" mode supported
serialize = False used by Rows.as_json
TODO: return array mode with query column order
mode and colnames are not implemented
"""
item = self.as_dict(**kwargs)
if serialize:
if have_serializers:
return serializers.json(item,
default=default or
serializers.custom_json)
elif simplejson:
return simplejson.dumps(item)
else:
raise RuntimeError("missing simplejson")
else:
return item
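# Illustrative sketch (not executed) of the access patterns supported by Row
# above; the table and field names are placeholders:
#
#     r = Row(person=Row(id=1, name='John'))
#     r.person.name         # attribute access
#     r['person']['name']   # dict-style access
#     r['person.name']      # dotted "table.field" access via __getitem__
#     r.person.as_dict()    # -> {'id': 1, 'name': 'John'}
#     r.person.as_json()    # same data as a JSON string (needs a serializer)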
################################################################################
# Everything below should be independent of the specifics of the database
# and should work for RDBMs and some NoSQL databases
################################################################################
class SQLCallableList(list):
def __call__(self):
return copy.copy(self)
def smart_query(fields,text):
if not isinstance(fields,(list,tuple)):
fields = [fields]
new_fields = []
for field in fields:
if isinstance(field,Field):
new_fields.append(field)
elif isinstance(field,Table):
for ofield in field:
new_fields.append(ofield)
else:
raise RuntimeError("fields must be a list of fields")
fields = new_fields
field_map = {}
for field in fields:
n = field.name.lower()
if not n in field_map:
field_map[n] = field
n = str(field).lower()
if not n in field_map:
field_map[n] = field
constants = {}
i = 0
while True:
m = REGEX_CONST_STRING.search(text)
if not m: break
text = text[:m.start()]+('#%i' % i)+text[m.end():]
constants[str(i)] = m.group()[1:-1]
i+=1
text = re.sub('\s+',' ',text).lower()
for a,b in [('&','and'),
('|','or'),
('~','not'),
('==','='),
('<','<'),
('>','>'),
('<=','<='),
('>=','>='),
('<>','!='),
('=<','<='),
('=>','>='),
('=','='),
(' less or equal than ','<='),
(' greater or equal than ','>='),
(' equal or less than ','<='),
(' equal or greater than ','>='),
(' less or equal ','<='),
(' greater or equal ','>='),
(' equal or less ','<='),
(' equal or greater ','>='),
(' not equal to ','!='),
(' not equal ','!='),
(' equal to ','='),
(' equal ','='),
(' equals ','='),
(' less than ','<'),
(' greater than ','>'),
(' starts with ','startswith'),
(' ends with ','endswith'),
(' not in ' , 'notbelongs'),
(' in ' , 'belongs'),
(' is ','=')]:
if a[0]==' ':
text = text.replace(' is'+a,' %s ' % b)
text = text.replace(a,' %s ' % b)
text = re.sub('\s+',' ',text).lower()
text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
query = field = neg = op = logic = None
for item in text.split():
if field is None:
if item == 'not':
neg = True
elif not neg and not logic and item in ('and','or'):
logic = item
elif item in field_map:
field = field_map[item]
else:
raise RuntimeError("Invalid syntax")
elif not field is None and op is None:
op = item
elif not op is None:
if item.startswith('#'):
if not item[1:] in constants:
raise RuntimeError("Invalid syntax")
value = constants[item[1:]]
else:
value = item
if field.type in ('text', 'string', 'json'):
if op == '=': op = 'like'
if op == '=': new_query = field==value
elif op == '<': new_query = field<value
elif op == '>': new_query = field>value
elif op == '<=': new_query = field<=value
elif op == '>=': new_query = field>=value
elif op == '!=': new_query = field!=value
elif op == 'belongs': new_query = field.belongs(value.split(','))
elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
elif field.type in ('text', 'string', 'json'):
if op == 'contains': new_query = field.contains(value)
elif op == 'like': new_query = field.like(value)
elif op == 'startswith': new_query = field.startswith(value)
elif op == 'endswith': new_query = field.endswith(value)
else: raise RuntimeError("Invalid operation")
elif field._db._adapter.dbengine=='google:datastore' and \
field.type in ('list:integer', 'list:string', 'list:reference'):
if op == 'contains': new_query = field.contains(value)
else: raise RuntimeError("Invalid operation")
else: raise RuntimeError("Invalid operation")
if neg: new_query = ~new_query
if query is None:
query = new_query
elif logic == 'and':
query &= new_query
elif logic == 'or':
query |= new_query
field = op = neg = logic = None
return query
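# Illustrative sketch (not executed) of smart_query above, assuming a
# db.person table with a 'name' (string) and an 'age' (integer) field:
#
#     smart_query([db.person],
#                 'person.name contains "john" and age greater than 18')
#
# builds a Query roughly equivalent to
#
#     (db.person.name.contains('john')) & (db.person.age > 18)
#
# Quoted literals are protected first, textual operators are normalized
# ("greater than" -> ">"), and field names are resolved through field_map,
# which accepts either "age" or "person.age".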
class DAL(object):
"""
an instance of this class represents a database connection
Example::
db = DAL('sqlite://test.db')
or
db = DAL(**{"uri": ..., "tables": [...]...}) # experimental
db.define_table('tablename', Field('fieldname1'),
Field('fieldname2'))
"""
def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
if not hasattr(THREAD_LOCAL,'db_instances'):
THREAD_LOCAL.db_instances = {}
if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
THREAD_LOCAL.db_instances_zombie = {}
if uri == '<zombie>':
db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
if db_uid in THREAD_LOCAL.db_instances:
db_group = THREAD_LOCAL.db_instances[db_uid]
db = db_group[-1]
elif db_uid in THREAD_LOCAL.db_instances_zombie:
db = THREAD_LOCAL.db_instances_zombie[db_uid]
else:
db = super(DAL, cls).__new__(cls)
THREAD_LOCAL.db_instances_zombie[db_uid] = db
else:
db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest())
if db_uid in THREAD_LOCAL.db_instances_zombie:
db = THREAD_LOCAL.db_instances_zombie[db_uid]
del THREAD_LOCAL.db_instances_zombie[db_uid]
else:
db = super(DAL, cls).__new__(cls)
db_group = THREAD_LOCAL.db_instances.get(db_uid,[])
db_group.append(db)
THREAD_LOCAL.db_instances[db_uid] = db_group
db._db_uid = db_uid
return db
@staticmethod
def set_folder(folder):
"""
# ## this allows gluon to set a folder for this thread
# ## <<<<<<<<< Should go away as new DAL replaces old sql.py
"""
BaseAdapter.set_folder(folder)
@staticmethod
def get_instances():
"""
Returns a dictionary with uri as key with timings and defined tables
{'sqlite://storage.sqlite': {
'dbstats': [(select auth_user.email from auth_user, 0.02009)],
'dbtables': {
'defined': ['auth_cas', 'auth_event', 'auth_group',
'auth_membership', 'auth_permission', 'auth_user'],
'lazy': '[]'
}
}
}
"""
dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
infos = {}
for db_uid, db_group in dbs:
for db in db_group:
if not db._uri:
continue
k = hide_password(db._adapter.uri)
infos[k] = dict(
dbstats = [(row[0], row[1]) for row in db._timings],
dbtables = {'defined': sorted(
list(set(db.tables)-set(db._LAZY_TABLES.keys()))),
'lazy': sorted(db._LAZY_TABLES.keys())})
return infos
@staticmethod
def distributed_transaction_begin(*instances):
        if not instances:
            return
        thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
        instances = list(enumerate(instances))
        keys = ['%s.%i' % (thread_key, i) for (i, db) in instances]
        for (i, db) in instances:
            if not db._adapter.support_distributed_transaction():
                raise SyntaxError(
                    'distributed transaction not supported by %s' % db._dbname)
        for (i, db) in instances:
            db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
        if not instances:
            return
        instances = list(enumerate(instances))
        thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
        keys = ['%s.%i' % (thread_key, i) for (i, db) in instances]
        for (i, db) in instances:
            if not db._adapter.support_distributed_transaction():
                raise SyntaxError(
                    'distributed transaction not supported by %s' % db._dbname)
try:
for (i, db) in instances:
db._adapter.prepare(keys[i])
except:
for (i, db) in instances:
db._adapter.rollback_prepared(keys[i])
raise RuntimeError('failure to commit distributed transaction')
else:
for (i, db) in instances:
db._adapter.commit_prepared(keys[i])
return
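    # Illustrative sketch (not executed) for the two static helpers above,
    # assuming db1 and db2 are DAL instances whose adapters support two-phase
    # commit and that both define a placeholder 'thing' table:
    #
    #     DAL.distributed_transaction_begin(db1, db2)
    #     db1.thing.insert(name='a')
    #     db2.thing.insert(name='b')
    #     DAL.distributed_transaction_commit(db1, db2)
    #
    # If preparing any participant fails, every prepared transaction is rolled
    # back and a RuntimeError is raised.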
def __init__(self, uri=DEFAULT_URI,
pool_size=0, folder=None,
db_codec='UTF-8', check_reserved=None,
migrate=True, fake_migrate=False,
migrate_enabled=True, fake_migrate_all=False,
decode_credentials=False, driver_args=None,
adapter_args=None, attempts=5, auto_import=False,
bigint_id=False, debug=False, lazy_tables=False,
db_uid=None, do_connect=True,
after_connection=None, tables=None, ignore_field_case=True,
entity_quoting=False):
"""
Creates a new Database Abstraction Layer instance.
Keyword arguments:
:uri: string that contains information for connecting to a database.
(default: 'sqlite://dummy.db')
experimental: you can specify a dictionary as uri
parameter i.e. with
db = DAL({"uri": "sqlite://storage.sqlite",
"tables": {...}, ...})
for an example of dict input you can check the output
of the scaffolding db model with
db.as_dict()
Note that for compatibility with Python older than
version 2.6.5 you should cast your dict input keys
to str due to a syntax limitation on kwarg names.
for proper DAL dictionary input you can use one of:
obj = serializers.cast_keys(dict, [encoding="utf-8"])
or else (for parsing json input)
obj = serializers.loads_json(data, unicode_keys=False)
:pool_size: How many open connections to make to the database object.
:folder: where .table files will be created.
automatically set within web2py
use an explicit path when using DAL outside web2py
:db_codec: string encoding of the database (default: 'UTF-8')
:check_reserved: list of adapters to check tablenames and column names
against sql/nosql reserved keywords. (Default None)
* 'common' List of sql keywords that are common to all database types
such as "SELECT, INSERT". (recommended)
* 'all' Checks against all known SQL keywords. (not recommended)
        * '<adaptername>' Checks against the specific adapter's list of keywords
(recommended)
* '<adaptername>_nonreserved' Checks against the specific adapters
list of nonreserved keywords. (if available)
:migrate (defaults to True) sets default migrate behavior for all tables
:fake_migrate (defaults to False) sets default fake_migrate behavior for all tables
:migrate_enabled (defaults to True). If set to False disables ALL migrations
        :fake_migrate_all (defaults to False). If set to True fake migrates ALL tables
:attempts (defaults to 5). Number of times to attempt connecting
        :auto_import (defaults to False). If set, automatically import table
                 definitions from the databases folder
:bigint_id (defaults to False): If set, turn on bigint instead of int for id fields
:lazy_tables (defaults to False): delay table definition until table access
        :after_connection (defaults to None): a callable that will be executed after the connection
"""
if uri == '<zombie>' and db_uid is not None: return
if not decode_credentials:
credential_decoder = lambda cred: cred
else:
credential_decoder = lambda cred: urllib.unquote(cred)
self._folder = folder
if folder:
self.set_folder(folder)
self._uri = uri
self._pool_size = pool_size
self._db_codec = db_codec
self._lastsql = ''
self._timings = []
self._pending_references = {}
self._request_tenant = 'request_tenant'
self._common_fields = []
self._referee_name = '%(table)s'
self._bigint_id = bigint_id
self._debug = debug
self._migrated = []
self._LAZY_TABLES = {}
self._lazy_tables = lazy_tables
self._tables = SQLCallableList()
self._driver_args = driver_args
self._adapter_args = adapter_args
self._check_reserved = check_reserved
self._decode_credentials = decode_credentials
self._attempts = attempts
self._do_connect = do_connect
self._ignore_field_case = ignore_field_case
if not str(attempts).isdigit() or attempts < 0:
attempts = 5
if uri:
uris = isinstance(uri,(list,tuple)) and uri or [uri]
error = ''
connected = False
for k in range(attempts):
for uri in uris:
try:
if is_jdbc and not uri.startswith('jdbc:'):
uri = 'jdbc:'+uri
self._dbname = REGEX_DBNAME.match(uri).group()
if not self._dbname in ADAPTERS:
raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname)
                        # pass driver_args or {} (and adapter_args or {}) so each
                        # adapter gets a fresh dict rather than a shared default
kwargs = dict(db=self,uri=uri,
pool_size=pool_size,
folder=folder,
db_codec=db_codec,
credential_decoder=credential_decoder,
driver_args=driver_args or {},
adapter_args=adapter_args or {},
do_connect=do_connect,
after_connection=after_connection,
entity_quoting=entity_quoting)
self._adapter = ADAPTERS[self._dbname](**kwargs)
types = ADAPTERS[self._dbname].types
# copy so multiple DAL() possible
self._adapter.types = copy.copy(types)
self._adapter.build_parsemap()
self._adapter.ignore_field_case = ignore_field_case
if bigint_id:
if 'big-id' in types and 'reference' in types:
self._adapter.types['id'] = types['big-id']
self._adapter.types['reference'] = types['big-reference']
connected = True
break
except SyntaxError:
raise
except Exception:
tb = traceback.format_exc()
LOGGER.debug('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
if connected:
break
else:
time.sleep(1)
if not connected:
raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb))
else:
self._adapter = BaseAdapter(db=self,pool_size=0,
uri='None',folder=folder,
db_codec=db_codec, after_connection=after_connection,
entity_quoting=entity_quoting)
migrate = fake_migrate = False
adapter = self._adapter
self._uri_hash = hashlib_md5(adapter.uri).hexdigest()
self.check_reserved = check_reserved
if self.check_reserved:
from reserved_sql_keywords import ADAPTERS as RSK
self.RSK = RSK
self._migrate = migrate
self._fake_migrate = fake_migrate
self._migrate_enabled = migrate_enabled
self._fake_migrate_all = fake_migrate_all
if auto_import or tables:
self.import_table_definitions(adapter.folder,
tables=tables)
@property
def tables(self):
return self._tables
def import_table_definitions(self, path, migrate=False,
fake_migrate=False, tables=None):
if tables:
for table in tables:
self.define_table(**table)
else:
pattern = pjoin(path,self._uri_hash+'_*.table')
for filename in glob.glob(pattern):
tfile = self._adapter.file_open(filename, 'r')
try:
sql_fields = pickle.load(tfile)
name = filename[len(pattern)-7:-6]
mf = [(value['sortable'],
Field(key,
type=value['type'],
length=value.get('length',None),
notnull=value.get('notnull',False),
unique=value.get('unique',False))) \
for key, value in sql_fields.iteritems()]
mf.sort(lambda a,b: cmp(a[0],b[0]))
self.define_table(name,*[item[1] for item in mf],
**dict(migrate=migrate,
fake_migrate=fake_migrate))
finally:
self._adapter.file_close(tfile)
def check_reserved_keyword(self, name):
"""
Validates ``name`` against SQL keywords
        Uses self.check_reserved, which is a list of
        adapter names to check against.
self.check_reserved
['common', 'postgres', 'mysql']
self.check_reserved
['all']
"""
for backend in self.check_reserved:
if name.upper() in self.RSK[backend]:
raise SyntaxError(
'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
"""
EXAMPLE:
db.define_table('person',Field('name'),Field('info'))
db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info'))
@request.restful()
def index():
def GET(*args,**vars):
patterns = [
"/friends[person]",
"/{person.name}/:field",
"/{person.name}/pets[pet.ownedby]",
"/{person.name}/pets[pet.ownedby]/{pet.name}",
"/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
("/dogs[pet]", db.pet.info=='dog'),
("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
]
parser = db.parse_as_rest(patterns,args,vars)
if parser.status == 200:
return dict(content=parser.response)
else:
raise HTTP(parser.status,parser.error)
def POST(table_name,**vars):
if table_name == 'person':
return db.person.validate_and_insert(**vars)
elif table_name == 'pet':
return db.pet.validate_and_insert(**vars)
else:
raise HTTP(400)
return locals()
"""
db = self
re1 = REGEX_SEARCH_PATTERN
re2 = REGEX_SQUARE_BRACKETS
def auto_table(table,base='',depth=0):
patterns = []
for field in db[table].fields:
if base:
tag = '%s/%s' % (base,field.replace('_','-'))
else:
tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
f = db[table][field]
if not f.readable: continue
if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
tag += '/{%s.%s}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type.startswith('boolean'):
tag += '/{%s.%s}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type in ('float','double','integer','bigint'):
tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type.startswith('list:'):
tag += '/{%s.%s.contains}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type in ('date','datetime'):
tag+= '/{%s.%s.year}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.month}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.day}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
if f.type in ('datetime','time'):
tag+= '/{%s.%s.hour}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.minute}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.second}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
if depth>0:
for f in db[table]._referenced_by:
tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
patterns.append(tag)
patterns += auto_table(table,base=tag,depth=depth-1)
return patterns
if patterns == 'auto':
patterns=[]
for table in db.tables:
if not table.startswith('auth_'):
patterns.append('/%s[%s]' % (table,table))
patterns += auto_table(table,base='',depth=1)
else:
i = 0
while i<len(patterns):
pattern = patterns[i]
if not isinstance(pattern,str):
pattern = pattern[0]
tokens = pattern.split('/')
if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
'/'.join(tokens[:-1]))
patterns = patterns[:i]+new_patterns+patterns[i+1:]
i += len(new_patterns)
else:
i += 1
if '/'.join(args) == 'patterns':
return Row({'status':200,'pattern':'list',
'error':None,'response':patterns})
for pattern in patterns:
basequery, exposedfields = None, []
if isinstance(pattern,tuple):
if len(pattern)==2:
pattern, basequery = pattern
elif len(pattern)>2:
pattern, basequery, exposedfields = pattern[0:3]
otable=table=None
if not isinstance(queries,dict):
dbset=db(queries)
if basequery is not None:
dbset = dbset(basequery)
i=0
tags = pattern[1:].split('/')
if len(tags)!=len(args):
continue
for tag in tags:
if re1.match(tag):
# print 're1:'+tag
tokens = tag[1:-1].split('.')
table, field = tokens[0], tokens[1]
if not otable or table == otable:
if len(tokens)==2 or tokens[2]=='eq':
query = db[table][field]==args[i]
elif tokens[2]=='ne':
query = db[table][field]!=args[i]
elif tokens[2]=='lt':
query = db[table][field]<args[i]
elif tokens[2]=='gt':
query = db[table][field]>args[i]
elif tokens[2]=='ge':
query = db[table][field]>=args[i]
elif tokens[2]=='le':
query = db[table][field]<=args[i]
elif tokens[2]=='year':
query = db[table][field].year()==args[i]
elif tokens[2]=='month':
query = db[table][field].month()==args[i]
elif tokens[2]=='day':
query = db[table][field].day()==args[i]
elif tokens[2]=='hour':
query = db[table][field].hour()==args[i]
elif tokens[2]=='minute':
query = db[table][field].minutes()==args[i]
elif tokens[2]=='second':
query = db[table][field].seconds()==args[i]
elif tokens[2]=='startswith':
query = db[table][field].startswith(args[i])
elif tokens[2]=='contains':
query = db[table][field].contains(args[i])
else:
raise RuntimeError("invalid pattern: %s" % pattern)
if len(tokens)==4 and tokens[3]=='not':
query = ~query
elif len(tokens)>=4:
raise RuntimeError("invalid pattern: %s" % pattern)
if not otable and isinstance(queries,dict):
dbset = db(queries[table])
if basequery is not None:
dbset = dbset(basequery)
dbset=dbset(query)
else:
raise RuntimeError("missing relation in pattern: %s" % pattern)
elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
ref = tag[tag.find('[')+1:-1]
if '.' in ref and otable:
table,field = ref.split('.')
selfld = '_id'
if db[table][field].type.startswith('reference '):
refs = [ x.name for x in db[otable] if x.type == db[table][field].type ]
else:
refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ]
if refs:
selfld = refs[0]
if nested_select:
try:
dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld])))
except ValueError:
return Row({'status':400,'pattern':pattern,
'error':'invalid path','response':None})
else:
items = [item.id for item in dbset.select(db[otable][selfld])]
dbset=db(db[table][field].belongs(items))
else:
table = ref
if not otable and isinstance(queries,dict):
dbset = db(queries[table])
dbset=dbset(db[table])
elif tag==':field' and table:
# print 're3:'+tag
field = args[i]
if not field in db[table]: break
# hand-built patterns should respect .readable=False as well
if not db[table][field].readable:
return Row({'status':418,'pattern':pattern,
'error':'I\'m a teapot','response':None})
try:
distinct = vars.get('distinct', False) == 'True'
offset = long(vars.get('offset',None) or 0)
limits = (offset,long(vars.get('limit',None) or 1000)+offset)
except ValueError:
return Row({'status':400,'error':'invalid limits','response':None})
items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
if items:
return Row({'status':200,'response':items,
'pattern':pattern})
else:
return Row({'status':404,'pattern':pattern,
'error':'no record found','response':None})
elif tag != args[i]:
break
otable = table
i += 1
if i == len(tags) and table:
if hasattr(db[table], '_id'):
ofields = vars.get('order', db[table]._id.name).split('|')
else:
ofields = vars.get('order', db[table]._primarykey[0]).split('|')
try:
orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
except (KeyError, AttributeError):
return Row({'status':400,'error':'invalid orderby','response':None})
if exposedfields:
fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
else:
fields = [field for field in db[table] if field.readable]
count = dbset.count()
try:
offset = long(vars.get('offset',None) or 0)
limits = (offset,long(vars.get('limit',None) or 1000)+offset)
except ValueError:
return Row({'status':400,'error':'invalid limits','response':None})
#if count > limits[1]-limits[0]:
# return Row({'status':400,'error':'too many records','response':None})
try:
response = dbset.select(limitby=limits,orderby=orderby,*fields)
except ValueError:
return Row({'status':400,'pattern':pattern,
'error':'invalid path','response':None})
return Row({'status':200,'response':response,
'pattern':pattern,'count':count})
return Row({'status':400,'error':'no matching pattern','response':None})
def define_table(
self,
tablename,
*fields,
**args
):
if not fields and 'fields' in args:
fields = args.get('fields',())
if not isinstance(tablename, str):
if isinstance(tablename, unicode):
try:
tablename = str(tablename)
except UnicodeEncodeError:
raise SyntaxError("invalid unicode table name")
else:
raise SyntaxError("missing table name")
elif hasattr(self,tablename) or tablename in self.tables:
if not args.get('redefine',False):
raise SyntaxError('table already defined: %s' % tablename)
elif tablename.startswith('_') or hasattr(self,tablename) or \
REGEX_PYTHON_KEYWORDS.match(tablename):
raise SyntaxError('invalid table name: %s' % tablename)
elif self.check_reserved:
self.check_reserved_keyword(tablename)
else:
invalid_args = set(args)-TABLE_ARGS
if invalid_args:
raise SyntaxError('invalid table "%s" attributes: %s' \
% (tablename,invalid_args))
if self._lazy_tables and not tablename in self._LAZY_TABLES:
self._LAZY_TABLES[tablename] = (tablename,fields,args)
table = None
else:
table = self.lazy_define_table(tablename,*fields,**args)
if not tablename in self.tables:
self.tables.append(tablename)
return table
def lazy_define_table(
self,
tablename,
*fields,
**args
):
args_get = args.get
common_fields = self._common_fields
if common_fields:
fields = list(fields) + list(common_fields)
table_class = args_get('table_class',Table)
table = table_class(self, tablename, *fields, **args)
table._actual = True
self[tablename] = table
# must follow above line to handle self references
table._create_references()
for field in table:
if field.requires == DEFAULT:
field.requires = sqlhtml_validators(field)
migrate = self._migrate_enabled and args_get('migrate',self._migrate)
if migrate and not self._uri in (None,'None') \
or self._adapter.dbengine=='google:datastore':
fake_migrate = self._fake_migrate_all or \
args_get('fake_migrate',self._fake_migrate)
polymodel = args_get('polymodel',None)
try:
GLOBAL_LOCKER.acquire()
self._lastsql = self._adapter.create_table(
table,migrate=migrate,
fake_migrate=fake_migrate,
polymodel=polymodel)
finally:
GLOBAL_LOCKER.release()
else:
table._dbt = None
on_define = args_get('on_define',None)
if on_define: on_define(table)
return table
def as_dict(self, flat=False, sanitize=True):
db_uid = uri = None
if not sanitize:
uri, db_uid = (self._uri, self._db_uid)
db_as_dict = dict(tables=[], uri=uri, db_uid=db_uid,
**dict([(k, getattr(self, "_" + k, None))
for k in 'pool_size','folder','db_codec',
'check_reserved','migrate','fake_migrate',
'migrate_enabled','fake_migrate_all',
'decode_credentials','driver_args',
'adapter_args', 'attempts',
'bigint_id','debug','lazy_tables',
'do_connect']))
for table in self:
db_as_dict["tables"].append(table.as_dict(flat=flat,
sanitize=sanitize))
return db_as_dict
def as_xml(self, sanitize=True):
if not have_serializers:
raise ImportError("No xml serializers available")
d = self.as_dict(flat=True, sanitize=sanitize)
return serializers.xml(d)
def as_json(self, sanitize=True):
if not have_serializers:
raise ImportError("No json serializers available")
d = self.as_dict(flat=True, sanitize=sanitize)
return serializers.json(d)
def as_yaml(self, sanitize=True):
if not have_serializers:
raise ImportError("No YAML serializers available")
d = self.as_dict(flat=True, sanitize=sanitize)
return serializers.yaml(d)
def __contains__(self, tablename):
try:
return tablename in self.tables
except AttributeError:
# The instance has no .tables attribute yet
return False
has_key = __contains__
def get(self,key,default=None):
return self.__dict__.get(key,default)
def __iter__(self):
for tablename in self.tables:
yield self[tablename]
def __getitem__(self, key):
return self.__getattr__(str(key))
def __getattr__(self, key):
if ogetattr(self,'_lazy_tables') and \
key in ogetattr(self,'_LAZY_TABLES'):
tablename, fields, args = self._LAZY_TABLES.pop(key)
return self.lazy_define_table(tablename,*fields,**args)
return ogetattr(self, key)
def __setitem__(self, key, value):
osetattr(self, str(key), value)
def __setattr__(self, key, value):
if key[:1]!='_' and key in self:
raise SyntaxError(
'Object %s exists and cannot be redefined' % key)
osetattr(self,key,value)
__delitem__ = object.__delattr__
def __repr__(self):
if hasattr(self,'_uri'):
return '<DAL uri="%s">' % hide_password(self._adapter.uri)
else:
return '<DAL db_uid="%s">' % self._db_uid
def smart_query(self,fields,text):
return Set(self, smart_query(fields,text))
def __call__(self, query=None, ignore_common_filters=None):
if isinstance(query,Table):
query = self._adapter.id_query(query)
elif isinstance(query,Field):
query = query!=None
elif isinstance(query, dict):
icf = query.get("ignore_common_filters")
if icf: ignore_common_filters = icf
return Set(self, query, ignore_common_filters=ignore_common_filters)
def commit(self):
self._adapter.commit()
def rollback(self):
self._adapter.rollback()
def close(self):
self._adapter.close()
if self._db_uid in THREAD_LOCAL.db_instances:
db_group = THREAD_LOCAL.db_instances[self._db_uid]
db_group.remove(self)
if not db_group:
del THREAD_LOCAL.db_instances[self._db_uid]
def executesql(self, query, placeholders=None, as_dict=False,
fields=None, colnames=None, as_ordered_dict=False):
"""
placeholders is optional and will always be None.
If using raw SQL with placeholders, placeholders may be
a sequence of values to be substituted in
or, (if supported by the DB driver), a dictionary with keys
matching named placeholders in your SQL.
Added 2009-12-05 "as_dict" optional argument. Will always be
None when using DAL. If using raw SQL can be set to True and
the results cursor returned by the DB driver will be converted
to a sequence of dictionaries keyed with the db field
names. Tested with SQLite but should work with any database
since the cursor.description used to get field names is part
of the Python dbi 2.0 specs. Results returned with
as_dict=True are the same as those returned when applying
.to_list() to a DAL query. If "as_ordered_dict"=True the
behaviour is the same as when "as_dict"=True with the keys
(field names) guaranteed to be in the same order as returned
by the select name executed on the database.
[{field1: value1, field2: value2}, {field1: value1b, field2: value2b}]
Added 2012-08-24 "fields" and "colnames" optional arguments. If either
is provided, the results cursor returned by the DB driver will be
converted to a DAL Rows object using the db._adapter.parse() method.
The "fields" argument is a list of DAL Field objects that match the
fields returned from the DB. The Field objects should be part of one or
more Table objects defined on the DAL object. The "fields" list can
include one or more DAL Table objects in addition to or instead of
including Field objects, or it can be just a single table (not in a
list). In that case, the Field objects will be extracted from the
table(s).
Instead of specifying the "fields" argument, the "colnames" argument
can be specified as a list of field names in tablename.fieldname format.
Again, these should represent tables and fields defined on the DAL
object.
It is also possible to specify both "fields" and the associated
"colnames". In that case, "fields" can also include DAL Expression
objects in addition to Field objects. For Field objects in "fields",
the associated "colnames" must still be in tablename.fieldname format.
For Expression objects in "fields", the associated "colnames" can
be any arbitrary labels.
Note, the DAL Table objects referred to by "fields" or "colnames" can
be dummy tables and do not have to represent any real tables in the
database. Also, note that the "fields" and "colnames" must be in the
same order as the fields in the results cursor returned from the DB.
"""
adapter = self._adapter
if placeholders:
adapter.execute(query, placeholders)
else:
adapter.execute(query)
if as_dict or as_ordered_dict:
if not hasattr(adapter.cursor,'description'):
raise RuntimeError("database does not support executesql(...,as_dict=True)")
# Non-DAL legacy db query, converts cursor results to dict.
# sequence of 7-item sequences. each sequence tells about a column.
# first item is always the field name according to Python Database API specs
columns = adapter.cursor.description
# reduce the column info down to just the field names
fields = colnames or [f[0] for f in columns]
if len(fields) != len(set(fields)):
raise RuntimeError("Result set includes duplicate column names. Specify unique column names using the 'colnames' argument")
# will hold our finished resultset in a list
data = adapter._fetchall()
# convert the list for each row into a dictionary so it's
# easier to work with. row['field_name'] rather than row[0]
if as_ordered_dict:
_dict = OrderedDict
else:
_dict = dict
return [_dict(zip(fields,row)) for row in data]
try:
data = adapter._fetchall()
except:
return None
if fields or colnames:
fields = [] if fields is None else fields
if not isinstance(fields, list):
fields = [fields]
extracted_fields = []
for field in fields:
if isinstance(field, Table):
extracted_fields.extend([f for f in field])
else:
extracted_fields.append(field)
if not colnames:
colnames = ['%s.%s' % (f.tablename, f.name)
for f in extracted_fields]
data = adapter.parse(
data, fields=extracted_fields, colnames=colnames)
return data
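    # Illustrative sketch (not executed) of the two main executesql modes
    # documented above; the 'person' table and its columns are placeholders:
    #
    #     db.executesql('SELECT id, name FROM person;', as_dict=True)
    #     # -> [{'id': 1, 'name': 'John'}, ...]
    #
    #     db.executesql('SELECT person.id, person.name FROM person;',
    #                   fields=db.person)
    #     # -> a Rows object built through db._adapter.parse()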
def _remove_references_to(self, thistable):
for table in self:
table._referenced_by = [field for field in table._referenced_by
if not field.table==thistable]
def export_to_csv_file(self, ofile, *args, **kwargs):
        step = long(kwargs.get('max_fetch_rows', 500))
write_colnames = kwargs['write_colnames'] = \
kwargs.get("write_colnames", True)
for table in self.tables:
ofile.write('TABLE %s\r\n' % table)
query = self._adapter.id_query(self[table])
nrows = self(query).count()
kwargs['write_colnames'] = write_colnames
for k in range(0,nrows,step):
self(query).select(limitby=(k,k+step)).export_to_csv_file(
ofile, *args, **kwargs)
kwargs['write_colnames'] = False
ofile.write('\r\n\r\n')
ofile.write('END')
def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
unique='uuid', map_tablenames=None,
ignore_missing_tables=False,
*args, **kwargs):
#if id_map is None: id_map={}
id_offset = {} # only used if id_map is None
map_tablenames = map_tablenames or {}
for line in ifile:
line = line.strip()
if not line:
continue
elif line == 'END':
return
elif not line.startswith('TABLE ') or \
not line[6:] in self.tables:
raise SyntaxError('invalid file format')
else:
tablename = line[6:]
tablename = map_tablenames.get(tablename,tablename)
if tablename is not None and tablename in self.tables:
self[tablename].import_from_csv_file(
ifile, id_map, null, unique, id_offset,
*args, **kwargs)
elif tablename is None or ignore_missing_tables:
# skip all non-empty lines
for line in ifile:
if not line.strip():
break
else:
raise RuntimeError("Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)")
def DAL_unpickler(db_uid):
return DAL('<zombie>',db_uid=db_uid)
def DAL_pickler(db):
return DAL_unpickler, (db._db_uid,)
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
class SQLALL(object):
"""
Helper class providing a comma-separated string having all the field names
(prefixed by table name and '.')
normally only called from within gluon.sql
"""
def __init__(self, table):
self._table = table
def __str__(self):
return ', '.join([str(field) for field in self._table])
# class Reference(int):
class Reference(long):
def __allocate(self):
if not self._record:
self._record = self._table[long(self)]
if not self._record:
raise RuntimeError(
"Using a recursive select but encountered a broken reference: %s %d"%(self._table, long(self)))
    def __getattr__(self, key, default=None):
if key == 'id':
return long(self)
if key in self._table:
self.__allocate()
if self._record:
            return self._record.get(key, default) # to deal with case self.update_record()
        else:
            return default
    def get(self, key, default=None):
        # __getattr__ above accepts no default argument; emulate dict.get here
        value = self.__getattr__(key)
        return default if value is None else value
def __setattr__(self, key, value):
if key.startswith('_'):
long.__setattr__(self, key, value)
return
self.__allocate()
self._record[key] = value
def __getitem__(self, key):
if key == 'id':
return long(self)
self.__allocate()
return self._record.get(key, None)
def __setitem__(self,key,value):
self.__allocate()
self._record[key] = value
def Reference_unpickler(data):
return marshal.loads(data)
def Reference_pickler(data):
try:
marshal_dump = marshal.dumps(long(data))
except AttributeError:
marshal_dump = 'i%s' % struct.pack('<i', long(data))
return (Reference_unpickler, (marshal_dump,))
copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
class MethodAdder(object):
def __init__(self,table):
self.table = table
def __call__(self):
return self.register()
def __getattr__(self,method_name):
return self.register(method_name)
def register(self,method_name=None):
def _decorated(f):
instance = self.table
import types
method = types.MethodType(f, instance, instance.__class__)
name = method_name or f.func_name
setattr(instance, name, method)
return f
return _decorated
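# Illustrative sketch (not part of the original source; the 'person' table and
# its 'age' field are hypothetical). MethodAdder lets you attach extra methods
# to a table instance with a decorator; the decorated function receives the
# table as its first argument:
#
#     @db.person.add_method.adults
#     def adults(table):
#         return db(table.age > 18).select()
#
#     rows = db.person.adults()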
class Table(object):
"""
an instance of this class represents a database table
Example::
db = DAL(...)
db.define_table('users', Field('name'))
db.users.insert(name='me') # print db.users._insert(...) to see SQL
db.users.drop()
"""
def __init__(
self,
db,
tablename,
*fields,
**args):
"""
Initializes the table and performs checking on the provided fields.
        Each table automatically has an 'id' field.
If a field is of type Table, the fields (excluding 'id') from that table
will be used instead.
:raises SyntaxError: when a supplied field is of incorrect type.
"""
self._actual = False # set to True by define_table()
self._tablename = tablename
if (not isinstance(tablename, str) or tablename[0] == '_'
or hasattr(DAL, tablename) or '.' in tablename
or REGEX_PYTHON_KEYWORDS.match(tablename)
):
            raise SyntaxError('Table: invalid table name: %s, '
                              'use rname for "funny" names' % tablename)
self._ot = None
self._rname = args.get('rname')
self._sequence_name = (args.get('sequence_name') or
db and db._adapter.sequence_name(self._rname
or tablename))
self._trigger_name = (args.get('trigger_name') or
db and db._adapter.trigger_name(tablename))
self._common_filter = args.get('common_filter')
self._format = args.get('format')
self._singular = args.get(
'singular', tablename.replace('_', ' ').capitalize())
self._plural = args.get(
'plural', pluralize(self._singular.lower()).capitalize())
        # horrible but needed for backward compatibility of appadmin:
if 'primarykey' in args and args['primarykey'] is not None:
self._primarykey = args.get('primarykey')
self._before_insert = []
self._before_update = [Set.delete_uploaded_files]
self._before_delete = [Set.delete_uploaded_files]
self._after_insert = []
self._after_update = []
self._after_delete = []
self.add_method = MethodAdder(self)
fieldnames, newfields=set(), []
_primarykey = getattr(self, '_primarykey', None)
if _primarykey is not None:
if not isinstance(_primarykey, list):
raise SyntaxError(
"primarykey must be a list of fields from table '%s'"
% tablename)
if len(_primarykey) == 1:
self._id = [f for f in fields if isinstance(f, Field)
and f.name ==_primarykey[0]][0]
elif not [f for f in fields if (isinstance(f, Field) and
f.type == 'id') or (isinstance(f, dict) and
f.get("type", None) == "id")]:
field = Field('id', 'id')
newfields.append(field)
fieldnames.add('id')
self._id = field
virtual_fields = []
def include_new(field):
newfields.append(field)
fieldnames.add(field.name)
if field.type == 'id':
self._id = field
for field in fields:
if isinstance(field, (FieldMethod, FieldVirtual)):
virtual_fields.append(field)
elif isinstance(field, Field) and not field.name in fieldnames:
if field.db is not None:
field = copy.copy(field)
include_new(field)
elif isinstance(field, dict) and not field['fieldname'] in fieldnames:
include_new(Field(**field))
elif isinstance(field, Table):
table = field
for field in table:
if not field.name in fieldnames and not field.type == 'id':
t2 = not table._actual and self._tablename
include_new(field.clone(point_self_references_to=t2))
elif not isinstance(field, (Field, Table)):
raise SyntaxError(
'define_table argument is not a Field or Table: %s' % field)
fields = newfields
self._db = db
tablename = tablename
self._fields = SQLCallableList()
self.virtualfields = []
fields = list(fields)
if db and db._adapter.uploads_in_blob is True:
uploadfields = [f.name for f in fields if f.type == 'blob']
for field in fields:
fn = field.uploadfield
if isinstance(field, Field) and field.type == 'upload'\
and fn is True:
fn = field.uploadfield = '%s_blob' % field.name
if isinstance(fn, str) and not fn in uploadfields:
fields.append(Field(fn, 'blob', default='',
writable=False, readable=False))
fieldnames_set = set()
reserved = dir(Table) + ['fields']
if (db and db.check_reserved):
check_reserved = db.check_reserved_keyword
else:
def check_reserved(field_name):
if field_name in reserved:
raise SyntaxError("field name %s not allowed" % field_name)
for field in fields:
field_name = field.name
check_reserved(field_name)
if db and db._ignore_field_case:
fname_item = field_name.lower()
else:
fname_item = field_name
if fname_item in fieldnames_set:
raise SyntaxError("duplicate field %s in table %s" %
(field_name, tablename))
else:
fieldnames_set.add(fname_item)
self.fields.append(field_name)
self[field_name] = field
if field.type == 'id':
self['id'] = field
field.tablename = field._tablename = tablename
field.table = field._table = self
field.db = field._db = db
self.ALL = SQLALL(self)
if _primarykey is not None:
for k in _primarykey:
if k not in self.fields:
raise SyntaxError(
"primarykey must be a list of fields from table '%s " %
tablename)
else:
self[k].notnull = True
for field in virtual_fields:
self[field.name] = field
@property
def fields(self):
return self._fields
def update(self, *args, **kwargs):
raise RuntimeError("Syntax Not Supported")
def _enable_record_versioning(self,
archive_db=None,
archive_name='%(tablename)s_archive',
is_active='is_active',
current_record='current_record',
current_record_label=None):
db = self._db
archive_db = archive_db or db
archive_name = archive_name % dict(tablename=self._tablename)
if archive_name in archive_db.tables():
            return  # do not try to define the archive table if it already exists
fieldnames = self.fields()
same_db = archive_db is db
field_type = self if same_db else 'bigint'
clones = []
for field in self:
nfk = same_db or not field.type.startswith('reference')
clones.append(
field.clone(unique=False, type=field.type if nfk else 'bigint')
)
archive_db.define_table(
archive_name,
Field(current_record, field_type, label=current_record_label),
*clones, **dict(format=self._format))
self._before_update.append(
lambda qset, fs, db=archive_db, an=archive_name, cn=current_record:
archive_record(qset, fs, db[an], cn))
if is_active and is_active in fieldnames:
self._before_delete.append(
lambda qset: qset.update(is_active=False))
newquery = lambda query, t=self, name=self._tablename: \
reduce(AND, [db[tn].is_active == True
for tn in db._adapter.tables(query)
if tn == name or getattr(db[tn],'_ot',None)==name])
query = self._common_filter
if query:
newquery = query & newquery
self._common_filter = newquery
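    # Illustrative sketch (not part of the original source; 'stored_item' is a
    # hypothetical table). Record versioning is typically switched on per table:
    #
    #     db.define_table('stored_item', Field('name'))
    #     db.stored_item._enable_record_versioning()
    #
    # This defines 'stored_item_archive' (unless it already exists), archives
    # the previous version of a row on every update, turns deletes into
    # is_active=False soft deletes, and filters on is_active==True by default.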
def _validate(self, **vars):
errors = Row()
for key, value in vars.iteritems():
value, error = self[key].validate(value)
if error:
errors[key] = error
return errors
def _create_references(self):
db = self._db
pr = db._pending_references
self._referenced_by = []
self._references = []
for field in self:
#fieldname = field.name ##FIXME not used ?
field_type = field.type
if isinstance(field_type, str) and field_type[:10] == 'reference ':
ref = field_type[10:].strip()
if not ref:
                    raise SyntaxError('Table: reference to nothing: %s' % ref)
if '.' in ref:
rtablename, throw_it, rfieldname = ref.partition('.')
else:
rtablename, rfieldname = ref, None
if not rtablename in db:
pr[rtablename] = pr.get(rtablename, []) + [field]
continue
rtable = db[rtablename]
if rfieldname:
if not hasattr(rtable, '_primarykey'):
raise SyntaxError(
'keyed tables can only reference other keyed tables (for now)')
if rfieldname not in rtable.fields:
raise SyntaxError(
"invalid field '%s' for referenced table '%s'"
" in table '%s'" % (rfieldname, rtablename, self._tablename)
)
rfield = rtable[rfieldname]
else:
rfield = rtable._id
rtable._referenced_by.append(field)
field.referent = rfield
self._references.append(field)
else:
field.referent = None
if self._tablename in pr:
referees = pr.pop(self._tablename)
for referee in referees:
self._referenced_by.append(referee)
def _filter_fields(self, record, id=False):
return dict([(k, v) for (k, v) in record.iteritems() if k
in self.fields and (self[k].type!='id' or id)])
def _build_query(self,key):
""" for keyed table only """
query = None
for k,v in key.iteritems():
if k in self._primarykey:
if query:
query = query & (self[k] == v)
else:
query = (self[k] == v)
else:
raise SyntaxError(
'Field %s is not part of the primary key of %s' %
(k,self._tablename)
)
return query
def __getitem__(self, key):
if not key:
return None
elif isinstance(key, dict):
""" for keyed table """
query = self._build_query(key)
return self._db(query).select(limitby=(0, 1), orderby_on_limitby=False).first()
elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key):
return self._db(self._id == key).select(limitby=(0, 1), orderby_on_limitby=False).first()
elif key:
return ogetattr(self, str(key))
def __call__(self, key=DEFAULT, **kwargs):
for_update = kwargs.get('_for_update', False)
if '_for_update' in kwargs:
del kwargs['_for_update']
orderby = kwargs.get('_orderby', None)
if '_orderby' in kwargs:
del kwargs['_orderby']
if not key is DEFAULT:
if isinstance(key, Query):
record = self._db(key).select(
limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
elif not str(key).isdigit():
record = None
else:
record = self._db(self._id == key).select(
limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
if record:
for k,v in kwargs.iteritems():
if record[k]!=v: return None
return record
elif kwargs:
query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()])
return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
else:
return None
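    # Illustrative sketch (not part of the original source; 'person' and its
    # fields are hypothetical). __call__ allows direct record lookup:
    #
    #     row = db.person(1)                          # by id, None if missing
    #     row = db.person(name='Max')                 # first match on the kwargs
    #     row = db.person(db.person.age > 18, _orderby=db.person.name)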
def __setitem__(self, key, value):
if isinstance(key, dict) and isinstance(value, dict):
""" option for keyed table """
if set(key.keys()) == set(self._primarykey):
value = self._filter_fields(value)
kv = {}
kv.update(value)
kv.update(key)
if not self.insert(**kv):
query = self._build_query(key)
self._db(query).update(**self._filter_fields(value))
else:
raise SyntaxError(
'key must have all fields from primary key: %s'%
(self._primarykey))
elif str(key).isdigit():
if key == 0:
self.insert(**self._filter_fields(value))
elif self._db(self._id == key)\
.update(**self._filter_fields(value)) is None:
raise SyntaxError('No such record: %s' % key)
else:
if isinstance(key, dict):
raise SyntaxError(
'value must be a dictionary: %s' % value)
osetattr(self, str(key), value)
__getattr__ = __getitem__
def __setattr__(self, key, value):
if key[:1]!='_' and key in self:
raise SyntaxError('Object exists and cannot be redefined: %s' % key)
osetattr(self,key,value)
def __delitem__(self, key):
if isinstance(key, dict):
query = self._build_query(key)
if not self._db(query).delete():
raise SyntaxError('No such record: %s' % key)
elif not str(key).isdigit() or \
not self._db(self._id == key).delete():
raise SyntaxError('No such record: %s' % key)
def __contains__(self,key):
return hasattr(self, key)
has_key = __contains__
def items(self):
return self.__dict__.items()
def __iter__(self):
for fieldname in self.fields:
yield self[fieldname]
def iteritems(self):
return self.__dict__.iteritems()
def __repr__(self):
return '<Table %s (%s)>' % (self._tablename, ','.join(self.fields()))
def __str__(self):
if self._ot is not None:
ot = self._ot
if 'Oracle' in str(type(self._db._adapter)):
return '%s %s' % (ot, self._tablename)
return '%s AS %s' % (ot, self._tablename)
return self._tablename
@property
def sqlsafe(self):
rname = self._rname
if rname: return rname
return self._db._adapter.sqlsafe_table(self._tablename)
@property
def sqlsafe_alias(self):
rname = self._rname
ot = self._ot
if rname and not ot: return rname
return self._db._adapter.sqlsafe_table(self._tablename, self._ot)
def _drop(self, mode=''):
return self._db._adapter._drop(self, mode)
def drop(self, mode=''):
return self._db._adapter.drop(self,mode)
def _listify(self,fields,update=False):
new_fields = {} # format: new_fields[name] = (field,value)
# store all fields passed as input in new_fields
for name in fields:
if not name in self.fields:
if name != 'id':
raise SyntaxError(
'Field %s does not belong to the table' % name)
else:
field = self[name]
value = fields[name]
if field.filter_in:
value = field.filter_in(value)
new_fields[name] = (field, value)
# check all fields that should be in the table but are not passed
to_compute = []
for ofield in self:
name = ofield.name
if not name in new_fields:
# if field is supposed to be computed, compute it!
if ofield.compute: # save those to compute for later
to_compute.append((name, ofield))
# if field is required, check its default value
elif not update and not ofield.default is None:
value = ofield.default
fields[name] = value
new_fields[name] = (ofield, value)
                # if this is an update, use the update value instead
elif update and not ofield.update is None:
value = ofield.update
fields[name] = value
new_fields[name] = (ofield, value)
# if the field is still not there but it should, error
elif not update and ofield.required:
raise RuntimeError(
'Table: missing required field: %s' % name)
# now deal with fields that are supposed to be computed
if to_compute:
row = Row(fields)
for name, ofield in to_compute:
# try compute it
try:
row[name] = new_value = ofield.compute(row)
new_fields[name] = (ofield, new_value)
except (KeyError, AttributeError):
                    # fail silently unless the field is required!
if ofield.required:
raise SyntaxError('unable to compute field: %s' % name)
return new_fields.values()
def _attempt_upload(self, fields):
for field in self:
if field.type == 'upload' and field.name in fields:
value = fields[field.name]
if value is not None and not isinstance(value, str):
if hasattr(value, 'file') and hasattr(value, 'filename'):
new_name = field.store(value.file, filename=value.filename)
elif hasattr(value, 'read') and hasattr(value, 'name'):
new_name = field.store(value, filename=value.name)
else:
raise RuntimeError("Unable to handle upload")
fields[field.name] = new_name
def _defaults(self, fields):
"If there are no fields/values specified, return table defaults"
if not fields:
fields = {}
for field in self:
if field.type != "id":
fields[field.name] = field.default
return fields
def _insert(self, **fields):
fields = self._defaults(fields)
return self._db._adapter._insert(self, self._listify(fields))
def insert(self, **fields):
fields = self._defaults(fields)
self._attempt_upload(fields)
if any(f(fields) for f in self._before_insert): return 0
ret = self._db._adapter.insert(self, self._listify(fields))
if ret and self._after_insert:
fields = Row(fields)
[f(fields,ret) for f in self._after_insert]
return ret
def validate_and_insert(self, **fields):
response = Row()
response.errors = Row()
new_fields = copy.copy(fields)
for key,value in fields.iteritems():
value,error = self[key].validate(value)
if error:
response.errors[key] = "%s" % error
else:
new_fields[key] = value
if not response.errors:
response.id = self.insert(**new_fields)
else:
response.id = None
return response
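    # Illustrative sketch (not part of the original source; 'person' is a
    # hypothetical table with validators on 'name'):
    #
    #     result = db.person.validate_and_insert(name='Max')
    #     # result.id holds the new id on success; on failure result.id is None
    #     # and result.errors maps field names to validation messages.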
def validate_and_update(self, _key=DEFAULT, **fields):
response = Row()
response.errors = Row()
new_fields = copy.copy(fields)
for key, value in fields.iteritems():
value, error = self[key].validate(value)
if error:
response.errors[key] = "%s" % error
else:
new_fields[key] = value
if _key is DEFAULT:
record = self(**fields)
elif isinstance(_key, dict):
record = self(**_key)
else:
record = self(_key)
if not response.errors and record:
if '_id' in self:
myset = self._db(self._id == record[self._id.name])
else:
query = None
for key, value in _key.iteritems():
if query is None:
query = getattr(self, key) == value
else:
query = query & (getattr(self, key) == value)
myset = self._db(query)
response.id = myset.update(**fields)
else:
response.id = None
return response
def update_or_insert(self, _key=DEFAULT, **values):
if _key is DEFAULT:
record = self(**values)
elif isinstance(_key, dict):
record = self(**_key)
else:
record = self(_key)
if record:
record.update_record(**values)
newid = None
else:
newid = self.insert(**values)
return newid
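    # Illustrative sketch (not part of the original source; table and fields
    # are hypothetical):
    #
    #     new_id = db.person.update_or_insert(db.person.name == 'Max',
    #                                         name='Max', age=40)
    #     # returns the new id when a record was inserted, or None when an
    #     # existing record matching _key was updated in place.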
def validate_and_update_or_insert(self, _key=DEFAULT, **fields):
if _key is DEFAULT or _key == '':
primary_keys = {}
for key, value in fields.iteritems():
if key in self._primarykey:
primary_keys[key] = value
if primary_keys != {}:
record = self(**primary_keys)
_key = primary_keys
else:
required_keys = {}
for key, value in fields.iteritems():
if getattr(self, key).required:
required_keys[key] = value
record = self(**required_keys)
_key = required_keys
elif isinstance(_key, dict):
record = self(**_key)
else:
record = self(_key)
if record:
response = self.validate_and_update(_key, **fields)
primary_keys = {}
for key in self._primarykey:
primary_keys[key] = getattr(record, key)
response.id = primary_keys
else:
response = self.validate_and_insert(**fields)
return response
def bulk_insert(self, items):
"""
here items is a list of dictionaries
"""
items = [self._listify(item) for item in items]
if any(f(item) for item in items for f in self._before_insert):return 0
ret = self._db._adapter.bulk_insert(self,items)
ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert]
return ret
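    # Illustrative sketch (not part of the original source; 'person' is a
    # hypothetical table):
    #
    #     ids = db.person.bulk_insert([dict(name='Alex'), dict(name='Bob')])
    #
    # The return value comes straight from the adapter; for SQL adapters it is
    # typically the list of newly inserted ids.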
def _truncate(self, mode=None):
return self._db._adapter._truncate(self, mode)
def truncate(self, mode=None):
return self._db._adapter.truncate(self, mode)
def import_from_csv_file(
self,
csvfile,
id_map=None,
null='<NULL>',
unique='uuid',
id_offset=None, # id_offset used only when id_map is None
*args, **kwargs
):
"""
Import records from csv file.
Column headers must have same names as table fields.
Field 'id' is ignored.
        If column names read 'table.field', the 'table.' prefix is ignored.
'unique' argument is a field which must be unique
(typically a uuid field)
'restore' argument is default False;
if set True will remove old values in table first.
'id_map' if set to None will not map ids.
        The import will keep the id numbers in the restored table;
        this assumes that there is a field of type id that
        is integer and in incrementing order.
"""
delimiter = kwargs.get('delimiter', ',')
quotechar = kwargs.get('quotechar', '"')
quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
restore = kwargs.get('restore', False)
if restore:
self._db[self].truncate()
reader = csv.reader(csvfile, delimiter=delimiter,
quotechar=quotechar, quoting=quoting)
colnames = None
if isinstance(id_map, dict):
if not self._tablename in id_map:
id_map[self._tablename] = {}
id_map_self = id_map[self._tablename]
def fix(field, value, id_map, id_offset):
list_reference_s='list:reference'
if value == null:
value = None
elif field.type=='blob':
value = base64.b64decode(value)
elif field.type=='double' or field.type=='float':
if not value.strip():
value = None
else:
value = float(value)
elif field.type in ('integer','bigint'):
if not value.strip():
value = None
else:
value = long(value)
elif field.type.startswith('list:string'):
value = bar_decode_string(value)
elif field.type.startswith(list_reference_s):
ref_table = field.type[len(list_reference_s):].strip()
if id_map is not None:
value = [id_map[ref_table][long(v)] \
for v in bar_decode_string(value)]
else:
value = [v for v in bar_decode_string(value)]
elif field.type.startswith('list:'):
value = bar_decode_integer(value)
elif id_map and field.type.startswith('reference'):
try:
value = id_map[field.type[9:].strip()][long(value)]
except KeyError:
pass
elif id_offset and field.type.startswith('reference'):
try:
value = id_offset[field.type[9:].strip()]+long(value)
except KeyError:
pass
return (field.name, value)
def is_id(colname):
if colname in self:
return self[colname].type == 'id'
else:
return False
first = True
unique_idx = None
for lineno, line in enumerate(reader):
if not line:
break
if not colnames:
# assume this is the first line of the input, contains colnames
colnames = [x.split('.',1)[-1] for x in line][:len(line)]
cols, cid = [], None
for i,colname in enumerate(colnames):
if is_id(colname):
cid = i
elif colname in self.fields:
cols.append((i,self[colname]))
if colname == unique:
unique_idx = i
else:
                # every other line contains data instead
items = []
for i, field in cols:
try:
items.append(fix(field, line[i], id_map, id_offset))
except ValueError:
raise RuntimeError("Unable to parse line:%s field:%s value:'%s'"
% (lineno+1,field,line[i]))
if not (id_map or cid is None or id_offset is None or unique_idx):
csv_id = long(line[cid])
curr_id = self.insert(**dict(items))
if first:
first = False
                        # If the first curr_id is bigger than csv_id then we are
                        # not restoring but extending the db table with the csv table
id_offset[self._tablename] = (curr_id-csv_id) \
if curr_id>csv_id else 0
# create new id until we get the same as old_id+offset
while curr_id<csv_id+id_offset[self._tablename]:
self._db(self._db[self][colnames[cid]] == curr_id).delete()
curr_id = self.insert(**dict(items))
                # Check for a duplicate of the 'unique' value and,
                # if present, update instead of insert.
elif not unique_idx:
new_id = self.insert(**dict(items))
else:
unique_value = line[unique_idx]
query = self._db[self][unique] == unique_value
record = self._db(query).select().first()
if record:
record.update_record(**dict(items))
new_id = record[self._id.name]
else:
new_id = self.insert(**dict(items))
if id_map and cid is not None:
id_map_self[long(line[cid])] = new_id
def as_dict(self, flat=False, sanitize=True):
table_as_dict = dict(
tablename=str(self),
fields=[],
sequence_name=self._sequence_name,
trigger_name=self._trigger_name,
common_filter=self._common_filter,
format=self._format,
singular=self._singular,
plural=self._plural)
for field in self:
if (field.readable or field.writable) or (not sanitize):
table_as_dict["fields"].append(field.as_dict(
flat=flat, sanitize=sanitize))
return table_as_dict
def as_xml(self, sanitize=True):
if not have_serializers:
raise ImportError("No xml serializers available")
d = self.as_dict(flat=True, sanitize=sanitize)
return serializers.xml(d)
def as_json(self, sanitize=True):
if not have_serializers:
raise ImportError("No json serializers available")
d = self.as_dict(flat=True, sanitize=sanitize)
return serializers.json(d)
def as_yaml(self, sanitize=True):
if not have_serializers:
raise ImportError("No YAML serializers available")
d = self.as_dict(flat=True, sanitize=sanitize)
return serializers.yaml(d)
def with_alias(self, alias):
return self._db._adapter.alias(self, alias)
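    # Illustrative sketch (not part of the original source; 'person' and its
    # self-reference 'father_id' are hypothetical). with_alias enables self joins:
    #
    #     father = db.person.with_alias('father')
    #     rows = db(db.person.father_id == father.id).select(
    #         db.person.name, father.name)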
def on(self, query):
return Expression(self._db, self._db._adapter.ON, self, query)
def archive_record(qset, fs, archive_table, current_record):
tablenames = qset.db._adapter.tables(qset.query)
if len(tablenames) != 1:
raise RuntimeError("cannot update join")
for row in qset.select():
fields = archive_table._filter_fields(row)
fields[current_record] = row.id
archive_table.insert(**fields)
return False
class Expression(object):
def __init__(
self,
db,
op,
first=None,
second=None,
type=None,
**optional_args
):
self.db = db
self.op = op
self.first = first
self.second = second
self._table = getattr(first,'_table',None)
### self._tablename = first._tablename ## CHECK
if not type and first and hasattr(first,'type'):
self.type = first.type
else:
self.type = type
self.optional_args = optional_args
def sum(self):
db = self.db
return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)
def max(self):
db = self.db
return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)
def min(self):
db = self.db
return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)
def len(self):
db = self.db
return Expression(db, db._adapter.LENGTH, self, None, 'integer')
def avg(self):
db = self.db
return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)
def abs(self):
db = self.db
return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)
def lower(self):
db = self.db
return Expression(db, db._adapter.LOWER, self, None, self.type)
def upper(self):
db = self.db
return Expression(db, db._adapter.UPPER, self, None, self.type)
def replace(self, a, b):
db = self.db
return Expression(db, db._adapter.REPLACE, self, (a, b), self.type)
def year(self):
db = self.db
return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')
def month(self):
db = self.db
return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')
def day(self):
db = self.db
return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')
def hour(self):
db = self.db
return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')
def minutes(self):
db = self.db
return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')
def coalesce(self, *others):
db = self.db
return Expression(db, db._adapter.COALESCE, self, others, self.type)
def coalesce_zero(self):
db = self.db
return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)
def seconds(self):
db = self.db
return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')
def epoch(self):
db = self.db
return Expression(db, db._adapter.EPOCH, self, None, 'integer')
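    # Illustrative sketch (not part of the original source; 'log' and its
    # 'event_time' datetime field are hypothetical). The date-part helpers
    # above translate into SQL EXTRACT expressions:
    #
    #     month = db.log.event_time.month()
    #     rows = db(db.log.event_time.year() == 2013).select(
    #         month, db.log.id.count(), groupby=month)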
def __getslice__(self, start, stop):
db = self.db
if start < 0:
pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
else:
pos0 = start + 1
if stop < 0:
length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
elif stop == sys.maxint:
length = self.len()
else:
length = '(%s - %s)' % (stop + 1, pos0)
return Expression(db, db._adapter.SUBSTRING,
self, (pos0, length), self.type)
def __getitem__(self, i):
return self[i:i + 1]
def __str__(self):
return self.db._adapter.expand(self, self.type)
def __or__(self, other): # for use in sortby
db = self.db
return Expression(db, db._adapter.COMMA, self, other, self.type)
def __invert__(self):
db = self.db
        if self.op == db._adapter.INVERT:
            return self.first
return Expression(db, db._adapter.INVERT, self, type=self.type)
def __add__(self, other):
db = self.db
return Expression(db, db._adapter.ADD, self, other, self.type)
def __sub__(self, other):
db = self.db
if self.type in ('integer', 'bigint'):
result_type = 'integer'
elif self.type in ['date','time','datetime','double','float']:
result_type = 'double'
elif self.type.startswith('decimal('):
result_type = self.type
else:
raise SyntaxError("subtraction operation not supported for type")
return Expression(db,db._adapter.SUB,self,other,result_type)
def __mul__(self, other):
db = self.db
return Expression(db,db._adapter.MUL,self,other,self.type)
def __div__(self, other):
db = self.db
return Expression(db,db._adapter.DIV,self,other,self.type)
def __mod__(self, other):
db = self.db
return Expression(db,db._adapter.MOD,self,other,self.type)
def __eq__(self, value):
db = self.db
return Query(db, db._adapter.EQ, self, value)
def __ne__(self, value):
db = self.db
return Query(db, db._adapter.NE, self, value)
def __lt__(self, value):
db = self.db
return Query(db, db._adapter.LT, self, value)
def __le__(self, value):
db = self.db
return Query(db, db._adapter.LE, self, value)
def __gt__(self, value):
db = self.db
return Query(db, db._adapter.GT, self, value)
def __ge__(self, value):
db = self.db
return Query(db, db._adapter.GE, self, value)
def like(self, value, case_sensitive=False):
db = self.db
op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE
return Query(db, op, self, value)
def regexp(self, value):
db = self.db
return Query(db, db._adapter.REGEXP, self, value)
def belongs(self, *value, **kwattr):
"""
Accepts the following inputs:
field.belongs(1,2)
field.belongs((1,2))
field.belongs(query)
Does NOT accept:
field.belongs(1)
"""
db = self.db
if len(value) == 1:
value = value[0]
if isinstance(value,Query):
value = db(value)._select(value.first._table._id)
elif not isinstance(value, basestring):
value = set(value)
if kwattr.get('null') and None in value:
value.remove(None)
return (self == None) | Query(db, db._adapter.BELONGS, self, value)
return Query(db, db._adapter.BELONGS, self, value)
def startswith(self, value):
db = self.db
if not self.type in ('string', 'text', 'json', 'upload'):
raise SyntaxError("startswith used with incompatible field type")
return Query(db, db._adapter.STARTSWITH, self, value)
def endswith(self, value):
db = self.db
if not self.type in ('string', 'text', 'json', 'upload'):
raise SyntaxError("endswith used with incompatible field type")
return Query(db, db._adapter.ENDSWITH, self, value)
def contains(self, value, all=False, case_sensitive=False):
"""
        The case_sensitive parameter is only useful for PostgreSQL.
        For other RDBMSs it is ignored and contains is always case-insensitive.
        For MongoDB and GAE contains is always case-sensitive.
"""
db = self.db
if isinstance(value,(list, tuple)):
subqueries = [self.contains(str(v).strip(),case_sensitive=case_sensitive)
for v in value if str(v).strip()]
if not subqueries:
return self.contains('')
else:
return reduce(all and AND or OR,subqueries)
if not self.type in ('string', 'text', 'json', 'upload') and not self.type.startswith('list:'):
raise SyntaxError("contains used with incompatible field type")
return Query(db, db._adapter.CONTAINS, self, value, case_sensitive=case_sensitive)
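    # Illustrative sketch (not part of the original source; 'post' with a
    # list:string 'tags' field is hypothetical). Passing a list builds a
    # conjunction (all=True) or disjunction of CONTAINS queries:
    #
    #     db(db.post.tags.contains(['web2py', 'dal'], all=True)).select()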
def with_alias(self, alias):
db = self.db
return Expression(db, db._adapter.AS, self, alias, self.type)
# GIS expressions
def st_asgeojson(self, precision=15, options=0, version=1):
return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self,
dict(precision=precision, options=options,
version=version), 'string')
def st_astext(self):
db = self.db
return Expression(db, db._adapter.ST_ASTEXT, self, type='string')
def st_x(self):
db = self.db
return Expression(db, db._adapter.ST_X, self, type='string')
def st_y(self):
db = self.db
return Expression(db, db._adapter.ST_Y, self, type='string')
def st_distance(self, other):
db = self.db
return Expression(db,db._adapter.ST_DISTANCE,self,other, 'double')
def st_simplify(self, value):
db = self.db
return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)
# GIS queries
def st_contains(self, value):
db = self.db
return Query(db, db._adapter.ST_CONTAINS, self, value)
def st_equals(self, value):
db = self.db
return Query(db, db._adapter.ST_EQUALS, self, value)
def st_intersects(self, value):
db = self.db
return Query(db, db._adapter.ST_INTERSECTS, self, value)
def st_overlaps(self, value):
db = self.db
return Query(db, db._adapter.ST_OVERLAPS, self, value)
def st_touches(self, value):
db = self.db
return Query(db, db._adapter.ST_TOUCHES, self, value)
def st_within(self, value):
db = self.db
return Query(db, db._adapter.ST_WITHIN, self, value)
def st_dwithin(self, value, distance):
db = self.db
return Query(db, db._adapter.ST_DWITHIN, self, (value, distance))
# for use in both Query and sortby
class SQLCustomType(object):
"""
allows defining of custom SQL types
Example::
decimal = SQLCustomType(
type ='double',
native ='integer',
encoder =(lambda x: int(float(x) * 100)),
decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
)
db.define_table(
'example',
Field('value', type=decimal)
)
:param type: the web2py type (default = 'string')
:param native: the backend type
:param encoder: how to encode the value to store it in the backend
:param decoder: how to decode the value retrieved from the backend
:param validator: what validators to use ( default = None, will use the
default validator for type)
"""
def __init__(
self,
type='string',
native=None,
encoder=None,
decoder=None,
validator=None,
_class=None,
):
self.type = type
self.native = native
self.encoder = encoder or (lambda x: x)
self.decoder = decoder or (lambda x: x)
self.validator = validator
self._class = _class or type
def startswith(self, text=None):
try:
return self.type.startswith(self, text)
except TypeError:
return False
def endswith(self, text=None):
try:
return self.type.endswith(self, text)
except TypeError:
return False
def __getslice__(self, a=0, b=100):
return None
def __getitem__(self, i):
return None
def __str__(self):
return self._class
class FieldVirtual(object):
def __init__(self, name, f=None, ftype='string',label=None,table_name=None):
# for backward compatibility
(self.name, self.f) = (name, f) if f else ('unknown', name)
self.type = ftype
self.label = label or self.name.capitalize().replace('_',' ')
self.represent = lambda v,r=None:v
self.formatter = IDENTITY
self.comment = None
self.readable = True
self.writable = False
self.requires = None
self.widget = None
self.tablename = table_name
self.filter_out = None
def __str__(self):
return '%s.%s' % (self.tablename, self.name)
class FieldMethod(object):
def __init__(self, name, f=None, handler=None):
# for backward compatibility
(self.name, self.f) = (name, f) if f else ('unknown', name)
self.handler = handler
def list_represent(x,r=None):
return ', '.join(str(y) for y in x or [])
class Field(Expression):
Virtual = FieldVirtual
Method = FieldMethod
Lazy = FieldMethod # for backward compatibility
"""
an instance of this class represents a database field
example::
a = Field(name, 'string', length=32, default=None, required=False,
requires=IS_NOT_EMPTY(), ondelete='CASCADE',
notnull=False, unique=False,
uploadfield=True, widget=None, label=None, comment=None,
uploadfield=True, # True means store on disk,
# 'a_field_name' means store in this field in db
# False means file content will be discarded.
writable=True, readable=True, update=None, authorize=None,
autodelete=False, represent=None, uploadfolder=None,
uploadseparate=False # upload to separate directories by uuid_keys
# first 2 character and tablename.fieldname
# False - old behavior
# True - put uploaded file in
# <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
# directory)
uploadfs=None # a pyfilesystem where to store upload
to be used as argument of DAL.define_table
allowed field types:
string, boolean, integer, double, text, blob,
date, time, datetime, upload, password
"""
def __init__(
self,
fieldname,
type='string',
length=None,
default=DEFAULT,
required=False,
requires=DEFAULT,
ondelete='CASCADE',
notnull=False,
unique=False,
uploadfield=True,
widget=None,
label=None,
comment=None,
writable=True,
readable=True,
update=None,
authorize=None,
autodelete=False,
represent=None,
uploadfolder=None,
uploadseparate=False,
uploadfs=None,
compute=None,
custom_store=None,
custom_retrieve=None,
custom_retrieve_file_properties=None,
custom_delete=None,
filter_in=None,
filter_out=None,
custom_qualifier=None,
map_none=None,
rname=None
):
self._db = self.db = None # both for backward compatibility
self.op = None
self.first = None
self.second = None
if isinstance(fieldname, unicode):
try:
fieldname = str(fieldname)
except UnicodeEncodeError:
raise SyntaxError('Field: invalid unicode field name')
self.name = fieldname = cleanup(fieldname)
if not isinstance(fieldname, str) or hasattr(Table, fieldname) or \
fieldname[0] == '_' or '.' in fieldname or \
REGEX_PYTHON_KEYWORDS.match(fieldname):
raise SyntaxError('Field: invalid field name: %s, '
'use rname for "funny" names' % fieldname)
if not isinstance(type, (Table, Field)):
self.type = type
else:
self.type = 'reference %s' % type
self.length = length if not length is None else DEFAULTLENGTH.get(self.type, 512)
self.default = default if default != DEFAULT else (update or None)
self.required = required # is this field required
self.ondelete = ondelete.upper() # this is for reference fields only
self.notnull = notnull
self.unique = unique
self.uploadfield = uploadfield
self.uploadfolder = uploadfolder
self.uploadseparate = uploadseparate
self.uploadfs = uploadfs
self.widget = widget
self.comment = comment
self.writable = writable
self.readable = readable
self.update = update
self.authorize = authorize
self.autodelete = autodelete
self.represent = (list_represent if represent is None and
type in ('list:integer', 'list:string') else represent)
self.compute = compute
self.isattachment = True
self.custom_store = custom_store
self.custom_retrieve = custom_retrieve
self.custom_retrieve_file_properties = custom_retrieve_file_properties
self.custom_delete = custom_delete
self.filter_in = filter_in
self.filter_out = filter_out
self.custom_qualifier = custom_qualifier
self.label = (label if label is not None else
fieldname.replace('_', ' ').title())
self.requires = requires if requires is not None else []
self.map_none = map_none
self._rname = rname
def set_attributes(self, *args, **attributes):
self.__dict__.update(*args, **attributes)
def clone(self, point_self_references_to=False, **args):
field = copy.copy(self)
if point_self_references_to and \
                field.type == 'reference %s' % field._tablename:
field.type = 'reference %s' % point_self_references_to
field.__dict__.update(args)
return field
def store(self, file, filename=None, path=None):
if self.custom_store:
return self.custom_store(file, filename, path)
if isinstance(file, cgi.FieldStorage):
filename = filename or file.filename
file = file.file
elif not filename:
filename = file.name
filename = os.path.basename(filename.replace('/', os.sep).replace('\\', os.sep))
m = REGEX_STORE_PATTERN.search(filename)
extension = m and m.group('e') or 'txt'
uuid_key = web2py_uuid().replace('-', '')[-16:]
encoded_filename = base64.b16encode(filename).lower()
newfilename = '%s.%s.%s.%s' % \
(self._tablename, self.name, uuid_key, encoded_filename)
newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension
self_uploadfield = self.uploadfield
if isinstance(self_uploadfield, Field):
blob_uploadfield_name = self_uploadfield.uploadfield
keys = {self_uploadfield.name: newfilename,
blob_uploadfield_name: file.read()}
self_uploadfield.table.insert(**keys)
elif self_uploadfield is True:
if path:
pass
elif self.uploadfolder:
path = self.uploadfolder
elif self.db._adapter.folder:
path = pjoin(self.db._adapter.folder, '..', 'uploads')
else:
raise RuntimeError(
"you must specify a Field(...,uploadfolder=...)")
if self.uploadseparate:
if self.uploadfs:
raise RuntimeError("not supported")
path = pjoin(path, "%s.%s" % (
self._tablename, self.name), uuid_key[:2]
)
if not exists(path):
os.makedirs(path)
pathfilename = pjoin(path, newfilename)
if self.uploadfs:
dest_file = self.uploadfs.open(newfilename, 'wb')
else:
dest_file = open(pathfilename, 'wb')
try:
shutil.copyfileobj(file, dest_file)
except IOError:
                raise IOError(
                    'Unable to store file "%s" because of invalid permissions, '
                    'a read-only file system, or a filename that is too long' % pathfilename)
dest_file.close()
return newfilename
def retrieve(self, name, path=None, nameonly=False):
"""
if nameonly==True return (filename, fullfilename) instead of
(filename, stream)
"""
self_uploadfield = self.uploadfield
if self.custom_retrieve:
return self.custom_retrieve(name, path)
import http
if self.authorize or isinstance(self_uploadfield, str):
row = self.db(self == name).select().first()
if not row:
raise http.HTTP(404)
if self.authorize and not self.authorize(row):
raise http.HTTP(403)
file_properties = self.retrieve_file_properties(name, path)
filename = file_properties['filename']
if isinstance(self_uploadfield, str): # ## if file is in DB
stream = StringIO.StringIO(row[self_uploadfield] or '')
elif isinstance(self_uploadfield, Field):
blob_uploadfield_name = self_uploadfield.uploadfield
query = self_uploadfield == name
data = self_uploadfield.table(query)[blob_uploadfield_name]
stream = StringIO.StringIO(data)
elif self.uploadfs:
# ## if file is on pyfilesystem
stream = self.uploadfs.open(name, 'rb')
else:
# ## if file is on regular filesystem
            # this is intentionally a string with the filename and not a stream
# this propagates and allows stream_file_or_304_or_206 to be called
fullname = pjoin(file_properties['path'], name)
if nameonly:
return (filename, fullname)
stream = open(fullname, 'rb')
return (filename, stream)
def retrieve_file_properties(self, name, path=None):
m = REGEX_UPLOAD_PATTERN.match(name)
if not m or not self.isattachment:
raise TypeError('Can\'t retrieve %s file properties' % name)
self_uploadfield = self.uploadfield
if self.custom_retrieve_file_properties:
return self.custom_retrieve_file_properties(name, path)
if m.group('name'):
try:
filename = base64.b16decode(m.group('name'), True)
filename = REGEX_CLEANUP_FN.sub('_', filename)
except (TypeError, AttributeError):
filename = name
else:
filename = name
# ## if file is in DB
if isinstance(self_uploadfield, (str, Field)):
return dict(path=None, filename=filename)
# ## if file is on filesystem
if not path:
if self.uploadfolder:
path = self.uploadfolder
else:
path = pjoin(self.db._adapter.folder, '..', 'uploads')
if self.uploadseparate:
t = m.group('table')
f = m.group('field')
u = m.group('uuidkey')
path = pjoin(path, "%s.%s" % (t, f), u[:2])
return dict(path=path, filename=filename)
def formatter(self, value):
requires = self.requires
if value is None or not requires:
return value or self.map_none
if not isinstance(requires, (list, tuple)):
requires = [requires]
elif isinstance(requires, tuple):
requires = list(requires)
else:
requires = copy.copy(requires)
requires.reverse()
for item in requires:
if hasattr(item, 'formatter'):
value = item.formatter(value)
return value
def validate(self, value):
if not self.requires or self.requires == DEFAULT:
return ((value if value != self.map_none else None), None)
requires = self.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
for validator in requires:
(value, error) = validator(value)
if error:
return (value, error)
return ((value if value != self.map_none else None), None)
def count(self, distinct=None):
return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer')
def as_dict(self, flat=False, sanitize=True):
attrs = (
'name', 'authorize', 'represent', 'ondelete',
'custom_store', 'autodelete', 'custom_retrieve',
'filter_out', 'uploadseparate', 'widget', 'uploadfs',
'update', 'custom_delete', 'uploadfield', 'uploadfolder',
'custom_qualifier', 'unique', 'writable', 'compute',
'map_none', 'default', 'type', 'required', 'readable',
'requires', 'comment', 'label', 'length', 'notnull',
'custom_retrieve_file_properties', 'filter_in')
serializable = (int, long, basestring, float, tuple,
bool, type(None))
def flatten(obj):
if isinstance(obj, dict):
return dict((flatten(k), flatten(v)) for k, v in obj.items())
elif isinstance(obj, (tuple, list, set)):
return [flatten(v) for v in obj]
elif isinstance(obj, serializable):
return obj
elif isinstance(obj, (datetime.datetime,
datetime.date, datetime.time)):
return str(obj)
else:
return None
d = dict()
if not (sanitize and not (self.readable or self.writable)):
for attr in attrs:
if flat:
d.update({attr: flatten(getattr(self, attr))})
else:
d.update({attr: getattr(self, attr)})
d["fieldname"] = d.pop("name")
return d
def as_xml(self, sanitize=True):
if have_serializers:
xml = serializers.xml
else:
raise ImportError("No xml serializers available")
d = self.as_dict(flat=True, sanitize=sanitize)
return xml(d)
def as_json(self, sanitize=True):
if have_serializers:
json = serializers.json
else:
raise ImportError("No json serializers available")
d = self.as_dict(flat=True, sanitize=sanitize)
return json(d)
def as_yaml(self, sanitize=True):
if have_serializers:
d = self.as_dict(flat=True, sanitize=sanitize)
return serializers.yaml(d)
else:
raise ImportError("No YAML serializers available")
def __nonzero__(self):
return True
def __str__(self):
try:
return '%s.%s' % (self.tablename, self.name)
except:
return '<no table>.%s' % self.name
@property
def sqlsafe(self):
if self._table:
return self._table.sqlsafe + '.' + \
(self._rname or self._db._adapter.sqlsafe_field(self.name))
return '<no table>.%s' % self.name
@property
def sqlsafe_name(self):
return self._rname or self._db._adapter.sqlsafe_field(self.name)
class Query(object):
"""
a query object necessary to define a set.
it can be stored or can be passed to DAL.__call__() to obtain a Set
Example::
query = db.users.name=='Max'
set = db(query)
records = set.select()
"""
def __init__(
self,
db,
op,
first=None,
second=None,
ignore_common_filters=False,
**optional_args
):
self.db = self._db = db
self.op = op
self.first = first
self.second = second
self.ignore_common_filters = ignore_common_filters
self.optional_args = optional_args
def __repr__(self):
return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)
def __str__(self):
return str(self.db._adapter.expand(self))
def __and__(self, other):
return Query(self.db,self.db._adapter.AND,self,other)
__rand__ = __and__
def __or__(self, other):
return Query(self.db,self.db._adapter.OR,self,other)
__ror__ = __or__
def __invert__(self):
if self.op==self.db._adapter.NOT:
return self.first
return Query(self.db,self.db._adapter.NOT,self)
def __eq__(self, other):
return repr(self) == repr(other)
def __ne__(self, other):
return not (self == other)
def case(self,t=1,f=0):
return self.db._adapter.CASE(self,t,f)
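    # Illustrative sketch (not part of the original source; 'person' is a
    # hypothetical table). case() turns a query into a CASE WHEN expression
    # that can be selected like any other column:
    #
    #     adult = (db.person.age > 18).case('adult', 'minor')
    #     rows = db(db.person).select(db.person.name, adult)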
def as_dict(self, flat=False, sanitize=True):
"""Experimental stuff
This allows to return a plain dictionary with the basic
query representation. Can be used with json/xml services
for client-side db I/O
Example:
>>> q = db.auth_user.id != 0
>>> q.as_dict(flat=True)
{"op": "NE", "first":{"tablename": "auth_user",
"fieldname": "id"},
"second":0}
"""
SERIALIZABLE_TYPES = (tuple, dict, set, list, int, long, float,
basestring, type(None), bool)
def loop(d):
newd = dict()
for k, v in d.items():
if k in ("first", "second"):
if isinstance(v, self.__class__):
newd[k] = loop(v.__dict__)
elif isinstance(v, Field):
newd[k] = {"tablename": v._tablename,
"fieldname": v.name}
elif isinstance(v, Expression):
newd[k] = loop(v.__dict__)
elif isinstance(v, SERIALIZABLE_TYPES):
newd[k] = v
elif isinstance(v, (datetime.date,
datetime.time,
datetime.datetime)):
newd[k] = unicode(v)
elif k == "op":
if callable(v):
newd[k] = v.__name__
elif isinstance(v, basestring):
newd[k] = v
else: pass # not callable or string
elif isinstance(v, SERIALIZABLE_TYPES):
if isinstance(v, dict):
newd[k] = loop(v)
else: newd[k] = v
return newd
if flat:
return loop(self.__dict__)
else: return self.__dict__
def as_xml(self, sanitize=True):
if have_serializers:
xml = serializers.xml
else:
raise ImportError("No xml serializers available")
d = self.as_dict(flat=True, sanitize=sanitize)
return xml(d)
def as_json(self, sanitize=True):
if have_serializers:
json = serializers.json
else:
raise ImportError("No json serializers available")
d = self.as_dict(flat=True, sanitize=sanitize)
return json(d)
def xorify(orderby):
if not orderby:
return None
orderby2 = orderby[0]
for item in orderby[1:]:
orderby2 = orderby2 | item
return orderby2
def use_common_filters(query):
return (query and hasattr(query,'ignore_common_filters') and \
not query.ignore_common_filters)
class Set(object):
"""
a Set represents a set of records in the database,
the records are identified by the query=Query(...) object.
normally the Set is generated by DAL.__call__(Query(...))
given a set, for example
set = db(db.users.name=='Max')
you can:
set.update(db.users.name='Massimo')
set.delete() # all elements in the set
set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
and take subsets:
subset = set(db.users.id<5)
"""
def __init__(self, db, query, ignore_common_filters = None):
self.db = db
self._db = db # for backward compatibility
self.dquery = None
# if query is a dict, parse it
if isinstance(query, dict):
query = self.parse(query)
if not ignore_common_filters is None and \
use_common_filters(query) == ignore_common_filters:
query = copy.copy(query)
query.ignore_common_filters = ignore_common_filters
self.query = query
def __repr__(self):
return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query)
def __call__(self, query, ignore_common_filters=False):
if query is None:
return self
elif isinstance(query,Table):
query = self.db._adapter.id_query(query)
elif isinstance(query,str):
query = Expression(self.db,query)
elif isinstance(query,Field):
query = query!=None
if self.query:
return Set(self.db, self.query & query,
ignore_common_filters=ignore_common_filters)
else:
return Set(self.db, query,
ignore_common_filters=ignore_common_filters)
def _count(self,distinct=None):
return self.db._adapter._count(self.query,distinct)
def _select(self, *fields, **attributes):
adapter = self.db._adapter
tablenames = adapter.tables(self.query,
attributes.get('join',None),
attributes.get('left',None),
attributes.get('orderby',None),
attributes.get('groupby',None))
fields = adapter.expand_all(fields, tablenames)
return adapter._select(self.query,fields,attributes)
def _delete(self):
db = self.db
tablename = db._adapter.get_table(self.query)
return db._adapter._delete(tablename,self.query)
def _update(self, **update_fields):
db = self.db
tablename = db._adapter.get_table(self.query)
fields = db[tablename]._listify(update_fields,update=True)
return db._adapter._update(tablename,self.query,fields)
def as_dict(self, flat=False, sanitize=True):
if flat:
uid = dbname = uri = None
codec = self.db._db_codec
if not sanitize:
uri, dbname, uid = (self.db._dbname, str(self.db),
self.db._db_uid)
d = {"query": self.query.as_dict(flat=flat)}
d["db"] = {"uid": uid, "codec": codec,
"name": dbname, "uri": uri}
return d
else: return self.__dict__
def as_xml(self, sanitize=True):
if have_serializers:
xml = serializers.xml
else:
raise ImportError("No xml serializers available")
d = self.as_dict(flat=True, sanitize=sanitize)
return xml(d)
def as_json(self, sanitize=True):
if have_serializers:
json = serializers.json
else:
raise ImportError("No json serializers available")
d = self.as_dict(flat=True, sanitize=sanitize)
return json(d)
def parse(self, dquery):
"Experimental: Turn a dictionary into a Query object"
self.dquery = dquery
return self.build(self.dquery)
def build(self, d):
"Experimental: see .parse()"
op, first, second = (d["op"], d["first"],
d.get("second", None))
left = right = built = None
if op in ("AND", "OR"):
if not (type(first), type(second)) == (dict, dict):
raise SyntaxError("Invalid AND/OR query")
if op == "AND":
built = self.build(first) & self.build(second)
else: built = self.build(first) | self.build(second)
elif op == "NOT":
if first is None:
raise SyntaxError("Invalid NOT query")
built = ~self.build(first)
else:
# normal operation (GT, EQ, LT, ...)
for k, v in {"left": first, "right": second}.items():
if isinstance(v, dict) and v.get("op"):
v = self.build(v)
if isinstance(v, dict) and ("tablename" in v):
v = self.db[v["tablename"]][v["fieldname"]]
if k == "left": left = v
else: right = v
if hasattr(self.db._adapter, op):
opm = getattr(self.db._adapter, op)
if op == "EQ": built = left == right
elif op == "NE": built = left != right
elif op == "GT": built = left > right
elif op == "GE": built = left >= right
elif op == "LT": built = left < right
elif op == "LE": built = left <= right
elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
built = Expression(self.db, opm)
elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY",
"COALESCE_ZERO", "RAW", "INVERT"):
built = Expression(self.db, opm, left)
elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING",
"REGEXP", "LIKE", "ILIKE", "STARTSWITH",
"ENDSWITH", "ADD", "SUB", "MUL", "DIV",
"MOD", "AS", "ON", "COMMA", "NOT_NULL",
"COALESCE", "CONTAINS", "BELONGS"):
built = Expression(self.db, opm, left, right)
# expression as string
elif not (left or right): built = Expression(self.db, op)
else:
raise SyntaxError("Operator not supported: %s" % op)
return built
def isempty(self):
return not self.select(limitby=(0,1), orderby_on_limitby=False)
def count(self,distinct=None, cache=None):
db = self.db
if cache:
cache_model, time_expire = cache
sql = self._count(distinct=distinct)
key = db._uri + '/' + sql
if len(key)>200: key = hashlib_md5(key).hexdigest()
return cache_model(
key,
(lambda self=self,distinct=distinct: \
db._adapter.count(self.query,distinct)),
time_expire)
return db._adapter.count(self.query,distinct)
def select(self, *fields, **attributes):
adapter = self.db._adapter
tablenames = adapter.tables(self.query,
attributes.get('join',None),
attributes.get('left',None),
attributes.get('orderby',None),
attributes.get('groupby',None))
fields = adapter.expand_all(fields, tablenames)
return adapter.select(self.query,fields,attributes)
def nested_select(self,*fields,**attributes):
return Expression(self.db,self._select(*fields,**attributes))
def delete(self):
db = self.db
tablename = db._adapter.get_table(self.query)
table = db[tablename]
if any(f(self) for f in table._before_delete): return 0
ret = db._adapter.delete(tablename,self.query)
ret and [f(self) for f in table._after_delete]
return ret
def update(self, **update_fields):
db = self.db
tablename = db._adapter.get_table(self.query)
table = db[tablename]
table._attempt_upload(update_fields)
if any(f(self,update_fields) for f in table._before_update):
return 0
fields = table._listify(update_fields,update=True)
if not fields:
raise SyntaxError("No fields to update")
ret = db._adapter.update("%s" % table._tablename,self.query,fields)
ret and [f(self,update_fields) for f in table._after_update]
return ret
def update_naive(self, **update_fields):
"""
same as update but does not call table._before_update and _after_update
"""
tablename = self.db._adapter.get_table(self.query)
table = self.db[tablename]
fields = table._listify(update_fields,update=True)
if not fields: raise SyntaxError("No fields to update")
ret = self.db._adapter.update("%s" % table,self.query,fields)
return ret
def validate_and_update(self, **update_fields):
tablename = self.db._adapter.get_table(self.query)
response = Row()
response.errors = Row()
new_fields = copy.copy(update_fields)
for key,value in update_fields.iteritems():
value,error = self.db[tablename][key].validate(value)
if error:
response.errors[key] = error
else:
new_fields[key] = value
table = self.db[tablename]
if response.errors:
response.updated = None
else:
if not any(f(self,new_fields) for f in table._before_update):
fields = table._listify(new_fields,update=True)
if not fields: raise SyntaxError("No fields to update")
ret = self.db._adapter.update(tablename,self.query,fields)
ret and [f(self,new_fields) for f in table._after_update]
else:
ret = 0
response.updated = ret
return response
def delete_uploaded_files(self, upload_fields=None):
table = self.db[self.db._adapter.tables(self.query)[0]]
# ## mind uploadfield==True means file is not in DB
if upload_fields:
fields = upload_fields.keys()
else:
fields = table.fields
fields = [f for f in fields if table[f].type == 'upload'
and table[f].uploadfield == True
and table[f].autodelete]
if not fields:
return False
for record in self.select(*[table[f] for f in fields]):
for fieldname in fields:
field = table[fieldname]
oldname = record.get(fieldname, None)
if not oldname:
continue
if upload_fields and oldname == upload_fields[fieldname]:
continue
if field.custom_delete:
field.custom_delete(oldname)
else:
uploadfolder = field.uploadfolder
if not uploadfolder:
uploadfolder = pjoin(
self.db._adapter.folder, '..', 'uploads')
if field.uploadseparate:
items = oldname.split('.')
uploadfolder = pjoin(
uploadfolder,
"%s.%s" % (items[0], items[1]),
items[2][:2])
oldpath = pjoin(uploadfolder, oldname)
if exists(oldpath):
os.unlink(oldpath)
return False
class RecordUpdater(object):
def __init__(self, colset, table, id):
self.colset, self.db, self.tablename, self.id = \
colset, table._db, table._tablename, id
def __call__(self, **fields):
colset, db, tablename, id = self.colset, self.db, self.tablename, self.id
table = db[tablename]
newfields = fields or dict(colset)
for fieldname in newfields.keys():
if not fieldname in table.fields or table[fieldname].type=='id':
del newfields[fieldname]
table._db(table._id==id,ignore_common_filters=True).update(**newfields)
colset.update(newfields)
return colset
class RecordDeleter(object):
def __init__(self, table, id):
self.db, self.tablename, self.id = table._db, table._tablename, id
def __call__(self):
return self.db(self.db[self.tablename]._id==self.id).delete()
class LazyReferenceGetter(object):
def __init__(self, table, id):
self.db, self.tablename, self.id = table._db, table._tablename, id
def __call__(self, other_tablename):
if self.db._lazy_tables is False:
raise AttributeError()
table = self.db[self.tablename]
other_table = self.db[other_tablename]
for rfield in table._referenced_by:
if rfield.table == other_table:
return LazySet(rfield, self.id)
raise AttributeError()
class LazySet(object):
def __init__(self, field, id):
self.db, self.tablename, self.fieldname, self.id = \
field.db, field._tablename, field.name, id
def _getset(self):
query = self.db[self.tablename][self.fieldname]==self.id
return Set(self.db,query)
def __repr__(self):
return repr(self._getset())
def __call__(self, query, ignore_common_filters=False):
return self._getset()(query, ignore_common_filters)
def _count(self,distinct=None):
return self._getset()._count(distinct)
def _select(self, *fields, **attributes):
return self._getset()._select(*fields,**attributes)
def _delete(self):
return self._getset()._delete()
def _update(self, **update_fields):
return self._getset()._update(**update_fields)
def isempty(self):
return self._getset().isempty()
def count(self,distinct=None, cache=None):
return self._getset().count(distinct,cache)
def select(self, *fields, **attributes):
return self._getset().select(*fields,**attributes)
def nested_select(self,*fields,**attributes):
return self._getset().nested_select(*fields,**attributes)
def delete(self):
return self._getset().delete()
def update(self, **update_fields):
return self._getset().update(**update_fields)
def update_naive(self, **update_fields):
return self._getset().update_naive(**update_fields)
def validate_and_update(self, **update_fields):
return self._getset().validate_and_update(**update_fields)
def delete_uploaded_files(self, upload_fields=None):
return self._getset().delete_uploaded_files(upload_fields)
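# Illustrative note (added, not part of the original DAL): with lazy tables enabled and a
# db.dog.owner field referencing db.person, a selected person row exposes its referencing
# rows through LazyReferenceGetter/LazySet, so something like person_row.dog.select() only
# builds and runs the query db.dog.owner == person_row.id when the set is actually used.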
class VirtualCommand(object):
def __init__(self,method,row):
self.method=method
self.row=row
def __call__(self,*args,**kwargs):
return self.method(self.row,*args,**kwargs)
def lazy_virtualfield(f):
f.__lazy__ = True
return f
class Rows(object):
"""
A wrapper for the return value of a select. It basically represents a table.
It has an iterator and each row is represented as a dictionary.
"""
# ## TODO: this class still needs some work to care for ID/OID
def __init__(
self,
db=None,
records=[],
colnames=[],
compact=True,
rawrows=None
):
self.db = db
self.records = records
self.colnames = colnames
self.compact = compact
self.response = rawrows
def __repr__(self):
return '<Rows (%s)>' % len(self.records)
def setvirtualfields(self,**keyed_virtualfields):
"""
db.define_table('x',Field('number','integer'))
if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]
from gluon.dal import lazy_virtualfield
class MyVirtualFields(object):
# normal virtual field (backward compatible, discouraged)
def normal_shift(self): return self.x.number+1
# lazy virtual field (because of the @lazy_virtualfield decorator)
@lazy_virtualfield
def lazy_shift(instance,row,delta=4): return row.x.number+delta
db.x.virtualfields.append(MyVirtualFields())
for row in db(db.x).select():
print row.number, row.normal_shift, row.lazy_shift(delta=7)
"""
if not keyed_virtualfields:
return self
for row in self.records:
for (tablename,virtualfields) in keyed_virtualfields.iteritems():
attributes = dir(virtualfields)
if not tablename in row:
box = row[tablename] = Row()
else:
box = row[tablename]
updated = False
for attribute in attributes:
if attribute[0] != '_':
method = getattr(virtualfields,attribute)
if hasattr(method,'__lazy__'):
box[attribute]=VirtualCommand(method,row)
elif type(method)==types.MethodType:
if not updated:
virtualfields.__dict__.update(row)
updated = True
box[attribute]=method()
return self
def __and__(self,other):
if self.colnames!=other.colnames:
raise Exception('Cannot & incompatible Rows objects')
records = self.records+other.records
return Rows(self.db,records,self.colnames,
compact=self.compact or other.compact)
def __or__(self,other):
if self.colnames!=other.colnames:
raise Exception('Cannot | incompatible Rows objects')
records = [record for record in other.records
if not record in self.records]
records = self.records + records
return Rows(self.db,records,self.colnames,
compact=self.compact or other.compact)
def __nonzero__(self):
if len(self.records):
return 1
return 0
def __len__(self):
return len(self.records)
def __getslice__(self, a, b):
return Rows(self.db,self.records[a:b],self.colnames,compact=self.compact)
def __getitem__(self, i):
row = self.records[i]
keys = row.keys()
if self.compact and len(keys) == 1 and keys[0] != '_extra':
return row[row.keys()[0]]
return row
def __iter__(self):
"""
iterator over records
"""
for i in xrange(len(self)):
yield self[i]
def __str__(self):
"""
serializes the table into a csv file
"""
s = StringIO.StringIO()
self.export_to_csv_file(s)
return s.getvalue()
def first(self):
if not self.records:
return None
return self[0]
def last(self):
if not self.records:
return None
return self[-1]
def find(self,f,limitby=None):
"""
returns a new Rows object, a subset of the original object,
filtered by the function f
"""
if not self:
return Rows(self.db, [], self.colnames, compact=self.compact)
records = []
if limitby:
a,b = limitby
else:
a,b = 0,len(self)
k = 0
for i, row in enumerate(self):
if f(row):
if a<=k: records.append(self.records[i])
k += 1
if k==b: break
return Rows(self.db, records, self.colnames, compact=self.compact)
def exclude(self, f):
"""
removes elements from the calling Rows object, filtered by the function f,
and returns a new Rows object containing the removed elements
"""
if not self.records:
return Rows(self.db, [], self.colnames, compact=self.compact)
removed = []
i=0
while i<len(self):
row = self[i]
if f(row):
removed.append(self.records[i])
del self.records[i]
else:
i += 1
return Rows(self.db, removed, self.colnames, compact=self.compact)
def sort(self, f, reverse=False):
"""
returns a list of sorted elements (not sorted in place)
"""
rows = Rows(self.db, [], self.colnames, compact=self.compact)
# When compact=True, iterating over self modifies each record,
# so when sorting self, it is necessary to return a sorted
# version of self.records rather than the sorted self directly.
rows.records = [r for (r, s) in sorted(zip(self.records, self),
key=lambda r: f(r[1]),
reverse=reverse)]
return rows
def group_by_value(self, *fields, **args):
"""
regroups the rows, by one of the fields
"""
one_result = False
if 'one_result' in args:
one_result = args['one_result']
def build_fields_struct(row, fields, num, groups):
''' helper function: recursively nests the row under the values of
fields[num:], building a dict of dicts keyed by field value '''
if num > len(fields)-1:
if one_result:
return row
else:
return [row]
key = fields[num]
value = row[key]
if value not in groups:
groups[value] = build_fields_struct(row, fields, num+1, {})
else:
struct = build_fields_struct(row, fields, num+1, groups[ value ])
# still have more grouping to do
if type(struct) == type(dict()):
groups[value].update()
# no more grouping, first only is off
elif type(struct) == type(list()):
groups[value] += struct
# no more grouping, first only on
else:
groups[value] = struct
return groups
if len(fields) == 0:
return self
# if select returned no results
if not self.records:
return {}
grouped_row_group = dict()
# build the struct
for row in self:
build_fields_struct(row, fields, 0, grouped_row_group)
return grouped_row_group
def render(self, i=None, fields=None):
"""
Takes an index and returns a copy of the indexed row with values
transformed via the "represent" attributes of the associated fields.
If no index is specified, a generator is returned for iteration
over all the rows.
fields -- a list of fields to transform (if None, all fields with
"represent" attributes will be transformed).
"""
if i is None:
return (self.render(i, fields=fields) for i in range(len(self)))
import sqlhtml
row = copy.deepcopy(self.records[i])
keys = row.keys()
tables = [f.tablename for f in fields] if fields \
else [k for k in keys if k != '_extra']
for table in tables:
repr_fields = [f.name for f in fields if f.tablename == table] \
if fields else [k for k in row[table].keys()
if (hasattr(self.db[table], k) and
isinstance(self.db[table][k], Field)
and self.db[table][k].represent)]
for field in repr_fields:
row[table][field] = sqlhtml.represent(
self.db[table][field], row[table][field], row[table])
if self.compact and len(keys) == 1 and keys[0] != '_extra':
return row[keys[0]]
return row
def as_list(self,
compact=True,
storage_to_dict=True,
datetime_to_str=False,
custom_types=None):
"""
returns the data as a list or dictionary.
:param storage_to_dict: when True returns a dict, otherwise a list(default True)
:param datetime_to_str: convert datetime fields as strings (default False)
"""
(oc, self.compact) = (self.compact, compact)
if storage_to_dict:
items = [item.as_dict(datetime_to_str, custom_types) for item in self]
else:
items = [item for item in self]
self.compact = compact
return items
def as_dict(self,
key='id',
compact=True,
storage_to_dict=True,
datetime_to_str=False,
custom_types=None):
"""
returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False)
:param key: the name of the field to be used as dict key, normally the id
:param compact: if True, single-table rows are returned without the table-name level (default True)
:param storage_to_dict: when True returns a dict, otherwise a list(default True)
:param datetime_to_str: convert datetime fields as strings (default False)
"""
# test for multiple rows
multi = False
f = self.first()
if f and isinstance(key, basestring):
multi = any([isinstance(v, f.__class__) for v in f.values()])
if (not "." in key) and multi:
# key does not name a table.field and rows span multiple tables: fall back to int indices
def new_key():
i = 0
while True:
yield i
i += 1
key_generator = new_key()
key = lambda r: key_generator.next()
rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types)
if isinstance(key,str) and key.count('.')==1:
(table, field) = key.split('.')
return dict([(r[table][field],r) for r in rows])
elif isinstance(key,str):
return dict([(r[key],r) for r in rows])
else:
return dict([(key(r),r) for r in rows])
def as_trees(self, parent_name='parent_id', children_name='children'):
roots = []
drows = {}
for row in self:
drows[row.id] = row
row[children_name] = []
for row in self:
parent = row[parent_name]
if parent is None:
roots.append(row)
else:
drows[parent][children_name].append(row)
return roots
def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
"""
export data to csv, the first line contains the column names
:param ofile: where the csv must be exported to
:param null: how null values must be represented (default '<NULL>')
:param delimiter: delimiter to separate values (default ',')
:param quotechar: character to use to quote string values (default '"')
:param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
:param represent: use the fields .represent value (default False)
:param colnames: list of column names to use (default self.colnames)
This will only work when exporting rows objects!!!!
DO NOT use this with db.export_to_csv()
"""
delimiter = kwargs.get('delimiter', ',')
quotechar = kwargs.get('quotechar', '"')
quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
represent = kwargs.get('represent', False)
writer = csv.writer(ofile, delimiter=delimiter,
quotechar=quotechar, quoting=quoting)
def unquote_colnames(colnames):
unq_colnames = []
for col in colnames:
m = self.db._adapter.REGEX_TABLE_DOT_FIELD.match(col)
if not m:
unq_colnames.append(col)
else:
unq_colnames.append('.'.join(m.groups()))
return unq_colnames
colnames = kwargs.get('colnames', self.colnames)
write_colnames = kwargs.get('write_colnames',True)
# a proper csv starting with the column names
if write_colnames:
writer.writerow(unquote_colnames(colnames))
def none_exception(value):
"""
returns a cleaned up value that can be used for csv export:
- unicode text is encoded as such
- None values are replaced with the given representation (default <NULL>)
"""
if value is None:
return null
elif isinstance(value, unicode):
return value.encode('utf8')
elif isinstance(value,Reference):
return long(value)
elif hasattr(value, 'isoformat'):
return value.isoformat()[:19].replace('T', ' ')
elif isinstance(value, (list,tuple)): # for type='list:..'
return bar_encode(value)
return value
for record in self:
row = []
for col in colnames:
m = self.db._adapter.REGEX_TABLE_DOT_FIELD.match(col)
if not m:
row.append(record._extra[col])
else:
(t, f) = m.groups()
field = self.db[t][f]
if isinstance(record.get(t, None), (Row,dict)):
value = record[t][f]
else:
value = record[f]
if field.type=='blob' and not value is None:
value = base64.b64encode(value)
elif represent and field.represent:
value = field.represent(value,record)
row.append(none_exception(value))
writer.writerow(row)
def xml(self,strict=False,row_name='row',rows_name='rows'):
"""
serializes the table using sqlhtml.SQLTABLE (if present)
"""
if strict:
ncols = len(self.colnames)
return '<%s>\n%s\n</%s>' % (rows_name,
'\n'.join(row.as_xml(row_name=row_name,
colnames=self.colnames) for
row in self), rows_name)
import sqlhtml
return sqlhtml.SQLTABLE(self).xml()
def as_xml(self,row_name='row',rows_name='rows'):
return self.xml(strict=True, row_name=row_name, rows_name=rows_name)
def as_json(self, mode='object', default=None):
"""
serializes the rows to a JSON list or object with objects
mode='object' is not implemented (should return a nested
object structure)
"""
items = [record.as_json(mode=mode, default=default,
serialize=False,
colnames=self.colnames) for
record in self]
if have_serializers:
return serializers.json(items,
default=default or
serializers.custom_json)
elif simplejson:
return simplejson.dumps(items)
else:
raise RuntimeError("missing simplejson")
# for consistent naming yet backwards compatible
as_csv = __str__
json = as_json
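# Hedged sketch (added for illustration, not part of the original DAL): a tiny demo of the
# Rows helpers defined above, assuming the in-memory SQLite connection string. It is never
# called by the module itself.
def _rows_helpers_demo():
    db = DAL('sqlite:memory')
    db.define_table('thing', Field('name'), Field('qty', 'integer'))
    for name, qty in [('a', 1), ('b', 2), ('c', 2)]:
        db.thing.insert(name=name, qty=qty)
    rows = db(db.thing).select()
    many = rows.find(lambda r: r.qty > 1)       # new Rows object; the original is unchanged
    grouped = rows.group_by_value('qty')        # {1: [<row a>], 2: [<row b>, <row c>]}
    keyed = rows.as_dict(key='name')            # {'a': {...}, 'b': {...}, 'c': {...}}
    return many, grouped, keyed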
################################################################################
# dummy function used to define some doctests
################################################################################
def test_all():
"""
>>> if len(sys.argv)<2: db = DAL("sqlite://test.db")
>>> if len(sys.argv)>1: db = DAL(sys.argv[1])
>>> tmp = db.define_table('users',\
Field('stringf', 'string', length=32, required=True),\
Field('booleanf', 'boolean', default=False),\
Field('passwordf', 'password', notnull=True),\
Field('uploadf', 'upload'),\
Field('blobf', 'blob'),\
Field('integerf', 'integer', unique=True),\
Field('doublef', 'double', unique=True,notnull=True),\
Field('jsonf', 'json'),\
Field('datef', 'date', default=datetime.date.today()),\
Field('timef', 'time'),\
Field('datetimef', 'datetime'),\
migrate='test_user.table')
Insert a record
>>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
uploadf=None, integerf=5, doublef=3.14,\
jsonf={"j": True},\
datef=datetime.date(2001, 1, 1),\
timef=datetime.time(12, 30, 15),\
datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
1
Drop the table
>>> db.users.drop()
Examples of insert, select, update, delete
>>> tmp = db.define_table('person',\
Field('name'),\
Field('birth','date'),\
migrate='test_person.table')
>>> person_id = db.person.insert(name='Marco',birth='2005-06-22')
>>> person_id = db.person.insert(name='Massimo',birth='1971-12-21')
commented len(db().select(db.person.ALL))
commented 2
>>> me = db(db.person.id==person_id).select()[0] # test select
>>> me.name
'Massimo'
>>> db.person[2].name
'Massimo'
>>> db.person(2).name
'Massimo'
>>> db.person(name='Massimo').name
'Massimo'
>>> db.person(db.person.name=='Massimo').name
'Massimo'
>>> row = db.person[2]
>>> row.name == row['name'] == row['person.name'] == row('person.name')
True
>>> db(db.person.name=='Massimo').update(name='massimo') # test update
1
>>> db(db.person.name=='Marco').select().first().delete_record() # test delete
1
Update a single record
>>> me.update_record(name="Max")
<Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
>>> me.name
'Max'
Examples of complex search conditions
>>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
1
>>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
1
>>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
1
>>> me = db(db.person.id==person_id).select(db.person.name)[0]
>>> me.name
'Max'
Examples of search conditions using extract from date/datetime/time
>>> len(db(db.person.birth.month()==12).select())
1
>>> len(db(db.person.birth.year()>1900).select())
1
Example of usage of NULL
>>> len(db(db.person.birth==None).select()) ### test NULL
0
>>> len(db(db.person.birth!=None).select()) ### test NULL
1
Examples of search conditions using lower, upper, and like
>>> len(db(db.person.name.upper()=='MAX').select())
1
>>> len(db(db.person.name.like('%ax')).select())
1
>>> len(db(db.person.name.upper().like('%AX')).select())
1
>>> len(db(~db.person.name.upper().like('%AX')).select())
0
orderby, groupby and limitby
>>> people = db().select(db.person.name, orderby=db.person.name)
>>> order = db.person.name|~db.person.birth
>>> people = db().select(db.person.name, orderby=order)
>>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)
>>> people = db().select(db.person.name, orderby=order, limitby=(0,100))
Example of one 2 many relation
>>> tmp = db.define_table('dog',\
Field('name'),\
Field('birth','date'),\
Field('owner',db.person),\
migrate='test_dog.table')
>>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
1
A simple JOIN
>>> len(db(db.dog.owner==db.person.id).select())
1
>>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
1
Drop tables
>>> db.dog.drop()
>>> db.person.drop()
Example of many 2 many relation and Set
>>> tmp = db.define_table('author', Field('name'),\
migrate='test_author.table')
>>> tmp = db.define_table('paper', Field('title'),\
migrate='test_paper.table')
>>> tmp = db.define_table('authorship',\
Field('author_id', db.author),\
Field('paper_id', db.paper),\
migrate='test_authorship.table')
>>> aid = db.author.insert(name='Massimo')
>>> pid = db.paper.insert(title='QCD')
>>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)
Define a Set
>>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
>>> rows = authored_papers.select(db.author.name, db.paper.title)
>>> for row in rows: print row.author.name, row.paper.title
Massimo QCD
Example of search condition using belongs
>>> set = (1, 2, 3)
>>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
>>> print rows[0].title
QCD
Example of search condition using nested select
>>> nested_select = db()._select(db.authorship.paper_id)
>>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
>>> print rows[0].title
QCD
Example of expressions
>>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
>>> db(mynumber).delete()
0
>>> for i in range(10): tmp = mynumber.insert(x=i)
>>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
45
>>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
5
Output in csv
>>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
author.name,paper.title\r
Massimo,QCD
Delete all leftover tables
>>> DAL.distributed_transaction_commit(db)
>>> db.mynumber.drop()
>>> db.authorship.drop()
>>> db.author.drop()
>>> db.paper.drop()
"""
################################################################################
# deprecated since the new DAL; here only for backward compatibility
################################################################################
SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = Row
SQLDB = DAL
GQLDB = DAL
DAL.Field = Field # was necessary in gluon/globals.py session.connect
DAL.Table = Table # was necessary in gluon/globals.py session.connect
################################################################################
# Geodal utils
################################################################################
def geoPoint(x,y):
return "POINT (%f %f)" % (x,y)
def geoLine(*line):
return "LINESTRING (%s)" % ','.join("%f %f" % item for item in line)
def geoPolygon(*line):
return "POLYGON ((%s))" % ','.join("%f %f" % item for item in line)
################################################################################
# run tests
################################################################################
if __name__ == '__main__':
import doctest
doctest.testmod()
|
turtleloveshoes/kitsune
|
refs/heads/master
|
kitsune/dashboards/cron.py
|
16
|
from datetime import date
from django.conf import settings
from django.db import connection
import cronjobs
from kitsune.dashboards.models import (
PERIODS, WikiDocumentVisits, WikiMetric, L10N_TOP20_CODE, L10N_TOP100_CODE, L10N_ALL_CODE,
L10N_ACTIVE_CONTRIBUTORS_CODE)
from kitsune.dashboards.readouts import l10n_overview_rows
from kitsune.products.models import Product
from kitsune.sumo.redis_utils import redis_client
from kitsune.wiki.models import Document
from kitsune.wiki.utils import num_active_contributors
@cronjobs.register
def reload_wiki_traffic_stats():
if settings.STAGE:
return
for period, _ in PERIODS:
WikiDocumentVisits.reload_period_from_analytics(
period, verbose=settings.DEBUG)
@cronjobs.register
def update_l10n_coverage_metrics():
"""Calculate and store the l10n metrics for each locale/product.
The metrics are:
* Percent localized of top 20 articles
* Percent localized of all articles
"""
today = date.today()
# Loop through all locales.
for locale in settings.SUMO_LANGUAGES:
# Skip en-US, it is always 100% localized.
if locale == settings.WIKI_DEFAULT_LANGUAGE:
continue
# Loop through all enabled products, including None (really All).
for product in [None] + list(Product.objects.filter(visible=True)):
# (Ab)use the l10n_overview_rows helper from the readouts.
rows = l10n_overview_rows(locale=locale, product=product)
# % of top 20 articles
top20 = rows['top-20']
try:
percent = 100.0 * float(top20['numerator']) / top20['denominator']
except ZeroDivisionError:
percent = 0.0
WikiMetric.objects.create(
code=L10N_TOP20_CODE,
locale=locale,
product=product,
date=today,
value=percent)
# % of top 100 articles
top100 = rows['top-100']
try:
percent = 100.0 * float(top100['numerator']) / top100['denominator']
except ZeroDivisionError:
percent = 0.0
WikiMetric.objects.create(
code=L10N_TOP100_CODE,
locale=locale,
product=product,
date=today,
value=percent)
# % of all articles
all_ = rows['all']
try:
percent = 100 * float(all_['numerator']) / all_['denominator']
except ZeroDivisionError:
percent = 0.0
WikiMetric.objects.create(
code=L10N_ALL_CODE,
locale=locale,
product=product,
date=today,
value=percent)
@cronjobs.register
def update_l10n_contributor_metrics(day=None):
"""Update the number of active contributors for each locale/product.
An active contributor is defined as a user that created or reviewed a
revision in the previous calendar month.
"""
if day is None:
day = date.today()
first_of_month = date(day.year, day.month, 1)
if day.month == 1:
previous_first_of_month = date(day.year - 1, 12, 1)
else:
previous_first_of_month = date(day.year, day.month - 1, 1)
# Loop through all locales.
for locale in settings.SUMO_LANGUAGES:
# Loop through all enabled products, including None (really All).
for product in [None] + list(Product.objects.filter(visible=True)):
num = num_active_contributors(
from_date=previous_first_of_month,
to_date=first_of_month,
locale=locale,
product=product)
WikiMetric.objects.create(
code=L10N_ACTIVE_CONTRIBUTORS_CODE,
locale=locale,
product=product,
date=previous_first_of_month,
value=num)
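# Worked example (added for illustration): if this job runs with day=date(2014, 1, 15), the
# window above is previous_first_of_month=date(2013, 12, 1) up to first_of_month=date(2014, 1, 1),
# i.e. exactly the previous calendar month, and the metric is stored against 2013-12-01.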
def _get_old_unhelpful():
"""
Gets the data from 2 weeks ago and formats it as output so that we can
get a percent change.
"""
old_formatted = {}
cursor = connection.cursor()
cursor.execute(
"""SELECT doc_id, yes, no
FROM
(SELECT wiki_revision.document_id as doc_id,
SUM(limitedvotes.helpful) as yes,
SUM(NOT(limitedvotes.helpful)) as no
FROM
(SELECT * FROM wiki_helpfulvote
WHERE created <= DATE_SUB(CURDATE(), INTERVAL 1 WEEK)
AND created >= DATE_SUB(DATE_SUB(CURDATE(),
INTERVAL 1 WEEK), INTERVAL 1 WEEK)
) as limitedvotes
INNER JOIN wiki_revision ON
limitedvotes.revision_id=wiki_revision.id
INNER JOIN wiki_document ON
wiki_document.id=wiki_revision.document_id
WHERE wiki_document.locale="en-US"
GROUP BY doc_id
HAVING no > yes
) as calculated""")
old_data = cursor.fetchall()
for data in old_data:
doc_id = data[0]
yes = float(data[1])
no = float(data[2])
total = yes + no
if total == 0:
continue
old_formatted[doc_id] = {'total': total,
'percentage': yes / total}
return old_formatted
def _get_current_unhelpful(old_formatted):
"""Gets the data for the past week and formats it as return value."""
final = {}
cursor = connection.cursor()
cursor.execute(
"""SELECT doc_id, yes, no
FROM
(SELECT wiki_revision.document_id as doc_id,
SUM(limitedvotes.helpful) as yes,
SUM(NOT(limitedvotes.helpful)) as no
FROM
(SELECT * FROM wiki_helpfulvote
WHERE created >= DATE_SUB(CURDATE(), INTERVAL 1 WEEK)
) as limitedvotes
INNER JOIN wiki_revision ON
limitedvotes.revision_id=wiki_revision.id
INNER JOIN wiki_document ON
wiki_document.id=wiki_revision.document_id
WHERE wiki_document.locale="en-US"
GROUP BY doc_id
HAVING no > yes
) as calculated""")
current_data = cursor.fetchall()
for data in current_data:
doc_id = data[0]
yes = float(data[1])
no = float(data[2])
total = yes + no
if total == 0:
continue
percentage = yes / total
if doc_id in old_formatted:
final[doc_id] = {
'total': total,
'currperc': percentage,
'diffperc': percentage - old_formatted[doc_id]['percentage']
}
else:
final[doc_id] = {
'total': total,
'currperc': percentage,
'diffperc': 0.0
}
return final
@cronjobs.register
def cache_most_unhelpful_kb_articles():
"""Calculate and save the most unhelpful KB articles in the past month."""
REDIS_KEY = settings.HELPFULVOTES_UNHELPFUL_KEY
old_formatted = _get_old_unhelpful()
final = _get_current_unhelpful(old_formatted)
if final == {}:
return
def _mean(vals):
"""Argument: List of floats"""
if len(vals) == 0:
return None
return sum(vals) / len(vals)
def _bayes_avg(C, m, R, v):
# Bayesian Average
# C = mean vote, v = number of votes,
# R = mean rating, m = minimum votes to list in topranked
return (C * m + R * v) / (m + v)
mean_perc = _mean([float(final[key]['currperc']) for key in final.keys()])
mean_total = _mean([float(final[key]['total']) for key in final.keys()])
# TODO: Make this into namedtuples
sorted_final = [(key,
final[key]['total'],
final[key]['currperc'],
final[key]['diffperc'],
_bayes_avg(mean_perc, mean_total,
final[key]['currperc'],
final[key]['total']))
for key in final.keys()]
sorted_final.sort(key=lambda entry: entry[4]) # Sort by Bayesian Avg
redis = redis_client('helpfulvotes')
redis.delete(REDIS_KEY)
max_total = max([b[1] for b in sorted_final])
for entry in sorted_final:
doc = Document.objects.get(pk=entry[0])
redis.rpush(REDIS_KEY, (u'%s::%s::%s::%s::%s::%s::%s' %
(entry[0], # Document ID
entry[1], # Total Votes
entry[2], # Current Percentage
entry[3], # Difference in Percentage
1 - (entry[1] / max_total), # Graph Color
doc.slug, # Document slug
doc.title))) # Document title
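# Worked example (added for illustration): with a site-wide mean helpfulness C=0.4, a mean
# vote count m=10, and an article rated R=0.2 over v=5 votes, _bayes_avg gives
# (0.4 * 10 + 0.2 * 5) / (10 + 5) = 5.0 / 15 ~= 0.33, pulling articles with few votes toward
# the site mean before the list is sorted and pushed to redis.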
|
lah7/openrazer
|
refs/heads/master
|
daemon/openrazer_daemon/dbus_services/dbus_methods/mamba.py
|
3
|
import math
import struct
from openrazer_daemon.dbus_services import endpoint
@endpoint('razer.device.power', 'getBattery', out_sig='d')
def get_battery(self):
"""
Get mouse's battery level
"""
self.logger.debug("DBus call get_battery")
driver_path = self.get_driver_path('charge_level')
with open(driver_path, 'r') as driver_file:
battery_255 = float(driver_file.read().strip())
if battery_255 < 0:
return -1.0
battery_100 = (battery_255 / 255) * 100
return battery_100
@endpoint('razer.device.power', 'isCharging', out_sig='b')
def is_charging(self):
"""
Get charging status
"""
self.logger.debug("DBus call is_charging")
driver_path = self.get_driver_path('charge_status')
with open(driver_path, 'r') as driver_file:
return bool(int(driver_file.read().strip()))
@endpoint('razer.device.power', 'setIdleTime', in_sig='q')
def set_idle_time(self, idle_time):
"""
Set the idle time of the mouse in seconds
:param idle_time: Idle time in seconds (unsigned short)
:type idle_time: int
"""
self.logger.debug("DBus call set_idle_time")
driver_path = self.get_driver_path('device_idle_time')
with open(driver_path, 'w') as driver_file:
driver_file.write(str(idle_time))
@endpoint('razer.device.power', 'getIdleTime', out_sig='q')
def get_idle_time(self):
"""
Get the idle time of the mouse in seconds
:return: Idle time in seconds (unsigned short)
:rtype: int
"""
self.logger.debug("DBus call get_idle_time")
driver_path = self.get_driver_path('device_idle_time')
with open(driver_path, 'r') as driver_file:
result = driver_file.read()
result = int(result.strip())
return result
@endpoint('razer.device.power', 'setLowBatteryThreshold', in_sig='y')
def set_low_battery_threshold(self, threshold):
"""
Set the low battery threshold as a percentage
:param threshold: Battery threshold as a percentage
:type threshold: int
"""
self.logger.debug("DBus call set_low_battery_threshold")
driver_path = self.get_driver_path('charge_low_threshold')
threshold = math.floor((threshold / 100) * 255)
with open(driver_path, 'w') as driver_file:
driver_file.write(str(threshold))
@endpoint('razer.device.power', 'getLowBatteryThreshold', out_sig='y')
def get_low_battery_threshold(self):
"""
Get the low battery threshold as a percentage
:return: Battery threshold as a percentage
:rtype: int
"""
self.logger.debug("DBus call get_low_battery_threshold")
driver_path = self.get_driver_path('charge_low_threshold')
with open(driver_path, 'r') as driver_file:
result = driver_file.read()
result = int(result.strip())
return round((result / 255) * 100)
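# Illustrative round trip (added; values assumed): set_low_battery_threshold(25) writes
# floor(25 / 100 * 255) = 63 to the driver, and get_low_battery_threshold() reads 63 back
# as round(63 / 255 * 100) = 25.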
@endpoint('razer.device.lighting.power', 'setChargeEffect', in_sig='y')
def set_charge_effect(self, charge_effect):
"""
Set the charging effect.
If 0x00 then it will use the current mouse's effect
If 0x01 it will use the charge colour
:param charge_effect: Charge effect
:type charge_effect: int
:return:
"""
self.logger.debug("DBus call set_charge_effect")
driver_path = self.get_driver_path('charge_effect')
with open(driver_path, 'wb') as driver_file:
driver_file.write(bytes([charge_effect]))
@endpoint('razer.device.lighting.power', 'setChargeColour', in_sig='yyy')
def set_charge_colour(self, red, green, blue):
"""
Set the charge colour
:param red: Red component
:type red: int
:param green: Green component
:type green: int
:param blue: Blue component
:type blue: int
"""
self.logger.debug("DBus call set_charge_colour")
driver_path = self.get_driver_path('charge_colour')
payload = bytes([red, green, blue])
with open(driver_path, 'wb') as driver_file:
driver_file.write(payload)
@endpoint('razer.device.dpi', 'setDPI', in_sig='qq')
def set_dpi_xy(self, dpi_x, dpi_y):
"""
Set the DPI on the mouse. Takes two big-endian unsigned shorts (4 bytes).
:param dpi_x: X DPI
:type dpi_x: int
:param dpi_y: Y DPI
:type dpi_y: int
"""
self.logger.debug("DBus call set_dpi_xy")
driver_path = self.get_driver_path('dpi')
if self._testing:
with open(driver_path, 'w') as driver_file:
if dpi_y == -1:
driver_file.write("{}".format(dpi_x))
else:
driver_file.write("{}:{}".format(dpi_x, dpi_y))
return
# If the application requests just one value to be written
if dpi_y == -1:
dpi_bytes = struct.pack('>H', dpi_x)
else:
dpi_bytes = struct.pack('>HH', dpi_x, dpi_y)
self.dpi[0] = dpi_x
self.dpi[1] = dpi_y
self.set_persistence(None, "dpi_x", dpi_x)
self.set_persistence(None, "dpi_y", dpi_y)
# constrain DPI to maximum
if hasattr(self, 'DPI_MAX'):
if self.dpi[0] > self.DPI_MAX:
self.dpi[0] = self.DPI_MAX
if self.dpi[1] > self.DPI_MAX:
self.dpi[1] = self.DPI_MAX
with open(driver_path, 'wb') as driver_file:
driver_file.write(dpi_bytes)
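# Illustrative example (added; values assumed): set_dpi_xy(800, 800) on real hardware packs
# struct.pack('>HH', 800, 800) == b'\x03\x20\x03\x20' and writes those 4 bytes to the dpi
# driver node; with dpi_y == -1 only a single big-endian unsigned short is written.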
@endpoint('razer.device.dpi', 'getDPI', out_sig='ai')
def get_dpi_xy(self):
"""
get the DPI on the mouse
:return: List of X, Y DPI
:rtype: list of int
"""
self.logger.debug("DBus call get_dpi_xy")
driver_path = self.get_driver_path('dpi')
# try retrieving DPI from the hardware.
# if we can't (e.g. because the mouse has been disconnected)
# return the value in local storage.
try:
with open(driver_path, 'r') as driver_file:
result = driver_file.read()
dpi = [int(dpi) for dpi in result.strip().split(':')]
except FileNotFoundError:
return self.dpi
return dpi
@endpoint('razer.device.dpi', 'setDPIStages', in_sig='ya(qq)')
def set_dpi_stages(self, active_stage, dpi_stages):
"""
Set the DPI stages on the mouse. Takes in pairs of 2-byte big-endian values.
:param active_stage: DPI stage to enable
:param dpi_stages: pairs of dpi X and dpi Y for each stage
:type dpi_stages: list of (int, int)
"""
self.logger.debug("DBus call set_dpi_stages")
driver_path = self.get_driver_path('dpi_stages')
dpi_bytes = struct.pack('B', active_stage)
for dpi_x, dpi_y in dpi_stages:
dpi_bytes += struct.pack('>HH', dpi_x, dpi_y)
with open(driver_path, 'wb') as driver_file:
driver_file.write(dpi_bytes)
@endpoint('razer.device.dpi', 'getDPIStages', out_sig='(ya(qq))')
def get_dpi_stages(self):
"""
get the DPI stages on the mouse
:return: active stage and list of (X, Y) DPI pairs
:rtype: (int, list of (int, int))
"""
self.logger.debug("DBus call get_dpi_stages")
driver_path = self.get_driver_path('dpi_stages')
dpi_stages = []
with open(driver_path, 'rb') as driver_file:
result = driver_file.read()
(active_stage,) = struct.unpack('B', result[:1])
result = result[1:]
while len(result) >= 4:
(dpi_x, dpi_y) = struct.unpack('>HH', result[:4])
dpi_stages.append((dpi_x, dpi_y))
result = result[4:]
return (active_stage, dpi_stages)
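# Illustrative layout (added; values assumed): set_dpi_stages(2, [(800, 800), (1600, 1600)])
# writes b'\x02' + struct.pack('>HH', 800, 800) + struct.pack('>HH', 1600, 1600), i.e. one
# byte for the active stage followed by a big-endian X/Y pair per stage, which
# get_dpi_stages() parses back into (2, [(800, 800), (1600, 1600)]).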
@endpoint('razer.device.dpi', 'maxDPI', out_sig='i')
def max_dpi(self):
self.logger.debug("DBus call max_dpi")
if hasattr(self, 'DPI_MAX'):
return self.DPI_MAX
else:
return 500
@endpoint('razer.device.dpi', 'availableDPI', out_sig='ai')
def available_dpi(self):
self.logger.debug("DBus call available_dpi")
if hasattr(self, 'AVAILABLE_DPI'):
return self.AVAILABLE_DPI
return []
@endpoint('razer.device.misc', 'setPollRate', in_sig='q')
def set_poll_rate(self, rate):
"""
Set the polling rate of the mouse (125, 500 or 1000 Hz)
:param rate: Poll rate
:type rate: int
"""
self.logger.debug("DBus call set_poll_rate")
if rate in (1000, 500, 125):
driver_path = self.get_driver_path('poll_rate')
# remember poll rate
self.poll_rate = rate
with open(driver_path, 'w') as driver_file:
driver_file.write(str(rate))
else:
self.logger.error("Poll rate %d is invalid", rate)
@endpoint('razer.device.misc', 'getPollRate', out_sig='i')
def get_poll_rate(self):
"""
Get the polling rate from the device
:return: Poll rate
:rtype: int
"""
self.logger.debug("DBus call get_poll_rate")
return int(self.poll_rate)
|
shanemcd/ansible
|
refs/heads/devel
|
test/units/modules/cloud/openstack/test_os_server.py
|
80
|
import collections
import inspect
import mock
import pytest
import yaml
from ansible.module_utils.six import string_types
from ansible.modules.cloud.openstack import os_server
class AnsibleFail(Exception):
pass
class AnsibleExit(Exception):
pass
def params_from_doc(func):
'''This function extracts the docstring from the specified function,
parses it as a YAML document, and returns parameters for the os_server
module.'''
doc = inspect.getdoc(func)
cfg = yaml.load(doc)
for task in cfg:
for module, params in task.items():
for k, v in params.items():
if k in ['nics'] and isinstance(v, string_types):
params[k] = [v]
task[module] = collections.defaultdict(str,
params)
return cfg[0]['os_server']
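# Illustrative example (added): for the docstring of test_nics_string_net_id below,
# params_from_doc returns a defaultdict roughly equal to {'nics': ['net-id=1234']} -- the
# YAML is parsed, string 'nics' values are wrapped in a list, and missing keys default to ''.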
class FakeCloud (object):
ports = [
{'name': 'port1', 'id': '1234'},
{'name': 'port2', 'id': '4321'},
]
networks = [
{'name': 'network1', 'id': '5678'},
{'name': 'network2', 'id': '8765'},
]
images = [
{'name': 'cirros', 'id': '1'},
{'name': 'fedora', 'id': '2'},
]
flavors = [
{'name': 'm1.small', 'id': '1', 'ram': 1024},
{'name': 'm1.tiny', 'id': '2', 'ram': 512},
]
def _find(self, source, name):
for item in source:
if item['name'] == name or item['id'] == name:
return item
def get_image_id(self, name, exclude=None):
image = self._find(self.images, name)
if image:
return image['id']
def get_flavor(self, name):
return self._find(self.flavors, name)
def get_flavor_by_ram(self, ram, include=None):
for flavor in self.flavors:
if flavor['ram'] >= ram and (include is None or include in
flavor['name']):
return flavor
def get_port(self, name):
return self._find(self.ports, name)
def get_network(self, name):
return self._find(self.networks, name)
create_server = mock.MagicMock()
class TestNetworkArgs(object):
'''This class exercises the _network_args function of the
os_server module. For each test, we parse the YAML document
contained in the docstring to retrieve the module parameters for the
test.'''
def setup_method(self, method):
self.cloud = FakeCloud()
self.module = mock.MagicMock()
self.module.params = params_from_doc(method)
def test_nics_string_net_id(self):
'''
- os_server:
nics: net-id=1234
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
def test_nics_string_net_id_list(self):
'''
- os_server:
nics: net-id=1234,net-id=4321
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
assert(args[1]['net-id'] == '4321')
def test_nics_string_port_id(self):
'''
- os_server:
nics: port-id=1234
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['port-id'] == '1234')
def test_nics_string_net_name(self):
'''
- os_server:
nics: net-name=network1
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '5678')
def test_nics_string_port_name(self):
'''
- os_server:
nics: port-name=port1
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['port-id'] == '1234')
def test_nics_structured_net_id(self):
'''
- os_server:
nics:
- net-id: '1234'
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
def test_nics_structured_mixed(self):
'''
- os_server:
nics:
- net-id: '1234'
- port-name: port1
- 'net-name=network1,port-id=4321'
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
assert(args[1]['port-id'] == '1234')
assert(args[2]['net-id'] == '5678')
assert(args[3]['port-id'] == '4321')
class TestCreateServer(object):
def setup_method(self, method):
self.cloud = FakeCloud()
self.module = mock.MagicMock()
self.module.params = params_from_doc(method)
self.module.fail_json.side_effect = AnsibleFail()
self.module.exit_json.side_effect = AnsibleExit()
self.meta = mock.MagicMock()
self.meta.get_hostvars_from_server.return_value = {
'id': '1234'
}
os_server.meta = self.meta
def test_create_server(self):
'''
- os_server:
image: cirros
flavor: m1.tiny
nics:
- net-name: network1
meta:
- key: value
'''
with pytest.raises(AnsibleExit):
os_server._create_server(self.module, self.cloud)
assert(self.cloud.create_server.call_count == 1)
assert(self.cloud.create_server.call_args[1]['image'] == self.cloud.get_image_id('cirros'))
assert(self.cloud.create_server.call_args[1]['flavor'] == self.cloud.get_flavor('m1.tiny')['id'])
assert(self.cloud.create_server.call_args[1]['nics'][0]['net-id'] == self.cloud.get_network('network1')['id'])
def test_create_server_bad_flavor(self):
'''
- os_server:
image: cirros
flavor: missing_flavor
nics:
- net-name: network1
'''
with pytest.raises(AnsibleFail):
os_server._create_server(self.module, self.cloud)
assert('missing_flavor' in
self.module.fail_json.call_args[1]['msg'])
def test_create_server_bad_nic(self):
'''
- os_server:
image: cirros
flavor: m1.tiny
nics:
- net-name: missing_network
'''
with pytest.raises(AnsibleFail):
os_server._create_server(self.module, self.cloud)
assert('missing_network' in
self.module.fail_json.call_args[1]['msg'])
|
jethac/ATF
|
refs/heads/master
|
Test/FunctionalTests/FsmEditorTestScripts/ExpectedFailureNoSuccessMessage.py
|
10
|
#Copyright (c) 2014 Sony Computer Entertainment America LLC. See License.txt.
import sys
sys.path.append("./CommonTestScripts")
import Test
Test.Equal(1, 1)
Test.Equal(2, 2)
#Intentionally commented, we want this script to fail
#print Test.SUCCESS
|
jonwille/Frogger
|
refs/heads/gh-pages
|
node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py
|
1558
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
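# Illustrative example (added): with pretty=False, the structure from "Example 2" above
# serializes to a single line, e.g.
#   XmlToString(['myelement', {'a': 'value1'}, ['childtype', 'This is']])
#   == '<?xml version="1.0" encoding="utf-8"?><myelement a="value1"><childtype>This is</childtype></myelement>'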
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
try:
xml_string = xml_string.encode(encoding)
except Exception:
xml_string = unicode(xml_string, 'latin-1').encode(encoding)
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
_xml_escape_map = {
    '"': '&quot;',
    "'": '&apos;',
    '<': '&lt;',
    '>': '&gt;',
    '&': '&amp;',
    '\n': '&#xA;',
    '\r': '&#xD;',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
|
sparkslabs/kamaelia_
|
refs/heads/master
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Visualisation/PhysicsGraph3D/TopologyViewer3DWithParams.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
===========================================================
Generic 3D Topology Viewer With more Parameters supports
===========================================================
Extend TopologyViewer3D by supporting additional parameters of "ADD" and "UPDATE" commands.
Example Usage
-------------
A simple console driven topology viewer::
Pipeline( ConsoleReader(),
lines_to_tokenlists(),
TopologyViewer3DWithParams(),
).run()
Then at runtime try typing these commands to change the topology in real time::
>>> DEL ALL
>>> ADD NODE 1 "1st node" (0,0,-10) teapot
>>> ADD NODE 2 "2nd node" randompos sphere image=../../../Docs/cat.gif
>>> ADD NODE 3 "3rd node" randompos - bgcolour=(255,255,0);bgcolour=(0,255,255)
>>> UPDATE NODE 1 name=1st;bgcolour=(0,255,0)
>>> UPDATE NODE 3 name=3rd;bgcolour=(255,0,0);fgcolour=(0,0,255);fontsize=100
>>> ADD NODE 1:1 "1st child node of the 1st node" " ( 0 , 0 , -10 ) " -
>>> ADD NODE 1:2 "2nd child node of the 1st node" randompos - "fontsize = 20"
>>> ADD LINK 1 2
>>> ADD LINK 3 2
>>> DEL LINK 1 2
>>> ADD LINK 1:1 1:2
>>> DEL NODE 1
How does it work?
-----------------
Extend TopologyViewer3D by supporting additional parameters of "ADD" and "UPDATE" commands.
The format of "ADD" commands:
[ "ADD", "NODE", <id>, <name>, <positionSpec>, <particle type>, <parameters> ]
The format of "UPDATE" commands:
[ "UPDATE", "NODE", <id>, <parameters> ]
The format of parameters: pa=pa_value;pb=pb_value
Add quotation marks if there are spaces within the parameters.
Available parameters:
- bgcolour -- Colour of surfaces behind text label (default=(230,230,230)), only applies to the label texture
- fgcolour -- Colour of the text label (default=(0,0,0)), only applies to the label texture
- sidecolour -- Colour of side planes (default=(200,200,244)), only applies to CuboidParticle3D
- bgcolourselected -- Background colour when the particle is selected (default=(0,0,0))
- fgcolourselected -- Foreground colour when the particle is selected (default=(244,244,244))
- sidecolourselected -- Side colour when the particle is selected (default=(0,0,100))
- margin -- Margin size in pixels (default=8)
- fontsize -- Font size for label text (default=50)
- pixelscaling -- Factor to convert pixels to units in 3d, ignored if size is specified (default=100)
- thickness -- Thickness of button widget, ignored if size is specified (default=0.3)
- image -- The URI of an image; if specified, an image texture is used instead of the label texture
See Kamaelia.PhysicsGraph3D.TopologyViewer3D.TopologyViewer3D for more information.
"""
import re
def paramStr2paramDict(string):
"""Transform a parameters string to a parameters dictionary."""
colourRegex = re.compile("^\( *(\d{1,3}) *, *(\d{1,3}) *, *(\d{1,3}) *\)$")
decimalRegex = re.compile('^\d*\.?\d*$')
dictionary = {}
string = string.strip().strip(';')
string_list = string.split(';')
for item in string_list:
result = item.split('=')
param = result[0].strip()
value = result[1].strip()
mColour = colourRegex.match(value)
if mColour: # If colour triple tuple
value = map(int, mColour.groups())
else:
mDecimal = decimalRegex.match(value)
if mDecimal: # If Decimal
if '.' in value:
value = float(value)
else:
value = int(value)
dictionary.update({param : value})
return dictionary
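# Hedged usage sketch (added for illustration; never called by the module itself):
def _paramStr2paramDict_demo():
    params = paramStr2paramDict('bgcolour=(255,255,0);fontsize=20;image=../../../Docs/cat.gif')
    # colour triples become lists of ints, plain numbers become int/float,
    # everything else stays a string
    assert params['bgcolour'] == [255, 255, 0]
    assert params['fontsize'] == 20
    assert params['image'] == '../../../Docs/cat.gif'
    return params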
from TopologyViewer3D import TopologyViewer3D
class TopologyViewer3DWithParams(TopologyViewer3D):
"""\
TopologyViewer3DWithParams(...) -> new TopologyViewer3DWithParams component.
A component that takes incoming topology (change) data and displays it live
using pygame OpenGL. A simple physics model assists with visual layout. Particle
types, appearance and physics interactions can be customised.
It extends TopologyViewer3D by supporting additional parameters of "ADD" commands.
Keyword arguments (in order):
- screensize -- (width,height) of the display area (default = (800,600))
- fullscreen -- True to start up in fullscreen mode (default = False)
- caption -- Caption for the pygame window (default = "Topology Viewer")
- particleTypes -- dict("type" -> klass) mapping types of particle to classes used to render them (default = {"-":RenderingParticle})
- initialTopology -- (nodes,bonds) where bonds=list((src,dst)) starting state for the topology (default=([],[]))
- laws -- Physics laws to apply between particles (default = SimpleLaws(bondlength=100))
- simCyclesPerRedraw -- number of physics sim cycles to run between each redraw (default=1)
- border -- Minimum distance from edge of display area that new particles appear (default=100)
"""
def __init__(self, **argd):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(TopologyViewer3DWithParams, self).__init__(**argd)
def updateParticle(self, node_id, **params):
"""\
updateParticle(node_id, \*\*params) -> updates the given node's parameters/attributes if it exists
- node_id -- an id for an already existing node
- params -- the updated parameters/attributes dictionary of the particle, e.g. name, texture, colour and size
"""
for p in self.physics.particles:
if p.ID == node_id:
p.updateAttrs(**params)
p.needRedraw = True
return
def doCommand(self, msg):
"""\
Processes a topology command tuple:
[ "ADD", "NODE", <id>, <name>, <positionSpec>, <particle type>, <parameters> ]
[ "DEL", "NODE", <id> ]
[ "ADD", "LINK", <id from>, <id to> ]
[ "DEL", "LINK", <id from>, <id to> ]
[ "DEL", "ALL" ]
[ "GET", "ALL" ]
[ "UPDATE_NAME", "NODE", <id>, <new name> ]
[ "GET_NAME", "NODE", <id> ]
[ "UPDATE", "NODE", <id>, <parameters> ]
"""
#print 'doCommand'
if len(msg) >= 2:
cmd = msg[0].upper(), msg[1].upper()
# Add default arguments when they are not provided
if cmd == ("ADD", "NODE"):
if len(msg) == 4:
msg += ['randompos', '-']
elif len(msg) == 5:
msg += ['-']
if cmd == ("ADD", "NODE") and (len(msg) == 6 or len(msg) == 7):
if len(msg) == 7 and msg[6].strip() != "":
params = paramStr2paramDict(msg[6])
else:
params = {}
if msg[2] in [p.ID for p in self.physics.particles]:
print "Node exists, please use a new node ID!"
else:
if self.particleTypes.has_key(msg[5]):
#print 'ADD NODE begin'
ptype = self.particleTypes[msg[5]]
ident = msg[2]
name = msg[3]
posSpec = msg[4]
pos = self._generatePos(posSpec)
particle = ptype(position = pos, ID=ident, name=name, **params)
particle.originaltype = msg[5]
#self.particles.append(particle)
#print self.particles[0]
self.addParticle(particle)
self.isNewNode = True
#print id(particle)
#print 'ADD NODE end'
elif cmd == ("DEL", "NODE") and len(msg) == 3:
ident = msg[2]
self.removeParticle(ident)
elif cmd == ("ADD", "LINK") and len(msg) == 4:
src = msg[2]
dst = msg[3]
self.makeBond(src, dst)
elif cmd == ("DEL", "LINK") and len(msg) == 4:
src = msg[2]
dst = msg[3]
self.breakBond(src, dst)
elif cmd == ("DEL", "ALL") and len(msg) == 2:
self.removeParticle(*self.physics.particleDict.keys())
self.currentLevel = 0
self.currentParentParticleID = ''
elif cmd == ("GET", "ALL") and len(msg) == 2:
topology = [("DEL","ALL")]
topology.extend(self.getTopology())
self.send( ("TOPOLOGY", topology), "outbox" )
elif cmd == ("UPDATE_NAME", "NODE") and len(msg) == 4:
node_id = msg[2]
new_name = msg[3]
self.updateParticleLabel(node_id, new_name)
self.send( ("UPDATE_NAME", "NODE", node_id, new_name), "outbox" )
elif cmd == ("GET_NAME", "NODE") and len(msg) == 3:
node_id = msg[2]
name = self.getParticleLabel(node_id)
self.send( ("GET_NAME", "NODE", node_id, name), "outbox" )
elif cmd == ("UPDATE", "NODE") and len(msg) == 4:
node_id = msg[2]
params = paramStr2paramDict(msg[3])
self.updateParticle(node_id, **params)
self.send( ("UPDATE", "NODE", node_id, msg[3]), "outbox" )
else:
print "Command Error: please check your command format!"
else:
print "Command Error: not enough parameters!"
__kamaelia_components__ = ( TopologyViewer3DWithParams, )
if __name__ == "__main__":
from Kamaelia.Util.DataSource import DataSource
from Kamaelia.Visualisation.PhysicsGraph.lines_to_tokenlists import lines_to_tokenlists
from Kamaelia.Util.Console import ConsoleEchoer,ConsoleReader
from Kamaelia.Chassis.Graphline import Graphline
# Data can be from both DataSource and console inputs
print "Please type the command you want to draw"
Graphline(
CONSOLEREADER = ConsoleReader(">>> "),
# DATASOURCE = DataSource(['ADD NODE 1Node 1Node randompos -', 'ADD NODE 2Node 2Node randompos -',
# 'ADD NODE 3Node 3Node randompos -', 'ADD NODE 4Node 4Node randompos -',
# 'ADD LINK 1Node 2Node','ADD LINK 2Node 3Node', 'ADD LINK 3Node 4Node',
# 'ADD LINK 4Node 1Node']),
DATASOURCE = DataSource(['ADD NODE 1Node 1Node randompos teapot image=../../../Docs/cat.gif',
'ADD NODE 2Node 2Node randompos - image=../../../Docs/cat.gif',
'ADD NODE 3Node 3Node randompos sphere image=../../../Docs/cat.gif',
'ADD NODE 4Node 4Node randompos - image=http://kamaelia.sourceforge.net/Kamaelia.gif',
'ADD NODE 5Node 5Node randompos sphere image=http://edit.kamaelia.org/Kamaelia.gif',
'ADD NODE 6Node 6Node randompos -',
'ADD NODE 7Node 7Node randompos sphere',
'ADD LINK 1Node 2Node',
'ADD LINK 1Node 3Node', 'ADD LINK 1Node 4Node',
'ADD LINK 1Node 5Node','ADD LINK 1Node 6Node', 'ADD LINK 1Node 7Node',
'ADD NODE 1Node:1Node 1Node:1Node randompos - image=../../../Docs/cat.gif',
'ADD NODE 1Node:2Node 1Node:2Node randompos -',
'ADD NODE 1Node:3Node 1Node:3Node randompos -',
'ADD NODE 1Node:4Node 1Node:4Node randompos -',
'ADD LINK 1Node:1Node 1Node:2Node', 'ADD LINK 1Node:2Node 1Node:3Node',
'ADD LINK 1Node:3Node 1Node:4Node', 'ADD LINK 1Node:4Node 1Node:1Node',
'ADD NODE 1Node:1Node:1Node 1Node:1Node:1Node randompos - image=../../../Docs/cat.gif',
'ADD NODE 1Node:1Node:2Node 1Node:1Node:2Node randompos -',
'ADD LINK 1Node:1Node:1Node 1Node:1Node:2Node',
'ADD NODE 5Node:1Node 5Node:1Node randompos sphere image=../../../Docs/cat.gif',
'ADD NODE 5Node:2Node 5Node:2Node randompos sphere',
'ADD LINK 5Node:1Node 5Node:2Node'
]),
TOKENS = lines_to_tokenlists(),
VIEWER = TopologyViewer3DWithParams(),
CONSOLEECHOER = ConsoleEchoer(),
linkages = {
("CONSOLEREADER","outbox") : ("TOKENS","inbox"),
("DATASOURCE","outbox") : ("TOKENS","inbox"),
("TOKENS","outbox") : ("VIEWER","inbox"),
("VIEWER","outbox") : ("CONSOLEECHOER","inbox"),
}
).run()
|
blrm/openshift-tools
|
refs/heads/stg
|
ansible/roles/lib_oa_openshift/action_plugins/conditional_set_fact.py
|
45
|
"""
Ansible action plugin to help with setting facts conditionally based on other facts.
"""
from ansible.plugins.action import ActionBase
DOCUMENTATION = '''
---
action_plugin: conditional_set_fact
short_description: This will set a fact if the value is defined
description:
- "To avoid constant set_fact & when conditions for each var we can use this"
author:
- Eric Wolinetz ewolinet@redhat.com
'''
EXAMPLES = '''
- name: Conditionally set fact
conditional_set_fact:
fact1: not_defined_variable
- name: Conditionally set fact
conditional_set_fact:
fact1: not_defined_variable
fact2: defined_variable
- name: Conditionally set fact falling back on default
conditional_set_fact:
fact1: not_defined_var | defined_variable
'''
# pylint: disable=too-few-public-methods
class ActionModule(ActionBase):
"""Action plugin to execute deprecated var checks."""
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
result['changed'] = False
facts = self._task.args.get('facts', [])
var_list = self._task.args.get('vars', [])
local_facts = dict()
for param in var_list:
other_vars = var_list[param].replace(" ", "")
for other_var in other_vars.split('|'):
if other_var in facts:
local_facts[param] = facts[other_var]
break
if local_facts:
result['changed'] = True
result['ansible_facts'] = local_facts
return result
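# Illustrative example (added; values assumed): with task args
#   facts: {'defined_variable': 42}
#   vars:  {'fact1': 'not_defined_var | defined_variable'}
# the loop above tries each '|'-separated candidate in order and the plugin returns
# roughly {'changed': True, 'ansible_facts': {'fact1': 42}}.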
|
fitzgen/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/pytest/testing/code/test_source.py
|
171
|
# flake8: noqa
# disable flake check on this file because some constructs are strange
# or redundant on purpose and can't be disabled on a line-by-line basis
import sys
import _pytest._code
import py
import pytest
from _pytest._code import Source
from _pytest._code.source import _ast
if _ast is not None:
astonly = pytest.mark.nothing
else:
astonly = pytest.mark.xfail("True", reason="only works with AST-compile")
failsonjython = pytest.mark.xfail("sys.platform.startswith('java')")
def test_source_str_function():
x = Source("3")
assert str(x) == "3"
x = Source(" 3")
assert str(x) == "3"
x = Source("""
3
""", rstrip=False)
assert str(x) == "\n3\n "
x = Source("""
3
""", rstrip=True)
assert str(x) == "\n3"
def test_unicode():
try:
unicode
except NameError:
return
x = Source(unicode("4"))
assert str(x) == "4"
co = _pytest._code.compile(unicode('u"\xc3\xa5"', 'utf8'), mode='eval')
val = eval(co)
assert isinstance(val, unicode)
def test_source_from_function():
source = _pytest._code.Source(test_source_str_function)
assert str(source).startswith('def test_source_str_function():')
def test_source_from_method():
class TestClass:
def test_method(self):
pass
source = _pytest._code.Source(TestClass().test_method)
assert source.lines == ["def test_method(self):",
" pass"]
def test_source_from_lines():
lines = ["a \n", "b\n", "c"]
source = _pytest._code.Source(lines)
assert source.lines == ['a ', 'b', 'c']
def test_source_from_inner_function():
def f():
pass
source = _pytest._code.Source(f, deindent=False)
assert str(source).startswith(' def f():')
source = _pytest._code.Source(f)
assert str(source).startswith('def f():')
def test_source_putaround_simple():
source = Source("raise ValueError")
source = source.putaround(
"try:", """\
except ValueError:
x = 42
else:
x = 23""")
assert str(source)=="""\
try:
raise ValueError
except ValueError:
x = 42
else:
x = 23"""
def test_source_putaround():
source = Source()
source = source.putaround("""
if 1:
x=1
""")
assert str(source).strip() == "if 1:\n x=1"
def test_source_strips():
source = Source("")
assert source == Source()
assert str(source) == ''
assert source.strip() == source
def test_source_strip_multiline():
source = Source()
source.lines = ["", " hello", " "]
source2 = source.strip()
assert source2.lines == [" hello"]
def test_syntaxerror_rerepresentation():
ex = pytest.raises(SyntaxError, _pytest._code.compile, 'xyz xyz')
assert ex.value.lineno == 1
assert ex.value.offset in (4,7) # XXX pypy/jython versus cpython?
assert ex.value.text.strip(), 'x x'
def test_isparseable():
assert Source("hello").isparseable()
assert Source("if 1:\n pass").isparseable()
assert Source(" \nif 1:\n pass").isparseable()
assert not Source("if 1:\n").isparseable()
assert not Source(" \nif 1:\npass").isparseable()
assert not Source(chr(0)).isparseable()
class TestAccesses:
source = Source("""\
def f(x):
pass
def g(x):
pass
""")
def test_getrange(self):
x = self.source[0:2]
assert x.isparseable()
assert len(x.lines) == 2
assert str(x) == "def f(x):\n pass"
def test_getline(self):
x = self.source[0]
assert x == "def f(x):"
def test_len(self):
assert len(self.source) == 4
def test_iter(self):
l = [x for x in self.source]
assert len(l) == 4
class TestSourceParsingAndCompiling:
source = Source("""\
def f(x):
assert (x ==
3 +
4)
""").strip()
def test_compile(self):
co = _pytest._code.compile("x=3")
d = {}
exec (co, d)
assert d['x'] == 3
def test_compile_and_getsource_simple(self):
co = _pytest._code.compile("x=3")
exec (co)
source = _pytest._code.Source(co)
assert str(source) == "x=3"
def test_compile_and_getsource_through_same_function(self):
def gensource(source):
return _pytest._code.compile(source)
co1 = gensource("""
def f():
raise KeyError()
""")
co2 = gensource("""
def f():
raise ValueError()
""")
source1 = py.std.inspect.getsource(co1)
assert 'KeyError' in source1
source2 = py.std.inspect.getsource(co2)
assert 'ValueError' in source2
def test_getstatement(self):
#print str(self.source)
ass = str(self.source[1:])
for i in range(1, 4):
#print "trying start in line %r" % self.source[i]
s = self.source.getstatement(i)
#x = s.deindent()
assert str(s) == ass
def test_getstatementrange_triple_quoted(self):
#print str(self.source)
source = Source("""hello('''
''')""")
s = source.getstatement(0)
assert s == str(source)
s = source.getstatement(1)
assert s == str(source)
@astonly
def test_getstatementrange_within_constructs(self):
source = Source("""\
try:
try:
raise ValueError
except SomeThing:
pass
finally:
42
""")
assert len(source) == 7
# check all lineno's that could occur in a traceback
#assert source.getstatementrange(0) == (0, 7)
#assert source.getstatementrange(1) == (1, 5)
assert source.getstatementrange(2) == (2, 3)
assert source.getstatementrange(3) == (3, 4)
assert source.getstatementrange(4) == (4, 5)
#assert source.getstatementrange(5) == (0, 7)
assert source.getstatementrange(6) == (6, 7)
def test_getstatementrange_bug(self):
source = Source("""\
try:
x = (
y +
z)
except:
pass
""")
assert len(source) == 6
assert source.getstatementrange(2) == (1, 4)
def test_getstatementrange_bug2(self):
source = Source("""\
assert (
33
==
[
X(3,
b=1, c=2
),
]
)
""")
assert len(source) == 9
assert source.getstatementrange(5) == (0, 9)
def test_getstatementrange_ast_issue58(self):
source = Source("""\
def test_some():
for a in [a for a in
CAUSE_ERROR]: pass
x = 3
""")
assert getstatement(2, source).lines == source.lines[2:3]
assert getstatement(3, source).lines == source.lines[3:4]
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_getstatementrange_out_of_bounds_py3(self):
source = Source("if xxx:\n from .collections import something")
r = source.getstatementrange(1)
assert r == (1,2)
def test_getstatementrange_with_syntaxerror_issue7(self):
source = Source(":")
pytest.raises(SyntaxError, lambda: source.getstatementrange(0))
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_compile_to_ast(self):
import ast
source = Source("x = 4")
mod = source.compile(flag=ast.PyCF_ONLY_AST)
assert isinstance(mod, ast.Module)
compile(mod, "<filename>", "exec")
def test_compile_and_getsource(self):
co = self.source.compile()
py.builtin.exec_(co, globals())
f(7)
excinfo = pytest.raises(AssertionError, "f(6)")
frame = excinfo.traceback[-1].frame
stmt = frame.code.fullsource.getstatement(frame.lineno)
#print "block", str(block)
assert str(stmt).strip().startswith('assert')
def test_compilefuncs_and_path_sanity(self):
def check(comp, name):
co = comp(self.source, name)
if not name:
expected = "codegen %s:%d>" %(mypath, mylineno+2+1)
else:
expected = "codegen %r %s:%d>" % (name, mypath, mylineno+2+1)
fn = co.co_filename
assert fn.endswith(expected)
mycode = _pytest._code.Code(self.test_compilefuncs_and_path_sanity)
mylineno = mycode.firstlineno
mypath = mycode.path
for comp in _pytest._code.compile, _pytest._code.Source.compile:
for name in '', None, 'my':
yield check, comp, name
def test_offsetless_synerr(self):
pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode='eval')
def test_getstartingblock_singleline():
class A:
def __init__(self, *args):
frame = sys._getframe(1)
self.source = _pytest._code.Frame(frame).statement
x = A('x', 'y')
l = [i for i in x.source.lines if i.strip()]
assert len(l) == 1
def test_getstartingblock_multiline():
class A:
def __init__(self, *args):
frame = sys._getframe(1)
self.source = _pytest._code.Frame(frame).statement
x = A('x',
'y' \
,
'z')
l = [i for i in x.source.lines if i.strip()]
assert len(l) == 4
def test_getline_finally():
def c(): pass
excinfo = pytest.raises(TypeError, """
teardown = None
try:
c(1)
finally:
if teardown:
teardown()
""")
source = excinfo.traceback[-1].statement
assert str(source).strip() == 'c(1)'
def test_getfuncsource_dynamic():
source = """
def f():
raise ValueError
def g(): pass
"""
co = _pytest._code.compile(source)
py.builtin.exec_(co, globals())
assert str(_pytest._code.Source(f)).strip() == 'def f():\n raise ValueError'
assert str(_pytest._code.Source(g)).strip() == 'def g(): pass'
def test_getfuncsource_with_multine_string():
def f():
c = '''while True:
pass
'''
assert str(_pytest._code.Source(f)).strip() == "def f():\n c = '''while True:\n pass\n'''"
def test_deindent():
from _pytest._code.source import deindent as deindent
assert deindent(['\tfoo', '\tbar', ]) == ['foo', 'bar']
def f():
c = '''while True:
pass
'''
import inspect
lines = deindent(inspect.getsource(f).splitlines())
assert lines == ["def f():", " c = '''while True:", " pass", "'''"]
source = """
def f():
def g():
pass
"""
lines = deindent(source.splitlines())
assert lines == ['', 'def f():', ' def g():', ' pass', ' ']
@pytest.mark.xfail("sys.version_info[:3] < (2,7,0) or "
"((3,0) <= sys.version_info[:2] < (3,2))")
def test_source_of_class_at_eof_without_newline(tmpdir):
# this test fails because the implicit inspect.getsource(A) below
# does not return the "x = 1" last line.
source = _pytest._code.Source('''
class A(object):
def method(self):
x = 1
''')
path = tmpdir.join("a.py")
path.write(source)
s2 = _pytest._code.Source(tmpdir.join("a.py").pyimport().A)
assert str(source).strip() == str(s2).strip()
if True:
def x():
pass
def test_getsource_fallback():
from _pytest._code.source import getsource
expected = """def x():
pass"""
src = getsource(x)
assert src == expected
def test_idem_compile_and_getsource():
from _pytest._code.source import getsource
expected = "def x(): pass"
co = _pytest._code.compile(expected)
src = getsource(co)
assert src == expected
def test_findsource_fallback():
from _pytest._code.source import findsource
src, lineno = findsource(x)
assert 'test_findsource_simple' in str(src)
assert src[lineno] == ' def x():'
def test_findsource():
from _pytest._code.source import findsource
co = _pytest._code.compile("""if 1:
def x():
pass
""")
src, lineno = findsource(co)
assert 'if 1:' in str(src)
d = {}
eval(co, d)
src, lineno = findsource(d['x'])
assert 'if 1:' in str(src)
assert src[lineno] == " def x():"
def test_getfslineno():
from _pytest._code import getfslineno
def f(x):
pass
fspath, lineno = getfslineno(f)
assert fspath.basename == "test_source.py"
assert lineno == _pytest._code.getrawcode(f).co_firstlineno - 1 # see findsource
class A(object):
pass
fspath, lineno = getfslineno(A)
_, A_lineno = py.std.inspect.findsource(A)
assert fspath.basename == "test_source.py"
assert lineno == A_lineno
assert getfslineno(3) == ("", -1)
class B:
pass
B.__name__ = "B2"
assert getfslineno(B)[1] == -1
def test_code_of_object_instance_with_call():
class A:
pass
pytest.raises(TypeError, lambda: _pytest._code.Source(A()))
class WithCall:
def __call__(self):
pass
code = _pytest._code.Code(WithCall())
assert 'pass' in str(code.source())
class Hello(object):
def __call__(self):
pass
pytest.raises(TypeError, lambda: _pytest._code.Code(Hello))
def getstatement(lineno, source):
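# Helper: return the full statement of 'source' that contains the given line number.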
from _pytest._code.source import getstatementrange_ast
source = _pytest._code.Source(source, deindent=False)
ast, start, end = getstatementrange_ast(lineno, source)
return source[start:end]
def test_oneline():
source = getstatement(0, "raise ValueError")
assert str(source) == "raise ValueError"
def test_comment_and_no_newline_at_end():
from _pytest._code.source import getstatementrange_ast
source = Source(['def test_basic_complex():',
' assert 1 == 2',
'# vim: filetype=pyopencl:fdm=marker'])
ast, start, end = getstatementrange_ast(1, source)
assert end == 2
def test_oneline_and_comment():
source = getstatement(0, "raise ValueError\n#hello")
assert str(source) == "raise ValueError"
@pytest.mark.xfail(hasattr(sys, "pypy_version_info"),
reason='does not work on pypy')
def test_comments():
source = '''def test():
"comment 1"
x = 1
# comment 2
# comment 3
assert False
"""
comment 4
"""
'''
for line in range(2,6):
assert str(getstatement(line, source)) == ' x = 1'
for line in range(6,10):
assert str(getstatement(line, source)) == ' assert False'
assert str(getstatement(10, source)) == '"""'
def test_comment_in_statement():
source = '''test(foo=1,
# comment 1
bar=2)
'''
for line in range(1,3):
assert str(getstatement(line, source)) == \
'test(foo=1,\n # comment 1\n bar=2)'
def test_single_line_else():
source = getstatement(1, "if False: 2\nelse: 3")
assert str(source) == "else: 3"
def test_single_line_finally():
source = getstatement(1, "try: 1\nfinally: 3")
assert str(source) == "finally: 3"
def test_issue55():
source = ('def round_trip(dinp):\n assert 1 == dinp\n'
'def test_rt():\n round_trip("""\n""")\n')
s = getstatement(3, source)
assert str(s) == ' round_trip("""\n""")'
def XXXtest_multiline():
source = getstatement(0, """\
raise ValueError(
23
)
x = 3
""")
assert str(source) == "raise ValueError(\n 23\n)"
class TestTry:
pytestmark = astonly
source = """\
try:
raise ValueError
except Something:
raise IndexError(1)
else:
raise KeyError()
"""
def test_body(self):
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_except_line(self):
source = getstatement(2, self.source)
assert str(source) == "except Something:"
def test_except_body(self):
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
def test_else(self):
source = getstatement(5, self.source)
assert str(source) == " raise KeyError()"
class TestTryFinally:
source = """\
try:
raise ValueError
finally:
raise IndexError(1)
"""
def test_body(self):
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_finally(self):
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
class TestIf:
pytestmark = astonly
source = """\
if 1:
y = 3
elif False:
y = 5
else:
y = 7
"""
def test_body(self):
source = getstatement(1, self.source)
assert str(source) == " y = 3"
def test_elif_clause(self):
source = getstatement(2, self.source)
assert str(source) == "elif False:"
def test_elif(self):
source = getstatement(3, self.source)
assert str(source) == " y = 5"
def test_else(self):
source = getstatement(5, self.source)
assert str(source) == " y = 7"
def test_semicolon():
s = """\
hello ; pytest.skip()
"""
source = getstatement(0, s)
assert str(source) == s.strip()
def test_def_online():
s = """\
def func(): raise ValueError(42)
def something():
pass
"""
source = getstatement(0, s)
assert str(source) == "def func(): raise ValueError(42)"
def XXX_test_expression_multiline():
source = """\
something
'''
'''"""
result = getstatement(1, source)
assert str(result) == "'''\n'''"
|
campagnola/acq4
|
refs/heads/develop
|
acq4/util/units.py
|
3
|
from __future__ import print_function
from acq4.pyqtgraph.units import *
|
titasakgm/brc-stock
|
refs/heads/master
|
openerp/addons/report_geraldo/lib/geraldo/site/newsite/site-geraldo/utils/decorators.py
|
9
|
"""TODO: These should be commented"""
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.core.urlresolvers import reverse
from django.conf import settings
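# page(): decorator that renders the wrapped view into a template. The view may
# return an HttpResponse directly, or yield/return dicts and (key, value) tuples
# that are merged into the template context before rendering.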
def page(template=None, context=None, **decorator_args):
def _wrapper(fn):
def _innerWrapper(request, *args, **kw):
context_dict = decorator_args.copy()
g = fn(request, *args, **kw)
if issubclass(type(g), HttpResponse):
return g
if not hasattr(g, 'next'): #Is this a generator? Otherwise make it a tuple!
g = (g,)
for i in g:
if issubclass(type(i), HttpResponse):
return i
if type(i) == type(()):
context_dict[i[0]] = i[1]
else:
context_dict.update(i)
template_name = context_dict.get("template", template)
context_instance = context_dict.get("context", context)
if not context_instance:
context_instance = RequestContext(request, context_dict)
return render_to_response(template_name, context_dict, context_instance)
return _innerWrapper
return _wrapper
from google.appengine.api import users
class admin_required(object):
def __init__(self, func):
self.func = func
def __call__(self, request, *args, **kwargs):
if users.is_current_user_admin() or settings.LOCAL:
return self.func(request, *args, **kwargs)
else:
return HttpResponseRedirect(reverse('admin_user_required'))
|
haad/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/unarchive.py
|
2
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleActionSkip
from ansible.module_utils._text import to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
''' handler for unarchive operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
creates = self._task.args.get('creates', None)
decrypt = self._task.args.get('decrypt', True)
try:
# "copy" is deprecated in favor of "remote_src".
if 'copy' in self._task.args:
# They are mutually exclusive.
if 'remote_src' in self._task.args:
raise AnsibleActionFail("parameters are mutually exclusive: ('copy', 'remote_src')")
# We will take the information from copy and store it in
# the remote_src var to use later in this file.
self._task.args['remote_src'] = remote_src = not boolean(self._task.args.pop('copy'), strict=False)
if source is None or dest is None:
raise AnsibleActionFail("src (or content) and dest are required")
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
creates = self._remote_expand_user(creates)
if self._remote_file_exists(creates):
raise AnsibleActionSkip("skipped, since %s exists" % creates)
dest = self._remote_expand_user(dest) # CCTODO: Fix path for Windows hosts.
source = os.path.expanduser(source)
if not remote_src:
try:
source = self._loader.get_real_file(self._find_needle('files', source), decrypt=decrypt)
except AnsibleError as e:
raise AnsibleActionFail(to_text(e))
try:
remote_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=True)
except AnsibleError as e:
raise AnsibleActionFail(to_text(e))
if not remote_stat['exists'] or not remote_stat['isdir']:
raise AnsibleActionFail("dest '%s' must be an existing dir" % dest)
if not remote_src:
# transfer the file to a remote tmp location
tmp_src = self._connection._shell.join_path(self._connection._shell.tempdir, 'source')
self._transfer_file(source, tmp_src)
# handle diff mode client side
# handle check mode client side
if not remote_src:
# fix file permissions when the copy is done as a different user
self._fixup_perms2((self._connection._shell.tempdir, tmp_src))
# Build temporary module_args.
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=tmp_src,
original_basename=os.path.basename(source),
),
)
else:
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
original_basename=os.path.basename(source),
),
)
# remove action plugin only key
for key in ('decrypt',):
if key in new_module_args:
del new_module_args[key]
# execute the unarchive module now, with the updated args
result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
except AnsibleAction as e:
result.update(e.result)
finally:
self._remove_tmp_path(self._connection._shell.tempdir)
return result
|
ar4s/django
|
refs/heads/master
|
tests/test_runner/test_discover_runner.py
|
4
|
from contextlib import contextmanager
import os
import sys
from unittest import expectedFailure, TestSuite, TextTestRunner, defaultTestLoader
from django.test import TestCase
from django.test.runner import DiscoverRunner
def expectedFailureIf(condition):
"""Marks a test as an expected failure if ``condition`` is met."""
if condition:
return expectedFailure
return lambda func: func
class DiscoverRunnerTest(TestCase):
def test_dotted_test_module(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample"],
).countTestCases()
self.assertEqual(count, 2)
def test_dotted_test_class_vanilla_unittest(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestVanillaUnittest"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_class_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_method_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase.test_sample"],
).countTestCases()
self.assertEqual(count, 1)
def test_pattern(self):
count = DiscoverRunner(
pattern="*_tests.py",
).build_suite(["test_discovery_sample"]).countTestCases()
self.assertEqual(count, 1)
def test_file_path(self):
@contextmanager
def change_cwd_to_tests():
"""Change CWD to tests directory (one level up from this file)"""
current_dir = os.path.abspath(os.path.dirname(__file__))
tests_dir = os.path.join(current_dir, '..')
old_cwd = os.getcwd()
os.chdir(tests_dir)
yield
os.chdir(old_cwd)
with change_cwd_to_tests():
count = DiscoverRunner().build_suite(
["test_discovery_sample/"],
).countTestCases()
self.assertEqual(count, 3)
def test_overrideable_test_suite(self):
self.assertEqual(DiscoverRunner().test_suite, TestSuite)
def test_overrideable_test_runner(self):
self.assertEqual(DiscoverRunner().test_runner, TextTestRunner)
def test_overrideable_test_loader(self):
self.assertEqual(DiscoverRunner().test_loader, defaultTestLoader)
|
rogegg/iOrg2
|
refs/heads/master
|
datareader/views.py
|
1
|
from django.shortcuts import render
from django.contrib.auth.models import Permission,User, Group
from django.http import HttpResponse
from .models import *
from django.contrib.auth.decorators import login_required
from django.db import models, migrations
from django.shortcuts import redirect
import gspread
import httplib2
from oauth2client.service_account import ServiceAccountCredentials
from datareader import models
import datareader
LOCK = False  # Lock variable
# Create your views here.
def index(request):
return render(request,'index.html')
@login_required()
def concept(request):
topic_list = []
# build the data needed for the HTML template
for i in range(0,len(Topic.objects.all())):
topic_list.append({
"name" : Topic.objects.all()[i].name,
"subtopics" : []
})
for j in range(0,len( SubTopic.objects.filter(topic=Topic.objects.all()[i]) )):
topic_list[i]["subtopics"].append({
"name" : SubTopic.objects.filter(topic=Topic.objects.all()[i])[j].name,
"concepts" : Concept.objects.filter(subtopic=SubTopic.objects.filter(topic=Topic.objects.all()[i])[j])
})
context = {
'topic_list' : topic_list
}
#return HttpResponse("<p>HEEEEEEEEEEEE</p>")
return render(request, 'datareader/concepts.html', context)
def populate_test(request):
concept_list = Concept.objects.all()
# a = Concept.objects.get(name="Primer concepto")
# c = Concept.objects.get(name="Primer concepto")
# concept_list = [a,c]
# context = {'object_list': a}
# context = {'object_list': concept_list}
populate = populate_concept_page()
# populate = Concept.objects.get(name="Primer concepto")
all_concepts = Concept.objects.all()
context = {'object_list': all_concepts}
# context = {'object_list': populate}
#return HttpResponse("<p>HEEEEEEEEEEEE</p>")
return render(request, 'datareader/populate_test.html', context)
@login_required()
def auto_evaluacion(request):
return render(request, 'autoevaluacion.html')
@login_required()
def admin_site(request):
return render(request, 'user/admin_site.html')
@login_required()
def update_concepts(request):
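# Only users with the 'add_concept' permission may refresh concepts;
# the module-level LOCK prevents concurrent update runs.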
user = request.user
permission_len = len(Permission.objects.filter(user=user).filter(codename="add_concept"))
if permission_len>0:
global LOCK
if not LOCK:
LOCK = True
populate_concept_page()
LOCK = False
return render(request, 'user/admin_site.html',{"status_concepts":True})
else:
return redirect('index')
@login_required()
def update_questions_vf(request):
user = request.user
permission_len = len(Permission.objects.filter(user=user).filter(codename="add_concept"))
if permission_len>0:
global LOCK
if not LOCK:
LOCK = True
populate_questions("vf")
LOCK = False
return render(request, 'user/admin_site.html',{"status_questions_vf":True})
else:
return redirect('index')
@login_required()
def update_questions_opm(request):
user = request.user
permission_len = len(Permission.objects.filter(user=user).filter(codename="add_concept"))
if permission_len>0:
global LOCK
if not LOCK:
LOCK = True
populate_questions_opm()
LOCK = False
return render(request, 'user/admin_site.html',{"status_questions_opm":True})
else:
return redirect('index')
|
nacker/pythonProject
|
refs/heads/master
|
Django/test1/booktest/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
chrz89/upb-son-editor-backend
|
refs/heads/master
|
src/son_editor/apis/schemaapi.py
|
1
|
import logging
from flask import Response
from flask import request
from flask_restplus import Resource, Namespace
from son_editor.impl import workspaceimpl
from son_editor.util import descriptorutil
from son_editor.util.constants import WORKSPACES
from son_editor.util.descriptorutil import SCHEMA_ID_VNF
from son_editor.util.requestutil import prepare_response
namespace = Namespace(WORKSPACES + "/<int:ws_id>/schema", description="Schema API")
logger = logging.getLogger(__name__)
@namespace.route("/<schema_id>")
class Schema(Resource):
"""
Single schema retrieval
"""
def get(self, ws_id, schema_id):
"""
Return the requested schema
:param ws_id: id of the workspace whose schema index is used
:param schema_id: id of the schema to return (VNF or NS)
:return: the requested schema
"""
if schema_id == SCHEMA_ID_VNF:
schema_index = workspaceimpl.get_workspace(ws_id)['vnf_schema_index']
else:
schema_index = workspaceimpl.get_workspace(ws_id)['ns_schema_index']
return prepare_response(descriptorutil.get_schema(schema_index, schema_id))
@namespace.route("/")
class Schemas(Resource):
"""Get all schemas for this server"""
def get(self, ws_id):
"""
Returns a list of all schemas configured for this server
:param ws_id: workspace id from the route (not used for this lookup)
:return: a list of all configured schemas
"""
return prepare_response(descriptorutil.get_schemas())
|
PeterDowdy/py-paradox-convert
|
refs/heads/master
|
tests/deserialization_tests.py
|
1
|
import unittest
import deserializer
__author__ = 'peter'
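# Tests for deserializer.deserialize: flat key/value pairs, brace-delimited
# multi-value lists, and nested key blocks.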
class DeserializationTests(unittest.TestCase):
def test_deserialize_single_key_value_pair(self):
input = ['name','=','value']
expected_output = [{ 'name': 'value' }]
output = deserializer.deserialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_deserialize_single_key_multi_value(self):
input = ['name','=','{','first','second', '}']
expected_output = [{ 'name': ['first', 'second']}]
output = deserializer.deserialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_deserialize_nested_key(self):
input = ['name','=','{','sub_name','=','derp','}']
expected_output = [{ 'name': [{'sub_name': 'derp'}]}]
output = deserializer.deserialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_deserialize_array_of_kvps(self):
input = ['name one','=','value one','name two', '=', 'value two']
expected_output = [{'name one': 'value one'},{'name two':'value two'}]
output = deserializer.deserialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_deserialize_nested_array(self):
input = ['name','=','{','sub_name','=','derp','sub_name_2','=','derp2','}']
expected_output = [{ 'name': [{'sub_name': 'derp'}, {'sub_name_2': 'derp2'}]}]
output = deserializer.deserialize(input)
self.assertEquals(cmp(expected_output, output), 0)
def test_deserialize_doubly_nested_key(self):
input = ['name','=','{','sub_name','=','derp','sub_name_2','=','{','more_nesting','=','a thing','}','}']
expected_output = [{ 'name': [{'sub_name': 'derp'}, {'sub_name_2': [{'more_nesting':'a thing'}]}]}]
output = deserializer.deserialize(input)
self.assertEquals(cmp(expected_output, output), 0)
|
DaniilLeksin/gc
|
refs/heads/master
|
wx/tools/XRCed/model.py
|
7
|
# Name: model.py
# Purpose: Model class and related
# Author: Roman Rolinsky <rolinsky@femagsoft.com>
# Created: 07.06.2007
# RCS-ID: $Id: model.py 65578 2010-09-21 07:39:45Z ROL $
import os,sys
from xml.dom import minidom
from globals import *
# Redefine writing to include encoding
class MyDocument(minidom.Document):
def __init__(self):
minidom.Document.__init__(self)
self.encoding = ''
def writexml(self, writer, indent="", addindent="", newl="", encoding=""):
if encoding: encdstr = 'encoding="%s"' % encoding
else: encdstr = ''
writer.write('<?xml version="1.0" %s?>\n' % encdstr)
for node in self.childNodes:
node.writexml(writer, indent, addindent, newl)
# Model object is used for data manipulation
class _Model:
def __init__(self):
self.dom = None
def init(self, dom=None):
self.external = []
self.allowExec = None
if self.dom: self.dom.unlink()
if not dom:
self.dom = MyDocument()
self.mainNode = self.dom.createElement('resource')
self.dom.appendChild(self.mainNode)
# Dummy node to be replaced by the node being tested
self.testElem = self.dom.createElement('dummy')
else:
dom.normalize()
self.dom = dom
self.testElem = self.dom.createElement('dummy')
self.mainNode = dom.documentElement
# Test element node is always first
self.mainNode.insertBefore(self.testElem, self.mainNode.firstChild)
def loadXML(self, path):
f = open(path)
dom = minidom.parse(f)
f.close()
self.init(dom)
# Set encoding global variable and default encoding
if dom.encoding:
wx.SetDefaultPyEncoding(dom.encoding.encode())
else:
dom.encoding = ''
def saveXML(self, path):
if self.dom.encoding:
import codecs
f = codecs.open(path, 'w', self.dom.encoding)
else:
f = open(path, 'wt')
# Make temporary copy for formatting it
domCopy = MyDocument()
mainNode = domCopy.appendChild(self.mainNode.cloneNode(True))
# Remove first child (testElem)
mainNode.removeChild(mainNode.firstChild).unlink()
self.indent(domCopy, mainNode)
domCopy.writexml(f, encoding = self.dom.encoding)
f.close()
domCopy.unlink()
def saveTestMemoryFile(self):
# Save in memory FS
memFile = MemoryFile(TEST_FILE)
encd = self.dom.encoding
if not encd: encd = None
try:
self.dom.writexml(memFile, encoding=encd)
except:
logger.exception('error writing temporary XML file')
wx.LogError('Error writing temporary XML file')
memFile.close() # write to wxMemoryFS
def indent(self, domCopy, node, indent = 0):
'''Indent node which must be a comment or an element node and children.'''
if indent != 0:
prevNode = node.previousSibling
if prevNode and prevNode.nodeType == prevNode.TEXT_NODE:
prevNode.data = '\n' + ' ' * indent
else:
text = domCopy.createTextNode('\n' + ' ' * indent)
node.parentNode.insertBefore(text, node)
# Indent element/comment children recursively
if node.hasChildNodes():
lastIndented = None
for n in node.childNodes[:]:
if n.nodeType in [n.ELEMENT_NODE, n.COMMENT_NODE]:
self.indent(domCopy, n, indent + 2)
lastIndented = n
# Insert newline after last element/comment child
if lastIndented:
n = lastIndented.nextSibling
if n and n.nodeType == n.TEXT_NODE:
n.data = '\n' + ' ' * indent
else:
text = domCopy.createTextNode('\n' + ' ' * indent)
node.appendChild(text)
def createObjectNode(self, className):
node = self.dom.createElement('object')
node.setAttribute('class', className)
return node
def createRefNode(self, ref):
node = self.dom.createElement('object_ref')
node.setAttribute('ref', ref)
return node
def createCommentNode(self):
node = self.dom.createComment('')
return node
def createComponentNode(self, className):
node = self.dom.createElement('component')
node.setAttribute('class', className)
return node
def parseString(self, data):
return minidom.parseString(data).childNodes[0]
def setTestElem(self, elem):
oldTestElem = Model.testElem
self.mainNode.replaceChild(elem, oldTestElem)
self.testElem = elem
oldTestElem.unlink()
def addExternal(self, path):
f = open(path)
self.external.append(minidom.parse(f))
f.close()
def findResource(self, name, classname='', recursive=True):
found = DoFindResource(self.mainNode, name, classname, recursive)
if found: return found
# Try to look in external files
for dom in self.external:
found = DoFindResource(dom.documentElement, name, '', True)
if found: return found
wx.LogError('XRC resource "%s" not found!' % name)
Model = _Model()
class MemoryFile:
'''Memory file proxy for python-like file object.'''
def __init__(self, name):
self.name = name
self.buffer = ''
def write(self, data):
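# Encode with the document's encoding, falling back to XML character references
# for characters that cannot be encoded.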
if Model.dom.encoding:
encoding = Model.dom.encoding
else:
encoding = wx.GetDefaultPyEncoding()
try:
self.buffer += data.encode(encoding)
except UnicodeEncodeError:
self.buffer += data.encode(encoding, 'xmlcharrefreplace')
def close(self):
wx.MemoryFSHandler.AddFile(self.name, self.buffer)
# Imitation of FindResource/DoFindResource from xmlres.cpp
def DoFindResource(parent, name, classname, recursive):
for n in parent.childNodes:
if n.nodeType == minidom.Node.ELEMENT_NODE and \
n.tagName in ['object', 'object_ref'] and \
n.getAttribute('name') == name:
cls = n.getAttribute('class')
if not classname or cls == classname: return n
if not cls and n.tagName == 'object_ref':
refName = n.getAttribute('ref')
if not refName: continue
refNode = FindResource(refName)
if refName and refNode.getAttribute('class') == classname:
return n
if recursive:
for n in parent.childNodes:
if n.nodeType == minidom.Node.ELEMENT_NODE and \
n.tagName in ['object', 'object_ref']:
found = DoFindResource(n, name, classname, True)
if found: return found
|
cparawhore/ProyectoSubastas
|
refs/heads/master
|
site-packages/django/contrib/gis/tests/geo3d/tests.py
|
62
|
from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import postgis
from django.test import TestCase
from django.utils._os import upath
if HAS_GEOS:
from django.contrib.gis.db.models import Union, Extent3D
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from .models import (City3D, Interstate2D, Interstate3D, InterstateProj2D,
InterstateProj3D, Point2D, Point3D, MultiPoint3D, Polygon2D, Polygon3D)
if HAS_GDAL:
from django.contrib.gis.utils import LayerMapping, LayerMapError
data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')
# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
('Houston', (-95.363151, 29.763374, 18)),
('Dallas', (-96.801611, 32.782057, 147)),
('Oklahoma City', (-97.521157, 34.464642, 380)),
('Wellington', (174.783117, -41.315268, 14)),
('Pueblo', (-104.609252, 38.255001, 1433)),
('Lawrence', (-95.235060, 38.971823, 251)),
('Chicago', (-87.650175, 41.850385, 181)),
('Victoria', (-123.305196, 48.462611, 15)),
)
# Reference mapping of city name to its altitude (Z value).
city_dict = dict((name, coords) for name, coords in city_data)
# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
interstate_data = (
('I-45',
'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
(11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16,
15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
15.435),
),
)
# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_data = (
'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,942051.75 4208366.38,941527.97 4225693.20))',
(21.71, 13.21, 9.12, 16.40, 21.71)
)
@skipUnless(HAS_GEOS and HAS_GDAL and postgis, "Geos, GDAL and postgis are required.")
class Geo3DTest(TestCase):
"""
Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
tries to test the features that can handle 3D and that are also
available within GeoDjango. For more information, see the PostGIS docs
on the routines that support 3D:
http://postgis.refractions.net/documentation/manual-1.4/ch08.html#PostGIS_3D_Functions
"""
def _load_interstate_data(self):
# Interstate (2D / 3D and Geographic/Projected variants)
for name, line, exp_z in interstate_data:
line_3d = GEOSGeometry(line, srid=4269)
line_2d = LineString([l[:2] for l in line_3d.coords], srid=4269)
# Creating a geographic and projected version of the
# interstate in both 2D and 3D.
Interstate3D.objects.create(name=name, line=line_3d)
InterstateProj3D.objects.create(name=name, line=line_3d)
Interstate2D.objects.create(name=name, line=line_2d)
InterstateProj2D.objects.create(name=name, line=line_2d)
def _load_city_data(self):
for name, pnt_data in city_data:
City3D.objects.create(name=name, point=Point(*pnt_data, srid=4326))
def _load_polygon_data(self):
bbox_wkt, bbox_z = bbox_data
bbox_2d = GEOSGeometry(bbox_wkt, srid=32140)
bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140)
Polygon2D.objects.create(name='2D BBox', poly=bbox_2d)
Polygon3D.objects.create(name='3D BBox', poly=bbox_3d)
def test_3d_hasz(self):
"""
Make sure data is 3D and has expected Z values -- shouldn't change
because of coordinate system.
"""
self._load_interstate_data()
for name, line, exp_z in interstate_data:
interstate = Interstate3D.objects.get(name=name)
interstate_proj = InterstateProj3D.objects.get(name=name)
for i in [interstate, interstate_proj]:
self.assertTrue(i.line.hasz)
self.assertEqual(exp_z, tuple(i.line.z))
self._load_city_data()
for name, pnt_data in city_data:
city = City3D.objects.get(name=name)
z = pnt_data[2]
self.assertTrue(city.point.hasz)
self.assertEqual(z, city.point.z)
def test_3d_polygons(self):
"""
Test the creation of polygon 3D models.
"""
self._load_polygon_data()
p3d = Polygon3D.objects.get(name='3D BBox')
self.assertTrue(p3d.poly.hasz)
self.assertIsInstance(p3d.poly, Polygon)
self.assertEqual(p3d.poly.srid, 32140)
def test_3d_layermapping(self):
"""
Testing LayerMapping on 3D models.
"""
point_mapping = {'point': 'POINT'}
mpoint_mapping = {'mpoint': 'MULTIPOINT'}
# The VRT is 3D, but should still be able to map sans the Z.
lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point2D.objects.count())
# The city shapefile is 2D, and won't be able to fill the coordinates
# in the 3D model -- thus, a LayerMapError is raised.
self.assertRaises(LayerMapError, LayerMapping,
Point3D, city_file, point_mapping, transform=False)
# 3D model should take 3D data just fine.
lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point3D.objects.count())
# Making sure LayerMapping.make_multi works right, by converting
# a Point25D into a MultiPoint25D.
lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
lm.save()
self.assertEqual(3, MultiPoint3D.objects.count())
def test_kml(self):
"""
Test GeoQuerySet.kml() with Z values.
"""
self._load_city_data()
h = City3D.objects.kml(precision=6).get(name='Houston')
# KML should be 3D.
# `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
self.assertTrue(ref_kml_regex.match(h.kml))
def test_geojson(self):
"""
Test GeoQuerySet.geojson() with Z values.
"""
self._load_city_data()
h = City3D.objects.geojson(precision=6).get(name='Houston')
# GeoJSON should be 3D
# `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
self.assertTrue(ref_json_regex.match(h.geojson))
def test_union(self):
"""
Testing the Union aggregate of 3D models.
"""
# PostGIS query that returned the reference EWKT for this test:
# `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
self._load_city_data()
ref_ewkt = 'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
ref_union = GEOSGeometry(ref_ewkt)
union = City3D.objects.aggregate(Union('point'))['point__union']
self.assertTrue(union.hasz)
self.assertEqual(ref_union, union)
def test_extent(self):
"""
Testing the Extent3D aggregate for 3D models.
"""
self._load_city_data()
# `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
extent2 = City3D.objects.extent3d()
def check_extent3d(extent3d, tol=6):
for ref_val, ext_val in zip(ref_extent3d, extent3d):
self.assertAlmostEqual(ref_val, ext_val, tol)
for e3d in [extent1, extent2]:
check_extent3d(e3d)
def test_perimeter(self):
"""
Testing GeoQuerySet.perimeter() on 3D fields.
"""
self._load_polygon_data()
# Reference query for values below:
# `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
ref_perim_3d = 76859.2620451
ref_perim_2d = 76859.2577803
tol = 6
self.assertAlmostEqual(ref_perim_2d,
Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
tol)
self.assertAlmostEqual(ref_perim_3d,
Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
tol)
def test_length(self):
"""
Testing GeoQuerySet.length() on 3D fields.
"""
# ST_Length_Spheroid is Z-aware, and thus does not need to use
# a separate function internally.
# `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
# FROM geo3d_interstate[2d|3d];`
self._load_interstate_data()
tol = 3
ref_length_2d = 4368.1721949481
ref_length_3d = 4368.62547052088
self.assertAlmostEqual(ref_length_2d,
Interstate2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
Interstate3D.objects.length().get(name='I-45').length.m,
tol)
# Making sure `ST_Length3D` is used for a projected
# and 3D model rather than `ST_Length`.
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
ref_length_2d = 4367.71564892392
# `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
ref_length_3d = 4368.16897234101
self.assertAlmostEqual(ref_length_2d,
InterstateProj2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
InterstateProj3D.objects.length().get(name='I-45').length.m,
tol)
def test_scale(self):
"""
Testing GeoQuerySet.scale() on Z values.
"""
self._load_city_data()
# Mapping of City name to reference Z values.
zscales = (-3, 4, 23)
for zscale in zscales:
for city in City3D.objects.scale(1.0, 1.0, zscale):
self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)
def test_translate(self):
"""
Testing GeoQuerySet.translate() on Z values.
"""
self._load_city_data()
ztranslations = (5.23, 23, -17)
for ztrans in ztranslations:
for city in City3D.objects.translate(0, 0, ztrans):
self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
|
dewitt/appengine-unshorten
|
refs/heads/master
|
third_party/simplejson/tests/test_pass3.py
|
261
|
from unittest import TestCase
import simplejson as json
# from http://json.org/JSON_checker/test/pass3.json
JSON = r'''
{
"JSON Test Pattern pass3": {
"The outermost value": "must be an object or array.",
"In this test": "It is an object."
}
}
'''
class TestPass3(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEquals(res, json.loads(out))
|
darkrho/scrapy-scrapy
|
refs/heads/master
|
scrapy/contrib/statsmailer.py
|
144
|
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.contrib.statsmailer` is deprecated, "
"use `scrapy.extensions.statsmailer` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.extensions.statsmailer import *
|
elainenaomi/sciwonc-dataflow-examples
|
refs/heads/master
|
dissertation2017/Experiment 1B/instances/9_1_workflow_full_10files_secondary_nocons_nosh_nors_noannot_with_proj_3s/statscpumemory_0/StatsCPUMemory.py
|
50
|
#!/usr/bin/env python
"""
This activity calculates the average, variance, and standard deviation of the
'CPU request' and 'memory request' fields (and their ratio) over the whole data set.
These fields are optional and may be null.
"""
# It will connect to DataStoreClient
from sciwonc.dataflow.DataStoreClient import DataStoreClient
import ConfigDB_StatsCPUMemory_0
import math
# connector and config
client = DataStoreClient("mongodb", ConfigDB_StatsCPUMemory_0)
# according to config
data = client.getData() # return an array of docs (like a csv reader)
output = []
sum_cpu = 0
sum_memory = 0
sum_ratio = 0
total_valid_tasks = 0
total_tasks = 0
total_variance_cpu = 0
total_variance_memory = 0
total_variance_ratio = 0
if(data):
# processing
while True:
doc = data.next()
if doc is None:
break;
total_tasks += 1
if doc['CPU request'] and doc['memory request']:
sum_cpu = sum_cpu + float(doc['CPU request'])
sum_memory = sum_memory + float(doc['memory request'])
ratio = float(doc['CPU request'])/float(doc['memory request']) if float(doc['memory request']) > 0 else 0
sum_ratio = sum_ratio + ratio
total_valid_tasks += 1
# average
average_cpu = sum_cpu / total_valid_tasks if total_valid_tasks > 0 else None
average_memory = sum_memory / total_valid_tasks if total_valid_tasks > 0 else None
average_ratio = sum_ratio / total_valid_tasks if total_valid_tasks > 0 else None
# variance
if average_cpu and average_memory and average_ratio:
data = client.getData() # return an array of docs (like a csv reader)
# processing
while True:
doc = data.next()
if doc is None:
break;
if doc['CPU request'] and doc['memory request']:
total_variance_cpu = total_variance_cpu + (float(doc['CPU request']) - average_cpu) ** 2
total_variance_memory = total_variance_memory + (float(doc['memory request']) - average_memory) ** 2
ratio = float(doc['CPU request'])/float(doc['memory request']) if float(doc['memory request']) > 0 else 0
total_variance_ratio = total_variance_ratio + (ratio - average_ratio) ** 2
newline = {}
newline['sum cpu'] = sum_cpu
newline['sum variance cpu'] = total_variance_cpu
newline['average cpu'] = average_cpu if average_cpu > 0 else None
newline['standard deviation cpu'] = math.sqrt(total_variance_cpu/total_valid_tasks)
newline['variance cpu'] = total_variance_cpu/total_valid_tasks
newline['sum memory'] = sum_memory
newline['sum variance memory'] = total_variance_memory
newline['average memory'] = average_memory if average_memory > 0 else None
newline['standard deviation memory'] = math.sqrt(total_variance_memory/total_valid_tasks)
newline['variance memory'] = total_variance_memory/total_valid_tasks
newline['sum ratio'] = sum_ratio
newline['sum variance ratio'] = total_variance_ratio
newline['average ratio'] = average_ratio if average_ratio > 0 else None
newline['standard deviation ratio'] = math.sqrt(total_variance_ratio/total_valid_tasks)
newline['variance ratio'] = total_variance_ratio/total_valid_tasks
newline['total valid tasks'] = total_valid_tasks
newline['total tasks'] = total_tasks
output.append(newline)
# save
client.saveData(output)
|
h4ck3rm1k3/orca-sonar
|
refs/heads/master
|
src/orca/scripts/toolkits/Gecko/keymaps.py
|
1
|
# Orca
#
# Copyright 2010 Joanmarie Diggs, Mesar Hameed.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
""" A list of common keybindings and unbound keys
pulled out from script.py: __getLaptopBindings()
with the goal of being more readable and less monolithic.
"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs, Mesar Hameed."
__license__ = "LGPL"
import orca.settings as settings
# Storing values
defaultModifierMask = settings.defaultModifierMask
ORCA_MODIFIER_MASK = settings.ORCA_MODIFIER_MASK
NO_MODIFIER_MASK = settings.NO_MODIFIER_MASK
ORCA_SHIFT_MODIFIER_MASK = settings.ORCA_SHIFT_MODIFIER_MASK
ORCA_CTRL_MODIFIER_MASK = settings.ORCA_CTRL_MODIFIER_MASK
CTRL_MODIFIER_MASK = settings.CTRL_MODIFIER_MASK
ALT_MODIFIER_MASK = settings.ALT_MODIFIER_MASK
SHIFT_MODIFIER_MASK = settings.SHIFT_MODIFIER_MASK
# KeyBindings that use the arrow keys for navigating HTML content.
arrowKeymap = (
("Right", defaultModifierMask, NO_MODIFIER_MASK, "goNextCharacterHandler"),
("Left", defaultModifierMask, NO_MODIFIER_MASK,
"goPreviousCharacterHandler"),
("Right", defaultModifierMask, CTRL_MODIFIER_MASK, "goNextWordHandler"),
("Left", defaultModifierMask, CTRL_MODIFIER_MASK, "goPreviousWordHandler"),
("Up", defaultModifierMask, NO_MODIFIER_MASK, "goPreviousLineHandler"),
("Down", defaultModifierMask, NO_MODIFIER_MASK, "goNextLineHandler"),
("Down", defaultModifierMask, ALT_MODIFIER_MASK, "expandComboBoxHandler"),
("Home", defaultModifierMask, CTRL_MODIFIER_MASK, "goTopOfFileHandler"),
("End", defaultModifierMask, CTRL_MODIFIER_MASK, "goBottomOfFileHandler"),
("Home", defaultModifierMask, NO_MODIFIER_MASK, "goBeginningOfLineHandler"),
("End", defaultModifierMask, NO_MODIFIER_MASK, "goEndOfLineHandler"),
)
commonKeymap = (
# keybindings to provide chat room message history.
("F1", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F2", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F3", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F4", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F5", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F6", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F7", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F8", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
("F9", defaultModifierMask, ORCA_MODIFIER_MASK, "reviewLiveAnnouncement"),
# misc
("backslash", defaultModifierMask, SHIFT_MODIFIER_MASK,
"setLivePolitenessOff"),
("backslash", defaultModifierMask, ORCA_SHIFT_MODIFIER_MASK,
"monitorLiveRegions"),
("backslash", defaultModifierMask, NO_MODIFIER_MASK,
"advanceLivePoliteness"),
("F12", defaultModifierMask, ORCA_MODIFIER_MASK,
"toggleCaretNavigationHandler"),
("Right", defaultModifierMask, ORCA_MODIFIER_MASK,
"goNextObjectInOrderHandler"),
("Left", defaultModifierMask, ORCA_MODIFIER_MASK,
"goPreviousObjectInOrderHandler"),
)
desktopKeymap = (
("KP_Multiply", defaultModifierMask, ORCA_MODIFIER_MASK,
"moveToMouseOverHandler"),
)
laptopKeymap = (
("0", defaultModifierMask, ORCA_MODIFIER_MASK, "moveToMouseOverHandler"),
)
|
rubik/poly
|
refs/heads/master
|
poly/core.py
|
1
|
import re
import copy
import operator
import functools
import fractions
import itertools
__all__ = ['monomial', 'parse', 'Poly']
def dict2poly(func):
def wrapper(*args, **kwargs):
d = func(*args, **kwargs)
return sorted(zip(d.values(), d.keys()), key=lambda i: -i[1])
return wrapper
def polyize(func):
def wrapper(*args, **kwargs):
return Poly(func(*args, **kwargs))
return wrapper
def monomial(coeff=1, power=1):
return Poly([(coeff, power)])
class Poly(object):
_coeff_format = {
1: '+',
-1: '-'
}
_exp_format = {
0: '',
1: 'x',
}
def __init__(self, poly=[]):
self.poly = self.simplify(poly)
def __repr__(self):
if not self.poly:
return '0'
return ' '.join(map(self._format_monomial, self.poly))
def _format_monomial(self, m):
c, p = m
if c in self._coeff_format:
coeff = self._coeff_format[c]
else:
coeff = str(c) if c < 0 else '+' + str(c)
if p in self._exp_format:
power = str(abs(c)) if c in (-1, 1) and p == 0 \
else self._exp_format[p]
else:
power = 'x^{0}'.format(p)
return (coeff + power).replace('+', '+ ').replace('-', '- ')
@staticmethod
@dict2poly
def simplify(poly):
new_poly = {}
if not poly:
return new_poly
for coeff, power in poly:
if not coeff:
continue
if power in new_poly:
if new_poly[power] + coeff == 0:
del new_poly[power]
continue
new_poly[power] += coeff
continue
new_poly[power] = coeff
return new_poly
@classmethod
@polyize
@dict2poly
def from_string(cls, string):
'''
Build a polynomial from a string. You can use ** or ^ to indicate
exponentiation. Valid polynomials include:
3x - 2
x + 1
4x**2 + x - 1
-2x^3 + x**2 - x + 1
'''
monomial_re = re.compile(r'([-+]?\d*)(x?)\^?(\d*)')
string = string.replace(' ', '').replace('**', '^')
poly = {}
signs = {'+': 1, '-': -1}
for c, x, p in monomial_re.findall(string):
if not any((c, x, p)):
continue
coeff = signs[c] if c in ('+', '-') else int(c or 1)
power = int(p or 1) if x else 0
# multiple monomials with the same degree
if power in poly:
poly[power] += coeff
continue
poly[power] = coeff
return poly
## CONVENIENCE METHODS & PROPERTIES ##
@property
def degree(self):
if not self:
return 0
return self.poly[0][1]
@property
def rhs(self):
rhs = self.poly[-1]
if rhs[1] == 0:
return rhs[0]
return 0
def append(self, other):
return Poly(self.poly + other.poly)
def is_num(self):
if not self:
return True
return len(self.poly) == 1 and self.poly[0][1] == 0
## OPERATORS ##
def __copy__(self):
return Poly(copy.copy(self.poly))
def __deepcopy__(self, memo):
return Poly(copy.deepcopy(self.poly, memo))
def __nonzero__(self):
return bool(self.poly)
def __bool__(self):
return bool(self.poly)
def __len__(self):
return len(self.poly)
def __getitem__(self, item):
if isinstance(item, slice):
return Poly(self.poly[item])
return Poly([self.poly[item]])
def __eq__(self, other):
return self.poly == other.poly
def __ne__(self, other):
return self.poly != other.poly
def __pos__(self):
return self
def __neg__(self):
return Poly([(-1, 0)]) * self
def __add__(self, other):
return Poly(self.poly + other.poly)
def __sub__(self, other):
return Poly(self.poly + (-other).poly)
def __mul__(self, other):
monomial_pairs = itertools.product(self.poly, other.poly)
return Poly((a[0] * b[0], a[1] + b[1]) for a, b in monomial_pairs)
def __divmod__(self, other):
def div(a, b):
return Poly([(fractions.Fraction(a[0]) / b[0], a[1] - b[1])])
A, B = copy.deepcopy(self), copy.deepcopy(other)
Q = Poly()
if A.degree < B.degree:
raise ValueError('The polynomials are not divisible')
while A.degree >= B.degree:
if not A:
return Q, Poly()
if A.is_num() and B.is_num():
return Q.append(div(A.poly[0], B.poly[0])), Poly()
quotient = div(A.poly[0], B.poly[0])
A = A[1:]
Q = Q.append(quotient)
if len(B) == 1:
continue
m = B[1:]
A += -quotient * m
return Q, A
def __div__(self, other):
return self.__divmod__(other)[0]
def __mod__(self, other):
return self.__divmod__(other)[1]
def __pow__(self, exp):
if exp < 0:
return NotImplemented
elif exp == 0:
if not self:
return NotImplemented
return monomial(power=0)
elif exp == 1:
return copy.deepcopy(self)
elif len(self) == 1:
return Poly([(self.poly[0][0], self.poly[0][1] * exp)])
return functools.reduce(operator.mul, itertools.repeat(self, exp))
|
frappe/erpnext
|
refs/heads/develop
|
erpnext/non_profit/doctype/chapter/test_chapter.py
|
24
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
class TestChapter(unittest.TestCase):
pass
|
michalkilawok/blockly
|
refs/heads/master
|
i18n/create_messages.py
|
128
|
#!/usr/bin/python
# Generate .js files defining Blockly core and language messages.
#
# Copyright 2013 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import os
import re
import sys
from common import read_json_file
_NEWLINE_PATTERN = re.compile('[\n\r]')
def string_is_ascii(s):
try:
s.decode('ascii')
return True
except UnicodeEncodeError:
return False
def main():
"""Generate .js files defining Blockly core and language messages."""
# Process command-line arguments.
parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
parser.add_argument('--source_lang', default='en',
help='ISO 639-1 source language code')
parser.add_argument('--source_lang_file',
default=os.path.join('json', 'en.json'),
help='Path to .json file for source language')
parser.add_argument('--source_synonym_file',
default=os.path.join('json', 'synonyms.json'),
help='Path to .json file with synonym definitions')
parser.add_argument('--output_dir', default='js/',
help='relative directory for output files')
parser.add_argument('--key_file', default='keys.json',
help='relative path to input keys file')
parser.add_argument('--quiet', action='store_true', default=False,
help='do not write anything to standard output')
parser.add_argument('files', nargs='+', help='input files')
args = parser.parse_args()
if not args.output_dir.endswith(os.path.sep):
args.output_dir += os.path.sep
# Read in source language .json file, which provides any values missing
# in target languages' .json files.
source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
# Make sure the source file doesn't contain a newline or carriage return.
for key, value in source_defs.items():
if _NEWLINE_PATTERN.search(value):
print('ERROR: definition of {0} in {1} contained a newline character.'.
format(key, args.source_lang_file))
sys.exit(1)
sorted_keys = source_defs.keys()
sorted_keys.sort()
# Read in synonyms file, which must be output in every language.
synonym_defs = read_json_file(os.path.join(
os.curdir, args.source_synonym_file))
synonym_text = '\n'.join(['Blockly.Msg.{0} = Blockly.Msg.{1};'.format(
key, synonym_defs[key]) for key in synonym_defs])
# Create each output file.
for arg_file in args.files:
(_, filename) = os.path.split(arg_file)
target_lang = filename[:filename.index('.')]
if target_lang not in ('qqq', 'keys', 'synonyms'):
target_defs = read_json_file(os.path.join(os.curdir, arg_file))
# Verify that keys are 'ascii'
bad_keys = [key for key in target_defs if not string_is_ascii(key)]
if bad_keys:
print(u'These keys in {0} contain non ascii characters: {1}'.format(
filename, ', '.join(bad_keys)))
# If there's a '\n' or '\r', remove it and print a warning.
for key, value in target_defs.items():
if _NEWLINE_PATTERN.search(value):
print(u'WARNING: definition of {0} in {1} contained '
'a newline character.'.
format(key, arg_file))
target_defs[key] = _NEWLINE_PATTERN.sub(' ', value)
# Output file.
outname = os.path.join(os.curdir, args.output_dir, target_lang + '.js')
with codecs.open(outname, 'w', 'utf-8') as outfile:
outfile.write(
"""// This file was automatically generated. Do not modify.
'use strict';
goog.provide('Blockly.Msg.{0}');
goog.require('Blockly.Msg');
""".format(target_lang.replace('-', '.')))
# For each key in the source language file, output the target value
# if present; otherwise, output the source language value with a
# warning comment.
for key in sorted_keys:
if key in target_defs:
value = target_defs[key]
comment = ''
del target_defs[key]
else:
value = source_defs[key]
comment = ' // untranslated'
value = value.replace('"', '\\"')
outfile.write(u'Blockly.Msg.{0} = "{1}";{2}\n'.format(
key, value, comment))
# Announce any keys defined only for target language.
if target_defs:
extra_keys = [key for key in target_defs if key not in synonym_defs]
synonym_keys = [key for key in target_defs if key in synonym_defs]
if not args.quiet:
if extra_keys:
print(u'These extra keys appeared in {0}: {1}'.format(
filename, ', '.join(extra_keys)))
if synonym_keys:
print(u'These synonym keys appeared in {0}: {1}'.format(
filename, ', '.join(synonym_keys)))
outfile.write(synonym_text)
if not args.quiet:
print('Created {0}.'.format(outname))
if __name__ == '__main__':
main()
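# Illustrative invocation (hedged: the file paths are made up for this example,
# while the flags themselves are the ones defined by the argparse setup above):
#
#     python create_messages.py --source_lang_file json/en.json \
#         --source_synonym_file json/synonyms.json \
#         --output_dir js/ json/en.json json/de.json json/qqq.json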
|
collects/VTK
|
refs/heads/master
|
Filters/Parallel/Testing/Python/TestImageStreamer.py
|
26
|
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# NOTE: the `expr` helper used in changeLUT() below is assumed to be supplied by
# the VTK test harness that drives this script; it is not imported here.
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
rangeStart = 0.0
rangeEnd = 0.2
LUT = vtk.vtkLookupTable()
LUT.SetTableRange(0,1800)
LUT.SetSaturationRange(1,1)
LUT.SetHueRange(rangeStart,rangeEnd)
LUT.SetValueRange(1,1)
LUT.SetAlphaRange(1,1)
LUT.Build()
# added these unused default arguments so that the prototype
# matches as required in python.
def changeLUT (a=0,b=0,__vtk__temp0=0,__vtk__temp1=0):
global rangeStart, rangeEnd
rangeStart = expr.expr(globals(), locals(),["rangeStart","+","0.1"])
rangeEnd = expr.expr(globals(), locals(),["rangeEnd","+","0.1"])
if (rangeEnd > 1.0):
rangeStart = 0.0
rangeEnd = 0.2
pass
LUT.SetHueRange(rangeStart,rangeEnd)
LUT.Build()
mapToRGBA = vtk.vtkImageMapToColors()
mapToRGBA.SetInputConnection(reader.GetOutputPort())
mapToRGBA.SetOutputFormatToRGBA()
mapToRGBA.SetLookupTable(LUT)
mapToRGBA.AddObserver("EndEvent",changeLUT)
streamer = vtk.vtkMemoryLimitImageDataStreamer()
streamer.SetInputConnection(mapToRGBA.GetOutputPort())
streamer.SetMemoryLimit(100)
streamer.UpdateWholeExtent()
# set the window/level to 255.0/127.5 to view full range
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(streamer.GetOutputPort())
viewer.SetColorWindow(255.0)
viewer.SetColorLevel(127.5)
viewer.SetZSlice(50)
viewer.Render()
# --- end of script --
|
Links2004/esptool
|
refs/heads/master
|
setup.py
|
12
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='esptool',
version='0.1.0',
description='A utility to communicate with the ROM bootloader in Espressif ESP8266.',
long_description=long_description,
url='https://github.com/themadinventor/esptool',
author='Fredrik Ahlberg',
author_email='fredrik@z80.se',
license='GPLv2+',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Embedded Systems',
'Environment :: Console',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: Python :: 2.7',
],
install_requires=[
'pyserial',
],
scripts=[
'esptool.py',
],
)
|
cechrist/cardoon
|
refs/heads/master
|
cardoon/devices/inductor.py
|
1
|
"""
:mod:`inductor` -- Linear inductor
-----------------------------------
.. module:: inductor
.. moduleauthor:: Carlos Christoffersen
"""
import numpy as np
from cardoon.globalVars import const, glVar
import cardoon.circuit as cir
class Device(cir.Element):
"""
Inductor
--------
Connection diagram::
__ __ __ _
0 / \/ \/ \/ \ 1
o----+ /\ /\ /\ +-------o External view
(_/ (_/ (_/
Netlist example::
ind:l1 1 0 l=3uH
Internal Topology
+++++++++++++++++
Internal implementation uses a gyrator (adds il internal node)::
il/gyr Term: il
0 o---------+ +----------------+
| gyr V(il) | |
+ /|\ /^\ |
Vin ( | ) ( | ) gyr Vin ----- gyr^2 * L
- \V/ \|/ -----
| | |
1 o---------+ +----------------+
|
--- tref
V
"""
# Device category
category = "Basic components"
# devtype is the 'model' name
devType = "ind"
# Number of terminals. If numTerms is set here, the parser knows
# in advance how many external terminals to expect. By default the
# parser makes no assumptions and allows any number of connections
#
numTerms = 2
paramDict = dict(
l = ('Inductance', 'H', float, 0.)
)
def __init__(self, instanceName):
# Here the Element constructor must be called. Do not connect
# internal nodes here.
cir.Element.__init__(self, instanceName)
def process_params(self):
# Called once the external terminals have been connected and
# the non-default parameters have been set. Make sanity checks
# here. Internal terminals/devices should also be defined
# here. Raise cir.CircuitError if a fatal error is found.
# remove any existing internal connections
self.clean_internal_terms()
if not self.l:
raise cir.CircuitError(self.instanceName
+ ': Inductance can not be zero')
# Connect internal terminal
til = self.add_internal_term('il', '{0} A'.format(glVar.gyr))
tref = self.add_reference_term()
# Setup gyrator
# Access to global variables is through the glVar
self.linearVCCS = [((0,1), (tref, til), glVar.gyr),
((til, tref), (0,1), glVar.gyr)]
cap = self.l * glVar.gyr**2
self.linearVCQS = [((til, tref), (til, tref), cap)]
# Adjust according to temperature (not needed so far)
# self.set_temp_vars(self.temp)
# def set_temp_vars(self, temp):
# """
# Calculate temperature-dependent variables for temp given in C
# """
# # Absolute temperature (note temp is in deg. C)
# # T = const.T0 + temp
# deltaT = temp - self.tnom
# self.g /= (1. + (self.tc1 + self.tc2 * deltaT) * deltaT)
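# Why the gyrator/capacitor pair set up in process_params() behaves as an
# inductor (added sketch, sign conventions glossed over): the first VCCS injects
# gyr * Vin into the internal node il, where the stored charge is
# q = (gyr^2 * L) * V(il). KCL at il gives gyr * Vin = dq/dt = gyr^2 * L * dV(il)/dt,
# while the second VCCS makes the port current Iin = gyr * V(il). Combining the
# two relations yields Vin = L * dIin/dt, which is exactly the inductor equation.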
|
ColorFuzzy/tornado
|
refs/heads/master
|
maint/test/redbot/red_test.py
|
91
|
#!/usr/bin/env python
import logging
from redbot.resource import HttpResource
import redbot.speak as rs
import thor
import threading
from tornado import gen
from tornado.options import parse_command_line
from tornado.testing import AsyncHTTPTestCase, LogTrapTestCase
from tornado.web import RequestHandler, Application, asynchronous
import unittest
class HelloHandler(RequestHandler):
def get(self):
self.write("Hello world")
class RedirectHandler(RequestHandler):
def get(self, path):
self.redirect(path, status=int(self.get_argument('status', '302')))
class PostHandler(RequestHandler):
def post(self):
assert self.get_argument('foo') == 'bar'
self.redirect('/hello', status=303)
class ChunkedHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
self.write('hello ')
yield gen.Task(self.flush)
self.write('world')
yield gen.Task(self.flush)
self.finish()
class CacheHandler(RequestHandler):
def get(self, computed_etag):
self.write(computed_etag)
def compute_etag(self):
return self._write_buffer[0]
class TestMixin(object):
def get_handlers(self):
return [
('/hello', HelloHandler),
('/redirect(/.*)', RedirectHandler),
('/post', PostHandler),
('/chunked', ChunkedHandler),
('/cache/(.*)', CacheHandler),
]
def get_app_kwargs(self):
return dict(static_path='.')
def get_allowed_warnings(self):
return [
# We can't set a non-heuristic freshness at the framework level,
# so just ignore this warning
rs.FRESHNESS_HEURISTIC,
# For our small test responses the Content-Encoding header
# wipes out any gains from compression
rs.CONNEG_GZIP_BAD,
]
def get_allowed_errors(self):
return []
def check_url(self, path, method='GET', body=None, headers=None,
expected_status=200, allowed_warnings=None,
allowed_errors=None):
url = self.get_url(path)
red = self.run_redbot(url, method, body, headers)
if not red.response.complete:
if isinstance(red.response.http_error, Exception):
logging.warning((red.response.http_error.desc, vars(red.response.http_error), url))
raise red.response.http_error.res_error
else:
raise Exception("unknown error; incomplete response")
self.assertEqual(int(red.response.status_code), expected_status)
allowed_warnings = (allowed_warnings or []) + self.get_allowed_warnings()
allowed_errors = (allowed_errors or []) + self.get_allowed_errors()
errors = []
warnings = []
for msg in red.response.notes:
if msg.level == 'bad':
logger = logging.error
if not isinstance(msg, tuple(allowed_errors)):
errors.append(msg)
elif msg.level == 'warning':
logger = logging.warning
if not isinstance(msg, tuple(allowed_warnings)):
warnings.append(msg)
elif msg.level in ('good', 'info', 'uri'):
logger = logging.info
else:
raise Exception('unknown level' + msg.level)
logger('%s: %s (%s)', msg.category, msg.show_summary('en'),
msg.__class__.__name__)
logger(msg.show_text('en'))
self.assertEqual(len(warnings) + len(errors), 0,
'Had %d unexpected warnings and %d errors' %
(len(warnings), len(errors)))
def run_redbot(self, url, method, body, headers):
red = HttpResource(url, method=method, req_body=body,
req_hdrs=headers)
def work():
red.run(thor.stop)
thor.run()
self.io_loop.add_callback(self.stop)
thread = threading.Thread(target=work)
thread.start()
self.wait()
thread.join()
return red
def test_hello(self):
self.check_url('/hello')
def test_static(self):
# TODO: 304 responses SHOULD return the same etag that a full
# response would. We currently do for If-None-Match, but not
# for If-Modified-Since (because IMS does not otherwise
# require us to read the file from disk)
self.check_url('/static/red_test.py',
allowed_warnings=[rs.MISSING_HDRS_304])
def test_static_versioned_url(self):
self.check_url('/static/red_test.py?v=1234',
allowed_warnings=[rs.MISSING_HDRS_304])
def test_redirect(self):
self.check_url('/redirect/hello', expected_status=302)
def test_permanent_redirect(self):
self.check_url('/redirect/hello?status=301', expected_status=301)
def test_404(self):
self.check_url('/404', expected_status=404)
def test_post(self):
body = 'foo=bar'
# Without an explicit Content-Length redbot will try to send the
# request chunked.
self.check_url(
'/post', method='POST', body=body,
headers=[('Content-Length', str(len(body))),
('Content-Type', 'application/x-www-form-urlencoded')],
expected_status=303)
def test_chunked(self):
self.check_url('/chunked')
def test_strong_etag_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304)
def test_multiple_strong_etag_match(self):
computed_etag = '"xyzzy1"'
etags = '"xyzzy1", "xyzzy2"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304)
def test_strong_etag_not_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy1"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=200)
def test_multiple_strong_etag_not_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy1", "xyzzy2"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=200)
def test_wildcard_etag(self):
computed_etag = '"xyzzy"'
etags = '*'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304,
allowed_warnings=[rs.MISSING_HDRS_304])
def test_weak_etag_match(self):
computed_etag = '"xyzzy1"'
etags = 'W/"xyzzy1"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304)
def test_multiple_weak_etag_match(self):
computed_etag = '"xyzzy2"'
etags = 'W/"xyzzy1", W/"xyzzy2"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304)
def test_weak_etag_not_match(self):
computed_etag = '"xyzzy2"'
etags = 'W/"xyzzy1"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=200)
def test_multiple_weak_etag_not_match(self):
computed_etag = '"xyzzy3"'
etags = 'W/"xyzzy1", W/"xyzzy2"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=200)
class DefaultHTTPTest(AsyncHTTPTestCase, LogTrapTestCase, TestMixin):
def get_app(self):
return Application(self.get_handlers(), **self.get_app_kwargs())
class GzipHTTPTest(AsyncHTTPTestCase, LogTrapTestCase, TestMixin):
def get_app(self):
return Application(self.get_handlers(), gzip=True, **self.get_app_kwargs())
def get_allowed_errors(self):
return super(GzipHTTPTest, self).get_allowed_errors() + [
# TODO: The Etag is supposed to change when Content-Encoding is
# used. This should be fixed, but it's difficult to do with the
# way GZipContentEncoding fits into the pipeline, and in practice
# it doesn't seem likely to cause any problems as long as we're
# using the correct Vary header.
rs.VARY_ETAG_DOESNT_CHANGE,
]
if __name__ == '__main__':
parse_command_line()
unittest.main()
|
mhbu50/erpnext
|
refs/heads/develop
|
erpnext/templates/__init__.py
|
12133432
| |
elsonrodriguez/madhatter
|
refs/heads/madhatter
|
koan/__init__.py
|
12133432
| |
praekelt/jmbo-football
|
refs/heads/master
|
football/migrations/__init__.py
|
12133432
| |
mmauroy/SickRage
|
refs/heads/master
|
lib/dogpile/cache/plugins/__init__.py
|
12133432
| |
mglukhikh/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/localflavor/us/__init__.py
|
12133432
| |
AlphaNerd80/Lists
|
refs/heads/master
|
superlists/__init__.py
|
12133432
| |
ronfung/incubator-airflow
|
refs/heads/master
|
airflow/contrib/task_runner/__init__.py
|
1049
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
wfxiang08/sqlalchemy
|
refs/heads/feature/wftest
|
test/orm/inheritance/test_selects.py
|
30
|
from sqlalchemy import String, Integer, ForeignKey, select
from sqlalchemy.orm import mapper, Session
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, eq_
from sqlalchemy.testing.schema import Table, Column
class InheritingSelectablesTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
foo = Table('foo', metadata,
Column('a', String(30), primary_key=1),
Column('b', String(30), nullable=0))
cls.tables.bar = foo.select(foo.c.b == 'bar').alias('bar')
cls.tables.baz = foo.select(foo.c.b == 'baz').alias('baz')
def test_load(self):
foo, bar, baz = self.tables.foo, self.tables.bar, self.tables.baz
# TODO: add persistence test also
testing.db.execute(foo.insert(), a='not bar', b='baz')
testing.db.execute(foo.insert(), a='also not bar', b='baz')
testing.db.execute(foo.insert(), a='i am bar', b='bar')
testing.db.execute(foo.insert(), a='also bar', b='bar')
class Foo(fixtures.ComparableEntity):
pass
class Bar(Foo):
pass
class Baz(Foo):
pass
mapper(Foo, foo, polymorphic_on=foo.c.b)
mapper(Baz, baz,
with_polymorphic=('*', foo.join(baz, foo.c.b == 'baz').alias('baz')),
inherits=Foo,
inherit_condition=(foo.c.a == baz.c.a),
inherit_foreign_keys=[baz.c.a],
polymorphic_identity='baz')
mapper(Bar, bar,
with_polymorphic=('*', foo.join(bar, foo.c.b == 'bar').alias('bar')),
inherits=Foo,
inherit_condition=(foo.c.a == bar.c.a),
inherit_foreign_keys=[bar.c.a],
polymorphic_identity='bar')
s = Session()
assert [Baz(), Baz(), Bar(), Bar()] == s.query(Foo).order_by(Foo.b.desc()).all()
assert [Bar(), Bar()] == s.query(Bar).all()
class JoinFromSelectPersistenceTest(fixtures.MappedTest):
"""test for [ticket:2885]"""
@classmethod
def define_tables(cls, metadata):
Table('base', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(50))
)
Table('child', metadata,
# 1. name of column must be different, so that we rely on
# mapper._table_to_equated to link the two cols
Column('child_id', Integer, ForeignKey('base.id'), primary_key=True),
Column('name', String(50))
)
@classmethod
def setup_classes(cls):
class Base(cls.Comparable):
pass
class Child(Base):
pass
def test_map_to_select(self):
Base, Child = self.classes.Base, self.classes.Child
base, child = self.tables.base, self.tables.child
base_select = select([base]).alias()
mapper(Base, base_select, polymorphic_on=base_select.c.type,
polymorphic_identity='base')
mapper(Child, child, inherits=Base,
polymorphic_identity='child')
sess = Session()
# 2. use an id other than "1" here so can't rely on
# the two inserts having the same id
c1 = Child(id=12, name='c1')
sess.add(c1)
sess.commit()
sess.close()
c1 = sess.query(Child).one()
eq_(c1.name, 'c1')
|
moshez/luggage
|
refs/heads/master
|
setup.py
|
1
|
# Copyright (c) Moshe Zadka
# See LICENSE for details.
import setuptools
setuptools.setup(
url='https://github.com/moshez/luggage',
name='luggage',
packages=setuptools.find_packages(),
version='0.0.1',
)
|
mookerji/libsbp
|
refs/heads/master
|
generator/sbpg/targets/test_c.py
|
1
|
#!/usr/bin/env python
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: Joshua Gross <josh@swiftnav.com>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
Generator for c tests target.
"""
from sbpg.targets.templating import *
import base64
TEST_TEMPLATE_NAME = "sbp_c_test.c.j2"
CHECK_SUITES_TEMPLATE_NAME = "sbp_c_suites.h.j2"
CHECK_MAIN_TEMPLATE_NAME = "sbp_c_main.c.j2"
def commentify(value):
"""
Builds a comment.
"""
if value is None:
return
if len(value.split('\n')) == 1:
return "* " + value
else:
return '\n'.join([' * ' + l for l in value.split('\n')[:-1]])
def extensions(includes):
"""Formats a list of header files to include.
"""
return ["".join([i.split(".")[0], ".h"]) for i in includes if i.split(".")[0] != "types"]
import re
CONSTRUCT_CODE = set(['u8', 'u16', 'u32', 'u64', 's8', 's16', 's32',
's64', 'float', 'double'])
def convert(value):
"""Converts to a C language appropriate identifier format.
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', value)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() + "_t"
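# Example of the conversion above, worked out from the two substitutions
# (the message name itself is hypothetical):
#   convert("MsgBasePosLLH") -> "msg_base_pos_llh_t"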
def mk_id(field):
"""Builds an identifier from a field.
"""
name = field.type_id
if name == "string":
return "%s" % ("char")
elif name == "array" and field.size:
if field.options['fill'].value not in CONSTRUCT_CODE:
return "%s" % convert(field.options['fill'].value)
else:
return "%s" % field.options['fill'].value
elif name == "array":
return "%s" % convert(field.options['fill'].value)
elif name not in CONSTRUCT_CODE:
return convert(name)
else:
return name
def mk_size(field):
"""Builds an identifier for a container type.
"""
name = field.type_id
if name == "string" and field.options.get('size', None):
return "%s[%d];" % (field.identifier, field.options.get('size').value)
elif name == "string":
return "%s[0];" % field.identifier
elif name == "array" and field.options.get('size', None):
return "%s[%d];" % (field.identifier, field.options.get('size').value)
elif name == "array":
return "%s[0];" % field.identifier
else:
return '%s;' % field.identifier
def b64_decode(field):
return base64.standard_b64decode(field)
def stringType(value):
return type(value) == str
def arrayType(value):
return type(value) == list
def dictType(value):
return type(value) == dict
def floatType(value):
return type(value) == float
def isEmpty(value):
return len(value) == 0
def strEscape(value):
return "((char []){" + ",".join(["(char)" + str(ord(ch)) for ch in value]) + ",0})"
def toStr(value):
return str(value)
JENV.filters['commentify'] = commentify
JENV.filters['mk_id'] = mk_id
JENV.filters['mk_size'] = mk_size
JENV.filters['convert'] = convert
JENV.filters['type'] = type
JENV.filters['str_escape'] = strEscape
JENV.filters['toStr'] = toStr
JENV.tests['stringType'] = stringType
JENV.tests['arrayType'] = arrayType
JENV.tests['dictType'] = dictType
JENV.tests['floatType'] = floatType
JENV.tests['empty'] = isEmpty
def render_source(output_dir, package_spec):
"""
Render and output to a directory given a package specification.
"""
path, name = package_spec.filepath
destination_filename = "%s/%s.c" % (output_dir, name)
py_template = JENV.get_template(TEST_TEMPLATE_NAME)
with open(destination_filename, 'w') as f:
f.write(py_template.render(s=package_spec,
description=package_spec.description,
pkg_name=package_spec.package,
include=package_spec.package.split('.')[1],
filepath="/".join(package_spec.filepath) + ".yaml"))
def render_check_suites(output_dir, all_package_specs):
destination_filename = "%s/%s.h" % (output_dir, "check_suites")
py_template = JENV.get_template(CHECK_SUITES_TEMPLATE_NAME)
with open(destination_filename, 'w') as f:
f.write(py_template.render(package_suites=all_package_specs))
def render_check_main(output_dir, all_package_specs):
destination_filename = "%s/%s.c" % (output_dir, "check_main")
py_template = JENV.get_template(CHECK_MAIN_TEMPLATE_NAME)
with open(destination_filename, 'w') as f:
f.write(py_template.render(package_suites=all_package_specs))
|
slisson/intellij-community
|
refs/heads/master
|
python/testData/console/indent10.py
|
80
|
x = '''Multiline starts;
next line with indent;
next line with indent;
multiline ends'''
x = '''Multiline starts;
first
second
third
fourth
fifth
multiline ends'''
x = '''Multiline starts;
#line
#line
multiline ends'''
|
ciber96/mtasa-blue
|
refs/heads/master
|
vendor/google-breakpad/src/tools/gyp/test/mac/gyptest-loadable-module.py
|
146
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests that a loadable_module target is built correctly.
"""
import TestGyp
import os
import struct
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'loadable-module'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
# Binary.
binary = test.built_file_path(
'test_loadable_module.plugin/Contents/MacOS/test_loadable_module',
chdir=CHDIR)
test.must_exist(binary)
MH_BUNDLE = 8
if struct.unpack('4I', open(binary, 'rb').read(16))[3] != MH_BUNDLE:
test.fail_test()
# Info.plist.
info_plist = test.built_file_path(
'test_loadable_module.plugin/Contents/Info.plist', chdir=CHDIR)
test.must_exist(info_plist)
test.must_contain(info_plist, """
<key>CFBundleExecutable</key>
<string>test_loadable_module</string>
""")
# PkgInfo.
test.built_file_must_not_exist(
'test_loadable_module.plugin/Contents/PkgInfo', chdir=CHDIR)
test.built_file_must_not_exist(
'test_loadable_module.plugin/Contents/Resources', chdir=CHDIR)
test.pass_test()
|
izonder/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/mercurial/base85.py
|
96
|
# base85.py: pure python base85 codec
#
# Copyright (C) 2009 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import struct
_b85chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~"
_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
_b85dec = {}
def _mkb85dec():
for i, c in enumerate(_b85chars):
_b85dec[c] = i
def b85encode(text, pad=False):
"""encode text in base85 format"""
l = len(text)
r = l % 4
if r:
text += '\0' * (4 - r)
longs = len(text) >> 2
words = struct.unpack('>%dL' % (longs), text)
out = ''.join(_b85chars[(word // 52200625) % 85] +
_b85chars2[(word // 7225) % 7225] +
_b85chars2[word % 7225]
for word in words)
if pad:
return out
# Trim padding
olen = l % 4
if olen:
olen += 1
olen += l // 4 * 5
return out[:olen]
def b85decode(text):
"""decode base85-encoded text"""
if not _b85dec:
_mkb85dec()
l = len(text)
out = []
for i in range(0, len(text), 5):
chunk = text[i:i + 5]
acc = 0
for j, c in enumerate(chunk):
try:
acc = acc * 85 + _b85dec[c]
except KeyError:
raise ValueError('bad base85 character at position %d'
% (i + j))
if acc > 4294967295:
raise ValueError('Base85 overflow in hunk starting at byte %d' % i)
out.append(acc)
# Pad final chunk if necessary
cl = l % 5
if cl:
acc *= 85 ** (5 - cl)
if cl > 1:
acc += 0xffffff >> (cl - 2) * 8
out[-1] = acc
out = struct.pack('>%dL' % (len(out)), *out)
if cl:
out = out[:-(5 - cl)]
return out
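# Round-trip sketch (illustrative; this module operates on Python 2 byte strings):
#
#     encoded = b85encode('any binary payload')
#     assert b85decode(encoded) == 'any binary payload'
#
# Passing pad=True to b85encode keeps the output padded to a full final
# 5-character group instead of trimming it as shown in the code above.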
|
dpaiton/OpenPV
|
refs/heads/master
|
pv-core/analysis/python/plot_amoeba_responseon_off.py
|
1
|
"""
Make a histogram of normally distributed random numbers and plot the
analytic PDF over it
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import matplotlib.image as mpimg
import PVReadWeights as rw
import PVReadSparse as rs
import math
"""
mi=mpimg.imread(sys.argv[3])
imgplot = plt.imshow(mi, interpolation='Nearest')
imgplot.set_cmap('hot')
plt.show()
"""
def nearby_neighbor(kzPre, zScaleLog2Pre, zScaleLog2Post):
a = math.pow(2.0, (zScaleLog2Pre - zScaleLog2Post))
ia = a
if ia < 2:
k0 = 0
else:
k0 = ia/2 - 1
if a < 1.0 and kzPre < 0:
k = kzPre - (1.0/a) + 1
else:
k = kzPre
return k0 + (a * k)
def zPatchHead(kzPre, nzPatch, zScaleLog2Pre, zScaleLog2Post):
a = math.pow(2.0, (zScaleLog2Pre - zScaleLog2Post))
if a == 1:
shift = -(0.5 * nzPatch)
return shift + nearby_neighbor(kzPre, zScaleLog2Pre, zScaleLog2Post)
shift = 1 - (0.5 * nzPatch)
if (nzPatch % 2) == 0 and a < 1:
kpos = (kzPre < 0)
if kzPre < 0:
kpos = -(1+kzPre)
else:
kpos = kzPre
l = (2*a*kpos) % 2
if kzPre < 0:
shift -= l == 1
else:
shift -= l == 0
elif (nzPatch % 2) == 1 and a < 1:
shift = -(0.5 * nzPatch)
neighbor = nearby_neighbor(kzPre, zScaleLog2Pre, zScaleLog2Post)
if nzPatch == 1:
return neighbor
return shift + neighbor
"""
a = zPatchHead(int(sys.argv[1]), 5, -math.log(4, 2), -math.log(1, 2))
print a
print int(a)
sys.exit()
"""
vmax = 100.0 # Hz
space = 1
extended = False
w = rw.PVReadWeights(sys.argv[1])
wOff = rw.PVReadWeights(sys.argv[2])
sw = rw.PVReadWeights(sys.argv[3])
swOff = rw.PVReadWeights(sys.argv[4])
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
nx_im = nx * (nxp + space) + space
ny_im = ny * (nyp + space) + space
predub = np.zeros(((nx*nx),(nxp * nxp)))
predubOff = np.zeros(((nx*nx),(nxp * nxp)))
spredub = np.zeros(((nx*nx),(nxp * nxp)))
spredubOff = np.zeros(((nx*nx),(nxp * nxp)))
numpat = w.numPatches
print "numpat = ", numpat
for k in range(numpat):
p = w.next_patch()
pOff = wOff.next_patch()
sp = sw.next_patch()
spOff = swOff.next_patch()
predub[k] = p
predubOff[k] = pOff
spredub[k] = sp
spredubOff[k] = spOff
print "weights done"
#print "p = ", P
#if k == 500:
# sys.exit()
#end fig loop
activ = rs.PVReadSparse(sys.argv[5], extended)
sactiv = rs.PVReadSparse(sys.argv[6], extended)
end = int(sys.argv[7])
step = int(sys.argv[8])
begin = int(sys.argv[9])
count = 0
for end in range(begin+step, end, step):
A = activ.avg_activity(begin, end)
sA = sactiv.avg_activity(begin, end)
this = 10 + count
count += 1
print "this = ", this
print "file = ", sys.argv[this]
print
numrows, numcols = A.shape
min = np.min(A)
max = np.max(A)
s = np.zeros(numcols)
for col in range(numcols):
s[col] = np.sum(A[:,col])
s = s/numrows
b = np.reshape(A, (len(A)* len(A)))
c = np.shape(b)[0]
mi=mpimg.imread(sys.argv[this])
print "a w start"
rr = nx / 64
im = np.zeros((64, 64))
ims = np.zeros((64, 64))
for yi in range(len(A)):
for xi in range(len(A)):
x = int(zPatchHead(int(xi), 5, -math.log(rr, 2), -math.log(1, 2)))
y = int(zPatchHead(int(yi), 5, -math.log(rr, 2), -math.log(1, 2)))
if 58 > x >= 0 and 58 > y >= 0:
if A[yi, xi] > 0:
patch = predub[yi * (nx) + xi]
patchOff = predubOff[yi * (nx) + xi]
spatch = spredub[yi * (nx) + xi]
spatchOff = spredubOff[yi * (nx) + xi]
patch = np.reshape(patch, (nxp, nxp))
patchOff = np.reshape(patchOff, (nxp, nxp))
spatch = np.reshape(spatch, (nxp, nxp))
spatchOff = np.reshape(spatchOff, (nxp, nxp))
for yy in range(nyp):
for xx in range(nxp):
im[y + yy, x + xx] += patch[yy, xx] * A[yi, xi]
im[y + yy, x + xx] -= patchOff[yy, xx] * A[yi, xi]
ims[y + yy, x + xx] += spatch[yy, xx] * sA[yi, xi]
ims[y + yy, x + xx] -= spatchOff[yy, xx] * sA[yi, xi]
fig = plt.figure()
ax = fig.add_subplot(3,1,1)
ax.imshow(mi, interpolation='Nearest', cmap='gray')
ax = fig.add_subplot(3,1,2)
#ax.imshow(mi, interpolation='Nearest', cmap='gray', origin="lower")
ax.set_xlabel('regular')
ax.imshow(im, cmap=cm.jet, interpolation='nearest', vmin = 0.0, vmax = np.max(im))
ax = fig.add_subplot(313)
ax.set_xlabel('scrambled')
ax.imshow(ims, cmap=cm.jet, interpolation='nearest', vmin = 0.0, vmax = np.max(ims))
plt.show()
#end fig loop
|
FATruden/boto
|
refs/heads/master
|
tests/integration/ec2/vpc/__init__.py
|
12133432
| |
Forage/Gramps
|
refs/heads/trunk
|
gramps/plugins/docgen/__init__.py
|
12133432
| |
qma/pants
|
refs/heads/master
|
contrib/node/src/python/pants/contrib/node/targets/__init__.py
|
12133432
| |
vladnicoara/SDLive-Blog
|
refs/heads/master
|
plugins/livedesk-embed/gui-themes/themes/brasil247/item/source/__init__.py
|
12133432
| |
munificent/magpie
|
refs/heads/master
|
dep/gyp/test/actions-multiple/src/filter.py
|
349
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
data = open(sys.argv[3], 'r').read()
fh = open(sys.argv[4], 'w')
fh.write(data.replace(sys.argv[1], sys.argv[2]))
fh.close()
|
awkspace/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/f5/bigip_smtp.py
|
14
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_smtp
short_description: Manages SMTP settings on the BIG-IP
description:
- Allows configuring of the BIG-IP to send mail via an SMTP server by
configuring the parameters of an SMTP server.
version_added: 2.6
options:
name:
description:
- Specifies the name of the SMTP server configuration.
required: True
partition:
description:
- Device partition to manage resources on.
default: Common
smtp_server:
description:
- SMTP server host name in the format of a fully qualified domain name.
- This value is required when creating a new SMTP configuration.
smtp_server_port:
description:
- Specifies the SMTP port number.
- When creating a new SMTP configuration, the default is C(25) when
C(encryption) is C(none) or C(tls). The default is C(465) when C(ssl)
is selected.
local_host_name:
description:
- Host name used in SMTP headers in the format of a fully qualified
domain name. This setting does not refer to the BIG-IP system's hostname.
from_address:
description:
- Email address that the email is being sent from. This is the "Reply-to"
address that the recipient sees.
encryption:
description:
- Specifies whether the SMTP server requires an encrypted connection in
order to send mail.
choices:
- none
- ssl
- tls
authentication:
description:
- Credentials can be set on an SMTP server's configuration even if that
authentication is not used (think staging configs or emergency changes).
This parameter acts as a switch to make the specified C(smtp_server_username)
and C(smtp_server_password) parameters active or not.
- When C(yes), the authentication parameters will be active.
- When C(no), the authentication parameters will be inactive.
type: bool
smtp_server_username:
description:
- User name that the SMTP server requires when validating a user.
smtp_server_password:
description:
- Password that the SMTP server requires when validating a user.
state:
description:
- When C(present), ensures that the SMTP configuration exists.
- When C(absent), ensures that the SMTP configuration does not exist.
default: present
choices:
- present
- absent
update_password:
description:
- Passwords are stored encrypted, so the module cannot know if the supplied
C(smtp_server_password) is the same or different than the existing password.
This parameter controls the updating of the C(smtp_server_password)
credential.
- When C(always), will always update the password.
- When C(on_create), will only set the password for newly created SMTP server
configurations.
default: always
choices:
- always
- on_create
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a base SMTP server configuration
bigip_smtp:
name: my-smtp
smtp_server: 1.1.1.1
smtp_server_username: mail-admin
smtp_server_password: mail-secret
local_host_name: smtp.mydomain.com
from_address: no-reply@mydomain.com
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
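# A removal example in the same style as EXAMPLES above (hedged sketch; host and
# credentials are placeholders, the options mirror the argument spec defined below):
#
# - name: Remove the SMTP server configuration
#   bigip_smtp:
#     name: my-smtp
#     state: absent
#     provider:
#       user: admin
#       password: secret
#       server: lb.mydomain.com
#   delegate_to: localhost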
RETURN = r'''
smtp_server:
description: The new C(smtp_server) value of the SMTP configuration.
returned: changed
type: str
sample: mail.mydomain.com
smtp_server_port:
description: The new C(smtp_server_port) value of the SMTP configuration.
returned: changed
type: int
sample: 25
local_host_name:
description: The new C(local_host_name) value of the SMTP configuration.
returned: changed
type: str
sample: smtp.mydomain.com
from_address:
description: The new C(from_address) value of the SMTP configuration.
returned: changed
type: str
sample: no-reply@mydomain.com
encryption:
description: The new C(encryption) value of the SMTP configuration.
returned: changed
type: str
sample: tls
authentication:
description: Whether the authentication parameters are active or not.
returned: changed
type: bool
sample: yes
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import is_valid_hostname
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import is_valid_hostname
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
api_map = {
'username': 'smtp_server_username',
'passwordEncrypted': 'smtp_server_password',
'localHostName': 'local_host_name',
'smtpServerHostName': 'smtp_server',
'smtpServerPort': 'smtp_server_port',
'encryptedConnection': 'encryption',
'authenticationEnabled': 'authentication_enabled',
'authenticationDisabled': 'authentication_disabled',
'fromAddress': 'from_address',
}
api_attributes = [
'username',
'passwordEncrypted',
'localHostName',
'smtpServerHostName',
'smtpServerPort',
'encryptedConnection',
'authenticationEnabled',
'authenticationDisabled',
'fromAddress',
]
returnables = [
'smtp_server_username',
'smtp_server_password',
'local_host_name',
'smtp_server',
'smtp_server_port',
'encryption',
'authentication',
'from_address',
]
updatables = [
'smtp_server_username',
'smtp_server_password',
'local_host_name',
'smtp_server',
'smtp_server_port',
'encryption',
'authentication',
'from_address',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def local_host_name(self):
if self._values['local_host_name'] is None:
return None
if is_valid_ip(self._values['local_host_name']):
return self._values['local_host_name']
elif is_valid_hostname(self._values['local_host_name']):
# else fallback to checking reasonably well formatted hostnames
return str(self._values['local_host_name'])
raise F5ModuleError(
"The provided 'local_host_name' value {0} is not a valid IP or hostname".format(
str(self._values['local_host_name'])
)
)
@property
def authentication_enabled(self):
if self._values['authentication'] is None:
return None
if self._values['authentication']:
return True
@property
def authentication_disabled(self):
if self._values['authentication'] is None:
return None
if not self._values['authentication']:
return True
@property
def smtp_server_port(self):
if self._values['smtp_server_port'] is None:
return None
return int(self._values['smtp_server_port'])
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def smtp_server_password(self):
return None
@property
def smtp_server_username(self):
return None
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def smtp_server_password(self):
if self.want.update_password == 'on_create':
return None
return self.want.smtp_server_password
@property
def authentication(self):
if self.want.authentication_enabled:
if self.want.authentication_enabled != self.have.authentication_enabled:
return dict(
authentication_enabled=self.want.authentication_enabled
)
if self.want.authentication_disabled:
if self.want.authentication_disabled != self.have.authentication_disabled:
return dict(
authentication_disabled=self.want.authentication_disabled
)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/sys/smtp-server/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.want.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/sys/smtp-server/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.want.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/smtp-server/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/smtp-server/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/smtp-server/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
smtp_server=dict(),
smtp_server_port=dict(type='int'),
smtp_server_username=dict(no_log=True),
smtp_server_password=dict(no_log=True),
local_host_name=dict(),
encryption=dict(choices=['none', 'ssl', 'tls']),
update_password=dict(
default='always',
choices=['always', 'on_create']
),
from_address=dict(),
authentication=dict(type='bool'),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
state=dict(
default='present',
choices=['present', 'absent']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
rpiotti/Flask-AppBuilder
|
refs/heads/master
|
examples/quickcharts2/build/lib/app/models.py
|
6
|
from sqlalchemy import Column, Integer, String, ForeignKey, Date, Float
from sqlalchemy.orm import relationship
from flask.ext.appbuilder import Model
import datetime
class Country(Model):
id = Column(Integer, primary_key=True)
name = Column(String(50), unique = True, nullable=False)
def __repr__(self):
return self.name
class PoliticalType(Model):
id = Column(Integer, primary_key=True)
name = Column(String(50), unique = True, nullable=False)
def __repr__(self):
return self.name
class CountryStats(Model):
id = Column(Integer, primary_key=True)
stat_date = Column(Date, nullable=True)
population = Column(Float)
unemployed = Column(Float)
college = Column(Float)
country_id = Column(Integer, ForeignKey('country.id'), nullable=False)
country = relationship("Country")
political_type_id = Column(Integer, ForeignKey('political_type.id'), nullable=False)
political_type = relationship("PoliticalType")
def __repr__(self):
return "{0}:{1}:{2}:{3}".format(self.country, self.political_type, self.population, self.college)
def month_year(self):
return datetime.datetime(self.stat_date.year, self.stat_date.month, 1)
def country_political(self):
return str(self.country) + ' - ' + str(self.political_type)
|
kkdd/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/email/charset.py
|
58
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
__all__ = [
'Charset',
'add_alias',
'add_charset',
'add_codec',
]
import email.base64mime
import email.quoprimime
from email import errors
from email.encoders import encode_7or8bit
# Flags for types of header encodings
QP = 1 # Quoted-Printable
BASE64 = 2 # Base64
SHORTEST = 3 # the shorter of QP and base64, but only for headers
# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
MISC_LEN = 7
DEFAULT_CHARSET = 'us-ascii'
# Defaults
CHARSETS = {
# input header enc body enc output conv
'iso-8859-1': (QP, QP, None),
'iso-8859-2': (QP, QP, None),
'iso-8859-3': (QP, QP, None),
'iso-8859-4': (QP, QP, None),
# iso-8859-5 is Cyrillic, and not especially used
# iso-8859-6 is Arabic, also not particularly used
# iso-8859-7 is Greek, QP will not make it readable
# iso-8859-8 is Hebrew, QP will not make it readable
'iso-8859-9': (QP, QP, None),
'iso-8859-10': (QP, QP, None),
# iso-8859-11 is Thai, QP will not make it readable
'iso-8859-13': (QP, QP, None),
'iso-8859-14': (QP, QP, None),
'iso-8859-15': (QP, QP, None),
'iso-8859-16': (QP, QP, None),
'windows-1252':(QP, QP, None),
'viscii': (QP, QP, None),
'us-ascii': (None, None, None),
'big5': (BASE64, BASE64, None),
'gb2312': (BASE64, BASE64, None),
'euc-jp': (BASE64, None, 'iso-2022-jp'),
'shift_jis': (BASE64, None, 'iso-2022-jp'),
'iso-2022-jp': (BASE64, None, None),
'koi8-r': (BASE64, BASE64, None),
'utf-8': (SHORTEST, BASE64, 'utf-8'),
# We're making this one up to represent raw unencoded 8-bit
'8bit': (None, BASE64, 'utf-8'),
}
# Aliases for other commonly-used names for character sets. Map
# them to the real ones used in email.
ALIASES = {
'latin_1': 'iso-8859-1',
'latin-1': 'iso-8859-1',
'latin_2': 'iso-8859-2',
'latin-2': 'iso-8859-2',
'latin_3': 'iso-8859-3',
'latin-3': 'iso-8859-3',
'latin_4': 'iso-8859-4',
'latin-4': 'iso-8859-4',
'latin_5': 'iso-8859-9',
'latin-5': 'iso-8859-9',
'latin_6': 'iso-8859-10',
'latin-6': 'iso-8859-10',
'latin_7': 'iso-8859-13',
'latin-7': 'iso-8859-13',
'latin_8': 'iso-8859-14',
'latin-8': 'iso-8859-14',
'latin_9': 'iso-8859-15',
'latin-9': 'iso-8859-15',
'latin_10':'iso-8859-16',
'latin-10':'iso-8859-16',
'cp949': 'ks_c_5601-1987',
'euc_jp': 'euc-jp',
'euc_kr': 'euc-kr',
'ascii': 'us-ascii',
}
# Map charsets to their Unicode codec strings.
CODEC_MAP = {
'gb2312': 'eucgb2312_cn',
'big5': 'big5_tw',
# Hack: We don't want *any* conversion for stuff marked us-ascii, as all
# sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
# Let that stuff pass through without conversion to/from Unicode.
'us-ascii': None,
}
# Convenience functions for extending the above mappings
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
"""Add character set properties to the global registry.
charset is the input character set, and must be the canonical name of a
character set.
Optional header_enc and body_enc is either Charset.QP for
quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
is only valid for header_enc. It describes how message headers and
message bodies in the input charset are to be encoded. Default is no
encoding.
Optional output_charset is the character set that the output should be
in. Conversions will proceed from input charset, to Unicode, to the
output charset when the method Charset.convert() is called. The default
is to output in the same character set as the input.
Both input_charset and output_charset must have Unicode codec entries in
the module's charset-to-codec mapping; use add_codec(charset, codecname)
to add codecs the module does not know about. See the codecs module's
documentation for more information.
"""
if body_enc == SHORTEST:
raise ValueError('SHORTEST not allowed for body_enc')
CHARSETS[charset] = (header_enc, body_enc, output_charset)
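# Hedged example of registering a charset with the function above (the charset
# name and encodings are chosen purely for illustration):
#   add_charset('x-custom-8bit', header_enc=BASE64, body_enc=BASE64,
#               output_charset='utf-8')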
def add_alias(alias, canonical):
"""Add a character set alias.
alias is the alias name, e.g. latin-1
canonical is the character set's canonical name, e.g. iso-8859-1
"""
ALIASES[alias] = canonical
def add_codec(charset, codecname):
"""Add a codec that map characters in the given charset to/from Unicode.
charset is the canonical name of a character set. codecname is the name
of a Python codec, as appropriate for the second argument to the unicode()
built-in, or to the encode() method of a Unicode string.
"""
CODEC_MAP[charset] = codecname
class Charset:
"""Map character sets to their email properties.
This class provides information about the requirements imposed on email
for a specific character set. It also provides convenience routines for
converting between character sets, given the availability of the
applicable codecs. Given a character set, it will do its best to provide
information on how to use that character set in an email in an
RFC-compliant way.
Certain character sets must be encoded with quoted-printable or base64
when used in email headers or bodies. Certain character sets must be
converted outright, and are not allowed in email. Instances of this
module expose the following information about a character set:
input_charset: The initial character set specified. Common aliases
are converted to their `official' email names (e.g. latin_1
is converted to iso-8859-1). Defaults to 7-bit us-ascii.
header_encoding: If the character set must be encoded before it can be
used in an email header, this attribute will be set to
Charset.QP (for quoted-printable), Charset.BASE64 (for
base64 encoding), or Charset.SHORTEST for the shortest of
QP or BASE64 encoding. Otherwise, it will be None.
body_encoding: Same as header_encoding, but describes the encoding for the
mail message's body, which indeed may be different than the
header encoding. Charset.SHORTEST is not allowed for
body_encoding.
output_charset: Some character sets must be converted before they can be
used in email headers or bodies. If the input_charset is
one of them, this attribute will contain the name of the
charset output will be converted to. Otherwise, it will
be None.
input_codec: The name of the Python codec used to convert the
input_charset to Unicode. If no conversion codec is
necessary, this attribute will be None.
output_codec: The name of the Python codec used to convert Unicode
to the output_charset. If no conversion codec is necessary,
this attribute will have the same value as the input_codec.
"""
def __init__(self, input_charset=DEFAULT_CHARSET):
# RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
# unicode because its .lower() is locale insensitive. If the argument
# is already a unicode, we leave it at that, but ensure that the
# charset is ASCII, as the standard (RFC XXX) requires.
try:
if isinstance(input_charset, unicode):
input_charset.encode('ascii')
else:
input_charset = unicode(input_charset, 'ascii')
except UnicodeError:
raise errors.CharsetError(input_charset)
input_charset = input_charset.lower()
# Set the input charset after filtering through the aliases
self.input_charset = ALIASES.get(input_charset, input_charset)
# We can try to guess which encoding and conversion to use by the
# charset_map dictionary. Try that first, but let the user override
# it.
henc, benc, conv = CHARSETS.get(self.input_charset,
(SHORTEST, BASE64, None))
if not conv:
conv = self.input_charset
# Set the attributes, allowing the arguments to override the default.
self.header_encoding = henc
self.body_encoding = benc
self.output_charset = ALIASES.get(conv, conv)
# Now set the codecs. If one isn't defined for input_charset,
# guess and try a Unicode codec with the same name as input_codec.
self.input_codec = CODEC_MAP.get(self.input_charset,
self.input_charset)
self.output_codec = CODEC_MAP.get(self.output_charset,
self.output_charset)
def __str__(self):
return self.input_charset.lower()
__repr__ = __str__
def __eq__(self, other):
return str(self) == str(other).lower()
def __ne__(self, other):
return not self.__eq__(other)
def get_body_encoding(self):
"""Return the content-transfer-encoding used for body encoding.
This is either the string `quoted-printable' or `base64' depending on
the encoding used, or it is a function in which case you should call
the function with a single argument, the Message object being
encoded. The function should then set the Content-Transfer-Encoding
header itself to whatever is appropriate.
Returns "quoted-printable" if self.body_encoding is QP.
Returns "base64" if self.body_encoding is BASE64.
Returns "7bit" otherwise.
"""
assert self.body_encoding != SHORTEST
if self.body_encoding == QP:
return 'quoted-printable'
elif self.body_encoding == BASE64:
return 'base64'
else:
return encode_7or8bit
def convert(self, s):
"""Convert a string from the input_codec to the output_codec."""
if self.input_codec != self.output_codec:
return unicode(s, self.input_codec).encode(self.output_codec)
else:
return s
def to_splittable(self, s):
"""Convert a possibly multibyte string to a safely splittable format.
Uses the input_codec to try and convert the string to Unicode, so it
can be safely split on character boundaries (even for multibyte
characters).
Returns the string as-is if it isn't known how to convert it to
Unicode with the input_charset.
Characters that could not be converted to Unicode will be replaced
with the Unicode replacement character U+FFFD.
"""
if isinstance(s, unicode) or self.input_codec is None:
return s
try:
return unicode(s, self.input_codec, 'replace')
except LookupError:
# Input codec not installed on system, so return the original
# string unchanged.
return s
def from_splittable(self, ustr, to_output=True):
"""Convert a splittable string back into an encoded string.
Uses the proper codec to try and convert the string from Unicode back
into an encoded format. Return the string as-is if it is not Unicode,
or if it could not be converted from Unicode.
Characters that could not be converted from Unicode will be replaced
with an appropriate character (usually '?').
If to_output is True (the default), uses output_codec to convert to an
encoded format. If to_output is False, uses input_codec.
"""
if to_output:
codec = self.output_codec
else:
codec = self.input_codec
if not isinstance(ustr, unicode) or codec is None:
return ustr
try:
return ustr.encode(codec, 'replace')
except LookupError:
# Output codec not installed
return ustr
def get_output_charset(self):
"""Return the output character set.
This is self.output_charset if that is not None, otherwise it is
self.input_charset.
"""
return self.output_charset or self.input_charset
def encoded_header_len(self, s):
"""Return the length of the encoded header string."""
cset = self.get_output_charset()
# The len(s) of a 7bit encoding is len(s)
if self.header_encoding == BASE64:
return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == QP:
return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
return min(lenb64, lenqp) + len(cset) + MISC_LEN
else:
return len(s)
def header_encode(self, s, convert=False):
"""Header-encode a string, optionally converting it to output_charset.
If convert is True, the string will be converted from the input
charset to the output charset automatically. This is not useful for
multibyte character sets, which have line length issues (multibyte
characters must be split on a character, not a byte boundary); use the
high-level Header class to deal with these issues. convert defaults
to False.
The type of encoding (base64 or quoted-printable) will be based on
self.header_encoding.
"""
cset = self.get_output_charset()
if convert:
s = self.convert(s)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.header_encoding == BASE64:
return email.base64mime.header_encode(s, cset)
elif self.header_encoding == QP:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
elif self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
if lenb64 < lenqp:
return email.base64mime.header_encode(s, cset)
else:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
else:
return s
def body_encode(self, s, convert=True):
"""Body-encode a string and convert it to output_charset.
If convert is True (the default), the string will be converted from
the input charset to output charset automatically. Unlike
header_encode(), there are no issues with byte boundaries and
multibyte charsets in email bodies, so this is usually pretty safe.
The type of encoding (base64 or quoted-printable) will be based on
self.body_encoding.
"""
if convert:
s = self.convert(s)
        # 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.body_encoding is BASE64:
return email.base64mime.body_encode(s)
elif self.body_encoding is QP:
return email.quoprimime.body_encode(s)
else:
return s
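# Illustrative sketch (added comment): header_encode()/body_encode() above pick
# the transfer encoding from the Charset attributes, e.g. a charset whose
# body_encoding is BASE64 routes through email.base64mime.body_encode(), while
# a plain us-ascii charset is assumed to fall through the final `else` and is
# returned unchanged.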
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.6/Lib/encodings/iso8859_10.py
|
593
|
""" Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-10',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
u'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
u'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
u'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
u'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
u'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
u'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
u'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
u'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
u'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
u'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
u'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
u'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
u'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
u'\u2015' # 0xBD -> HORIZONTAL BAR
u'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
u'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
u'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
u'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
u'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
u'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
u'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
u'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
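# Usage sketch (added comment, not part of the generated file): once the
# encodings package registers this module, the tables above drive the normal
# codec machinery, e.g.:
#   '\xff'.decode('iso8859-10')     # -> u'\u0138', per the 0xFF entry above
#   u'\u0138'.encode('iso8859-10')  # -> '\xff'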
|
jamespcole/home-assistant
|
refs/heads/master
|
homeassistant/components/tesla/switch.py
|
1
|
"""Support for Tesla charger switches."""
import logging
from homeassistant.components.switch import ENTITY_ID_FORMAT, SwitchDevice
from homeassistant.const import STATE_OFF, STATE_ON
from . import DOMAIN as TESLA_DOMAIN, TeslaDevice
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['tesla']
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Tesla switch platform."""
controller = hass.data[TESLA_DOMAIN]['devices']['controller']
devices = []
for device in hass.data[TESLA_DOMAIN]['devices']['switch']:
if device.bin_type == 0x8:
devices.append(ChargerSwitch(device, controller))
elif device.bin_type == 0x9:
devices.append(RangeSwitch(device, controller))
add_entities(devices, True)
class ChargerSwitch(TeslaDevice, SwitchDevice):
"""Representation of a Tesla charger switch."""
def __init__(self, tesla_device, controller):
"""Initialise of the switch."""
self._state = None
super().__init__(tesla_device, controller)
self.entity_id = ENTITY_ID_FORMAT.format(self.tesla_id)
def turn_on(self, **kwargs):
"""Send the on command."""
_LOGGER.debug("Enable charging: %s", self._name)
self.tesla_device.start_charge()
def turn_off(self, **kwargs):
"""Send the off command."""
_LOGGER.debug("Disable charging for: %s", self._name)
self.tesla_device.stop_charge()
@property
def is_on(self):
"""Get whether the switch is in on state."""
return self._state == STATE_ON
def update(self):
"""Update the state of the switch."""
_LOGGER.debug("Updating state for: %s", self._name)
self.tesla_device.update()
self._state = STATE_ON if self.tesla_device.is_charging() \
else STATE_OFF
class RangeSwitch(TeslaDevice, SwitchDevice):
"""Representation of a Tesla max range charging switch."""
def __init__(self, tesla_device, controller):
"""Initialise of the switch."""
self._state = None
super().__init__(tesla_device, controller)
self.entity_id = ENTITY_ID_FORMAT.format(self.tesla_id)
def turn_on(self, **kwargs):
"""Send the on command."""
_LOGGER.debug("Enable max range charging: %s", self._name)
self.tesla_device.set_max()
def turn_off(self, **kwargs):
"""Send the off command."""
_LOGGER.debug("Disable max range charging: %s", self._name)
self.tesla_device.set_standard()
@property
def is_on(self):
"""Get whether the switch is in on state."""
return self._state == STATE_ON
def update(self):
"""Update the state of the switch."""
_LOGGER.debug("Updating state for: %s", self._name)
self.tesla_device.update()
self._state = STATE_ON if self.tesla_device.is_maxrange() \
else STATE_OFF
|
eformat/vertx-web
|
refs/heads/master
|
vertx-web/src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/runner.py
|
658
|
import sys
import os
def run():
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
## FIXME: this is kind of crude; if we could create a fake pip
## module, then exec into it and update pip.__path__ properly, we
## wouldn't have to update sys.path:
sys.path.insert(0, base)
import pip
return pip.main()
if __name__ == '__main__':
exit = run()
if exit:
sys.exit(exit)
|
jkahn/pydoop-code
|
refs/heads/master
|
pydoop/pipes_runner.py
|
1
|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2013 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import os, tempfile
from pydoop.text_protocol import text_down_protocol
from pydoop.text_protocol import up_serializer
class pipes_runner(object):
def __init__(self, program, output_visitor,
down_protocol=text_down_protocol):
self.program = program
self.output_visitor = output_visitor
fd, self.tmp_filename = tempfile.mkstemp(prefix="pydoop_")
os.close(fd)
self.down_channel = down_protocol(self.program, out_file=self.tmp_filename)
# FIXME the following should be done with some metaclass magic...
for n in ['start', 'abort',
'set_job_conf', 'set_input_types',
'run_map', 'run_reduce',
'reduce_key', 'reduce_value', 'map_item']:
self.__setattr__(n, self.down_channel.__getattribute__(n))
def close(self):
self.down_channel.close()
with open(self.tmp_filename) as of:
for l in of:
l = l.strip()
parts = l.split('\t')
cmd = parts[0]
f = self.output_visitor.__getattribute__(cmd)
x = map(up_serializer.deserialize, parts[1:])
f(*x)
os.remove(self.tmp_filename)
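# Illustrative sketch (added comment, not part of the original file): close()
# replays each captured line as "<cmd>\t<arg>..." and calls the method of the
# same name on output_visitor with the deserialized arguments, so a minimal
# visitor only needs methods named after the up-protocol commands (the command
# name below is an assumption):
#
#   class PrintingVisitor(object):
#       def output(self, key, value):
#           print key, value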
|
jwren/intellij-community
|
refs/heads/master
|
python/testData/resolve/DunderDocInDeclarationNewStyleClass.py
|
35
|
class A(object):
print(__doc__)
# <ref>
|
btgorman/RISE-power-water-ss-1phase
|
refs/heads/master
|
model_outputs/list_generator.py
|
1
|
import csv
import collections
import itertools
with open('list_set_of_set.csv', 'r') as file:
reader = csv.reader(file)
new_list = list(reader)
listid = []
listid.append(1)
listlist = [[]]
listset = [set()]
incr = 1 # track when pipeid changes
idx = 0 # listlist idx
for row in new_list:
pipeid = int(row[1])
if pipeid != incr:
listid.append(pipeid)
listlist.append([])
listset.append(set())
incr = pipeid
idx += 1
listlist[idx].append(tuple(map(int, tuple(filter(None, row[2::])))))
for itemidx in range(0, len(listlist)):
for elem in listlist[itemidx]:
listset[itemidx].update(elem)
# print(listlist[0])
# print(listset[0])
# print('')
totalcomb = [[]]
totalperm = [[]]
comb_counter = [{}]
for idx in range(0, len(listlist)):
if idx > 0:
totalcomb.append([])
totalperm.append([])
comb_counter.append({})
for num_combs in range(1, 4+1):
if num_combs <= len(listset[idx]):
totalcomb[idx] += itertools.combinations(list(listset[idx]), num_combs)
for elem in listlist[idx]:
for num_perms in range(1, len(elem)+1):
totalperm[idx] += itertools.permutations(elem, num_perms)
c = collections.Counter(totalperm[idx])
for elem in totalcomb[idx]:
comb_counter[idx][elem] = c[elem]
checkidx = 37
print(listid[checkidx])
print(listlist[checkidx])
print(listset[checkidx])
print('')
print(totalcomb[checkidx])
print(totalperm[checkidx])
print(comb_counter[checkidx])
with open('critical_junctions.csv', 'w', newline='') as file:
writer = csv.writer(file)
for idx in range(0, len(listlist)):
for key,value in comb_counter[idx].items():
writer.writerow([listid[idx], list(key), value])
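# Illustrative note (added comment): for an input row shaped like
# ['<label>', '3', '101', '102', ''] the script groups the tuple (101, 102)
# under pipeid 3, builds every combination of up to four junction ids seen for
# that pipeid, counts how often each one occurs among the permutations of the
# grouped tuples, and writes [pipeid, combination, count] rows to
# critical_junctions.csv.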
|
gameduell/duell
|
refs/heads/master
|
bin/win/python2.7.9/Lib/site-packages/pip/_vendor/requests/packages/chardet/big5prober.py
|
2930
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
|
danielchatfield/shreddi.es
|
refs/heads/master
|
libs/flask/testsuite/templating.py
|
562
|
# -*- coding: utf-8 -*-
"""
flask.testsuite.templating
~~~~~~~~~~~~~~~~~~~~~~~~~~
Template functionality
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase
class TemplatingTestCase(FlaskTestCase):
def test_context_processing(self):
app = flask.Flask(__name__)
@app.context_processor
def context_processor():
return {'injected_value': 42}
@app.route('/')
def index():
return flask.render_template('context_template.html', value=23)
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'<p>23|42')
def test_original_win(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template_string('{{ config }}', config=42)
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'42')
def test_request_less_rendering(self):
app = flask.Flask(__name__)
app.config['WORLD_NAME'] = 'Special World'
@app.context_processor
def context_processor():
return dict(foo=42)
with app.app_context():
rv = flask.render_template_string('Hello {{ config.WORLD_NAME }} '
'{{ foo }}')
self.assert_equal(rv, 'Hello Special World 42')
def test_standard_context(self):
app = flask.Flask(__name__)
app.secret_key = 'development key'
@app.route('/')
def index():
flask.g.foo = 23
flask.session['test'] = 'aha'
return flask.render_template_string('''
{{ request.args.foo }}
{{ g.foo }}
{{ config.DEBUG }}
{{ session.test }}
''')
rv = app.test_client().get('/?foo=42')
self.assert_equal(rv.data.split(), [b'42', b'23', b'False', b'aha'])
def test_escaping(self):
text = '<p>Hello World!'
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('escaping_template.html', text=text,
html=flask.Markup(text))
lines = app.test_client().get('/').data.splitlines()
self.assert_equal(lines, [
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!'
])
def test_no_escaping(self):
app = flask.Flask(__name__)
with app.test_request_context():
self.assert_equal(flask.render_template_string('{{ foo }}',
foo='<test>'), '<test>')
self.assert_equal(flask.render_template('mail.txt', foo='<test>'),
'<test> Mail')
def test_macros(self):
app = flask.Flask(__name__)
with app.test_request_context():
macro = flask.get_template_attribute('_macro.html', 'hello')
self.assert_equal(macro('World'), 'Hello World!')
def test_template_filter(self):
app = flask.Flask(__name__)
@app.template_filter()
def my_reverse(s):
return s[::-1]
self.assert_in('my_reverse', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')
def test_add_template_filter(self):
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse)
self.assert_in('my_reverse', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')
def test_template_filter_with_name(self):
app = flask.Flask(__name__)
@app.template_filter('strrev')
def my_reverse(s):
return s[::-1]
self.assert_in('strrev', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')
def test_add_template_filter_with_name(self):
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse, 'strrev')
self.assert_in('strrev', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')
def test_template_filter_with_template(self):
app = flask.Flask(__name__)
@app.template_filter()
def super_reverse(s):
return s[::-1]
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_add_template_filter_with_template(self):
app = flask.Flask(__name__)
def super_reverse(s):
return s[::-1]
app.add_template_filter(super_reverse)
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_filter_with_name_and_template(self):
app = flask.Flask(__name__)
@app.template_filter('super_reverse')
def my_reverse(s):
return s[::-1]
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_add_template_filter_with_name_and_template(self):
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse, 'super_reverse')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_test(self):
app = flask.Flask(__name__)
@app.template_test()
def boolean(value):
return isinstance(value, bool)
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_add_template_test(self):
app = flask.Flask(__name__)
def boolean(value):
return isinstance(value, bool)
app.add_template_test(boolean)
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_template_test_with_name(self):
app = flask.Flask(__name__)
@app.template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_add_template_test_with_name(self):
app = flask.Flask(__name__)
def is_boolean(value):
return isinstance(value, bool)
app.add_template_test(is_boolean, 'boolean')
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_template_test_with_template(self):
app = flask.Flask(__name__)
@app.template_test()
def boolean(value):
return isinstance(value, bool)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_add_template_test_with_template(self):
app = flask.Flask(__name__)
def boolean(value):
return isinstance(value, bool)
app.add_template_test(boolean)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_template_test_with_name_and_template(self):
app = flask.Flask(__name__)
@app.template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_add_template_test_with_name_and_template(self):
app = flask.Flask(__name__)
def is_boolean(value):
return isinstance(value, bool)
app.add_template_test(is_boolean, 'boolean')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_add_template_global(self):
app = flask.Flask(__name__)
@app.template_global()
def get_stuff():
return 42
self.assert_in('get_stuff', app.jinja_env.globals.keys())
self.assert_equal(app.jinja_env.globals['get_stuff'], get_stuff)
self.assert_true(app.jinja_env.globals['get_stuff'](), 42)
with app.app_context():
rv = flask.render_template_string('{{ get_stuff() }}')
self.assert_equal(rv, '42')
def test_custom_template_loader(self):
class MyFlask(flask.Flask):
def create_global_jinja_loader(self):
from jinja2 import DictLoader
return DictLoader({'index.html': 'Hello Custom World!'})
app = MyFlask(__name__)
@app.route('/')
def index():
return flask.render_template('index.html')
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.data, b'Hello Custom World!')
def test_iterable_loader(self):
app = flask.Flask(__name__)
@app.context_processor
def context_processor():
return {'whiskey': 'Jameson'}
@app.route('/')
def index():
return flask.render_template(
['no_template.xml', # should skip this one
'simple_template.html', # should render this
'context_template.html'],
value=23)
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'<h1>Jameson</h1>')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TemplatingTestCase))
return suite
|
erwilan/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/misc/ovirt.py
|
63
|
#!/usr/bin/python
# (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt
author: "Vincent Van der Kussen (@vincentvdk)"
short_description: oVirt/RHEV platform management
description:
- This module only supports oVirt/RHEV version 3. A newer module M(ovirt_vms) supports oVirt/RHV version 4.
- Allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform.
version_added: "1.4"
options:
user:
description:
- the user to authenticate with
default: null
required: true
aliases: []
url:
description:
- the url of the oVirt instance
default: null
required: true
aliases: []
instance_name:
description:
- the name of the instance to use
default: null
required: true
aliases: [ vmname ]
password:
description:
- password of the user to authenticate with
default: null
required: true
aliases: []
image:
description:
- template to use for the instance
default: null
required: false
aliases: []
resource_type:
description:
- whether you want to deploy an image or create an instance from scratch.
default: null
required: false
aliases: []
choices: [ 'new', 'template' ]
zone:
description:
- deploy the image to this oVirt cluster
default: null
required: false
aliases: []
instance_disksize:
description:
- size of the instance's disk in GB
default: null
required: false
aliases: [ vm_disksize]
instance_cpus:
description:
      - the instance's number of CPUs
default: 1
required: false
aliases: [ vmcpus ]
instance_nic:
description:
- name of the network interface in oVirt/RHEV
default: null
required: false
aliases: [ vmnic ]
instance_network:
description:
- the logical network the machine should belong to
default: rhevm
required: false
aliases: [ vmnetwork ]
instance_mem:
description:
- the instance's amount of memory in MB
default: null
required: false
aliases: [ vmmem ]
instance_type:
description:
- define if the instance is a server or desktop
default: server
required: false
aliases: [ vmtype ]
choices: [ 'server', 'desktop' ]
disk_alloc:
description:
- define if disk is thin or preallocated
default: thin
required: false
aliases: []
choices: [ 'thin', 'preallocated' ]
disk_int:
description:
- interface type of the disk
default: virtio
required: false
aliases: []
choices: [ 'virtio', 'ide' ]
instance_os:
description:
- type of Operating System
default: null
required: false
aliases: [ vmos ]
instance_cores:
description:
- define the instance's number of cores
default: 1
required: false
aliases: [ vmcores ]
sdomain:
description:
- the Storage Domain where you want to create the instance's disk on.
default: null
required: false
aliases: []
region:
description:
- the oVirt/RHEV datacenter where you want to deploy to
default: null
required: false
aliases: []
instance_dns:
description:
- define the instance's Primary DNS server
required: false
aliases: [ dns ]
version_added: "2.1"
instance_domain:
description:
- define the instance's Domain
required: false
aliases: [ domain ]
version_added: "2.1"
instance_hostname:
description:
- define the instance's Hostname
required: false
aliases: [ hostname ]
version_added: "2.1"
instance_ip:
description:
- define the instance's IP
required: false
aliases: [ ip ]
version_added: "2.1"
instance_netmask:
description:
- define the instance's Netmask
required: false
aliases: [ netmask ]
version_added: "2.1"
instance_rootpw:
description:
- define the instance's Root password
required: false
aliases: [ rootpw ]
version_added: "2.1"
instance_key:
description:
- define the instance's Authorized key
required: false
aliases: [ key ]
version_added: "2.1"
state:
description:
- create, terminate or remove instances
default: 'present'
required: false
aliases: []
    choices: ['present', 'absent', 'shutdown', 'started', 'restart']
requirements:
- "python >= 2.6"
- "ovirt-engine-sdk-python"
'''
EXAMPLES = '''
# Basic example provisioning from image.
ovirt:
user: admin@internal
url: https://ovirt.example.com
instance_name: ansiblevm04
password: secret
image: centos_64
zone: cluster01
resource_type: template"
# Full example to create new instance from scratch
ovirt:
instance_name: testansible
resource_type: new
instance_type: server
user: admin@internal
password: secret
url: https://ovirt.example.com
instance_disksize: 10
zone: cluster01
region: datacenter1
instance_cpus: 1
instance_nic: nic1
instance_network: rhevm
instance_mem: 1000
disk_alloc: thin
sdomain: FIBER01
instance_cores: 1
instance_os: rhel_6x64
disk_int: virtio"
# stopping an instance
ovirt:
instance_name: testansible
    state: shutdown
user: admin@internal
password: secret
url: https://ovirt.example.com
# starting an instance
ovirt:
instance_name: testansible
state: started
user: admin@internal
password: secret
url: https://ovirt.example.com
# starting an instance with cloud init information
ovirt:
instance_name: testansible
state: started
user: admin@internal
password: secret
url: https://ovirt.example.com
hostname: testansible
domain: ansible.local
ip: 192.0.2.100
netmask: 255.255.255.0
gateway: 192.0.2.1
rootpw: bigsecret
'''
import time
try:
from ovirtsdk.api import API
from ovirtsdk.xml import params
HAS_OVIRTSDK = True
except ImportError:
HAS_OVIRTSDK = False
# ------------------------------------------------------------------- #
# create connection with API
#
def conn(url, user, password):
api = API(url=url, username=user, password=password, insecure=True)
try:
value = api.test()
except:
raise Exception("error connecting to the oVirt API")
return api
# ------------------------------------------------------------------- #
# Create VM from scratch
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
if vmdisk_alloc == 'thin':
# define VM params
vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
# define disk params
vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow',
storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
# define network parameters
network_net = params.Network(name=vmnetwork)
nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
elif vmdisk_alloc == 'preallocated':
# define VM params
vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype)
# define disk params
vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System",
format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
# define network parameters
network_net = params.Network(name=vmnetwork)
nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
try:
conn.vms.add(vmparams)
except:
raise Exception("Error creating VM with specified parameters")
vm = conn.vms.get(name=vmname)
try:
vm.disks.add(vmdisk)
except:
raise Exception("Error attaching disk")
try:
vm.nics.add(nic_net1)
except:
raise Exception("Error adding nic")
# create an instance from a template
def create_vm_template(conn, vmname, image, zone):
vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image),disks=params.Disks(clone=True))
try:
conn.vms.add(vmparams)
except:
raise Exception('error adding template %s' % image)
# start instance
def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
domain=None, dns=None, rootpw=None, key=None):
vm = conn.vms.get(name=vmname)
use_cloud_init = False
nics = None
nic = None
if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
use_cloud_init = True
if ip and netmask and gateway:
ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
nics = params.Nics()
nics = params.GuestNicsConfiguration(nic_configuration=[nic])
initialization=params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root',
root_password=rootpw, nic_configurations=nics, dns_servers=dns,
authorized_ssh_keys=key)
action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization))
vm.start(action=action)
# Stop instance
def vm_stop(conn, vmname):
vm = conn.vms.get(name=vmname)
vm.stop()
# restart instance
def vm_restart(conn, vmname):
state = vm_status(conn, vmname)
vm = conn.vms.get(name=vmname)
vm.stop()
while conn.vms.get(vmname).get_status().get_state() != 'down':
time.sleep(5)
vm.start()
# remove an instance
def vm_remove(conn, vmname):
vm = conn.vms.get(name=vmname)
vm.delete()
# ------------------------------------------------------------------- #
# VM statuses
#
# Get the VMs status
def vm_status(conn, vmname):
status = conn.vms.get(name=vmname).status.state
return status
# Get VM object and return it's name if object exists
def get_vm(conn, vmname):
vm = conn.vms.get(name=vmname)
if vm is None:
name = "empty"
else:
name = vm.get_name()
return name
# ------------------------------------------------------------------- #
# Hypervisor operations
#
# not available yet
# ------------------------------------------------------------------- #
# Main
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent', 'shutdown', 'started', 'restart']),
#name = dict(required=True),
user = dict(required=True),
url = dict(required=True),
instance_name = dict(required=True, aliases=['vmname']),
password = dict(required=True, no_log=True),
image = dict(),
resource_type = dict(choices=['new', 'template']),
zone = dict(),
instance_disksize = dict(aliases=['vm_disksize']),
instance_cpus = dict(default=1, aliases=['vmcpus']),
instance_nic = dict(aliases=['vmnic']),
instance_network = dict(default='rhevm', aliases=['vmnetwork']),
instance_mem = dict(aliases=['vmmem']),
instance_type = dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']),
disk_alloc = dict(default='thin', choices=['thin', 'preallocated']),
disk_int = dict(default='virtio', choices=['virtio', 'ide']),
instance_os = dict(aliases=['vmos']),
instance_cores = dict(default=1, aliases=['vmcores']),
instance_hostname = dict(aliases=['hostname']),
instance_ip = dict(aliases=['ip']),
instance_netmask = dict(aliases=['netmask']),
instance_gateway = dict(aliases=['gateway']),
instance_domain = dict(aliases=['domain']),
instance_dns = dict(aliases=['dns']),
instance_rootpw = dict(aliases=['rootpw']),
instance_key = dict(aliases=['key']),
sdomain = dict(),
region = dict(),
)
)
if not HAS_OVIRTSDK:
module.fail_json(msg='ovirtsdk required for this module')
state = module.params['state']
user = module.params['user']
url = module.params['url']
vmname = module.params['instance_name']
password = module.params['password']
image = module.params['image'] # name of the image to deploy
resource_type = module.params['resource_type'] # template or from scratch
zone = module.params['zone'] # oVirt cluster
vmdisk_size = module.params['instance_disksize'] # disksize
vmcpus = module.params['instance_cpus'] # number of cpu
vmnic = module.params['instance_nic'] # network interface
vmnetwork = module.params['instance_network'] # logical network
vmmem = module.params['instance_mem'] # mem size
vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated
vmdisk_int = module.params['disk_int'] # disk interface virtio or ide
vmos = module.params['instance_os'] # Operating System
vmtype = module.params['instance_type'] # server or desktop
vmcores = module.params['instance_cores'] # number of cores
sdomain = module.params['sdomain'] # storage domain to store disk on
region = module.params['region'] # oVirt Datacenter
hostname = module.params['instance_hostname']
ip = module.params['instance_ip']
netmask = module.params['instance_netmask']
gateway = module.params['instance_gateway']
domain = module.params['instance_domain']
dns = module.params['instance_dns']
rootpw = module.params['instance_rootpw']
key = module.params['instance_key']
#initialize connection
try:
c = conn(url+"/api", user, password)
except Exception as e:
module.fail_json(msg='%s' % e)
if state == 'present':
if get_vm(c, vmname) == "empty":
if resource_type == 'template':
try:
create_vm_template(c, vmname, image, zone)
except Exception as e:
module.fail_json(msg='%s' % e)
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname,image))
elif resource_type == 'new':
# FIXME: refactor, use keyword args.
try:
create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
except Exception as e:
module.fail_json(msg='%s' % e)
module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
else:
module.exit_json(changed=False, msg="You did not specify a resource type")
else:
module.exit_json(changed=False, msg="VM %s already exists" % vmname)
if state == 'started':
if vm_status(c, vmname) == 'up':
module.exit_json(changed=False, msg="VM %s is already running" % vmname)
else:
#vm_start(c, vmname)
vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
module.exit_json(changed=True, msg="VM %s started" % vmname)
if state == 'shutdown':
if vm_status(c, vmname) == 'down':
module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
else:
vm_stop(c, vmname)
module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
if state == 'restart':
if vm_status(c, vmname) == 'up':
vm_restart(c, vmname)
module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
else:
module.exit_json(changed=False, msg="VM %s is not running" % vmname)
if state == 'absent':
if get_vm(c, vmname) == "empty":
module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
else:
vm_remove(c, vmname)
module.exit_json(changed=True, msg="VM %s removed" % vmname)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
evilhero/mylar
|
refs/heads/master
|
lib/cherrypy/test/test_auth_digest.py
|
42
|
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
import cherrypy
from cherrypy.lib import auth_digest
from cherrypy.test import helper
class DigestAuthTest(helper.CPWebCase):
def setup_server():
class Root:
def index(self):
return "This is public."
index.exposed = True
class DigestProtected:
def index(self):
return "Hello %s, you've been authorized." % cherrypy.request.login
index.exposed = True
def fetch_users():
return {'test': 'test'}
get_ha1 = cherrypy.lib.auth_digest.get_ha1_dict_plain(fetch_users())
conf = {'/digest': {'tools.auth_digest.on': True,
'tools.auth_digest.realm': 'localhost',
'tools.auth_digest.get_ha1': get_ha1,
'tools.auth_digest.key': 'a565c27146791cfb',
'tools.auth_digest.debug': 'True'}}
root = Root()
root.digest = DigestProtected()
cherrypy.tree.mount(root, config=conf)
setup_server = staticmethod(setup_server)
def testPublic(self):
self.getPage("/")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
self.assertBody('This is public.')
def testDigest(self):
self.getPage("/digest/")
self.assertStatus(401)
value = None
for k, v in self.headers:
if k.lower() == "www-authenticate":
if v.startswith("Digest"):
value = v
break
if value is None:
self._handlewebError("Digest authentification scheme was not found")
value = value[7:]
items = value.split(', ')
tokens = {}
for item in items:
key, value = item.split('=')
tokens[key.lower()] = value
missing_msg = "%s is missing"
bad_value_msg = "'%s' was expecting '%s' but found '%s'"
nonce = None
if 'realm' not in tokens:
self._handlewebError(missing_msg % 'realm')
elif tokens['realm'] != '"localhost"':
self._handlewebError(bad_value_msg % ('realm', '"localhost"', tokens['realm']))
if 'nonce' not in tokens:
self._handlewebError(missing_msg % 'nonce')
else:
nonce = tokens['nonce'].strip('"')
if 'algorithm' not in tokens:
self._handlewebError(missing_msg % 'algorithm')
elif tokens['algorithm'] != '"MD5"':
self._handlewebError(bad_value_msg % ('algorithm', '"MD5"', tokens['algorithm']))
if 'qop' not in tokens:
self._handlewebError(missing_msg % 'qop')
elif tokens['qop'] != '"auth"':
self._handlewebError(bad_value_msg % ('qop', '"auth"', tokens['qop']))
get_ha1 = auth_digest.get_ha1_dict_plain({'test' : 'test'})
# Test user agent response with a wrong value for 'realm'
base_auth = 'Digest username="test", realm="wrong realm", nonce="%s", uri="/digest/", algorithm=MD5, response="%s", qop=auth, nc=%s, cnonce="1522e61005789929"'
auth_header = base_auth % (nonce, '11111111111111111111111111111111', '00000001')
auth = auth_digest.HttpDigestAuthorization(auth_header, 'GET')
# calculate the response digest
ha1 = get_ha1(auth.realm, 'test')
response = auth.request_digest(ha1)
# send response with correct response digest, but wrong realm
auth_header = base_auth % (nonce, response, '00000001')
self.getPage('/digest/', [('Authorization', auth_header)])
self.assertStatus(401)
# Test that must pass
base_auth = 'Digest username="test", realm="localhost", nonce="%s", uri="/digest/", algorithm=MD5, response="%s", qop=auth, nc=%s, cnonce="1522e61005789929"'
auth_header = base_auth % (nonce, '11111111111111111111111111111111', '00000001')
auth = auth_digest.HttpDigestAuthorization(auth_header, 'GET')
# calculate the response digest
ha1 = get_ha1('localhost', 'test')
response = auth.request_digest(ha1)
# send response with correct response digest
auth_header = base_auth % (nonce, response, '00000001')
self.getPage('/digest/', [('Authorization', auth_header)])
self.assertStatus('200 OK')
self.assertBody("Hello test, you've been authorized.")
|
cmshobe/landlab
|
refs/heads/master
|
tests/grid/test_hex_grid/test_edges.py
|
3
|
import numpy as np
import pytest
from landlab import HexModelGrid
def test_perimeter_nodes():
"""Test perimeter nodes of a hex grid."""
grid = HexModelGrid((3, 4), node_layout="rect")
assert np.all(grid.perimeter_nodes == [3, 7, 11, 10, 9, 8, 4, 0, 1, 2])
def test_right_edge_nodes():
"""Test right edge nodes of a hex grid."""
grid = HexModelGrid((3, 4), node_layout="rect")
assert np.all(grid.nodes_at_right_edge == [3, 7, 11])
def test_top_edge_nodes():
"""Test top edge nodes of a hex grid."""
grid = HexModelGrid((3, 4), node_layout="rect")
assert np.all(grid.nodes_at_top_edge == [8, 9, 10, 11])
def test_left_edge_nodes():
"""Test left edge nodes of a hex grid."""
grid = HexModelGrid((3, 4), node_layout="rect")
assert np.all(grid.nodes_at_left_edge == [0, 4, 8])
def test_bottom_edge_nodes():
"""Test bottom edge nodes of a hex grid."""
grid = HexModelGrid((3, 4), node_layout="rect")
assert np.all(grid.nodes_at_bottom_edge == [0, 1, 2, 3])
@pytest.mark.parametrize("edge_name", ("right", "top", "left", "bottom"))
def test_edges_are_readonly(edge_name):
grid = HexModelGrid((3, 4), node_layout="rect")
assert not grid.perimeter_nodes.flags["WRITEABLE"]
with pytest.raises(ValueError):
getattr(grid, "nodes_at_" + edge_name)[0] = 999
@pytest.mark.parametrize("edge_name", ("right", "top", "left", "bottom"))
def test_edges_are_cached(edge_name):
grid = HexModelGrid((3, 4), node_layout="rect")
x = grid.perimeter_nodes
assert grid.perimeter_nodes is x
x = getattr(grid, "nodes_at_" + edge_name)
assert getattr(grid, "nodes_at_" + edge_name) is x
|
crewjam/yact
|
refs/heads/master
|
third_party/build.py
|
1
|
# Copyright (c) 2010 Ross Kinder. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This script downloads and builds the third party dependencies for Windows.
# For each dependency do roughly the following steps:
# - download and extract the source code
# - patch the source and build environment as needed
# - force the runtime library configuration to be /MD or /MDd
# - force the building of static libraries
# - fix bugs
# - build both the Debug and Release versions of the library
# - create an empty file in called <project>/.stamp to indicate success
#
# If the stamp file is present, then the script will not try to build the
# dependency again. If you have trouble with a build, the best approach is to
# remove the source directory before rerunning the script.
#
# The current windows dependencies are:
#
# ================= ===================================================
# Name Purpose
# ================= ===================================================
# Google Gtest Unittests
# Google Gmock Unittests
# ================= ===================================================
#
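#
# Illustrative sketch (not from the original file) of how a dependency is
# expected to plug into the Builder helpers defined further down; the class
# name, URL, checksum and directory are placeholders:
#
#   class GtestBuilder(Builder):
#       def __init__(self):
#           self.path = pj(wd, "gtest-1.6.0")                     # hypothetical
#       def Fetch(self):
#           self.Download("http://example.com/gtest-1.6.0.zip", "<md5>")
#           self.ExtractZip("gtest-1.6.0.zip")
#       def Patch(self):
#           self.SetRuntimeLibrary()
#       def Build(self):
#           subprocess.call([self.MSBUILD_COMMAND, pj(self.path, "gtest.sln")])
#
# A driver would then call Fetch/Patch/Build only when HasStamp() is false and
# WriteStamp() afterwards.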
import urllib
import os.path
import tarfile
import subprocess
import os
import stat
import shutil
import zipfile
import xml.dom.minidom
import sys
from os.path import isfile, isdir, join as pj, exists
import hashlib
import optparse
# To save typing:
# pj = os.path.join
# wd = working directory
wd = os.path.dirname(os.path.abspath(__file__))
class XmlEditor(object):
def __init__(self, path):
print "[xml]", path
self.path = path
content = file(path, "rb").read()
content = content.replace('<?xml version="1.0" encoding="shift_jis"?>', '')
self.dom = xml.dom.minidom.parseString(content)
def Save(self):
os.chmod(self.path, stat.S_IWRITE)
self.dom.writexml(file(self.path, "wb"))
def rm(path):
if not exists(path):
return
if isdir(path):
for sub_path in os.listdir(path):
rm(pj(path, sub_path))
print "[rm]", path
while isdir(path):
try:
os.rmdir(path)
except WindowsError, err:
pass
else:
print "[rm]", path
os.chmod(path, stat.S_IWRITE)
for i in range(10):
try:
os.unlink(path)
except:
continue
break
class Builder(object):
# True if the dependencies should be built with /MT or /MTd
  # False if the dependencies should be built with /MD or /MDd
STATIC_RUNTIME = False
MSBUILD_COMMAND = "msbuild"
def __init__(self):
pass
def Fetch(self):
        raise NotImplementedError()
def Patch(self):
        raise NotImplementedError()
def Build(self):
        raise NotImplementedError()
def WriteStamp(self):
file(pj(self.path, ".stamp"), "w").write("")
def HasStamp(self):
return isfile(pj(self.path, ".stamp"))
#---- Helper Functions -------------------------------------------------------
def Download(self, url, checksum):
path = url.split("/")[-1]
path = pj(wd, path)
if isfile(path):
if hashlib.md5(file(path, "rb").read()).hexdigest() == checksum:
print "[ ok ]", url
return
print "[download]", url
urllib.urlretrieve(url, path)
assert hashlib.md5(file(path, "rb").read()).hexdigest() == checksum
def ExtractTarGz(self, path, out = None):
if out is None: out = wd
if not exists(path): path = pj(wd, path)
print "[extract]", path
tar = tarfile.open(path, mode="r:gz")
tar.extractall(out)
def ExtractZip(self, path, out = None):
if out is None: out = wd
if not exists(path): path = pj(wd, path)
print "[extract]", path
archive = zipfile.ZipFile(path, mode = "r")
archive.extractall(out)
def UpgradeVisualStudioFiles(self, root = None):
    if root is None: root = self.path
for dirpath, dirnames, filenames in os.walk(root):
for filename in filenames:
filename = pj(dirpath, filename)
if filename.endswith(".vcproj"):
self.UpgradeVisualStudioFile(filename)
def UpgradeVisualStudioFile(self, filename):
if filename.endswith(".sln"):
os.chmod(filename, stat.S_IWRITE)
subprocess.call(["devenv", "/upgrade", filename])
return
try:
xml = XmlEditor(filename)
except:
print "[WARNING] Cannot parse XML, upgrading anyway: " + filename
subprocess.call(["devenv", "/upgrade", filename])
return
for el in xml.dom.getElementsByTagName("VisualStudioProject"):
if float(el.getAttribute("Version")) >= 10.0:
continue
print "[upgrade]", filename
os.chmod(filename, stat.S_IWRITE)
subprocess.call(["devenv", "/upgrade", filename])
def SetRuntimeLibrary(self, filename = None):
if filename is None:
filename = self.path
if isdir(filename):
for dirpath, dirnames, filenames in os.walk(filename):
for filename in filenames:
filename = pj(dirpath, filename)
if filename.endswith(".vcproj") or filename.endswith(".vsprops"):
print '[ setruntime ]', filename
self.SetRuntimeLibrary(filename)
return
xml = XmlEditor(filename)
for filter in xml.dom.getElementsByTagName("Tool"):
if filter.getAttribute("Name") != u'VCCLCompilerTool':
continue
if self.STATIC_RUNTIME:
if filter.getAttribute("RuntimeLibrary") == u"2":
filter.setAttribute("RuntimeLibrary", "0")
elif filter.getAttribute("RuntimeLibrary") == u"3":
filter.setAttribute("RuntimeLibrary", "1")
else:
if filter.getAttribute("RuntimeLibrary") == u"0":
filter.setAttribute("RuntimeLibrary", "2")
elif filter.getAttribute("RuntimeLibrary") == u"1":
filter.setAttribute("RuntimeLibrary", "3")
xml.Save()
def BuildSolution(self, path, target=None, configurations=None, args=None):
cmd = [self.MSBUILD_COMMAND, path]
if target is not None:
cmd += ["/t:" + target]
if args:
cmd += args
if configurations is None:
configurations = ["Debug", "Release"]
for configuration in configurations:
subprocess.check_call(cmd + ["/p:Configuration=" + configuration],
cwd = pj(wd))
class GtestBuilder(Builder):
path = pj(wd, "gtest-1.5.0")
vcproj = pj(path, "msvc", "gtest.vcproj")
def Fetch(self):
self.Download("http://googletest.googlecode.com/files/gtest-1.5.0.tar.gz",
"7e27f5f3b79dd1ce9092e159cdbd0635")
def Patch(self):
rm(self.path)
self.ExtractTarGz("gtest-1.5.0.tar.gz")
self.UpgradeVisualStudioFiles(pj(self.path, "msvc"))
xml = XmlEditor(self.vcproj)
for el in xml.dom.getElementsByTagName("Tool"):
if el.getAttribute("Name") != u"VCLibrarianTool":
continue
el.setAttribute("OutputFile",
el.getAttribute("OutputFile").replace("gtestd.lib", "gtest.lib"))
xml.Save()
self.SetRuntimeLibrary(pj(self.path, "msvc"))
def Build(self):
self.BuildSolution(pj(self.path, "msvc", "gtest.sln"))
class GmockBuilder(Builder):
path = pj(wd, "gmock-1.5.0")
def Fetch(self):
self.Download("http://googlemock.googlecode.com/files/gmock-1.5.0.tar.gz",
"d9e62a4702c300ae9c87284ca8da7fac")
def Patch(self):
rm(self.path)
self.ExtractTarGz("gmock-1.5.0.tar.gz")
self.UpgradeVisualStudioFiles(pj(self.path, "msvc"))
self.SetRuntimeLibrary(pj(self.path, "msvc"))
def Build(self):
self.BuildSolution(pj(self.path, "msvc", "gmock.sln"))
builders = [
GtestBuilder(),
GmockBuilder(),
]
def main(argv):
global builders
parser = optparse.OptionParser()
parser.add_option("--build", action="store_true")
parser.add_option("--rebuild", action="store_true")
parser.add_option("--clean", action="store_true")
parser.add_option("--Debug", action="store_true")
parser.add_option("--Release", action="store_true")
parser.add_option("--msbuild", default="msbuild")
parser.add_option("--static-runtime", action="store_true", default=False)
parser.add_option("--dll-runtime", action="store_false",
dest="static_runtime")
(options, args) = parser.parse_args(argv)
if os.name != 'nt':
        print >>sys.stderr, "This program should only be used to build the Windows dependencies."
        return 1
Builder.STATIC_RUNTIME = options.static_runtime
Builder.MSBUILD_COMMAND = options.msbuild
if options.rebuild or options.clean:
for builder in builders:
rm(builder.path)
if options.clean:
return
builders = [builder for builder in builders if not builder.HasStamp()]
for builder in builders:
builder.Fetch()
for builder in builders:
builder.Patch()
for builder in builders:
builder.Build()
builder.WriteStamp()
if __name__ == "__main__":
main(sys.argv)
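
# Example invocations (assumed; the flags correspond to the optparse options
# defined in main() above):
#
#   python build.py
#   python build.py --rebuild --msbuild=msbuild --static-runtime
#   python build.py --clean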
|
h00dy/rura
|
refs/heads/master
|
rura/config/__init__.py
|
12133432
| |
iivic/BoiseStateX
|
refs/heads/master
|
lms/djangoapps/teams/tests/__init__.py
|
12133432
| |
emonty/dox
|
refs/heads/master
|
dox/tests/__init__.py
|
12133432
| |
rmoorman/sqlalchemy-i18n
|
refs/heads/master
|
tests/test_expressions.py
|
2
|
from sqlalchemy_i18n.expressions import current_locale
class TestCurrentLocaleExpression(object):
def test_render(self):
assert str(current_locale()) == ':current_locale'
|