| repo_name (string, length 5–100) | ref (string, length 12–67) | path (string, length 4–244) | copies (string, length 1–8) | content (string, length 0–1.05M, nullable) |
|---|---|---|---|---|
jleinonen/eoshdf
|
refs/heads/master
|
eoshdf/eoshdf.py
|
1
|
"""
Copyright (C) 2015--2016 Jussi Leinonen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to
do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import numpy as np
from pyhdf import SD, HDF, VS
class EOSHDF(object):
"""
Reader for HDF4 files, specifically those created by the NASA EOS data
systems.
Usage instructions:
To open a file, initialize an EOSHDF object, passing a file name to the
constructor. For example:
import eoshdf
eos = eoshdf.EOSHDF("example.hdf")
To list the available datasets, use the list_datasets method:
eos.list_datasets()
To read a dataset, use the read_data method, passing the dataset name:
data = eos.read_data("example_dataset")
NOTE: No conversion factors or missing value masking are applied to read
data. If you need these, you must apply them manually.
"""
def __init__(self, file_name):
self._file_name = file_name
self._hdf = None
self._vs = None
self._sd = None
def __del__(self):
self._close_all()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._close_all()
def _close_all(self):
if self._vs is not None:
self._vs.end()
self._vs = None
self._hdf.close()
self._hdf = None
if self._sd is not None:
self._sd.end()
self._sd = None
def _open_vs(self):
if self._vs is None:
self._hdf = HDF.HDF(self._file_name)
self._vs = self._hdf.vstart()
return self._vs
def _open_sd(self):
if self._sd is None:
self._sd = SD.SD(self._file_name)
return self._sd
def list_datasets(self):
"""
Lists all available datasets. If you need to distinguish between
the VS and SD interfaces, use the list_VS_datasets and
list_SD_datasets methods.
Returns:
A list of the available datasets.
This method might list some spurious datasets. In that case,
refer to the data documentation to find which dataset to read.
"""
vs_datasets = [v[0] for v in self.list_VS_datasets()]
sd_datasets = list(self.list_SD_datasets().keys())
return list(sorted(vs_datasets + sd_datasets))
def read_data(self, ds_name, dtype=np.float64):
"""
Reads a dataset. If you need to distinguish between
the VS and SD interfaces, use the read_VS_data and
read_SD_data methods.
Args:
ds_name: The dataset name.
dtype: The datatype for the returned array.
Returns:
A numpy array containing the data.
NOTE: No conversion factors or missing value masking are applied.
If you need these, you must apply them manually.
"""
try:
data = self.read_SD_data(ds_name, dtype=dtype)
except HDF.HDF4Error:
try:
data = self.read_VS_data(ds_name, dtype=dtype)
except HDF.HDF4Error:
raise IOError("Nonexistent data set.")
return data
def read_VS_data(self, ds_name, dtype=np.float64):
vs = self._open_vs()
vd = vs.attach(ds_name)
arr = np.array(vd[:], dtype=dtype).ravel()
vd.detach()
return arr
def list_VS_datasets(self):
vs = self._open_vs()
return vs.vdatainfo()
def read_SD_data(self, ds_name, dtype=np.float64):
sd = self._open_sd()
sds = sd.select(ds_name)
arr = np.array(sds[:], dtype=dtype)
sds.endaccess()
return arr
def list_SD_datasets(self):
sd = self._open_sd()
return sd.datasets()
def read_1D_data(self, var_name, dtype=np.float64):
return self.read_VS_data(var_name, dtype=dtype)
def read_2D_data(self, var_name, dtype=np.float64):
return self.read_SD_data(var_name, dtype=dtype)
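# --- Editor's addition: a minimal, hedged usage sketch (not part of the original module) ---
# It demonstrates the context-manager protocol defined by __enter__/__exit__ above.
# "example.hdf" and "example_dataset" are placeholder names, not files shipped with eoshdf.
if __name__ == "__main__":
    with EOSHDF("example.hdf") as eos:
        print(eos.list_datasets())
        data = eos.read_data("example_dataset")
        print(data.shape, data.dtype)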
|
sinisterchipmunk/tomato
|
refs/heads/master
|
ext/tomato/external/v8/tools/jsmin.py
|
52
|
#!/usr/bin/python2.4
# Copyright 2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A JavaScript minifier.
It is far from being a complete JS parser, so there are many valid
JavaScript programs that will be ruined by it. Another strangeness is that
it accepts $ and % as parts of identifiers. It doesn't merge lines or strip
out blank lines in order to ease debugging. Variables at the top scope are
properties of the global object so we can't rename them. It is assumed that
you introduce variables with var as if JavaScript followed C++ scope rules
around curly braces, so the declaration must be above the first use.
Use as:
import jsmin
minifier = jsmin.JavaScriptMinifier()
program1 = minifier.JSMinify(program1)
program2 = minifier.JSMinify(program2)
"""
import re
class JavaScriptMinifier(object):
"""An object that you can feed code snippets to to get them minified."""
def __init__(self):
# We prepopulate the list of identifiers that shouldn't be used. These
# short language keywords could otherwise be used by the script as variable
# names.
self.seen_identifiers = {"do": True, "in": True}
self.identifier_counter = 0
self.in_comment = False
self.map = {}
self.nesting = 0
def LookAtIdentifier(self, m):
"""Records identifiers or keywords that we see in use.
(So we can avoid renaming variables to these strings.)
Args:
m: The match object returned by re.search.
Returns:
Nothing.
"""
identifier = m.group(1)
self.seen_identifiers[identifier] = True
def Push(self):
"""Called when we encounter a '{'."""
self.nesting += 1
def Pop(self):
"""Called when we encounter a '}'."""
self.nesting -= 1
# We treat each top-level opening brace as a single scope that can span
# several sets of nested braces.
if self.nesting == 0:
self.map = {}
self.identifier_counter = 0
def Declaration(self, m):
"""Rewrites bits of the program selected by a regexp.
These can be curly braces, literal strings, function declarations and var
declarations. (These last two must be on one line including the opening
curly brace of the function for their variables to be renamed).
Args:
m: The match object returned by re.search.
Returns:
The string that should replace the match in the rewritten program.
"""
matched_text = m.group(0)
if matched_text == "{":
self.Push()
return matched_text
if matched_text == "}":
self.Pop()
return matched_text
if re.match("[\"'/]", matched_text):
return matched_text
m = re.match(r"var ", matched_text)
if m:
var_names = matched_text[m.end():]
var_names = re.split(r",", var_names)
return "var " + ",".join(map(self.FindNewName, var_names))
m = re.match(r"(function\b[^(]*)\((.*)\)\{$", matched_text)
if m:
up_to_args = m.group(1)
args = m.group(2)
args = re.split(r",", args)
self.Push()
return up_to_args + "(" + ",".join(map(self.FindNewName, args)) + "){"
if matched_text in self.map:
return self.map[matched_text]
return matched_text
def CharFromNumber(self, number):
"""A single-digit base-52 encoding using a-zA-Z."""
if number < 26:
return chr(number + 97)
number -= 26
return chr(number + 65)
def FindNewName(self, var_name):
"""Finds a new 1-character or 2-character name for a variable.
Enters it into the mapping table for this scope.
Args:
var_name: The name of the variable before renaming.
Returns:
The new name of the variable.
"""
new_identifier = ""
# Variable names that end in _ are member variables of the global object,
# so they can be visible from code in a different scope. We leave them
# alone.
if var_name in self.map:
return self.map[var_name]
if self.nesting == 0:
return var_name
while True:
identifier_first_char = self.identifier_counter % 52
identifier_second_char = self.identifier_counter / 52
new_identifier = self.CharFromNumber(identifier_first_char)
if identifier_second_char != 0:
new_identifier = (
self.CharFromNumber(identifier_second_char - 1) + new_identifier)
self.identifier_counter += 1
if not new_identifier in self.seen_identifiers:
break
self.map[var_name] = new_identifier
return new_identifier
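  # Editor's note (hedged illustration): the counter above maps 0 -> 'a',
  # 25 -> 'z', 26 -> 'A', 51 -> 'Z', 52 -> 'aa', 53 -> 'ab', and so on,
  # skipping any candidate that already appears in seen_identifiers.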
def RemoveSpaces(self, m):
"""Returns literal strings unchanged, replaces other inputs with group 2.
Other inputs are replaced with the contents of capture 1. This is either
a single space or an empty string.
Args:
m: The match object returned by re.search.
Returns:
The string that should be inserted instead of the matched text.
"""
entire_match = m.group(0)
replacement = m.group(1)
if re.match(r"'.*'$", entire_match):
return entire_match
if re.match(r'".*"$', entire_match):
return entire_match
if re.match(r"/.+/$", entire_match):
return entire_match
return replacement
def JSMinify(self, text):
"""The main entry point. Takes a text and returns a compressed version.
The compressed version hopefully does the same thing. Line breaks are
preserved.
Args:
text: The text of the code snippet as a multiline string.
Returns:
The compressed text of the code snippet as a multiline string.
"""
new_lines = []
for line in re.split(r"\n", text):
line = line.replace("\t", " ")
if self.in_comment:
m = re.search(r"\*/", line)
if m:
line = line[m.end():]
self.in_comment = False
else:
new_lines.append("")
continue
if not self.in_comment:
line = re.sub(r"/\*.*?\*/", " ", line)
line = re.sub(r"//.*", "", line)
m = re.search(r"/\*", line)
if m:
line = line[:m.start()]
self.in_comment = True
# Strip leading and trailing spaces.
line = re.sub(r"^ +", "", line)
line = re.sub(r" +$", "", line)
# A regexp that matches a literal string surrounded by "double quotes".
# This regexp can handle embedded backslash-escaped characters including
# embedded backslash-escaped double quotes.
double_quoted_string = r'"(?:[^"\\]|\\.)*"'
# A regexp that matches a literal string surrounded by 'double quotes'.
single_quoted_string = r"'(?:[^'\\]|\\.)*'"
# A regexp that matches a regexp literal surrounded by /slashes/.
# Don't allow a regexp to have a ) before the first ( since that's a
# syntax error and it's probably just two unrelated slashes.
slash_quoted_regexp = r"/(?:(?=\()|(?:[^()/\\]|\\.)+)(?:\([^/\\]|\\.)*/"
# Replace multiple spaces with a single space.
line = re.sub("|".join([double_quoted_string,
single_quoted_string,
slash_quoted_regexp,
"( )+"]),
self.RemoveSpaces,
line)
# Strip single spaces unless they have an identifier character both before
# and after the space. % and $ are counted as identifier characters.
line = re.sub("|".join([double_quoted_string,
single_quoted_string,
slash_quoted_regexp,
r"(?<![a-zA-Z_0-9$%]) | (?![a-zA-Z_0-9$%])()"]),
self.RemoveSpaces,
line)
# Collect keywords and identifiers that are already in use.
if self.nesting == 0:
re.sub(r"([a-zA-Z0-9_$%]+)", self.LookAtIdentifier, line)
function_declaration_regexp = (
r"\bfunction" # Function definition keyword...
r"( [\w$%]+)?" # ...optional function name...
r"\([\w$%,]+\)\{") # ...argument declarations.
# Unfortunately the keyword-value syntax { key:value } makes the key look
# like a variable where in fact it is a literal string. We use the
# presence or absence of a question mark to try to distinguish between
# this case and the ternary operator: "condition ? iftrue : iffalse".
if re.search(r"\?", line):
block_trailing_colon = r""
else:
block_trailing_colon = r"(?![:\w$%])"
# Variable use. Cannot follow a period or precede a colon.
variable_use_regexp = r"(?<![.\w$%])[\w$%]+" + block_trailing_colon
line = re.sub("|".join([double_quoted_string,
single_quoted_string,
slash_quoted_regexp,
r"\{", # Curly braces.
r"\}",
r"\bvar [\w$%,]+", # var declarations.
function_declaration_regexp,
variable_use_regexp]),
self.Declaration,
line)
new_lines.append(line)
return "\n".join(new_lines) + "\n"
|
baskervald/steamchat.py
|
refs/heads/master
|
steamchat/view.py
|
2
|
import curses
FLIST_WIDTH = 40
class View:
def main(self, stdscr):
self.resize()
self.stdscr = stdscr
while self.running:
self.stdscr.refresh()
self.flist.refresh()
self.chat.refresh()
self.test.refresh()
self.handler(self.stdscr.getch())
def __init__(self):
print('inited')
def resize(self):
# Resizing doesn't even come close to fucking working
self.width = curses.COLS
self.height = curses.LINES
curses.resizeterm(curses.LINES, curses.COLS)
if hasattr(self, 'flist'):
del self.flist
self.flist = curses.newwin(self.height, FLIST_WIDTH, 0, 0)
self.flist.border(0)
if hasattr(self, 'chat'):
del self.chat
self.chat = curses.newwin(self.height, self.width-FLIST_WIDTH-1, 0, FLIST_WIDTH+1)
self.chat.border(0)
if hasattr(self, 'test'):
del self.test
self.test = curses.newwin(self.height - 15, self.width - FLIST_WIDTH - 3, 1, FLIST_WIDTH+2)
self.test.border(0)
def start(self):
self.running = True
curses.wrapper(self.main)
def handler(self, char):
if char == ord('p'):
self.running = False
elif char == curses.KEY_RESIZE:
self.resize()
View().start()
|
alazaro/tennis_tournament
|
refs/heads/master
|
permission_backend_nonrel/backends.py
|
6
|
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from models import UserPermissionList, GroupPermissionList
class NonrelPermissionBackend(ModelBackend):
"""
Implements Django's permission system on Django-Nonrel
"""
supports_object_permissions = False
supports_anonymous_user = True
def get_group_permissions(self, user_obj, user_perm_obj=None):
"""
Returns a set of permission strings that this user has through his/her
groups.
"""
if not hasattr(user_obj, '_group_perm_cache'):
perms = set([])
if user_perm_obj is None:
user_perm_obj, created = UserPermissionList.objects.get_or_create(user=user_obj)
group_perm_lists = GroupPermissionList.objects.filter(group__id__in=user_perm_obj.group_fk_list)
for group_perm_list in group_perm_lists:
perms.update(group_perm_list.permission_list)
user_obj._group_perm_cache = perms
return user_obj._group_perm_cache
def get_all_permissions(self, user_obj):
if user_obj.is_anonymous():
return set()
if not hasattr(user_obj, '_perm_cache'):
try:
pl = UserPermissionList.objects.get(user=user_obj)
user_obj._perm_cache = set(pl.permission_list)
except UserPermissionList.DoesNotExist:
pl = None
user_obj._perm_cache = set()
user_obj._perm_cache.update(self.get_group_permissions(user_obj,
pl))
return user_obj._perm_cache
def has_perm(self, user_obj, perm):
return perm in self.get_all_permissions(user_obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
"""
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
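    # Editor's note (hedged illustration): permission strings follow Django's
    # "<app_label>.<codename>" convention, so a permission such as
    # "blog.change_post" has the app_label "blog" and would make
    # has_module_perms(user, "blog") return True.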
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
|
glue-viz/glue-qt
|
refs/heads/master
|
glue/core/tests/test_link_manager.py
|
2
|
#pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
import numpy as np
from ..data import Data, Component
from ..component_link import ComponentLink
from ..link_manager import (LinkManager, accessible_links, discover_links,
find_dependents)
from ..data import ComponentID, DerivedComponent
from ..data_collection import DataCollection
comp = Component(data=np.array([1, 2, 3]))
def example_components(self, add_derived=True):
""" Link Topology
--- c1---c3--\
data --| --c5,c6 (c7,c8 disconnected)
--- c2---c4--/
"""
self.data = Data()
c1 = ComponentID('c1')
c2 = ComponentID('c2')
c3 = ComponentID('c3')
c4 = ComponentID('c4')
c5 = ComponentID('c5')
c6 = ComponentID('c6')
c7 = ComponentID('c7')
c8 = ComponentID('c8')
dummy_using = lambda x, y: (x, y)
self.cs = [c1, c2, c3, c4, c5, c6, c7, c8]
self.links = [ComponentLink([c1], c3, lambda x:x),
ComponentLink([c2], c4, lambda x:x),
ComponentLink([c3], c1, lambda x:x),
ComponentLink([c4], c2, lambda x:x),
ComponentLink([c3, c4], c5, dummy_using),
ComponentLink([c3, c4], c6, dummy_using)]
self.data.add_component(comp, c1)
self.data.add_component(comp, c2)
if add_derived:
for i in [0, 1, 4, 5]:
dc = DerivedComponent(self.data, self.links[i])
self.data.add_component(dc, dc.link.get_to_id())
self.primary = [c1, c2]
self.direct = [c3, c4]
self.derived = [c5, c6]
self.inaccessible = [c7, c8]
class TestAccessibleLinks(object):
def setup_method(self, method):
self.cs = [ComponentID("%i" % i) for i in xrange(10)]
def test_returned_if_available(self):
cids = self.cs[0:5]
links = [ComponentLink([self.cs[0]], self.cs[1])]
assert links[0] in accessible_links(cids, links)
def test_returned_if_reachable(self):
cids = self.cs[0:5]
links = [ComponentLink([self.cs[0]], self.cs[6])]
assert links[0] in accessible_links(cids, links)
def test_not_returned_if_not_reachable(self):
cids = self.cs[0:5]
links = [ComponentLink([self.cs[6]], self.cs[7])]
assert not links[0] in accessible_links(cids, links)
class TestDiscoverLinks(object):
def setup_method(self, method):
example_components(self)
def test_correct_discover(self):
"""discover_links finds the correct links"""
links = discover_links(self.data, self.links)
for i in self.inaccessible:
assert not i in links
for d in self.direct:
assert d in links
for d in self.derived:
assert d in links
for p in self.primary:
assert not p in links
def test_links_point_to_proper_ids(self):
""" Dictionary values are ComponentLinks which
point to the keys """
links = discover_links(self.data, self.links)
for cid in links:
assert cid == links[cid].get_to_id()
def test_shortest_path(self):
""" Shortcircuit c5 to c1, yielding 2 ways to get to c5.
Ensure that the shortest path is chosen """
self.links.append(ComponentLink([self.cs[0]], self.cs[4]))
links = discover_links(self.data, self.links)
assert links[self.cs[4]] is self.links[-1]
class TestFindDependents(object):
def setup_method(self, method):
example_components(self)
def test_propagated(self):
to_remove = self.links[0]
result = find_dependents(self.data, to_remove)
expected = set([self.cs[2], self.cs[4], self.cs[5]])
assert expected == result
def test_basic(self):
to_remove = self.links[4]
result = find_dependents(self.data, to_remove)
expected = set([self.cs[4]])
assert expected == result
class TestLinkManager(object):
def test_add_links(self):
id1 = ComponentID('id1')
id2 = ComponentID('id2')
id3 = ComponentID('id3')
lm = LinkManager()
using = lambda x, y: 0
link = ComponentLink([id1, id2], id3, using)
lm.add_link(link)
links = lm.links
assert links == [link]
def test_remove_link(self):
id1 = ComponentID('id1')
id2 = ComponentID('id2')
id3 = ComponentID('id3')
lm = LinkManager()
using = lambda x, y: 0
link = ComponentLink([id1, id2], id3, using)
lm.add_link(link)
lm.remove_link(link)
links = lm.links
assert links == []
def test_setup(self):
example_components(self, add_derived=False)
expected = set()
assert set(self.data.derived_components) == expected
def test_update_data_components_adds_correctly(self):
example_components(self, add_derived=False)
lm = LinkManager()
map(lm.add_link, self.links)
lm.update_data_components(self.data)
derived = set(self.data.derived_components)
expected = set(self.derived + self.direct)
assert derived == expected
def test_update_data_components_removes_correctly(self):
#add all but last link to manager
example_components(self, add_derived=False)
lm = LinkManager()
map(lm.add_link, self.links[:-1])
#manually add last link as derived component
dc = DerivedComponent(self.data, self.links[-1])
self.data.add_component(dc, dc.link.get_to_id())
removed = set([dc.link.get_to_id()])
assert dc.link.get_to_id() in self.data.derived_components
# this link should be removed upon update_components
lm.update_data_components(self.data)
derived = set(self.data.derived_components)
expected = set(self.direct + self.derived) - removed
assert derived == expected
def test_derived_links_correct_with_mergers(self):
"""When the link manager merges components, links that depend on the
merged components remain functional"""
from ..link_helpers import LinkSame
d1 = Data(x=[[1, 2], [3, 4]])
d2 = Data(u=[[5, 6], [7, 8]])
dc = DataCollection([d1, d2])
#link world coordinates...
dc.add_link(LinkSame(
d1.get_world_component_id(0), d2.get_world_component_id(0)))
dc.add_link(LinkSame(
d1.get_world_component_id(1), d2.get_world_component_id(1)))
#and then retrieve pixel coordinates
np.testing.assert_array_equal(
d2[d1.get_pixel_component_id(0)], [[0, 0], [1, 1]])
np.testing.assert_array_equal(
d1[d2.get_pixel_component_id(1)], [[0, 1], [0, 1]])
def test_binary_links_correct_with_mergers(self):
"""Regression test. BinaryComponentLinks should work after mergers"""
from ..link_helpers import LinkSame
d1 = Data(x=[1, 2, 3], y=[2, 3, 4])
d2 = Data(u=[2, 3, 4], v=[3, 4, 5])
z = d1.id['x'] + d1.id['y']
d1.add_component_link(z, 'z')
dc = DataCollection([d1, d2])
dc.add_link(LinkSame(d2.id['u'], d1.id['x']))
assert d1.find_component_id('x') is None
np.testing.assert_array_equal(d1['z'], [3, 5, 7])
def test_complex_links_correct_with_mergers(self):
"""Regression test. multi-level links should work after mergers"""
from ..link_helpers import LinkSame
d1 = Data(x=[1, 2, 3], y=[2, 3, 4])
d2 = Data(u=[2, 3, 4], v=[3, 4, 5])
z = d1.id['x'] + d1.id['y'] + 5
d1.add_component_link(z, 'z')
dc = DataCollection([d1, d2])
dc.add_link(LinkSame(d2.id['u'], d1.id['x']))
assert d1.find_component_id('x') is None
np.testing.assert_array_equal(d1['z'], [8, 10, 12])
|
Lujeni/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource_info.py
|
20
|
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_devtestlabartifactsource_info
version_added: "2.9"
short_description: Get Azure DevTest Lab Artifact Source facts
description:
- Get facts of Azure DevTest Lab Artifact Source.
options:
resource_group:
description:
- The name of the resource group.
required: True
type: str
lab_name:
description:
- The name of DevTest Lab.
required: True
type: str
name:
description:
- The name of DevTest Lab Artifact Source.
type: str
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
type: list
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Get instance of DevTest Lab Artifact Source
azure_rm_devtestlabartifactsource_info:
resource_group: myResourceGroup
lab_name: myLab
name: myArtifactSource
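# Editor's addition (hedged sketch): the list form, optionally filtered by tags in the
# documented 'key' or 'key:value' format; all values below are placeholders.
- name: List instances of DevTest Lab Artifact Source
  azure_rm_devtestlabartifactsource_info:
    resource_group: myResourceGroup
    lab_name: myLab
    tags:
      - key:value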
'''
RETURN = '''
artifactsources:
description:
- A list of dictionaries containing facts for DevTest Lab Artifact Source.
returned: always
type: complex
contains:
id:
description:
- The identifier of the artifact source.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/ar
tifactSources/myArtifactSource"
resource_group:
description:
- Name of the resource group.
returned: always
type: str
sample: myResourceGroup
lab_name:
description:
- Name of the lab.
returned: always
type: str
sample: myLab
name:
description:
- The name of the artifact source.
returned: always
type: str
sample: myArtifactSource
display_name:
description:
- The artifact source's display name.
returned: always
type: str
sample: Public Artifact Repo
source_type:
description:
- The artifact source's type.
returned: always
type: str
sample: github
is_enabled:
description:
- Is the artifact source enabled.
returned: always
type: str
sample: True
uri:
description:
- URI of the artifact source.
returned: always
type: str
sample: https://github.com/Azure/azure-devtestlab.git
folder_path:
description:
- The folder containing artifacts.
returned: always
type: str
sample: /Artifacts
arm_template_folder_path:
description:
- The folder containing Azure Resource Manager templates.
returned: always
type: str
sample: /Environments
provisioning_state:
description:
- Provisioning state of artifact source.
returned: always
type: str
sample: Succeeded
tags:
description:
- The tags of the resource.
returned: always
type: complex
sample: "{ 'MyTag': 'MyValue' }"
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.devtestlabs import DevTestLabsClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMDtlArtifactSourceInfo(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
lab_name=dict(
type='str',
required=True
),
name=dict(
type='str'
),
tags=dict(
type='list'
)
)
# store the results of the module operation
self.results = dict(
changed=False
)
self.mgmt_client = None
self.resource_group = None
self.lab_name = None
self.name = None
self.tags = None
super(AzureRMDtlArtifactSourceInfo, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_devtestlabartifactsource_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_devtestlabartifactsource_facts' module has been renamed to 'azure_rm_devtestlabartifactsource_info'",
version='2.13')
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
base_url=self._cloud_environment.endpoints.resource_manager)
if self.name:
self.results['artifactsources'] = self.get()
else:
self.results['artifactsources'] = self.list()
return self.results
def get(self):
response = None
results = []
try:
response = self.mgmt_client.artifact_sources.get(resource_group_name=self.resource_group,
lab_name=self.lab_name,
name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.fail('Could not get facts for Artifact Source.')
if response and self.has_tags(response.tags, self.tags):
results.append(self.format_response(response))
return results
def list(self):
response = None
results = []
try:
response = self.mgmt_client.artifact_sources.list(resource_group_name=self.resource_group,
lab_name=self.lab_name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.fail('Could not get facts for Artifact Source.')
if response is not None:
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.format_response(item))
return results
def format_response(self, item):
d = item.as_dict()
d = {
'id': d.get('id'),
'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'),
'name': d.get('name'),
'display_name': d.get('display_name'),
'tags': d.get('tags'),
'source_type': d.get('source_type').lower(),
'is_enabled': d.get('status') == 'Enabled',
'uri': d.get('uri'),
'arm_template_folder_path': d.get('arm_template_folder_path'),
'folder_path': d.get('folder_path'),
'provisioning_state': d.get('provisioning_state')
}
return d
def main():
AzureRMDtlArtifactSourceInfo()
if __name__ == '__main__':
main()
|
stasiek/robotframework
|
refs/heads/master
|
atest/testdata/test_libraries/MyLibDir/SubModuleLib.py
|
37
|
def keyword_in_mylibdir_submodulelib():
pass
|
abhishekgahlot/compbio
|
refs/heads/master
|
compbio/synteny/fuzzy.py
|
2
|
"""
Implements a fuzzy definition of synteny
"""
from itertools import chain
import rasmus
from rasmus import util
from rasmus.linked_list import LinkedList
from rasmus.sets import UnionFind
from compbio.regionlib import Region
from . import SyntenyBlock
def iter_windows(hits, radius):
"""Iterate through blast hits using a window with a radius in the
query genome"""
hits = util.PushIter(hits)
cache = LinkedList()
upstream = set()
downstream = set()
try:
center = hits.next()
except StopIteration:
return
while True:
# discard any hit in the upstream set that is not within radius distance
for hit in list(upstream):
if hit[0].end + radius < center[0].start:
upstream.remove(hit)
# populate downstream with all regions within the radius
for hit in hits:
if hit[0].start - radius > center[0].end:
hits.push(hit)
break
downstream.add(hit)
cache.append(hit)
yield (center, upstream, downstream)
# populate upstream
upstream.add(center)
# move center to next hit
try:
center = cache.pop_front()
except IndexError:
break
# remove new center from downstream
downstream.remove(center)
def print_window(center, upstream, downstream):
print center[0]
print
print "\n".join([str(x[0]) for x in upstream])
print
print "\n".join([str(x[0]) for x in downstream])
print "-"*70
def iter_chroms(hits):
"""
Returns an iterator of iterators it, such that each it iterates over
hits from the same species and chromosome.
"""
hits = util.PushIter(hits)
try:
hit = hits.next()
except StopIteration:
# no hits to iterate
return
# initiate which species and chrom we are going to start with
last_sp = [hit[0].species]
last_chrom = [hit[0].seqname]
hits.push(hit)
def inner_iter(hits):
"""An iterator of hits from only one species, chromome"""
for hit in hits:
if hit[0].species != last_sp[0] or hit[0].seqname != last_chrom[0]:
# if species,chrom changes, push hit back and return
last_sp[0] = hit[0].species
last_chrom[0] = hit[0].seqname
hits.push(hit)
return
yield hit
while hits.peek(None) != None:
yield inner_iter(hits)
def find_syntenic_neighbors(hits, radius, radius2=None):
"""
For each hit find the neighboring hits that are syntenic.
hits -- iterable of tuples (region1, region2, extra)
radius -- radius of window in query genome
radius2 -- radius of window in subject genome (default=radius)
hits must be sorted by query region species, chrom, and start
"""
if radius2 is None:
radius2 = radius
for hits2 in iter_chroms(hits):
for center, upstream, downstream in iter_windows(hits2, radius):
start = center[1].start - radius2
end = center[1].end + radius2
syntenic = []
for hit in chain(upstream, downstream):
# determine which subjects are within the window of the center hit
if (hit[1].species == center[1].species and
hit[1].seqname == center[1].seqname and
util.overlap(start, end, hit[1].start, hit[1].end)):
syntenic.append(hit)
yield (center, syntenic)
def samedir_hits(hit1, hit2):
dir1 = hit1[0].strand * hit1[1].strand
dir2 = hit2[0].strand * hit2[1].strand
if dir1 != dir2:
return False
if dir1 > 0:
return ((hit2[0].end >= hit1[0].start and
hit2[1].end >= hit1[1].start) or
(hit2[0].start <= hit1[0].end and
hit2[1].start <= hit1[1].end))
elif dir1 < 0:
return ((hit2[0].start <= hit1[0].end and
hit2[1].end >= hit1[1].start) or
(hit2[0].end >= hit1[0].start and
hit2[1].start <= hit1[1].end))
return True
def cluster_hits(hits, radius1, radius2=None, samedir=False):
"""
Cluster hits using windows
hits -- iterable of tuples (region1, region2, extra)
radius -- radius of window in query genome
radius2 -- radius of window in subject genome (default=radius)
samedir -- whether or not to require genes to be in the same direction
hits must be sorted by query region species, chrom, and start
"""
# connected components set
comps = {}
for hit, syntenic in find_syntenic_neighbors(hits, radius1, radius2):
# get block of hit
block = comps.get(hit, None)
if block is None:
block = UnionFind([hit])
comps[hit] = block
# union block with syntenic hits
for hit2 in syntenic:
block2 = comps.get(hit2, None)
# check whether hits are in the same direction
if samedir and not samedir_hits(hit, hit2):
if hit2 not in comps:
comps[hit2] = UnionFind([hit2])
continue
if block2 is None:
comps[hit2] = block
block.add(hit2)
else:
block2.union(block)
# get the set of blocks
comps = set(b.root() for b in comps.itervalues())
return comps
def hits2synteny_block(hits):
"""
Create a Synteny block from a cluster of hits
hits -- list of tuples (region1, region2, extra)
"""
# find containing regions within each genome
start1 = util.INF
end1 = -util.INF
start2 = util.INF
end2 = -util.INF
for hit in hits:
a, b = hit[:2]
start1 = min(start1, a.start)
end1 = max(end1, a.end)
start2 = min(start2, b.start)
end2 = max(end2, b.end)
return SyntenyBlock(Region(a.species, a.seqname, "synreg", start1, end1),
Region(b.species, b.seqname, "synreg", start2, end2),
data={"hits": hits})
|
klmitch/neutron
|
refs/heads/master
|
neutron/tests/common/base.py
|
34
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import functools
import unittest.case
from oslo_db.sqlalchemy import test_base
import testtools.testcase
from neutron.common import constants as n_const
from neutron.tests import base
from neutron.tests import tools
def create_resource(prefix, creation_func, *args, **kwargs):
"""Create a new resource that does not already exist.
If prefix isn't 'max_length' in size, a random suffix is concatenated to
make the generated name unique. Otherwise, 'prefix' is used as is.
:param prefix: The prefix for a randomly generated name
:param creation_func: A function taking the name of the resource
to be created as its first argument. An error is assumed
to indicate a name collision.
:param *args *kwargs: These will be passed to the create function.
"""
# Don't generate a random name if prefix is already full-length.
if len(prefix) == n_const.DEVICE_NAME_MAX_LEN:
return creation_func(prefix, *args, **kwargs)
while True:
name = base.get_rand_name(
max_length=n_const.DEVICE_NAME_MAX_LEN,
prefix=prefix)
try:
return creation_func(name, *args, **kwargs)
except RuntimeError:
pass
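# Editor's note (hedged illustration): typical use is along the lines of
#   create_resource('test-veth', my_create_device_func)
# where my_create_device_func is a caller-supplied function (hypothetical here)
# that raises RuntimeError on a name collision, triggering a retry with a new
# random name.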
def no_skip_on_missing_deps(wrapped):
"""Do not allow a method/test to skip on missing dependencies.
This decorator raises an error if the wrapped method raises a skip when
OS_FAIL_ON_MISSING_DEPS evaluates to True. This decorator should be used
only for missing dependencies (including missing system requirements).
"""
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
try:
return wrapped(*args, **kwargs)
except (testtools.TestCase.skipException, unittest.case.SkipTest) as e:
if base.bool_from_env('OS_FAIL_ON_MISSING_DEPS'):
tools.fail(
'%s cannot be skipped because OS_FAIL_ON_MISSING_DEPS '
'is enabled, skip reason: %s' % (wrapped.__name__, e))
raise
return wrapper
class MySQLTestCase(test_base.MySQLOpportunisticTestCase):
"""Base test class for MySQL tests.
If the MySQL db is unavailable then this test is skipped, unless
OS_FAIL_ON_MISSING_DEPS is enabled.
"""
SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS')
class PostgreSQLTestCase(test_base.PostgreSQLOpportunisticTestCase):
"""Base test class for PostgreSQL tests.
If the PostgreSQL db is unavailable then this test is skipped, unless
OS_FAIL_ON_MISSING_DEPS is enabled.
"""
SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS')
|
lindamar/ecclesi
|
refs/heads/master
|
env/lib/python2.7/site-packages/setuptools/command/build_ext.py
|
193
|
import os
import sys
import itertools
import imp
from distutils.command.build_ext import build_ext as _du_build_ext
from distutils.file_util import copy_file
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
from distutils.errors import DistutilsError
from distutils import log
from setuptools.extension import Library
from setuptools.extern import six
try:
# Attempt to use Cython for building extensions, if available
from Cython.Distutils.build_ext import build_ext as _build_ext
except ImportError:
_build_ext = _du_build_ext
# make sure _config_vars is initialized
get_config_var("LDSHARED")
from distutils.sysconfig import _config_vars as _CONFIG_VARS
def _customize_compiler_for_shlib(compiler):
if sys.platform == "darwin":
# building .dylib requires additional compiler flags on OSX; here we
# temporarily substitute the pyconfig.h variables so that distutils'
# 'customize_compiler' uses them before we build the shared libraries.
tmp = _CONFIG_VARS.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_CONFIG_VARS['LDSHARED'] = (
"gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
_CONFIG_VARS['CCSHARED'] = " -dynamiclib"
_CONFIG_VARS['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_CONFIG_VARS.clear()
_CONFIG_VARS.update(tmp)
else:
customize_compiler(compiler)
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
use_stubs = True
elif os.name != 'nt':
try:
import dl
use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
except ImportError:
pass
if_dl = lambda s: s if have_rtld else ''
def get_abi3_suffix():
"""Return the file extension for an abi3-compliant Extension()"""
for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION):
if '.abi3' in suffix: # Unix
return suffix
elif suffix == '.pyd': # Windows
return suffix
class build_ext(_build_ext):
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,
os.path.basename(filename))
src_filename = os.path.join(self.build_lib, filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
def get_ext_filename(self, fullname):
filename = _build_ext.get_ext_filename(self, fullname)
if fullname in self.ext_map:
ext = self.ext_map[fullname]
use_abi3 = (
six.PY3
and getattr(ext, 'py_limited_api')
and get_abi3_suffix()
)
if use_abi3:
so_ext = _get_config_var_837('EXT_SUFFIX')
filename = filename[:-len(so_ext)]
filename = filename + get_abi3_suffix()
if isinstance(ext, Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn, libtype)
elif use_stubs and ext._links_to_dynamic:
d, fn = os.path.split(filename)
return os.path.join(d, 'dl-' + fn)
return filename
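    # Editor's note (hedged illustration): for a py_limited_api extension on
    # CPython 3, the platform-specific suffix is swapped for the stable-ABI one,
    # e.g. "spam.cpython-36m-x86_64-linux-gnu.so" -> "spam.abi3.so".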
def initialize_options(self):
_build_ext.initialize_options(self)
self.shlib_compiler = None
self.shlibs = []
self.ext_map = {}
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext, Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
# distutils 3.1 will also ask for module names
# XXX what to do with conflicts?
self.ext_map[fullname.split('.')[-1]] = ext
ltd = self.shlibs and self.links_to_dynamic(ext) or False
ns = ltd and use_stubs and not isinstance(ext, Library)
ext._links_to_dynamic = ltd
ext._needs_stub = ns
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib, filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
_customize_compiler_for_shlib(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
def get_export_symbols(self, ext):
if isinstance(ext, Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self, ext)
def build_extension(self, ext):
ext._convert_pyx_sources_to_lang()
_compiler = self.compiler
try:
if isinstance(ext, Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self, ext)
if ext._needs_stub:
cmd = self.get_finalized_command('build_py').build_lib
self.write_stub(cmd, ext)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
return any(pkg + libname in libnames for libname in ext.libraries)
def get_outputs(self):
return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
def __get_stubs_outputs(self):
# assemble the base name for each extension that needs a stub
ns_ext_bases = (
os.path.join(self.build_lib, *ext._full_name.split('.'))
for ext in self.extensions
if ext._needs_stub
)
# pair each base with the extension
pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
return list(base + fnext for base, fnext in pairs)
def __get_output_extensions(self):
yield '.py'
yield '.pyc'
if self.get_finalized_command('build_py').optimize:
yield '.pyo'
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s", ext._full_name,
output_dir)
stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
'.py')
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file + " already exists! Please delete.")
if not self.dry_run:
f = open(stub_file, 'w')
f.write(
'\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources, imp" + if_dl(", dl"),
" __file__ = pkg_resources.resource_filename"
"(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" imp.load_dynamic(__name__,__file__)",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
])
)
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name == 'nt':
# Build shared libraries
#
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
self.link(
self.SHARED_LIBRARY, objects, output_libname,
output_dir, libraries, library_dirs, runtime_library_dirs,
export_symbols, debug, extra_preargs, extra_postargs,
build_temp, target_lang
)
else:
# Build static libraries everywhere else
libtype = 'static'
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
# XXX we need to either disallow these attrs on Library instances,
# or warn/abort here if set, or something...
# libraries=None, library_dirs=None, runtime_library_dirs=None,
# export_symbols=None, extra_preargs=None, extra_postargs=None,
# build_temp=None
assert output_dir is None # distutils build_ext doesn't pass this
output_dir, filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(
objects, basename, output_dir, debug, target_lang
)
def _get_config_var_837(name):
"""
In https://github.com/pypa/setuptools/pull/837, we discovered
Python 3.3.0 exposes the extension suffix under the name 'SO'.
"""
if sys.version_info < (3, 3, 1):
name = 'SO'
return get_config_var(name)
|
facebookresearch/faiss
|
refs/heads/master
|
faiss/python/__init__.py
|
1
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#@nolint
# not linting this file because it imports * from swigfaiss, which
# causes a ton of useless warnings.
import numpy as np
import sys
import inspect
import array
import warnings
# We import * so that the symbol foo can be accessed as faiss.foo.
from .loader import *
__version__ = "%d.%d.%d" % (FAISS_VERSION_MAJOR,
FAISS_VERSION_MINOR,
FAISS_VERSION_PATCH)
##################################################################
# The functions below add or replace some methods for classes
# this is to be able to pass in numpy arrays directly
# The C++ version of the classnames will be suffixed with _c
##################################################################
def replace_method(the_class, name, replacement, ignore_missing=False):
""" Replaces a method in a class with another version. The old method
is renamed to method_name_c (because presumably it was implemented in C) """
try:
orig_method = getattr(the_class, name)
except AttributeError:
if ignore_missing:
return
raise
if orig_method.__name__ == 'replacement_' + name:
# replacement was done in parent class
return
setattr(the_class, name + '_c', orig_method)
setattr(the_class, name, replacement)
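# Editor's addition: a minimal, hedged self-check of replace_method (not part of the
# original module); it only runs when this file is executed directly.
if __name__ == "__main__":
    class _Demo:
        def greet(self):
            return "original"

    def _replacement_greet(self):
        # delegates to the renamed original, mirroring the pattern used below
        return "wrapped:" + self.greet_c()

    replace_method(_Demo, 'greet', _replacement_greet)
    assert _Demo().greet() == "wrapped:original"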
def handle_Clustering():
def replacement_train(self, x, index, weights=None):
"""Perform clustering on a set of vectors. The index is used for assignment.
Parameters
----------
x : array_like
Training vectors, shape (n, self.d). `dtype` must be float32.
index : faiss.Index
Index used for assignment. The dimension of the index should be `self.d`.
weights : array_like, optional
Per training sample weight (size n) used when computing the weighted
average to obtain the centroid (default is 1 for all training vectors).
"""
n, d = x.shape
assert d == self.d
if weights is not None:
assert weights.shape == (n, )
self.train_c(n, swig_ptr(x), index, swig_ptr(weights))
else:
self.train_c(n, swig_ptr(x), index)
def replacement_train_encoded(self, x, codec, index, weights=None):
""" Perform clustering on a set of compressed vectors. The index is used for assignment.
The decompression is performed on-the-fly.
Parameters
----------
x : array_like
Training vectors, shape (n, codec.code_size()). `dtype` must be `uint8`.
codec : faiss.Index
Index used to decode the vectors. Should have dimension `self.d`.
index : faiss.Index
Index used for assignment. The dimension of the index should be `self.d`.
weights : array_like, optional
Per training sample weight (size n) used when computing the weighted
average to obtain the centroid (default is 1 for all training vectors).
"""
n, d = x.shape
assert d == codec.sa_code_size()
assert codec.d == index.d
if weights is not None:
assert weights.shape == (n, )
self.train_encoded_c(n, swig_ptr(x), codec, index, swig_ptr(weights))
else:
self.train_encoded_c(n, swig_ptr(x), codec, index)
replace_method(Clustering, 'train', replacement_train)
replace_method(Clustering, 'train_encoded', replacement_train_encoded)
handle_Clustering()
def handle_Quantizer(the_class):
def replacement_train(self, x):
""" Train the quantizer on a set of training vectors.
Parameters
----------
x : array_like
Training vectors, shape (n, self.d). `dtype` must be float32.
"""
n, d = x.shape
assert d == self.d
self.train_c(n, swig_ptr(x))
def replacement_compute_codes(self, x):
""" Compute the codes corresponding to a set of vectors.
Parameters
----------
x : array_like
Vectors to encode, shape (n, self.d). `dtype` must be float32.
Returns
-------
codes : array_like
Corresponding code for each vector, shape (n, self.code_size)
and `dtype` uint8.
"""
n, d = x.shape
assert d == self.d
codes = np.empty((n, self.code_size), dtype='uint8')
self.compute_codes_c(swig_ptr(x), swig_ptr(codes), n)
return codes
def replacement_decode(self, codes):
"""Reconstruct an approximation of vectors given their codes.
Parameters
----------
codes : array_like
Codes to decode, shape (n, self.code_size). `dtype` must be uint8.
Returns
-------
Reconstructed vectors for each code, shape `(n, d)` and `dtype` float32.
"""
n, cs = codes.shape
assert cs == self.code_size
x = np.empty((n, self.d), dtype='float32')
self.decode_c(swig_ptr(codes), swig_ptr(x), n)
return x
replace_method(the_class, 'train', replacement_train)
replace_method(the_class, 'compute_codes', replacement_compute_codes)
replace_method(the_class, 'decode', replacement_decode)
handle_Quantizer(ProductQuantizer)
handle_Quantizer(ScalarQuantizer)
handle_Quantizer(ResidualQuantizer)
handle_Quantizer(LocalSearchQuantizer)
def handle_NSG(the_class):
def replacement_build(self, x, graph):
n, d = x.shape
assert d == self.d
assert graph.ndim == 2
assert graph.shape[0] == n
K = graph.shape[1]
self.build_c(n, swig_ptr(x), swig_ptr(graph), K)
replace_method(the_class, 'build', replacement_build)
def handle_Index(the_class):
def replacement_add(self, x):
"""Adds vectors to the index.
The index must be trained before vectors can be added to it.
The vectors are implicitly numbered in sequence. When `n` vectors are
added to the index, they are given ids `ntotal`, `ntotal + 1`, ..., `ntotal + n - 1`.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
"""
n, d = x.shape
assert d == self.d
self.add_c(n, swig_ptr(x))
def replacement_add_with_ids(self, x, ids):
"""Adds vectors with arbitrary ids to the index (not all indexes support this).
The index must be trained before vectors can be added to it.
Vector `i` is stored in `x[i]` and has id `ids[i]`.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
ids : array_like
Array of ids of size n. The ids must be of type `int64`. Note that `-1` is reserved
in result lists to mean "not found" so it's better to not use it as an id.
"""
n, d = x.shape
assert d == self.d
assert ids.shape == (n, ), 'not same nb of vectors as ids'
self.add_with_ids_c(n, swig_ptr(x), swig_ptr(ids))
def replacement_assign(self, x, k, labels=None):
"""Find the k nearest neighbors of the set of vectors x in the index.
This is the same as the `search` method, but discards the distances.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
k : int
Number of nearest neighbors.
labels : array_like, optional
Labels array to store the results.
Returns
-------
labels: array_like
Labels of the nearest neighbors, shape (n, k).
When not enough results are found, the label is set to -1
"""
n, d = x.shape
assert d == self.d
if labels is None:
labels = np.empty((n, k), dtype=np.int64)
else:
assert labels.shape == (n, k)
self.assign_c(n, swig_ptr(x), swig_ptr(labels), k)
return labels
def replacement_train(self, x):
"""Trains the index on a representative set of vectors.
The index must be trained before vectors can be added to it.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
"""
n, d = x.shape
assert d == self.d
self.train_c(n, swig_ptr(x))
def replacement_search(self, x, k, D=None, I=None):
"""Find the k nearest neighbors of the set of vectors x in the index.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
k : int
Number of nearest neighbors.
D : array_like, optional
Distance array to store the result.
I : array_like, optional
Labels array to store the results.
Returns
-------
D : array_like
Distances of the nearest neighbors, shape (n, k). When not enough results are found
the label is set to +Inf or -Inf.
I : array_like
Labels of the nearest neighbors, shape (n, k).
When not enough results are found, the label is set to -1
"""
n, d = x.shape
assert d == self.d
assert k > 0
if D is None:
D = np.empty((n, k), dtype=np.float32)
else:
assert D.shape == (n, k)
if I is None:
I = np.empty((n, k), dtype=np.int64)
else:
assert I.shape == (n, k)
self.search_c(n, swig_ptr(x), k, swig_ptr(D), swig_ptr(I))
return D, I
def replacement_search_and_reconstruct(self, x, k, D=None, I=None, R=None):
"""Find the k nearest neighbors of the set of vectors x in the index,
and return an approximation of these vectors.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
k : int
Number of nearest neighbors.
D : array_like, optional
Distance array to store the result.
I : array_like, optional
Labels array to store the result.
R : array_like, optional
Reconstruction array to store the result.
Returns
-------
D : array_like
Distances of the nearest neighbors, shape (n, k). When not enough results are found
the label is set to +Inf or -Inf.
I : array_like
Labels of the nearest neighbors, shape (n, k). When not enough results are found,
the label is set to -1
R : array_like
Approximate (reconstructed) nearest neighbor vectors, shape (n, k, d).
"""
n, d = x.shape
assert d == self.d
assert k > 0
if D is None:
D = np.empty((n, k), dtype=np.float32)
else:
assert D.shape == (n, k)
if I is None:
I = np.empty((n, k), dtype=np.int64)
else:
assert I.shape == (n, k)
if R is None:
R = np.empty((n, k, d), dtype=np.float32)
else:
assert R.shape == (n, k, d)
self.search_and_reconstruct_c(n, swig_ptr(x),
k, swig_ptr(D),
swig_ptr(I),
swig_ptr(R))
return D, I, R
def replacement_remove_ids(self, x):
"""Remove some ids from the index.
This is an O(ntotal) operation by default, so could be expensive.
Parameters
----------
x : array_like or faiss.IDSelector
Either an IDSelector that returns True for vectors to remove, or a
list of ids to remove (1D array of int64). When `x` is a list,
it is wrapped into an IDSelector.
Returns
-------
n_remove: int
number of vectors that were removed
"""
if isinstance(x, IDSelector):
sel = x
else:
assert x.ndim == 1
index_ivf = try_extract_index_ivf(self)
if index_ivf and index_ivf.direct_map.type == DirectMap.Hashtable:
sel = IDSelectorArray(x.size, swig_ptr(x))
else:
sel = IDSelectorBatch(x.size, swig_ptr(x))
return self.remove_ids_c(sel)
def replacement_reconstruct(self, key, x=None):
"""Approximate reconstruction of one vector from the index.
Parameters
----------
key : int
Id of the vector to reconstruct
x : array_like, optional
pre-allocated array to store the results
Returns
-------
x : array_like
Reconstructed vector, size `self.d`, `dtype`=float32
"""
if x is None:
x = np.empty(self.d, dtype=np.float32)
else:
assert x.shape == (self.d, )
self.reconstruct_c(key, swig_ptr(x))
return x
def replacement_reconstruct_n(self, n0, ni, x=None):
"""Approximate reconstruction of vectors `n0` ... `n0 + ni - 1` from the index.
Missing vectors trigger an exception.
Parameters
----------
n0 : int
Id of the first vector to reconstruct
ni : int
Number of vectors to reconstruct
x : array_like, optional
pre-allocated array to store the results
Returns
-------
x : array_like
Reconstructed vectors, size (`ni`, `self.d`), `dtype`=float32
"""
if x is None:
x = np.empty((ni, self.d), dtype=np.float32)
else:
assert x.shape == (ni, self.d)
self.reconstruct_n_c(n0, ni, swig_ptr(x))
return x
def replacement_update_vectors(self, keys, x):
n = keys.size
assert keys.shape == (n, )
assert x.shape == (n, self.d)
self.update_vectors_c(n, swig_ptr(keys), swig_ptr(x))
# range_search does not take pre-allocated output buffers: the number of results per query is not known in advance
def replacement_range_search(self, x, thresh):
"""Search vectors that are within a distance of the query vectors.
Parameters
----------
x : array_like
Query vectors, shape (n, d) where d is appropriate for the index.
`dtype` must be float32.
thresh : float
Threshold to select neighbors. All elements within this radius are returned,
except for maximum inner product indexes, where the elements above the
threshold are returned
Returns
-------
lims: array_like
Starting index of the results for each query vector, size n+1.
D : array_like
Distances of the nearest neighbors, shape `lims[n]`. The distances for
query i are in `D[lims[i]:lims[i+1]]`.
I : array_like
Labels of nearest neighbors, shape `lims[n]`. The labels for query i
are in `I[lims[i]:lims[i+1]]`.
"""
n, d = x.shape
assert d == self.d
res = RangeSearchResult(n)
self.range_search_c(n, swig_ptr(x), thresh, res)
# get pointers and copy them
lims = rev_swig_ptr(res.lims, n + 1).copy()
nd = int(lims[-1])
D = rev_swig_ptr(res.distances, nd).copy()
I = rev_swig_ptr(res.labels, nd).copy()
return lims, D, I
def replacement_sa_encode(self, x, codes=None):
n, d = x.shape
assert d == self.d
if codes is None:
codes = np.empty((n, self.sa_code_size()), dtype=np.uint8)
else:
assert codes.shape == (n, self.sa_code_size())
self.sa_encode_c(n, swig_ptr(x), swig_ptr(codes))
return codes
def replacement_sa_decode(self, codes, x=None):
n, cs = codes.shape
assert cs == self.sa_code_size()
if x is None:
x = np.empty((n, self.d), dtype=np.float32)
else:
assert x.shape == (n, self.d)
self.sa_decode_c(n, swig_ptr(codes), swig_ptr(x))
return x
replace_method(the_class, 'add', replacement_add)
replace_method(the_class, 'add_with_ids', replacement_add_with_ids)
replace_method(the_class, 'assign', replacement_assign)
replace_method(the_class, 'train', replacement_train)
replace_method(the_class, 'search', replacement_search)
replace_method(the_class, 'remove_ids', replacement_remove_ids)
replace_method(the_class, 'reconstruct', replacement_reconstruct)
replace_method(the_class, 'reconstruct_n', replacement_reconstruct_n)
replace_method(the_class, 'range_search', replacement_range_search)
replace_method(the_class, 'update_vectors', replacement_update_vectors,
ignore_missing=True)
replace_method(the_class, 'search_and_reconstruct',
replacement_search_and_reconstruct, ignore_missing=True)
replace_method(the_class, 'sa_encode', replacement_sa_encode)
replace_method(the_class, 'sa_decode', replacement_sa_decode)
# get/set state for pickle
# the data is serialized to std::vector -> numpy array -> python bytes
# so not very efficient for now.
def index_getstate(self):
return {"this": serialize_index(self).tobytes()}
def index_setstate(self, st):
index2 = deserialize_index(np.frombuffer(st["this"], dtype="uint8"))
self.this = index2.this
the_class.__getstate__ = index_getstate
the_class.__setstate__ = index_setstate
def handle_IndexBinary(the_class):
def replacement_add(self, x):
n, d = x.shape
assert d * 8 == self.d
self.add_c(n, swig_ptr(x))
def replacement_add_with_ids(self, x, ids):
n, d = x.shape
assert d * 8 == self.d
assert ids.shape == (n, ), 'not same nb of vectors as ids'
self.add_with_ids_c(n, swig_ptr(x), swig_ptr(ids))
def replacement_train(self, x):
n, d = x.shape
assert d * 8 == self.d
self.train_c(n, swig_ptr(x))
def replacement_reconstruct(self, key):
x = np.empty(self.d // 8, dtype=np.uint8)
self.reconstruct_c(key, swig_ptr(x))
return x
def replacement_search(self, x, k):
n, d = x.shape
assert d * 8 == self.d
assert k > 0
distances = np.empty((n, k), dtype=np.int32)
labels = np.empty((n, k), dtype=np.int64)
self.search_c(n, swig_ptr(x),
k, swig_ptr(distances),
swig_ptr(labels))
return distances, labels
def replacement_range_search(self, x, thresh):
n, d = x.shape
assert d * 8 == self.d
res = RangeSearchResult(n)
self.range_search_c(n, swig_ptr(x), thresh, res)
# get pointers and copy them
lims = rev_swig_ptr(res.lims, n + 1).copy()
nd = int(lims[-1])
D = rev_swig_ptr(res.distances, nd).copy()
I = rev_swig_ptr(res.labels, nd).copy()
return lims, D, I
def replacement_remove_ids(self, x):
if isinstance(x, IDSelector):
sel = x
else:
assert x.ndim == 1
sel = IDSelectorBatch(x.size, swig_ptr(x))
return self.remove_ids_c(sel)
replace_method(the_class, 'add', replacement_add)
replace_method(the_class, 'add_with_ids', replacement_add_with_ids)
replace_method(the_class, 'train', replacement_train)
replace_method(the_class, 'search', replacement_search)
replace_method(the_class, 'range_search', replacement_range_search)
replace_method(the_class, 'reconstruct', replacement_reconstruct)
replace_method(the_class, 'remove_ids', replacement_remove_ids)
def handle_VectorTransform(the_class):
def apply_method(self, x):
n, d = x.shape
assert d == self.d_in
y = np.empty((n, self.d_out), dtype=np.float32)
self.apply_noalloc(n, swig_ptr(x), swig_ptr(y))
return y
def replacement_reverse_transform(self, x):
n, d = x.shape
assert d == self.d_out
y = np.empty((n, self.d_in), dtype=np.float32)
self.reverse_transform_c(n, swig_ptr(x), swig_ptr(y))
return y
def replacement_vt_train(self, x):
n, d = x.shape
assert d == self.d_in
self.train_c(n, swig_ptr(x))
replace_method(the_class, 'train', replacement_vt_train)
# `apply` is a built-in name in Python, so also expose the method as apply_py
the_class.apply_py = apply_method
the_class.apply = apply_method
replace_method(the_class, 'reverse_transform',
replacement_reverse_transform)
def handle_AutoTuneCriterion(the_class):
def replacement_set_groundtruth(self, D, I):
# D may be None; truth-testing a numpy array directly would raise a ValueError
if D is not None:
assert I.shape == D.shape
self.nq, self.gt_nnn = I.shape
self.set_groundtruth_c(
self.gt_nnn, swig_ptr(D) if D is not None else None, swig_ptr(I))
def replacement_evaluate(self, D, I):
assert I.shape == D.shape
assert I.shape == (self.nq, self.nnn)
return self.evaluate_c(swig_ptr(D), swig_ptr(I))
replace_method(the_class, 'set_groundtruth', replacement_set_groundtruth)
replace_method(the_class, 'evaluate', replacement_evaluate)
def handle_ParameterSpace(the_class):
def replacement_explore(self, index, xq, crit):
assert xq.shape == (crit.nq, index.d)
ops = OperatingPoints()
self.explore_c(index, crit.nq, swig_ptr(xq),
crit, ops)
return ops
replace_method(the_class, 'explore', replacement_explore)
def handle_MatrixStats(the_class):
original_init = the_class.__init__
def replacement_init(self, m):
assert len(m.shape) == 2
original_init(self, m.shape[0], m.shape[1], swig_ptr(m))
the_class.__init__ = replacement_init
handle_MatrixStats(MatrixStats)
def handle_IOWriter(the_class):
def write_bytes(self, b):
return self(swig_ptr(b), 1, len(b))
the_class.write_bytes = write_bytes
handle_IOWriter(IOWriter)
def handle_IOReader(the_class):
def read_bytes(self, totsz):
buf = bytearray(totsz)
was_read = self(swig_ptr(buf), 1, len(buf))
return bytes(buf[:was_read])
the_class.read_bytes = read_bytes
handle_IOReader(IOReader)
this_module = sys.modules[__name__]
for symbol in dir(this_module):
obj = getattr(this_module, symbol)
# print symbol, isinstance(obj, (type, types.ClassType))
if inspect.isclass(obj):
the_class = obj
if issubclass(the_class, Index):
handle_Index(the_class)
if issubclass(the_class, IndexBinary):
handle_IndexBinary(the_class)
if issubclass(the_class, VectorTransform):
handle_VectorTransform(the_class)
if issubclass(the_class, AutoTuneCriterion):
handle_AutoTuneCriterion(the_class)
if issubclass(the_class, ParameterSpace):
handle_ParameterSpace(the_class)
if issubclass(the_class, IndexNSG):
handle_NSG(the_class)
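# Illustrative usage sketch (not part of the original wrapper code). Once the loop
# above has installed the numpy-friendly replacement methods, any Index subclass can
# be driven with plain float32 arrays. The helper below is hypothetical and never
# called; it assumes IndexFlatL2 is exposed by the SWIG layer and uses random data.
def _example_wrapped_index_usage(d=8, nb=100, nq=5, k=3):
    xb = np.random.rand(nb, d).astype('float32')   # database vectors
    xq = np.random.rand(nq, d).astype('float32')   # query vectors
    index = IndexFlatL2(d)                         # exact L2 index
    index.add(xb)                                  # replacement_add: plain numpy input
    D, I = index.search(xq, k)                     # replacement_search: (nq, k) outputs
    x0 = index.reconstruct(0)                      # replacement_reconstruct: one stored vector
    return D, I, x0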
###########################################
# Utility to add a deprecation warning to
# classes from the SWIG interface
###########################################
def _make_deprecated_swig_class(deprecated_name, base_name):
"""
Dynamically construct deprecated classes as wrappers around renamed ones
The deprecation warning added in their __new__-method will trigger upon
construction of an instance of the class, but only once per session.
We do this here (in __init__.py) because the base classes are defined in
the SWIG interface, making it cumbersome to add the deprecation there.
Parameters
----------
deprecated_name : string
Name of the class to be deprecated; _not_ present in SWIG interface.
base_name : string
Name of the class that is replacing deprecated_name; must already be
imported into the current namespace.
Returns
-------
None
However, the deprecated class gets added to the faiss namespace
"""
base_class = globals()[base_name]
def new_meth(cls, *args, **kwargs):
msg = f"The class faiss.{deprecated_name} is deprecated in favour of faiss.{base_name}!"
warnings.warn(msg, DeprecationWarning, stacklevel=2)
instance = super(base_class, cls).__new__(cls, *args, **kwargs)
return instance
# three-argument version of "type" uses (name, tuple-of-bases, dict-of-attributes)
klazz = type(deprecated_name, (base_class,), {"__new__": new_meth})
# this ends up adding the class to the "faiss" namespace, in a way that it
# is available both through "import faiss" and "from faiss import *"
globals()[deprecated_name] = klazz
###########################################
# Add Python references to objects
# we do this at the Python class wrapper level.
###########################################
def add_ref_in_constructor(the_class, parameter_no):
# adds a reference to parameter parameter_no in self
# so that that parameter does not get deallocated before self
original_init = the_class.__init__
def replacement_init(self, *args):
original_init(self, *args)
self.referenced_objects = [args[parameter_no]]
def replacement_init_multiple(self, *args):
original_init(self, *args)
pset = parameter_no[len(args)]
self.referenced_objects = [args[no] for no in pset]
if type(parameter_no) == dict:
# a list of parameters to keep, depending on the number of arguments
the_class.__init__ = replacement_init_multiple
else:
the_class.__init__ = replacement_init
def add_ref_in_method(the_class, method_name, parameter_no):
original_method = getattr(the_class, method_name)
def replacement_method(self, *args):
ref = args[parameter_no]
if not hasattr(self, 'referenced_objects'):
self.referenced_objects = [ref]
else:
self.referenced_objects.append(ref)
return original_method(self, *args)
setattr(the_class, method_name, replacement_method)
def add_ref_in_function(function_name, parameter_no):
# assumes the function returns an object
original_function = getattr(this_module, function_name)
def replacement_function(*args):
result = original_function(*args)
ref = args[parameter_no]
result.referenced_objects = [ref]
return result
setattr(this_module, function_name, replacement_function)
add_ref_in_constructor(IndexIVFFlat, 0)
add_ref_in_constructor(IndexIVFFlatDedup, 0)
add_ref_in_constructor(IndexPreTransform, {2: [0, 1], 1: [0]})
add_ref_in_method(IndexPreTransform, 'prepend_transform', 0)
add_ref_in_constructor(IndexIVFPQ, 0)
add_ref_in_constructor(IndexIVFPQR, 0)
add_ref_in_constructor(IndexIVFPQFastScan, 0)
add_ref_in_constructor(Index2Layer, 0)
add_ref_in_constructor(Level1Quantizer, 0)
add_ref_in_constructor(IndexIVFScalarQuantizer, 0)
add_ref_in_constructor(IndexIDMap, 0)
add_ref_in_constructor(IndexIDMap2, 0)
add_ref_in_constructor(IndexHNSW, 0)
add_ref_in_method(IndexShards, 'add_shard', 0)
add_ref_in_method(IndexBinaryShards, 'add_shard', 0)
add_ref_in_constructor(IndexRefineFlat, {2:[0], 1:[0]})
add_ref_in_constructor(IndexRefine, {2:[0, 1]})
add_ref_in_constructor(IndexBinaryIVF, 0)
add_ref_in_constructor(IndexBinaryFromFloat, 0)
add_ref_in_constructor(IndexBinaryIDMap, 0)
add_ref_in_constructor(IndexBinaryIDMap2, 0)
add_ref_in_method(IndexReplicas, 'addIndex', 0)
add_ref_in_method(IndexBinaryReplicas, 'addIndex', 0)
add_ref_in_constructor(BufferedIOWriter, 0)
add_ref_in_constructor(BufferedIOReader, 0)
# seems really marginal...
# remove_ref_from_method(IndexReplicas, 'removeIndex', 0)
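# Illustrative sketch (not part of the original file) of why the add_ref_* helpers
# above matter: the Python-side constructor arguments are kept alive in
# `referenced_objects` so the underlying C++ object never outlives them. Hypothetical,
# uncalled example; assumes IndexFlatL2 and IndexIVFFlat come from the SWIG layer.
def _example_reference_keeping(d=16, nlist=4):
    quantizer = IndexFlatL2(d)
    index = IndexIVFFlat(quantizer, d, nlist)
    # add_ref_in_constructor(IndexIVFFlat, 0) stored the quantizer in
    # index.referenced_objects, so dropping the local name does not free it.
    del quantizer
    return index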
###########################################
# GPU functions
###########################################
def index_cpu_to_gpu_multiple_py(resources, index, co=None, gpus=None):
""" builds the C++ vectors for the GPU indices and the
resources. Handles the case where the resources are assigned to
the list of GPUs """
if gpus is None:
gpus = range(len(resources))
vres = GpuResourcesVector()
vdev = Int32Vector()
for i, res in zip(gpus, resources):
vdev.push_back(i)
vres.push_back(res)
index = index_cpu_to_gpu_multiple(vres, vdev, index, co)
return index
def index_cpu_to_all_gpus(index, co=None, ngpu=-1):
index_gpu = index_cpu_to_gpus_list(index, co=co, gpus=None, ngpu=ngpu)
return index_gpu
def index_cpu_to_gpus_list(index, co=None, gpus=None, ngpu=-1):
""" Here we can pass list of GPU ids as a parameter or ngpu to
use first n GPU's. gpus mut be a list or None"""
if (gpus is None) and (ngpu == -1): # All blank
gpus = range(get_num_gpus())
elif (gpus is None) and (ngpu != -1): # Get number of GPU's only
gpus = range(ngpu)
res = [StandardGpuResources() for _ in gpus]
index_gpu = index_cpu_to_gpu_multiple_py(res, index, co, gpus)
return index_gpu
# allows numpy ndarray usage with bfKnn
def knn_gpu(res, xq, xb, k, D=None, I=None, metric=METRIC_L2):
"""
Compute the k nearest neighbors of a vector on one GPU without constructing an index
Parameters
----------
res : StandardGpuResources
GPU resources to use during computation
xq : array_like
Query vectors, shape (nq, d) where d is appropriate for the index.
`dtype` must be float32.
xb : array_like
Database vectors, shape (nb, d) where d is appropriate for the index.
`dtype` must be float32.
k : int
Number of nearest neighbors.
D : array_like, optional
Output array for distances of the nearest neighbors, shape (nq, k)
I : array_like, optional
Output array for the nearest neighbors, shape (nq, k)
metric : MetricType, optional
distance measure to use (either METRIC_L2 or METRIC_INNER_PRODUCT)
Returns
-------
D : array_like
Distances of the nearest neighbors, shape (nq, k)
I : array_like
Labels of the nearest neighbors, shape (nq, k)
"""
nq, d = xq.shape
if xq.flags.c_contiguous:
xq_row_major = True
elif xq.flags.f_contiguous:
xq = xq.T
xq_row_major = False
else:
raise TypeError('xq matrix should be row (C) or column-major (Fortran)')
xq_ptr = swig_ptr(xq)
if xq.dtype == np.float32:
xq_type = DistanceDataType_F32
elif xq.dtype == np.float16:
xq_type = DistanceDataType_F16
else:
raise TypeError('xq must be f32 or f16')
nb, d2 = xb.shape
assert d2 == d
if xb.flags.c_contiguous:
xb_row_major = True
elif xb.flags.f_contiguous:
xb = xb.T
xb_row_major = False
else:
raise TypeError('xb matrix should be row (C) or column-major (Fortran)')
xb_ptr = swig_ptr(xb)
if xb.dtype == np.float32:
xb_type = DistanceDataType_F32
elif xb.dtype == np.float16:
xb_type = DistanceDataType_F16
else:
raise TypeError('xb must be float32 or float16')
if D is None:
D = np.empty((nq, k), dtype=np.float32)
else:
assert D.shape == (nq, k)
# interface takes void*, we need to check this
assert D.dtype == np.float32
D_ptr = swig_ptr(D)
if I is None:
I = np.empty((nq, k), dtype=np.int64)
else:
assert I.shape == (nq, k)
I_ptr = swig_ptr(I)
if I.dtype == np.int64:
I_type = IndicesDataType_I64
elif I.dtype == np.int32:
I_type = IndicesDataType_I32
else:
raise TypeError('I must be i64 or i32')
args = GpuDistanceParams()
args.metric = metric
args.k = k
args.dims = d
args.vectors = xb_ptr
args.vectorsRowMajor = xb_row_major
args.vectorType = xb_type
args.numVectors = nb
args.queries = xq_ptr
args.queriesRowMajor = xq_row_major
args.queryType = xq_type
args.numQueries = nq
args.outDistances = D_ptr
args.outIndices = I_ptr
args.outIndicesType = I_type
# no stream synchronization needed, inputs and outputs are guaranteed to
# be on the CPU (numpy arrays)
bfKnn(res, args)
return D, I
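# Illustrative sketch (not part of the original file): brute-force k-NN on one GPU
# without building an index. Uncalled example; it assumes a CUDA-enabled build with
# StandardGpuResources available and uses random float32 data.
def _example_knn_gpu(d=32, nb=1000, nq=10, k=5):
    res = StandardGpuResources()
    xb = np.random.rand(nb, d).astype('float32')
    xq = np.random.rand(nq, d).astype('float32')
    D, I = knn_gpu(res, xq, xb, k)   # D: (nq, k) float32 distances, I: (nq, k) int64 labels
    return D, I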
# allows numpy ndarray usage with bfKnn for all pairwise distances
def pairwise_distance_gpu(res, xq, xb, D=None, metric=METRIC_L2):
"""
Compute all pairwise distances between xq and xb on one GPU without constructing an index
Parameters
----------
res : StandardGpuResources
GPU resources to use during computation
xq : array_like
Query vectors, shape (nq, d) where d is appropriate for the index.
`dtype` must be float32.
xb : array_like
Database vectors, shape (nb, d) where d is appropriate for the index.
`dtype` must be float32.
D : array_like, optional
Output array for all pairwise distances, shape (nq, nb)
metric : MetricType, optional
distance measure to use (either METRIC_L2 or METRIC_INNER_PRODUCT)
Returns
-------
D : array_like
All pairwise distances, shape (nq, nb)
"""
nq, d = xq.shape
if xq.flags.c_contiguous:
xq_row_major = True
elif xq.flags.f_contiguous:
xq = xq.T
xq_row_major = False
else:
raise TypeError('xq matrix should be row (C) or column-major (Fortran)')
xq_ptr = swig_ptr(xq)
if xq.dtype == np.float32:
xq_type = DistanceDataType_F32
elif xq.dtype == np.float16:
xq_type = DistanceDataType_F16
else:
raise TypeError('xq must be float32 or float16')
nb, d2 = xb.shape
assert d2 == d
if xb.flags.c_contiguous:
xb_row_major = True
elif xb.flags.f_contiguous:
xb = xb.T
xb_row_major = False
else:
raise TypeError('xb matrix should be row (C) or column-major (Fortran)')
xb_ptr = swig_ptr(xb)
if xb.dtype == np.float32:
xb_type = DistanceDataType_F32
elif xb.dtype == np.float16:
xb_type = DistanceDataType_F16
else:
raise TypeError('xb must be float32 or float16')
if D is None:
D = np.empty((nq, nb), dtype=np.float32)
else:
assert D.shape == (nq, nb)
# interface takes void*, we need to check this
assert D.dtype == np.float32
D_ptr = swig_ptr(D)
args = GpuDistanceParams()
args.metric = metric
args.k = -1 # selects all pairwise distances
args.dims = d
args.vectors = xb_ptr
args.vectorsRowMajor = xb_row_major
args.vectorType = xb_type
args.numVectors = nb
args.queries = xq_ptr
args.queriesRowMajor = xq_row_major
args.queryType = xq_type
args.numQueries = nq
args.outDistances = D_ptr
# no stream synchronization needed, inputs and outputs are guaranteed to
# be on the CPU (numpy arrays)
bfKnn(res, args)
return D
###########################################
# numpy array / std::vector conversions
###########################################
sizeof_long = array.array('l').itemsize
deprecated_name_map = {
# deprecated: replacement
'Float': 'Float32',
'Double': 'Float64',
'Char': 'Int8',
'Int': 'Int32',
'Long': 'Int32' if sizeof_long == 4 else 'Int64',
'LongLong': 'Int64',
'Byte': 'UInt8',
# previously misspelled variant
'Uint64': 'UInt64',
}
for depr_prefix, base_prefix in deprecated_name_map.items():
_make_deprecated_swig_class(depr_prefix + "Vector", base_prefix + "Vector")
# same for the three legacy *VectorVector classes
if depr_prefix in ['Float', 'Long', 'Byte']:
_make_deprecated_swig_class(depr_prefix + "VectorVector",
base_prefix + "VectorVector")
# mapping from vector names in swigfaiss.swig and the numpy dtype names
# TODO: once deprecated classes are removed, remove the dict and just use .lower() below
vector_name_map = {
'Float32': 'float32',
'Float64': 'float64',
'Int8': 'int8',
'Int16': 'int16',
'Int32': 'int32',
'Int64': 'int64',
'UInt8': 'uint8',
'UInt16': 'uint16',
'UInt32': 'uint32',
'UInt64': 'uint64',
**{k: v.lower() for k, v in deprecated_name_map.items()}
}
def vector_to_array(v):
""" convert a C++ vector to a numpy array """
classname = v.__class__.__name__
assert classname.endswith('Vector')
dtype = np.dtype(vector_name_map[classname[:-6]])
a = np.empty(v.size(), dtype=dtype)
if v.size() > 0:
memcpy(swig_ptr(a), v.data(), a.nbytes)
return a
def vector_float_to_array(v):
return vector_to_array(v)
def copy_array_to_vector(a, v):
""" copy a numpy array to a vector """
n, = a.shape
classname = v.__class__.__name__
assert classname.endswith('Vector')
dtype = np.dtype(vector_name_map[classname[:-6]])
assert dtype == a.dtype, (
'cannot copy a %s array to a %s (should be %s)' % (
a.dtype, classname, dtype))
v.resize(n)
if n > 0:
memcpy(v.data(), swig_ptr(a), a.nbytes)
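# Illustrative sketch (not part of the original file): round-tripping a numpy array
# through a SWIG std::vector with the helpers above. Uncalled example; assumes the
# Int64Vector wrapper class is exposed by the SWIG layer.
def _example_vector_roundtrip():
    a = np.arange(5, dtype='int64')
    v = Int64Vector()
    copy_array_to_vector(a, v)   # numpy -> std::vector
    b = vector_to_array(v)       # std::vector -> numpy
    assert np.array_equal(a, b)
    return b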
# same for AlignedTable
def copy_array_to_AlignedTable(a, v):
n, = a.shape
# TODO check class name
assert v.itemsize() == a.itemsize
v.resize(n)
if n > 0:
memcpy(v.get(), swig_ptr(a), a.nbytes)
def array_to_AlignedTable(a):
if a.dtype == 'uint16':
v = AlignedTableUint16(a.size)
elif a.dtype == 'uint8':
v = AlignedTableUint8(a.size)
else:
assert False
copy_array_to_AlignedTable(a, v)
return v
def AlignedTable_to_array(v):
""" convert an AlignedTable to a numpy array """
classname = v.__class__.__name__
assert classname.startswith('AlignedTable')
dtype = classname[12:].lower()
a = np.empty(v.size(), dtype=dtype)
if a.size > 0:
memcpy(swig_ptr(a), v.data(), a.nbytes)
return a
###########################################
# Wrapper for a few functions
###########################################
def kmin(array, k):
"""return k smallest values (and their indices) of the lines of a
float32 array"""
m, n = array.shape
I = np.zeros((m, k), dtype='int64')
D = np.zeros((m, k), dtype='float32')
ha = float_maxheap_array_t()
ha.ids = swig_ptr(I)
ha.val = swig_ptr(D)
ha.nh = m
ha.k = k
ha.heapify()
ha.addn(n, swig_ptr(array))
ha.reorder()
return D, I
def kmax(array, k):
"""return k largest values (and their indices) of the lines of a
float32 array"""
m, n = array.shape
I = np.zeros((m, k), dtype='int64')
D = np.zeros((m, k), dtype='float32')
ha = float_minheap_array_t()
ha.ids = swig_ptr(I)
ha.val = swig_ptr(D)
ha.nh = m
ha.k = k
ha.heapify()
ha.addn(n, swig_ptr(array))
ha.reorder()
return D, I
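# Illustrative sketch (not part of the original file): per-row top-k selection with
# kmin/kmax defined above. Uncalled example using random float32 data.
def _example_kmin_kmax(m=4, n=10, k=3):
    a = np.random.rand(m, n).astype('float32')
    D_small, I_small = kmin(a, k)   # k smallest values per row and their column indices
    D_large, I_large = kmax(a, k)   # k largest values per row and their column indices
    return D_small, I_small, D_large, I_large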
def pairwise_distances(xq, xb, mt=METRIC_L2, metric_arg=0):
"""compute the whole pairwise distance matrix between two sets of
vectors"""
nq, d = xq.shape
nb, d2 = xb.shape
assert d == d2
dis = np.empty((nq, nb), dtype='float32')
if mt == METRIC_L2:
pairwise_L2sqr(
d, nq, swig_ptr(xq),
nb, swig_ptr(xb),
swig_ptr(dis))
else:
pairwise_extra_distances(
d, nq, swig_ptr(xq),
nb, swig_ptr(xb),
mt, metric_arg,
swig_ptr(dis))
return dis
def rand(n, seed=12345):
res = np.empty(n, dtype='float32')
float_rand(swig_ptr(res), res.size, seed)
return res
def randint(n, seed=12345, vmax=None):
res = np.empty(n, dtype='int64')
if vmax is None:
int64_rand(swig_ptr(res), res.size, seed)
else:
int64_rand_max(swig_ptr(res), res.size, vmax, seed)
return res
lrand = randint
def randn(n, seed=12345):
res = np.empty(n, dtype='float32')
float_randn(swig_ptr(res), res.size, seed)
return res
def eval_intersection(I1, I2):
""" size of intersection between each line of two result tables"""
n = I1.shape[0]
assert I2.shape[0] == n
k1, k2 = I1.shape[1], I2.shape[1]
ninter = 0
for i in range(n):
ninter += ranklist_intersection_size(
k1, swig_ptr(I1[i]), k2, swig_ptr(I2[i]))
return ninter
def normalize_L2(x):
fvec_renorm_L2(x.shape[1], x.shape[0], swig_ptr(x))
######################################################
# MapLong2Long interface
######################################################
def replacement_map_add(self, keys, vals):
n, = keys.shape
assert vals.shape == (n,)
self.add_c(n, swig_ptr(keys), swig_ptr(vals))
def replacement_map_search_multiple(self, keys):
n, = keys.shape
vals = np.empty(n, dtype='int64')
self.search_multiple_c(n, swig_ptr(keys), swig_ptr(vals))
return vals
replace_method(MapLong2Long, 'add', replacement_map_add)
replace_method(MapLong2Long, 'search_multiple', replacement_map_search_multiple)
######################################################
# search_with_parameters interface
######################################################
search_with_parameters_c = search_with_parameters
def search_with_parameters(index, x, k, params=None, output_stats=False):
n, d = x.shape
assert d == index.d
if not params:
# if not provided use the ones set in the IVF object
params = IVFSearchParameters()
index_ivf = extract_index_ivf(index)
params.nprobe = index_ivf.nprobe
params.max_codes = index_ivf.max_codes
nb_dis = np.empty(1, 'uint64')
ms_per_stage = np.empty(3, 'float64')
distances = np.empty((n, k), dtype=np.float32)
labels = np.empty((n, k), dtype=np.int64)
search_with_parameters_c(
index, n, swig_ptr(x),
k, swig_ptr(distances),
swig_ptr(labels),
params, swig_ptr(nb_dis), swig_ptr(ms_per_stage)
)
if not output_stats:
return distances, labels
else:
stats = {
'ndis': nb_dis[0],
'pre_transform_ms': ms_per_stage[0],
'coarse_quantizer_ms': ms_per_stage[1],
'invlist_scan_ms': ms_per_stage[2],
}
return distances, labels, stats
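# Illustrative sketch (not part of the original file): overriding nprobe for a single
# query batch without mutating the index. Uncalled example; assumes `index` is an IVF
# index and `xq` is a float32 query matrix of matching dimension.
def _example_search_with_parameters(index, xq, k=10, nprobe=32):
    params = IVFSearchParameters()
    params.nprobe = nprobe
    D, I, stats = search_with_parameters(index, xq, k, params, output_stats=True)
    # stats contains 'ndis' plus per-stage timings in milliseconds
    return D, I, stats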
range_search_with_parameters_c = range_search_with_parameters
def range_search_with_parameters(index, x, radius, params=None, output_stats=False):
n, d = x.shape
assert d == index.d
if not params:
# if not provided use the ones set in the IVF object
params = IVFSearchParameters()
index_ivf = extract_index_ivf(index)
params.nprobe = index_ivf.nprobe
params.max_codes = index_ivf.max_codes
nb_dis = np.empty(1, 'uint64')
ms_per_stage = np.empty(3, 'float64')
res = RangeSearchResult(n)
range_search_with_parameters_c(
index, n, swig_ptr(x),
radius, res,
params, swig_ptr(nb_dis), swig_ptr(ms_per_stage)
)
lims = rev_swig_ptr(res.lims, n + 1).copy()
nd = int(lims[-1])
Dout = rev_swig_ptr(res.distances, nd).copy()
Iout = rev_swig_ptr(res.labels, nd).copy()
if not output_stats:
return lims, Dout, Iout
else:
stats = {
'ndis': nb_dis[0],
'pre_transform_ms': ms_per_stage[0],
'coarse_quantizer_ms': ms_per_stage[1],
'invlist_scan_ms': ms_per_stage[2],
}
return lims, Dout, Iout, stats
######################################################
# KNN function
######################################################
def knn(xq, xb, k, metric=METRIC_L2):
"""
Compute the k nearest neighbors of a vector without constructing an index
Parameters
----------
xq : array_like
Query vectors, shape (nq, d) where d is appropriate for the index.
`dtype` must be float32.
xb : array_like
Database vectors, shape (nb, d) where d is appropriate for the index.
`dtype` must be float32.
k : int
Number of nearest neighbors.
metric : MetricType, optional
distance measure to use (either METRIC_L2 or METRIC_INNER_PRODUCT)
Returns
-------
D : array_like
Distances of the nearest neighbors, shape (nq, k)
I : array_like
Labels of the nearest neighbors, shape (nq, k)
"""
nq, d = xq.shape
nb, d2 = xb.shape
assert d == d2
I = np.empty((nq, k), dtype='int64')
D = np.empty((nq, k), dtype='float32')
if metric == METRIC_L2:
heaps = float_maxheap_array_t()
heaps.k = k
heaps.nh = nq
heaps.val = swig_ptr(D)
heaps.ids = swig_ptr(I)
knn_L2sqr(
swig_ptr(xq), swig_ptr(xb),
d, nq, nb, heaps
)
elif metric == METRIC_INNER_PRODUCT:
heaps = float_minheap_array_t()
heaps.k = k
heaps.nh = nq
heaps.val = swig_ptr(D)
heaps.ids = swig_ptr(I)
knn_inner_product(
swig_ptr(xq), swig_ptr(xb),
d, nq, nb, heaps
)
else:
raise NotImplementedError("only L2 and INNER_PRODUCT are supported")
return D, I
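# Illustrative sketch (not part of the original file): exact k-NN on the CPU without
# building an index, for both supported metrics. Uncalled example with random data.
def _example_knn(d=32, nb=500, nq=10, k=5):
    xb = np.random.rand(nb, d).astype('float32')
    xq = np.random.rand(nq, d).astype('float32')
    D, I = knn(xq, xb, k)                                     # squared L2 distances
    D_ip, I_ip = knn(xq, xb, k, metric=METRIC_INNER_PRODUCT)  # inner product scores
    return D, I, D_ip, I_ip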
###########################################
# Kmeans object
###########################################
class Kmeans:
"""Object that performs k-means clustering and manages the centroids.
The `Kmeans` class is essentially a wrapper around the C++ `Clustering` object.
Parameters
----------
d : int
dimension of the vectors to cluster
k : int
number of clusters
gpu: bool or int, optional
False: don't use GPU
True: use all GPUs
number: use this many GPUs
progressive_dim_steps:
use a progressive dimension clustering (with that number of steps)
Subsequent parameters are fields of the Clustering object. The most important are:
niter: int, optional
clustering iterations
nredo: int, optional
redo clustering this many times and keep best
verbose: bool, optional
spherical: bool, optional
do we want normalized centroids?
int_centroids: bool, optional
round centroids coordinates to integer
seed: int, optional
seed for the random number generator
"""
def __init__(self, d, k, **kwargs):
"""d: input dimension, k: nb of centroids. Additional
parameters are passed on to the ClusteringParameters object,
including niter=25, verbose=False, spherical = False
"""
self.d = d
self.k = k
self.gpu = False
if "progressive_dim_steps" in kwargs:
self.cp = ProgressiveDimClusteringParameters()
else:
self.cp = ClusteringParameters()
for k, v in kwargs.items():
if k == 'gpu':
if v == True or v == -1:
v = get_num_gpus()
self.gpu = v
else:
# if this raises an exception, it means that it is a non-existent field
getattr(self.cp, k)
setattr(self.cp, k, v)
self.centroids = None
def train(self, x, weights=None, init_centroids=None):
""" Perform k-means clustering.
On output of the function call:
- the centroids are in the centroids field of size (`k`, `d`).
- the objective value at each iteration is in the array obj (size `niter`)
- detailed optimization statistics are in the array iteration_stats.
Parameters
----------
x : array_like
Training vectors, shape (n, d), `dtype` must be float32 and n should
be larger than the number of clusters `k`.
weights : array_like
weight associated to each vector, shape `n`
init_centroids : array_like
initial set of centroids, shape (`k`, `d`)
Returns
-------
final_obj: float
final optimization objective
"""
n, d = x.shape
assert d == self.d
if self.cp.__class__ == ClusteringParameters:
# regular clustering
clus = Clustering(d, self.k, self.cp)
if init_centroids is not None:
nc, d2 = init_centroids.shape
assert d2 == d
copy_array_to_vector(init_centroids.ravel(), clus.centroids)
if self.cp.spherical:
self.index = IndexFlatIP(d)
else:
self.index = IndexFlatL2(d)
if self.gpu:
self.index = index_cpu_to_all_gpus(self.index, ngpu=self.gpu)
clus.train(x, self.index, weights)
else:
# not supported for progressive dim
assert weights is None
assert init_centroids is None
assert not self.cp.spherical
clus = ProgressiveDimClustering(d, self.k, self.cp)
if self.gpu:
fac = GpuProgressiveDimIndexFactory(ngpu=self.gpu)
else:
fac = ProgressiveDimIndexFactory()
clus.train(n, swig_ptr(x), fac)
centroids = vector_float_to_array(clus.centroids)
self.centroids = centroids.reshape(self.k, d)
stats = clus.iteration_stats
stats = [stats.at(i) for i in range(stats.size())]
self.obj = np.array([st.obj for st in stats])
# copy all the iteration_stats objects to a python array
stat_fields = 'obj time time_search imbalance_factor nsplit'.split()
self.iteration_stats = [
{field: getattr(st, field) for field in stat_fields}
for st in stats
]
return self.obj[-1] if self.obj.size > 0 else 0.0
def assign(self, x):
assert self.centroids is not None, "should train before assigning"
self.index.reset()
self.index.add(self.centroids)
D, I = self.index.search(x, 1)
return D.ravel(), I.ravel()
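# Illustrative sketch (not part of the original class): a typical train/assign round
# trip with the Kmeans wrapper above. Uncalled example using random float32 data.
def _example_kmeans(d=16, n=1000, k=8):
    x = np.random.rand(n, d).astype('float32')
    km = Kmeans(d, k, niter=10, verbose=False)
    km.train(x)          # centroids end up in km.centroids, objective values in km.obj
    D, I = km.assign(x)  # squared distance to, and id of, the nearest centroid per point
    return km.centroids, D, I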
# IndexProxy was renamed to IndexReplicas, remap the old name for any old code
# people may have
IndexProxy = IndexReplicas
ConcatenatedInvertedLists = HStackInvertedLists
###########################################
# serialization of indexes to byte arrays
###########################################
def serialize_index(index):
""" convert an index to a numpy uint8 array """
writer = VectorIOWriter()
write_index(index, writer)
return vector_to_array(writer.data)
def deserialize_index(data):
reader = VectorIOReader()
copy_array_to_vector(data, reader.data)
return read_index(reader)
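# Illustrative sketch (not part of the original file): round-tripping an index through
# a byte array, e.g. to store it in a key-value store. Uncalled example; assumes a
# small IndexFlatL2 filled with random data.
def _example_index_serialization(d=8, nb=50):
    index = IndexFlatL2(d)
    index.add(np.random.rand(nb, d).astype('float32'))
    buf = serialize_index(index)      # numpy uint8 array
    index2 = deserialize_index(buf)   # independent copy of the index
    return index2.ntotal == index.ntotal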
def serialize_index_binary(index):
""" convert an index to a numpy uint8 array """
writer = VectorIOWriter()
write_index_binary(index, writer)
return vector_to_array(writer.data)
def deserialize_index_binary(data):
reader = VectorIOReader()
copy_array_to_vector(data, reader.data)
return read_index_binary(reader)
###########################################
# ResultHeap
###########################################
class ResultHeap:
"""Accumulate query results from a sliced dataset. The final result will
be in self.D, self.I."""
def __init__(self, nq, k):
" nq: number of query vectors, k: number of results per query "
self.I = np.zeros((nq, k), dtype='int64')
self.D = np.zeros((nq, k), dtype='float32')
self.nq, self.k = nq, k
heaps = float_maxheap_array_t()
heaps.k = k
heaps.nh = nq
heaps.val = swig_ptr(self.D)
heaps.ids = swig_ptr(self.I)
heaps.heapify()
self.heaps = heaps
def add_result(self, D, I):
"""D, I do not need to be in a particular order (heap or sorted)"""
assert D.shape == (self.nq, self.k)
assert I.shape == (self.nq, self.k)
self.heaps.addn_with_ids(
self.k, swig_ptr(D),
swig_ptr(I), self.k)
def finalize(self):
self.heaps.reorder()
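# Illustrative sketch (not part of the original class): merging per-slice results with
# ResultHeap when the database is searched in chunks. Uncalled example; assumes random
# float32 data and shifts slice-local ids to global ids before merging.
def _example_result_heap(d=16, nb=1000, nq=5, k=10, slice_size=250):
    xb = np.random.rand(nb, d).astype('float32')
    xq = np.random.rand(nq, d).astype('float32')
    rh = ResultHeap(nq, k)
    for i0 in range(0, nb, slice_size):
        index = IndexFlatL2(d)
        index.add(xb[i0:i0 + slice_size])
        D, I = index.search(xq, k)
        rh.add_result(D, I + i0)   # shift ids into the global numbering
    rh.finalize()
    return rh.D, rh.I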
|
Muxi-X/muxi_site
|
refs/heads/develop
|
muxiwebsite/auth/views.py
|
2
|
# coding: utf-8
"""
views.py
~~~~~~~~
View functions.
/login: unified login page
/logout: unified logout page
"""
from . import auth
from .. import db
from ..models import User
from .forms import LoginForm, RegisterForm
from flask import render_template, redirect, request, url_for, flash, session
from flask_login import login_user, login_required, logout_user, current_user
import base64
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
@auth.route('/login/', methods=["POST"])
def login1():
"""登录页面"""
form = LoginForm()
user = User.query.filter_by(username=form.username.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user)
if session.get('refer') and not session['refer'].endswith(url_for("auth.register")):
return redirect(session['refer'])
else:
return redirect(url_for('i.index'))
else:
flash("用户名或密码不存在!")
return redirect(url_for("auth.login"))
@auth.route('/login/', methods=["GET"])
def login():
if not request.referrer == url_for('auth.login', _external=True):
session['refer'] = request.referrer
form = LoginForm()
if form.validate_on_submit():
return redirect(url_for('auth.login1'))
return render_template("muxi_login.html", form=form)
@auth.route('/register/', methods=["POST", "GET"])
def register():
"""注册页面"""
form = RegisterForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is not None:
flash("username has been registered!")
return redirect(url_for("auth.register"))
elif form.password.data != form.passwordconfirm.data:
flash("password do not match!")
return redirect(url_for("auth.register"))
else:
user = User(
username=form.username.data,
email=form.email.data,
password=base64.b64encode(form.password.data),
avatar_url='http://7xrvvt.com1.z0.glb.clouddn.com/shakedog.gif',
role_id=3
)
db.session.add(user)
db.session.commit()
return redirect(url_for("auth.login"))
return render_template("muxi_register.html", form=form)
# the route decorator must be outermost, otherwise the registered view skips the login check
@auth.route('/logout/')
@login_required
def logout():
"""登出界面"""
logout_user()
return redirect(url_for('i.index'))
|
stormi/weblate
|
refs/heads/master
|
weblate/trans/tests/test_search.py
|
8
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for search views.
"""
import re
from django.core.urlresolvers import reverse
from weblate.trans.tests.test_views import ViewTestCase
from weblate.trans.tests import OverrideSettings
from weblate.trans.search import update_index_unit
from weblate.trans.models import IndexUpdate
class SearchViewTest(ViewTestCase):
def setUp(self):
super(SearchViewTest, self).setUp()
self.translation = self.subproject.translation_set.get(
language_code='cs'
)
self.translate_url = self.translation.get_translate_url()
def do_search(self, params, expected, url=None):
'''
Helper method for performing search test.
'''
if url is None:
url = self.translate_url
response = self.client.get(url, params)
if expected is None:
self.assertRedirects(
response,
self.translation.get_absolute_url()
)
else:
self.assertContains(
response,
expected
)
return response
def test_all_search(self):
'''
Searching in all projects.
'''
response = self.client.get(
reverse('search'),
{'q': 'hello'}
)
self.assertContains(
response,
'<span class="hlmatch">Hello</span>, world'
)
def test_project_search(self):
'''
Searching within project.
'''
# Default
self.do_search(
{'q': 'hello'},
'Fulltext search for'
)
# Fulltext
self.do_search(
{'q': 'hello', 'search': 'ftx'},
'Fulltext search for'
)
# Substring
self.do_search(
{'q': 'hello', 'search': 'substring'},
'Substring search for'
)
# Exact string
self.do_search(
{'q': 'Thank you for using Weblate.', 'search': 'exact'},
'Search for exact string'
)
# Short string
self.do_search(
{'q': 'x'},
'Ensure this value has at least 2 characters (it has 1).'
)
# Wrong type
self.do_search(
{'q': 'xxxxx', 'search': 'xxxx'},
'Select a valid choice. xxxx is not one of the available choices.'
)
def test_review(self):
# Review
self.do_search(
{'date': '2010-01-10', 'type': 'review'},
None
)
# Review, invalid date
self.do_search(
{'date': '2010-01-', 'type': 'review'},
'Enter a valid date.'
)
def test_search_links(self):
response = self.do_search(
{'q': 'Weblate', 'search': 'substring'},
'Substring search for'
)
# Extract search ID
search_id = re.findall(r'sid=([0-9a-f-]*)&', response.content)[0]
# Try access to pages
response = self.client.get(
self.translate_url,
{'sid': search_id, 'offset': 0}
)
self.assertContains(
response,
'http://demo.weblate.org/',
)
response = self.client.get(
self.translate_url,
{'sid': search_id, 'offset': 1}
)
self.assertContains(
response,
'Thank you for using Weblate.',
)
# Invalid offset
response = self.client.get(
self.translate_url,
{'sid': search_id, 'offset': 'bug'}
)
self.assertContains(
response,
'http://demo.weblate.org/',
)
# Go to end
response = self.client.get(
self.translate_url,
{'sid': search_id, 'offset': 2}
)
self.assertRedirects(
response,
self.translation.get_absolute_url()
)
# Try invalid SID (should be deleted above)
response = self.client.get(
self.translate_url,
{'sid': search_id, 'offset': 1}
)
self.assertRedirects(
response,
self.translation.get_absolute_url()
)
def test_invalid_sid(self):
response = self.client.get(
self.translate_url,
{'sid': 'invalid'}
)
self.assertRedirects(
response,
self.translation.get_absolute_url()
)
def test_mixed_sid(self):
"""
Tests using SID from other translation.
"""
translation = self.subproject.translation_set.get(
language_code='de'
)
response = self.do_search(
{'q': 'Weblate', 'search': 'substring'},
'Substring search for',
url=translation.get_translate_url()
)
search_id = re.findall(r'sid=([0-9a-f-]*)&', response.content)[0]
response = self.client.get(
self.translate_url,
{'sid': search_id, 'offset': 0}
)
self.assertRedirects(
response,
self.translation.get_absolute_url()
)
def test_search_checksum(self):
unit = self.translation.unit_set.get(
source='Try Weblate at <http://demo.weblate.org/>!\n'
)
response = self.do_search(
{'checksum': unit.checksum},
'3 / 4'
)
# Extract search ID
search_id = re.findall(r'sid=([0-9a-f-]*)&', response.content)[0]
# Navigation
response = self.do_search(
{'sid': search_id, 'offset': 0},
'1 / 4'
)
response = self.do_search(
{'sid': search_id, 'offset': 3},
'4 / 4'
)
response = self.do_search(
{'sid': search_id, 'offset': 4},
None
)
def test_search_type(self):
self.do_search(
{'type': 'untranslated'},
'Untranslated strings'
)
self.do_search(
{'type': 'fuzzy'},
None
)
self.do_search(
{'type': 'suggestions'},
None
)
self.do_search(
{'type': 'allchecks'},
None
)
self.do_search(
{'type': 'plurals'},
None
)
self.do_search(
{'type': 'all'},
'1 / 4'
)
def test_search_errors(self):
self.do_search(
{'type': 'nonexisting-type'},
'nonexisting-type is not one of the available choices'
)
self.do_search(
{'date': 'nonexisting'},
'date: Enter a valid date.'
)
def test_search_plural(self):
response = self.do_search(
{'q': 'banana'},
'banana'
)
self.assertContains(response, 'One')
self.assertContains(response, 'Few')
self.assertContains(response, 'Other')
self.assertNotContains(response, 'Plural form ')
def test_checksum(self):
response = self.do_search({'checksum': 'invalid'}, None)
self.assertRedirects(
response,
self.get_translation().get_absolute_url()
)
class SearchBackendTest(ViewTestCase):
def do_index_update(self):
translation = self.subproject.translation_set.get(language_code='cs')
unit = translation.unit_set.get(
source='Try Weblate at <http://demo.weblate.org/>!\n'
)
update_index_unit(unit, True)
update_index_unit(unit, False)
@OverrideSettings(OFFLOAD_INDEXING=False)
def test_add(self):
self.do_index_update()
self.assertEqual(IndexUpdate.objects.count(), 0)
@OverrideSettings(OFFLOAD_INDEXING=True)
def test_add_offload(self):
self.do_index_update()
self.assertEqual(IndexUpdate.objects.count(), 1)
|
barbarubra/Don-t-know-What-i-m-doing.
|
refs/heads/master
|
python-build/python-libs/gdata/src/gdata/oauth/rsa.py
|
225
|
#!/usr/bin/python
"""
requires tlslite - http://trevp.net/tlslite/
"""
import base64
import binascii
from gdata.tlslite.utils import keyfactory
from gdata.tlslite.utils import cryptomath
# XXX andy: ugly local import due to module name, oauth.oauth
import gdata.oauth as oauth
class OAuthSignatureMethod_RSA_SHA1(oauth.OAuthSignatureMethod):
def get_name(self):
return "RSA-SHA1"
def _fetch_public_cert(self, oauth_request):
# not implemented yet, ideas are:
# (1) do a lookup in a table of trusted certs keyed off of consumer
# (2) fetch via http using a url provided by the requester
# (3) some sort of specific discovery code based on request
#
# either way should return a string representation of the certificate
raise NotImplementedError
def _fetch_private_cert(self, oauth_request):
# not implemented yet, ideas are:
# (1) do a lookup in a table of trusted certs keyed off of consumer
#
# either way should return a string representation of the certificate
raise NotImplementedError
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
oauth.escape(oauth_request.get_normalized_http_method()),
oauth.escape(oauth_request.get_normalized_http_url()),
oauth.escape(oauth_request.get_normalized_parameters()),
)
key = ''
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
key, base_string = self.build_signature_base_string(oauth_request,
consumer,
token)
# Fetch the private key cert based on the request
cert = self._fetch_private_cert(oauth_request)
# Pull the private key from the certificate
privatekey = keyfactory.parsePrivateKey(cert)
# Convert base_string to bytes
#base_string_bytes = cryptomath.createByteArraySequence(base_string)
# Sign using the key
signed = privatekey.hashAndSign(base_string)
return binascii.b2a_base64(signed)[:-1]
def check_signature(self, oauth_request, consumer, token, signature):
decoded_sig = base64.b64decode(signature)
key, base_string = self.build_signature_base_string(oauth_request,
consumer,
token)
# Fetch the public key cert based on the request
cert = self._fetch_public_cert(oauth_request)
# Pull the public key from the certificate
publickey = keyfactory.parsePEMKey(cert, public=True)
# Check the signature
ok = publickey.hashAndVerify(decoded_sig, base_string)
return ok
class TestOAuthSignatureMethod_RSA_SHA1(OAuthSignatureMethod_RSA_SHA1):
def _fetch_public_cert(self, oauth_request):
cert = """
-----BEGIN CERTIFICATE-----
MIIBpjCCAQ+gAwIBAgIBATANBgkqhkiG9w0BAQUFADAZMRcwFQYDVQQDDA5UZXN0
IFByaW5jaXBhbDAeFw03MDAxMDEwODAwMDBaFw0zODEyMzEwODAwMDBaMBkxFzAV
BgNVBAMMDlRlc3QgUHJpbmNpcGFsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
gQC0YjCwIfYoprq/FQO6lb3asXrxLlJFuCvtinTF5p0GxvQGu5O3gYytUvtC2JlY
zypSRjVxwxrsuRcP3e641SdASwfrmzyvIgP08N4S0IFzEURkV1wp/IpH7kH41Etb
mUmrXSwfNZsnQRE5SYSOhh+LcK2wyQkdgcMv11l4KoBkcwIDAQABMA0GCSqGSIb3
DQEBBQUAA4GBAGZLPEuJ5SiJ2ryq+CmEGOXfvlTtEL2nuGtr9PewxkgnOjZpUy+d
4TvuXJbNQc8f4AMWL/tO9w0Fk80rWKp9ea8/df4qMq5qlFWlx6yOLQxumNOmECKb
WpkUQDIDJEoFUzKMVuJf4KO/FJ345+BNLGgbJ6WujreoM1X/gYfdnJ/J
-----END CERTIFICATE-----
"""
return cert
def _fetch_private_cert(self, oauth_request):
cert = """
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----
"""
return cert
|
pavelchristof/gomoku-ai
|
refs/heads/master
|
tensorflow/examples/adding_an_op/fact_test.py
|
166
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that user ops can be used as expected."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class FactTest(tf.test.TestCase):
def test(self):
with self.test_session():
print(tf.user_ops.my_fact().eval())
if __name__ == '__main__':
tf.test.main()
|
40223221/w16b_test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/heapq.py
|
628
|
"""Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs, and the merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappushpop_max(heap, item):
"""Maxheap version of a heappush followed by a heappop."""
if heap and item < heap[0]:
item, heap[0] = heap[0], item
_siftup_max(heap, 0)
return item
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heappushpop = heappushpop
for elem in it:
_heappushpop(result, elem)
result.sort(reverse=True)
return result
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
_heapify_max(result)
_heappushpop = _heappushpop_max
for elem in it:
_heappushpop(result, elem)
result.sort()
return result
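# Illustrative sketch (not part of the original module): the priority-queue pattern
# described in the module docstring, written out as a small, uncalled helper.
def _example_priority_queue():
    heap = []
    for priority, task in [(3, 'write'), (1, 'read'), (2, 'sort')]:
        heappush(heap, (priority, task))   # heap[0] is always the smallest pair
    first = heappop(heap)                  # -> (1, 'read')
    top2 = nsmallest(2, [(3, 'write'), (1, 'read'), (2, 'sort')])
    return first, top2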
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
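# A rough way to reproduce the kind of comparison counts quoted in the big
# comment above: wrap integers in a class that counts __lt__ calls, heapify a
# shuffled list, then pop everything off.  The helper name, list size and use
# of random.shuffle are illustrative; exact numbers vary from run to run.
def _example_count_comparisons(n=1000):
    import random

    class _Counted(object):
        calls = 0

        def __init__(self, value):
            self.value = value

        def __lt__(self, other):
            _Counted.calls += 1
            return self.value < other.value

    data = [_Counted(x) for x in range(n)]
    random.shuffle(data)
    heapify(data)
    heapify_calls = _Counted.calls
    while data:
        heappop(data)
    return heapify_calls, _Counted.calls - heapify_calls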
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
# If available, use C implementation
# _heapq does not exist in brython, so let's just comment it out.
#try:
# from _heapq import *
#except ImportError:
# pass
def merge(*iterables):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
_len = len
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), itnum, next])
except _StopIteration:
pass
heapify(h)
while _len(h) > 1:
try:
while True:
v, itnum, next = s = h[0]
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
v, itnum, next = h[0]
yield v
yield from next.__self__
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [min(chain(head, it))]
return [min(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is None, use simpler decoration
if key is None:
it = zip(iterable, count()) # decorate
result = _nsmallest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return [r[2] for r in result] # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [max(chain(head, it))]
return [max(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is None, use simpler decoration
if key is None:
it = zip(iterable, count(0,-1)) # decorate
result = _nlargest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(0,-1), in2) # decorate
result = _nlargest(n, it)
return [r[2] for r in result] # undecorate
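# A quick illustration of the key= path above; the word list is made up.
# Items are decorated as (key(item), -position, item), so earlier items win
# ties when two keys compare equal.
def _example_nlargest_key():
    words = ['pear', 'fig', 'banana', 'apple', 'kiwi']
    return nlargest(3, words, key=len)   # -> ['banana', 'apple', 'pear']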
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print(sort)
import doctest
doctest.testmod()
|
txm/make-good
|
refs/heads/master
|
django/contrib/localflavor/nl/forms.py
|
311
|
"""
NL-specific Form helpers
"""
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, Select
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
pc_re = re.compile('^\d{4}[A-Z]{2}$')
sofi_re = re.compile('^\d{9}$')
numeric_re = re.compile('^\d+$')
class NLZipCodeField(Field):
"""
A Dutch postal code field.
"""
default_error_messages = {
'invalid': _('Enter a valid postal code'),
}
def clean(self, value):
super(NLZipCodeField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = value.strip().upper().replace(' ', '')
if not pc_re.search(value):
raise ValidationError(self.error_messages['invalid'])
if int(value[:4]) < 1000:
raise ValidationError(self.error_messages['invalid'])
return u'%s %s' % (value[:4], value[4:])
class NLProvinceSelect(Select):
"""
A Select widget that uses a list of provinces of the Netherlands as its
choices.
"""
def __init__(self, attrs=None):
from nl_provinces import PROVINCE_CHOICES
super(NLProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class NLPhoneNumberField(Field):
"""
A Dutch telephone number field.
"""
default_error_messages = {
'invalid': _('Enter a valid phone number'),
}
def clean(self, value):
super(NLPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
phone_nr = re.sub('[\-\s\(\)]', '', smart_unicode(value))
if len(phone_nr) == 10 and numeric_re.search(phone_nr):
return value
if phone_nr[:3] == '+31' and len(phone_nr) == 12 and \
numeric_re.search(phone_nr[3:]):
return value
raise ValidationError(self.error_messages['invalid'])
class NLSoFiNumberField(Field):
"""
A Dutch social security number (SoFi/BSN) field.
http://nl.wikipedia.org/wiki/Sofinummer
"""
default_error_messages = {
'invalid': _('Enter a valid SoFi number'),
}
def clean(self, value):
super(NLSoFiNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not sofi_re.search(value):
raise ValidationError(self.error_messages['invalid'])
if int(value) == 0:
raise ValidationError(self.error_messages['invalid'])
checksum = 0
for i in range(9, 1, -1):
checksum += int(value[9-i]) * i
checksum -= int(value[-1])
if checksum % 11 != 0:
raise ValidationError(self.error_messages['invalid'])
return value
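# A standalone sketch of the 11-test performed in clean() above, for reference
# outside the form machinery.  The value 111222333 is an illustrative number
# that happens to satisfy the check.
def _sofi_checksum_ok(value):
    checksum = sum(int(value[9 - i]) * i for i in range(9, 1, -1)) - int(value[-1])
    return checksum % 11 == 0
    # _sofi_checksum_ok('111222333') -> True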
|
redhat-openstack/horizon
|
refs/heads/mitaka-patches
|
openstack_dashboard/dashboards/identity/ngusers/panel.py
|
38
|
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.identity import dashboard
class NGUsers(horizon.Panel):
name = _("Users")
slug = 'ngusers'
policy_rules = (("identity", "identity:get_user"),
("identity", "identity:list_users"))
dashboard.Identity.register(NGUsers)
|
mmalyska/eve-wspace
|
refs/heads/develop
|
evewspace/SiteTracker/migrations/0003_auto__add_systemweight.py
|
17
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SystemWeight'
db.create_table('SiteTracker_systemweight', (
('system', self.gf('django.db.models.fields.related.OneToOneField')(related_name='st_weight', unique=True, primary_key=True, to=orm['Map.System'])),
('weight', self.gf('django.db.models.fields.FloatField')()),
))
db.send_create_signal('SiteTracker', ['SystemWeight'])
def backwards(self, orm):
# Deleting model 'SystemWeight'
db.delete_table('SiteTracker_systemweight')
models = {
'Map.system': {
'Meta': {'object_name': 'System', '_ormbases': ['core.SystemData']},
'first_visited': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'last_visited': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'lastscanned': ('django.db.models.fields.DateTimeField', [], {}),
'npckills': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'occupied': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'podkills': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'shipkills': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sysclass': ('django.db.models.fields.IntegerField', [], {}),
'systemdata_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.SystemData']", 'unique': 'True', 'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'SiteTracker.claim': {
'Meta': {'object_name': 'Claim'},
'bonus': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'period': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'claims'", 'to': "orm['SiteTracker.ClaimPeriod']"}),
'shareclaimed': ('django.db.models.fields.FloatField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'claims'", 'to': "orm['auth.User']"})
},
'SiteTracker.claimperiod': {
'Meta': {'object_name': 'ClaimPeriod'},
'closetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loothauledby': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'loothauled'", 'null': 'True', 'to': "orm['auth.User']"}),
'lootsoldby': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'lootsold'", 'null': 'True', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'starttime': ('django.db.models.fields.DateTimeField', [], {})
},
'SiteTracker.fleet': {
'Meta': {'object_name': 'Fleet'},
'current_boss': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'currently_bossing'", 'to': "orm['auth.User']"}),
'ended': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_boss': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bossfleets'", 'to': "orm['auth.User']"}),
'roles_needed': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'fleets_need'", 'symmetrical': 'False', 'to': "orm['SiteTracker.SiteRole']"}),
'started': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stfleets'", 'to': "orm['Map.System']"})
},
'SiteTracker.payoutentry': {
'Meta': {'object_name': 'PayoutEntry'},
'claim': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payout'", 'to': "orm['SiteTracker.Claim']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iskshare': ('django.db.models.fields.BigIntegerField', [], {}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': "orm['SiteTracker.PayoutReport']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payouts'", 'to': "orm['auth.User']"})
},
'SiteTracker.payoutreport': {
'Meta': {'object_name': 'PayoutReport'},
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payoutreports'", 'to': "orm['auth.User']"}),
'datepaid': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'grossprofit': ('django.db.models.fields.BigIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'period': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports'", 'to': "orm['SiteTracker.ClaimPeriod']"})
},
'SiteTracker.siterecord': {
'Meta': {'object_name': 'SiteRecord'},
'boss': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sitescredited'", 'to': "orm['auth.User']"}),
'fleet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sites'", 'to': "orm['SiteTracker.Fleet']"}),
'fleetsize': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'raw_points': ('django.db.models.fields.IntegerField', [], {}),
'site_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sitesrun'", 'to': "orm['SiteTracker.SiteType']"}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sitescompleted'", 'to': "orm['Map.System']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'weighted_points': ('django.db.models.fields.IntegerField', [], {})
},
'SiteTracker.siterole': {
'Meta': {'object_name': 'SiteRole'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'SiteTracker.sitetype': {
'Meta': {'object_name': 'SiteType'},
'defunct': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'longname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'shortname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '8'})
},
'SiteTracker.siteweight': {
'Meta': {'object_name': 'SiteWeight'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'raw_points': ('django.db.models.fields.IntegerField', [], {}),
'site_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'weights'", 'to': "orm['SiteTracker.SiteType']"}),
'sysclass': ('django.db.models.fields.IntegerField', [], {})
},
'SiteTracker.systemweight': {
'Meta': {'object_name': 'SystemWeight'},
'system': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'st_weight'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['Map.System']"}),
'weight': ('django.db.models.fields.FloatField', [], {})
},
'SiteTracker.userlog': {
'Meta': {'object_name': 'UserLog'},
'fleet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['SiteTracker.Fleet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jointime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'leavetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sitetrackerlogs'", 'to': "orm['auth.User']"})
},
'SiteTracker.usersite': {
'Meta': {'object_name': 'UserSite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['SiteTracker.SiteRecord']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sites'", 'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.constellation': {
'Meta': {'object_name': 'Constellation', 'db_table': "'mapConstellations'", 'managed': 'False'},
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_column': "'constellationID'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_column': "'constellationName'"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'constellations'", 'db_column': "'regionID'", 'to': "orm['core.Region']"}),
'x': ('django.db.models.fields.FloatField', [], {}),
'y': ('django.db.models.fields.FloatField', [], {}),
'z': ('django.db.models.fields.FloatField', [], {})
},
'core.region': {
'Meta': {'object_name': 'Region', 'db_table': "'mapRegions'", 'managed': 'False'},
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_column': "'regionID'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_column': "'regionName'"}),
'x': ('django.db.models.fields.FloatField', [], {}),
'y': ('django.db.models.fields.FloatField', [], {}),
'z': ('django.db.models.fields.FloatField', [], {})
},
'core.systemdata': {
'Meta': {'object_name': 'SystemData', 'db_table': "'mapSolarSystems'", 'managed': 'False'},
'constellation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'systems'", 'db_column': "'constellationID'", 'to': "orm['core.Constellation']"}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_column': "'solarSystemID'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_column': "'solarSystemName'"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'systems'", 'db_column': "'regionID'", 'to': "orm['core.Region']"}),
'security': ('django.db.models.fields.FloatField', [], {}),
'x': ('django.db.models.fields.FloatField', [], {}),
'y': ('django.db.models.fields.FloatField', [], {}),
'z': ('django.db.models.fields.FloatField', [], {})
}
}
complete_apps = ['SiteTracker']
|
wemanuel/smry
|
refs/heads/master
|
smry/server-auth/ls/google-cloud-sdk/platform/gsutil/third_party/pyasn1-modules/pyasn1_modules/rfc3414.py
|
127
|
#
# SNMPv3 message syntax
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc3414.txt
#
from pyasn1.type import univ, namedtype, namedval, tag, constraint
class UsmSecurityParameters(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('msgAuthoritativeEngineID', univ.OctetString()),
namedtype.NamedType('msgAuthoritativeEngineBoots', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
namedtype.NamedType('msgAuthoritativeEngineTime', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
namedtype.NamedType('msgUserName', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 32))),
namedtype.NamedType('msgAuthenticationParameters', univ.OctetString()),
namedtype.NamedType('msgPrivacyParameters', univ.OctetString())
)
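# A minimal sketch of filling in and BER-encoding the structure above.  The
# engine ID, user name and other field values are arbitrary placeholders, and
# the __main__ guard keeps the example from running on import.
if __name__ == '__main__':
    from pyasn1.codec.ber import encoder
    params = UsmSecurityParameters()
    params.setComponentByName('msgAuthoritativeEngineID', '\x80\x00\x1f\x88\x80')
    params.setComponentByName('msgAuthoritativeEngineBoots', 1)
    params.setComponentByName('msgAuthoritativeEngineTime', 12345)
    params.setComponentByName('msgUserName', 'usr-none-none')
    params.setComponentByName('msgAuthenticationParameters', '')
    params.setComponentByName('msgPrivacyParameters', '')
    print(encoder.encode(params))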
|
jbenden/ansible
|
refs/heads/devel
|
lib/ansible/parsing/quoting.py
|
241
|
# (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
def is_quoted(data):
return len(data) > 1 and data[0] == data[-1] and data[0] in ('"', "'") and data[-2] != '\\'
def unquote(data):
''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
if is_quoted(data):
return data[1:-1]
return data
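# A few illustrative cases for the helpers above; the strings are examples only.
def _example_unquote():
    assert unquote('"hello"') == 'hello'
    assert unquote("'hi'") == 'hi'
    assert unquote('plain') == 'plain'        # not quoted: returned unchanged
    assert unquote('"esc\\"') == '"esc\\"'    # escaped closing quote: unchanged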
|
erinxocon/OctoPrint-Neopixelstatus
|
refs/heads/master
|
octoprint_NeoPixelStatus/color_utils.py
|
1
|
#!/usr/bin/env python
"""Helper functions to make color manipulations easier."""
from __future__ import division
import math
def remap(x, oldmin, oldmax, newmin, newmax):
"""Remap the float x from the range oldmin-oldmax to the range newmin-newmax
Does not clamp values that exceed min or max.
For example, to make a sine wave that goes between 0 and 256:
remap(math.sin(time.time()), -1, 1, 0, 256)
"""
zero_to_one = (x-oldmin) / (oldmax-oldmin)
return zero_to_one*(newmax-newmin) + newmin
def clamp(x, minn, maxx):
"""Restrict the float x to the range minn-maxx."""
return max(minn, min(maxx, x))
def cos(x, offset=0, period=1, minn=0, maxx=1):
"""A cosine curve scaled to fit in a 0-1 range and 0-1 domain by default.
offset: how much to slide the curve across the domain (should be 0-1)
period: the length of one wave
minn, maxx: the output range
"""
value = math.cos((x/period - offset) * math.pi * 2) / 2 + 0.5
return value*(maxx-minn) + minn
def contrast(color, center, mult):
"""Expand the color values by a factor of mult around the pivot value of center.
color: an (r, g, b) tuple
center: a float -- the fixed point
mult: a float -- expand or contract the values around the center point
"""
r, g, b = color
r = (r - center) * mult + center
g = (g - center) * mult + center
b = (b - center) * mult + center
return (r, g, b)
def clip_black_by_luminance(color, threshold):
"""If the color's luminance is less than threshold, replace it with black.
color: an (r, g, b) tuple
threshold: a float
"""
r, g, b = color
if r+g+b < threshold*3:
return (0, 0, 0)
return (r, g, b)
def clip_black_by_channels(color, threshold):
"""Replace any individual r, g, or b value less than threshold with 0.
color: an (r, g, b) tuple
threshold: a float
"""
r, g, b = color
if r < threshold:
r = 0
if g < threshold:
g = 0
if b < threshold:
b = 0
return (r, g, b)
def mod_dist(a, b, n):
"""Return the distance between floats a and b, modulo n.
The result is always non-negative.
For example, thinking of a clock:
mod_dist(11, 1, 12) == 2 because you can "wrap around".
"""
return min((a-b) % n, (b-a) % n)
def gamma(color, gamma):
"""Apply a gamma curve to the color.
The color values should be in the range 0-1.
"""
r, g, b = color
return (max(r, 0) ** gamma, max(g, 0) ** gamma, max(b, 0) ** gamma)
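# A small end-to-end sketch chaining the helpers above on a single pixel.
# The starting colour and the contrast/gamma parameters are arbitrary.
def _example_pipeline():
    color = (0.2, 0.5, 0.9)
    color = contrast(color, center=0.5, mult=1.5)      # spread values around 0.5
    color = tuple(clamp(c, 0, 1) for c in color)       # keep channels in range
    return gamma(color, 2.2)                           # apply a display gamma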
|
maxrothman/aws-alfred-workflow
|
refs/heads/master
|
venv/lib/python2.7/site-packages/docutils/languages/__init__.py
|
170
|
# $Id: __init__.py 7648 2013-04-18 07:36:22Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# Internationalization details are documented in
# <http://docutils.sf.net/docs/howto/i18n.html>.
"""
This package contains modules for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils.utils import normalize_language_tag
if sys.version_info < (2,5):
from docutils._compat import __import__
_languages = {}
def get_language(language_code, reporter=None):
"""Return module with language localizations.
`language_code` is a "BCP 47" language tag.
If there is no matching module, warn and fall back to English.
"""
# TODO: use a dummy module returning empty strings? configurable?
for tag in normalize_language_tag(language_code):
tag = tag.replace('-','_') # '-' not valid in module names
if tag in _languages:
return _languages[tag]
try:
module = __import__(tag, globals(), locals(), level=1)
except ImportError:
try:
module = __import__(tag, globals(), locals(), level=0)
except ImportError:
continue
_languages[tag] = module
return module
if reporter is not None:
reporter.warning(
'language "%s" not supported: ' % language_code +
'Docutils-generated text will be in English.')
module = __import__('en', globals(), locals(), level=1)
_languages[tag] = module # warn only one time!
return module
|
jnns/wagtail
|
refs/heads/master
|
wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py
|
12
|
from __future__ import unicode_literals
from django.conf import settings
from django import template
from django.contrib.humanize.templatetags.humanize import intcomma
from django.template.defaultfilters import stringfilter
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import get_navigation_menu_items, UserPagePermissionsProxy, PageViewRestriction
from wagtail.wagtailcore.utils import camelcase_to_underscore, escape_script
from wagtail.wagtailcore.utils import cautious_slugify as _cautious_slugify
from wagtail.wagtailadmin.menu import admin_menu
register = template.Library()
register.filter('intcomma', intcomma)
@register.inclusion_tag('wagtailadmin/shared/explorer_nav.html')
def explorer_nav():
return {
'nodes': get_navigation_menu_items()
}
@register.inclusion_tag('wagtailadmin/shared/explorer_nav_child.html')
def explorer_subnav(nodes):
return {
'nodes': nodes
}
@register.inclusion_tag('wagtailadmin/shared/main_nav.html', takes_context=True)
def main_nav(context):
request = context['request']
return {
'menu_html': admin_menu.render_html(request),
'request': request,
}
@register.simple_tag
def main_nav_js():
return admin_menu.media['js']
@register.filter("ellipsistrim")
def ellipsistrim(value, max_length):
if len(value) > max_length:
truncd_val = value[:max_length]
if not len(value) == (max_length + 1) and value[max_length + 1] != " ":
truncd_val = truncd_val[:truncd_val.rfind(" ")]
return truncd_val + "..."
return value
@register.filter
def fieldtype(bound_field):
try:
return camelcase_to_underscore(bound_field.field.__class__.__name__)
except AttributeError:
try:
return camelcase_to_underscore(bound_field.__class__.__name__)
except AttributeError:
return ""
@register.filter
def widgettype(bound_field):
try:
return camelcase_to_underscore(bound_field.field.widget.__class__.__name__)
except AttributeError:
try:
return camelcase_to_underscore(bound_field.widget.__class__.__name__)
except AttributeError:
return ""
@register.filter
def meta_description(model):
try:
return model.model_class()._meta.description
except:
return ""
@register.assignment_tag(takes_context=True)
def page_permissions(context, page):
"""
Usage: {% page_permissions page as page_perms %}
Sets the variable 'page_perms' to a PagePermissionTester object that can be queried to find out
what actions the current logged-in user can perform on the given page.
"""
# Create a UserPagePermissionsProxy object to represent the user's global permissions, and
# cache it in the context for the duration of the page request, if one does not exist already
if 'user_page_permissions' not in context:
context['user_page_permissions'] = UserPagePermissionsProxy(context['request'].user)
# Now retrieve a PagePermissionTester from it, specific to the given page
return context['user_page_permissions'].for_page(page)
@register.assignment_tag(takes_context=True)
def test_page_is_public(context, page):
"""
Usage: {% test_page_is_public page as is_public %}
Sets 'is_public' to True iff there are no page view restrictions in place on
this page.
Caches the list of page view restrictions in the context, to avoid repeated
DB queries on repeated calls.
"""
if 'all_page_view_restriction_paths' not in context:
context['all_page_view_restriction_paths'] = PageViewRestriction.objects.select_related('page').values_list('page__path', flat=True)
is_private = any([
page.path.startswith(restricted_path)
for restricted_path in context['all_page_view_restriction_paths']
])
return not is_private
@register.simple_tag
def hook_output(hook_name):
"""
Example: {% hook_output 'insert_editor_css' %}
Whenever we have a hook whose functions take no parameters and return a string, this tag can be used
to output the concatenation of all of those return values onto the page.
Note that the output is not escaped - it is the hook function's responsibility to escape unsafe content.
"""
snippets = [fn() for fn in hooks.get_hooks(hook_name)]
return ''.join(snippets)
@register.assignment_tag
def usage_count_enabled():
return getattr(settings, 'WAGTAIL_USAGE_COUNT_ENABLED', False)
@register.assignment_tag
def base_url_setting():
return getattr(settings, 'BASE_URL', None)
class EscapeScriptNode(template.Node):
TAG_NAME = 'escapescript'
def __init__(self, nodelist):
super(EscapeScriptNode, self).__init__()
self.nodelist = nodelist
def render(self, context):
out = self.nodelist.render(context)
return escape_script(out)
@classmethod
def handle(cls, parser, token):
nodelist = parser.parse(('end' + EscapeScriptNode.TAG_NAME,))
parser.delete_first_token()
return cls(nodelist)
register.tag(EscapeScriptNode.TAG_NAME, EscapeScriptNode.handle)
# Helpers for Widget.render_with_errors, our extension to the Django widget API that allows widgets to
# take on the responsibility of rendering their own error messages
@register.filter
def render_with_errors(bound_field):
"""
Usage: {{ field|render_with_errors }} as opposed to {{ field }}.
If the field (a BoundField instance) has errors on it, and the associated widget implements
a render_with_errors method, call that; otherwise, call the regular widget rendering mechanism.
"""
widget = bound_field.field.widget
if bound_field.errors and hasattr(widget, 'render_with_errors'):
return widget.render_with_errors(bound_field.html_name, bound_field.value(), attrs={'id': bound_field.auto_id}, errors=bound_field.errors)
else:
return bound_field.as_widget()
@register.filter
def has_unrendered_errors(bound_field):
"""
Return true if this field has errors that were not accounted for by render_with_errors, because
the widget does not support the render_with_errors method
"""
return bound_field.errors and not hasattr(bound_field.field.widget, 'render_with_errors')
@register.filter(is_safe=True)
@stringfilter
def cautious_slugify(value):
return _cautious_slugify(value)
|
ModestoCabrera/is210-week-11-warmup
|
refs/heads/master
|
car.py
|
7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Contains the car class."""
class Car(object):
"""A moving vehicle definition."""
def __init__(self, color='red'):
"""Constructor for the Car() class.
Args:
color (string): The color of the car. Defaults to ``'red'``.
Attributes:
color (string): The color of the car.
"""
self.color = color
class Tire(object):
"""A round rubber thing."""
def __init__(self, miles=0):
"""Constructor for the Tire() class.
Args:
miles (integer): The number of miles on the Tire. Defaults to 0.
Attributes:
miles (integer): The number of miles on the Tire.
"""
self.miles = miles
def add_miles(self, miles):
"""Increments the tire mileage by the specified miles.
Args:
miles (integer): The number of miles to add to the tire.
"""
self.miles += miles
|
MarcJoan/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_erroneous/3_squashed_5.py
|
770
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
replaces = [
("migrations", "3_auto"),
("migrations", "4_auto"),
("migrations", "5_auto"),
]
dependencies = [("migrations", "2_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
mozilla/build-relengapi
|
refs/heads/master
|
relengapi/blueprints/tooltool/test_tables.py
|
1
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
from nose.tools import eq_
from relengapi.blueprints.tooltool import tables
from relengapi.lib import time
from relengapi.lib.testing.context import TestContext
test_context = TestContext(databases=[tables.DB_DECLARATIVE_BASE])
@test_context
def test_file_batches_relationship(app):
with app.app_context():
session = app.db.session(tables.DB_DECLARATIVE_BASE)
file = tables.File(size=100, sha512='abcd', visibility='internal')
session.add(file)
batch = tables.Batch(
uploaded=time.now(), author="dustin", message="hi")
session.add(batch)
bf = tables.BatchFile(batch=batch, file=file, filename="foo.txt")
session.add(bf)
session.commit()
with app.app_context():
file = tables.File.query.first()
eq_(file.batches['foo.txt'].message, 'hi')
with app.app_context():
batch = tables.Batch.query.first()
eq_(batch.files['foo.txt'].sha512, 'abcd')
|
Shaps/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/rpm_key.py
|
31
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to import third party repo keys to your rpm db
# Copyright: (c) 2013, Héctor Acosta <hector.acosta@gazzang.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: rpm_key
author:
- Hector Acosta (@hacosta) <hector.acosta@gazzang.com>
short_description: Adds or removes a gpg key from the rpm db
description:
- Adds or removes (rpm --import) a gpg key to your rpm database.
version_added: "1.3"
options:
key:
description:
- Key that will be modified. Can be a url, a file on the managed node, or a keyid if the key
already exists in the database.
required: true
state:
description:
- If the key will be imported or removed from the rpm db.
default: present
choices: [ absent, present ]
validate_certs:
description:
- If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
fingerprint:
description:
- The long-form fingerprint of the key being imported.
- This will be used to verify the specified key.
type: str
version_added: 2.9
'''
EXAMPLES = '''
# Example action to import a key from a url
- rpm_key:
state: present
key: http://apt.sw.be/RPM-GPG-KEY.dag.txt
# Example action to import a key from a file
- rpm_key:
state: present
key: /path/to/key.gpg
# Example action to ensure a key is not present in the db
- rpm_key:
state: absent
key: DEADB33F
# Verify the key, using a fingerprint, before import
- rpm_key:
key: /path/to/RPM-GPG-KEY.dag.txt
fingerprint: EBC6 E12C 62B1 C734 026B 2122 A20E 5214 6B8D 79E6
'''
import re
import os.path
import tempfile
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
def is_pubkey(string):
"""Verifies if string is a pubkey"""
pgp_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*"
return bool(re.match(pgp_regex, to_native(string, errors='surrogate_or_strict'), re.DOTALL))
class RpmKey(object):
def __init__(self, module):
# If the key is a url, we need to check if it's present to be idempotent,
# to do that, we need to check the keyid, which we can get from the armor.
keyfile = None
should_cleanup_keyfile = False
self.module = module
self.rpm = self.module.get_bin_path('rpm', True)
state = module.params['state']
key = module.params['key']
fingerprint = module.params['fingerprint']
if fingerprint:
fingerprint = fingerprint.replace(' ', '').upper()
self.gpg = self.module.get_bin_path('gpg')
if not self.gpg:
self.gpg = self.module.get_bin_path('gpg2', required=True)
if '://' in key:
keyfile = self.fetch_key(key)
keyid = self.getkeyid(keyfile)
should_cleanup_keyfile = True
elif self.is_keyid(key):
keyid = key
elif os.path.isfile(key):
keyfile = key
keyid = self.getkeyid(keyfile)
else:
self.module.fail_json(msg="Not a valid key %s" % key)
keyid = self.normalize_keyid(keyid)
if state == 'present':
if self.is_key_imported(keyid):
module.exit_json(changed=False)
else:
if not keyfile:
self.module.fail_json(msg="When importing a key, a valid file must be given")
if fingerprint:
has_fingerprint = self.getfingerprint(keyfile)
if fingerprint != has_fingerprint:
self.module.fail_json(
msg="The specified fingerprint, '%s', does not match the key fingerprint '%s'" % (fingerprint, has_fingerprint)
)
self.import_key(keyfile)
if should_cleanup_keyfile:
self.module.cleanup(keyfile)
module.exit_json(changed=True)
else:
if self.is_key_imported(keyid):
self.drop_key(keyid)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
def fetch_key(self, url):
"""Downloads a key from url, returns a valid path to a gpg key"""
rsp, info = fetch_url(self.module, url)
if info['status'] != 200:
self.module.fail_json(msg="failed to fetch key at %s , error was: %s" % (url, info['msg']))
key = rsp.read()
if not is_pubkey(key):
self.module.fail_json(msg="Not a public key: %s" % url)
tmpfd, tmpname = tempfile.mkstemp()
self.module.add_cleanup_file(tmpname)
tmpfile = os.fdopen(tmpfd, "w+b")
tmpfile.write(key)
tmpfile.close()
return tmpname
def normalize_keyid(self, keyid):
"""Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is uppercase"""
ret = keyid.strip().upper()
if ret.startswith('0x'):
return ret[2:]
elif ret.startswith('0X'):
return ret[2:]
else:
return ret
def getkeyid(self, keyfile):
stdout, stderr = self.execute_command([self.gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', keyfile])
for line in stdout.splitlines():
line = line.strip()
if line.startswith('pub:'):
return line.split(':')[4]
self.module.fail_json(msg="Unexpected gpg output")
def getfingerprint(self, keyfile):
stdout, stderr = self.execute_command([
self.gpg, '--no-tty', '--batch', '--with-colons',
'--fixed-list-mode', '--with-fingerprint', keyfile
])
for line in stdout.splitlines():
line = line.strip()
if line.startswith('fpr:'):
# As mentioned here,
#
# https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob_plain;f=doc/DETAILS
#
# The description of the `fpr` field says
#
# "fpr :: Fingerprint (fingerprint is in field 10)"
#
return line.split(':')[9]
self.module.fail_json(msg="Unexpected gpg output")
def is_keyid(self, keystr):
"""Verifies if a key, as provided by the user is a keyid"""
return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
def execute_command(self, cmd):
rc, stdout, stderr = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg=stderr)
return stdout, stderr
def is_key_imported(self, keyid):
cmd = self.rpm + ' -q gpg-pubkey'
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0: # No key is installed on system
return False
cmd += ' --qf "%{description}" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'
stdout, stderr = self.execute_command(cmd)
for line in stdout.splitlines():
if keyid in line.split(':')[4]:
return True
return False
def import_key(self, keyfile):
if not self.module.check_mode:
self.execute_command([self.rpm, '--import', keyfile])
def drop_key(self, keyid):
if not self.module.check_mode:
self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % keyid[-8:].lower()])
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
key=dict(type='str', required=True),
fingerprint=dict(type='str'),
validate_certs=dict(type='bool', default=True),
),
supports_check_mode=True,
)
RpmKey(module)
if __name__ == '__main__':
main()
|
AloneRoad/Inforlearn
|
refs/heads/1.0-rc3
|
.google_appengine/lib/django/django/template/loader_tags.py
|
33
|
from django.template import TemplateSyntaxError, TemplateDoesNotExist, resolve_variable
from django.template import Library, Node
from django.template.loader import get_template, get_template_from_string, find_template_source
from django.conf import settings
register = Library()
class ExtendsError(Exception):
pass
class BlockNode(Node):
def __init__(self, name, nodelist, parent=None):
self.name, self.nodelist, self.parent = name, nodelist, parent
def __repr__(self):
return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)
def render(self, context):
context.push()
# Save context in case of block.super().
self.context = context
context['block'] = self
result = self.nodelist.render(context)
context.pop()
return result
def super(self):
if self.parent:
return self.parent.render(self.context)
return ''
def add_parent(self, nodelist):
if self.parent:
self.parent.add_parent(nodelist)
else:
self.parent = BlockNode(self.name, nodelist)
class ExtendsNode(Node):
def __init__(self, nodelist, parent_name, parent_name_expr, template_dirs=None):
self.nodelist = nodelist
self.parent_name, self.parent_name_expr = parent_name, parent_name_expr
self.template_dirs = template_dirs
def get_parent(self, context):
if self.parent_name_expr:
self.parent_name = self.parent_name_expr.resolve(context)
parent = self.parent_name
if not parent:
error_msg = "Invalid template name in 'extends' tag: %r." % parent
if self.parent_name_expr:
error_msg += " Got this from the %r variable." % self.parent_name_expr #TODO nice repr.
raise TemplateSyntaxError, error_msg
if hasattr(parent, 'render'):
return parent # parent is a Template object
try:
source, origin = find_template_source(parent, self.template_dirs)
except TemplateDoesNotExist:
raise TemplateSyntaxError, "Template %r cannot be extended, because it doesn't exist" % parent
else:
return get_template_from_string(source, origin, parent)
def render(self, context):
compiled_parent = self.get_parent(context)
parent_is_child = isinstance(compiled_parent.nodelist[0], ExtendsNode)
parent_blocks = dict([(n.name, n) for n in compiled_parent.nodelist.get_nodes_by_type(BlockNode)])
for block_node in self.nodelist.get_nodes_by_type(BlockNode):
# Check for a BlockNode with this node's name, and replace it if found.
try:
parent_block = parent_blocks[block_node.name]
except KeyError:
# This BlockNode wasn't found in the parent template, but the
# parent block might be defined in the parent's *parent*, so we
# add this BlockNode to the parent's ExtendsNode nodelist, so
# it'll be checked when the parent node's render() is called.
if parent_is_child:
compiled_parent.nodelist[0].nodelist.append(block_node)
else:
# Keep any existing parents and add a new one. Used by BlockNode.
parent_block.parent = block_node.parent
parent_block.add_parent(parent_block.nodelist)
parent_block.nodelist = block_node.nodelist
return compiled_parent.render(context)
class ConstantIncludeNode(Node):
def __init__(self, template_path):
try:
t = get_template(template_path)
self.template = t
except:
if settings.TEMPLATE_DEBUG:
raise
self.template = None
def render(self, context):
if self.template:
return self.template.render(context)
else:
return ''
class IncludeNode(Node):
def __init__(self, template_name):
self.template_name = template_name
def render(self, context):
try:
template_name = resolve_variable(self.template_name, context)
t = get_template(template_name)
return t.render(context)
except TemplateSyntaxError, e:
if settings.TEMPLATE_DEBUG:
raise
return ''
except:
return '' # Fail silently for invalid included templates.
def do_block(parser, token):
"""
Define a block that can be overridden by child templates.
"""
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError, "'%s' tag takes only one argument" % bits[0]
block_name = bits[1]
# Keep track of the names of BlockNodes found in this template, so we can
# check for duplication.
try:
if block_name in parser.__loaded_blocks:
raise TemplateSyntaxError, "'%s' tag with name '%s' appears more than once" % (bits[0], block_name)
parser.__loaded_blocks.append(block_name)
except AttributeError: # parser.__loaded_blocks isn't a list yet
parser.__loaded_blocks = [block_name]
nodelist = parser.parse(('endblock', 'endblock %s' % block_name))
parser.delete_first_token()
return BlockNode(block_name, nodelist)
def do_extends(parser, token):
"""
Signal that this template extends a parent template.
This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)
uses the literal value "base" as the name of the parent template to extend,
or ``{% extends variable %}`` uses the value of ``variable`` as either the
name of the parent template to extend (if it evaluates to a string) or as
the parent template itself (if it evaluates to a Template object).
"""
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError, "'%s' takes one argument" % bits[0]
parent_name, parent_name_expr = None, None
if bits[1][0] in ('"', "'") and bits[1][-1] == bits[1][0]:
parent_name = bits[1][1:-1]
else:
parent_name_expr = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError, "'%s' cannot appear more than once in the same template" % bits[0]
return ExtendsNode(nodelist, parent_name, parent_name_expr)
def do_include(parser, token):
"""
Loads a template and renders it with the current context.
Example::
{% include "foo/some_include" %}
"""
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError, "%r tag takes one argument: the name of the template to be included" % bits[0]
path = bits[1]
if path[0] in ('"', "'") and path[-1] == path[0]:
return ConstantIncludeNode(path[1:-1])
return IncludeNode(bits[1])
register.tag('block', do_block)
register.tag('extends', do_extends)
register.tag('include', do_include)
|
ondra-novak/chromium.src
|
refs/heads/nw
|
native_client_sdk/PRESUBMIT.py
|
33
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for isolate.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
def CommonChecks(input_api, output_api):
output = []
disabled_warnings = [
'F0401', # Unable to import module
'R0401', # Cyclic import
'W0613', # Unused argument
'E1103', # subprocess.communicate() generates these :(
'R0201', # method could be function (doesn't reference self)
]
black_list = [
r'src[\\\/]build_tools[\\\/]tests[\\\/].*',
r'src[\\\/]build_tools[\\\/]sdk_tools[\\\/]third_party[\\\/].*',
r'src[\\\/]doc[\\\/]*',
r'src[\\\/]gonacl_appengine[\\\/]*',
]
canned = input_api.canned_checks
output.extend(canned.RunPylint(input_api, output_api, black_list=black_list,
disabled_warnings=disabled_warnings))
return output
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
def GetPreferredTryMasters(project, change):
return {
'tryserver.chromium.linux': {
'linux_nacl_sdk': set(['defaulttests']),
'linux_nacl_sdk_build': set(['defaulttests']),
},
'tryserver.chromium.win': {
'win_nacl_sdk': set(['defaulttests']),
'win_nacl_sdk_build': set(['defaulttests']),
},
'tryserver.chromium.mac': {
'mac_nacl_sdk': set(['defaulttests']),
'mac_nacl_sdk_build': set(['defaulttests']),
}
}
|
shubhdev/openedx
|
refs/heads/master
|
lms/djangoapps/courseware/tests/test_course_info.py
|
30
|
"""
Test the course_info xblock
"""
import mock
from nose.plugins.attrib import attr
from django.core.urlresolvers import reverse
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_CLOSED_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.models import CourseEnrollment
from .helpers import LoginEnrollmentTestCase
@attr('shard_1')
class CourseInfoTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for the Course Info page
"""
def setUp(self):
super(CourseInfoTestCase, self).setUp()
self.course = CourseFactory.create()
self.page = ItemFactory.create(
category="course_info", parent_location=self.course.location,
data="OOGIE BLOOGIE", display_name="updates"
)
def test_logged_in_unenrolled(self):
self.setup_user()
url = reverse('info', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
self.assertIn("You are not currently enrolled in this course", resp.content)
def test_logged_in_enrolled(self):
self.enroll(self.course)
url = reverse('info', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertNotIn("You are not currently enrolled in this course", resp.content)
def test_anonymous_user(self):
url = reverse('info', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertNotIn("OOGIE BLOOGIE", resp.content)
def test_logged_in_not_enrolled(self):
self.setup_user()
url = reverse('info', args=[self.course.id.to_deprecated_string()])
self.client.get(url)
# Check whether the user has been enrolled in the course.
# There was a bug in which users would be automatically enrolled
# with is_active=False (same as if they enrolled and immediately unenrolled).
# This verifies that the user doesn't have *any* enrollment record.
enrollment_exists = CourseEnrollment.objects.filter(
user=self.user, course_id=self.course.id
).exists()
self.assertFalse(enrollment_exists)
@attr('shard_1')
class CourseInfoTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for the Course Info page for an XML course
"""
MODULESTORE = TEST_DATA_MIXED_CLOSED_MODULESTORE
# The following XML test course (which lives at common/test/data/2014)
# is closed; we're testing that a course info page still appears when
# the course is already closed
xml_course_key = SlashSeparatedCourseKey('edX', 'detached_pages', '2014')
# this text appears in that course's course info page
# common/test/data/2014/info/updates.html
xml_data = "course info 463139"
@mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_logged_in_xml(self):
self.setup_user()
url = reverse('info', args=[self.xml_course_key.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_anonymous_user_xml(self):
url = reverse('info', args=[self.xml_course_key.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertNotIn(self.xml_data, resp.content)
|
Kwpolska/rot
|
refs/heads/master
|
pbar.py
|
1
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# River of Text
# Copyright © 2013, Kwpolska.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions, and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the author of this software nor the names of
# contributors to this software may be used to endorse or promote
# products derived from this software without specific prior written
# consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
import sys
def drawpbar(val, m, size):
    """Draw a `size`-character progress bar for `val` out of `m` on stdout."""
    ratio = val / m
    # '\r' rewinds to the start of the line so the bar is redrawn in place;
    # the trailing field prints the completion percentage right-aligned.
    sys.stdout.write((u'\r[{0: <' + str(size) + u'}]{1: >4.0%}').format('#' * int(ratio * size), ratio))
    sys.stdout.flush()
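# Editor's sketch (not part of the original script): a minimal driver for
# drawpbar, assuming a 40-character bar; the sleep is only there to make the
# in-place redraw visible.
if __name__ == '__main__':
    import time
    for i in range(101):
        drawpbar(i, 100, 40)
        time.sleep(0.01)
    sys.stdout.write(u'\n')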
|
googleads/google-ads-python
|
refs/heads/master
|
google/ads/googleads/v8/services/types/media_file_service.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.enums.types import (
response_content_type as gage_response_content_type,
)
from google.ads.googleads.v8.resources.types import (
media_file as gagr_media_file,
)
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={
"GetMediaFileRequest",
"MutateMediaFilesRequest",
"MediaFileOperation",
"MutateMediaFilesResponse",
"MutateMediaFileResult",
},
)
class GetMediaFileRequest(proto.Message):
r"""Request message for
[MediaFileService.GetMediaFile][google.ads.googleads.v8.services.MediaFileService.GetMediaFile]
Attributes:
resource_name (str):
Required. The resource name of the media file
to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
class MutateMediaFilesRequest(proto.Message):
r"""Request message for
[MediaFileService.MutateMediaFiles][google.ads.googleads.v8.services.MediaFileService.MutateMediaFiles]
Attributes:
customer_id (str):
Required. The ID of the customer whose media
files are being modified.
operations (Sequence[google.ads.googleads.v8.services.types.MediaFileOperation]):
Required. The list of operations to perform
on individual media files.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v8.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(proto.STRING, number=1,)
operations = proto.RepeatedField(
proto.MESSAGE, number=2, message="MediaFileOperation",
)
partial_failure = proto.Field(proto.BOOL, number=3,)
validate_only = proto.Field(proto.BOOL, number=4,)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class MediaFileOperation(proto.Message):
r"""A single operation to create media file.
Attributes:
create (google.ads.googleads.v8.resources.types.MediaFile):
Create operation: No resource name is
expected for the new media file.
"""
create = proto.Field(
proto.MESSAGE,
number=1,
oneof="operation",
message=gagr_media_file.MediaFile,
)
class MutateMediaFilesResponse(proto.Message):
r"""Response message for a media file mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v8.services.types.MutateMediaFileResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE, number=3, message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE, number=2, message="MutateMediaFileResult",
)
class MutateMediaFileResult(proto.Message):
r"""The result for the media file mutate.
Attributes:
resource_name (str):
The resource name returned for successful
operations.
media_file (google.ads.googleads.v8.resources.types.MediaFile):
The mutated media file with only mutable fields after
mutate. The field will only be returned when
response_content_type is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(proto.STRING, number=1,)
media_file = proto.Field(
proto.MESSAGE, number=2, message=gagr_media_file.MediaFile,
)
__all__ = tuple(sorted(__protobuf__.manifest))
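# Editor's sketch (not part of the generated module): building a mutate request
# from these proto-plus messages. The customer ID and the media file name are
# placeholders, and it is assumed that the MediaFile resource exposes a `name`
# field in this API version.
def _example_build_mutate_request():  # hypothetical helper, for illustration only
    media_file = gagr_media_file.MediaFile(name="Logo 1")
    operation = MediaFileOperation(create=media_file)
    return MutateMediaFilesRequest(
        customer_id="1234567890",
        operations=[operation],
        validate_only=True,  # validate the operation without executing it
    )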
|
larsmans/scikit-learn
|
refs/heads/master
|
sklearn/linear_model/least_angle.py
|
8
|
"""
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_array, check_X_y
from ..cross_validation import _check_cv as check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X). If ``'auto'``, the Gram
matrix is precomputed from the given X if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``True``, the entire path is returned; otherwise only the
last point of the path is returned.
Returns
--------
alphas: array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active: array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs: array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
# even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap only works in place if the matrix is
# Fortran-contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in numerical error that is greater than
# the remaining correlation with the
# regressors. Time to bail out.
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
# correlation between each inactive variable and the
# equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if there is a huge number of features, this takes 50% of the time; I
# think it could be avoided if we just updated it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not of length 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not of length 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i+1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
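# Editor's sketch (not part of the original module): a minimal use of lars_path
# on synthetic data; the helper name and the data are illustrative only.
def _example_lars_path():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 10)
    y_demo = X_demo[:, 0] - 2 * X_demo[:, 3] + 0.01 * rng.randn(50)
    # method='lasso' returns the full regularization path:
    # one column of coefs per value in alphas
    alphas, active, coefs = lars_path(X_demo, y_demo, method='lasso')
    return alphas, active, coefs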
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps: float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Returns
-------
self : object
returns an instance of self.
"""
X = check_array(X)
y = np.asarray(y)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X). If ``'auto'``, the Gram
matrix is precomputed from the given X if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter: integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps: float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean squared error on left-out data for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True):
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
# it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
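# Editor's sketch (not in the original source): fitting LarsCV on synthetic
# data and reading back the selected regularization parameter. The helper name
# and the data are illustrative only.
def _example_lars_cv():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(60, 8)
    y_demo = X_demo[:, 1] - 0.5 * X_demo[:, 4] + 0.1 * rng.randn(60)
    model = LarsCV().fit(X_demo, y_demo)
    # alpha_ holds the alpha that minimizes the left-out mean squared error
    return model.alpha_, model.coef_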
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter: integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps: float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean squared error on left-out data for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain the data well while being simple.
Parameters
----------
criterion: 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter: integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps: float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X = check_array(X)
y = np.asarray(y)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
|
silentfuzzle/calibre
|
refs/heads/master
|
src/calibre/utils/search_query_parser.py
|
14
|
#!/usr/bin/env python2
# encoding: utf-8
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
A parser for search queries with a syntax very similar to that used by
the Google search engine.
For details on the search query syntax see :class:`SearchQueryParser`.
To use the parser, subclass :class:`SearchQueryParser` and implement the
methods :method:`SearchQueryParser.universal_set` and
:method:`SearchQueryParser.get_matches`. See for example :class:`Tester`.
If this module is run, it will perform a series of unit tests.
'''
import sys, operator, weakref, re
from calibre.constants import preferred_encoding
from calibre.utils.icu import sort_key
from calibre import prints
'''
This class manages access to the preference holding the saved search queries.
It exists to ensure that unicode is used throughout, and also to permit
adding other fields, such as whether the search is a 'favorite'
'''
class SavedSearchQueries(object):
queries = {}
opt_name = ''
def __init__(self, db, _opt_name):
self.opt_name = _opt_name
if db is not None:
self.queries = db.prefs.get(self.opt_name, {})
else:
self.queries = {}
try:
self._db = weakref.ref(db)
except TypeError:
# db could be None
self._db = lambda : None
@property
def db(self):
return self._db()
def force_unicode(self, x):
if not isinstance(x, unicode):
x = x.decode(preferred_encoding, 'replace')
return x
def add(self, name, value):
db = self.db
if db is not None:
self.queries[self.force_unicode(name)] = self.force_unicode(value).strip()
db.prefs[self.opt_name] = self.queries
def lookup(self, name):
return self.queries.get(self.force_unicode(name), None)
def delete(self, name):
db = self.db
if db is not None:
self.queries.pop(self.force_unicode(name), False)
db.prefs[self.opt_name] = self.queries
def rename(self, old_name, new_name):
db = self.db
if db is not None:
self.queries[self.force_unicode(new_name)] = \
self.queries.get(self.force_unicode(old_name), None)
self.queries.pop(self.force_unicode(old_name), False)
db.prefs[self.opt_name] = self.queries
def set_all(self, smap):
db = self.db
if db is not None:
self.queries = db.prefs[self.opt_name] = smap
def names(self):
return sorted(self.queries.keys(),key=sort_key)
'''
Create a global instance of the saved searches. It is global so that the searches
are common across all instances of the parser (devices, library, etc).
'''
ss = SavedSearchQueries(None, None)
def set_saved_searches(db, opt_name):
global ss
ss = SavedSearchQueries(db, opt_name)
def saved_searches():
global ss
return ss
def global_lookup_saved_search(name):
return ss.lookup(name)
'''
Parse a search expression into a series of potentially recursive operations.
Note that the interpreter wants binary operators, not n-ary ops. This is why we
recurse instead of iterating when building sequences of the same op.
The syntax is more than a bit twisted. In particular, the handling of colons
in the base token requires semantic analysis.
Also note that the query string is lowercased before analysis. This is OK because
calibre's searches are all case-insensitive.
Grammar:
prog ::= or_expression
or_expression ::= and_expression [ 'or' or_expression ]
and_expression ::= not_expression [ [ 'and' ] and_expression ]
not_expression ::= [ 'not' ] location_expression
location_expression ::= base_token | ( '(' or_expression ')' )
base_token ::= a sequence of letters and colons, perhaps quoted
'''
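# Editor's illustration (not part of the original file): with the grammar
# above, and assuming 'author' and 'tag' are registered locations, a query
# such as
#     author:asimov and not tag:read
# is parsed into the nested prefix-list program
#     ['and', ['token', 'author', 'asimov'],
#             ['not', ['token', 'tag', 'read']]]
# which SearchQueryParser.evaluate() then walks recursively.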
class Parser(object):
def __init__(self):
self.current_token = 0
self.tokens = None
OPCODE = 1
WORD = 2
QUOTED_WORD = 3
EOF = 4
# Had to translate named constants to numeric values
lex_scanner = re.Scanner([
(r'[()]', lambda x,t: (1, t)),
(r'@.+?:[^")\s]+', lambda x,t: (2, unicode(t))),
(r'[^"()\s]+', lambda x,t: (2, unicode(t))),
(r'".*?((?<!\\)")', lambda x,t: (3, t[1:-1])),
(r'\s+', None)
], flags=re.DOTALL)
def token(self, advance=False):
if self.is_eof():
return None
res = self.tokens[self.current_token][1]
if advance:
self.current_token += 1
return res
def lcase_token(self, advance=False):
if self.is_eof():
return None
res = self.tokens[self.current_token][1]
if advance:
self.current_token += 1
return icu_lower(res)
def token_type(self):
if self.is_eof():
return self.EOF
return self.tokens[self.current_token][0]
def is_eof(self):
return self.current_token >= len(self.tokens)
def advance(self):
self.current_token += 1
def parse(self, expr, locations):
self.locations = locations
# Strip out escaped backslashes, quotes and parens so that the
# lex scanner doesn't get confused. We put them back later.
expr = expr.replace(u'\\\\', u'\x01').replace(u'\\"', u'\x02')
expr = expr.replace(u'\\(', u'\x03').replace(u'\\)', u'\x04')
self.tokens = self.lex_scanner.scan(expr)[0]
for (i,tok) in enumerate(self.tokens):
tt, tv = tok
if tt == self.WORD or tt == self.QUOTED_WORD:
self.tokens[i] = (tt,
tv.replace(u'\x01', u'\\').replace(u'\x02', u'"').
replace(u'\x03', u'(').replace(u'\x04', u')'))
self.current_token = 0
prog = self.or_expression()
if not self.is_eof():
raise ParseException(_('Extra characters at end of search'))
#prints(self.tokens, '\n', prog)
return prog
def or_expression(self):
lhs = self.and_expression()
if self.lcase_token() == 'or':
self.advance()
return ['or', lhs, self.or_expression()]
return lhs
def and_expression(self):
lhs = self.not_expression()
if self.lcase_token() == 'and':
self.advance()
return ['and', lhs, self.and_expression()]
# Account for the optional 'and'
if (self.token_type() in [self.WORD, self.QUOTED_WORD] and
self.lcase_token() != 'or'):
return ['and', lhs, self.and_expression()]
return lhs
def not_expression(self):
if self.lcase_token() == 'not':
self.advance()
return ['not', self.not_expression()]
return self.location_expression()
def location_expression(self):
if self.token_type() == self.OPCODE and self.token() == '(':
self.advance()
res = self.or_expression()
if self.token_type() != self.OPCODE or self.token(advance=True) != ')':
raise ParseException(_('missing )'))
return res
if self.token_type() not in (self.WORD, self.QUOTED_WORD):
raise ParseException(_('Invalid syntax. Expected a lookup name or a word'))
return self.base_token()
def base_token(self):
if self.token_type() == self.QUOTED_WORD:
return ['token', 'all', self.token(advance=True)]
words = self.token(advance=True).split(':')
# The complexity here comes from having colon-separated search
# values. That forces us to check that the first "word" in a colon-
# separated group is a valid location. If not, then the token must
# be reconstructed. We also have the problem that locations can be
# followed by quoted strings that appear as the next token. and that
# tokens can be a sequence of colons.
# We have a location if there is more than one word and the first
# word is in locations. This check could produce a "wrong" answer if
# the search string is something like 'author: "foo"' because it
# will be interpreted as 'author:"foo"'. I am choosing to accept the
# possible error. The expression should be written '"author:" foo'
if len(words) > 1 and words[0].lower() in self.locations:
loc = words[0].lower()
words = words[1:]
if len(words) == 1 and self.token_type() == self.QUOTED_WORD:
return ['token', loc, self.token(advance=True)]
return ['token', icu_lower(loc), ':'.join(words)]
return ['token', 'all', ':'.join(words)]
class ParseException(Exception):
@property
def msg(self):
if len(self.args) > 0:
return self.args[0]
return ""
class SearchQueryParser(object):
'''
Parses a search query.
A search query consists of tokens. The tokens can be combined using
the `or`, `and` and `not` operators as well as grouped using parentheses.
When no operator is specified between two tokens, `and` is assumed.
Each token is a string of the form `location:query`. `location` is a string
from :member:`DEFAULT_LOCATIONS`. It is optional. If it is omitted, it is assumed to
be `all`. `query` is an arbitrary string that must not contain parentheses.
If it contains whitespace, it should be quoted by enclosing it in `"` marks.
Examples::
* `Asimov` [search for the string "Asimov" in location `all`]
* `comments:"This is a good book"` [search for "This is a good book" in `comments`]
* `author:Asimov tag:unread` [search for books by Asimov that have been tagged as unread]
* `author:Asimov or author:Hardy` [search for books by Asimov or Hardy]
* `(author:Asimov or author:Hardy) and not tag:read` [search for unread books by Asimov or Hardy]
'''
@staticmethod
def run_tests(parser, result, tests):
failed = []
for test in tests:
prints('\tTesting:', test[0], end=' ')
res = parser.parseString(test[0])
if list(res.get(result, None)) == test[1]:
print 'OK'
else:
print 'FAILED:', 'Expected:', test[1], 'Got:', list(res.get(result, None))
failed.append(test[0])
return failed
def __init__(self, locations, test=False, optimize=False, lookup_saved_search=None, parse_cache=None):
self.sqp_initialize(locations, test=test, optimize=optimize)
self.parser = Parser()
self.lookup_saved_search = global_lookup_saved_search if lookup_saved_search is None else lookup_saved_search
self.sqp_parse_cache = parse_cache
def sqp_change_locations(self, locations):
self.sqp_initialize(locations, optimize=self.optimize)
if self.sqp_parse_cache is not None:
self.sqp_parse_cache.clear()
def sqp_initialize(self, locations, test=False, optimize=False):
self.locations = locations
self._tests_failed = False
self.optimize = optimize
def parse(self, query, candidates=None):
# empty the list of searches used for recursion testing
self.recurse_level = 0
self.searches_seen = set([])
candidates = self.universal_set()
return self._parse(query, candidates=candidates)
# this parse is used internally because it doesn't clear the
# recursive search test list. However, we permit seeing the
# same search a few times because the search might appear within
# another search.
def _parse(self, query, candidates=None):
self.recurse_level += 1
try:
res = self.sqp_parse_cache.get(query, None)
except AttributeError:
res = None
if res is None:
try:
res = self.parser.parse(query, self.locations)
except RuntimeError:
raise ParseException(_('Failed to parse query, recursion limit reached: %s')%repr(query))
if self.sqp_parse_cache is not None:
self.sqp_parse_cache[query] = res
if candidates is None:
candidates = self.universal_set()
t = self.evaluate(res, candidates)
self.recurse_level -= 1
return t
def method(self, group_name):
return getattr(self, 'evaluate_'+group_name)
def evaluate(self, parse_result, candidates):
return self.method(parse_result[0])(parse_result[1:], candidates)
def evaluate_and(self, argument, candidates):
# RHS checks only those items matched by LHS
# returns result of RHS check: RHmatches(LHmatches(c))
# return self.evaluate(argument[0]).intersection(self.evaluate(argument[1]))
l = self.evaluate(argument[0], candidates)
return l.intersection(self.evaluate(argument[1], l))
def evaluate_or(self, argument, candidates):
# RHS checks only those elements not matched by LHS
# returns LHS union RHS: LHmatches(c) + RHmatches(c-LHmatches(c))
# return self.evaluate(argument[0]).union(self.evaluate(argument[1]))
l = self.evaluate(argument[0], candidates)
return l.union(self.evaluate(argument[1], candidates.difference(l)))
def evaluate_not(self, argument, candidates):
# unary op checks only candidates. Result: list of items matching
# returns: c - matches(c)
# return self.universal_set().difference(self.evaluate(argument[0]))
return candidates.difference(self.evaluate(argument[0], candidates))
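# Editor's illustration (not part of the original file): suppose
# candidates = {1, 2, 3}, matches('a') = {1, 2} and matches('b') = {2, 3}.
# Then 'a and b' evaluates to {1, 2} intersected with the matches of 'b'
# over {1, 2}, i.e. {2}; 'a or b' evaluates to {1, 2} union the matches of
# 'b' over the remainder {3}, i.e. {1, 2, 3}; and 'not a' evaluates to
# {1, 2, 3} - {1, 2} = {3}.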
# def evaluate_parenthesis(self, argument, candidates):
# return self.evaluate(argument[0], candidates)
def evaluate_token(self, argument, candidates):
location = argument[0]
query = argument[1]
if location.lower() == 'search':
if query.startswith('='):
query = query[1:]
try:
if query in self.searches_seen:
raise ParseException(_('Recursive saved search: {0}').format(query))
if self.recurse_level > 5:
self.searches_seen.add(query)
return self._parse(self.lookup_saved_search(query), candidates)
except ParseException as e:
raise e
except: # convert all exceptions (e.g., missing key) to a parse error
import traceback
traceback.print_exc()
raise ParseException(_('Unknown error in saved search: {0}').format(query))
return self._get_matches(location, query, candidates)
def _get_matches(self, location, query, candidates):
if self.optimize:
return self.get_matches(location, query, candidates=candidates)
else:
return self.get_matches(location, query)
def get_matches(self, location, query, candidates=None):
'''
Should return the set of matches for :param:`location` and :param:`query`.
The search must be performed over all entries if :param:`candidates` is
None; otherwise only over the items in candidates.
:param:`location` is one of the items in :member:`SearchQueryParser.DEFAULT_LOCATIONS`.
:param:`query` is a string literal.
:return: None or a subset of the set returned by :meth:`universal_set`.
'''
return set([])
def universal_set(self):
'''
Should return the set of all matches.
'''
return set([])
# Testing {{{
class Tester(SearchQueryParser):
texts = {
1: [u'Eugenie Grandet', u'Honor\xe9 de Balzac', u'manybooks.net', u'lrf'],
2: [u'Fanny Hill', u'John Cleland', u'manybooks.net', u'lrf'],
3: [u'Persuasion', u'Jane Austen', u'manybooks.net', u'lrf'],
4: [u'Psmith, Journalist', u'P. G. Wodehouse', u'Some Publisher', u'lrf'],
5: [u'The Complete Works of William Shakespeare',
u'William Shakespeare',
u'manybooks.net',
u'lrf'],
6: [u'The History of England, Volume I',
u'David Hume',
u'manybooks.net',
u'lrf'],
7: [u'Someone Comes to Town, Someone Leaves Town',
u'Cory Doctorow',
u'Tor Books',
u'lrf'],
8: [u'Stalky and Co.', u'Rudyard Kipling', u'manybooks.net', u'lrf'],
9: [u'A Game of Thrones', u'George R. R. Martin', None, u'lrf,rar'],
10: [u'A Clash of Kings', u'George R. R. Martin', None, u'lrf,rar'],
11: [u'A Storm of Swords', u'George R. R. Martin', None, u'lrf,rar'],
12: [u'Biggles - Pioneer Air Fighter', u'W. E. Johns', None, u'lrf,rtf'],
13: [u'Biggles of the Camel Squadron',
u'W. E. Johns',
u'London:Thames, (1977)',
u'lrf,rtf'],
14: [u'A Feast for Crows', u'George R. R. Martin', None, u'lrf,rar'],
15: [u'Cryptonomicon', u'Neal Stephenson', None, u'lrf,rar'],
16: [u'Quicksilver', u'Neal Stephenson', None, u'lrf,zip'],
17: [u'The Comedies of William Shakespeare',
u'William Shakespeare',
None,
u'lrf'],
18: [u'The Histories of William Shakespeare',
u'William Shakespeare',
None,
u'lrf'],
19: [u'The Tragedies of William Shakespeare',
u'William Shakespeare',
None,
u'lrf'],
20: [u'An Ideal Husband', u'Oscar Wilde', u'manybooks.net', u'lrf'],
21: [u'Flight of the Nighthawks', u'Raymond E. Feist', None, u'lrf,rar'],
22: [u'Into a Dark Realm', u'Raymond E. Feist', None, u'lrf,rar'],
23: [u'The Sundering', u'Walter Jon Williams', None, u'lrf,rar'],
24: [u'The Praxis', u'Walter Jon Williams', None, u'lrf,rar'],
25: [u'Conventions of War', u'Walter Jon Williams', None, u'lrf,rar'],
26: [u'Banewreaker', u'Jacqueline Carey', None, u'lrf,rar'],
27: [u'Godslayer', u'Jacqueline Carey', None, u'lrf,rar'],
28: [u"Kushiel's Scion", u'Jacqueline Carey', None, u'lrf,rar'],
29: [u'Underworld', u'Don DeLillo', None, u'lrf,rar'],
30: [u'Genghis Khan and The Making of the Modern World',
u'Jack Weatherford Orc',
u'Three Rivers Press',
u'lrf,zip'],
31: [u'The Best and the Brightest',
u'David Halberstam',
u'Modern Library',
u'lrf,zip'],
32: [u'The Killer Angels', u'Michael Shaara', None, u'html,lrf'],
33: [u'Band Of Brothers', u'Stephen E Ambrose', None, u'lrf,txt'],
34: [u'The Gates of Rome', u'Conn Iggulden', None, u'lrf,rar'],
35: [u'The Death of Kings', u'Conn Iggulden', u'Bantam Dell', u'lit,lrf'],
36: [u'The Field of Swords', u'Conn Iggulden', None, u'lrf,rar'],
37: [u'Masterman Ready', u'Marryat, Captain Frederick', None, u'lrf'],
38: [u'With the Lightnings',
u'David Drake',
u'Baen Publishing Enterprises',
u'lit,lrf'],
39: [u'Lt. Leary, Commanding',
u'David Drake',
u'Baen Publishing Enterprises',
u'lit,lrf'],
40: [u'The Far Side of The Stars',
u'David Drake',
u'Baen Publishing Enterprises',
u'lrf,rar'],
41: [u'The Way to Glory',
u'David Drake',
u'Baen Publishing Enterprises',
u'lrf,rar'],
42: [u'Some Golden Harbor', u'David Drake', u'Baen Books', u'lrf,rar'],
43: [u'Harry Potter And The Half-Blood Prince',
u'J. K. Rowling',
None,
u'lrf,rar'],
44: [u'Harry Potter and the Order of the Phoenix',
u'J. K. Rowling',
None,
u'lrf,rtf'],
45: [u'The Stars at War', u'David Weber , Steve White', None, u'lrf,rtf'],
46: [u'The Stars at War II',
u'Steve White',
u'Baen Publishing Enterprises',
u'lrf,rar'],
47: [u'Exodus', u'Steve White,Shirley Meier', u'Baen Books', u'lrf,rar'],
48: [u'Harry Potter and the Goblet of Fire',
u'J. K. Rowling',
None,
u'lrf,rar'],
49: [u'Harry Potter and the Prisoner of Azkaban',
u'J. K. Rowling',
None,
u'lrf,rtf'],
50: [u'Harry Potter and the Chamber of Secrets',
u'J. K. Rowling',
None,
u'lit,lrf'],
51: [u'Harry Potter and the Deathly Hallows',
u'J.K. Rowling',
None,
u'lit,lrf,pdf'],
52: [u"His Majesty's Dragon", u'Naomi Novik', None, u'lrf,rar'],
53: [u'Throne of Jade', u'Naomi Novik', u'Del Rey', u'lit,lrf'],
54: [u'Black Powder War', u'Naomi Novik', u'Del Rey', u'lrf,rar'],
55: [u'War and Peace', u'Leo Tolstoy', u'gutenberg.org', u'lrf,txt'],
56: [u'Anna Karenina', u'Leo Tolstoy', u'gutenberg.org', u'lrf,txt'],
57: [u'A Shorter History of Rome',
u'Eugene Lawrence,Sir William Smith',
u'gutenberg.org',
u'lrf,zip'],
58: [u'The Name of the Rose', u'Umberto Eco', None, u'lrf,rar'],
71: [u"Wind Rider's Oath", u'David Weber', u'Baen', u'lrf'],
74: [u'Rally Cry', u'William R Forstchen', None, u'htm,lrf'],
86: [u'Empire of Ivory', u'Naomi Novik', None, u'lrf,rar'],
87: [u"Renegade's Magic", u'Robin Hobb', None, u'lrf,rar'],
89: [u'Master and commander',
u"Patrick O'Brian",
u'Fontana,\n1971',
u'lit,lrf'],
91: [u'A Companion to Wolves',
u'Sarah Monette,Elizabeth Beär',
None,
u'lrf,rar'],
92: [u'The Lions of al-Rassan', u'Guy Gavriel Kay', u'Eos', u'lit,lrf'],
93: [u'Gardens of the Moon', u'Steven Erikson', u'Tor Fantasy', u'lit,lrf'],
95: [u'The Master and Margarita',
u'Mikhail Bulgakov',
u'N.Y. : Knopf, 1992.',
u'lrf,rtf'],
120: [u'Deadhouse Gates',
u'Steven Erikson',
u'London : Bantam Books, 2001.',
u'lit,lrf'],
121: [u'Memories of Ice', u'Steven Erikson', u'Bantam Books', u'lit,lrf'],
123: [u'House of Chains', u'Steven Erikson', u'Bantam Books', u'lit,lrf'],
125: [u'Midnight Tides', u'Steven Erikson', u'Bantam Books', u'lit,lrf'],
126: [u'The Bonehunters', u'Steven Erikson', u'Bantam Press', u'lit,lrf'],
129: [u'Guns, germs, and steel: the fates of human societies',
u'Jared Diamond',
u'New York : W.W. Norton, c1997.',
u'lit,lrf'],
136: [u'Wildcards', u'George R. R. Martin', None, u'html,lrf'],
138: [u'Off Armageddon Reef', u'David Weber', u'Tor Books', u'lit,lrf'],
144: [u'Atonement',
u'Ian McEwan',
u'New York : Nan A. Talese/Doubleday, 2002.',
u'lrf,rar'],
146: [u'1632', u'Eric Flint', u'Baen Books', u'lit,lrf'],
147: [u'1633', u'David Weber,Eric Flint,Dru Blair', u'Baen', u'lit,lrf'],
148: [u'1634: The Baltic War',
u'David Weber,Eric Flint',
u'Baen',
u'lit,lrf'],
150: [u'The Dragonbone Chair', u'Tad Williams', u'DAW Trade', u'lrf,rtf'],
152: [u'The Little Book That Beats the Market',
u'Joel Greenblatt',
u'Wiley',
u'epub,lrf'],
153: [u'Pride of Carthage', u'David Anthony Durham', u'Anchor', u'lit,lrf'],
154: [u'Stone of farewell',
u'Tad Williams',
u'New York : DAW Books, 1990.',
u'lrf,txt'],
166: [u'American Gods', u'Neil Gaiman', u'HarperTorch', u'lit,lrf'],
176: [u'Pillars of the Earth',
u'Ken Follett',
u'New American Library',
u'lit,lrf'],
182: [u'The Eye of the world',
u'Robert Jordan',
u'New York : T. Doherty Associates, c1990.',
u'lit,lrf'],
188: [u'The Great Hunt', u'Robert Jordan', u'ATOM', u'lrf,zip'],
189: [u'The Dragon Reborn', u'Robert Jordan', None, u'lit,lrf'],
190: [u'The Shadow Rising', u'Robert Jordan', None, u'lit,lrf'],
191: [u'The Fires of Heaven',
u'Robert Jordan',
u'Time Warner Books Uk',
u'lit,lrf'],
216: [u'Lord of chaos',
u'Robert Jordan',
u'New York : TOR, c1994.',
u'lit,lrf'],
217: [u'A Crown of Swords', u'Robert Jordan', None, u'lit,lrf'],
236: [u'The Path of Daggers', u'Robert Jordan', None, u'lit,lrf'],
238: [u'The Client',
u'John Grisham',
u'New York : Island, 1994, c1993.',
u'lit,lrf'],
240: [u"Winter's Heart", u'Robert Jordan', None, u'lit,lrf'],
242: [u'In the Beginning was the Command Line',
u'Neal Stephenson',
None,
u'lrf,txt'],
249: [u'Crossroads of Twilight', u'Robert Jordan', None, u'lit,lrf'],
251: [u'Caves of Steel', u'Isaac Asimov', u'Del Rey', u'lrf,zip'],
253: [u"Hunter's Run",
u'George R. R. Martin,Gardner Dozois,Daniel Abraham',
u'Eos',
u'lrf,rar'],
257: [u'Knife of Dreams', u'Robert Jordan', None, u'lit,lrf'],
258: [u'Saturday',
u'Ian McEwan',
u'London : Jonathan Cape, 2005.',
u'lrf,txt'],
259: [u'My name is Red',
u'Orhan Pamuk; translated from the Turkish by Erda\u011f G\xf6knar',
u'New York : Alfred A. Knopf, 2001.',
u'lit,lrf'],
265: [u'Harbinger', u'David Mack', u'Star Trek', u'lit,lrf'],
267: [u'Summon the Thunder',
u'Dayton Ward,Kevin Dilmore',
u'Pocket Books',
u'lit,lrf'],
268: [u'Shalimar the Clown',
u'Salman Rushdie',
u'New York : Random House, 2005.',
u'lit,lrf'],
269: [u'Reap the Whirlwind', u'David Mack', u'Star Trek', u'lit,lrf'],
272: [u'Mistborn', u'Brandon Sanderson', u'Tor Fantasy', u'lrf,rar'],
273: [u'The Thousandfold Thought',
u'R. Scott Bakker',
u'Overlook TP',
u'lrf,rtf'],
276: [u'Elantris',
u'Brandon Sanderson',
u'New York : Tor, 2005.',
u'lrf,rar'],
291: [u'Sundiver',
u'David Brin',
u'New York : Bantam Books, 1995.',
u'lit,lrf'],
299: [u'Imperium', u'Robert Harris', u'Arrow', u'lrf,rar'],
300: [u'Startide Rising', u'David Brin', u'Bantam', u'htm,lrf'],
301: [u'The Uplift War', u'David Brin', u'Spectra', u'lit,lrf'],
304: [u'Brightness Reef', u'David Brin', u'Orbit', u'lrf,rar'],
305: [u"Infinity's Shore", u'David Brin', u'Spectra', u'txt'],
306: [u"Heaven's Reach", u'David Brin', u'Spectra', u'lrf,rar'],
325: [u"Foundation's Triumph", u'David Brin', u'Easton Press', u'lit,lrf'],
327: [u'I am Charlotte Simmons', u'Tom Wolfe', u'Vintage', u'htm,lrf'],
335: [u'The Currents of Space', u'Isaac Asimov', None, u'lit,lrf'],
340: [u'The Other Boleyn Girl',
u'Philippa Gregory',
u'Touchstone',
u'lit,lrf'],
341: [u"Old Man's War", u'John Scalzi', u'Tor', u'htm,lrf'],
342: [u'The Ghost Brigades',
u'John Scalzi',
u'Tor Science Fiction',
u'html,lrf'],
343: [u'The Last Colony', u'John S"calzi', u'Tor Books', u'html,lrf'],
344: [u'Gossip Girl', u'Cecily von Ziegesar', u'Warner Books', u'lrf,rtf'],
347: [u'Little Brother', u'Cory Doctorow', u'Tor Teen', u'lrf'],
348: [u'The Reality Dysfunction',
u'Peter F. Hamilton',
u'Pan MacMillan',
u'lit,lrf'],
353: [u'A Thousand Splendid Suns',
u'Khaled Hosseini',
u'Center Point Large Print',
u'lit,lrf'],
354: [u'Amsterdam', u'Ian McEwan', u'Anchor', u'lrf,txt'],
355: [u'The Neutronium Alchemist',
u'Peter F. Hamilton',
u'Aspect',
u'lit,lrf'],
356: [u'The Naked God', u'Peter F. Hamilton', u'Aspect', u'lit,lrf'],
421: [u'A Shadow in Summer', u'Daniel Abraham', u'Tor Fantasy', u'lrf,rar'],
427: [u'Lonesome Dove', u'Larry M\\cMurtry', None, u'lit,lrf'],
440: [u'Ghost', u'John Ringo', u'Baen', u'lit,lrf'],
441: [u'Kildar', u'John Ringo', u'Baen', u'lit,lrf'],
443: [u'Hidden Empire ', u'Kevin J. Anderson', u'Aspect', u'lrf,rar'],
444: [u'The Gun Seller',
u'Hugh Laurie',
u'Washington Square Press',
u'lrf,rar']
}
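# The texts table above maps a book id to [title, authors, publisher, formats].
# The tests table below maps a query string to the set of ids the parser is
# expected to return when run against that table.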
tests = {
'Dysfunction' : set([348]),
'title:Dysfunction' : set([348]),
'Title:Dysfunction' : set([348]),
'title:Dysfunction OR author:Laurie': set([348, 444]),
'(tag:txt or tag:pdf)': set([33, 258, 354, 305, 242, 51, 55, 56, 154]),
'(tag:txt OR tag:pdf) and author:Tolstoy': set([55, 56]),
'Tolstoy txt': set([55, 56]),
'Hamilton Amsterdam' : set([]),
u'Beär' : set([91]),
'dysfunc or tolstoy': set([348, 55, 56]),
'tag:txt AND NOT tolstoy': set([33, 258, 354, 305, 242, 154]),
'not tag:lrf' : set([305]),
'london:thames': set([13]),
'publisher:london:thames': set([13]),
'"(1977)"': set([13]),
'jack weatherford orc': set([30]),
'S\\"calzi': {343},
'author:S\\"calzi': {343},
'"S\\"calzi"': {343},
'M\\\\cMurtry': {427},
}
fields = {'title':0, 'author':1, 'publisher':2, 'tag':3}
_universal_set = set(texts.keys())
def universal_set(self):
return self._universal_set
def get_matches(self, location, query, candidates=None):
location = location.lower()
if location in self.fields.keys():
getter = operator.itemgetter(self.fields[location])
elif location == 'all':
getter = lambda y: ''.join(x if x else '' for x in y)
else:
getter = lambda x: ''
if not query:
return set([])
query = query.lower()
if candidates:
return set(key for key, val in self.texts.items()
if key in candidates and query and query
in getattr(getter(val), 'lower', lambda : '')())
else:
return set(key for key, val in self.texts.items()
if query and query in getattr(getter(val), 'lower', lambda : '')())
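# Note on get_matches above: a field value may be None (several entries have
# no publisher), and None has no lower() method, so the
# getattr(..., 'lower', lambda : '')() fallback makes the query match nothing
# for that field instead of raising.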
def run_tests(self):
failed = []
for query in self.tests.keys():
prints('Testing query:', query, end=' ')
res = self.parse(query)
if res != self.tests[query]:
print 'FAILED', 'Expected:', self.tests[query], 'Got:', res
failed.append(query)
else:
print 'OK'
return failed
def main(args=sys.argv):
print 'testing unoptimized'
tester = Tester(['authors', 'author', 'series', 'formats', 'format',
'publisher', 'rating', 'tags', 'tag', 'comments', 'comment', 'cover',
'isbn', 'ondevice', 'pubdate', 'size', 'date', 'title', u'#read',
'all', 'search'], test=True)
failed = tester.run_tests()
if tester._tests_failed or failed:
print '>>>>>>>>>>>>>> Tests Failed <<<<<<<<<<<<<<<'
return 1
print '\n\ntesting optimized'
tester = Tester(['authors', 'author', 'series', 'formats', 'format',
'publisher', 'rating', 'tags', 'tag', 'comments', 'comment', 'cover',
'isbn', 'ondevice', 'pubdate', 'size', 'date', 'title', u'#read',
'all', 'search'], test=True, optimize=True)
failed = tester.run_tests()
if tester._tests_failed or failed:
print '>>>>>>>>>>>>>> Tests Failed <<<<<<<<<<<<<<<'
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
# }}}
|
Juraci/tempest
|
refs/heads/master
|
tempest/api/compute/images/test_list_images.py
|
17
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class ListImagesTestJSON(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(ListImagesTestJSON, cls).skip_checks()
if not CONF.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(ListImagesTestJSON, cls).setup_clients()
cls.client = cls.images_client
@test.idempotent_id('490d0898-e12a-463f-aef0-c50156b9f789')
def test_get_image(self):
# Returns the correct details for a single image
image = self.client.show_image(self.image_ref)
self.assertEqual(self.image_ref, image['id'])
@test.idempotent_id('fd51b7f4-d4a3-4331-9885-866658112a6f')
def test_list_images(self):
# The list of all images should contain the image
images = self.client.list_images()
found = any(i['id'] == self.image_ref for i in images)
self.assertTrue(found)
@test.idempotent_id('9f94cb6b-7f10-48c5-b911-a0b84d7d4cd6')
def test_list_images_with_detail(self):
# Detailed list of all images should contain the expected images
images = self.client.list_images(detail=True)
found = any(i['id'] == self.image_ref for i in images)
self.assertTrue(found)
|
shawnbrown/gpn
|
refs/heads/master
|
gpn/tests/test_node.py
|
1
|
# -*- coding: utf-8 -*-
import os
import sqlite3
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # New stdlib location in 3.0
from gpn.tests import _unittest as unittest
from gpn.tests.common import MkdtempTestCase
from gpn.node import Node
from gpn.connector import _schema
from gpn.connector import _SharedConnection
from gpn import IN_MEMORY
from gpn import TEMP_FILE
from gpn import READ_ONLY
class TestInstantiation(MkdtempTestCase):
def _make_node(self, filename):
global _schema
self._existing_node = filename
connection = sqlite3.connect(self._existing_node)
cursor = connection.cursor()
cursor.execute('PRAGMA synchronous=OFF')
for operation in _schema:
cursor.execute(operation)
cursor.execute('PRAGMA synchronous=FULL')
connection.close()
def test_existing_node(self):
"""Existing node should load without errors."""
filename = 'temp_node.node'
self._make_node(filename)
ptn = Node(self._existing_node) # Use existing file.
self.assertEqual(ptn.name, 'temp_node')
@unittest.skipIf(sqlite3.sqlite_version_info < (3, 8, 0),
'The query_only PRAGMA was added to SQLite in version 3.8.0')
def test_read_only_node(self):
"""The READ_ONLY flag should open a Node in read-only mode."""
self._make_node('existing_node')
ptn = Node(self._existing_node, mode=READ_ONLY)
connection = ptn._connect()
cursor = connection.cursor()
regex = 'attempt to write a readonly database'
with self.assertRaisesRegex((sqlite3.OperationalError,
sqlite3.IntegrityError), regex):
cursor.execute('INSERT INTO cell DEFAULT VALUES')
def test_new_node(self):
"""Named nodes that do not exist should be created."""
filepath = 'new_node.node'
self.assertFalse(os.path.exists(filepath))
ptn = Node(filepath) # Create new file.
del ptn
self.assertTrue(os.path.exists(filepath))
def test_subdirectory(self):
"""Subdirectory reference should also be supported."""
os.mkdir('subdir')
filepath = 'subdir/new_node.node'
self.assertFalse(os.path.exists(filepath))
ptn = Node(filepath) # Create new file.
self.assertEqual(ptn.name, 'subdir/new_node')
del ptn
self.assertTrue(os.path.exists(filepath))
def test_path_name_error(self):
"""If a path is specified, it should be used to set the node name.
Providing a `name` argument in addition to a path must be rejected.
"""
regex = 'Cannot specify both path and name.'
with self.assertRaisesRegex(AssertionError, regex):
Node('some_path.node', name='some_name')
def test_temporary_node(self):
"""Unnamed nodes should be temporary (in memory or tempfile)."""
# In memory.
ptn = Node()
self.assertFalse(ptn._connect._init_as_temp)
self.assertIsInstance(ptn._connect._dbsrc, _SharedConnection)
self.assertIsNone(ptn.name)
# On disk.
ptn = Node(mode=TEMP_FILE)
self.assertTrue(ptn._connect._init_as_temp)
self.assertTrue(os.path.isfile(ptn._connect._dbsrc))
self.assertIsNone(ptn.name)
def test_named_temporary_nodes(self):
# In memory.
node_name = 'temp_with_name'
ptn = Node(name=node_name)
self.assertFalse(ptn._connect._init_as_temp)
self.assertIsInstance(ptn._connect._dbsrc, _SharedConnection)
self.assertEqual(ptn.name, node_name)
# On disk.
ptn = Node(name=node_name, mode=TEMP_FILE)
self.assertTrue(ptn._connect._init_as_temp)
self.assertTrue(os.path.isfile(ptn._connect._dbsrc))
self.assertEqual(ptn.name, node_name)
class TestHash(unittest.TestCase):
def test_get_hash(self):
node = Node(mode=IN_MEMORY)
connection = node._connect()
cursor = connection.cursor()
# Hash of empty node should be None.
result = node._get_hash(cursor)
self.assertIsNone(result)
# Build node.
cursor.execute("INSERT INTO hierarchy VALUES (1, 'state', 0)")
cursor.execute("INSERT INTO hierarchy VALUES (2, 'county', 1)")
cursor.execute("INSERT INTO cell VALUES (1, 0)")
cursor.execute("INSERT INTO label VALUES (1, 1, 'Indiana')")
cursor.execute("INSERT INTO label VALUES (2, 2, 'LaPorte')")
cursor.execute("INSERT INTO cell_label VALUES (1, 1, 1, 1)")
cursor.execute("INSERT INTO cell_label VALUES (2, 1, 2, 2)")
# Expected hash of "11Indiana12LaPorte" (independently verified).
expected = 'a0eadc7b0547b9405dae9e3c50e038a550d9a718af10b53e567995a9378c22d7'
result = node._get_hash(cursor)
self.assertEqual(expected, result)
class TestTransactionHandling(unittest.TestCase):
def setUp(self):
self._node = Node(mode=IN_MEMORY)
connection = self._node._connect()
cursor = connection.cursor()
cursor.executescript("""
INSERT INTO hierarchy VALUES (1, 'country', 0);
INSERT INTO hierarchy VALUES (2, 'region', 1);
INSERT INTO cell VALUES (1, 0);
INSERT INTO label VALUES (1, 1, 'USA');
INSERT INTO label VALUES (2, 2, 'Northeast');
INSERT INTO cell_label VALUES (1, 1, 1, 1);
INSERT INTO cell_label VALUES (2, 1, 2, 2);
INSERT INTO cell VALUES (2, 0);
INSERT INTO label VALUES (3, 2, 'Midwest');
INSERT INTO cell_label VALUES (3, 2, 1, 1);
INSERT INTO cell_label VALUES (4, 2, 2, 3);
""")
def test_commit(self):
with self._node._connect() as connection:
connection.isolation_level = None
cursor = connection.cursor()
cursor.execute('BEGIN TRANSACTION')
cursor.execute('INSERT INTO cell VALUES (3, 0)') # <- Change.
cursor.execute('SELECT COUNT(*) FROM cell')
msg = 'Changes should be committed.'
self.assertEqual([(3,)], cursor.fetchall(), msg)
def test_rollback(self):
try:
with self._node._connect() as connection:
connection.isolation_level = None # <- REQUIRED!
cursor = connection.cursor() # <- REQUIRED!
cursor.execute('BEGIN TRANSACTION') # <- REQUIRED!
cursor.execute('DROP TABLE cell_label') # <- Change.
cursor.execute('INSERT INTO cell VALUES (3, 0)') # <- Change.
cursor.execute('This is not valid SQL -- operational error!') # <- Error!
except sqlite3.OperationalError:
pass
connection = self._node._connect()
cursor = connection.cursor()
msg = 'Changes should be rolled back.'
cursor.execute('SELECT COUNT(*) FROM cell')
self.assertEqual([(2,)], cursor.fetchall(), msg)
cursor.execute('SELECT COUNT(*) FROM cell_label')
self.assertEqual([(4,)], cursor.fetchall(), msg)
class TestInsert(unittest.TestCase):
def test_insert_one_cell(self):
node = Node(mode=IN_MEMORY)
connection = node._connect()
cursor = connection.cursor()
cursor.execute("INSERT INTO hierarchy VALUES (1, 'state', 0)")
cursor.execute("INSERT INTO hierarchy VALUES (2, 'county', 1)")
cursor.execute("INSERT INTO hierarchy VALUES (3, 'town', 2)")
items = [('state', 'OH'), ('county', 'Franklin'), ('town', 'Columbus')]
node._insert_one_cell(cursor, items) # <- Inserting here!
# Cell table.
cursor.execute('SELECT * FROM cell ORDER BY cell_id')
expected = [(1, 0)]
self.assertEqual(expected, cursor.fetchall())
# Label table.
cursor.execute('SELECT * FROM label ORDER BY label_id')
expected = [(1, 1, 'OH'),
(2, 2, 'Franklin'),
(3, 3, 'Columbus')]
self.assertEqual(expected, cursor.fetchall())
# Cell_label table.
expected = [(1, 1, 1, 1), (2, 1, 2, 2), (3, 1, 3, 3)]
cursor.execute('SELECT * FROM cell_label ORDER BY cell_label_id')
self.assertEqual(expected, cursor.fetchall())
def test_insert_cells(self):
self.maxDiff = None
fh = StringIO('state,county,town\n'
'OH,Allen,Lima\n'
'OH,Cuyahoga,Cleveland\n'
'OH,Franklin,Columbus\n'
'OH,Hamilton,Cincinnati\n'
'OH,Montgomery,Dayton\n')
node = Node(mode=IN_MEMORY)
node._insert_cells(fh) # <- Inserting here!
connection = node._connect()
cursor = connection.cursor()
# Hierarchy table.
cursor.execute('SELECT * FROM hierarchy ORDER BY hierarchy_level')
expected = [(1, 'state', 0), (2, 'county', 1), (3, 'town', 2)]
self.assertEqual(expected, cursor.fetchall())
# Cell table.
cursor.execute('SELECT * FROM cell ORDER BY cell_id')
expected = [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0)]
self.assertEqual(expected, cursor.fetchall())
# Label table.
cursor.execute('SELECT * FROM label ORDER BY label_id')
expected = [(1, 1, 'OH'), (2, 2, 'Allen'),
(3, 3, 'Lima'), (4, 2, 'Cuyahoga'),
(5, 3, 'Cleveland'), (6, 2, 'Franklin'),
(7, 3, 'Columbus'), (8, 2, 'Hamilton'),
(9, 3, 'Cincinnati'), (10, 2, 'Montgomery'),
(11, 3, 'Dayton'), (12, 1, 'UNMAPPED'),
(13, 2, 'UNMAPPED'), (14, 3, 'UNMAPPED')]
self.assertEqual(expected, cursor.fetchall())
# Cell_label table.
cursor.execute('SELECT * FROM cell_label ORDER BY cell_label_id')
expected = [(1, 1, 1, 1), (2, 1, 2, 2), (3, 1, 3, 3),
(4, 2, 1, 1), (5, 2, 2, 4), (6, 2, 3, 5),
(7, 3, 1, 1), (8, 3, 2, 6), (9, 3, 3, 7),
(10, 4, 1, 1), (11, 4, 2, 8), (12, 4, 3, 9),
(13, 5, 1, 1), (14, 5, 2, 10), (15, 5, 3, 11),
(16, 6, 1, 12), (17, 6, 2, 13), (18, 6, 3, 14)]
self.assertEqual(expected, cursor.fetchall())
# Node table (hash should be set).
cursor.execute('SELECT node_id, node_hash FROM node')
hashval = '71eeab7a5b4609a1978bd5c19e7d490556c5e42c503b39480c504bbaf99efe30'
self.assertEqual([(1, hashval)], cursor.fetchall())
def test_insert_cells_multiple_files(self):
"""Insert should accept multiple files."""
node = Node(mode=IN_MEMORY)
fh = StringIO('state,county,town\n'
'OH,Allen,Lima\n')
node._insert_cells(fh) # <- Inserting.
fh = StringIO('state,county,town\n'
'OH,Cuyahoga,Cleveland\n')
node._insert_cells(fh) # <- Inserting second file.
connection = node._connect()
cursor = connection.cursor()
# Hierarchy table.
cursor.execute('SELECT * FROM hierarchy ORDER BY hierarchy_level')
expected = [(1, 'state', 0), (2, 'county', 1), (3, 'town', 2)]
self.assertEqual(expected, cursor.fetchall())
# Cell table.
cursor.execute('SELECT * FROM cell ORDER BY cell_id')
expected = [(1, 0), (2, 0), (3, 0)]
self.assertEqual(expected, cursor.fetchall())
# Label table.
cursor.execute('SELECT * FROM label ORDER BY label_id')
expected = [(1, 1, 'OH'), (2, 2, 'Allen'),
(3, 3, 'Lima'), (4, 1, 'UNMAPPED'),
(5, 2, 'UNMAPPED'), (6, 3, 'UNMAPPED'),
(7, 2, 'Cuyahoga'), (8, 3, 'Cleveland')]
self.assertEqual(expected, cursor.fetchall())
# Node table should have two hashes.
cursor.execute('SELECT node_id, node_hash FROM node')
expected = [(1, '5011d6c33da25f6a98422461595f275f'
'289a7a745a9e89ab6b4d36675efd944b'),
(2, '9184abbd5461828e01fe82209463221a'
'65d4c21b40287d633cf7e324a27475f5')]
self.assertEqual(expected, cursor.fetchall())
def test_insert_cells_bad_header(self):
"""Files must have the same header"""
node = Node(mode=IN_MEMORY)
fh = StringIO('state,county,town\n'
'OH,Hamilton,Cincinnati\n')
node._insert_cells(fh)
regex = 'Fieldnames must match hierarchy values.'
with self.assertRaisesRegex(AssertionError, regex):
fh = StringIO('state,county\n'
'OH,Montgomery\n')
node._insert_cells(fh)
def test_insert_cells_duplicate(self):
"""Duplicate rows should fail and rollback to previous state."""
fh = StringIO('state,county,town\n'
'OH,Cuyahoga,Cleveland\n')
node = Node(mode=IN_MEMORY)
node._insert_cells(fh) # <- First insert!
regex = 'duplicate label set'
with self.assertRaisesRegex(sqlite3.IntegrityError, regex):
fh = StringIO('state,county,town\n'
'OH,Franklin,Columbus\n'
'OH,Hamilton,Cincinnati\n'
'OH,Hamilton,Cincinnati\n')
node._insert_cells(fh) # <- Second insert!
connection = node._connect()
cursor = connection.cursor()
# Cell table should include only values from first insert.
cursor.execute('SELECT * FROM cell ORDER BY cell_id')
expected = [(1, 0), (2, 0)]
self.assertEqual(expected, cursor.fetchall())
# Label table should include only values from first insert.
cursor.execute('SELECT * FROM label ORDER BY label_id')
expected = [(1, 1, 'OH'), (2, 2, 'Cuyahoga'), (3, 3, 'Cleveland'),
(4, 1, 'UNMAPPED'), (5, 2, 'UNMAPPED'), (6, 3, 'UNMAPPED')]
self.assertEqual(expected, cursor.fetchall())
def test_unmapped_levels(self):
"""Unmapped cells must have valid hierarchy levels."""
fh = StringIO('state,county,town\n'
'OH,Cuyahoga,Cleveland\n')
node = Node(mode=IN_MEMORY)
node._insert_cells(fh) # <- First insert!
regex = 'invalid unmapped level'
with self.assertRaisesRegex(sqlite3.IntegrityError, regex):
fh = StringIO('state,county,town\n'
'OH,Franklin,Columbus\n'
'OH,UNMAPPED,Cincinnati\n')
node._insert_cells(fh) # <- Second insert!
connection = node._connect()
cursor = connection.cursor()
# Cell table should include only values from first insert.
cursor.execute('SELECT * FROM cell ORDER BY cell_id')
expected = [(1, 0), (2, 0)]
self.assertEqual(expected, cursor.fetchall())
# Label table should include only values from first insert.
cursor.execute('SELECT * FROM label ORDER BY label_id')
expected = [(1, 1, 'OH'), (2, 2, 'Cuyahoga'), (3, 3, 'Cleveland'),
(4, 1, 'UNMAPPED'), (5, 2, 'UNMAPPED'), (6, 3, 'UNMAPPED')]
self.assertEqual(expected, cursor.fetchall())
class TestSelect(unittest.TestCase):
def setUp(self):
fh = StringIO('country,region,state,city\n' # cell_ids
'USA,Midwest,IL,Chicago\n' # 1
'USA,Northeast,NY,New York\n' # 2
'USA,Northeast,PA,Philadelphia\n' # 3
'USA,South,TX,Dallas\n' # 4
'USA,South,TX,Houston\n' # 5
'USA,South,TX,San Antonio\n' # 6
'USA,West,AZ,Phoenix\n' # 7
'USA,West,CA,Los Angeles\n' # 8
'USA,West,CA,San Diego\n' # 9
'USA,West,CA,San Jose\n') # 10
self.node = Node(mode=IN_MEMORY)
self.node._insert_cells(fh)
def test_select_cell_id(self):
""" """
connection = self.node._connect()
cursor = connection.cursor()
result = self.node._select_cell_id(cursor, region='Northeast')
self.assertEqual([2, 3], list(result))
result = self.node._select_cell_id(cursor, region='West', state='CA')
self.assertEqual([8, 9, 10], list(result))
kwds = {'region': 'West', 'state': 'CA'}
result = self.node._select_cell_id(cursor, **kwds)
self.assertEqual([8, 9, 10], list(result))
result = self.node._select_cell_id(cursor, state='XX')
self.assertEqual([], list(result))
#result = node._select_cell_id()
#self.assertEqual([], list(result))
def test_select_cell(self):
result = self.node.select_cell(region='West', state='CA')
expected = [
{'country': 'USA', 'region': 'West', 'state': 'CA', 'city': 'Los Angeles'},
{'country': 'USA', 'region': 'West', 'state': 'CA', 'city': 'San Diego'},
{'country': 'USA', 'region': 'West', 'state': 'CA', 'city': 'San Jose'},
]
self.assertEqual(expected, list(result))
class TestFileImportExport(MkdtempTestCase):
def setUp(self):
super(TestFileImportExport, self).setUp()
fh = StringIO('country,region,state,city\n'
'USA,Midwest,IL,Chicago\n'
'USA,Northeast,NY,New York\n'
'USA,Northeast,PA,Philadelphia\n')
node = Node(mode=IN_MEMORY)
node._insert_cells(fh)
self.node = node
def test_export(self):
filename = 'tempexport.csv'
self.node.export_cells(filename)
with open(filename) as fh:
file_contents = fh.read()
expected_contents = ('cell_id,country,region,state,city\n'
'1,USA,Midwest,IL,Chicago\n'
'2,USA,Northeast,NY,New York\n'
'3,USA,Northeast,PA,Philadelphia\n'
'4,UNMAPPED,UNMAPPED,UNMAPPED,UNMAPPED\n')
self.assertEqual(expected_contents, file_contents)
def test_already_exists(self):
filename = 'tempexport.csv'
with open(filename, 'w') as fh:
fh.write('foo\n1\n2\n3')
regex = filename + ' already exists'
with self.assertRaisesRegex(AssertionError, regex):
self.node.export_cells(filename)
class TestRepr(unittest.TestCase):
def test_empty(self):
node = Node()
expected = ("<class 'gpn.node.Node'>\n"
"Name: None\n"
"Cells: None\n"
"Hierarchy: None\n"
"Edges: None")
self.assertEqual(expected, repr(node))
def test_basic(self):
fh = StringIO('country,region,state,city\n'
'USA,Midwest,IL,Chicago\n'
'USA,Northeast,NY,New York\n'
'USA,Northeast,PA,Philadelphia\n')
node = Node(mode=IN_MEMORY, name='newptn')
node._insert_cells(fh)
expected = ("<class 'gpn.node.Node'>\n"
"Name: newptn\n"
"Cells: 4\n"
"Hierarchy: country (USA), region, state, city\n"
"Edges: None")
self.assertEqual(expected, repr(node))
if __name__ == '__main__':
unittest.main()
|
grengojbo/django-livesettings
|
refs/heads/master
|
tests/urls.py
|
2
|
from django.conf.urls.defaults import patterns, include
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/settings/$', include('livesettings.urls')),
)
|
aabbox/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test_crypt.py
|
91
|
from test import support
import unittest
crypt = support.import_module('crypt')
class CryptTestCase(unittest.TestCase):
def test_crypt(self):
c = crypt.crypt('mypassword', 'ab')
if support.verbose:
print('Test encryption: ', c)
def test_salt(self):
self.assertEqual(len(crypt._saltchars), 64)
for method in crypt.methods:
salt = crypt.mksalt(method)
self.assertEqual(len(salt),
method.salt_chars + (3 if method.ident else 0))
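# For example, METHOD_SHA512 has ident '6' and salt_chars 16, so mksalt()
# returns a salt of the form '$6$' + 16 characters -- 19 characters in total
# (illustrative values; see the crypt module documentation).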
def test_saltedcrypt(self):
for method in crypt.methods:
pw = crypt.crypt('assword', method)
self.assertEqual(len(pw), method.total_size)
pw = crypt.crypt('assword', crypt.mksalt(method))
self.assertEqual(len(pw), method.total_size)
def test_methods(self):
# Guarantee that METHOD_CRYPT is the last method in crypt.methods.
self.assertTrue(len(crypt.methods) >= 1)
self.assertEqual(crypt.METHOD_CRYPT, crypt.methods[-1])
if __name__ == "__main__":
unittest.main()
|
paulsheridan/data-structures
|
refs/heads/master
|
src/test_graphtraverse.py
|
1
|
import pytest
GRAPH_LIST = [[{1: {2: 3}, 2: {}}, 1, 2, True],
[{1: {2: 3}, 2: {3: 9}, 3: {1: 9}}, 1, 9, False],
[{1: {2: 2, 3: 2}, 2: {}, 3: {}}, 1, 3, True],
[{1: {2: 2, 3: 2}, 2: {4: 6, 5: 6, 6: 6},
3: {7: 2, 8: 2}, 4: {}, 5: {}, 6: {}, 7:
{6: 1}, 8: {}, 99: {}}, 1, 99, False],
[{1: {2: 1, 3: 5}, 2: {4: 3, 5: 3, 6: 3}, 3: {7: 2, 8: 2},
4: {}, 5: {}, 6: {}, 7: {6: 22}, 8: {}}, 1, 8, True]]
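# Each GRAPH_LIST entry is (adjacency, start, finish, expected): adjacency maps
# a node to a dict of {neighbour: edge weight}, and expected is whether a path
# from start to finish should exist.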
@pytest.mark.parametrize('graph, start, finish, bool',
GRAPH_LIST)
def test_depth_traversal_param(graph, start, finish, bool):
from simple_graph import Graph
from graphtraverse import graph_path
new_graph = Graph()
new_graph.node_map = graph
assert graph_path(new_graph, start, finish) == bool
@pytest.mark.parametrize('graph, start, finish, bool',
GRAPH_LIST)
def test_depth_based_path(graph, start, finish, bool):
from simple_graph import Graph
from graphtraverse import depth_based_path
new_graph = Graph()
new_graph.node_map = graph
assert depth_based_path(new_graph, start, finish) == bool
|
bradwoo8621/Swift-Study
|
refs/heads/master
|
Instagram/Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/win/gyptest-cl-warning-as-error.py
|
344
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure warning-as-error is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('warning-as-error.gyp', chdir=CHDIR)
# The source file contains a warning, so if WarnAsError is false (or
# default, which is also false), then the build should succeed, otherwise it
# must fail.
test.build('warning-as-error.gyp', 'test_warn_as_error_false', chdir=CHDIR)
test.build('warning-as-error.gyp', 'test_warn_as_error_unset', chdir=CHDIR)
test.build('warning-as-error.gyp', 'test_warn_as_error_true', chdir=CHDIR,
status=1)
test.pass_test()
|
acshi/osf.io
|
refs/heads/develop
|
website/project/taxonomies/__init__.py
|
4
|
from modularodm import fields
from modularodm.exceptions import ValidationValueError
from framework.mongo import (
ObjectId,
StoredObject,
utils as mongo_utils
)
from website.util import api_v2_url
@mongo_utils.unique_on(['text'])
class Subject(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
text = fields.StringField(required=True)
parents = fields.ForeignField('subject', list=True)
children = fields.ForeignField('subject', list=True)
@property
def absolute_api_v2_url(self):
return api_v2_url('taxonomies/{}/'.format(self._id))
@property
def child_count(self):
return len(self.children)
def get_absolute_url(self):
return self.absolute_api_v2_url
@property
def hierarchy(self):
if self.parents:
return self.parents[0].hierarchy + [self._id]
return [self._id]
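# Illustrative example (hypothetical ids): if subject C has parent B, and B has
# parent A (a root subject with no parents), then C.hierarchy returns
# [A._id, B._id, C._id] -- the root id first, following only the first parent
# at each level.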
def validate_subject_hierarchy(subject_hierarchy):
validated_hierarchy, raw_hierarchy = [], set(subject_hierarchy)
for subject_id in subject_hierarchy:
subject = Subject.load(subject_id)
if not subject:
raise ValidationValueError('Subject with id <{}> could not be found.'.format(subject_id))
if subject.parents.exists():
continue
raw_hierarchy.remove(subject_id)
validated_hierarchy.append(subject._id)
while raw_hierarchy:
if not set(subject.children.values_list('_id', flat=True)) & raw_hierarchy:
raise ValidationValueError('Invalid subject hierarchy: {}'.format(subject_hierarchy))
else:
for child in subject.children.filter(_id__in=raw_hierarchy):
subject = child
validated_hierarchy.append(child._id)
raw_hierarchy.remove(child._id)
break
if set(validated_hierarchy) == set(subject_hierarchy):
return
else:
raise ValidationValueError('Invalid subject hierarchy: {}'.format(subject_hierarchy))
raise ValidationValueError('Unable to find root subject in {}'.format(subject_hierarchy))
|
masayuko/beaker
|
refs/heads/rework2
|
tests/test_domain_setting.py
|
1
|
import re
import os
from beaker.middleware import SessionMiddleware
from nose import SkipTest
try:
from webtest import TestApp
except ImportError:
raise SkipTest("webtest not installed")
def teardown():
import shutil
shutil.rmtree('./cache', True)
def simple_app(environ, start_response):
session = environ['beaker.session']
domain = environ.get('domain')
if domain:
session.domain = domain
if 'value' not in session:
session['value'] = 0
session['value'] += 1
if not environ['PATH_INFO'].startswith('/nosave'):
session.save()
start_response('200 OK', [('Content-type', 'text/plain; charset=utf-8')])
msg = 'The current value is: %s, session id is %s' % (session.get('value', 0),
session.id)
return [msg.encode('utf-8')]
def test_same_domain():
options = {'session.data_dir':'./cache',
'session.secret':'blah',
'session.cookie_domain': '.hoop.com'}
app = TestApp(SessionMiddleware(simple_app, **options))
res = app.get('/', extra_environ=dict(HTTP_HOST='subdomain.hoop.com'))
assert 'current value is: 1' in res
assert 'Domain=.hoop.com' in res.headers['Set-Cookie']
res = app.get('/', extra_environ=dict(HTTP_HOST='another.hoop.com'))
assert 'current value is: 2' in res
assert [] == res.headers.getall('Set-Cookie')
res = app.get('/', extra_environ=dict(HTTP_HOST='more.subdomain.hoop.com'))
assert 'current value is: 3' in res
def test_different_domain():
options = {'session.data_dir':'./cache',
'session.secret':'blah'}
app = TestApp(SessionMiddleware(simple_app, **options))
res = app.get('/', extra_environ=dict(domain='.hoop.com',
HTTP_HOST='www.hoop.com'))
res = app.get('/', extra_environ=dict(domain='.hoop.co.uk',
HTTP_HOST='www.hoop.com'))
assert 'Domain=.hoop.co.uk' in res.headers['Set-Cookie']
assert 'current value is: 2' in res
res = app.get('/', extra_environ=dict(domain='.hoop.co.uk',
HTTP_HOST='www.test.com'))
assert 'current value is: 1' in res
if __name__ == '__main__':
from paste import httpserver
wsgi_app = SessionMiddleware(simple_app, {})
httpserver.serve(wsgi_app, host='127.0.0.1', port=8080)
|
mpetyx/pychatbot
|
refs/heads/master
|
AIML/howie-src-0.6.0/scripts/acronym.py
|
2
|
#!/usr/bin/python
"""
Acronym Decoder v1.0
usage: acronym.py [-m N | --max N] [-f] A.C.R.O.N.Y.M.
Prints the known expansions of the specified acronym. If the optional -m/--max
parameter is specified, at most N matches will be output. If -f is passed,
the output is printed in "friendly" mode; otherwise, the matches will be output
one per line.
"""
def usage(): print __doc__
import getopt
import os
import re
import string
import sys
import urllib
### Compiled regular expressions
# Removes all unwanted characters from an acronym.
acroFormatRE = re.compile(r'[^A-Z\-]')
# matches the line BEFORE a definition
firstLineRE = re.compile(r'<td valign="middle" width="15%".*><b>(?P<acro>[A-Z\-]+)</b></td>')
# matches the definition of an acronym, excluding any paranthetical elaborations
secondLineRE = re.compile(r'<td valign="middle" width="75%".*>(<b>)?(?P<def>[^<(]+)(\([^<]+\))?(</b>)?$')
# matches the string indicating that no definitions were found
failureRE = re.compile(r'Sorry, <b>[A-Z\-]+</b> was not found in the database')
def formatAcro(acro):
return acroFormatRE.sub("", acro.upper())
def parse(f, acro, max=-1):
defOnNextLine = False
linenum = 0
found = []
for line in f.readlines():
# If we've found the maximum number of matches,
# stop now.
if max >= 0 and len(found) >= max:
break
# if we haven't found anything yet, check for failure
if len(found) == 0 and not defOnNextLine:
match = failureRE.search(line)
if match is not None:
break
# check this line against the appropriate RE.
# If the first line has already matched, look for an actual
# definition on the second line.
line = line.strip()
linenum += 1
if defOnNextLine:
defOnNextLine = False
match = secondLineRE.search(line)
if match is None:
# This is bad; there should be a definition here...
print "WARNING: did not find expected definition on line", linenum
continue
# add this def to the found list.
found.append( match.group("def").strip() )
else:
match = firstLineRE.search(line)
if match is not None and match.group("acro") == acro:
defOnNextLine = True
return found
if __name__ == "__main__":
# process command-line args
try:
opts, args = getopt.getopt(sys.argv[1:], "m:f", ["max="])
except getopt.GetoptError:
usage()
sys.exit(2)
maxMatches = -1
friendlyMode = False
for o,a in opts:
if o in ["-m", "-max"]:
maxMatches = int(a)
elif o == "-f":
friendlyMode = True
if len(args) != 1:
usage()
sys.exit(2)
# format the acronym to search for
acro = formatAcro(args[0])
# Submit the query and open a file handle to the results page.
class AppURLopener(urllib.FancyURLopener):
def __init__(self, *args):
self.version = "Mozilla/4.0"
urllib.FancyURLopener.__init__(self, *args)
urllib._urlopener = AppURLopener()
f = urllib.urlopen("http://www.acro" + "nymfind"+"er.com/af-q"+"uery.asp?Str"+"ing=exact&A"+"cronym=%s" % acro)
# Parse the results page to find a list of definitions. The list
# will be empty if none were found.
defs = parse(f, acro, maxMatches)
# Print the definitions.
if len(defs) == 0:
if friendlyMode: print "I don't know what %s stands for." % acro
else:
if friendlyMode:
print acro, "stands for:",
for d in defs[:-1]:
print d + ",",
print defs[-1] + "."
else:
for d in defs: print d
|
GamecoinFuture/Gamecoin
|
refs/heads/master
|
share/qt/extract_strings_qt.py
|
2945
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
|
SimonBiggs/poc-brachyoptimisation
|
refs/heads/master
|
utilities.py
|
1
|
# Copyright (C) 2015 Simon Biggs
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# http://www.gnu.org/licenses/.
import numpy as np
from scipy.optimize import basinhopping
from matplotlib.colors import LinearSegmentedColormap
class BasinhoppingWrapper(object):
def __init__(self, n=5,
optimiser_confidence=0.00001,
basinhopping_confidence=0.00001,
debug=None, bounds=None, **kwargs):
self.to_minimise = kwargs['to_minimise']
self.n = n
self.optimiser_confidence = optimiser_confidence
self.basinhopping_confidence = basinhopping_confidence
self.results_store = np.array([])
self.initial = kwargs['initial']
self.step_noise = kwargs['step_noise']
self.bounds = bounds
self.debug = debug
if len(self.initial) != len(self.step_noise):
raise Exception(
"Step noise and initial conditions must be equal length."
)
self.result = self.run_basinhopping()
def step_function(self, optimiser_input):
for i, noise in enumerate(self.step_noise):
optimiser_input[i] += np.random.normal(scale=noise)
return optimiser_input
def callback_function(self,
optimiser_output,
minimise_function_result,
was_accepted):
if not(was_accepted):
return
if self.debug is not None:
self.debug(optimiser_output)
if not np.isnan(minimise_function_result):
self.results_store = np.append(
self.results_store, minimise_function_result)
if len(self.results_store) < self.n:
return
sorted_results = np.sort(self.results_store)
test = (
sorted_results[self.n-1] <
sorted_results[0] + self.basinhopping_confidence)
# print(sorted_results)
if test:
return True
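# In other words: collect accepted, non-NaN results and, once at least n of
# them exist, stop as soon as the n best results all lie within
# basinhopping_confidence of the best one. Returning True from this callback
# tells scipy.optimize.basinhopping to stop iterating.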
def run_basinhopping(self):
self.successful_results = np.empty(self.n)
self.successful_results[:] = np.nan
self.current_success_number = 0
minimizer_config = {
"method": 'L-BFGS-B',
"options": {'gtol': self.optimiser_confidence},
"bounds": self.bounds
}
output = basinhopping(
self.to_minimise,
self.initial,
niter=1000,
minimizer_kwargs=minimizer_config,
take_step=self.step_function,
callback=self.callback_function
)
return output.x
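# Minimal usage sketch (assumed values, not part of the original module):
# def parabola(x): return float(np.sum((x - 3.0) ** 2))
# wrapper = BasinhoppingWrapper(n=5, to_minimise=parabola,
# initial=np.zeros(2), step_noise=[0.5, 0.5])
# print(wrapper.result) # approximately [3.0, 3.0]
# to_minimise, initial and step_noise are required keyword arguments; the
# wrapper runs basinhopping in __init__ and stores the best parameters found
# in self.result.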
def create_green_cm():
cm_data = [
[ 0.26700401, 0.00487433, 0.32941519],
[ 0.26851048, 0.00960483, 0.33542652],
[ 0.26994384, 0.01462494, 0.34137895],
[ 0.27130489, 0.01994186, 0.34726862],
[ 0.27259384, 0.02556309, 0.35309303],
[ 0.27380934, 0.03149748, 0.35885256],
[ 0.27495242, 0.03775181, 0.36454323],
[ 0.27602238, 0.04416723, 0.37016418],
[ 0.2770184 , 0.05034437, 0.37571452],
[ 0.27794143, 0.05632444, 0.38119074],
[ 0.27879067, 0.06214536, 0.38659204],
[ 0.2795655 , 0.06783587, 0.39191723],
[ 0.28026658, 0.07341724, 0.39716349],
[ 0.28089358, 0.07890703, 0.40232944],
[ 0.28144581, 0.0843197 , 0.40741404],
[ 0.28192358, 0.08966622, 0.41241521],
[ 0.28232739, 0.09495545, 0.41733086],
[ 0.28265633, 0.10019576, 0.42216032],
[ 0.28291049, 0.10539345, 0.42690202],
[ 0.28309095, 0.11055307, 0.43155375],
[ 0.28319704, 0.11567966, 0.43611482],
[ 0.28322882, 0.12077701, 0.44058404],
[ 0.28318684, 0.12584799, 0.44496 ],
[ 0.283072 , 0.13089477, 0.44924127],
[ 0.28288389, 0.13592005, 0.45342734],
[ 0.28262297, 0.14092556, 0.45751726],
[ 0.28229037, 0.14591233, 0.46150995],
[ 0.28188676, 0.15088147, 0.46540474],
[ 0.28141228, 0.15583425, 0.46920128],
[ 0.28086773, 0.16077132, 0.47289909],
[ 0.28025468, 0.16569272, 0.47649762],
[ 0.27957399, 0.17059884, 0.47999675],
[ 0.27882618, 0.1754902 , 0.48339654],
[ 0.27801236, 0.18036684, 0.48669702],
[ 0.27713437, 0.18522836, 0.48989831],
[ 0.27619376, 0.19007447, 0.49300074],
[ 0.27519116, 0.1949054 , 0.49600488],
[ 0.27412802, 0.19972086, 0.49891131],
[ 0.27300596, 0.20452049, 0.50172076],
[ 0.27182812, 0.20930306, 0.50443413],
[ 0.27059473, 0.21406899, 0.50705243],
[ 0.26930756, 0.21881782, 0.50957678],
[ 0.26796846, 0.22354911, 0.5120084 ],
[ 0.26657984, 0.2282621 , 0.5143487 ],
[ 0.2651445 , 0.23295593, 0.5165993 ],
[ 0.2636632 , 0.23763078, 0.51876163],
[ 0.26213801, 0.24228619, 0.52083736],
[ 0.26057103, 0.2469217 , 0.52282822],
[ 0.25896451, 0.25153685, 0.52473609],
[ 0.25732244, 0.2561304 , 0.52656332],
[ 0.25564519, 0.26070284, 0.52831152],
[ 0.25393498, 0.26525384, 0.52998273],
[ 0.25219404, 0.26978306, 0.53157905],
[ 0.25042462, 0.27429024, 0.53310261],
[ 0.24862899, 0.27877509, 0.53455561],
[ 0.2468114 , 0.28323662, 0.53594093],
[ 0.24497208, 0.28767547, 0.53726018],
[ 0.24311324, 0.29209154, 0.53851561],
[ 0.24123708, 0.29648471, 0.53970946],
[ 0.23934575, 0.30085494, 0.54084398],
[ 0.23744138, 0.30520222, 0.5419214 ],
[ 0.23552606, 0.30952657, 0.54294396],
[ 0.23360277, 0.31382773, 0.54391424],
[ 0.2316735 , 0.3181058 , 0.54483444],
[ 0.22973926, 0.32236127, 0.54570633],
[ 0.22780192, 0.32659432, 0.546532 ],
[ 0.2258633 , 0.33080515, 0.54731353],
[ 0.22392515, 0.334994 , 0.54805291],
[ 0.22198915, 0.33916114, 0.54875211],
[ 0.22005691, 0.34330688, 0.54941304],
[ 0.21812995, 0.34743154, 0.55003755],
[ 0.21620971, 0.35153548, 0.55062743],
[ 0.21429757, 0.35561907, 0.5511844 ],
[ 0.21239477, 0.35968273, 0.55171011],
[ 0.2105031 , 0.36372671, 0.55220646],
[ 0.20862342, 0.36775151, 0.55267486],
[ 0.20675628, 0.37175775, 0.55311653],
[ 0.20490257, 0.37574589, 0.55353282],
[ 0.20306309, 0.37971644, 0.55392505],
[ 0.20123854, 0.38366989, 0.55429441],
[ 0.1994295 , 0.38760678, 0.55464205],
[ 0.1976365 , 0.39152762, 0.55496905],
[ 0.19585993, 0.39543297, 0.55527637],
[ 0.19410009, 0.39932336, 0.55556494],
[ 0.19235719, 0.40319934, 0.55583559],
[ 0.19063135, 0.40706148, 0.55608907],
[ 0.18892259, 0.41091033, 0.55632606],
[ 0.18723083, 0.41474645, 0.55654717],
[ 0.18555593, 0.4185704 , 0.55675292],
[ 0.18389763, 0.42238275, 0.55694377],
[ 0.18225561, 0.42618405, 0.5571201 ],
[ 0.18062949, 0.42997486, 0.55728221],
[ 0.17901879, 0.43375572, 0.55743035],
[ 0.17742298, 0.4375272 , 0.55756466],
[ 0.17584148, 0.44128981, 0.55768526],
[ 0.17427363, 0.4450441 , 0.55779216],
[ 0.17271876, 0.4487906 , 0.55788532],
[ 0.17117615, 0.4525298 , 0.55796464],
[ 0.16964573, 0.45626209, 0.55803034],
[ 0.16812641, 0.45998802, 0.55808199],
[ 0.1666171 , 0.46370813, 0.55811913],
[ 0.16511703, 0.4674229 , 0.55814141],
[ 0.16362543, 0.47113278, 0.55814842],
[ 0.16214155, 0.47483821, 0.55813967],
[ 0.16066467, 0.47853961, 0.55811466],
[ 0.15919413, 0.4822374 , 0.5580728 ],
[ 0.15772933, 0.48593197, 0.55801347],
[ 0.15626973, 0.4896237 , 0.557936 ],
[ 0.15481488, 0.49331293, 0.55783967],
[ 0.15336445, 0.49700003, 0.55772371],
[ 0.1519182 , 0.50068529, 0.55758733],
[ 0.15047605, 0.50436904, 0.55742968],
[ 0.14903918, 0.50805136, 0.5572505 ],
[ 0.14760731, 0.51173263, 0.55704861],
[ 0.14618026, 0.51541316, 0.55682271],
[ 0.14475863, 0.51909319, 0.55657181],
[ 0.14334327, 0.52277292, 0.55629491],
[ 0.14193527, 0.52645254, 0.55599097],
[ 0.14053599, 0.53013219, 0.55565893],
[ 0.13914708, 0.53381201, 0.55529773],
[ 0.13777048, 0.53749213, 0.55490625],
[ 0.1364085 , 0.54117264, 0.55448339],
[ 0.13506561, 0.54485335, 0.55402906],
[ 0.13374299, 0.54853458, 0.55354108],
[ 0.13244401, 0.55221637, 0.55301828],
[ 0.13117249, 0.55589872, 0.55245948],
[ 0.1299327 , 0.55958162, 0.55186354],
[ 0.12872938, 0.56326503, 0.55122927],
[ 0.12756771, 0.56694891, 0.55055551],
[ 0.12645338, 0.57063316, 0.5498411 ],
[ 0.12539383, 0.57431754, 0.54908564],
[ 0.12439474, 0.57800205, 0.5482874 ],
[ 0.12346281, 0.58168661, 0.54744498],
[ 0.12260562, 0.58537105, 0.54655722],
[ 0.12183122, 0.58905521, 0.54562298],
[ 0.12114807, 0.59273889, 0.54464114],
[ 0.12056501, 0.59642187, 0.54361058],
[ 0.12009154, 0.60010387, 0.54253043],
[ 0.11973756, 0.60378459, 0.54139999],
[ 0.11951163, 0.60746388, 0.54021751],
[ 0.11942341, 0.61114146, 0.53898192],
[ 0.11948255, 0.61481702, 0.53769219],
[ 0.11969858, 0.61849025, 0.53634733],
[ 0.12008079, 0.62216081, 0.53494633],
[ 0.12063824, 0.62582833, 0.53348834],
[ 0.12137972, 0.62949242, 0.53197275],
[ 0.12231244, 0.63315277, 0.53039808],
[ 0.12344358, 0.63680899, 0.52876343],
[ 0.12477953, 0.64046069, 0.52706792],
[ 0.12632581, 0.64410744, 0.52531069],
[ 0.12808703, 0.64774881, 0.52349092],
[ 0.13006688, 0.65138436, 0.52160791],
[ 0.13226797, 0.65501363, 0.51966086],
[ 0.13469183, 0.65863619, 0.5176488 ],
[ 0.13733921, 0.66225157, 0.51557101],
[ 0.14020991, 0.66585927, 0.5134268 ],
[ 0.14330291, 0.66945881, 0.51121549],
[ 0.1466164 , 0.67304968, 0.50893644],
[ 0.15014782, 0.67663139, 0.5065889 ],
[ 0.15389405, 0.68020343, 0.50417217],
[ 0.15785146, 0.68376525, 0.50168574],
[ 0.16201598, 0.68731632, 0.49912906],
[ 0.1663832 , 0.69085611, 0.49650163],
[ 0.1709484 , 0.69438405, 0.49380294],
[ 0.17570671, 0.6978996 , 0.49103252],
[ 0.18065314, 0.70140222, 0.48818938],
[ 0.18578266, 0.70489133, 0.48527326],
[ 0.19109018, 0.70836635, 0.48228395],
[ 0.19657063, 0.71182668, 0.47922108],
[ 0.20221902, 0.71527175, 0.47608431],
[ 0.20803045, 0.71870095, 0.4728733 ],
[ 0.21400015, 0.72211371, 0.46958774],
[ 0.22012381, 0.72550945, 0.46622638],
[ 0.2263969 , 0.72888753, 0.46278934],
[ 0.23281498, 0.73224735, 0.45927675],
[ 0.2393739 , 0.73558828, 0.45568838],
[ 0.24606968, 0.73890972, 0.45202405],
[ 0.25289851, 0.74221104, 0.44828355],
[ 0.25985676, 0.74549162, 0.44446673],
[ 0.26694127, 0.74875084, 0.44057284],
[ 0.27414922, 0.75198807, 0.4366009 ],
[ 0.28147681, 0.75520266, 0.43255207],
[ 0.28892102, 0.75839399, 0.42842626],
[ 0.29647899, 0.76156142, 0.42422341],
[ 0.30414796, 0.76470433, 0.41994346],
[ 0.31192534, 0.76782207, 0.41558638],
[ 0.3198086 , 0.77091403, 0.41115215],
[ 0.3277958 , 0.77397953, 0.40664011],
[ 0.33588539, 0.7770179 , 0.40204917],
[ 0.34407411, 0.78002855, 0.39738103],
[ 0.35235985, 0.78301086, 0.39263579],
[ 0.36074053, 0.78596419, 0.38781353],
[ 0.3692142 , 0.78888793, 0.38291438],
[ 0.37777892, 0.79178146, 0.3779385 ],
[ 0.38643282, 0.79464415, 0.37288606],
[ 0.39517408, 0.79747541, 0.36775726],
[ 0.40400101, 0.80027461, 0.36255223],
[ 0.4129135 , 0.80304099, 0.35726893],
[ 0.42190813, 0.80577412, 0.35191009],
[ 0.43098317, 0.80847343, 0.34647607],
[ 0.44013691, 0.81113836, 0.3409673 ],
[ 0.44936763, 0.81376835, 0.33538426],
[ 0.45867362, 0.81636288, 0.32972749],
[ 0.46805314, 0.81892143, 0.32399761],
[ 0.47750446, 0.82144351, 0.31819529],
[ 0.4870258 , 0.82392862, 0.31232133],
[ 0.49661536, 0.82637633, 0.30637661],
[ 0.5062713 , 0.82878621, 0.30036211],
[ 0.51599182, 0.83115784, 0.29427888],
[ 0.52577622, 0.83349064, 0.2881265 ],
[ 0.5356211 , 0.83578452, 0.28190832],
[ 0.5455244 , 0.83803918, 0.27562602],
[ 0.55548397, 0.84025437, 0.26928147],
[ 0.5654976 , 0.8424299 , 0.26287683],
[ 0.57556297, 0.84456561, 0.25641457],
[ 0.58567772, 0.84666139, 0.24989748],
[ 0.59583934, 0.84871722, 0.24332878],
[ 0.60604528, 0.8507331 , 0.23671214],
[ 0.61629283, 0.85270912, 0.23005179],
[ 0.62657923, 0.85464543, 0.22335258],
[ 0.63690157, 0.85654226, 0.21662012],
[ 0.64725685, 0.85839991, 0.20986086],
[ 0.65764197, 0.86021878, 0.20308229],
[ 0.66805369, 0.86199932, 0.19629307],
[ 0.67848868, 0.86374211, 0.18950326],
[ 0.68894351, 0.86544779, 0.18272455],
[ 0.69941463, 0.86711711, 0.17597055],
[ 0.70989842, 0.86875092, 0.16925712],
[ 0.72039115, 0.87035015, 0.16260273],
[ 0.73088902, 0.87191584, 0.15602894],
[ 0.74138803, 0.87344918, 0.14956101],
[ 0.75188414, 0.87495143, 0.14322828],
[ 0.76237342, 0.87642392, 0.13706449],
[ 0.77285183, 0.87786808, 0.13110864],
[ 0.78331535, 0.87928545, 0.12540538],
[ 0.79375994, 0.88067763, 0.12000532],
[ 0.80418159, 0.88204632, 0.11496505],
[ 0.81457634, 0.88339329, 0.11034678],
[ 0.82494028, 0.88472036, 0.10621724],
[ 0.83526959, 0.88602943, 0.1026459 ],
[ 0.84556056, 0.88732243, 0.09970219],
[ 0.8558096 , 0.88860134, 0.09745186],
[ 0.86601325, 0.88986815, 0.09595277],
[ 0.87616824, 0.89112487, 0.09525046],
[ 0.88627146, 0.89237353, 0.09537439],
[ 0.89632002, 0.89361614, 0.09633538],
[ 0.90631121, 0.89485467, 0.09812496],
[ 0.91624212, 0.89609127, 0.1007168 ],
[ 0.92610579, 0.89732977, 0.10407067],
[ 0.93590444, 0.8985704 , 0.10813094],
[ 0.94563626, 0.899815 , 0.11283773],
[ 0.95529972, 0.90106534, 0.11812832],
[ 0.96489353, 0.90232311, 0.12394051],
[ 0.97441665, 0.90358991, 0.13021494],
[ 0.98386829, 0.90486726, 0.13689671],
[ 0.99324789, 0.90615657, 0.1439362 ]]
cm_green = LinearSegmentedColormap.from_list('optionD', cm_data)
return cm_green
|
ifduyue/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_truncatechars_html.py
|
40
|
from django.template.defaultfilters import truncatechars_html
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_truncate_zero(self):
self.assertEqual(truncatechars_html('<p>one <a href="#">two - three <br>four</a> five</p>', 0), '...')
def test_truncate(self):
self.assertEqual(
truncatechars_html('<p>one <a href="#">two - three <br>four</a> five</p>', 6),
'<p>one...</p>',
)
def test_truncate2(self):
self.assertEqual(
truncatechars_html('<p>one <a href="#">two - three <br>four</a> five</p>', 11),
'<p>one <a href="#">two ...</a></p>',
)
def test_truncate3(self):
self.assertEqual(
truncatechars_html('<p>one <a href="#">two - three <br>four</a> five</p>', 100),
'<p>one <a href="#">two - three <br>four</a> five</p>',
)
def test_truncate_unicode(self):
self.assertEqual(truncatechars_html('<b>\xc5ngstr\xf6m</b> was here', 5), '<b>\xc5n...</b>')
def test_truncate_something(self):
self.assertEqual(truncatechars_html('a<b>b</b>c', 3), 'a<b>b</b>c')
|
CristianBB/SickRage
|
refs/heads/develop
|
sickbeard/notifiers/pytivo.py
|
3
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import sickbeard
from urllib import urlencode
from urllib2 import Request, urlopen, HTTPError
from sickbeard import logger
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
class pyTivoNotifier(object):
def notify_snatch(self, ep_name):
pass
def notify_download(self, ep_name):
pass
def notify_subtitle_download(self, ep_name, lang):
pass
def notify_git_update(self, new_version):
pass
def update_library(self, ep_obj):
# Values from config
if not sickbeard.USE_PYTIVO:
return False
host = sickbeard.PYTIVO_HOST
shareName = sickbeard.PYTIVO_SHARE_NAME
tsn = sickbeard.PYTIVO_TIVO_NAME
# There are two more values required, the container and file.
#
# container: The share name, show name and season
#
# file: The file name
#
# Some slicing and dicing of variables is required to get at these values.
#
# There might be better ways to arrive at the values, but this is the best I have been able to
# come up with.
#
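        # Illustrative example with hypothetical values (not from any real config):
        #   showPath          = "/media/TV/My Show"
        #   ep_obj.location   = "/media/TV/My Show/Season 1/episode.mp4"
        #   showName          = "My Show"          -> root = "/media/TV/"
        #   rootShowAndSeason = "/media/TV/My Show/Season 1"
        #   showAndSeason     = "My Show/Season 1" -> container = shareName + "/My Show/Season 1"
        #   filename          = "/My Show/Season 1/episode.mp4"
        #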
# Calculated values
showPath = ep_obj.show.location
showName = ep_obj.show.name
rootShowAndSeason = ek(os.path.dirname, ep_obj.location)
absPath = ep_obj.location
# Some show names have colons in them which are illegal in a path location, so strip them out.
# (Are there other characters?)
showName = showName.replace(":", "")
root = showPath.replace(showName, "")
showAndSeason = rootShowAndSeason.replace(root, "")
container = shareName + "/" + showAndSeason
filename = "/" + absPath.replace(root, "")
# Finally create the url and make request
requestUrl = "http://" + host + "/TiVoConnect?" + urlencode(
{'Command': 'Push', 'Container': container, 'File': filename, 'tsn': tsn})
logger.log(u"pyTivo notification: Requesting " + requestUrl, logger.DEBUG)
request = Request(requestUrl)
try:
response = urlopen(request) # @UnusedVariable
except HTTPError, e:
if hasattr(e, 'reason'):
logger.log(u"pyTivo notification: Error, failed to reach a server - " + e.reason, logger.ERROR)
return False
elif hasattr(e, 'code'):
logger.log(u"pyTivo notification: Error, the server couldn't fulfill the request - " + e.code, logger.ERROR)
return False
except Exception, e:
logger.log(u"PYTIVO: Unknown exception: " + ex(e), logger.ERROR)
return False
else:
logger.log(u"pyTivo notification: Successfully requested transfer of file")
return True
notifier = pyTivoNotifier
|
carsonmcdonald/selenium
|
refs/heads/master
|
py/test/selenium/webdriver/common/typing_tests.py
|
60
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class TypingTests(unittest.TestCase):
def testShouldFireKeyPressEvents(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("a")
result = self.driver.find_element(by=By.ID, value="result")
self.assertTrue("press:" in result.text)
def testShouldFireKeyDownEvents(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("I")
result = self.driver.find_element(by=By.ID, value="result")
self.assertTrue("down" in result.text)
def testShouldFireKeyUpEvents(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("a")
result = self.driver.find_element(by=By.ID, value="result")
self.assertTrue("up:" in result.text)
def testShouldTypeLowerCaseLetters(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("abc def")
self.assertEqual(keyReporter.get_attribute("value"), "abc def")
def testShouldBeAbleToTypeCapitalLetters(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("ABC DEF")
self.assertEqual(keyReporter.get_attribute("value"), "ABC DEF")
def testShouldBeAbleToTypeQuoteMarks(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("\"")
self.assertEqual(keyReporter.get_attribute("value"), "\"")
def testShouldBeAbleToTypeTheAtCharacter(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("@")
self.assertEqual(keyReporter.get_attribute("value"), "@")
def testShouldBeAbleToMixUpperAndLowerCaseLetters(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("me@eXample.com")
self.assertEqual(keyReporter.get_attribute("value"), "me@eXample.com")
def testArrowKeysShouldNotBePrintable(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys(Keys.ARROW_LEFT)
self.assertEqual(keyReporter.get_attribute("value"), "")
def testShouldBeAbleToUseArrowKeys(self):
self._loadPage("javascriptPage")
keyReporter = self.driver.find_element(by=By.ID, value="keyReporter")
keyReporter.send_keys("Tet", Keys.ARROW_LEFT, "s")
self.assertEqual(keyReporter.get_attribute("value"), "Test")
def testWillSimulateAKeyUpWhenEnteringTextIntoInputElements(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyUp")
element.send_keys("I like cheese")
result = self.driver.find_element(by=By.ID, value="result")
self.assertEqual(result.text, "I like cheese")
def testWillSimulateAKeyDownWhenEnteringTextIntoInputElements(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyDown")
element.send_keys("I like cheese")
result = self.driver.find_element(by=By.ID, value="result")
# Because the key down gets the result before the input element is
# filled, we're a letter short here
self.assertEqual(result.text, "I like chees")
def testWillSimulateAKeyPressWhenEnteringTextIntoInputElements(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyPress")
element.send_keys("I like cheese")
result = self.driver.find_element(by=By.ID, value="result")
# Because the key down gets the result before the input element is
# filled, we're a letter short here
self.assertEqual(result.text, "I like chees")
def testWillSimulateAKeyUpWhenEnteringTextIntoTextAreas(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyUpArea")
element.send_keys("I like cheese")
result = self.driver.find_element(by=By.ID, value="result")
self.assertEqual(result.text, "I like cheese")
def testWillSimulateAKeyDownWhenEnteringTextIntoTextAreas(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyDownArea")
element.send_keys("I like cheese")
result = self.driver.find_element(by=By.ID, value="result")
# Because the key down gets the result before the input element is
# filled, we're a letter short here
self.assertEqual(result.text, "I like chees")
def testWillSimulateAKeyPressWhenEnteringTextIntoTextAreas(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyPressArea")
element.send_keys("I like cheese")
result = self.driver.find_element(by=By.ID, value="result")
# Because the key down gets the result before the input element is
# filled, we're a letter short here
self.assertEqual(result.text, "I like chees")
#@Ignore(value = {HTMLUNIT, CHROME_NON_WINDOWS, SELENESE, ANDROID},
# reason = "untested user agents")
def testShouldReportKeyCodeOfArrowKeysUpDownEvents(self):
self._loadPage("javascriptPage")
result = self.driver.find_element(by=By.ID, value="result")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys(Keys.ARROW_DOWN)
self.assertTrue("down: 40" in result.text.strip())
self.assertTrue("up: 40" in result.text.strip())
element.send_keys(Keys.ARROW_UP)
self.assertTrue("down: 38" in result.text.strip())
self.assertTrue("up: 38" in result.text.strip())
element.send_keys(Keys.ARROW_LEFT)
self.assertTrue("down: 37" in result.text.strip())
self.assertTrue("up: 37" in result.text.strip())
element.send_keys(Keys.ARROW_RIGHT)
self.assertTrue("down: 39" in result.text.strip())
self.assertTrue("up: 39" in result.text.strip())
# And leave no rubbish/printable keys in the "keyReporter"
self.assertEqual(element.get_attribute("value"), "")
def testNumericNonShiftKeys(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
numericLineCharsNonShifted = "`1234567890-=[]\\,.'/42"
element.send_keys(numericLineCharsNonShifted)
self.assertEqual(element.get_attribute("value"), numericLineCharsNonShifted)
#@Ignore(value = {HTMLUNIT, CHROME_NON_WINDOWS, SELENESE, ANDROID},
#reason = "untested user agent")
def testNumericShiftKeys(self):
self._loadPage("javascriptPage")
result = self.driver.find_element(by=By.ID, value="result")
element = self.driver.find_element(by=By.ID, value="keyReporter")
numericShiftsEtc = "~!@#$%^&*()_+{}:i\"<>?|END~"
element.send_keys(numericShiftsEtc)
self.assertEqual(element.get_attribute("value"), numericShiftsEtc)
self.assertTrue(" up: 16" in result.text.strip())
def testLowerCaseAlphaKeys(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
lowerAlphas = "abcdefghijklmnopqrstuvwxyz"
element.send_keys(lowerAlphas)
self.assertEqual(element.get_attribute("value"), lowerAlphas)
def testUppercaseAlphaKeys(self):
self._loadPage("javascriptPage")
result = self.driver.find_element(by=By.ID, value="result")
element = self.driver.find_element(by=By.ID, value="keyReporter")
upperAlphas = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
element.send_keys(upperAlphas)
self.assertEqual(element.get_attribute("value"), upperAlphas)
self.assertTrue(" up: 16" in result.text.strip())
def testAllPrintableKeys(self):
self._loadPage("javascriptPage")
result = self.driver.find_element(by=By.ID, value="result")
element = self.driver.find_element(by=By.ID, value="keyReporter")
allPrintable = "!\"#$%&'()*+,-./0123456789:<=>?@ ABCDEFGHIJKLMNOPQRSTUVWXYZ [\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
element.send_keys(allPrintable)
        self.assertEqual(element.get_attribute("value"), allPrintable)
self.assertTrue(" up: 16" in result.text.strip())
def testArrowKeysAndPageUpAndDown(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("a" + Keys.LEFT + "b" + Keys.RIGHT +
Keys.UP + Keys.DOWN + Keys.PAGE_UP + Keys.PAGE_DOWN + "1")
self.assertEqual(element.get_attribute("value"), "ba1")
#def testHomeAndEndAndPageUpAndPageDownKeys(self):
# // FIXME: macs don't have HOME keys, would PGUP work?
# if (Platform.getCurrent().is(Platform.MAC)) {
# return
# }
# self._loadPage("javascriptPage")
# element = self.driver.find_element(by=By.ID, value="keyReporter")
# element.send_keys("abc" + Keys.HOME + "0" + Keys.LEFT + Keys.RIGHT +
# Keys.PAGE_UP + Keys.PAGE_DOWN + Keys.END + "1" + Keys.HOME +
# "0" + Keys.PAGE_UP + Keys.END + "111" + Keys.HOME + "00")
# self.assertThat(element.get_attribute("value"), is("0000abc1111"))
#@Ignore(value = {HTMLUNIT, CHROME_NON_WINDOWS, SELENESE, ANDROID},
# reason = "untested user agents")
def testDeleteAndBackspaceKeys(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("abcdefghi")
self.assertEqual(element.get_attribute("value"), "abcdefghi")
element.send_keys(Keys.LEFT, Keys.LEFT, Keys.DELETE)
self.assertEqual(element.get_attribute("value"), "abcdefgi")
element.send_keys(Keys.LEFT, Keys.LEFT, Keys.BACK_SPACE)
self.assertEqual(element.get_attribute("value"), "abcdfgi")
#@Ignore(value = {HTMLUNIT, CHROME_NON_WINDOWS, SELENESE}, reason = "untested user agents")
def testSpecialSpaceKeys(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("abcd" + Keys.SPACE + "fgh" + Keys.SPACE + "ij")
self.assertEqual(element.get_attribute("value"), "abcd fgh ij")
def testNumberpadAndFunctionKeys(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("abcd" + Keys.MULTIPLY + Keys.SUBTRACT + Keys.ADD +
Keys.DECIMAL + Keys.SEPARATOR + Keys.NUMPAD0 + Keys.NUMPAD9 +
Keys.ADD + Keys.SEMICOLON + Keys.EQUALS + Keys.DIVIDE +
Keys.NUMPAD3 + "abcd")
self.assertEqual(element.get_attribute("value"), "abcd*-+.,09+;=/3abcd")
element.clear()
element.send_keys("FUNCTION" + Keys.F2 + "-KEYS" + Keys.F2)
element.send_keys("" + Keys.F2 + "-TOO" + Keys.F2)
self.assertEqual(element.get_attribute("value"), "FUNCTION-KEYS-TOO")
def testShiftSelectionDeletes(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys("abcd efgh")
self.assertEqual(element.get_attribute("value"), "abcd efgh")
element.send_keys(Keys.SHIFT, Keys.LEFT, Keys.LEFT, Keys.LEFT)
element.send_keys(Keys.DELETE)
self.assertEqual(element.get_attribute("value"), "abcd e")
def testShouldTypeIntoInputElementsThatHaveNoTypeAttribute(self):
self._loadPage("formPage")
element = self.driver.find_element(by=By.ID, value="no-type")
element.send_keys("Should Say Cheese")
self.assertEqual(element.get_attribute("value"), "Should Say Cheese")
def testShouldTypeAnInteger(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="keyReporter")
element.send_keys(1234)
self.assertEqual(element.get_attribute("value"), "1234")
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
|
michael-dev2rights/ansible
|
refs/heads/ansible-d2r
|
lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway_facts.py
|
26
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: ec2_vpc_nat_gateway_facts
short_description: Retrieves AWS VPC Managed Nat Gateway details using AWS methods.
description:
- Gets various details related to AWS VPC Managed Nat Gateways
version_added: "2.3"
requirements: [ boto3 ]
options:
nat_gateway_ids:
description:
- Get details of specific nat gateway IDs
required: false
default: None
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNatGateways.html)
for possible filters.
required: false
default: None
author: Karen Cheng (@Etherdaemon)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple example of listing all nat gateways
- name: List all managed nat gateways in ap-southeast-2
ec2_vpc_nat_gateway_facts:
region: ap-southeast-2
register: all_ngws
- name: Debugging the result
debug:
msg: "{{ all_ngws.result }}"
- name: Get details on specific nat gateways
ec2_vpc_nat_gateway_facts:
nat_gateway_ids:
- nat-1234567891234567
- nat-7654321987654321
region: ap-southeast-2
register: specific_ngws
- name: Get all nat gateways with specific filters
ec2_vpc_nat_gateway_facts:
region: ap-southeast-2
filters:
state: ['pending']
register: pending_ngws
- name: Get nat gateways with specific filter
ec2_vpc_nat_gateway_facts:
region: ap-southeast-2
filters:
subnet-id: subnet-12345678
state: ['available']
register: existing_nat_gateways
'''
RETURN = '''
result:
description: The result of the describe, converted to ansible snake case style.
See http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways for the response.
returned: success
type: list
'''
import json
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list, HAS_BOTO3)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def get_nat_gateways(client, module, nat_gateway_id=None):
params = dict()
params['Filter'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
params['NatGatewayIds'] = module.params.get('nat_gateway_ids')
try:
        # Round-trip through json.dumps/json.loads so that datetime objects in
        # the API response are converted to ISO-8601 strings by date_handler.
        result = json.loads(json.dumps(client.describe_nat_gateways(**params), default=date_handler))
except Exception as e:
module.fail_json(msg=str(e.message))
return [camel_dict_to_snake_dict(gateway) for gateway in result['NatGateways']]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters=dict(default={}, type='dict'),
nat_gateway_ids=dict(default=[], type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Validate Requirements
if not HAS_BOTO3:
module.fail_json(msg='botocore/boto3 is required.')
try:
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg=str(e))
results = get_nat_gateways(connection, module)
module.exit_json(result=results)
if __name__ == '__main__':
main()
|
marcydoty/geraldo
|
refs/heads/master
|
site/newsite/django_1_0/django/utils/termcolors.py
|
73
|
"""
termcolors.py
"""
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
foreground = dict([(color_names[x], '3%s' % x) for x in range(8)])
background = dict([(color_names[x], '4%s' % x) for x in range(8)])
del color_names
RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
def colorize(text='', opts=(), **kwargs):
"""
Returns your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Returns the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print colorize('first line', fg='red', opts=('noreset',))
print 'this should be red too'
print colorize('and so should this')
print 'this should not be red'
"""
text = str(text)
code_list = []
if text == '' and len(opts) == 1 and opts[0] == 'reset':
return '\x1b[%sm' % RESET
for k, v in kwargs.iteritems():
if k == 'fg':
code_list.append(foreground[v])
elif k == 'bg':
code_list.append(background[v])
for o in opts:
if o in opt_dict:
code_list.append(opt_dict[o])
if 'noreset' not in opts:
text = text + '\x1b[%sm' % RESET
return ('\x1b[%sm' % ';'.join(code_list)) + text
def make_style(opts=(), **kwargs):
"""
Returns a function with default parameters for colorize()
Example:
bold_red = make_style(opts=('bold',), fg='red')
print bold_red('hello')
KEYWORD = make_style(fg='yellow')
COMMENT = make_style(fg='blue', opts=('bold',))
"""
return lambda text: colorize(text, opts, **kwargs)
|
lucafavatella/intellij-community
|
refs/heads/cli-wip
|
python/testData/testRunner/env/unit/subfolder/__init__.py
|
981
|
__author__ = 'traff'
|
benjaminy/Charcoal
|
refs/heads/master
|
Testing/MicroTests/JustCalling/rec.py
|
1
|
A = [ 4, 8, 3, 5, 6, 7, 2, 1, 0 ]
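# Minimal sketch of what this microbenchmark does (comment added for clarity,
# not in the original source): f(n, x) applies the permutation A to x roughly
# n times through a tree of about 2*n recursive calls, so the dominant cost
# measured here is bare function-call overhead ("just calling").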
def f( n, x ):
if n < 2:
return A[ x ]
else:
return f( ( n + 1 ) / 2, f( n / 2, x ) )
N = 100000000
print( f( N, 1 ) )
|
pvlib/pvlib-python
|
refs/heads/master
|
pvlib/tests/test_modelchain.py
|
1
|
import sys
import numpy as np
import pandas as pd
from pvlib import iam, modelchain, pvsystem, temperature, inverter
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pvlib._deprecation import pvlibDeprecationWarning
from .conftest import assert_series_equal, assert_frame_equal
import pytest
from .conftest import fail_on_pvlib_version, requires_tables
@pytest.fixture(scope='function')
def sapm_dc_snl_ac_system(sapm_module_params, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
module = 'Canadian_Solar_CS5P_220M___2009_'
module_parameters = sapm_module_params.copy()
temp_model_params = sapm_temperature_cs5p_220m.copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=cec_inverter_parameters)
return system
@pytest.fixture
def cec_dc_snl_ac_system(cec_module_cs5p_220m, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
module_parameters = cec_module_cs5p_220m.copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
temp_model_params = sapm_temperature_cs5p_220m.copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module_parameters['Name'],
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=cec_inverter_parameters)
return system
@pytest.fixture
def cec_dc_snl_ac_arrays(cec_module_cs5p_220m, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
module_parameters = cec_module_cs5p_220m.copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
temp_model_params = sapm_temperature_cs5p_220m.copy()
array_one = pvsystem.Array(
surface_tilt=32.2, surface_azimuth=180,
module=module_parameters['Name'],
module_parameters=module_parameters.copy(),
temperature_model_parameters=temp_model_params.copy()
)
array_two = pvsystem.Array(
surface_tilt=42.2, surface_azimuth=220,
module=module_parameters['Name'],
module_parameters=module_parameters.copy(),
temperature_model_parameters=temp_model_params.copy()
)
system = PVSystem(
arrays=[array_one, array_two],
inverter_parameters=cec_inverter_parameters
)
return system
@pytest.fixture
def cec_dc_native_snl_ac_system(cec_module_cs5p_220m, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
module_parameters = cec_module_cs5p_220m.copy()
temp_model_params = sapm_temperature_cs5p_220m.copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module_parameters['Name'],
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=cec_inverter_parameters)
return system
@pytest.fixture
def pvsyst_dc_snl_ac_system(pvsyst_module_params, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
module = 'PVsyst test module'
module_parameters = pvsyst_module_params
module_parameters['b'] = 0.05
temp_model_params = sapm_temperature_cs5p_220m.copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=cec_inverter_parameters)
return system
@pytest.fixture
def pvsyst_dc_snl_ac_arrays(pvsyst_module_params, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
module = 'PVsyst test module'
module_parameters = pvsyst_module_params
module_parameters['b'] = 0.05
temp_model_params = sapm_temperature_cs5p_220m.copy()
array_one = pvsystem.Array(
surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters.copy(),
temperature_model_parameters=temp_model_params.copy()
)
array_two = pvsystem.Array(
surface_tilt=42.2, surface_azimuth=220,
module=module,
module_parameters=module_parameters.copy(),
temperature_model_parameters=temp_model_params.copy()
)
system = PVSystem(
arrays=[array_one, array_two],
inverter_parameters=cec_inverter_parameters
)
return system
@pytest.fixture
def cec_dc_adr_ac_system(sam_data, cec_module_cs5p_220m,
sapm_temperature_cs5p_220m):
module_parameters = cec_module_cs5p_220m.copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
temp_model_params = sapm_temperature_cs5p_220m.copy()
inverters = sam_data['adrinverter']
inverter = inverters['Zigor__Sunzet_3_TL_US_240V__CEC_2011_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module_parameters['Name'],
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_snl_ac_system(cec_inverter_parameters):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=cec_inverter_parameters)
return system
@pytest.fixture(scope="function")
def pvwatts_dc_pvwatts_ac_system(sapm_temperature_cs5p_220m):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
temp_model_params = sapm_temperature_cs5p_220m.copy()
inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture(scope="function")
def pvwatts_dc_pvwatts_ac_system_arrays(sapm_temperature_cs5p_220m):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
temp_model_params = sapm_temperature_cs5p_220m.copy()
inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95}
array_one = pvsystem.Array(
surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters.copy(),
temperature_model_parameters=temp_model_params.copy()
)
array_two = pvsystem.Array(
surface_tilt=42.2, surface_azimuth=220,
module_parameters=module_parameters.copy(),
temperature_model_parameters=temp_model_params.copy()
)
system = PVSystem(
arrays=[array_one, array_two], inverter_parameters=inverter_parameters)
return system
@pytest.fixture(scope="function")
def pvwatts_dc_pvwatts_ac_faiman_temp_system():
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
temp_model_params = {'u0': 25.0, 'u1': 6.84}
inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture(scope="function")
def pvwatts_dc_pvwatts_ac_pvsyst_temp_system():
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
temp_model_params = {'u_c': 29.0, 'u_v': 0.0, 'module_efficiency': 0.1,
'alpha_absorption': 0.9}
inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture(scope="function")
def pvwatts_dc_pvwatts_ac_fuentes_temp_system():
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
temp_model_params = {'noct_installed': 45}
inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture(scope="function")
def pvwatts_dc_pvwatts_ac_noct_sam_temp_system():
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
temp_model_params = {'noct': 45, 'module_efficiency': 0.2}
inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture(scope="function")
def system_no_aoi(cec_module_cs5p_220m, sapm_temperature_cs5p_220m,
cec_inverter_parameters):
module_parameters = cec_module_cs5p_220m.copy()
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
temp_model_params = sapm_temperature_cs5p_220m.copy()
inverter_parameters = cec_inverter_parameters.copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture
def system_no_temp(cec_module_cs5p_220m, cec_inverter_parameters):
module_parameters = cec_module_cs5p_220m.copy()
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverter_parameters = cec_inverter_parameters.copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture
def location():
return Location(32.2, -111, altitude=700)
@pytest.fixture
def weather():
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
weather = pd.DataFrame({'ghi': [500, 0], 'dni': [800, 0], 'dhi': [100, 0]},
index=times)
return weather
@pytest.fixture
def total_irrad(weather):
return pd.DataFrame({'poa_global': [800., 500.],
'poa_direct': [500., 300.],
'poa_diffuse': [300., 200.]}, index=weather.index)
@pytest.fixture(scope='function')
def sapm_dc_snl_ac_system_Array(sapm_module_params, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
module = 'Canadian_Solar_CS5P_220M___2009_'
module_parameters = sapm_module_params.copy()
temp_model_params = sapm_temperature_cs5p_220m.copy()
array_one = pvsystem.Array(surface_tilt=32, surface_azimuth=180,
albedo=0.2, module=module,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
modules_per_string=1,
strings=1)
array_two = pvsystem.Array(surface_tilt=15, surface_azimuth=180,
albedo=0.2, module=module,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
modules_per_string=1,
strings=1)
return PVSystem(arrays=[array_one, array_two],
inverter_parameters=cec_inverter_parameters)
@pytest.fixture(scope='function')
def sapm_dc_snl_ac_system_same_arrays(sapm_module_params,
cec_inverter_parameters,
sapm_temperature_cs5p_220m):
"""A system with two identical arrays."""
module = 'Canadian_Solar_CS5P_220M___2009_'
module_parameters = sapm_module_params.copy()
temp_model_params = sapm_temperature_cs5p_220m.copy()
array_one = pvsystem.Array(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
modules_per_string=1,
strings=1)
array_two = pvsystem.Array(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_params,
modules_per_string=1,
strings=1)
return PVSystem(arrays=[array_one, array_two],
inverter_parameters=cec_inverter_parameters)
def test_ModelChain_creation(sapm_dc_snl_ac_system, location):
ModelChain(sapm_dc_snl_ac_system, location)
def test_with_sapm(sapm_dc_snl_ac_system, location, weather):
mc = ModelChain.with_sapm(sapm_dc_snl_ac_system, location)
assert mc.dc_model == mc.sapm
mc.run_model(weather)
def test_with_pvwatts(pvwatts_dc_pvwatts_ac_system, location, weather):
mc = ModelChain.with_pvwatts(pvwatts_dc_pvwatts_ac_system, location)
assert mc.dc_model == mc.pvwatts_dc
assert mc.temperature_model == mc.sapm_temp
mc.run_model(weather)
def test_run_model_with_irradiance(sapm_dc_snl_ac_system, location):
mc = ModelChain(sapm_dc_snl_ac_system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(irradiance).results.ac
expected = pd.Series(np.array([187.80746494643176, -0.02]),
index=times)
assert_series_equal(ac, expected)
@pytest.fixture(scope='function')
def multi_array_sapm_dc_snl_ac_system(
sapm_temperature_cs5p_220m, sapm_module_params,
cec_inverter_parameters):
module_parameters = sapm_module_params
temp_model_parameters = sapm_temperature_cs5p_220m.copy()
inverter_parameters = cec_inverter_parameters
array_one = pvsystem.Array(
surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_parameters
)
array_two = pvsystem.Array(
surface_tilt=32.2, surface_azimuth=220,
module_parameters=module_parameters,
temperature_model_parameters=temp_model_parameters
)
two_array_system = PVSystem(
arrays=[array_one, array_two],
inverter_parameters=inverter_parameters
)
array_one_system = PVSystem(
arrays=[array_one],
inverter_parameters=inverter_parameters
)
array_two_system = PVSystem(
arrays=[array_two],
inverter_parameters=inverter_parameters
)
return {'two_array_system': two_array_system,
'array_one_system': array_one_system,
'array_two_system': array_two_system}
def test_run_model_from_irradiance_arrays_no_loss(
multi_array_sapm_dc_snl_ac_system, location):
mc_both = ModelChain(
multi_array_sapm_dc_snl_ac_system['two_array_system'],
location,
aoi_model='no_loss',
spectral_model='no_loss',
losses_model='no_loss'
)
mc_one = ModelChain(
multi_array_sapm_dc_snl_ac_system['array_one_system'],
location,
aoi_model='no_loss',
spectral_model='no_loss',
losses_model='no_loss'
)
mc_two = ModelChain(
multi_array_sapm_dc_snl_ac_system['array_two_system'],
location,
aoi_model='no_loss',
spectral_model='no_loss',
losses_model='no_loss'
)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
mc_one.run_model(irradiance)
mc_two.run_model(irradiance)
mc_both.run_model(irradiance)
assert_frame_equal(
mc_both.results.dc[0],
mc_one.results.dc
)
assert_frame_equal(
mc_both.results.dc[1],
mc_two.results.dc
)
@pytest.mark.parametrize("input_type", [tuple, list])
def test_run_model_from_irradiance_arrays_no_loss_input_type(
multi_array_sapm_dc_snl_ac_system, location, input_type):
mc_both = ModelChain(
multi_array_sapm_dc_snl_ac_system['two_array_system'],
location,
aoi_model='no_loss',
spectral_model='no_loss',
losses_model='no_loss'
)
mc_one = ModelChain(
multi_array_sapm_dc_snl_ac_system['array_one_system'],
location,
aoi_model='no_loss',
spectral_model='no_loss',
losses_model='no_loss'
)
mc_two = ModelChain(
multi_array_sapm_dc_snl_ac_system['array_two_system'],
location,
aoi_model='no_loss',
spectral_model='no_loss',
losses_model='no_loss'
)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
mc_one.run_model(irradiance)
mc_two.run_model(irradiance)
mc_both.run_model(input_type((irradiance, irradiance)))
assert_frame_equal(
mc_both.results.dc[0], mc_one.results.dc
)
assert_frame_equal(
mc_both.results.dc[1], mc_two.results.dc
)
@pytest.mark.parametrize('inverter', ['adr'])
def test_ModelChain_invalid_inverter_params_arrays(
inverter, sapm_dc_snl_ac_system_same_arrays,
location, adr_inverter_parameters):
inverter_params = {'adr': adr_inverter_parameters}
sapm_dc_snl_ac_system_same_arrays.inverter_parameters = \
inverter_params[inverter]
with pytest.raises(ValueError,
match=r'adr inverter function cannot'):
ModelChain(sapm_dc_snl_ac_system_same_arrays, location)
@pytest.mark.parametrize("input_type", [tuple, list])
def test_prepare_inputs_multi_weather(
sapm_dc_snl_ac_system_Array, location, input_type):
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
weather = pd.DataFrame({'ghi': 1, 'dhi': 1, 'dni': 1},
index=times)
mc.prepare_inputs(input_type((weather, weather)))
num_arrays = sapm_dc_snl_ac_system_Array.num_arrays
assert len(mc.results.total_irrad) == num_arrays
def test_prepare_inputs_no_irradiance(sapm_dc_snl_ac_system, location):
mc = ModelChain(sapm_dc_snl_ac_system, location)
weather = pd.DataFrame()
with pytest.raises(ValueError):
mc.prepare_inputs(weather)
def test_prepare_inputs_arrays_one_missing_irradiance(
sapm_dc_snl_ac_system_Array, location):
"""If any of the input DataFrames is missing a column then a
ValueError is raised."""
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
weather = pd.DataFrame(
{'ghi': [1], 'dhi': [1], 'dni': [1]}
)
weather_incomplete = pd.DataFrame(
{'ghi': [1], 'dhi': [1]}
)
with pytest.raises(ValueError,
match=r"Incomplete input data\. .*"):
mc.prepare_inputs((weather, weather_incomplete))
with pytest.raises(ValueError,
match=r"Incomplete input data\. .*"):
mc.prepare_inputs((weather_incomplete, weather))
@pytest.mark.parametrize("input_type", [tuple, list])
def test_prepare_inputs_weather_wrong_length(
sapm_dc_snl_ac_system_Array, location, input_type):
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
weather = pd.DataFrame({'ghi': [1], 'dhi': [1], 'dni': [1]})
with pytest.raises(ValueError,
match="Input must be same length as number of Arrays "
r"in system\. Expected 2, got 1\."):
mc.prepare_inputs(input_type((weather,)))
with pytest.raises(ValueError,
match="Input must be same length as number of Arrays "
r"in system\. Expected 2, got 3\."):
mc.prepare_inputs(input_type((weather, weather, weather)))
def test_ModelChain_times_error_arrays(sapm_dc_snl_ac_system_Array, location):
"""ModelChain.times is assigned a single index given multiple weather
DataFrames.
"""
error_str = r"Input DataFrames must have same index\."
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
irradiance = {'ghi': [1, 2], 'dhi': [1, 2], 'dni': [1, 2]}
times_one = pd.date_range(start='1/1/2020', freq='6H', periods=2)
times_two = pd.date_range(start='1/1/2020 00:15', freq='6H', periods=2)
weather_one = pd.DataFrame(irradiance, index=times_one)
weather_two = pd.DataFrame(irradiance, index=times_two)
with pytest.raises(ValueError, match=error_str):
mc.prepare_inputs((weather_one, weather_two))
# test with overlapping, but differently sized indices.
times_three = pd.date_range(start='1/1/2020', freq='6H', periods=3)
irradiance_three = irradiance
irradiance_three['ghi'].append(3)
irradiance_three['dhi'].append(3)
irradiance_three['dni'].append(3)
weather_three = pd.DataFrame(irradiance_three, index=times_three)
with pytest.raises(ValueError, match=error_str):
mc.prepare_inputs((weather_one, weather_three))
def test_ModelChain_times_arrays(sapm_dc_snl_ac_system_Array, location):
"""ModelChain.times is assigned a single index given multiple weather
DataFrames.
"""
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
irradiance_one = {'ghi': [1, 2], 'dhi': [1, 2], 'dni': [1, 2]}
irradiance_two = {'ghi': [2, 1], 'dhi': [2, 1], 'dni': [2, 1]}
times = pd.date_range(start='1/1/2020', freq='6H', periods=2)
weather_one = pd.DataFrame(irradiance_one, index=times)
weather_two = pd.DataFrame(irradiance_two, index=times)
mc.prepare_inputs((weather_one, weather_two))
assert mc.results.times.equals(times)
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
mc.prepare_inputs(weather_one)
assert mc.results.times.equals(times)
@pytest.mark.parametrize("missing", ['dhi', 'ghi', 'dni'])
def test_prepare_inputs_missing_irrad_component(
sapm_dc_snl_ac_system, location, missing):
mc = ModelChain(sapm_dc_snl_ac_system, location)
weather = pd.DataFrame({'dhi': [1, 2], 'dni': [1, 2], 'ghi': [1, 2]})
weather.drop(columns=missing, inplace=True)
with pytest.raises(ValueError):
mc.prepare_inputs(weather)
@pytest.mark.parametrize('ac_model', ['sandia', 'pvwatts'])
@pytest.mark.parametrize("input_type", [tuple, list])
def test_run_model_arrays_weather(sapm_dc_snl_ac_system_same_arrays,
pvwatts_dc_pvwatts_ac_system_arrays,
location, ac_model, input_type):
system = {'sandia': sapm_dc_snl_ac_system_same_arrays,
'pvwatts': pvwatts_dc_pvwatts_ac_system_arrays}
mc = ModelChain(system[ac_model], location, aoi_model='no_loss',
spectral_model='no_loss')
times = pd.date_range('20200101 1200-0700', periods=2, freq='2H')
weather_one = pd.DataFrame({'dni': [900, 800],
'ghi': [600, 500],
'dhi': [150, 100]},
index=times)
weather_two = pd.DataFrame({'dni': [500, 400],
'ghi': [300, 200],
'dhi': [75, 65]},
index=times)
mc.run_model(input_type((weather_one, weather_two)))
assert (mc.results.dc[0] != mc.results.dc[1]).all().all()
assert not mc.results.ac.empty
def test_run_model_perez(sapm_dc_snl_ac_system, location):
mc = ModelChain(sapm_dc_snl_ac_system, location,
transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(irradiance).results.ac
expected = pd.Series(np.array([187.94295642, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_gueymard_perez(sapm_dc_snl_ac_system, location):
mc = ModelChain(sapm_dc_snl_ac_system, location,
airmass_model='gueymard1993',
transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(irradiance).results.ac
expected = pd.Series(np.array([187.94317405, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_with_weather_sapm_temp(sapm_dc_snl_ac_system, location,
weather, mocker):
# test with sapm cell temperature model
weather['wind_speed'] = 5
weather['temp_air'] = 10
mc = ModelChain(sapm_dc_snl_ac_system, location)
mc.temperature_model = 'sapm'
m_sapm = mocker.spy(sapm_dc_snl_ac_system, 'get_cell_temperature')
mc.run_model(weather)
assert m_sapm.call_count == 1
# assert_called_once_with cannot be used with series, so need to use
# assert_series_equal on call_args
assert_series_equal(m_sapm.call_args[0][1], weather['temp_air']) # temp
assert_series_equal(m_sapm.call_args[0][2], weather['wind_speed']) # wind
assert m_sapm.call_args[1]['model'] == 'sapm'
assert not mc.results.ac.empty
def test_run_model_with_weather_pvsyst_temp(sapm_dc_snl_ac_system, location,
weather, mocker):
# test with pvsyst cell temperature model
weather['wind_speed'] = 5
weather['temp_air'] = 10
sapm_dc_snl_ac_system.arrays[0].racking_model = 'freestanding'
sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters = \
temperature._temperature_model_params('pvsyst', 'freestanding')
mc = ModelChain(sapm_dc_snl_ac_system, location)
mc.temperature_model = 'pvsyst'
m_pvsyst = mocker.spy(sapm_dc_snl_ac_system, 'get_cell_temperature')
mc.run_model(weather)
assert m_pvsyst.call_count == 1
assert_series_equal(m_pvsyst.call_args[0][1], weather['temp_air'])
assert_series_equal(m_pvsyst.call_args[0][2], weather['wind_speed'])
assert m_pvsyst.call_args[1]['model'] == 'pvsyst'
assert not mc.results.ac.empty
def test_run_model_with_weather_faiman_temp(sapm_dc_snl_ac_system, location,
weather, mocker):
# test with faiman cell temperature model
weather['wind_speed'] = 5
weather['temp_air'] = 10
sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters = {
'u0': 25.0, 'u1': 6.84
}
mc = ModelChain(sapm_dc_snl_ac_system, location)
mc.temperature_model = 'faiman'
m_faiman = mocker.spy(sapm_dc_snl_ac_system, 'get_cell_temperature')
mc.run_model(weather)
assert m_faiman.call_count == 1
assert_series_equal(m_faiman.call_args[0][1], weather['temp_air'])
assert_series_equal(m_faiman.call_args[0][2], weather['wind_speed'])
assert m_faiman.call_args[1]['model'] == 'faiman'
assert not mc.results.ac.empty
def test_run_model_with_weather_fuentes_temp(sapm_dc_snl_ac_system, location,
weather, mocker):
weather['wind_speed'] = 5
weather['temp_air'] = 10
sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters = {
'noct_installed': 45
}
mc = ModelChain(sapm_dc_snl_ac_system, location)
mc.temperature_model = 'fuentes'
m_fuentes = mocker.spy(sapm_dc_snl_ac_system, 'get_cell_temperature')
mc.run_model(weather)
assert m_fuentes.call_count == 1
assert_series_equal(m_fuentes.call_args[0][1], weather['temp_air'])
assert_series_equal(m_fuentes.call_args[0][2], weather['wind_speed'])
assert m_fuentes.call_args[1]['model'] == 'fuentes'
assert not mc.results.ac.empty
def test_run_model_with_weather_noct_sam_temp(sapm_dc_snl_ac_system, location,
weather, mocker):
weather['wind_speed'] = 5
weather['temp_air'] = 10
sapm_dc_snl_ac_system.temperature_model_parameters = {
'noct': 45, 'module_efficiency': 0.2
}
mc = ModelChain(sapm_dc_snl_ac_system, location)
mc.temperature_model = 'noct_sam'
m_noct_sam = mocker.spy(sapm_dc_snl_ac_system, 'get_cell_temperature')
mc.run_model(weather)
assert m_noct_sam.call_count == 1
assert_series_equal(m_noct_sam.call_args[0][1], weather['temp_air'])
assert_series_equal(m_noct_sam.call_args[0][2], weather['wind_speed'])
# check that effective_irradiance was used
assert m_noct_sam.call_args[1] == {
'effective_irradiance': mc.results.effective_irradiance,
'model': 'noct_sam'}
def test_run_model_tracker(sapm_dc_snl_ac_system, location, weather, mocker):
system = SingleAxisTracker(
module_parameters=sapm_dc_snl_ac_system.arrays[0].module_parameters,
temperature_model_parameters=(
sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters
),
inverter_parameters=sapm_dc_snl_ac_system.inverter_parameters)
mocker.spy(system, 'singleaxis')
mc = ModelChain(system, location)
mc.run_model(weather)
assert system.singleaxis.call_count == 1
assert (mc.results.tracking.columns == ['tracker_theta',
'aoi',
'surface_azimuth',
'surface_tilt']).all()
assert mc.results.ac[0] > 0
assert np.isnan(mc.results.ac[1])
assert isinstance(mc.results.dc, pd.DataFrame)
def test_run_model_tracker_list(
sapm_dc_snl_ac_system, location, weather, mocker):
system = SingleAxisTracker(
module_parameters=sapm_dc_snl_ac_system.arrays[0].module_parameters,
temperature_model_parameters=(
sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters
),
inverter_parameters=sapm_dc_snl_ac_system.inverter_parameters)
mocker.spy(system, 'singleaxis')
mc = ModelChain(system, location)
mc.run_model([weather])
assert system.singleaxis.call_count == 1
assert (mc.results.tracking.columns == ['tracker_theta',
'aoi',
'surface_azimuth',
'surface_tilt']).all()
assert mc.results.ac[0] > 0
assert np.isnan(mc.results.ac[1])
assert isinstance(mc.results.dc, tuple)
assert len(mc.results.dc) == 1
def test__assign_total_irrad(sapm_dc_snl_ac_system, location, weather,
total_irrad):
data = pd.concat([weather, total_irrad], axis=1)
mc = ModelChain(sapm_dc_snl_ac_system, location)
mc._assign_total_irrad(data)
assert_frame_equal(mc.results.total_irrad, total_irrad)
def test_prepare_inputs_from_poa(sapm_dc_snl_ac_system, location,
weather, total_irrad):
data = pd.concat([weather, total_irrad], axis=1)
mc = ModelChain(sapm_dc_snl_ac_system, location)
mc.prepare_inputs_from_poa(data)
weather_expected = weather.copy()
weather_expected['temp_air'] = 20
weather_expected['wind_speed'] = 0
# order as expected
weather_expected = weather_expected[
['ghi', 'dhi', 'dni', 'wind_speed', 'temp_air']]
# weather attribute
assert_frame_equal(mc.results.weather, weather_expected)
# total_irrad attribute
assert_frame_equal(mc.results.total_irrad, total_irrad)
assert not pd.isnull(mc.results.solar_position.index[0])
@pytest.mark.parametrize("input_type", [tuple, list])
def test_prepare_inputs_from_poa_multi_data(
sapm_dc_snl_ac_system_Array, location, total_irrad, weather,
input_type):
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
poa = pd.concat([weather, total_irrad], axis=1)
mc.prepare_inputs_from_poa(input_type((poa, poa)))
num_arrays = sapm_dc_snl_ac_system_Array.num_arrays
assert len(mc.results.total_irrad) == num_arrays
@pytest.mark.parametrize("input_type", [tuple, list])
def test_prepare_inputs_from_poa_wrong_number_arrays(
sapm_dc_snl_ac_system_Array, location, total_irrad, weather,
input_type):
len_error = r"Input must be same length as number of Arrays in system\. " \
r"Expected 2, got [0-9]+\."
type_error = r"Input must be a tuple of length 2, got .*\."
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
poa = pd.concat([weather, total_irrad], axis=1)
with pytest.raises(TypeError, match=type_error):
mc.prepare_inputs_from_poa(poa)
with pytest.raises(ValueError, match=len_error):
mc.prepare_inputs_from_poa(input_type((poa,)))
with pytest.raises(ValueError, match=len_error):
mc.prepare_inputs_from_poa(input_type((poa, poa, poa)))
def test_prepare_inputs_from_poa_arrays_different_indices(
sapm_dc_snl_ac_system_Array, location, total_irrad, weather):
error_str = r"Input DataFrames must have same index\."
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
poa = pd.concat([weather, total_irrad], axis=1)
with pytest.raises(ValueError, match=error_str):
mc.prepare_inputs_from_poa((poa, poa.shift(periods=1, freq='6H')))
def test_prepare_inputs_from_poa_arrays_missing_column(
sapm_dc_snl_ac_system_Array, location, weather, total_irrad):
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
poa = pd.concat([weather, total_irrad], axis=1)
with pytest.raises(ValueError, match=r"Incomplete input data\. "
r"Data needs to contain .*\. "
r"Detected data in element 1 "
r"contains: .*"):
mc.prepare_inputs_from_poa((poa, poa.drop(columns='poa_global')))
def test__prepare_temperature(sapm_dc_snl_ac_system, location, weather,
total_irrad):
data = weather.copy()
data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad
mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss',
spectral_model='no_loss')
    # prepare_temperature expects mc.results.total_irrad and mc.results.weather
    # to be set
mc._assign_weather(data)
mc._assign_total_irrad(data)
mc._prepare_temperature(data)
expected = pd.Series([48.928025, 38.080016], index=data.index)
assert_series_equal(mc.results.cell_temperature, expected)
data['module_temperature'] = [40., 30.]
mc._prepare_temperature(data)
expected = pd.Series([42.4, 31.5], index=data.index)
assert_series_equal(mc.results.cell_temperature, expected)
data['cell_temperature'] = [50., 35.]
mc._prepare_temperature(data)
assert_series_equal(mc.results.cell_temperature, data['cell_temperature'])
def test__prepare_temperature_len1_weather_tuple(
sapm_dc_snl_ac_system, location, weather, total_irrad):
# GH 1192
weather['module_temperature'] = [40., 30.]
data = weather.copy()
mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss',
spectral_model='no_loss')
mc.run_model([data])
expected = pd.Series([42.617244212941394, 30.0], index=data.index)
assert_series_equal(mc.results.cell_temperature[0], expected)
data = weather.copy().rename(
columns={
"ghi": "poa_global", "dhi": "poa_diffuse", "dni": "poa_direct"}
)
mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss',
spectral_model='no_loss')
mc.run_model_from_poa([data])
expected = pd.Series([41.5, 30.0], index=data.index)
assert_series_equal(mc.results.cell_temperature[0], expected)
data = weather.copy()[["module_temperature", "ghi"]].rename(
columns={"ghi": "effective_irradiance"}
)
mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss',
spectral_model='no_loss')
mc.run_model_from_effective_irradiance([data])
expected = pd.Series([41.5, 30.0], index=data.index)
assert_series_equal(mc.results.cell_temperature[0], expected)
def test__prepare_temperature_arrays_weather(sapm_dc_snl_ac_system_same_arrays,
location, weather,
total_irrad):
data = weather.copy()
data[['poa_global', 'poa_direct', 'poa_diffuse']] = total_irrad
data_two = data.copy()
mc = ModelChain(sapm_dc_snl_ac_system_same_arrays, location,
aoi_model='no_loss', spectral_model='no_loss')
# prepare_temperature expects mc.results.total_irrad and mc.results.weather
# to be set
mc._assign_weather((data, data_two))
mc._assign_total_irrad((data, data_two))
mc._prepare_temperature((data, data_two))
expected = pd.Series([48.928025, 38.080016], index=data.index)
assert_series_equal(mc.results.cell_temperature[0], expected)
assert_series_equal(mc.results.cell_temperature[1], expected)
data['module_temperature'] = [40., 30.]
mc._prepare_temperature((data, data_two))
expected = pd.Series([42.4, 31.5], index=data.index)
assert (mc.results.cell_temperature[1] != expected).all()
assert_series_equal(mc.results.cell_temperature[0], expected)
data['cell_temperature'] = [50., 35.]
mc._prepare_temperature((data, data_two))
assert_series_equal(
mc.results.cell_temperature[0], data['cell_temperature'])
data_two['module_temperature'] = [40., 30.]
mc._prepare_temperature((data, data_two))
assert_series_equal(mc.results.cell_temperature[1], expected)
assert_series_equal(
mc.results.cell_temperature[0], data['cell_temperature'])
data_two['cell_temperature'] = [10.0, 20.0]
mc._prepare_temperature((data, data_two))
assert_series_equal(
mc.results.cell_temperature[1], data_two['cell_temperature'])
assert_series_equal(
mc.results.cell_temperature[0], data['cell_temperature'])
@pytest.mark.parametrize('temp_params,temp_model',
[({'a': -3.47, 'b': -.0594, 'deltaT': 3},
ModelChain.sapm_temp),
({'u_c': 29.0, 'u_v': 0},
ModelChain.pvsyst_temp),
({'u0': 25.0, 'u1': 6.84},
ModelChain.faiman_temp),
({'noct_installed': 45},
ModelChain.fuentes_temp),
({'noct': 45, 'module_efficiency': 0.2},
ModelChain.noct_sam_temp)])
def test_temperature_models_arrays_multi_weather(
temp_params, temp_model,
sapm_dc_snl_ac_system_same_arrays,
location, weather, total_irrad):
for array in sapm_dc_snl_ac_system_same_arrays.arrays:
array.temperature_model_parameters = temp_params
# set air temp so it does not default to the same value for both arrays
weather['temp_air'] = 25
weather_one = weather
weather_two = weather.copy() * 0.5
mc = ModelChain(sapm_dc_snl_ac_system_same_arrays, location,
aoi_model='no_loss', spectral_model='no_loss')
mc.prepare_inputs((weather_one, weather_two))
temp_model(mc)
assert (mc.results.cell_temperature[0]
!= mc.results.cell_temperature[1]).all()
def test_run_model_solar_position_weather(
pvwatts_dc_pvwatts_ac_system, location, weather, mocker):
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location,
aoi_model='no_loss', spectral_model='no_loss')
weather['pressure'] = 90000
weather['temp_air'] = 25
m = mocker.spy(location, 'get_solarposition')
mc.run_model(weather)
# assert_called_once_with cannot be used with series, so need to use
# assert_series_equal on call_args
assert_series_equal(m.call_args[1]['temperature'], weather['temp_air'])
assert_series_equal(m.call_args[1]['pressure'], weather['pressure'])
def test_run_model_from_poa(sapm_dc_snl_ac_system, location, total_irrad):
mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss',
spectral_model='no_loss')
ac = mc.run_model_from_poa(total_irrad).results.ac
expected = pd.Series(np.array([149.280238, 96.678385]),
index=total_irrad.index)
assert_series_equal(ac, expected)
@pytest.mark.parametrize("input_type", [tuple, list])
def test_run_model_from_poa_arrays(sapm_dc_snl_ac_system_Array, location,
weather, total_irrad, input_type):
data = weather.copy()
data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad
mc = ModelChain(sapm_dc_snl_ac_system_Array, location, aoi_model='no_loss',
spectral_model='no_loss')
mc.run_model_from_poa(input_type((data, data)))
    # arrays have different orientation, but should give same dc power
    # because we are passing the same POA irradiance and air temperature.
assert_frame_equal(mc.results.dc[0], mc.results.dc[1])
def test_run_model_from_poa_arrays_solar_position_weather(
sapm_dc_snl_ac_system_Array, location, weather, total_irrad, mocker):
data = weather.copy()
data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad
data['pressure'] = 90000
data['temp_air'] = 25
data2 = data.copy()
data2['pressure'] = 95000
data2['temp_air'] = 30
mc = ModelChain(sapm_dc_snl_ac_system_Array, location, aoi_model='no_loss',
spectral_model='no_loss')
m = mocker.spy(location, 'get_solarposition')
mc.run_model_from_poa((data, data2))
# mc uses only the first weather data for solar position corrections
assert_series_equal(m.call_args[1]['temperature'], data['temp_air'])
assert_series_equal(m.call_args[1]['pressure'], data['pressure'])
def test_run_model_from_poa_tracking(sapm_dc_snl_ac_system, location,
total_irrad):
system = SingleAxisTracker(
module_parameters=sapm_dc_snl_ac_system.arrays[0].module_parameters,
temperature_model_parameters=(
sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters
),
inverter_parameters=sapm_dc_snl_ac_system.inverter_parameters)
mc = ModelChain(system, location, aoi_model='no_loss',
spectral_model='no_loss')
ac = mc.run_model_from_poa(total_irrad).results.ac
assert (mc.results.tracking.columns == ['tracker_theta',
'aoi',
'surface_azimuth',
'surface_tilt']).all()
expected = pd.Series(np.array([149.280238, 96.678385]),
index=total_irrad.index)
assert_series_equal(ac, expected)
@pytest.mark.parametrize("input_type", [lambda x: x[0], tuple, list])
def test_run_model_from_effective_irradiance(sapm_dc_snl_ac_system, location,
weather, total_irrad, input_type):
data = weather.copy()
data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad
data['effective_irradiance'] = data['poa_global']
mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss',
spectral_model='no_loss')
ac = mc.run_model_from_effective_irradiance(input_type((data,))).results.ac
expected = pd.Series(np.array([149.280238, 96.678385]),
index=data.index)
assert_series_equal(ac, expected)
@pytest.mark.parametrize("input_type", [tuple, list])
def test_run_model_from_effective_irradiance_multi_array(
sapm_dc_snl_ac_system_Array, location, weather, total_irrad,
input_type):
data = weather.copy()
data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad
data['effective_irradiance'] = data['poa_global']
mc = ModelChain(sapm_dc_snl_ac_system_Array, location, aoi_model='no_loss',
spectral_model='no_loss')
mc.run_model_from_effective_irradiance(input_type((data, data)))
    # arrays have different orientation, but should give same dc power
    # because we are passing the same POA irradiance and air
    # temperature.
assert_frame_equal(mc.results.dc[0], mc.results.dc[1])
@pytest.mark.parametrize("input_type", [lambda x: x[0], tuple, list])
def test_run_model_from_effective_irradiance_no_poa_global(
sapm_dc_snl_ac_system, location, weather, total_irrad, input_type):
data = weather.copy()
data['effective_irradiance'] = total_irrad['poa_global']
mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss',
spectral_model='no_loss')
ac = mc.run_model_from_effective_irradiance(input_type((data,))).results.ac
expected = pd.Series(np.array([149.280238, 96.678385]),
index=data.index)
assert_series_equal(ac, expected)
def test_run_model_from_effective_irradiance_poa_global_differs(
sapm_dc_snl_ac_system, location, weather, total_irrad):
data = weather.copy()
data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad
data['effective_irradiance'] = data['poa_global'] * 0.8
mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss',
spectral_model='no_loss')
ac = mc.run_model_from_effective_irradiance(data).results.ac
expected = pd.Series(np.array([118.302801, 76.099841]),
index=data.index)
assert_series_equal(ac, expected)
@pytest.mark.parametrize("input_type", [tuple, list])
def test_run_model_from_effective_irradiance_arrays_error(
sapm_dc_snl_ac_system_Array, location, weather, total_irrad,
input_type):
data = weather.copy()
data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad
    data['effective_irradiance'] = data['poa_global']
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
len_error = r"Input must be same length as number of Arrays in system\. " \
r"Expected 2, got [0-9]+\."
type_error = r"Input must be a tuple of length 2, got DataFrame\."
with pytest.raises(TypeError, match=type_error):
mc.run_model_from_effective_irradiance(data)
with pytest.raises(ValueError, match=len_error):
mc.run_model_from_effective_irradiance(input_type((data,)))
with pytest.raises(ValueError, match=len_error):
mc.run_model_from_effective_irradiance(input_type((data, data, data)))
with pytest.raises(ValueError,
match=r"Input DataFrames must have same index\."):
mc.run_model_from_effective_irradiance(
(data, data.shift(periods=1, freq='6H'))
)
@pytest.mark.parametrize("input_type", [tuple, list])
def test_run_model_from_effective_irradiance_arrays(
sapm_dc_snl_ac_system_Array, location, weather, total_irrad,
input_type):
data = weather.copy()
data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad
data['effective_irradiance'] = data['poa_global']
data['cell_temperature'] = 40
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
mc.run_model_from_effective_irradiance(input_type((data, data)))
    # arrays have different orientation, but should give same dc power
    # because we are passing the same effective irradiance and cell
    # temperature.
assert_frame_equal(mc.results.dc[0], mc.results.dc[1])
# test that unequal inputs create unequal results
data_two = data.copy()
data_two['effective_irradiance'] = data['poa_global'] * 0.5
mc.run_model_from_effective_irradiance(input_type((data, data_two)))
assert (mc.results.dc[0] != mc.results.dc[1]).all().all()
def test_run_model_from_effective_irradiance_minimal_input(
sapm_dc_snl_ac_system, sapm_dc_snl_ac_system_Array,
location, total_irrad):
data = pd.DataFrame({'effective_irradiance': total_irrad['poa_global'],
'cell_temperature': 40},
index=total_irrad.index)
mc = ModelChain(sapm_dc_snl_ac_system, location)
mc.run_model_from_effective_irradiance(data)
# make sure, for a single Array, the result is the correct type and value
assert_series_equal(mc.results.cell_temperature, data['cell_temperature'])
assert not mc.results.dc.empty
assert not mc.results.ac.empty
# test with multiple arrays
mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
mc.run_model_from_effective_irradiance((data, data))
assert_frame_equal(mc.results.dc[0], mc.results.dc[1])
assert not mc.results.ac.empty
def test_run_model_singleton_weather_single_array(cec_dc_snl_ac_system,
location, weather):
mc = ModelChain(cec_dc_snl_ac_system, location,
aoi_model="no_loss", spectral_model="no_loss")
mc.run_model([weather])
assert isinstance(mc.results.weather, tuple)
assert isinstance(mc.results.total_irrad, tuple)
assert isinstance(mc.results.aoi, tuple)
assert isinstance(mc.results.aoi_modifier, tuple)
assert isinstance(mc.results.spectral_modifier, tuple)
assert isinstance(mc.results.effective_irradiance, tuple)
assert isinstance(mc.results.dc, tuple)
assert isinstance(mc.results.cell_temperature, tuple)
assert len(mc.results.cell_temperature) == 1
assert isinstance(mc.results.cell_temperature[0], pd.Series)
def test_run_model_from_poa_singleton_weather_single_array(
sapm_dc_snl_ac_system, location, total_irrad):
mc = ModelChain(sapm_dc_snl_ac_system, location,
aoi_model='no_loss', spectral_model='no_loss')
ac = mc.run_model_from_poa([total_irrad]).results.ac
expected = pd.Series(np.array([149.280238, 96.678385]),
index=total_irrad.index)
assert isinstance(mc.results.weather, tuple)
assert isinstance(mc.results.cell_temperature, tuple)
assert len(mc.results.cell_temperature) == 1
assert isinstance(mc.results.cell_temperature[0], pd.Series)
assert_series_equal(ac, expected)
def test_run_model_from_effective_irradiance_weather_single_array(
sapm_dc_snl_ac_system, location, weather, total_irrad):
data = weather.copy()
data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad
data['effective_irradiance'] = data['poa_global']
mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss',
spectral_model='no_loss')
ac = mc.run_model_from_effective_irradiance([data]).results.ac
expected = pd.Series(np.array([149.280238, 96.678385]),
index=data.index)
assert isinstance(mc.results.weather, tuple)
assert isinstance(mc.results.cell_temperature, tuple)
assert len(mc.results.cell_temperature) == 1
assert isinstance(mc.results.cell_temperature[0], pd.Series)
assert isinstance(mc.results.dc, tuple)
assert len(mc.results.dc) == 1
assert isinstance(mc.results.dc[0], pd.DataFrame)
assert_series_equal(ac, expected)
def poadc(mc):
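    # Minimal user-defined DC model used to exercise dc_model=callable:
    # DC power is taken as 20% of plane-of-array global irradiance.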
mc.results.dc = mc.results.total_irrad['poa_global'] * 0.2
mc.results.dc.name = None # assert_series_equal will fail without this
@pytest.mark.parametrize('dc_model', [
'sapm', 'cec', 'desoto', 'pvsyst', 'singlediode', 'pvwatts_dc'])
def test_infer_dc_model(sapm_dc_snl_ac_system, cec_dc_snl_ac_system,
pvsyst_dc_snl_ac_system, pvwatts_dc_pvwatts_ac_system,
location, dc_model, weather, mocker):
dc_systems = {'sapm': sapm_dc_snl_ac_system,
'cec': cec_dc_snl_ac_system,
'desoto': cec_dc_snl_ac_system,
'pvsyst': pvsyst_dc_snl_ac_system,
'singlediode': cec_dc_snl_ac_system,
'pvwatts_dc': pvwatts_dc_pvwatts_ac_system}
dc_model_function = {'sapm': 'sapm',
'cec': 'calcparams_cec',
'desoto': 'calcparams_desoto',
'pvsyst': 'calcparams_pvsyst',
'singlediode': 'calcparams_desoto',
'pvwatts_dc': 'pvwatts_dc'}
temp_model_function = {'sapm': 'sapm',
'cec': 'sapm',
'desoto': 'sapm',
'pvsyst': 'pvsyst',
'singlediode': 'sapm',
'pvwatts_dc': 'sapm'}
temp_model_params = {'sapm': {'a': -3.40641, 'b': -0.0842075, 'deltaT': 3},
'pvsyst': {'u_c': 29.0, 'u_v': 0}}
system = dc_systems[dc_model]
for array in system.arrays:
array.temperature_model_parameters = temp_model_params[
temp_model_function[dc_model]]
# remove Adjust from model parameters for desoto, singlediode
if dc_model in ['desoto', 'singlediode']:
for array in system.arrays:
array.module_parameters.pop('Adjust')
m = mocker.spy(pvsystem, dc_model_function[dc_model])
mc = ModelChain(system, location,
aoi_model='no_loss', spectral_model='no_loss',
temperature_model=temp_model_function[dc_model])
mc.run_model(weather)
assert m.call_count == 1
assert isinstance(mc.results.dc, (pd.Series, pd.DataFrame))
def test_infer_dc_model_incomplete(multi_array_sapm_dc_snl_ac_system,
location):
match = 'Could not infer DC model from the module_parameters attributes '
system = multi_array_sapm_dc_snl_ac_system['two_array_system']
system.arrays[0].module_parameters.pop('A0')
with pytest.raises(ValueError, match=match):
ModelChain(system, location)
@pytest.mark.parametrize('dc_model', ['cec', 'desoto', 'pvsyst'])
def test_singlediode_dc_arrays(location, dc_model,
cec_dc_snl_ac_arrays,
pvsyst_dc_snl_ac_arrays,
weather):
systems = {'cec': cec_dc_snl_ac_arrays,
'pvsyst': pvsyst_dc_snl_ac_arrays,
'desoto': cec_dc_snl_ac_arrays}
temp_sapm = {'a': -3.40641, 'b': -0.0842075, 'deltaT': 3}
temp_pvsyst = {'u_c': 29.0, 'u_v': 0}
temp_model_params = {'cec': temp_sapm,
'desoto': temp_sapm,
'pvsyst': temp_pvsyst}
temp_model = {'cec': 'sapm', 'desoto': 'sapm', 'pvsyst': 'pvsyst'}
system = systems[dc_model]
for array in system.arrays:
array.temperature_model_parameters = temp_model_params[dc_model]
if dc_model == 'desoto':
for array in system.arrays:
array.module_parameters.pop('Adjust')
mc = ModelChain(system, location,
aoi_model='no_loss', spectral_model='no_loss',
temperature_model=temp_model[dc_model])
mc.run_model(weather)
assert isinstance(mc.results.dc, tuple)
assert len(mc.results.dc) == system.num_arrays
for dc in mc.results.dc:
assert isinstance(dc, (pd.Series, pd.DataFrame))
@pytest.mark.parametrize('dc_model', ['sapm', 'cec', 'cec_native'])
def test_infer_spectral_model(location, sapm_dc_snl_ac_system,
cec_dc_snl_ac_system,
cec_dc_native_snl_ac_system, dc_model):
dc_systems = {'sapm': sapm_dc_snl_ac_system,
'cec': cec_dc_snl_ac_system,
'cec_native': cec_dc_native_snl_ac_system}
system = dc_systems[dc_model]
mc = ModelChain(system, location, aoi_model='physical')
assert isinstance(mc, ModelChain)
@pytest.mark.parametrize('temp_model', [
'sapm_temp', 'faiman_temp', 'pvsyst_temp', 'fuentes_temp',
'noct_sam_temp'])
def test_infer_temp_model(location, sapm_dc_snl_ac_system,
pvwatts_dc_pvwatts_ac_pvsyst_temp_system,
pvwatts_dc_pvwatts_ac_faiman_temp_system,
pvwatts_dc_pvwatts_ac_fuentes_temp_system,
pvwatts_dc_pvwatts_ac_noct_sam_temp_system,
temp_model):
dc_systems = {'sapm_temp': sapm_dc_snl_ac_system,
'pvsyst_temp': pvwatts_dc_pvwatts_ac_pvsyst_temp_system,
'faiman_temp': pvwatts_dc_pvwatts_ac_faiman_temp_system,
'fuentes_temp': pvwatts_dc_pvwatts_ac_fuentes_temp_system,
'noct_sam_temp': pvwatts_dc_pvwatts_ac_noct_sam_temp_system}
system = dc_systems[temp_model]
mc = ModelChain(system, location, aoi_model='physical',
spectral_model='no_loss')
assert temp_model == mc.temperature_model.__name__
assert isinstance(mc, ModelChain)
def test_infer_temp_model_invalid(location, sapm_dc_snl_ac_system):
sapm_dc_snl_ac_system.arrays[0].temperature_model_parameters.pop('a')
with pytest.raises(ValueError):
ModelChain(sapm_dc_snl_ac_system, location,
aoi_model='physical', spectral_model='no_loss')
def test_temperature_model_inconsistent(location, sapm_dc_snl_ac_system):
with pytest.raises(ValueError):
ModelChain(sapm_dc_snl_ac_system, location, aoi_model='physical',
spectral_model='no_loss', temperature_model='pvsyst')
def test_dc_model_user_func(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(sys.modules[__name__], 'poadc')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model=poadc,
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather)
assert m.call_count == 1
assert isinstance(mc.results.ac, (pd.Series, pd.DataFrame))
assert not mc.results.ac.empty
def test_pvwatts_dc_multiple_strings(pvwatts_dc_pvwatts_ac_system, location,
weather, mocker):
system = pvwatts_dc_pvwatts_ac_system
m = mocker.spy(system, 'scale_voltage_current_power')
mc1 = ModelChain(system, location,
aoi_model='no_loss', spectral_model='no_loss')
mc1.run_model(weather)
assert m.call_count == 1
system.arrays[0].modules_per_string = 2
mc2 = ModelChain(system, location,
aoi_model='no_loss', spectral_model='no_loss')
mc2.run_model(weather)
assert isinstance(mc2.results.ac, (pd.Series, pd.DataFrame))
assert not mc2.results.ac.empty
expected = pd.Series(data=[2., np.nan], index=mc2.results.dc.index,
name='p_mp')
assert_series_equal(mc2.results.dc / mc1.results.dc, expected)
def acdc(mc):
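    # Trivial user-defined AC model used to exercise ac_model=callable:
    # the AC output is simply the DC output, unchanged.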
mc.results.ac = mc.results.dc
@pytest.mark.parametrize('inverter_model', ['sandia', 'adr',
'pvwatts', 'sandia_multi',
'pvwatts_multi'])
def test_ac_models(sapm_dc_snl_ac_system, cec_dc_adr_ac_system,
pvwatts_dc_pvwatts_ac_system, cec_dc_snl_ac_arrays,
pvwatts_dc_pvwatts_ac_system_arrays,
location, inverter_model, weather, mocker):
ac_systems = {'sandia': sapm_dc_snl_ac_system,
'sandia_multi': cec_dc_snl_ac_arrays,
'adr': cec_dc_adr_ac_system,
'pvwatts': pvwatts_dc_pvwatts_ac_system,
'pvwatts_multi': pvwatts_dc_pvwatts_ac_system_arrays}
inverter_to_ac_model = {
'sandia': 'sandia',
'sandia_multi': 'sandia',
'adr': 'adr',
'pvwatts': 'pvwatts',
'pvwatts_multi': 'pvwatts'}
ac_model = inverter_to_ac_model[inverter_model]
system = ac_systems[inverter_model]
mc_inferred = ModelChain(system, location,
aoi_model='no_loss', spectral_model='no_loss')
mc = ModelChain(system, location, ac_model=ac_model,
aoi_model='no_loss', spectral_model='no_loss')
# tests ModelChain.infer_ac_model
assert mc_inferred.ac_model.__name__ == mc.ac_model.__name__
m = mocker.spy(inverter, inverter_model)
mc.run_model(weather)
assert m.call_count == 1
assert isinstance(mc.results.ac, pd.Series)
assert not mc.results.ac.empty
assert mc.results.ac[1] < 1
def test_ac_model_user_func(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(sys.modules[__name__], 'acdc')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, ac_model=acdc,
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather)
assert m.call_count == 1
assert_series_equal(mc.results.ac, mc.results.dc)
assert not mc.results.ac.empty
def test_ac_model_not_a_model(pvwatts_dc_pvwatts_ac_system, location, weather):
exc_text = 'not a valid AC power model'
with pytest.raises(ValueError, match=exc_text):
ModelChain(pvwatts_dc_pvwatts_ac_system, location,
ac_model='not_a_model', aoi_model='no_loss',
spectral_model='no_loss')
def test_infer_ac_model_invalid_params(location):
# only the keys are relevant here, using arbitrary values
module_parameters = {'pdc0': 1, 'gamma_pdc': 1}
system = pvsystem.PVSystem(
arrays=[pvsystem.Array(
module_parameters=module_parameters
)],
inverter_parameters={'foo': 1, 'bar': 2}
)
with pytest.raises(ValueError, match='could not infer AC model'):
ModelChain(system, location)
def constant_aoi_loss(mc):
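    # User-defined AOI loss model for testing: fixed 0.9 incidence-angle modifier.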
mc.results.aoi_modifier = 0.9
@pytest.mark.parametrize('aoi_model', [
'sapm', 'ashrae', 'physical', 'martin_ruiz'
])
def test_aoi_models(sapm_dc_snl_ac_system, location, aoi_model,
weather, mocker):
mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm',
aoi_model=aoi_model, spectral_model='no_loss')
m = mocker.spy(sapm_dc_snl_ac_system, 'get_iam')
mc.run_model(weather=weather)
assert m.call_count == 1
assert isinstance(mc.results.ac, pd.Series)
assert not mc.results.ac.empty
assert mc.results.ac[0] > 150 and mc.results.ac[0] < 200
assert mc.results.ac[1] < 1
@pytest.mark.parametrize('aoi_model', [
'sapm', 'ashrae', 'physical', 'martin_ruiz'
])
def test_aoi_models_singleton_weather_single_array(
sapm_dc_snl_ac_system, location, aoi_model, weather):
mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm',
aoi_model=aoi_model, spectral_model='no_loss')
mc.run_model(weather=[weather])
assert isinstance(mc.results.aoi_modifier, tuple)
assert len(mc.results.aoi_modifier) == 1
assert isinstance(mc.results.ac, pd.Series)
assert not mc.results.ac.empty
assert mc.results.ac[0] > 150 and mc.results.ac[0] < 200
assert mc.results.ac[1] < 1
def test_aoi_model_no_loss(sapm_dc_snl_ac_system, location, weather):
mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm',
aoi_model='no_loss', spectral_model='no_loss')
mc.run_model(weather)
assert mc.results.aoi_modifier == 1.0
assert not mc.results.ac.empty
assert mc.results.ac[0] > 150 and mc.results.ac[0] < 200
assert mc.results.ac[1] < 1
def test_aoi_model_user_func(sapm_dc_snl_ac_system, location, weather, mocker):
m = mocker.spy(sys.modules[__name__], 'constant_aoi_loss')
mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm',
aoi_model=constant_aoi_loss, spectral_model='no_loss')
mc.run_model(weather)
assert m.call_count == 1
assert mc.results.aoi_modifier == 0.9
assert not mc.results.ac.empty
assert mc.results.ac[0] > 140 and mc.results.ac[0] < 200
assert mc.results.ac[1] < 1
@pytest.mark.parametrize('aoi_model', [
'sapm', 'ashrae', 'physical', 'martin_ruiz'
])
def test_infer_aoi_model(location, system_no_aoi, aoi_model):
for k in iam._IAM_MODEL_PARAMS[aoi_model]:
system_no_aoi.arrays[0].module_parameters.update({k: 1.0})
mc = ModelChain(system_no_aoi, location, spectral_model='no_loss')
assert isinstance(mc, ModelChain)
def test_infer_aoi_model_invalid(location, system_no_aoi):
exc_text = 'could not infer AOI model'
with pytest.raises(ValueError, match=exc_text):
ModelChain(system_no_aoi, location, spectral_model='no_loss')
def constant_spectral_loss(mc):
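    # User-defined spectral loss model for testing: fixed 0.9 spectral modifier.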
mc.results.spectral_modifier = 0.9
@pytest.mark.parametrize('spectral_model', [
'sapm', 'first_solar', 'no_loss', constant_spectral_loss
])
def test_spectral_models(sapm_dc_snl_ac_system, location, spectral_model,
weather):
# add pw to weather dataframe
weather['precipitable_water'] = [0.3, 0.5]
mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm',
aoi_model='no_loss', spectral_model=spectral_model)
spectral_modifier = mc.run_model(weather).results.spectral_modifier
assert isinstance(spectral_modifier, (pd.Series, float, int))
@pytest.mark.parametrize('spectral_model', [
'sapm', 'first_solar', 'no_loss', constant_spectral_loss
])
def test_spectral_models_singleton_weather_single_array(
sapm_dc_snl_ac_system, location, spectral_model, weather):
# add pw to weather dataframe
weather['precipitable_water'] = [0.3, 0.5]
mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm',
aoi_model='no_loss', spectral_model=spectral_model)
spectral_modifier = mc.run_model([weather]).results.spectral_modifier
assert isinstance(spectral_modifier, tuple)
assert len(spectral_modifier) == 1
assert isinstance(spectral_modifier[0], (pd.Series, float, int))
def constant_losses(mc):
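    # User-defined losses model for testing: apply a constant 10% loss
    # (factor 0.9) to the DC output.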
mc.results.losses = 0.9
mc.results.dc *= mc.results.losses
def dc_constant_losses(mc):
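    # User-defined dc_ohmic_model for testing: scale p_mp by a constant 0.9.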
mc.results.dc['p_mp'] *= 0.9
def test_dc_ohmic_model_ohms_from_percent(cec_dc_snl_ac_system,
cec_dc_snl_ac_arrays,
location,
weather,
mocker):
m = mocker.spy(pvsystem, 'dc_ohms_from_percent')
system = cec_dc_snl_ac_system
for array in system.arrays:
array.array_losses_parameters = dict(dc_ohmic_percent=3)
mc = ModelChain(system, location,
aoi_model='no_loss',
spectral_model='no_loss',
dc_ohmic_model='dc_ohms_from_percent')
mc.run_model(weather)
assert m.call_count == 1
assert isinstance(mc.results.dc_ohmic_losses, pd.Series)
system = cec_dc_snl_ac_arrays
for array in system.arrays:
array.array_losses_parameters = dict(dc_ohmic_percent=3)
mc = ModelChain(system, location,
aoi_model='no_loss',
spectral_model='no_loss',
dc_ohmic_model='dc_ohms_from_percent')
mc.run_model(weather)
assert m.call_count == 3
assert len(mc.results.dc_ohmic_losses) == len(mc.system.arrays)
assert isinstance(mc.results.dc_ohmic_losses, tuple)
def test_dc_ohmic_model_no_dc_ohmic_loss(cec_dc_snl_ac_system,
location,
weather,
mocker):
m = mocker.spy(modelchain.ModelChain, 'no_dc_ohmic_loss')
mc = ModelChain(cec_dc_snl_ac_system, location,
aoi_model='no_loss',
spectral_model='no_loss',
dc_ohmic_model='no_loss')
mc.run_model(weather)
assert mc.dc_ohmic_model == mc.no_dc_ohmic_loss
assert m.call_count == 1
assert mc.results.dc_ohmic_losses is None
def test_dc_ohmic_ext_def(cec_dc_snl_ac_system, location,
weather, mocker):
m = mocker.spy(sys.modules[__name__], 'dc_constant_losses')
mc = ModelChain(cec_dc_snl_ac_system, location,
aoi_model='no_loss',
spectral_model='no_loss',
dc_ohmic_model=dc_constant_losses)
mc.run_model(weather)
assert m.call_count == 1
assert isinstance(mc.results.ac, (pd.Series, pd.DataFrame))
assert not mc.results.ac.empty
def test_dc_ohmic_not_a_model(cec_dc_snl_ac_system, location,
weather, mocker):
exc_text = 'not_a_dc_model is not a valid losses model'
with pytest.raises(ValueError, match=exc_text):
ModelChain(cec_dc_snl_ac_system, location,
aoi_model='no_loss',
spectral_model='no_loss',
dc_ohmic_model='not_a_dc_model')
def test_losses_models_pvwatts(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
age = 1
pvwatts_dc_pvwatts_ac_system.losses_parameters = dict(age=age)
m = mocker.spy(pvsystem, 'pvwatts_losses')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss',
losses_model='pvwatts')
mc.run_model(weather)
assert m.call_count == 1
m.assert_called_with(age=age)
assert isinstance(mc.results.ac, (pd.Series, pd.DataFrame))
assert not mc.results.ac.empty
# check that we're applying correction to dc
# GH 696
dc_with_loss = mc.results.dc
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss',
losses_model='no_loss')
mc.run_model(weather)
assert not np.allclose(mc.results.dc, dc_with_loss, equal_nan=True)
def test_losses_models_pvwatts_arrays(multi_array_sapm_dc_snl_ac_system,
location, weather):
age = 1
system_both = multi_array_sapm_dc_snl_ac_system['two_array_system']
system_both.losses_parameters = dict(age=age)
mc = ModelChain(system_both, location,
aoi_model='no_loss', spectral_model='no_loss',
losses_model='pvwatts')
mc.run_model(weather)
dc_with_loss = mc.results.dc
mc = ModelChain(system_both, location,
aoi_model='no_loss', spectral_model='no_loss',
losses_model='no_loss')
mc.run_model(weather)
assert not np.allclose(mc.results.dc[0], dc_with_loss[0], equal_nan=True)
assert not np.allclose(mc.results.dc[1], dc_with_loss[1], equal_nan=True)
assert not mc.results.ac.empty
def test_losses_models_ext_def(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(sys.modules[__name__], 'constant_losses')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss',
losses_model=constant_losses)
mc.run_model(weather)
assert m.call_count == 1
assert isinstance(mc.results.ac, (pd.Series, pd.DataFrame))
assert mc.results.losses == 0.9
assert not mc.results.ac.empty
def test_losses_models_no_loss(pvwatts_dc_pvwatts_ac_system, location, weather,
mocker):
m = mocker.spy(pvsystem, 'pvwatts_losses')
mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location, dc_model='pvwatts',
aoi_model='no_loss', spectral_model='no_loss',
losses_model='no_loss')
assert mc.losses_model == mc.no_extra_losses
mc.run_model(weather)
assert m.call_count == 0
assert mc.results.losses == 1
def test_invalid_dc_model_params(sapm_dc_snl_ac_system, cec_dc_snl_ac_system,
pvwatts_dc_pvwatts_ac_system, location):
kwargs = {'dc_model': 'sapm', 'ac_model': 'sandia',
'aoi_model': 'no_loss', 'spectral_model': 'no_loss',
'temperature_model': 'sapm', 'losses_model': 'no_loss'}
for array in sapm_dc_snl_ac_system.arrays:
array.module_parameters.pop('A0') # remove a parameter
with pytest.raises(ValueError):
ModelChain(sapm_dc_snl_ac_system, location, **kwargs)
kwargs['dc_model'] = 'singlediode'
for array in cec_dc_snl_ac_system.arrays:
array.module_parameters.pop('a_ref') # remove a parameter
with pytest.raises(ValueError):
ModelChain(cec_dc_snl_ac_system, location, **kwargs)
kwargs['dc_model'] = 'pvwatts'
kwargs['ac_model'] = 'pvwatts'
for array in pvwatts_dc_pvwatts_ac_system.arrays:
array.module_parameters.pop('pdc0')
match = 'one or more Arrays are missing one or more required parameters'
with pytest.raises(ValueError, match=match):
ModelChain(pvwatts_dc_pvwatts_ac_system, location, **kwargs)
@pytest.mark.parametrize('model', [
'dc_model', 'ac_model', 'aoi_model', 'spectral_model',
'temperature_model', 'losses_model'
])
def test_invalid_models(model, sapm_dc_snl_ac_system, location):
kwargs = {'dc_model': 'pvwatts', 'ac_model': 'pvwatts',
'aoi_model': 'no_loss', 'spectral_model': 'no_loss',
'temperature_model': 'sapm', 'losses_model': 'no_loss'}
kwargs[model] = 'invalid'
with pytest.raises(ValueError):
ModelChain(sapm_dc_snl_ac_system, location, **kwargs)
def test_bad_get_orientation():
with pytest.raises(ValueError):
modelchain.get_orientation('bad value')
# tests for PVSystem with multiple Arrays
def test_with_sapm_pvsystem_arrays(sapm_dc_snl_ac_system_Array, location,
weather):
mc = ModelChain.with_sapm(sapm_dc_snl_ac_system_Array, location,
ac_model='sandia')
assert mc.dc_model == mc.sapm
assert mc.ac_model == mc.sandia_inverter
mc.run_model(weather)
assert mc.results
def test_ModelChain_no_extra_kwargs(sapm_dc_snl_ac_system, location):
with pytest.raises(TypeError, match="arbitrary_kwarg"):
ModelChain(sapm_dc_snl_ac_system, location, arbitrary_kwarg='value')
@fail_on_pvlib_version('1.0')
def test_ModelChain_attributes_deprecated_10(sapm_dc_snl_ac_system, location):
match = 'Use ModelChain.results'
mc = ModelChain(sapm_dc_snl_ac_system, location)
with pytest.warns(pvlibDeprecationWarning, match=match):
mc.aoi
with pytest.warns(pvlibDeprecationWarning, match=match):
mc.aoi = 5
@requires_tables
def test_basic_chain_alt_az(sam_data, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
surface_tilt = 0
surface_azimuth = 0
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
temp_model_params = sapm_temperature_cs5p_220m.copy()
dc, ac = modelchain.basic_chain(times, latitude, longitude,
surface_tilt, surface_azimuth,
module_parameters, temp_model_params,
cec_inverter_parameters)
expected = pd.Series(np.array([111.621405, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
@requires_tables
def test_basic_chain_altitude_pressure(sam_data, cec_inverter_parameters,
sapm_temperature_cs5p_220m):
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
latitude = 32.2
longitude = -111
altitude = 700
surface_tilt = 0
surface_azimuth = 0
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_']
temp_model_params = sapm_temperature_cs5p_220m.copy()
dc, ac = modelchain.basic_chain(times, latitude, longitude,
surface_tilt, surface_azimuth,
module_parameters, temp_model_params,
cec_inverter_parameters,
pressure=93194)
expected = pd.Series(np.array([113.190045, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
dc, ac = modelchain.basic_chain(times, latitude, longitude,
surface_tilt, surface_azimuth,
module_parameters, temp_model_params,
cec_inverter_parameters,
altitude=altitude)
expected = pd.Series(np.array([113.189814, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_complete_irradiance_clean_run(sapm_dc_snl_ac_system, location):
"""The DataFrame should not change if all columns are passed"""
mc = ModelChain(sapm_dc_snl_ac_system, location)
times = pd.date_range('2010-07-05 9:00:00', periods=2, freq='H')
i = pd.DataFrame(
{'dni': [2, 3], 'dhi': [4, 6], 'ghi': [9, 5]}, index=times)
mc.complete_irradiance(i)
assert_series_equal(mc.results.weather['dni'],
pd.Series([2, 3], index=times, name='dni'))
assert_series_equal(mc.results.weather['dhi'],
pd.Series([4, 6], index=times, name='dhi'))
assert_series_equal(mc.results.weather['ghi'],
pd.Series([9, 5], index=times, name='ghi'))
@requires_tables
def test_complete_irradiance(sapm_dc_snl_ac_system, location):
"""Check calculations"""
mc = ModelChain(sapm_dc_snl_ac_system, location)
times = pd.date_range('2010-07-05 7:00:00-0700', periods=2, freq='H')
i = pd.DataFrame({'dni': [49.756966, 62.153947],
'ghi': [372.103976116, 497.087579068],
'dhi': [356.543700, 465.44400]}, index=times)
with pytest.warns(UserWarning):
mc.complete_irradiance(i[['ghi', 'dni']])
assert_series_equal(mc.results.weather['dhi'],
pd.Series([356.543700, 465.44400],
index=times, name='dhi'))
with pytest.warns(UserWarning):
mc.complete_irradiance(i[['dhi', 'dni']])
assert_series_equal(mc.results.weather['ghi'],
pd.Series([372.103976116, 497.087579068],
index=times, name='ghi'))
mc.complete_irradiance(i[['dhi', 'ghi']])
assert_series_equal(mc.results.weather['dni'],
pd.Series([49.756966, 62.153947],
index=times, name='dni'))
@pytest.mark.filterwarnings("ignore:This function is not safe at the moment")
@pytest.mark.parametrize("input_type", [tuple, list])
@requires_tables
def test_complete_irradiance_arrays(
sapm_dc_snl_ac_system_same_arrays, location, input_type):
"""ModelChain.complete_irradiance can accept a tuple of weather
DataFrames."""
times = pd.date_range(start='2020-01-01 0700-0700', periods=2, freq='H')
weather = pd.DataFrame({'dni': [2, 3],
'dhi': [4, 6],
'ghi': [9, 5]}, index=times)
mc = ModelChain(sapm_dc_snl_ac_system_same_arrays, location)
with pytest.raises(ValueError,
match=r"Input DataFrames must have same index\."):
mc.complete_irradiance(input_type((weather, weather[1:])))
mc.complete_irradiance(input_type((weather, weather)))
for mc_weather in mc.results.weather:
assert_series_equal(mc_weather['dni'],
pd.Series([2, 3], index=times, name='dni'))
assert_series_equal(mc_weather['dhi'],
pd.Series([4, 6], index=times, name='dhi'))
assert_series_equal(mc_weather['ghi'],
pd.Series([9, 5], index=times, name='ghi'))
mc = ModelChain(sapm_dc_snl_ac_system_same_arrays, location)
mc.complete_irradiance(input_type((weather[['ghi', 'dhi']],
weather[['dhi', 'dni']])))
assert 'dni' in mc.results.weather[0].columns
assert 'ghi' in mc.results.weather[1].columns
mc.complete_irradiance(input_type((weather, weather[['ghi', 'dni']])))
assert_series_equal(mc.results.weather[0]['dhi'],
pd.Series([4, 6], index=times, name='dhi'))
assert_series_equal(mc.results.weather[0]['ghi'],
pd.Series([9, 5], index=times, name='ghi'))
assert_series_equal(mc.results.weather[0]['dni'],
pd.Series([2, 3], index=times, name='dni'))
assert 'dhi' in mc.results.weather[1].columns
@pytest.mark.parametrize("input_type", [tuple, list])
def test_complete_irradiance_arrays_wrong_length(
sapm_dc_snl_ac_system_same_arrays, location, input_type):
mc = ModelChain(sapm_dc_snl_ac_system_same_arrays, location)
times = pd.date_range(start='2020-01-01 0700-0700', periods=2, freq='H')
weather = pd.DataFrame({'dni': [2, 3],
'dhi': [4, 6],
'ghi': [9, 5]}, index=times)
error_str = "Input must be same length as number " \
r"of Arrays in system\. Expected 2, got [0-9]+\."
with pytest.raises(ValueError, match=error_str):
mc.complete_irradiance(input_type((weather,)))
with pytest.raises(ValueError, match=error_str):
mc.complete_irradiance(input_type((weather, weather, weather)))
def test_unknown_attribute(sapm_dc_snl_ac_system, location):
mc = ModelChain(sapm_dc_snl_ac_system, location)
with pytest.raises(AttributeError):
mc.unknown_attribute
def test_inconsistent_array_params(location,
sapm_module_params,
cec_module_params):
module_error = ".* selected for the DC model but one or more Arrays are " \
"missing one or more required parameters"
temperature_error = "could not infer temperature model from " \
r"system\.temperature_model_parameters\. Check " \
r"that all Arrays in system\.arrays have " \
r"parameters for the same temperature model\. " \
r"Common temperature model parameters: .*"
different_module_system = pvsystem.PVSystem(
arrays=[
pvsystem.Array(
module_parameters=sapm_module_params),
pvsystem.Array(
module_parameters=cec_module_params),
pvsystem.Array(
module_parameters=cec_module_params)]
)
with pytest.raises(ValueError, match=module_error):
ModelChain(different_module_system, location, dc_model='cec')
different_temp_system = pvsystem.PVSystem(
arrays=[
pvsystem.Array(
module_parameters=cec_module_params,
temperature_model_parameters={'a': 1,
'b': 1,
'deltaT': 1}),
pvsystem.Array(
module_parameters=cec_module_params,
temperature_model_parameters={'a': 2,
'b': 2,
'deltaT': 2}),
pvsystem.Array(
module_parameters=cec_module_params,
temperature_model_parameters={'b': 3, 'deltaT': 3})]
)
with pytest.raises(ValueError, match=temperature_error):
ModelChain(different_temp_system, location,
ac_model='sandia',
aoi_model='no_loss', spectral_model='no_loss',
temperature_model='sapm')
def test_modelchain__common_keys():
dictionary = {'a': 1, 'b': 1}
series = pd.Series(dictionary)
assert {'a', 'b'} == modelchain._common_keys(
{'a': 1, 'b': 1}
)
assert {'a', 'b'} == modelchain._common_keys(
pd.Series({'a': 1, 'b': 1})
)
assert {'a', 'b'} == modelchain._common_keys(
(dictionary, series)
)
no_a = dictionary.copy()
del no_a['a']
assert {'b'} == modelchain._common_keys(
(dictionary, no_a)
)
assert {'b'} == modelchain._common_keys(
(series, pd.Series(no_a))
)
assert {'b'} == modelchain._common_keys(
(series, no_a)
)
def test__irrad_for_celltemp():
total_irrad = pd.DataFrame(index=[0, 1], columns=['poa_global'],
data=[10., 20.])
empty = total_irrad.drop('poa_global', axis=1)
effect_irrad = pd.Series(index=total_irrad.index, data=[5., 8.])
# test with single array inputs
poa = modelchain._irrad_for_celltemp(total_irrad, effect_irrad)
assert_series_equal(poa, total_irrad['poa_global'])
poa = modelchain._irrad_for_celltemp(empty, effect_irrad)
assert_series_equal(poa, effect_irrad)
# test with tuples
poa = modelchain._irrad_for_celltemp(
(total_irrad, total_irrad), (effect_irrad, effect_irrad))
assert len(poa) == 2
assert_series_equal(poa[0], total_irrad['poa_global'])
assert_series_equal(poa[1], total_irrad['poa_global'])
poa = modelchain._irrad_for_celltemp(
(empty, empty), (effect_irrad, effect_irrad))
assert len(poa) == 2
assert_series_equal(poa[0], effect_irrad)
assert_series_equal(poa[1], effect_irrad)
|
anryko/ansible
|
refs/heads/devel
|
lib/ansible/modules/source_control/github/github_webhook_info.py
|
25
|
#!/usr/bin/python
#
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: github_webhook_info
short_description: Query information about GitHub webhooks
version_added: "2.8"
description:
- "Query information about GitHub webhooks"
- This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change.
requirements:
- "PyGithub >= 1.3.5"
options:
repository:
description:
- Full name of the repository to configure a hook for
required: true
aliases:
- repo
user:
description:
- User to authenticate to GitHub as
required: true
password:
description:
- Password to authenticate to GitHub with
required: false
token:
description:
- Token to authenticate to GitHub with
required: false
github_url:
description:
- Base URL of the github api
required: false
default: https://api.github.com
author:
- "Chris St. Pierre (@stpierre)"
'''
EXAMPLES = '''
- name: list hooks for a repository (password auth)
github_webhook_info:
repository: ansible/ansible
user: "{{ github_user }}"
password: "{{ github_password }}"
register: ansible_webhooks
- name: list hooks for a repository on GitHub Enterprise (token auth)
github_webhook_info:
repository: myorg/myrepo
user: "{{ github_user }}"
token: "{{ github_user_api_token }}"
github_url: https://github.example.com/api/v3/
register: myrepo_webhooks
'''
RETURN = '''
---
hooks:
description: A list of hooks that exist for the repo
returned: always
type: list
sample: >
[{"has_shared_secret": true,
"url": "https://jenkins.example.com/ghprbhook/",
"events": ["issue_comment", "pull_request"],
"insecure_ssl": "1",
"content_type": "json",
"active": true,
"id": 6206,
"last_response": {"status": "active", "message": "OK", "code": 200}}]
'''
import traceback
GITHUB_IMP_ERR = None
try:
import github
HAS_GITHUB = True
except ImportError:
GITHUB_IMP_ERR = traceback.format_exc()
HAS_GITHUB = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
def _munge_hook(hook_obj):
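    # Flatten a PyGithub Hook object into a plain dict, merging in its config
    # and replacing the shared secret with a boolean flag.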
retval = {
"active": hook_obj.active,
"events": hook_obj.events,
"id": hook_obj.id,
"url": hook_obj.url,
}
retval.update(hook_obj.config)
retval["has_shared_secret"] = "secret" in retval
if "secret" in retval:
del retval["secret"]
retval["last_response"] = hook_obj.last_response.raw_data
return retval
def main():
module = AnsibleModule(
argument_spec=dict(
repository=dict(type='str', required=True, aliases=["repo"]),
user=dict(type='str', required=True),
password=dict(type='str', required=False, no_log=True),
token=dict(type='str', required=False, no_log=True),
github_url=dict(
type='str', required=False, default="https://api.github.com")),
mutually_exclusive=(('password', 'token'), ),
required_one_of=(("password", "token"), ),
supports_check_mode=True)
if module._name == 'github_webhook_facts':
module.deprecate("The 'github_webhook_facts' module has been renamed to 'github_webhook_info'", version='2.13')
if not HAS_GITHUB:
module.fail_json(msg=missing_required_lib('PyGithub'),
exception=GITHUB_IMP_ERR)
try:
github_conn = github.Github(
module.params["user"],
module.params.get("password") or module.params.get("token"),
base_url=module.params["github_url"])
except github.GithubException as err:
module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
module.params["github_url"], to_native(err)))
try:
repo = github_conn.get_repo(module.params["repository"])
except github.BadCredentialsException as err:
module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
module.params["github_url"], to_native(err)))
except github.UnknownObjectException as err:
module.fail_json(
msg="Could not find repository %s in GitHub at %s: %s" % (
module.params["repository"], module.params["github_url"],
to_native(err)))
except Exception as err:
module.fail_json(
msg="Could not fetch repository %s from GitHub at %s: %s" %
(module.params["repository"], module.params["github_url"],
to_native(err)),
exception=traceback.format_exc())
try:
hooks = [_munge_hook(h) for h in repo.get_hooks()]
except github.GithubException as err:
module.fail_json(
msg="Unable to get hooks from repository %s: %s" %
(module.params["repository"], to_native(err)),
exception=traceback.format_exc())
module.exit_json(changed=False, hooks=hooks)
if __name__ == '__main__':
main()
|
hosseinmh/jango_learning
|
refs/heads/master
|
.venv/lib/python2.7/site-packages/setuptools/depends.py
|
336
|
import sys
import imp
import marshal
from distutils.version import StrictVersion
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from .py33compat import Bytecode
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
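        # Store every constructor argument as an instance attribute, then
        # drop the spurious 'self' entry that locals() also captured.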
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name, self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f, p, i = find_module(self.module, paths)
if f:
f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
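# A minimal usage sketch for Require (the package name and version below are
# purely illustrative, not taken from this file):
#
#     dep = Require('Example', '1.0', 'example')
#     dep.is_present()   # True if 'example' is importable and defines __version__
#     dep.is_current()   # additionally checks that the version compares >= 1.0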
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix, mode, kind) = info = imp.find_module(part, paths)
if kind == PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts, module))
return info
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
try:
if kind == PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind == PY_FROZEN:
code = imp.get_frozen_object(module)
elif kind == PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
if module not in sys.modules:
imp.load_module(module, f, path, (suffix, mode, kind))
return getattr(sys.modules[module], symbol, None)
finally:
if f:
f.close()
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for byte_code in Bytecode(code):
op = byte_code.opcode
arg = byte_code.arg
if op == LOAD_CONST:
const = code.co_consts[arg]
elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
return const
else:
const = default
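# A rough usage sketch for extract_constant (the module name and value are
# hypothetical, shown only to illustrate the call):
#
#     code = compile("__version__ = '1.2'", '<example>', 'exec')
#     extract_constant(code, '__version__')   # -> '1.2'
#     extract_constant(code, 'missing_name')  # -> None (symbol not in co_names)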
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
|
AnubhaAgrawal/todoman
|
refs/heads/master
|
tests/test_formatter.py
|
1
|
from datetime import date, datetime, time, timedelta
import pytest
import pytz
from freezegun import freeze_time
from todoman.cli import cli
from todoman.formatters import rgb_to_ansi
@pytest.mark.parametrize('interval', [
(65, 'in a minute'),
(-10800, '3 hours ago'),
])
@pytest.mark.parametrize('tz', ['CET', 'HST'])
@freeze_time('2017-03-25')
def test_humanized_date(runner, create, interval, now_for_tz, tz):
seconds, expected = interval
due = now_for_tz(tz) + timedelta(seconds=seconds)
create(
'test.ics',
'SUMMARY:Hi human!\n'
'DUE;VALUE=DATE-TIME;TZID={}:{}\n'
.format(tz, due.strftime('%Y%m%dT%H%M%S'))
)
result = runner.invoke(cli, ['--humanize', 'list', '--status', 'ANY'])
assert not result.exception
assert expected in result.output
def test_format_priority(default_formatter):
assert default_formatter.format_priority(None) == 'none'
assert default_formatter.format_priority(0) == 'none'
assert default_formatter.format_priority(5) == 'medium'
for i in range(1, 5):
assert default_formatter.format_priority(i) == 'high'
for i in range(6, 10):
assert default_formatter.format_priority(i) == 'low'
def test_format_priority_compact(default_formatter):
assert default_formatter.format_priority_compact(None) == ''
assert default_formatter.format_priority_compact(0) == ''
assert default_formatter.format_priority_compact(5) == '!!'
for i in range(1, 5):
assert default_formatter.format_priority_compact(i) == '!!!'
for i in range(6, 10):
assert default_formatter.format_priority_compact(i) == '!'
def test_format_date(default_formatter):
assert default_formatter.format_datetime(date(2017, 3, 4)) == '2017-03-04'
def test_format_datetime(default_formatter):
assert default_formatter.format_datetime(datetime(2017, 3, 4, 17, 00)) == \
'2017-03-04 17:00'
def test_detailed_format(runner, todo_factory):
todo_factory(
description='Test detailed formatting\n'
'This includes multiline descriptions\n'
'Blah!',
location='Over the hills, and far away',
)
# TODO:use formatter instead of runner?
result = runner.invoke(cli, ['show', '1'])
expected = (
'1 [ ] YARR! @default\n\n'
'Description Test detailed formatting\n'
' This includes multiline descriptions\n'
' Blah!\n'
'Location Over the hills, and far away'
)
assert not result.exception
assert result.output.strip() == expected
def test_parse_time(default_formatter):
tz = pytz.timezone('CET')
parsed = default_formatter.parse_datetime('12:00')
expected = datetime.combine(
date.today(),
time(hour=12, minute=0),
).replace(tzinfo=tz)
assert parsed == expected
def test_parse_datetime(default_formatter):
tz = pytz.timezone('CET')
parsed = default_formatter.parse_datetime('2017-03-05')
assert parsed == datetime(2017, 3, 5).replace(tzinfo=tz)
parsed = default_formatter.parse_datetime('2017-03-05 12:00')
assert parsed == datetime(2017, 3, 5, 12).replace(tzinfo=tz)
    # Note: dates will round to the NEXT matching date, so we need to freeze
    # time for this one:
with freeze_time('2017-03-04'):
parsed = default_formatter.parse_datetime(
'Mon Mar 6 22:50:52 -03 2017'
)
assert parsed == datetime(2017, 3, 6, 20, 17).replace(tzinfo=tz)
assert default_formatter.parse_datetime('') is None
assert default_formatter.parse_datetime(None) is None
def test_humanized_parse_datetime(humanized_formatter):
tz = pytz.timezone('CET')
humanized_formatter.now = datetime(2017, 3, 6, 22, 17).replace(tzinfo=tz)
dt = datetime(2017, 3, 6, 20, 17).replace(tzinfo=tz)
assert humanized_formatter.format_datetime(dt) == '2 hours ago'
assert humanized_formatter.format_datetime(None) == ''
def test_simple_action(default_formatter, todo_factory):
todo = todo_factory()
assert default_formatter.simple_action('Delete', todo) == \
'Delete "YARR!"'
def test_formatting_parsing_consistency(default_formatter):
tz = pytz.timezone('CET')
dt = datetime(2017, 3, 8, 21, 6).replace(tzinfo=tz)
formatted = default_formatter.format_datetime(dt)
assert default_formatter.parse_datetime(formatted) == dt
def test_rgb_to_ansi():
assert rgb_to_ansi(None) is None
assert rgb_to_ansi('#8ab6d') is None
assert rgb_to_ansi('#8ab6d2f') is None
assert rgb_to_ansi('red') is None
assert rgb_to_ansi('#8ab6d2') == '\x1b[38;2;138;182;210m'
|
lwiecek/django
|
refs/heads/master
|
tests/messages_tests/test_api.py
|
337
|
from django.contrib import messages
from django.test import RequestFactory, SimpleTestCase
class DummyStorage(object):
"""
    Dummy message store to test the API methods.
"""
def __init__(self):
self.store = []
def add(self, level, message, extra_tags=''):
self.store.append(message)
class ApiTest(SimpleTestCase):
def setUp(self):
self.rf = RequestFactory()
self.request = self.rf.request()
self.storage = DummyStorage()
def test_ok(self):
msg = 'some message'
self.request._messages = self.storage
messages.add_message(self.request, messages.DEBUG, msg)
self.assertIn(msg, self.storage.store)
def test_request_is_none(self):
msg = 'some message'
self.request._messages = self.storage
with self.assertRaises(TypeError):
messages.add_message(None, messages.DEBUG, msg)
self.assertEqual([], self.storage.store)
def test_middleware_missing(self):
msg = 'some message'
with self.assertRaises(messages.MessageFailure):
messages.add_message(self.request, messages.DEBUG, msg)
self.assertEqual([], self.storage.store)
def test_middleware_missing_silently(self):
msg = 'some message'
messages.add_message(self.request, messages.DEBUG, msg,
fail_silently=True)
self.assertEqual([], self.storage.store)
|
dentaku65/pelisalacarta
|
refs/heads/master
|
python/main-classic/channels/ayuda.py
|
2
|
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# ayuda - Help videos and tutorials for pelisalacarta
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# contribution by jurrabi
#----------------------------------------------------------------------
import re
from core import scrapertools
from core import config
from core import logger
from core.item import Item
CHANNELNAME = "ayuda"
def isGeneric():
return True
def mainlist(item):
logger.info("pelisalacarta.channels.ayuda mainlist")
itemlist = []
platform_name = config.get_platform()
cuantos = 0
if "kodi" in platform_name or platform_name=="xbmceden" or platform_name=="xbmcfrodo" or platform_name=="xbmcgotham":
itemlist.append( Item(channel=CHANNELNAME, action="force_creation_advancedsettings" , title="Crear fichero advancedsettings.xml optimizado"))
cuantos = cuantos + 1
if "kodi" in platform_name or "xbmc" in platform_name or "boxee" in platform_name:
itemlist.append( Item(channel=CHANNELNAME, action="updatebiblio" , title="Buscar nuevos episodios y actualizar biblioteca"))
cuantos = cuantos + 1
if cuantos>0:
itemlist.append( Item(channel=CHANNELNAME, action="tutoriales" , title="Ver guías y tutoriales en vídeo"))
else:
itemlist.extend(tutoriales(item))
return itemlist
def tutoriales(item):
logger.info("pelisalacarta.channels.ayuda tutoriales")
itemlist = []
return playlists(item,"tvalacarta")
def force_creation_advancedsettings(item):
    # Path to the advancedsettings.xml file
import xbmc,xbmcgui,os
advancedsettings = xbmc.translatePath("special://userdata/advancedsettings.xml")
    # Copy advancedsettings.xml from the resources directory to userdata
fichero = open( os.path.join(config.get_runtime_path(),"resources","advancedsettings.xml") )
texto = fichero.read()
fichero.close()
fichero = open(advancedsettings,"w")
fichero.write(texto)
fichero.close()
dialog2 = xbmcgui.Dialog()
dialog2.ok("plugin", "Se ha creado un fichero advancedsettings.xml","con la configuración óptima para el streaming.")
return []
def updatebiblio(item):
import library_service
itemlist = []
itemlist.append( Item(channel=CHANNELNAME, action="" , title="Actualizacion en curso..."))
return itemlist
# Show all YouTube playlists for the selected channel
def playlists(item,channel_id):
logger.info("youtube_channel.playlists ")
itemlist=[]
item.url = "http://gdata.youtube.com/feeds/api/users/"+channel_id+"/playlists?v=2&start-index=1&max-results=30"
# Fetch video list from YouTube feed
data = scrapertools.cache_page( item.url )
logger.info("data="+data)
# Extract items from feed
pattern = "<entry(.*?)</entry>"
matches = re.compile(pattern,re.DOTALL).findall(data)
for entry in matches:
logger.info("entry="+entry)
        # Not the best way to parse XML, but clean and easy
title = scrapertools.find_single_match(entry,"<titl[^>]+>([^<]+)</title>")
plot = scrapertools.find_single_match(entry,"<media\:descriptio[^>]+>([^<]+)</media\:description>")
thumbnail = scrapertools.find_single_match(entry,"<media\:thumbnail url='([^']+)'")
url = scrapertools.find_single_match(entry,"<content type\='application/atom\+xml\;type\=feed' src='([^']+)'/>")
# Appends a new item to the xbmc item list
itemlist.append( Item(channel=CHANNELNAME, title=title , action="videos" , url=url, thumbnail=thumbnail, plot=plot , folder=True) )
return itemlist
# Show all YouTube videos for the selected playlist
def videos(item):
logger.info("youtube_channel.videos ")
itemlist=[]
# Fetch video list from YouTube feed
data = scrapertools.cache_page( item.url )
logger.info("data="+data)
# Extract items from feed
pattern = "<entry(.*?)</entry>"
matches = re.compile(pattern,re.DOTALL).findall(data)
for entry in matches:
logger.info("entry="+entry)
        # Not the best way to parse XML, but clean and easy
title = scrapertools.find_single_match(entry,"<titl[^>]+>([^<]+)</title>")
plot = scrapertools.find_single_match(entry,"<summa[^>]+>([^<]+)</summa")
thumbnail = scrapertools.find_single_match(entry,"<media\:thumbnail url='([^']+)'")
video_id = scrapertools.find_single_match(entry,"http\://www.youtube.com/watch\?v\=([0-9A-Za-z_-]{11})")
url = video_id
# Appends a new item to the xbmc item list
itemlist.append( Item(channel=CHANNELNAME, title=title , action="play" , server="youtube", url=url, thumbnail=thumbnail, plot=plot , folder=False) )
return itemlist
|
wzbozon/scikit-learn
|
refs/heads/master
|
sklearn/cross_decomposition/pls_.py
|
187
|
"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
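# Illustrative sketch (not part of the original module): as the docstring of
# _nipals_twoblocks_inner_loop states, the inner loop is a power-method-like
# alternative to svd(X'Y). On made-up random data, both helpers should return
# collinear first weight vectors (up to a sign flip). The helper name and the
# data below are invented for the demonstration.
def _demo_inner_loop_vs_svd():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    Y = rng.randn(50, 3)
    x_w, y_w, _ = _nipals_twoblocks_inner_loop(X, Y, norm_y_weights=True)
    u, v = _svd_cross_product(X, Y)
    # All four vectors are (approximately) unit norm, so the dot products
    # below are cosines; both are expected to be close to 1.0.
    cos_x = abs(np.dot(x_w.ravel(), u.ravel()))
    cos_y = abs(np.dot(y_w.ravel(), v.ravel()))
    return cos_x, cos_y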
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
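# Illustrative sketch (not part of the original module): _center_scale_xy
# works in place (hence the explicit copies below) and, with scale=True,
# divides by the column standard deviation computed with ddof=1, forcing
# zero-variance columns to std 1. The data is made up; only X is checked
# here for brevity.
def _demo_center_scale():
    rng = np.random.RandomState(1)
    X = rng.randn(20, 3)
    Y = rng.randn(20, 2)
    Xs, Ys, x_mean, y_mean, x_std, y_std = _center_scale_xy(
        X.copy(), Y.copy(), scale=True)
    # Each scaled column now has mean ~0 and (ddof=1) standard deviation ~1.
    return (np.allclose(Xs.mean(axis=0), 0.0),
            np.allclose(Xs.std(axis=0, ddof=1), 1.0))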
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm; the constructor's parameters
allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights as defined in [Tenenhaus 1998], p. 132.
With a univariate response, it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights as defined in [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterates over components.
(ii) The inner loop estimates the weight vectors. This can be done
with two algorithms: (a) the inner loop of the original NIPALS algorithm, or
(b) an SVD on the residual cross-covariance matrices.
Parameters
----------
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contain the residual (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
# A possible memory footprint reduction could be done here: in order
# to avoid allocating a data chunk for the rank-one
# approximation matrix, which is then subtracted from Xk, we suggest
# performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express as a function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimize:
``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results as 3 PLS packages
available in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm" with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
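# Illustrative sketch (not part of the original module): minimal usage of the
# PLSRegression class defined above on made-up data with a linear X -> Y
# relationship; the attribute shapes follow the Attributes section of the
# docstring (coef_ is [p, q], the scores are [n_samples, n_components]).
def _demo_plsregression_usage():
    rng = np.random.RandomState(2)
    X = rng.randn(100, 5)
    Y = np.dot(X, rng.randn(5, 2)) + 0.1 * rng.randn(100, 2)
    pls2 = PLSRegression(n_components=2).fit(X, Y)
    assert pls2.coef_.shape == (5, 2)        # [p, q]
    assert pls2.x_scores_.shape == (100, 2)  # T: [n_samples, n_components]
    return pls2.predict(X).shape             # (100, 2)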
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression, slightly different from CCA. It is mostly used
for modeling.
This implementation provides the same results as the "plspm" package
available in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm, since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
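# Illustrative sketch (not part of the original module): the PLSCanonical
# class defined above extracts paired score columns. On made-up data sharing
# a common latent factor, the first pair of scores is expected to be strongly
# correlated; the construction below is purely for demonstration.
def _demo_plscanonical_scores():
    rng = np.random.RandomState(3)
    latent = rng.randn(200, 1)
    X = np.dot(latent, rng.randn(1, 4)) + 0.2 * rng.randn(200, 4)
    Y = np.dot(latent, rng.randn(1, 3)) + 0.2 * rng.randn(200, 3)
    plsca = PLSCanonical(n_components=2).fit(X, Y)
    x_scores, y_scores = plsca.transform(X, Y)
    corr = np.corrcoef(x_scores[:, 0], y_scores[:, 0])[0, 1]
    return abs(corr)  # expected to be close to 1 for this construction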
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform an SVD on the cross-covariance matrix X'Y.
There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another solver.
# Otherwise, use arpack to compute only the requested components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
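# Illustrative sketch (not part of the original module): PLSSVD simply takes
# the singular vectors of the scaled cross-covariance matrix, so x_weights_
# has orthonormal columns and the scores are projections of the centered and
# scaled data onto them. The data below is made up for the demonstration.
def _demo_plssvd_usage():
    rng = np.random.RandomState(4)
    X = rng.randn(60, 6)
    Y = rng.randn(60, 4)
    plssvd = PLSSVD(n_components=2).fit(X, Y)
    w = plssvd.x_weights_
    orthonormal = np.allclose(np.dot(w.T, w), np.eye(2))
    x_scores, y_scores = plssvd.transform(X, Y)
    return orthonormal, x_scores.shape, y_scores.shape  # True, (60, 2), (60, 2)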
|
w84miracle/flask-sb-admin2
|
refs/heads/master
|
sbadmin.py
|
1
|
#!/usr/bin/env python
from flask import Flask, url_for, render_template, send_from_directory
import jinja2.exceptions
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/<pagename>')
def admin(pagename):
return render_template(pagename+'.html')
@app.route('/<path:resource>')
def serveStaticResource(resource):
return send_from_directory('static/', resource)
@app.route('/test')
def test():
return '<strong>It\'s Alive!</strong>'
@app.errorhandler(jinja2.exceptions.TemplateNotFound)
def template_not_found(e):
return not_found(e)
@app.errorhandler(404)
def not_found(e):
return '<strong>Page Not Found!</strong>', 404
if __name__ == '__main__':
app.run()
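# Illustrative sketch (not part of the original app): with Flask's built-in
# test client, requesting a page whose template does not exist (the URL below
# is made up) should be caught by the TemplateNotFound handler above and fall
# through to the 404 response.
def _demo_missing_template():
    client = app.test_client()
    response = client.get('/no-such-page')
    return response.status_code  # expected: 404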
|
jayoshih/content-curation
|
refs/heads/master
|
contentcuration/contentcuration/migrations/0007_merge.py
|
5
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-23 22:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0005_formatpreset_thumbnail'),
('contentcuration', '0006_contentnode_published'),
]
operations = [
]
|
MwanzanFelipe/rockletonfortune
|
refs/heads/master
|
lib/django/core/checks/registry.py
|
162
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from itertools import chain
from django.utils.itercompat import is_iterable
class Tags(object):
"""
Built-in tags for internal checks.
"""
admin = 'admin'
caches = 'caches'
compatibility = 'compatibility'
models = 'models'
security = 'security'
signals = 'signals'
templates = 'templates'
urls = 'urls'
class CheckRegistry(object):
def __init__(self):
self.registered_checks = []
self.deployment_checks = []
def register(self, check=None, *tags, **kwargs):
"""
Can be used as a function or a decorator. Registers the given function
`check` labeled with the given `tags`. The function should receive **kwargs
and return a list of Errors and Warnings.
Example::
registry = CheckRegistry()
@registry.register('mytag', 'anothertag')
def my_check(apps, **kwargs):
# ... perform checks and collect `errors` ...
return errors
# or
registry.register(my_check, 'mytag', 'anothertag')
"""
kwargs.setdefault('deploy', False)
def inner(check):
check.tags = tags
if kwargs['deploy']:
if check not in self.deployment_checks:
self.deployment_checks.append(check)
elif check not in self.registered_checks:
self.registered_checks.append(check)
return check
if callable(check):
return inner(check)
else:
if check:
tags += (check, )
return inner
def run_checks(self, app_configs=None, tags=None, include_deployment_checks=False):
"""
Run all registered checks and return a list of Errors and Warnings.
"""
errors = []
checks = self.get_checks(include_deployment_checks)
if tags is not None:
checks = [check for check in checks
if hasattr(check, 'tags') and set(check.tags) & set(tags)]
for check in checks:
new_errors = check(app_configs=app_configs)
assert is_iterable(new_errors), (
"The function %r did not return a list. All functions registered "
"with the checks registry must return a list." % check)
errors.extend(new_errors)
return errors
def tag_exists(self, tag, include_deployment_checks=False):
return tag in self.tags_available(include_deployment_checks)
def tags_available(self, deployment_checks=False):
return set(chain(*[check.tags for check in self.get_checks(deployment_checks) if hasattr(check, 'tags')]))
def get_checks(self, include_deployment_checks=False):
checks = list(self.registered_checks)
if include_deployment_checks:
checks.extend(self.deployment_checks)
return checks
registry = CheckRegistry()
register = registry.register
run_checks = registry.run_checks
tag_exists = registry.tag_exists
|
jasonrhaas/ramlfications
|
refs/heads/master
|
tests/test_validate.py
|
3
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Spotify AB
from __future__ import absolute_import, division, print_function
import os
import pytest
from ramlfications import errors
from ramlfications.config import setup_config
from ramlfications.parser import parse_raml as parse
from ramlfications._helpers import load_file
from .base import VALIDATE
raises = pytest.raises(errors.InvalidRAMLError)
# Search a list of errors for a specific error
def _error_exists(error_list, error_type, error_msg):
for e in error_list:
if isinstance(e, error_type) and e.args == error_msg:
return True
return False
def load_raml(filename):
raml_file = os.path.join(VALIDATE + filename)
return load_file(raml_file)
def load_config(filename):
config_file = os.path.join(VALIDATE + filename)
return setup_config(config_file)
def test_invalid_root_protocols():
raml = load_raml("invalid-protocols.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("'FTP' not a valid protocol for a RAML-defined API.",)
assert _error_exists(e.value.errors, errors.InvalidRootNodeError, msg)
def test_undefined_version():
raml = load_raml("no-version.raml")
config = load_config("valid-config.ini")
parsed_raml = parse(raml, config)
assert not parsed_raml.errors
def test_invalid_version_base_uri():
raml = load_raml("no-version-base-uri.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("RAML File's baseUri includes {version} parameter but no "
"version is defined.",)
assert _error_exists(e.value.errors, errors.InvalidRootNodeError, msg)
def test_undefined_base_uri_and_title():
raml = load_raml("no-base-uri-no-title.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
assert len(e.value.errors) == 2
assert isinstance(e.value.errors[0], errors.InvalidRootNodeError)
assert isinstance(e.value.errors[1], errors.InvalidRootNodeError)
def test_invalid_base_uri_not_defined():
raml = load_raml("no-base-uri.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("RAML File does not define the baseUri.",)
assert _error_exists(e.value.errors, errors.InvalidRootNodeError, msg)
def test_invalid_base_uri_wrong_type():
raml = load_raml("invalid-base-uri-params.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("Validation errors were found.",)
msg1 = ("baseUriParameter 'domainName' must be a string",)
assert e.value.args == msg
assert _error_exists(e.value.errors, errors.InvalidRootNodeError, msg1)
def test_invalid_base_uri_optional():
raml = load_raml("optional-base-uri-params.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("baseUriParameter 'domainName' must be required",)
assert _error_exists(e.value.errors, errors.InvalidRootNodeError, msg)
def test_invalid_uri_params_version():
raml = load_raml("version-in-uri-params.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("'version' can only be defined in baseUriParameters.",)
assert _error_exists(e.value.errors, errors.InvalidRootNodeError, msg)
def test_invalid_no_title():
raml = load_raml("no-title.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ('RAML File does not define an API title.',)
assert _error_exists(e.value.errors, errors.InvalidRootNodeError, msg)
def test_invalid_docs_not_list():
raml = load_raml("docs-not-list.raml")
config = load_config("valid-config.ini")
with pytest.raises(AssertionError) as e:
parse(raml, config)
assert ("Error parsing documentation",) == e.value.args
def test_invalid_docs_no_title():
raml = load_raml("docs-no-title.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("API Documentation requires a title.",)
assert _error_exists(e.value.errors, errors.InvalidRootNodeError, msg)
def test_invalid_docs_no_content():
raml = load_raml("docs-no-content.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("API Documentation requires content defined.",)
assert _error_exists(e.value.errors, errors.InvalidRootNodeError, msg)
def test_assigned_undefined_resource_type():
raml = load_raml("undefined-resource-type-str.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("Resource Type 'undefined' is assigned to '/foo' but is not "
"defined in the root of the API.",)
assert _error_exists(e.value.errors, errors.InvalidResourceNodeError, msg)
def test_no_resources_defined():
raml = load_raml("no-resources.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("API does not define any resources.",)
assert _error_exists(e.value.errors, errors.InvalidRootNodeError, msg)
def test_invalid_media_type():
raml = load_raml("invalid-media-type.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("Unsupported MIME Media Type: 'awesome/sauce'.",)
assert _error_exists(e.value.errors, errors.InvalidRootNodeError, msg)
# TODO: move assert from parser to validate
def test_invalid_trait_obj():
raml = load_raml("trait-unsupported-obj.raml")
config = load_config("valid-config.ini")
with pytest.raises(AssertionError) as e:
parse(raml, config)
msg = ("Error parsing trait",)
assert msg == e.value.args
def test_traits_undefined():
raml = load_raml("trait-undefined.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("Trait 'undefined' is assigned to '/users/{user_id}/playlists' "
"but is not defined in the root of the API.",)
assert _error_exists(e.value.errors, errors.InvalidResourceNodeError, msg)
def test_no_traits_defined():
raml = load_raml("no-traits-defined.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("Trying to assign traits that are not defined"
"in the root of the API.",)
assert _error_exists(e.value.errors, errors.InvalidResourceNodeError, msg)
# TODO: move assert from parser to validate
def test_unsupported_trait_type_str():
raml = load_raml("trait-unsupported-type-str.raml")
config = load_config("valid-config.ini")
with pytest.raises(AssertionError) as e:
parse(raml, config)
msg = ("Error parsing trait",)
assert msg == e.value.args
def test_unsupported_trait_type_array_ints():
raml = load_raml("trait-unsupported-type-array-ints.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("'12' needs to be a string referring to a trait, or a dictionary "
"mapping parameter values to a trait",)
assert _error_exists(e.value.errors, errors.InvalidResourceNodeError, msg)
def test_too_many_assigned_resource_types():
raml = load_raml("too-many-assigned-res-types.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("Too many resource types applied to '/foobar'.",)
assert _error_exists(e.value.errors, errors.InvalidResourceNodeError, msg)
#####
# Parameter Validators
#####
def test_invalid_request_header_param():
raml = load_raml("invalid-parameter-type-header.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("'invalidType' is not a valid primative parameter type",)
assert _error_exists(e.value.errors, errors.InvalidParameterError, msg)
def test_invalid_body_mime_type():
raml = load_raml("invalid-body-mime-type.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("Unsupported MIME Media Type: 'invalid/mediatype'.",)
assert _error_exists(e.value.errors, errors.InvalidParameterError, msg)
def test_invalid_body_schema():
raml = load_raml("invalid-body-form-schema.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("Validation errors were found.",)
msg1 = ("Body must define formParameters, not schema/example.",)
msg2 = ("Body with mime_type 'application/x-www-form-urlencoded' "
"requires formParameters.",)
assert msg == e.value.args
assert _error_exists(e.value.errors, errors.InvalidParameterError, msg1)
assert _error_exists(e.value.errors, errors.InvalidParameterError, msg2)
def test_invalid_body_example():
raml = load_raml("invalid-body-form-example.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("Validation errors were found.",)
msg1 = ("Body must define formParameters, not schema/example.",)
msg2 = ("Body with mime_type 'application/x-www-form-urlencoded' "
"requires formParameters.",)
assert msg == e.value.args
assert _error_exists(e.value.errors, errors.InvalidParameterError, msg1)
assert _error_exists(e.value.errors, errors.InvalidParameterError, msg2)
def test_invalid_body_no_form_params():
raml = load_raml("invalid-body-no-form-params.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("Body with mime_type 'application/x-www-form-urlencoded' requires "
"formParameters.",)
assert _error_exists(e.value.errors, errors.InvalidParameterError, msg)
def test_invalid_response_code_str():
raml = load_raml("invalid-response-code-str.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = (
"Response code 'foo' must be an integer representing an HTTP code.",
)
assert _error_exists(e.value.errors, errors.InvalidParameterError, msg)
def test_invalid_response_code():
raml = load_raml("invalid-response-code.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("'299' not a valid HTTP response code.",)
assert _error_exists(e.value.errors, errors.InvalidParameterError, msg)
#####
# Primitive Validators
#####
def test_invalid_integer_number_type():
raml = load_raml("invalid-integer-number-type.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("invalidParamType must be either a number or integer to have "
"minimum attribute set, not 'string'.",)
assert _error_exists(e.value.errors, errors.InvalidParameterError, msg)
def test_invalid_string_type():
raml = load_raml("invalid-string-type.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("invalidParamType must be a string type to have min_length "
"attribute set, not 'integer'.",)
assert _error_exists(e.value.errors, errors.InvalidParameterError, msg)
#####
# ResourceType, Trait, and Security Scheme validators
#####
def test_empty_mapping_res_type():
raml = load_raml("empty-mapping-resource-type.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("The resourceType 'emptyType' requires definition.",)
assert _error_exists(e.value.errors, errors.InvalidResourceNodeError, msg)
def test_empty_mapping_trait():
raml = load_raml("empty-mapping-trait.raml")
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("The trait 'emptyTrait' requires definition.",)
assert _error_exists(e.value.errors, errors.InvalidResourceNodeError, msg)
def test_empty_mapping_sec_scheme_settings():
_raml = "empty-mapping-security-scheme-settings.raml"
raml = load_raml(_raml)
config = load_config("valid-config.ini")
with raises as e:
parse(raml, config)
msg = ("'settings' for security scheme 'EmptySettingsScheme' require "
"definition.",)
assert _error_exists(e.value.errors, errors.InvalidSecuritySchemeError,
msg)
|
vkachurka/py-leveldb
|
refs/heads/master
|
test/test.py
|
42
|
#!/usr/bin/python
# Copyright (c) Arni Mar Jonsson.
# See LICENSE for details.
import sys, string, unittest, itertools
class TestLevelDB(unittest.TestCase):
def setUp(self):
# import local leveldb
import leveldb as _leveldb
self.leveldb = _leveldb
dir(self.leveldb)
# Python2/3 compat
if hasattr(string, 'lowercase'):
self.lowercase = string.lowercase
self.uppercase = string.uppercase
else:
self.lowercase = string.ascii_lowercase
self.uppercase = string.ascii_uppercase
# comparator
if sys.version_info[0] < 3:
def my_comparison(a, b):
return cmp(a, b)
else:
def my_comparison(a, b):
if a < b:
return -1
elif a > b:
return 1
else:
return 0
self.comparator = 'bytewise'
if True:
self.comparator = ('bytewise', my_comparison)
# repair/destroy previous database, if any
self.name = 'db_a'
#self.leveldb.RepairDB(self.name, comparator = self.comparator)
self.leveldb.DestroyDB(self.name)
def _open_options(self, create_if_missing = True, error_if_exists = False):
v = {
'create_if_missing': create_if_missing,
'error_if_exists': error_if_exists,
'paranoid_checks': False,
'block_cache_size': 8 * (2 << 20),
'write_buffer_size': 2 * (2 << 20),
'block_size': 4096,
'max_open_files': 1000,
'block_restart_interval': 16,
'comparator': self.comparator
}
return v
def _open(self, *args, **kwargs):
options = self._open_options(*args, **kwargs)
db = self.leveldb.LevelDB(self.name, **options)
dir(db)
return db
def testIteratorNone(self):
options = self._open_options()
db = self.leveldb.LevelDB(self.name, **options)
for s in 'abcdef':
db.Put(self._s(s), self._s(s))
kv_ = [(self._s('a'), self._s('a')), (self._s('b'), self._s('b')), (self._s('c'), self._s('c')), (self._s('d'), self._s('d')), (self._s('e'), self._s('e')), (self._s('f'), self._s('f'))]
kv = list(db.RangeIter(key_from = None, key_to = None))
self.assertEqual(kv, kv_)
kv = list(db.RangeIter(key_to = None))
self.assertEqual(kv, kv_)
kv = list(db.RangeIter(key_from = None))
self.assertEqual(kv, kv_)
kv = list(db.RangeIter())
self.assertEqual(kv, kv_)
def testIteratorCrash(self):
options = self._open_options()
db = self.leveldb.LevelDB(self.name, **options)
db.Put(self._s('a'), self._s('b'))
i = db.RangeIter(include_value = False, reverse = True)
dir(i)
del self.leveldb
def _s(self, s):
if sys.version_info[0] >= 3:
return bytearray(s, encoding = 'latin1')
else:
return s
def _join(self, i):
return self._s('').join(i)
# NOTE: modeled after test 'Snapshot'
def testSnapshotBasic(self):
db = self._open()
# populate the database
db.Put(self._s('foo'), self._s('v1'))
s1 = db.CreateSnapshot()
dir(s1)
db.Put(self._s('foo'), self._s('v2'))
s2 = db.CreateSnapshot()
db.Put(self._s('foo'), self._s('v3'))
s3 = db.CreateSnapshot()
db.Put(self._s('foo'), self._s('v4'))
self.assertEqual(s1.Get(self._s('foo')), self._s('v1'))
self.assertEqual(s2.Get(self._s('foo')), self._s('v2'))
self.assertEqual(s3.Get(self._s('foo')), self._s('v3'))
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
# TBD: close properly
del s3
self.assertEqual(s1.Get(self._s('foo')), self._s('v1'))
self.assertEqual(s2.Get(self._s('foo')), self._s('v2'))
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
# TBD: close properly
del s1
self.assertEqual(s2.Get(self._s('foo')), self._s('v2'))
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
# TBD: close properly
del s2
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
# re-open
del db
db = self._open()
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
def ClearDB(self, db):
for k in list(db.RangeIter(include_value = False, reverse = True)):
db.Delete(k)
def ClearDB_batch(self, db):
b = self.leveldb.WriteBatch()
dir(b)
for k in db.RangeIter(include_value = False, reverse = True):
b.Delete(k)
db.Write(b)
def CountDB(self, db):
return sum(1 for i in db.RangeIter(reverse = True))
def _insert_lowercase(self, db):
b = self.leveldb.WriteBatch()
for c in self.lowercase:
b.Put(self._s(c), self._s('hello'))
db.Write(b)
def _insert_uppercase_batch(self, db):
b = self.leveldb.WriteBatch()
for c in self.uppercase:
b.Put(self._s(c), self._s('hello'))
db.Write(b)
def _test_uppercase_get(self, db):
for k in self.uppercase:
v = db.Get(self._s(k))
self.assertEqual(v, self._s('hello'))
self.assertTrue(k in self.uppercase)
def _test_uppercase_iter(self, db):
s = self._join(k for k, v in db.RangeIter(self._s('J'), self._s('M')))
self.assertEqual(s, self._s('JKLM'))
s = self._join(k for k, v in db.RangeIter(self._s('S')))
self.assertEqual(s, self._s('STUVWXYZ'))
s = self._join(k for k, v in db.RangeIter(key_to = self._s('E')))
self.assertEqual(s, self._s('ABCDE'))
def _test_uppercase_iter_rev(self, db):
# inside range
s = self._join(k for k, v in db.RangeIter(self._s('J'), self._s('M'), reverse = True))
self.assertEqual(s, self._s('MLKJ'))
# partly outside range
s = self._join(k for k, v in db.RangeIter(self._s('Z'), self._s(chr(ord('Z') + 1)), reverse = True))
self.assertEqual(s, self._s('Z'))
s = self._join(k for k, v in db.RangeIter(self._s(chr(ord('A') - 1)), self._s('A'), reverse = True))
self.assertEqual(s, self._s('A'))
# wholly outside range
s = self._join(k for k, v in db.RangeIter(self._s(chr(ord('Z') + 1)), self._s(chr(ord('Z') + 2)), reverse = True))
self.assertEqual(s, self._s(''))
s = self._join(k for k, v in db.RangeIter(self._s(chr(ord('A') - 2)), self._s(chr(ord('A') - 1)), reverse = True))
self.assertEqual(s, self._s(''))
# lower limit
s = self._join(k for k, v in db.RangeIter(self._s('S'), reverse = True))
self.assertEqual(s, self._s('ZYXWVUTS'))
# upper limit
s = self._join(k for k, v in db.RangeIter(key_to = self._s('E'), reverse = True))
self.assertEqual(s, self._s('EDCBA'))
def _test_lowercase_iter(self, db):
s = self._join(k for k, v in db.RangeIter(self._s('j'), self._s('m')))
self.assertEqual(s, self._s('jklm'))
s = self._join(k for k, v in db.RangeIter(self._s('s')))
self.assertEqual(s, self._s('stuvwxyz'))
s = self._join(k for k, v in db.RangeIter(key_to = self._s('e')))
self.assertEqual(s, self._s('abcde'))
def _test_lowercase_iter_rev(self, db):
s = self._join(k for k, v in db.RangeIter(self._s('j'), self._s('m'), reverse = True))
self.assertEqual(s, self._s('mlkj'))
s = self._join(k for k, v in db.RangeIter(self._s('s'), reverse = True))
self.assertEqual(s, self._s('zyxwvuts'))
s = self._join(k for k, v in db.RangeIter(key_to = self._s('e'), reverse = True))
self.assertEqual(s, self._s('edcba'))
def _test_lowercase_get(self, db):
for k in self.lowercase:
v = db.Get(self._s(k))
self.assertEqual(v, self._s('hello'))
self.assertTrue(k in self.lowercase)
def testIterationBasic(self):
db = self._open()
self._insert_lowercase(db)
self.assertEqual(self.CountDB(db), 26)
self._test_lowercase_iter(db)
#self._test_lowercase_iter_rev(db)
self._test_lowercase_get(db)
self.ClearDB_batch(db)
self._insert_uppercase_batch(db)
self._test_uppercase_iter(db)
self._test_uppercase_iter_rev(db)
self._test_uppercase_get(db)
self.assertEqual(self.CountDB(db), 26)
def testCompact(self):
db = self._open()
s = self._s('foo' * 10)
for i in itertools.count():
db.Put(self._s('%i' % i), s)
if i > 10000:
break
db.CompactRange(self._s('1000'), self._s('10000'))
db.CompactRange(start = self._s('1000'))
db.CompactRange(end = self._s('1000'))
db.CompactRange(start = self._s('1000'), end = None)
db.CompactRange(start = None, end = self._s('1000'))
db.CompactRange()
# tried to re-produce http://code.google.com/p/leveldb/issues/detail?id=44
def testMe(self):
db = self._open()
db.Put(self._s('key1'), self._s('val1'))
del db
db = self._open()
db.Delete(self._s('key2'))
db.Delete(self._s('key1'))
del db
db = self._open()
db.Delete(self._s('key2'))
del db
db = self._open()
db.Put(self._s('key3'), self._s('val1'))
del db
db = self._open()
del db
db = self._open()
v = list(db.RangeIter())
self.assertEqual(v, [(self._s('key3'), self._s('val1'))])
if __name__ == '__main__':
unittest.main()
|
petemounce/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_ntp.py
|
45
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_ntp
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages core NTP configuration.
description:
- Manages core NTP configuration.
author:
- Jason Edelman (@jedelman8)
options:
server:
description:
- Network address of NTP server.
required: false
default: null
peer:
description:
- Network address of NTP peer.
required: false
default: null
key_id:
description:
- Authentication key identifier to use with
given NTP server or peer.
required: false
default: null
prefer:
description:
- Makes given NTP server or peer the preferred
NTP server or peer for the device.
required: false
default: null
choices: ['enabled', 'disabled']
vrf_name:
description:
- Makes the device communicate with the given
NTP server or peer over a specific VRF.
required: false
default: null
source_addr:
description:
- Local source address from which NTP messages are sent.
required: false
default: null
source_int:
description:
- Local source interface from which NTP messages are sent.
Must be fully qualified interface name.
required: false
default: null
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Set NTP Server with parameters
- nxos_ntp:
server: 1.2.3.4
key_id: 32
prefer: enabled
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"address": "2.2.2.2", "key_id": "48",
"peer_type": "server", "prefer": "enabled",
"source": "3.3.3.3", "source_type": "source"}
existing:
description:
- k/v pairs of existing ntp server/peer
returned: always
type: dict
sample: {"address": "2.2.2.2", "key_id": "32",
"peer_type": "server", "prefer": "enabled",
"source": "ethernet2/1", "source_type": "source-interface"}
end_state:
description: k/v pairs of ntp info after module execution
returned: always
type: dict
sample: {"address": "2.2.2.2", "key_id": "48",
"peer_type": "server", "prefer": "enabled",
"source": "3.3.3.3", "source_type": "source"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["ntp server 2.2.2.2 prefer key 48",
"no ntp source-interface ethernet2/1", "ntp source 3.3.3.3"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_ntp_source(module):
source_type = None
source = None
command = 'show run | inc ntp.source'
output = execute_show_command(command, module, command_type='cli_show_ascii')
if output:
try:
if 'interface' in output[0]:
source_type = 'source-interface'
else:
source_type = 'source'
source = output[0].split()[2].lower()
except AttributeError:
source_type = None
source = None
return source_type, source
def get_ntp_peer(module):
command = 'show run | inc ntp.(server|peer)'
ntp_peer_list = []
response = execute_show_command(
command, module, command_type='cli_show_ascii')
if response:
if isinstance(response, list):
ntp = response[0]
else:
ntp = response
if ntp:
ntp_regex = (
".*ntp\s(server\s(?P<address>\S+)|peer\s(?P<peer_address>\S+))"
"\s*((?P<prefer>prefer)\s*)?(use-vrf\s(?P<vrf_name>\S+)\s*)?"
"(key\s(?P<key_id>\d+))?.*"
)
split_ntp = ntp.splitlines()
for peer_line in split_ntp:
ntp_peer = {}
try:
peer_address = None
vrf_name = None
prefer = None
key_id = None
match_ntp = re.match(ntp_regex, peer_line, re.DOTALL)
group_ntp = match_ntp.groupdict()
address = group_ntp["address"]
peer_address = group_ntp['peer_address']
prefer = group_ntp['prefer']
vrf_name = group_ntp['vrf_name']
key_id = group_ntp['key_id']
if prefer is not None:
prefer = 'enabled'
else:
prefer = 'disabled'
if address is not None:
peer_type = 'server'
elif peer_address is not None:
peer_type = 'peer'
address = peer_address
args = dict(peer_type=peer_type, address=address, prefer=prefer,
vrf_name=vrf_name, key_id=key_id)
ntp_peer = dict((k, v) for k, v in args.items())
ntp_peer_list.append(ntp_peer)
except AttributeError:
ntp_peer_list = []
return ntp_peer_list
def get_ntp_existing(address, peer_type, module):
peer_dict = {}
peer_server_list = []
peer_list = get_ntp_peer(module)
for peer in peer_list:
if peer['address'] == address:
peer_dict.update(peer)
else:
peer_server_list.append(peer)
source_type, source = get_ntp_source(module)
if (source_type is not None and source is not None):
peer_dict['source_type'] = source_type
peer_dict['source'] = source
return (peer_dict, peer_server_list)
def set_ntp_server_peer(peer_type, address, prefer, key_id, vrf_name):
command_strings = []
if prefer:
command_strings.append(' prefer')
if key_id:
command_strings.append(' key {0}'.format(key_id))
if vrf_name:
command_strings.append(' use-vrf {0}'.format(vrf_name))
command_strings.insert(0, 'ntp {0} {1}'.format(peer_type, address))
command = ''.join(command_strings)
return command
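# Illustrative sketch (not part of the original module): set_ntp_server_peer
# simply concatenates the optional pieces onto the base command, so a
# preferred server with an authentication key (made-up values) yields a
# single CLI line matching the sample in the RETURN documentation above.
def _demo_set_ntp_server_peer():
    cmd = set_ntp_server_peer('server', '2.2.2.2', True, '48', None)
    return cmd  # expected: 'ntp server 2.2.2.2 prefer key 48'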
def config_ntp(delta, existing):
address = delta.get('address', existing.get('address'))
peer_type = delta.get('peer_type', existing.get('peer_type'))
vrf_name = delta.get('vrf_name', existing.get('vrf_name'))
key_id = delta.get('key_id', existing.get('key_id'))
prefer = delta.get('prefer', existing.get('prefer'))
source_type = delta.get('source_type')
source = delta.get('source')
if prefer:
if prefer == 'enabled':
prefer = True
elif prefer == 'disabled':
prefer = False
if source:
source_type = delta.get('source_type', existing.get('source_type'))
ntp_cmds = []
if peer_type:
ntp_cmds.append(set_ntp_server_peer(
peer_type, address, prefer, key_id, vrf_name))
if source:
existing_source_type = existing.get('source_type')
existing_source = existing.get('source')
if existing_source_type and source_type != existing_source_type:
ntp_cmds.append('no ntp {0} {1}'.format(existing_source_type, existing_source))
ntp_cmds.append('ntp {0} {1}'.format(source_type, source))
return ntp_cmds
def main():
argument_spec = dict(
server=dict(type='str'),
peer=dict(type='str'),
key_id=dict(type='str'),
prefer=dict(type='str', choices=['enabled', 'disabled']),
vrf_name=dict(type='str'),
source_addr=dict(type='str'),
source_int=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[
['server','peer'],
['source_addr','source_int']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
server = module.params['server'] or None
peer = module.params['peer'] or None
key_id = module.params['key_id']
prefer = module.params['prefer']
vrf_name = module.params['vrf_name']
source_addr = module.params['source_addr']
source_int = module.params['source_int']
state = module.params['state']
if source_int is not None:
source_int = source_int.lower()
if server:
peer_type = 'server'
address = server
elif peer:
peer_type = 'peer'
address = peer
else:
peer_type = None
address = None
source_type = None
source = None
if source_addr:
source_type = 'source'
source = source_addr
elif source_int:
source_type = 'source-interface'
source = source_int
if key_id or vrf_name or prefer:
if not server and not peer:
module.fail_json(
msg='Please supply the server or peer parameter')
args = dict(peer_type=peer_type, address=address, key_id=key_id,
prefer=prefer, vrf_name=vrf_name, source_type=source_type,
source=source)
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing, peer_server_list = get_ntp_existing(address, peer_type, module)
end_state = existing
changed = False
commands = []
if state == 'present':
delta = dict(set(proposed.items()).difference(existing.items()))
if delta:
command = config_ntp(delta, existing)
if command:
commands.append(command)
elif state == 'absent':
if existing.get('peer_type') and existing.get('address'):
command = 'no ntp {0} {1}'.format(
existing['peer_type'], existing['address'])
if command:
commands.append([command])
existing_source_type = existing.get('source_type')
existing_source = existing.get('source')
proposed_source_type = proposed.get('source_type')
proposed_source = proposed.get('source')
if proposed_source_type:
if proposed_source_type == existing_source_type:
if proposed_source == existing_source:
command = 'no ntp {0} {1}'.format(
existing_source_type, existing_source)
if command:
commands.append([command])
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_ntp_existing(address, peer_type, module)[0]
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
results['end_state'] = end_state
results['peer_server_list'] = peer_server_list
module.exit_json(**results)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
bear/parsedatetime
|
refs/heads/master
|
tests/TestMultiple.py
|
3
|
# -*- coding: utf-8 -*-
"""
Test parsing of strings with multiple chunks
"""
from __future__ import unicode_literals
import sys
import time
import datetime
import parsedatetime as pdt
from . import utils
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class test(unittest.TestCase):
@utils.assertEqualWithComparator
def assertExpectedResult(self, result, check, **kwargs):
return utils.compareResultByTimeTuplesAndFlags(result, check, **kwargs)
def setUp(self):
self.cal = pdt.Calendar()
(self.yr, self.mth, self.dy, self.hr,
self.mn, self.sec, self.wd, self.yd, self.isdst) = time.localtime()
def testSimpleMultipleItems(self):
s = datetime.datetime.now()
t = self.cal.inc(s, year=3) + datetime.timedelta(days=5, weeks=2)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(
self.cal.parse('3 years 2 weeks 5 days', start), (target, 1))
self.assertExpectedResult(
self.cal.parse('3years 2weeks 5days', start), (target, 1))
def testMultipleItemsSingleCharUnits(self):
s = datetime.datetime.now()
t = self.cal.inc(s, year=3) + datetime.timedelta(days=5, weeks=2)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(
self.cal.parse('3 y 2 w 5 d', start), (target, 1))
self.assertExpectedResult(
self.cal.parse('3y 2w 5d', start), (target, 1))
t = self.cal.inc(s, year=3) + datetime.timedelta(hours=5, minutes=50)
target = t.timetuple()
self.assertExpectedResult(
self.cal.parse('3y 5h 50m', start), (target, 3))
def testMultipleItemsWithPunctuation(self):
s = datetime.datetime.now()
t = self.cal.inc(s, year=3) + datetime.timedelta(days=5, weeks=2)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(
self.cal.parse('3 years, 2 weeks, 5 days', start), (target, 1))
self.assertExpectedResult(
self.cal.parse('3 years, 2 weeks and 5 days', start), (target, 1))
self.assertExpectedResult(
self.cal.parse('3y, 2w, 5d ', start), (target, 1))
def testUnixATStyle(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(days=3)
t = t.replace(hour=16, minute=0, second=0)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(
self.cal.parse('4pm + 3 days', start), (target, 3))
self.assertExpectedResult(
self.cal.parse('4pm +3 days', start), (target, 3))
def testUnixATStyleNegative(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(days=-3)
t = t.replace(hour=16, minute=0, second=0)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(
self.cal.parse('4pm - 3 days', start), (target, 3))
self.assertExpectedResult(
self.cal.parse('4pm -3 days', start), (target, 3))
if __name__ == "__main__":
unittest.main()
|
DMLoy/ECommerceBasic
|
refs/heads/master
|
lib/python2.7/site-packages/PIL/GbrImagePlugin.py
|
40
|
#
# The Python Imaging Library
# $Id$
#
# load a GIMP brush file
#
# History:
# 96-03-14 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
import Image, ImageFile
def i32(c):
return ord(c[3]) + (ord(c[2])<<8) + (ord(c[1])<<16) + (ord(c[0])<<24L)
def _accept(prefix):
return i32(prefix) >= 20 and i32(prefix[4:8]) == 1
##
# Image plugin for the GIMP brush format.
class GbrImageFile(ImageFile.ImageFile):
format = "GBR"
format_description = "GIMP brush file"
def _open(self):
header_size = i32(self.fp.read(4))
version = i32(self.fp.read(4))
if header_size < 20 or version != 1:
raise SyntaxError, "not a GIMP brush"
width = i32(self.fp.read(4))
height = i32(self.fp.read(4))
bytes = i32(self.fp.read(4))
if width <= 0 or height <= 0 or bytes != 1:
raise SyntaxError, "not a GIMP brush"
comment = self.fp.read(header_size - 20)[:-1]
self.mode = "L"
self.size = width, height
self.info["comment"] = comment
# Since the brush is so small, we read the data immediately
self.data = self.fp.read(width * height)
def load(self):
if not self.data:
return
# create an image out of the brush data block
self.im = Image.core.new(self.mode, self.size)
self.im.fromstring(self.data)
self.data = ""
#
# registry
Image.register_open("GBR", GbrImageFile, _accept)
Image.register_extension("GBR", ".gbr")
|
semonte/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/mercurial/hgweb/wsgicgi.py
|
96
|
# hgweb/wsgicgi.py - CGI->WSGI translator
#
# Copyright 2006 Eric Hopper <hopper@omnifarious.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
#
# This was originally copied from the public domain code at
# http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side
import os, sys
from mercurial import util
from mercurial.hgweb import common
def launch(application):
util.setbinary(sys.stdin)
util.setbinary(sys.stdout)
environ = dict(os.environ.iteritems())
environ.setdefault('PATH_INFO', '')
if environ.get('SERVER_SOFTWARE', '').startswith('Microsoft-IIS'):
# IIS includes script_name in PATH_INFO
scriptname = environ['SCRIPT_NAME']
if environ['PATH_INFO'].startswith(scriptname):
environ['PATH_INFO'] = environ['PATH_INFO'][len(scriptname):]
stdin = sys.stdin
if environ.get('HTTP_EXPECT', '').lower() == '100-continue':
stdin = common.continuereader(stdin, sys.stdout.write)
environ['wsgi.input'] = stdin
environ['wsgi.errors'] = sys.stderr
environ['wsgi.version'] = (1, 0)
environ['wsgi.multithread'] = False
environ['wsgi.multiprocess'] = True
environ['wsgi.run_once'] = True
if environ.get('HTTPS', 'off').lower() in ('on', '1', 'yes'):
environ['wsgi.url_scheme'] = 'https'
else:
environ['wsgi.url_scheme'] = 'http'
headers_set = []
headers_sent = []
out = sys.stdout
def write(data):
if not headers_set:
raise AssertionError("write() before start_response()")
elif not headers_sent:
# Before the first output, send the stored headers
status, response_headers = headers_sent[:] = headers_set
out.write('Status: %s\r\n' % status)
for header in response_headers:
out.write('%s: %s\r\n' % header)
out.write('\r\n')
out.write(data)
out.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0](exc_info[1], exc_info[2])
finally:
exc_info = None # avoid dangling circular ref
elif headers_set:
raise AssertionError("Headers already set!")
headers_set[:] = [status, response_headers]
return write
content = application(environ, start_response)
try:
for chunk in content:
write(chunk)
if not headers_sent:
write('') # send headers now if body was empty
finally:
getattr(content, 'close', lambda : None)()
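# Illustrative usage sketch (not part of this module): a standard Mercurial
# hgweb CGI script builds the hgweb WSGI application and hands it to launch().
# The repository path below is a hypothetical placeholder.
#
#   from mercurial.hgweb import hgweb, wsgicgi
#   application = hgweb('/path/to/repo')
#   wsgicgi.launch(application)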
|
antoan2/incubator-mxnet
|
refs/heads/master
|
tests/python/unittest/test_gluon_contrib.py
|
8
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
from mxnet.gluon import contrib
from mxnet.test_utils import almost_equal
import numpy as np
from numpy.testing import assert_allclose
def check_rnn_cell(cell, prefix, in_shape=(10, 50), out_shape=(10, 100), begin_state=None):
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs, begin_state=begin_state)
outputs = mx.sym.Group(outputs)
assert sorted(cell.collect_params().keys()) == [prefix+'h2h_bias', prefix+'h2h_weight',
prefix+'i2h_bias', prefix+'i2h_weight']
assert outputs.list_outputs() == [prefix+'t0_out_output', prefix+'t1_out_output', prefix+'t2_out_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=in_shape,
rnn_t1_data=in_shape,
rnn_t2_data=in_shape)
assert outs == [out_shape]*3
def check_rnn_forward(layer, inputs):
inputs.attach_grad()
layer.collect_params().initialize()
with mx.autograd.record():
layer.unroll(3, inputs, merge_outputs=True)[0].backward()
mx.autograd.backward(layer.unroll(3, inputs, merge_outputs=False)[0])
mx.nd.waitall()
def test_rnn_cells():
check_rnn_forward(contrib.rnn.Conv1DLSTMCell((5, 7), 10, (3,), (3,)),
mx.nd.ones((8, 3, 5, 7)))
check_rnn_forward(contrib.rnn.Conv1DRNNCell((5, 7), 10, (3,), (3,)),
mx.nd.ones((8, 3, 5, 7)))
check_rnn_forward(contrib.rnn.Conv1DGRUCell((5, 7), 10, (3,), (3,)),
mx.nd.ones((8, 3, 5, 7)))
net = mx.gluon.rnn.SequentialRNNCell()
net.add(contrib.rnn.Conv1DLSTMCell((5, 7), 10, (3,), (3,)))
net.add(contrib.rnn.Conv1DRNNCell((10, 5), 11, (3,), (3,)))
net.add(contrib.rnn.Conv1DGRUCell((11, 3), 12, (3,), (3,)))
check_rnn_forward(net, mx.nd.ones((8, 3, 5, 7)))
def test_convrnn():
cell = contrib.rnn.Conv1DRNNCell((10, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 50), out_shape=(1, 100, 48))
cell = contrib.rnn.Conv2DRNNCell((10, 20, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 50), out_shape=(1, 100, 18, 48))
cell = contrib.rnn.Conv3DRNNCell((10, 20, 30, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48))
def test_convlstm():
cell = contrib.rnn.Conv1DLSTMCell((10, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 50), out_shape=(1, 100, 48))
cell = contrib.rnn.Conv2DLSTMCell((10, 20, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 50), out_shape=(1, 100, 18, 48))
cell = contrib.rnn.Conv3DLSTMCell((10, 20, 30, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48))
def test_convgru():
cell = contrib.rnn.Conv1DGRUCell((10, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 50), out_shape=(1, 100, 48))
cell = contrib.rnn.Conv2DGRUCell((10, 20, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 50), out_shape=(1, 100, 18, 48))
cell = contrib.rnn.Conv3DGRUCell((10, 20, 30, 50), 100, 3, 3, prefix='rnn_')
check_rnn_cell(cell, prefix='rnn_', in_shape=(1, 10, 20, 30, 50), out_shape=(1, 100, 18, 28, 48))
def test_vardrop():
def check_vardrop(drop_inputs, drop_states, drop_outputs):
cell = contrib.rnn.VariationalDropoutCell(mx.gluon.rnn.RNNCell(100, prefix='rnn_'),
drop_outputs=drop_outputs,
drop_states=drop_states,
drop_inputs=drop_inputs)
cell.collect_params().initialize(init='xavier')
input_data = mx.nd.random_uniform(shape=(10, 3, 50), ctx=mx.context.current_context())
with mx.autograd.record():
outputs1, _ = cell.unroll(3, input_data, merge_outputs=True)
mask1 = cell.drop_outputs_mask.asnumpy()
mx.nd.waitall()
outputs2, _ = cell.unroll(3, input_data, merge_outputs=True)
mask2 = cell.drop_outputs_mask.asnumpy()
assert not almost_equal(mask1, mask2)
assert not almost_equal(outputs1.asnumpy(), outputs2.asnumpy())
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs, merge_outputs=False)
outputs = mx.sym.Group(outputs)
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))
assert outs == [(10, 100), (10, 100), (10, 100)]
cell.reset()
cell.hybridize()
with mx.autograd.record():
outputs3, _ = cell.unroll(3, input_data, merge_outputs=True)
mx.nd.waitall()
outputs4, _ = cell.unroll(3, input_data, merge_outputs=True)
assert not almost_equal(outputs3.asnumpy(), outputs4.asnumpy())
assert not almost_equal(outputs1.asnumpy(), outputs3.asnumpy())
check_vardrop(0.5, 0.5, 0.5)
check_vardrop(0.5, 0, 0.5)
if __name__ == '__main__':
import nose
nose.runmodule()
|
closeio/flask-admin
|
refs/heads/master
|
flask_admin/contrib/sqla/typefmt.py
|
34
|
from flask_admin.model.typefmt import BASE_FORMATTERS, list_formatter
from sqlalchemy.orm.collections import InstrumentedList
DEFAULT_FORMATTERS = BASE_FORMATTERS.copy()
DEFAULT_FORMATTERS.update({
InstrumentedList: list_formatter
})
|
fhe-odoo/odoo
|
refs/heads/8.0
|
addons/l10n_fr_hr_payroll/l10n_fr_hr_payroll.py
|
340
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'plafond_secu': fields.float('Plafond de la Securite Sociale', digits_compute=dp.get_precision('Payroll')),
'nombre_employes': fields.integer('Nombre d\'employes'),
'cotisation_prevoyance': fields.float('Cotisation Patronale Prevoyance', digits_compute=dp.get_precision('Payroll')),
'org_ss': fields.char('Organisme de securite sociale'),
'conv_coll': fields.char('Convention collective'),
}
class hr_contract(osv.osv):
_inherit = 'hr.contract'
_columns = {
'qualif': fields.char('Qualification'),
'niveau': fields.char('Niveau'),
'coef': fields.char('Coefficient'),
}
class hr_payslip(osv.osv):
_inherit = 'hr.payslip'
_columns = {
'payment_mode': fields.char('Mode de paiement'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ajvpot/CTFd
|
refs/heads/master
|
migrations/versions/1093835a1051_add_default_email_templates.py
|
4
|
"""Add default email templates
Revision ID: 1093835a1051
Revises: a03403986a32
Create Date: 2020-02-15 01:32:10.959373
"""
from alembic import op
from sqlalchemy.sql import column, table
from CTFd.models import db
from CTFd.utils.email import (
DEFAULT_PASSWORD_RESET_BODY,
DEFAULT_PASSWORD_RESET_SUBJECT,
DEFAULT_SUCCESSFUL_REGISTRATION_EMAIL_BODY,
DEFAULT_SUCCESSFUL_REGISTRATION_EMAIL_SUBJECT,
DEFAULT_USER_CREATION_EMAIL_BODY,
DEFAULT_USER_CREATION_EMAIL_SUBJECT,
DEFAULT_VERIFICATION_EMAIL_BODY,
DEFAULT_VERIFICATION_EMAIL_SUBJECT,
)
# revision identifiers, used by Alembic.
revision = "1093835a1051"
down_revision = "a03403986a32"
branch_labels = None
depends_on = None
configs_table = table(
"config", column("id", db.Integer), column("key", db.Text), column("value", db.Text)
)
def get_config(key):
connection = op.get_bind()
return connection.execute(
configs_table.select().where(configs_table.c.key == key).limit(1)
).fetchone()
def set_config(key, value):
connection = op.get_bind()
connection.execute(configs_table.insert().values(key=key, value=value))
def upgrade():
    # Only run if this instance has already been set up before
if bool(get_config("setup")) is True:
for k, v in [
("password_reset_body", DEFAULT_PASSWORD_RESET_BODY),
("password_reset_subject", DEFAULT_PASSWORD_RESET_SUBJECT),
(
"successful_registration_email_body",
DEFAULT_SUCCESSFUL_REGISTRATION_EMAIL_BODY,
),
(
"successful_registration_email_subject",
DEFAULT_SUCCESSFUL_REGISTRATION_EMAIL_SUBJECT,
),
("user_creation_email_body", DEFAULT_USER_CREATION_EMAIL_BODY),
("user_creation_email_subject", DEFAULT_USER_CREATION_EMAIL_SUBJECT),
("verification_email_body", DEFAULT_VERIFICATION_EMAIL_BODY),
("verification_email_subject", DEFAULT_VERIFICATION_EMAIL_SUBJECT),
]:
if get_config(k) is None:
set_config(k, v)
def downgrade():
pass
|
FHannes/intellij-community
|
refs/heads/master
|
python/helpers/pydev/pydevd_attach_to_process/_test_attach_to_process.py
|
88
|
import subprocess
import sys
print(sys.executable)
if __name__ == '__main__':
p = subprocess.Popen([sys.executable, '-u', '_always_live_program.py'])
import attach_pydevd
attach_pydevd.main(attach_pydevd.process_command_line(['--pid', str(p.pid)]))
p.wait()
|
rob356/SickRage
|
refs/heads/master
|
lib/unidecode/x086.py
|
252
|
data = (
'Tuo ', # 0x00
'Wu ', # 0x01
'Rui ', # 0x02
'Rui ', # 0x03
'Qi ', # 0x04
'Heng ', # 0x05
'Lu ', # 0x06
'Su ', # 0x07
'Tui ', # 0x08
'Mang ', # 0x09
'Yun ', # 0x0a
'Pin ', # 0x0b
'Yu ', # 0x0c
'Xun ', # 0x0d
'Ji ', # 0x0e
'Jiong ', # 0x0f
'Xian ', # 0x10
'Mo ', # 0x11
'Hagi ', # 0x12
'Su ', # 0x13
'Jiong ', # 0x14
'[?] ', # 0x15
'Nie ', # 0x16
'Bo ', # 0x17
'Rang ', # 0x18
'Yi ', # 0x19
'Xian ', # 0x1a
'Yu ', # 0x1b
'Ju ', # 0x1c
'Lian ', # 0x1d
'Lian ', # 0x1e
'Yin ', # 0x1f
'Qiang ', # 0x20
'Ying ', # 0x21
'Long ', # 0x22
'Tong ', # 0x23
'Wei ', # 0x24
'Yue ', # 0x25
'Ling ', # 0x26
'Qu ', # 0x27
'Yao ', # 0x28
'Fan ', # 0x29
'Mi ', # 0x2a
'Lan ', # 0x2b
'Kui ', # 0x2c
'Lan ', # 0x2d
'Ji ', # 0x2e
'Dang ', # 0x2f
'Katsura ', # 0x30
'Lei ', # 0x31
'Lei ', # 0x32
'Hua ', # 0x33
'Feng ', # 0x34
'Zhi ', # 0x35
'Wei ', # 0x36
'Kui ', # 0x37
'Zhan ', # 0x38
'Huai ', # 0x39
'Li ', # 0x3a
'Ji ', # 0x3b
'Mi ', # 0x3c
'Lei ', # 0x3d
'Huai ', # 0x3e
'Luo ', # 0x3f
'Ji ', # 0x40
'Kui ', # 0x41
'Lu ', # 0x42
'Jian ', # 0x43
'San ', # 0x44
'[?] ', # 0x45
'Lei ', # 0x46
'Quan ', # 0x47
'Xiao ', # 0x48
'Yi ', # 0x49
'Luan ', # 0x4a
'Men ', # 0x4b
'Bie ', # 0x4c
'Hu ', # 0x4d
'Hu ', # 0x4e
'Lu ', # 0x4f
'Nue ', # 0x50
'Lu ', # 0x51
'Si ', # 0x52
'Xiao ', # 0x53
'Qian ', # 0x54
'Chu ', # 0x55
'Hu ', # 0x56
'Xu ', # 0x57
'Cuo ', # 0x58
'Fu ', # 0x59
'Xu ', # 0x5a
'Xu ', # 0x5b
'Lu ', # 0x5c
'Hu ', # 0x5d
'Yu ', # 0x5e
'Hao ', # 0x5f
'Jiao ', # 0x60
'Ju ', # 0x61
'Guo ', # 0x62
'Bao ', # 0x63
'Yan ', # 0x64
'Zhan ', # 0x65
'Zhan ', # 0x66
'Kui ', # 0x67
'Ban ', # 0x68
'Xi ', # 0x69
'Shu ', # 0x6a
'Chong ', # 0x6b
'Qiu ', # 0x6c
'Diao ', # 0x6d
'Ji ', # 0x6e
'Qiu ', # 0x6f
'Cheng ', # 0x70
'Shi ', # 0x71
'[?] ', # 0x72
'Di ', # 0x73
'Zhe ', # 0x74
'She ', # 0x75
'Yu ', # 0x76
'Gan ', # 0x77
'Zi ', # 0x78
'Hong ', # 0x79
'Hui ', # 0x7a
'Meng ', # 0x7b
'Ge ', # 0x7c
'Sui ', # 0x7d
'Xia ', # 0x7e
'Chai ', # 0x7f
'Shi ', # 0x80
'Yi ', # 0x81
'Ma ', # 0x82
'Xiang ', # 0x83
'Fang ', # 0x84
'E ', # 0x85
'Pa ', # 0x86
'Chi ', # 0x87
'Qian ', # 0x88
'Wen ', # 0x89
'Wen ', # 0x8a
'Rui ', # 0x8b
'Bang ', # 0x8c
'Bi ', # 0x8d
'Yue ', # 0x8e
'Yue ', # 0x8f
'Jun ', # 0x90
'Qi ', # 0x91
'Ran ', # 0x92
'Yin ', # 0x93
'Qi ', # 0x94
'Tian ', # 0x95
'Yuan ', # 0x96
'Jue ', # 0x97
'Hui ', # 0x98
'Qin ', # 0x99
'Qi ', # 0x9a
'Zhong ', # 0x9b
'Ya ', # 0x9c
'Ci ', # 0x9d
'Mu ', # 0x9e
'Wang ', # 0x9f
'Fen ', # 0xa0
'Fen ', # 0xa1
'Hang ', # 0xa2
'Gong ', # 0xa3
'Zao ', # 0xa4
'Fu ', # 0xa5
'Ran ', # 0xa6
'Jie ', # 0xa7
'Fu ', # 0xa8
'Chi ', # 0xa9
'Dou ', # 0xaa
'Piao ', # 0xab
'Xian ', # 0xac
'Ni ', # 0xad
'Te ', # 0xae
'Qiu ', # 0xaf
'You ', # 0xb0
'Zha ', # 0xb1
'Ping ', # 0xb2
'Chi ', # 0xb3
'You ', # 0xb4
'He ', # 0xb5
'Han ', # 0xb6
'Ju ', # 0xb7
'Li ', # 0xb8
'Fu ', # 0xb9
'Ran ', # 0xba
'Zha ', # 0xbb
'Gou ', # 0xbc
'Pi ', # 0xbd
'Bo ', # 0xbe
'Xian ', # 0xbf
'Zhu ', # 0xc0
'Diao ', # 0xc1
'Bie ', # 0xc2
'Bing ', # 0xc3
'Gu ', # 0xc4
'Ran ', # 0xc5
'Qu ', # 0xc6
'She ', # 0xc7
'Tie ', # 0xc8
'Ling ', # 0xc9
'Gu ', # 0xca
'Dan ', # 0xcb
'Gu ', # 0xcc
'Ying ', # 0xcd
'Li ', # 0xce
'Cheng ', # 0xcf
'Qu ', # 0xd0
'Mou ', # 0xd1
'Ge ', # 0xd2
'Ci ', # 0xd3
'Hui ', # 0xd4
'Hui ', # 0xd5
'Mang ', # 0xd6
'Fu ', # 0xd7
'Yang ', # 0xd8
'Wa ', # 0xd9
'Lie ', # 0xda
'Zhu ', # 0xdb
'Yi ', # 0xdc
'Xian ', # 0xdd
'Kuo ', # 0xde
'Jiao ', # 0xdf
'Li ', # 0xe0
'Yi ', # 0xe1
'Ping ', # 0xe2
'Ji ', # 0xe3
'Ha ', # 0xe4
'She ', # 0xe5
'Yi ', # 0xe6
'Wang ', # 0xe7
'Mo ', # 0xe8
'Qiong ', # 0xe9
'Qie ', # 0xea
'Gui ', # 0xeb
'Gong ', # 0xec
'Zhi ', # 0xed
'Man ', # 0xee
'Ebi ', # 0xef
'Zhi ', # 0xf0
'Jia ', # 0xf1
'Rao ', # 0xf2
'Si ', # 0xf3
'Qi ', # 0xf4
'Xing ', # 0xf5
'Lie ', # 0xf6
'Qiu ', # 0xf7
'Shao ', # 0xf8
'Yong ', # 0xf9
'Jia ', # 0xfa
'Shui ', # 0xfb
'Che ', # 0xfc
'Bai ', # 0xfd
'E ', # 0xfe
'Han ', # 0xff
)
|
IllusionRom-deprecated/android_platform_tools_idea
|
refs/heads/master
|
python/lib/Lib/site-packages/django/core/management/commands/shell.py
|
230
|
import os
from django.core.management.base import NoArgsCommand
from optparse import make_option
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--plain', action='store_true', dest='plain',
help='Tells Django to use plain Python, not IPython.'),
)
help = "Runs a Python interactive interpreter. Tries to use IPython, if it's available."
shells = ['ipython', 'bpython']
requires_model_validation = False
def ipython(self):
try:
from IPython.frontend.terminal.embed import TerminalInteractiveShell
shell = TerminalInteractiveShell()
shell.mainloop()
except ImportError:
# IPython < 0.11
# Explicitly pass an empty list as arguments, because otherwise
# IPython would use sys.argv from this script.
try:
from IPython.Shell import IPShell
shell = IPShell(argv=[])
shell.mainloop()
except ImportError:
# IPython not found at all, raise ImportError
raise
def bpython(self):
import bpython
bpython.embed()
def run_shell(self):
for shell in self.shells:
try:
return getattr(self, shell)()
except ImportError:
pass
raise ImportError
def handle_noargs(self, **options):
# XXX: (Temporary) workaround for ticket #1796: force early loading of all
# models from installed apps.
from django.db.models.loading import get_models
loaded_models = get_models()
use_plain = options.get('plain', False)
try:
if use_plain:
# Don't bother loading IPython, because the user wants plain Python.
raise ImportError
self.run_shell()
except ImportError:
import code
# Set up a dictionary to serve as the environment for the shell, so
# that tab completion works on objects that are imported at runtime.
# See ticket 5082.
imported_objects = {}
try: # Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
readline.parse_and_bind("tab:complete")
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then import user.
if not use_plain:
pythonrc = os.environ.get("PYTHONSTARTUP")
if pythonrc and os.path.isfile(pythonrc):
try:
execfile(pythonrc)
except NameError:
pass
# This will import .pythonrc.py as a side-effect
import user
code.interact(local=imported_objects)
|
Tetchain/pycoin
|
refs/heads/master
|
pycoin/services/__init__.py
|
20
|
from .providers import spendables_for_address, get_tx_db
|
mglukhikh/intellij-community
|
refs/heads/master
|
python/helpers/profiler/thriftpy/protocol/exc.py
|
46
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from ..thrift import TException
class TProtocolException(TException):
"""Custom Protocol Exception class"""
UNKNOWN = 0
INVALID_DATA = 1
NEGATIVE_SIZE = 2
SIZE_LIMIT = 3
BAD_VERSION = 4
def __init__(self, type=UNKNOWN, message=None):
TException.__init__(self, message)
self.type = type
|
jlegendary/scikit-learn
|
refs/heads/master
|
sklearn/decomposition/tests/test_kernel_pca.py
|
155
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed, [])
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/mobile/shared_dressed_fed_dub_commander_bith_female_01.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_fed_dub_commander_bith_female_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","bith_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
googlearchive/py-gfm
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup, find_packages
setup(
name='py-gfm',
version='0.1.1',
description='An implementation of Github-Flavored Markdown written as an extension to the Python Markdown library.',
author='Dart Team',
author_email='misc@dartlang.org',
url='https://github.com/dart-lang/py-gfm',
download_url='https://github.com/dart-lang/py-gfm/tarball/0.1.0',
packages=find_packages(),
include_package_data = True,
install_requires = ['setuptools', 'markdown'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python'
]
)
|
jiangzhuo/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/encodings/shift_jisx0213.py
|
816
|
#
# shift_jisx0213.py: Python Unicode Codec for SHIFT_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jisx0213')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='shift_jisx0213',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
vinnyoodles/algorithms
|
refs/heads/master
|
python/dcp/problem4.py
|
1
|
# This problem was asked by Stripe.
# Given an array of integers, find the first missing positive integer in linear time and constant space.
# In other words, find the lowest positive integer that does not exist in the array.
# The array can contain duplicates and negative numbers as well.
# For example, the input [3, 4, -1, 1] should give 2. The input [1, 2, 0] should give 3.
# You can modify the input array in-place.
###########################################
# Explanation
# First, the answer must lie in the range 1 to len(arr) + 1: if the array
# contains every integer from 1 to len(arr), the answer is len(arr) + 1;
# otherwise it is the smallest value in that range that is missing.
# Because the answer is bounded by the array length, the array itself can be
# used as a lookup table.
# First, place every value v with 1 <= v <= len(arr) at index v - 1.
# Then, find the first index whose value does not match index + 1.
# (A small sanity check using the examples above follows the function below.)
def findingFirstMissingPositive(arr):
length = len(arr)
if length == 0:
return 1
for index in range(length):
value = arr[index]
while value <= length and value > 0 and arr[value - 1] != value:
new_value = arr[value - 1]
arr[value - 1] = value
value = new_value
for index in range(length):
if arr[index] != (index + 1):
return index + 1
return length + 1
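# A minimal sanity check (not part of the original prompt) exercising the
# examples stated in the comments above; run this file directly to execute it.
if __name__ == '__main__':
    assert findingFirstMissingPositive([3, 4, -1, 1]) == 2
    assert findingFirstMissingPositive([1, 2, 0]) == 3
    assert findingFirstMissingPositive([]) == 1
    # Worst case: the array already holds 1..n, so the answer is n + 1.
    assert findingFirstMissingPositive([1, 2, 3]) == 4
    print('all checks passed')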
|
stbka/ansible
|
refs/heads/devel
|
lib/ansible/parsing/yaml/loader.py
|
234
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from _yaml import CParser, CEmitter
HAVE_PYYAML_C = True
except ImportError:
HAVE_PYYAML_C = False
from yaml.resolver import Resolver
from ansible.parsing.yaml.constructor import AnsibleConstructor
if HAVE_PYYAML_C:
class AnsibleLoader(CParser, AnsibleConstructor, Resolver):
def __init__(self, stream, file_name=None):
CParser.__init__(self, stream)
AnsibleConstructor.__init__(self, file_name=file_name)
Resolver.__init__(self)
else:
from yaml.composer import Composer
from yaml.reader import Reader
from yaml.scanner import Scanner
from yaml.parser import Parser
class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver):
def __init__(self, stream, file_name=None):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
AnsibleConstructor.__init__(self, file_name=file_name)
Resolver.__init__(self)
|
benfitzpatrick/cylc
|
refs/heads/master
|
lib/isodatetime/tests.py
|
2
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# (C) British Crown Copyright 2013-2014 Met Office.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
"""This tests the ISO 8601 parsing and data model functionality."""
import copy
import multiprocessing
import unittest
from . import data
from . import dumpers
from . import parsers
from . import parser_spec
def get_timeduration_tests():
"""Yield tests for the duration class."""
tests = {
"get_days_and_seconds": [
([], {"hours": 25}, (1, 3600)),
([], {"seconds": 59}, (0, 59)),
([], {"minutes": 10}, (0, 600)),
([], {"days": 5, "minutes": 2}, (5, 120)),
([], {"hours": 2, "minutes": 5, "seconds": 11.5}, (0, 7511.5)),
([], {"hours": 23, "minutes": 1446}, (1, 83160))
],
"get_seconds": [
([], {"hours": 25}, 90000),
([], {"seconds": 59}, 59),
([], {"minutes": 10}, 600),
([], {"days": 5, "minutes": 2}, 432120),
([], {"hours": 2, "minutes": 5, "seconds": 11.5}, 7511.5),
([], {"hours": 23, "minutes": 1446}, 169560)
]
}
for method, method_tests in tests.items():
for method_args, test_props, ctrl_results in method_tests:
yield test_props, method, method_args, ctrl_results
def get_timedurationparser_tests():
"""Yield tests for the duration parser."""
test_expressions = {
"P3Y": {"years": 3},
"P90Y": {"years": 90},
"P1Y2M": {"years": 1, "months": 2},
"P20Y2M": {"years": 20, "months": 2},
"P2M": {"months": 2},
"P52M": {"months": 52},
"P20Y10M2D": {"years": 20, "months": 10, "days": 2},
"P1Y3D": {"years": 1, "days": 3},
"P4M1D": {"months": 4, "days": 1},
"P3Y404D": {"years": 3, "days": 404},
"P30Y2D": {"years": 30, "days": 2},
"PT6H": {"hours": 6},
"PT1034H": {"hours": 1034},
"P3YT4H2M": {"years": 3, "hours": 4, "minutes": 2},
"P30Y2DT10S": {"years": 30, "days": 2, "seconds": 10},
"PT2S": {"seconds": 2},
"PT2.5S": {"seconds": 2.5},
"PT2,5S": {"seconds": 2.5},
"PT5.5023H": {"hours": 5.5023},
"PT5,5023H": {"hours": 5.5023},
"P5W": {"weeks": 5},
"P100W": {"weeks": 100},
"P0004-03-02T01": {"years": 4, "months": 3, "days": 2,
"hours": 1},
"P0004-03-00": {"years": 4, "months": 3},
"P0004-078": {"years": 4, "days": 78},
"P0004-078T10,5": {"years": 4, "days": 78, "hours": 10.5},
"P00000020T133702": {"days": 20, "hours": 13, "minutes": 37,
"seconds": 02},
"-P3YT4H2M": {"years": -3, "hours": -4, "minutes": -2},
"-PT5M": {"minutes": -5},
"-P7Y": {"years": -7, "hours": 0}
}
for expression, ctrl_result in test_expressions.items():
ctrl_data = str(data.Duration(**ctrl_result))
yield expression, ctrl_data
def get_timedurationdumper_tests():
"""Yield tests for the duration dumper."""
test_expressions = {
"P3Y": {"years": 3},
"P90Y": {"years": 90},
"P1Y2M": {"years": 1, "months": 2},
"P20Y2M": {"years": 20, "months": 2},
"P2M": {"months": 2},
"P52M": {"months": 52},
"P20Y10M2D": {"years": 20, "months": 10, "days": 2},
"P1Y3D": {"years": 1, "days": 3},
"P4M1D": {"months": 4, "days": 1},
"P3Y404D": {"years": 3, "days": 404},
"P30Y2D": {"years": 30, "days": 2},
"PT6H": {"hours": 6},
"PT1034H": {"hours": 1034},
"P3YT4H2M": {"years": 3, "hours": 4, "minutes": 2},
"P30Y2DT10S": {"years": 30, "days": 2, "seconds": 10},
"PT2S": {"seconds": 2},
"PT2,5S": {"seconds": 2.5},
"PT5,5023H": {"hours": 5.5023},
"P5W": {"weeks": 5},
"P100W": {"weeks": 100},
"-P3YT4H2M": {"years": -3, "hours": -4, "minutes": -2},
"-PT5M": {"minutes": -5},
"-P7Y": {"years": -7, "hours": 0},
"PT1H": {"seconds": 3600, "standardize": True},
"P1DT5M": {"minutes": 1445, "standardize": True},
"PT59S": {"seconds": 59, "standardize": True},
"PT1H4M56S": {"minutes": 10, "seconds": 3296, "standardize": True},
}
for expression, ctrl_result in test_expressions.items():
yield expression, ctrl_result
def get_timepoint_dumper_tests():
"""Yield tests for custom timepoint dumps."""
return [
(
{"year": 44, "month_of_year": 1, "day_of_month": 4,
"hour_of_day": 5, "minute_of_hour": 1, "second_of_minute": 2,
"time_zone_hour": 0, "time_zone_minute": 0},
[("CCYY-MMDDThhmmZ", "0044-0104T0501Z"),
("YYDDDThh:mm:ss", "44004T05:01:02"),
("WwwD", "W011"),
("CCDDDThh*ss-0600", "00003T23*02-0600"),
(u"+XCCYY-MM-DDThh:mm:ss-11:45",
"+000044-01-03T17:16:02-11:45"),
(u"+XCCYYMM-DDThh-01:00", "+00004401-04T04-01:00"),
(u"+XCCYYMM-DDThh+13:00", "+00004401-04T18+13:00"),
(u"+XCCYYMM-DDThh-0100", "+00004401-04T04-0100"),
(u"+XCCYYMM-DDThh+1300", "+00004401-04T18+1300"),
(u"+XCCYYMMDDThh-0100", "+0000440104T04-0100"),
(u"+XCCYYMMDDThh+13", "+0000440104T18+13"),
(u"+XCCYYMMDDThh+hhmm", "+0000440104T05+0000"),
(u"+XCCYY-MM-DDThh:mm:ss+hh:mm",
"+000044-01-04T05:01:02+00:00"),
("DD/MM/CCYY is a silly format", "04/01/0044 is a silly format"),
("ThhZ", "T05Z"),
("%Y-%m-%dT%H:%M", "0044-01-04T05:01")]
),
(
{"year": 500200, "month_of_year": 7, "day_of_month": 28,
"expanded_year_digits": 2, "hour_of_day": 0,
"hour_of_day_decimal": 0.4356, "time_zone_hour": -8,
"time_zone_minute": -30},
[("+XCCYY-MMDDThhmmZ", "+500200-0728T0856Z"),
("+XCCYYDDDThh:mm:ss", "+500200209T00:26:08"),
("WwwD", "W311"),
("+XCCDDDThh*ss-0600", "+5002209T02*08-0600"),
(u"+XCCYY-MM-DDThh:mm:ss-11:45",
"+500200-07-27T21:11:08-11:45"),
(u"+XCCYYMM-DDThhmm-01:00", "+50020007-28T0756-01:00"),
(u"+XCCYYMM-DDThhmm+13:00", "+50020007-28T2156+13:00"),
(u"+XCCYYMM-DDThhmm-0100", "+50020007-28T0756-0100"),
(u"+XCCYYMM-DDThhmm+1300", "+50020007-28T2156+1300"),
(u"+XCCYYMMDDThhmm-0100", "+5002000728T0756-0100"),
(u"+XCCYYMMDDThhmm+13", "+5002000728T2156+13"),
(u"+XCCYYMMDDThh+hhmm", "+5002000728T00-0830"),
(u"+XCCYYWwwDThhmm+hh", "+500200W311T0026-08"),
(u"+XCCYYDDDThhmm+hh", "+500200209T0026-08"),
(u"+XCCYY-MM-DDThh:mm:ss+hh:mm",
"+500200-07-28T00:26:08-08:30"),
(u"+XCCYY-MM-DDThh:mm:ssZ", "+500200-07-28T08:56:08Z"),
("DD/MM/+XCCYY is a silly format", "28/07/+500200 is a silly format"),
("ThhmmZ", "T0856Z"),
("%m-%dT%H:%M", "07-28T00:26")]
),
(
{"year": -56, "day_of_year": 318, "expanded_year_digits": 2,
"hour_of_day": 5, "minute_of_hour": 1, "time_zone_hour": 6},
[("+XCCYY-MMDDThhmmZ", "-000056-1112T2301Z"),
("+XCCYYDDDThh:mm:ss", "-000056318T05:01:00"),
("WwwD", "W461"),
("+XCCDDDThh*ss-0600", "-0000317T17*00-0600"),
(u"+XCCYY-MM-DDThh:mm:ss-11:45",
"-000056-11-12T11:16:00-11:45"),
(u"+XCCYYMM-DDThhmm-01:00", "-00005611-12T2201-01:00"),
(u"+XCCYYMM-DDThhmm+13:00", "-00005611-13T1201+13:00"),
(u"+XCCYYMM-DDThhmm-0100", "-00005611-12T2201-0100"),
(u"+XCCYYMM-DDThhmm+1300", "-00005611-13T1201+1300"),
(u"+XCCYYMMDDThhmm-0100", "-0000561112T2201-0100"),
(u"+XCCYYMMDDThhmm+13", "-0000561113T1201+13"),
(u"+XCCYYMMDDThh+hhmm", "-0000561113T05+0600"),
(u"+XCCYYWwwDThhmm+hh", "-000056W461T0501+06"),
(u"+XCCYYDDDThhmm+hh", "-000056318T0501+06"),
(u"+XCCYY-MM-DDThh:mm:ss+hh:mm",
"-000056-11-13T05:01:00+06:00"),
(u"+XCCYY-MM-DDThh:mm:ssZ", "-000056-11-12T23:01:00Z"),
("DD/MM/+XCCYY is a silly format", "13/11/-000056 is a silly format"),
("ThhmmZ", "T2301Z"),
("%m-%dT%H:%M", "11-13T05:01")]
),
(
{"year": 1000, "week_of_year": 1, "day_of_week": 1,
"time_zone_hour": 0},
[("CCYY-MMDDThhmmZ", "0999-1230T0000Z"),
("CCYY-DDDThhmmZ", "0999-364T0000Z"),
("CCYY-Www-DThhmm+0200", "1000-W01-1T0200+0200"),
("CCYY-Www-DThhmm-0200", "0999-W52-7T2200-0200"),
("%Y-%m-%dT%H:%M", "0999-12-30T00:00")]
),
(
{"year": 999, "day_of_year": 364, "time_zone_hour": 0},
[("CCYY-MMDDThhmmZ", "0999-1230T0000Z"),
("CCYY-DDDThhmmZ", "0999-364T0000Z"),
("CCYY-Www-DThhmm+0200", "1000-W01-1T0200+0200"),
("CCYY-Www-DThhmm-0200", "0999-W52-7T2200-0200"),
("%Y-%m-%dT%H:%M", "0999-12-30T00:00")]
)
]
def get_timepointdumper_failure_tests():
"""Yield tests that raise exceptions for custom time point dumps."""
bounds_error = dumpers.TimePointDumperBoundsError
return [
(
{"year": 10000, "month_of_year": 1, "day_of_month": 4,
"time_zone_hour": 0, "time_zone_minute": 0},
[("CCYY-MMDDThhmmZ", bounds_error, 0),
("%Y-%m-%dT%H:%M", bounds_error, 0)]
),
(
{"year": -10000, "month_of_year": 1, "day_of_month": 4,
"time_zone_hour": 0, "time_zone_minute": 0},
[("CCYY-MMDDThhmmZ", bounds_error, 0),
("%Y-%m-%dT%H:%M", bounds_error, 0)]
),
(
{"year": 10000, "month_of_year": 1, "day_of_month": 4,
"time_zone_hour": 0, "time_zone_minute": 0},
[("CCYY-MMDDThhmmZ", bounds_error, 2)]
),
(
{"year": -10000, "month_of_year": 1, "day_of_month": 4,
"time_zone_hour": 0, "time_zone_minute": 0},
[("CCYY-MMDDThhmmZ", bounds_error, 2)]
),
(
{"year": 1000000, "month_of_year": 1, "day_of_month": 4,
"time_zone_hour": 0, "time_zone_minute": 0},
[("+XCCYY-MMDDThhmmZ", bounds_error, 2)]
),
(
{"year": -1000000, "month_of_year": 1, "day_of_month": 4,
"time_zone_hour": 0, "time_zone_minute": 0},
[("+XCCYY-MMDDThhmmZ", bounds_error, 2)]
)
]
def get_timepointparser_tests(allow_only_basic=False,
allow_truncated=False,
skip_time_zones=False):
"""Yield tests for the time point parser."""
# Note: test dates assume 2 expanded year digits.
test_date_map = {
"basic": {
"complete": {
"00440104": {"year": 44, "month_of_year": 1,
"day_of_month": 4},
"+5002000830": {"year": 500200, "month_of_year": 8,
"day_of_month": 30,
"expanded_year_digits": 2},
"-0000561113": {"year": -56, "month_of_year": 11,
"day_of_month": 13,
"expanded_year_digits": 2},
"-1000240210": {"year": -100024, "month_of_year": 2,
"day_of_month": 10,
"expanded_year_digits": 2},
"1967056": {"year": 1967, "day_of_year": 56},
"+123456078": {"year": 123456, "day_of_year": 78,
"expanded_year_digits": 2},
"-004560134": {"year": -4560, "day_of_year": 134,
"expanded_year_digits": 2},
"1001W011": {"year": 1001, "week_of_year": 1,
"day_of_week": 1},
"+000001W457": {"year": 1, "week_of_year": 45,
"day_of_week": 7,
"expanded_year_digits": 2},
"-010001W053": {"year": -10001, "week_of_year": 5,
"day_of_week": 3, "expanded_year_digits": 2}
},
"reduced": {
"4401-03": {"year": 4401, "month_of_year": 3},
"1982": {"year": 1982},
"19": {"year": 1900},
"+056789-01": {"year": 56789, "month_of_year": 1,
"expanded_year_digits": 2},
"-000001-12": {"year": -1, "month_of_year": 12,
"expanded_year_digits": 2},
"-789123": {"year": -789123, "expanded_year_digits": 2},
"+450001": {"year": 450001, "expanded_year_digits": 2},
# The following cannot be parsed - looks like truncated -YYMM.
# "-0023": {"year": -2300, "expanded_year_digits": 2},
"+5678": {"year": 567800, "expanded_year_digits": 2},
"1765W04": {"year": 1765, "week_of_year": 4},
"+001765W44": {"year": 1765, "week_of_year": 44,
"expanded_year_digits": 2},
"-123321W50": {"year": -123321, "week_of_year": 50,
"expanded_year_digits": 2}
},
"truncated": {
"-9001": {"year": 90, "month_of_year": 1,
"truncated": True,
"truncated_property": "year_of_century"},
"960328": {"year": 96, "month_of_year": 3,
"day_of_month": 28,
"truncated": True,
"truncated_property": "year_of_century"},
"-90": {"year": 90, "truncated": True,
"truncated_property": "year_of_century"},
"--0501": {"month_of_year": 5, "day_of_month": 1,
"truncated": True},
"--12": {"month_of_year": 12, "truncated": True},
"---30": {"day_of_month": 30, "truncated": True},
"98354": {"year": 98, "day_of_year": 354, "truncated": True,
"truncated_property": "year_of_century"},
"-034": {"day_of_year": 34, "truncated": True},
"00W031": {"year": 0, "week_of_year": 3, "day_of_week": 1,
"truncated": True,
"truncated_property": "year_of_century"},
"99W34": {"year": 99, "week_of_year": 34, "truncated": True,
"truncated_property": "year_of_century"},
"-1W02": {"year": 1, "week_of_year": 2,
"truncated": True,
"truncated_property": "year_of_decade"},
"-W031": {"week_of_year": 3, "day_of_week": 1,
"truncated": True},
"-W32": {"week_of_year": 32, "truncated": True},
"-W-1": {"day_of_week": 1, "truncated": True}
}
},
"extended": {
"complete": {
"0044-01-04": {"year": 44, "month_of_year": 1,
"day_of_month": 4},
"+500200-08-30": {"year": 500200, "month_of_year": 8,
"day_of_month": 30,
"expanded_year_digits": 2},
"-000056-11-13": {"year": -56, "month_of_year": 11,
"day_of_month": 13,
"expanded_year_digits": 2},
"-100024-02-10": {"year": -100024, "month_of_year": 2,
"day_of_month": 10,
"expanded_year_digits": 2},
"1967-056": {"year": 1967, "day_of_year": 56},
"+123456-078": {"year": 123456, "day_of_year": 78,
"expanded_year_digits": 2},
"-004560-134": {"year": -4560, "day_of_year": 134,
"expanded_year_digits": 2},
"1001-W01-1": {"year": 1001, "week_of_year": 1,
"day_of_week": 1},
"+000001-W45-7": {"year": 1, "week_of_year": 45,
"day_of_week": 7,
"expanded_year_digits": 2},
"-010001-W05-3": {"year": -10001, "week_of_year": 5,
"day_of_week": 3,
"expanded_year_digits": 2}
},
"reduced": {
"4401-03": {"year": 4401, "month_of_year": 3},
"1982": {"year": 1982},
"19": {"year": 1900},
"+056789-01": {"year": 56789, "month_of_year": 1,
"expanded_year_digits": 2},
"-000001-12": {"year": -1, "month_of_year": 12,
"expanded_year_digits": 2},
"-789123": {"year": -789123, "expanded_year_digits": 2},
"+450001": {"year": 450001, "expanded_year_digits": 2},
# The following cannot be parsed - looks like truncated -YYMM.
# "-0023": {"year": -2300, "expanded_year_digits": 2},
"+5678": {"year": 567800, "expanded_year_digits": 2},
"1765-W04": {"year": 1765, "week_of_year": 4},
"+001765-W44": {"year": 1765, "week_of_year": 44,
"expanded_year_digits": 2},
"-123321-W50": {"year": -123321, "week_of_year": 50,
"expanded_year_digits": 2}
},
"truncated": {
"-9001": {"year": 90, "month_of_year": 1,
"truncated": True,
"truncated_property": "year_of_century"},
"96-03-28": {"year": 96, "month_of_year": 3,
"day_of_month": 28,
"truncated": True,
"truncated_property": "year_of_century"},
"-90": {"year": 90, "truncated": True,
"truncated_property": "year_of_century"},
"--05-01": {"month_of_year": 5, "day_of_month": 1,
"truncated": True},
"--12": {"month_of_year": 12, "truncated": True},
"---30": {"day_of_month": 30, "truncated": True},
"98-354": {"year": 98, "day_of_year": 354, "truncated": True,
"truncated_property": "year_of_century"},
"-034": {"day_of_year": 34, "truncated": True},
"00-W03-1": {"year": 0, "week_of_year": 3, "day_of_week": 1,
"truncated": True,
"truncated_property": "year_of_century"},
"99-W34": {"year": 99, "week_of_year": 34, "truncated": True,
"truncated_property": "year_of_century"},
"-1-W02": {"year": 1, "week_of_year": 2,
"truncated": True,
"truncated_property": "year_of_decade"},
"-W03-1": {"week_of_year": 3, "day_of_week": 1,
"truncated": True},
"-W32": {"week_of_year": 32, "truncated": True},
"-W-1": {"day_of_week": 1, "truncated": True}
}
}
}
test_time_map = {
"basic": {
"complete": {
"050102": {"hour_of_day": 5, "minute_of_hour": 1,
"second_of_minute": 2},
"235902,345": {"hour_of_day": 23, "minute_of_hour": 59,
"second_of_minute": 2,
"second_of_minute_decimal": 0.345},
"235902.345": {"hour_of_day": 23, "minute_of_hour": 59,
"second_of_minute": 2,
"second_of_minute_decimal": 0.345},
"1201,4": {"hour_of_day": 12, "minute_of_hour": 1,
"minute_of_hour_decimal": 0.4},
"1201.4": {"hour_of_day": 12, "minute_of_hour": 1,
"minute_of_hour_decimal": 0.4},
"00,4356": {"hour_of_day": 0,
"hour_of_day_decimal": 0.4356},
"00.4356": {"hour_of_day": 0,
"hour_of_day_decimal": 0.4356}
},
"reduced": {
"0203": {"hour_of_day": 2, "minute_of_hour": 3},
"17": {"hour_of_day": 17}
},
"truncated": {
"-5612": {"minute_of_hour": 56, "second_of_minute": 12,
"truncated": True},
"-12": {"minute_of_hour": 12, "truncated": True},
"--45": {"second_of_minute": 45, "truncated": True},
"-1234,45": {"minute_of_hour": 12, "second_of_minute": 34,
"second_of_minute_decimal": 0.45,
"truncated": True},
"-1234.45": {"minute_of_hour": 12, "second_of_minute": 34,
"second_of_minute_decimal": 0.45,
"truncated": True},
"-34,2": {"minute_of_hour": 34, "minute_of_hour_decimal": 0.2,
"truncated": True},
"-34.2": {"minute_of_hour": 34, "minute_of_hour_decimal": 0.2,
"truncated": True},
"--59,99": {"second_of_minute": 59,
"second_of_minute_decimal": 0.99,
"truncated": True},
"--59.99": {"second_of_minute": 59,
"second_of_minute_decimal": 0.99,
"truncated": True}
}
},
"extended": {
"complete": {
"05:01:02": {"hour_of_day": 5, "minute_of_hour": 1,
"second_of_minute": 2},
"23:59:02,345": {"hour_of_day": 23, "minute_of_hour": 59,
"second_of_minute": 2,
"second_of_minute_decimal": 0.345},
"23:59:02.345": {"hour_of_day": 23, "minute_of_hour": 59,
"second_of_minute": 2,
"second_of_minute_decimal": 0.345},
"12:01,4": {"hour_of_day": 12, "minute_of_hour": 1,
"minute_of_hour_decimal": 0.4},
"12:01.4": {"hour_of_day": 12, "minute_of_hour": 1,
"minute_of_hour_decimal": 0.4},
"00,4356": {"hour_of_day": 0, "hour_of_day_decimal": 0.4356},
"00.4356": {"hour_of_day": 0, "hour_of_day_decimal": 0.4356}
},
"reduced": {
"02:03": {"hour_of_day": 2, "minute_of_hour": 3},
"17": {"hour_of_day": 17}
},
"truncated": {
"-56:12": {"minute_of_hour": 56, "second_of_minute": 12,
"truncated": True},
"-12": {"minute_of_hour": 12, "truncated": True},
"--45": {"second_of_minute": 45, "truncated": True},
"-12:34,45": {"minute_of_hour": 12, "second_of_minute": 34,
"second_of_minute_decimal": 0.45,
"truncated": True},
"-12:34.45": {"minute_of_hour": 12, "second_of_minute": 34,
"second_of_minute_decimal": 0.45,
"truncated": True},
"-34,2": {"minute_of_hour": 34, "minute_of_hour_decimal": 0.2,
"truncated": True},
"-34.2": {"minute_of_hour": 34, "minute_of_hour_decimal": 0.2,
"truncated": True},
"--59,99": {"second_of_minute": 59,
"second_of_minute_decimal": 0.99,
"truncated": True},
"--59.99": {"second_of_minute": 59,
"second_of_minute_decimal": 0.99,
"truncated": True}
}
}
}
test_time_zone_map = {
"basic": {
"Z": {"time_zone_hour": 0, "time_zone_minute": 0},
"+01": {"time_zone_hour": 1},
"-05": {"time_zone_hour": -5},
"+2301": {"time_zone_hour": 23, "time_zone_minute": 1},
"-1230": {"time_zone_hour": -12, "time_zone_minute": -30}
},
"extended": {
"Z": {"time_zone_hour": 0, "time_zone_minute": 0},
"+01": {"time_zone_hour": 1},
"-05": {"time_zone_hour": -5},
"+23:01": {"time_zone_hour": 23, "time_zone_minute": 1},
"-12:30": {"time_zone_hour": -12, "time_zone_minute": -30}
}
}
format_ok_keys = ["basic", "extended"]
if allow_only_basic:
format_ok_keys = ["basic"]
date_combo_ok_keys = ["complete"]
if allow_truncated:
date_combo_ok_keys = ["complete", "truncated"]
time_combo_ok_keys = ["complete", "reduced"]
time_designator = parser_spec.TIME_DESIGNATOR
for format_type in format_ok_keys:
date_format_tests = test_date_map[format_type]
time_format_tests = test_time_map[format_type]
time_zone_format_tests = test_time_zone_map[format_type]
for date_key in date_format_tests:
if not allow_truncated and date_key == "truncated":
continue
for date_expr, info in date_format_tests[date_key].items():
yield date_expr, info
for date_key in date_combo_ok_keys:
date_tests = date_format_tests[date_key]
# Add a blank date for time-only testing.
for date_expr, info in date_tests.items():
for time_key in time_combo_ok_keys:
time_items = time_format_tests[time_key].items()
for time_expr, time_info in time_items:
combo_expr = (
date_expr +
time_designator +
time_expr
)
combo_info = {}
for key, value in info.items() + time_info.items():
combo_info[key] = value
yield combo_expr, combo_info
if skip_time_zones:
continue
time_zone_items = time_zone_format_tests.items()
for time_zone_expr, time_zone_info in time_zone_items:
tz_expr = combo_expr + time_zone_expr
tz_info = {}
for key, value in (combo_info.items() +
time_zone_info.items()):
tz_info[key] = value
yield tz_expr, tz_info
if not allow_truncated:
continue
for time_key in time_format_tests:
time_tests = time_format_tests[time_key]
for time_expr, time_info in time_tests.items():
combo_expr = (
time_designator +
time_expr
)
# Add truncated (no date).
combo_info = {"truncated": True}
for key, value in time_info.items():
combo_info[key] = value
yield combo_expr, combo_info
if skip_time_zones:
continue
time_zone_items = time_zone_format_tests.items()
for time_zone_expr, time_zone_info in time_zone_items:
tz_expr = combo_expr + time_zone_expr
tz_info = {}
for key, value in (combo_info.items() +
time_zone_info.items()):
tz_info[key] = value
yield tz_expr, tz_info
def get_timepoint_subtract_tests():
"""Yield tests for subtracting one timepoint from another."""
return [
(
{"year": 44, "month_of_year": 1, "day_of_month": 4,
"hour_of_day": 5, "minute_of_hour": 1, "second_of_minute": 2,
"time_zone_hour": 0, "time_zone_minute": 0},
{"year": 41, "month_of_year": 12, "day_of_month": 2,
"hour_of_day": 4, "minute_of_hour": 23, "second_of_minute": 1,
"time_zone_hour": 3, "time_zone_minute": 20},
"P763DT3H58M1S"
),
(
{"year": 41, "month_of_year": 12, "day_of_month": 2,
"hour_of_day": 4, "minute_of_hour": 23, "second_of_minute": 1,
"time_zone_hour": 3, "time_zone_minute": 20},
{"year": 44, "month_of_year": 1, "day_of_month": 4,
"hour_of_day": 5, "minute_of_hour": 1, "second_of_minute": 2,
"time_zone_hour": 0, "time_zone_minute": 0},
"-P763DT3H58M1S"
),
(
{"year": 1991, "month_of_year": 6, "day_of_month": 3,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 1991, "month_of_year": 5, "day_of_month": 4,
"hour_of_day": 5, "time_zone_hour": 0, "time_zone_minute": 0},
"P29DT19H"
),
(
{"year": 1991, "month_of_year": 5, "day_of_month": 4,
"hour_of_day": 5, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 1991, "month_of_year": 6, "day_of_month": 3,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
"-P29DT19H"
),
(
{"year": 2014, "month_of_year": 1, "day_of_month": 1,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 2013, "month_of_year": 12, "day_of_month": 31,
"hour_of_day": 23, "time_zone_hour": 0, "time_zone_minute": 0},
"PT1H"
),
(
{"year": 2013, "month_of_year": 12, "day_of_month": 31,
"hour_of_day": 23, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 2014, "month_of_year": 1, "day_of_month": 1,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
"-PT1H"
),
(
{"year": 2014, "month_of_year": 1, "day_of_month": 1,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 2013, "month_of_year": 12, "day_of_month": 1,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
"P31D"
),
(
{"year": 2013, "month_of_year": 12, "day_of_month": 1,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
{"year": 2014, "month_of_year": 1, "day_of_month": 1,
"hour_of_day": 0, "time_zone_hour": 0, "time_zone_minute": 0},
"-P31D"
),
(
{"year": 44, "month_of_year": 1, "day_of_month": 4,
"hour_of_day": 5, "minute_of_hour": 1, "second_of_minute": 2,
"time_zone_hour": 0, "time_zone_minute": 0},
{"year": 41, "month_of_year": 12, "day_of_month": 2,
"hour_of_day": 13, "minute_of_hour": 23, "second_of_minute": 1,
"time_zone_hour": 3, "time_zone_minute": 20},
"P762DT18H58M1S"
),
(
{"year": 41, "month_of_year": 12, "day_of_month": 2,
"hour_of_day": 13, "minute_of_hour": 23, "second_of_minute": 1,
"time_zone_hour": 3, "time_zone_minute": 20},
{"year": 44, "month_of_year": 1, "day_of_month": 4,
"hour_of_day": 5, "minute_of_hour": 1, "second_of_minute": 2,
"time_zone_hour": 0, "time_zone_minute": 0},
"-P762DT18H58M1S"
),
]
def get_timerecurrence_expansion_tests():
"""Return test expansion expressions for data.TimeRecurrence."""
return [
("R3/1001-W01-1T00:00:00Z/1002-W52-6T00:00:00-05:30",
["1001-W01-1T00:00:00Z", "1001-W53-3T14:45:00Z",
"1002-W52-6T05:30:00Z"]),
("R3/P700D/1957-W01-1T06,5Z",
["1953-W10-1T06,5Z", "1955-W05-1T06,5Z", "1957-W01-1T06,5Z"]),
("R3/P5DT2,5S/1001-W11-1T00:30:02,5-02:00",
["1001-W09-5T00:29:57,5-02:00", "1001-W10-3T00:30:00-02:00",
"1001-W11-1T00:30:02,5-02:00"]),
("R/+000001W457T060000Z/P4M1D",
["+000001-W45-7T06:00:00Z", "+000002-W11-2T06:00:00Z",
"+000002-W28-6T06:00:00Z"]),
("R/P4M1DT6M/+002302-002T06:00:00-00:30",
["+002302-002T06:00:00-00:30", "+002301-244T05:54:00-00:30",
"+002301-120T05:48:00-00:30"]),
("R/P30Y2DT15H/-099994-02-12T17:00:00-02:30",
["-099994-02-12T17:00:00-02:30", "-100024-02-10T02:00:00-02:30",
"-100054-02-07T11:00:00-02:30"]),
("R/-100024-02-10T17:00:00-12:30/PT5.5H",
["-100024-02-10T17:00:00-12:30", "-100024-02-10T22:30:00-12:30",
"-100024-02-11T04:00:00-12:30"])
]
def get_timerecurrence_expansion_tests_for_alt_calendar(calendar_mode):
"""Return alternate calendar tests for data.TimeRecurrence."""
if calendar_mode == "360":
return get_timerecurrence_expansion_tests_360()
if calendar_mode == "365":
return get_timerecurrence_expansion_tests_365()
if calendar_mode == "366":
return get_timerecurrence_expansion_tests_366()
def get_timerecurrence_expansion_tests_360():
"""Return test expansion expressions for data.TimeRecurrence."""
return [
("R13/1984-01-30T00Z/P1M",
["1984-01-30T00:00:00Z", "1984-02-30T00:00:00Z", "1984-03-30T00:00:00Z",
"1984-04-30T00:00:00Z", "1984-05-30T00:00:00Z", "1984-06-30T00:00:00Z",
"1984-07-30T00:00:00Z", "1984-08-30T00:00:00Z", "1984-09-30T00:00:00Z",
"1984-10-30T00:00:00Z", "1984-11-30T00:00:00Z", "1984-12-30T00:00:00Z",
"1985-01-30T00:00:00Z"]),
("R2/1984-01-30T00Z/P1D",
["1984-01-30T00:00:00Z", "1984-02-01T00:00:00Z"]),
("R2/P1D/1984-02-01T00Z",
["1984-01-30T00:00:00Z", "1984-02-01T00:00:00Z"]),
("R2/P1D/1984-01-01T00Z",
["1983-12-30T00:00:00Z", "1984-01-01T00:00:00Z"]),
("R2/1983-12-30T00Z/P1D",
["1983-12-30T00:00:00Z", "1984-01-01T00:00:00Z"]),
("R2/P1D/2005-01-01T00Z",
["2004-12-30T00:00:00Z", "2005-01-01T00:00:00Z"]),
("R2/2003-12-30T00Z/P1D",
["2003-12-30T00:00:00Z", "2004-01-01T00:00:00Z"]),
("R2/P1D/2004-01-01T00Z",
["2003-12-30T00:00:00Z", "2004-01-01T00:00:00Z"]),
("R2/2004-12-30T00Z/P1D",
["2004-12-30T00:00:00Z", "2005-01-01T00:00:00Z"]),
("R3/P1Y/2005-02-30T00Z",
["2003-02-30T00:00:00Z", "2004-02-30T00:00:00Z", "2005-02-30T00:00:00Z"]),
("R3/2003-02-30T00Z/P1Y",
["2003-02-30T00:00:00Z", "2004-02-30T00:00:00Z", "2005-02-30T00:00:00Z"]),
]
def get_timerecurrence_expansion_tests_365():
"""Return test expansion expressions for data.TimeRecurrence."""
return [
("R13/1984-01-30T00Z/P1M",
["1984-01-30T00:00:00Z", "1984-02-28T00:00:00Z",
"1984-03-28T00:00:00Z", "1984-04-28T00:00:00Z",
"1984-05-28T00:00:00Z", "1984-06-28T00:00:00Z",
"1984-07-28T00:00:00Z", "1984-08-28T00:00:00Z",
"1984-09-28T00:00:00Z", "1984-10-28T00:00:00Z",
"1984-11-28T00:00:00Z", "1984-12-28T00:00:00Z",
"1985-01-28T00:00:00Z"]),
("R13/1985-01-30T00Z/P1M",
["1985-01-30T00:00:00Z", "1985-02-28T00:00:00Z",
"1985-03-28T00:00:00Z", "1985-04-28T00:00:00Z",
"1985-05-28T00:00:00Z", "1985-06-28T00:00:00Z",
"1985-07-28T00:00:00Z", "1985-08-28T00:00:00Z",
"1985-09-28T00:00:00Z", "1985-10-28T00:00:00Z",
"1985-11-28T00:00:00Z", "1985-12-28T00:00:00Z",
"1986-01-28T00:00:00Z"]),
("R2/1984-01-30T00Z/P1D",
["1984-01-30T00:00:00Z", "1984-01-31T00:00:00Z"]),
("R2/P1D/1984-02-01T00Z",
["1984-01-31T00:00:00Z", "1984-02-01T00:00:00Z"]),
("R2/P1D/1984-01-01T00Z",
["1983-12-31T00:00:00Z", "1984-01-01T00:00:00Z"]),
("R2/1983-12-30T00Z/P1D",
["1983-12-30T00:00:00Z", "1983-12-31T00:00:00Z"]),
("R2/2000-02-28T00Z/P1Y1D",
["2000-02-28T00:00:00Z", "2001-03-01T00:00:00Z"]),
("R2/2001-02-28T00Z/P1Y1D",
["2001-02-28T00:00:00Z", "2002-03-01T00:00:00Z"]),
]
def get_timerecurrence_expansion_tests_366():
"""Return test expansion expressions for data.TimeRecurrence."""
return [
("R13/1984-01-30T00Z/P1M",
["1984-01-30T00:00:00Z", "1984-02-29T00:00:00Z",
"1984-03-29T00:00:00Z", "1984-04-29T00:00:00Z",
"1984-05-29T00:00:00Z", "1984-06-29T00:00:00Z",
"1984-07-29T00:00:00Z", "1984-08-29T00:00:00Z",
"1984-09-29T00:00:00Z", "1984-10-29T00:00:00Z",
"1984-11-29T00:00:00Z", "1984-12-29T00:00:00Z",
"1985-01-29T00:00:00Z"]),
("R13/1985-01-30T00Z/P1M",
["1985-01-30T00:00:00Z", "1985-02-29T00:00:00Z",
"1985-03-29T00:00:00Z", "1985-04-29T00:00:00Z",
"1985-05-29T00:00:00Z", "1985-06-29T00:00:00Z",
"1985-07-29T00:00:00Z", "1985-08-29T00:00:00Z",
"1985-09-29T00:00:00Z", "1985-10-29T00:00:00Z",
"1985-11-29T00:00:00Z", "1985-12-29T00:00:00Z",
"1986-01-29T00:00:00Z"]),
("R2/1984-01-30T00Z/P1D",
["1984-01-30T00:00:00Z", "1984-01-31T00:00:00Z"]),
("R2/P1D/1984-02-01T00Z",
["1984-01-31T00:00:00Z", "1984-02-01T00:00:00Z"]),
("R2/P1D/1984-01-01T00Z",
["1983-12-31T00:00:00Z", "1984-01-01T00:00:00Z"]),
("R2/1983-12-30T00Z/P1D",
["1983-12-30T00:00:00Z", "1983-12-31T00:00:00Z"]),
("R2/1999-02-28T00Z/P1Y1D",
["1999-02-28T00:00:00Z", "2000-02-29T00:00:00Z"]),
("R2/2000-02-28T00Z/P1Y1D",
["2000-02-28T00:00:00Z", "2001-02-29T00:00:00Z"]),
("R2/2001-02-28T00Z/P1Y1D",
["2001-02-28T00:00:00Z", "2002-02-29T00:00:00Z"]),
]
def get_timerecurrence_membership_tests():
"""Return test membership expressions for data.TimeRecurrence."""
return [
("R3/1001-W01-1T00:00:00Z/1002-W52-6T00:00:00-05:30",
[("1001-W01-1T00:00:00Z", True),
("1000-12-29T00:00:00Z", True),
("0901-07-08T12:45:00Z", False),
("1001-W01-2T00:00:00Z", False),
("1001-W53-3T14:45:00Z", True),
("1002-W52-6T05:30:00Z", True),
("1002-W52-6T03:30:00-02:00", True),
("1002-W52-6T07:30:00+02:00", True),
("10030101T00Z", False)]),
("R3/P700D/1957-W01-1T06,5Z",
[("1953-W10-1T06,5Z", True),
("1953-03-02T06,5Z", True),
("1952-03-02T06,5Z", False),
("1955-W05-1T06,5Z", True),
("1957-W01-1T06,5Z", True),
("1956-366T06,5Z", True),
("1956-356T04,5Z", False)]),
]
def get_timerecurrenceparser_tests():
"""Yield tests for the time recurrence parser."""
test_points = ["-100024-02-10T17:00:00-12:30",
"+000001-W45-7T06Z", "1001W011",
"1955W051T06,5Z", "1999-06-01",
"1967-056", "+5002000830T235902,345",
"1765-W04"]
for reps in [None, 1, 2, 3, 10]:
if reps is None:
reps_string = ""
else:
reps_string = str(reps)
point_parser = parsers.TimePointParser()
duration_parser = parsers.DurationParser()
for point_expr in test_points:
duration_tests = get_timedurationparser_tests()
start_point = point_parser.parse(point_expr)
for duration_expr, duration_result in duration_tests:
if duration_expr.startswith("-P"):
# Our negative durations are not supported in recurrences.
continue
duration = duration_parser.parse(duration_expr)
end_point = start_point + duration
if reps is not None:
expr_1 = ("R" + reps_string + "/" + str(start_point) +
"/" + str(end_point))
yield expr_1, {"repetitions": reps,
"start_point": start_point,
"end_point": end_point}
expr_3 = ("R" + reps_string + "/" + str(start_point) +
"/" + str(duration))
yield expr_3, {"repetitions": reps,
"start_point": start_point,
"duration": duration}
expr_4 = ("R" + reps_string + "/" + str(duration) + "/" +
str(end_point))
yield expr_4, {"repetitions": reps, "duration": duration,
"end_point": end_point}
def get_local_time_zone_hours_minutes():
"""Provide an independent method of getting the local time zone."""
import datetime
utc_offset = datetime.datetime.now() - datetime.datetime.utcnow()
utc_offset_hours = (utc_offset.seconds + 1800) // 3600
utc_offset_minutes = (
((utc_offset.seconds - 3600 * utc_offset_hours) + 30) // 60
)
return utc_offset_hours, utc_offset_minutes
class TestSuite(unittest.TestCase):
"""Test the functionality of parsers and data model manipulation."""
def assertEqual(self, test, control, source=None):
"""Override the assertEqual method to provide more information."""
if source is None:
info = None
else:
info = ("Source %s produced:\n'%s'\nshould be:\n'%s'" %
(source, test, control))
super(TestSuite, self).assertEqual(test, control, info)
def test_days_in_year_range(self):
"""Test the summing-over-days-in-year-range shortcut code."""
for start_year in range(-401, 2):
for end_year in range(start_year, 2):
test_days = data.get_days_in_year_range(
start_year, end_year)
control_days = 0
for year in xrange(start_year, end_year + 1):
control_days += data.get_days_in_year(year)
self.assertEqual(
control_days, test_days, "days in %s to %s" % (
start_year, end_year)
)
def test_timeduration(self):
"""Test the duration class methods."""
for test_props, method, method_args, ctrl_results in (
get_timeduration_tests()):
duration = data.Duration(**test_props)
duration_method = getattr(duration, method)
test_results = duration_method(*method_args)
self.assertEqual(
test_results, ctrl_results,
"%s -> %s(%s)" % (test_props, method, method_args)
)
def test_timeduration_parser(self):
"""Test the duration parsing."""
parser = parsers.DurationParser()
for expression, ctrl_result in get_timedurationparser_tests():
try:
test_result = str(parser.parse(expression))
except parsers.ISO8601SyntaxError:
raise ValueError(
"DurationParser test failed to parse '%s'" %
expression
)
self.assertEqual(test_result, ctrl_result, expression)
def test_timeduration_dumper(self):
"""Test the duration dumping."""
for ctrl_expression, test_props in get_timedurationdumper_tests():
duration = data.Duration(**test_props)
test_expression = str(duration)
self.assertEqual(test_expression, ctrl_expression,
str(test_props))
def test_timepoint(self):
"""Test the time point data model (takes a while)."""
pool = multiprocessing.Pool(processes=4)
pool.map_async(test_timepoint_at_year, range(1801, 2403)).get()
def test_timepoint_plus_float_time_duration_day_of_month_type(self):
"""Test (TimePoint + Duration).day_of_month is an int."""
time_point = data.TimePoint(year=2000) + data.Duration(seconds=1.0)
self.assertEqual(type(time_point.day_of_month), int)
def test_timepoint_subtract(self):
"""Test subtracting one time point from another."""
for test_props1, test_props2, ctrl_string in (
get_timepoint_subtract_tests()):
point1 = data.TimePoint(**test_props1)
point2 = data.TimePoint(**test_props2)
test_string = str(point1 - point2)
self.assertEqual(test_string, ctrl_string,
"%s - %s" % (point1, point2))
def test_timepoint_time_zone(self):
"""Test the time zone handling of timepoint instances."""
year = 2000
month_of_year = 1
day_of_month = 1
utc_offset_hours, utc_offset_minutes = (
get_local_time_zone_hours_minutes()
)
for hour_of_day in range(24):
for minute_of_hour in [0, 30]:
test_dates = [
data.TimePoint(
year=year,
month_of_year=month_of_year,
day_of_month=day_of_month,
hour_of_day=hour_of_day,
minute_of_hour=minute_of_hour
)
]
test_dates.append(test_dates[0].copy())
test_dates.append(test_dates[0].copy())
test_dates.append(test_dates[0].copy())
test_dates[0].set_time_zone_to_utc()
self.assertEqual(test_dates[0].time_zone.hours, 0,
test_dates[0])
self.assertEqual(test_dates[0].time_zone.minutes, 0,
test_dates[0])
test_dates[1].set_time_zone_to_local()
self.assertEqual(test_dates[1].time_zone.hours,
utc_offset_hours, test_dates[1])
self.assertEqual(test_dates[1].time_zone.minutes,
utc_offset_minutes, test_dates[1])
test_dates[2].set_time_zone(
data.TimeZone(hours=-13, minutes=-45))
test_dates[3].set_time_zone(
data.TimeZone(hours=8, minutes=30))
for i in range(len(test_dates)):
i_date_str = str(test_dates[i])
date_no_tz = test_dates[i].copy()
date_no_tz.time_zone = data.TimeZone(hours=0, minutes=0)
# TODO: https://github.com/metomi/isodatetime/issues/34.
if (test_dates[i].time_zone.hours >= 0 or
test_dates[i].time_zone.minutes >= 0):
utc_offset = date_no_tz - test_dates[i]
else:
utc_offset = (test_dates[i] - date_no_tz) * -1
self.assertEqual(utc_offset.hours,
test_dates[i].time_zone.hours,
i_date_str + " utc offset (hrs)")
self.assertEqual(utc_offset.minutes,
test_dates[i].time_zone.minutes,
i_date_str + " utc offset (mins)")
for j in range(len(test_dates)):
j_date_str = str(test_dates[j])
self.assertEqual(
test_dates[i], test_dates[j],
i_date_str + " == " + j_date_str
)
duration = test_dates[j] - test_dates[i]
self.assertEqual(
duration, data.Duration(days=0),
i_date_str + " - " + j_date_str
)
def test_timepoint_dumper(self):
"""Test the dumping of TimePoint instances."""
parser = parsers.TimePointParser(allow_truncated=True,
default_to_unknown_time_zone=True)
dumper = dumpers.TimePointDumper()
for expression, timepoint_kwargs in get_timepointparser_tests(
allow_truncated=True):
ctrl_timepoint = data.TimePoint(**timepoint_kwargs)
try:
test_timepoint = parser.parse(str(ctrl_timepoint))
except parsers.ISO8601SyntaxError as syn_exc:
raise ValueError(
"Parsing failed for the dump of {0}: {1}".format(
expression, syn_exc))
self.assertEqual(test_timepoint,
ctrl_timepoint, expression)
for timepoint_kwargs, format_results in (
get_timepoint_dumper_tests()):
ctrl_timepoint = data.TimePoint(**timepoint_kwargs)
for format_, ctrl_data in format_results:
test_data = dumper.dump(ctrl_timepoint, format_)
self.assertEqual(test_data, ctrl_data, format_)
for timepoint_kwargs, format_exception_results in (
get_timepointdumper_failure_tests()):
ctrl_timepoint = data.TimePoint(**timepoint_kwargs)
for format_, ctrl_exception, num_expanded_year_digits in (
format_exception_results):
dumper = dumpers.TimePointDumper(
num_expanded_year_digits=num_expanded_year_digits)
self.assertRaises(ctrl_exception, dumper.dump,
ctrl_timepoint, format_)
def test_timepoint_parser(self):
"""Test the parsing of date/time expressions."""
# Test unknown time zone assumptions.
parser = parsers.TimePointParser(
allow_truncated=True,
default_to_unknown_time_zone=True)
for expression, timepoint_kwargs in get_timepointparser_tests(
allow_truncated=True):
timepoint_kwargs = copy.deepcopy(timepoint_kwargs)
try:
test_data = str(parser.parse(expression))
except parsers.ISO8601SyntaxError as syn_exc:
raise ValueError("Parsing failed for {0}: {1}".format(
expression, syn_exc))
ctrl_data = str(data.TimePoint(**timepoint_kwargs))
self.assertEqual(test_data, ctrl_data, expression)
ctrl_data = expression
test_data = str(parser.parse(expression, dump_as_parsed=True))
self.assertEqual(test_data, ctrl_data, expression)
# Test local time zone assumptions (the default).
utc_offset_hours, utc_offset_minutes = (
get_local_time_zone_hours_minutes()
)
parser = parsers.TimePointParser(allow_truncated=True)
for expression, timepoint_kwargs in get_timepointparser_tests(
allow_truncated=True, skip_time_zones=True):
timepoint_kwargs = copy.deepcopy(timepoint_kwargs)
try:
test_timepoint = parser.parse(expression)
except parsers.ISO8601SyntaxError as syn_exc:
raise ValueError("Parsing failed for {0}: {1}".format(
expression, syn_exc))
test_data = (test_timepoint.time_zone.hours,
test_timepoint.time_zone.minutes)
ctrl_data = (utc_offset_hours, utc_offset_minutes)
self.assertEqual(test_data, ctrl_data,
"Local time zone for " + expression)
# Test given time zone assumptions.
utc_offset_hours, utc_offset_minutes = (
get_local_time_zone_hours_minutes()
)
given_utc_offset_hours = -2 # This is an arbitrary number!
if given_utc_offset_hours == utc_offset_hours:
# No point testing this twice, change it.
given_utc_offset_hours = -3
given_utc_offset_minutes = -15
given_time_zone_hours_minutes = (
given_utc_offset_hours, given_utc_offset_minutes)
parser = parsers.TimePointParser(
allow_truncated=True,
assumed_time_zone=given_time_zone_hours_minutes
)
for expression, timepoint_kwargs in get_timepointparser_tests(
allow_truncated=True, skip_time_zones=True):
timepoint_kwargs = copy.deepcopy(timepoint_kwargs)
try:
test_timepoint = parser.parse(expression)
except parsers.ISO8601SyntaxError as syn_exc:
raise ValueError("Parsing failed for {0}: {1}".format(
expression, syn_exc))
test_data = (test_timepoint.time_zone.hours,
test_timepoint.time_zone.minutes)
ctrl_data = given_time_zone_hours_minutes
self.assertEqual(test_data, ctrl_data,
"A given time zone for " + expression)
# Test UTC time zone assumptions.
parser = parsers.TimePointParser(
allow_truncated=True,
assumed_time_zone=(0, 0)
)
for expression, timepoint_kwargs in get_timepointparser_tests(
allow_truncated=True, skip_time_zones=True):
timepoint_kwargs = copy.deepcopy(timepoint_kwargs)
try:
test_timepoint = parser.parse(expression)
except parsers.ISO8601SyntaxError as syn_exc:
raise ValueError("Parsing failed for {0}: {1}".format(
expression, syn_exc))
test_data = (test_timepoint.time_zone.hours,
test_timepoint.time_zone.minutes)
ctrl_data = (0, 0)
self.assertEqual(test_data, ctrl_data,
"UTC for " + expression)
def test_timepoint_strftime_strptime(self):
"""Test the strftime/strptime for date/time expressions."""
import datetime
parser = parsers.TimePointParser()
parse_tokens = parser_spec.STRFTIME_TRANSLATE_INFO.keys()
parse_tokens.remove("%z") # Don't test datetime's tz handling.
format_string = ""
for i, token in enumerate(parse_tokens):
format_string += token
if i % 2 == 0:
format_string += " "
if i % 3 == 0:
format_string += ":"
if i % 5 == 0:
format_string += "?foobar"
if i % 7 == 0:
format_string += "++("
strftime_string = format_string
strptime_strings = [format_string]
for key in parser_spec.STRPTIME_EXCLUSIVE_GROUP_INFO.keys():
strptime_strings[-1] = strptime_strings[-1].replace(key, "")
strptime_strings.append(format_string)
for values in parser_spec.STRPTIME_EXCLUSIVE_GROUP_INFO.values():
for value in values:
strptime_strings[-1] = strptime_strings[-1].replace(value, "")
ctrl_date = datetime.datetime(2002, 3, 1, 12, 30, 2)
# Test %z dumping.
for sign in [1, -1]:
for hour in range(0, 24):
for minute in range(0, 59):
if hour == 0 and minute == 0 and sign == -1:
# -0000, same as +0000, but invalid.
continue
test_date = data.TimePoint(
year=ctrl_date.year,
month_of_year=ctrl_date.month,
day_of_month=ctrl_date.day,
hour_of_day=ctrl_date.hour,
minute_of_hour=ctrl_date.minute,
second_of_minute=ctrl_date.second,
time_zone_hour=sign * hour,
time_zone_minute=sign * minute
)
ctrl_string = "-" if sign == -1 else "+"
ctrl_string += "%02d%02d" % (hour, minute)
self.assertEqual(test_date.strftime("%z"),
ctrl_string,
"%z for " + str(test_date))
test_date = data.TimePoint(
year=ctrl_date.year,
month_of_year=ctrl_date.month,
day_of_month=ctrl_date.day,
hour_of_day=ctrl_date.hour,
minute_of_hour=ctrl_date.minute,
second_of_minute=ctrl_date.second
)
for test_date in [test_date, test_date.copy().to_week_date(),
test_date.copy().to_ordinal_date()]:
ctrl_data = ctrl_date.strftime(strftime_string)
test_data = test_date.strftime(strftime_string)
self.assertEqual(test_data, ctrl_data, strftime_string)
for strptime_string in strptime_strings:
ctrl_dump = ctrl_date.strftime(strptime_string)
test_dump = test_date.strftime(strptime_string)
self.assertEqual(test_dump, ctrl_dump, strptime_string)
if "%s" in strptime_string:
# The datetime library can't handle this for strptime!
ctrl_data = ctrl_date
else:
ctrl_data = datetime.datetime.strptime(
ctrl_dump, strptime_string)
test_data = parser.strptime(test_dump, strptime_string)
ctrl_data = (
ctrl_data.year, ctrl_data.month, ctrl_data.day,
ctrl_data.hour, ctrl_data.minute, ctrl_data.second
)
test_data = tuple(list(test_data.get_calendar_date()) +
list(test_data.get_hour_minute_second()))
if "%y" in strptime_string:
                    # %y is the two-digit year (00 to 99) within a century.
# The datetime library, for some reason, sets a default
# century of '2000' - so nuke this extra information.
ctrl_data = tuple([ctrl_data[0] % 100] +
list(ctrl_data[1:]))
self.assertEqual(test_data, ctrl_data, test_dump + "\n" +
strptime_string)
def test_timerecurrence_alt_calendars(self):
"""Test recurring date/time series for alternate calendars."""
for calendar_mode in ["360", "365", "366"]:
data.CALENDAR.set_mode(calendar_mode + "day")
self.assertEqual(
data.CALENDAR.mode,
getattr(data.Calendar, "MODE_%s" % calendar_mode)
)
parser = parsers.TimeRecurrenceParser()
tests = get_timerecurrence_expansion_tests_for_alt_calendar(
calendar_mode)
for expression, ctrl_results in tests:
try:
test_recurrence = parser.parse(expression)
except parsers.ISO8601SyntaxError:
raise ValueError(
"TimeRecurrenceParser test failed to parse '%s'" %
expression
)
test_results = []
for i, time_point in enumerate(test_recurrence):
test_results.append(str(time_point))
self.assertEqual(test_results, ctrl_results,
expression + "(%s)" % calendar_mode)
data.CALENDAR.set_mode()
self.assertEqual(data.CALENDAR.mode,
data.Calendar.MODE_GREGORIAN)
def test_timerecurrence(self):
"""Test the recurring date/time series data model."""
parser = parsers.TimeRecurrenceParser()
for expression, ctrl_results in get_timerecurrence_expansion_tests():
try:
test_recurrence = parser.parse(expression)
except parsers.ISO8601SyntaxError:
raise ValueError(
"TimeRecurrenceParser test failed to parse '%s'" %
expression
)
test_results = []
for i, time_point in enumerate(test_recurrence):
if i > 2:
break
test_results.append(str(time_point))
self.assertEqual(test_results, ctrl_results, expression)
if test_recurrence.start_point is None:
forward_method = test_recurrence.get_prev
backward_method = test_recurrence.get_next
else:
forward_method = test_recurrence.get_next
backward_method = test_recurrence.get_prev
test_points = [test_recurrence[0]]
test_points.append(forward_method(test_points[-1]))
test_points.append(forward_method(test_points[-1]))
test_results = [str(point) for point in test_points]
self.assertEqual(test_results, ctrl_results, expression)
if test_recurrence[2] is not None:
test_points = [test_recurrence[2]]
test_points.append(backward_method(test_points[-1]))
test_points.append(backward_method(test_points[-1]))
test_points.append(backward_method(test_points[-1]))
self.assertEqual(test_points[3], None, expression)
test_points.pop(3)
test_points.reverse()
test_results = [str(point) for point in test_points]
self.assertEqual(test_results, ctrl_results, expression)
for expression, results in get_timerecurrence_membership_tests():
try:
test_recurrence = parser.parse(expression)
except parsers.ISO8601SyntaxError:
raise ValueError(
"TimeRecurrenceParser test failed to parse '%s'" %
expression
)
for timepoint_expression, ctrl_is_member in results:
timepoint = parsers.parse_timepoint_expression(
timepoint_expression)
test_is_member = test_recurrence.get_is_valid(timepoint)
self.assertEqual(test_is_member, ctrl_is_member,
timepoint_expression + " in " + expression)
def test_timerecurrence_parser(self):
"""Test the recurring date/time series parsing."""
parser = parsers.TimeRecurrenceParser()
for expression, test_info in get_timerecurrenceparser_tests():
try:
test_data = str(parser.parse(expression))
except parsers.ISO8601SyntaxError:
raise ValueError("Parsing failed for %s" % expression)
ctrl_data = str(data.TimeRecurrence(**test_info))
self.assertEqual(test_data, ctrl_data, expression)
def assert_equal(data1, data2):
"""A function-level equivalent of the unittest method."""
assert data1 == data2
def test_timepoint_at_year(test_year):
"""Test the TimePoint and Calendar data model over a given year."""
import datetime
import random
my_date = datetime.datetime(test_year, 1, 1)
stop_date = datetime.datetime(test_year + 1, 1, 1)
test_duration_attributes = [
("weeks", 110),
("days", 770),
("hours", 770*24),
("minutes", 770 * 24 * 60),
("seconds", 770 * 24 * 60 * 60)
]
while my_date <= stop_date:
ctrl_data = my_date.isocalendar()
test_date = data.TimePoint(
year=my_date.year,
month_of_year=my_date.month,
day_of_month=my_date.day
)
test_week_date = test_date.to_week_date()
test_data = test_week_date.get_week_date()
assert_equal(test_data, ctrl_data)
ctrl_data = (my_date.year, my_date.month, my_date.day)
test_data = test_week_date.get_calendar_date()
assert_equal(test_data, ctrl_data)
ctrl_data = my_date.toordinal()
year, day_of_year = test_date.get_ordinal_date()
test_data = day_of_year
test_data += data.get_days_since_1_ad(year - 1)
assert_equal(test_data, ctrl_data)
for attribute, attr_max in test_duration_attributes:
delta_attr = random.randrange(0, attr_max)
kwargs = {attribute: delta_attr}
ctrl_data = my_date + datetime.timedelta(**kwargs)
ctrl_data = (ctrl_data.year, ctrl_data.month, ctrl_data.day)
test_data = (
test_date + data.Duration(
**kwargs)).get_calendar_date()
assert_equal(test_data, ctrl_data)
ctrl_data = (my_date - datetime.timedelta(**kwargs))
ctrl_data = (ctrl_data.year, ctrl_data.month, ctrl_data.day)
test_data = (
test_date - data.Duration(
**kwargs)).get_calendar_date()
assert_equal(test_data, ctrl_data)
kwargs = {}
for attribute, attr_max in test_duration_attributes:
delta_attr = random.randrange(0, attr_max)
kwargs[attribute] = delta_attr
test_date_minus = (
test_date - data.Duration(**kwargs))
test_data = test_date - test_date_minus
ctrl_data = data.Duration(**kwargs)
assert_equal(test_data, ctrl_data)
test_data = (test_date_minus + (test_date - test_date_minus))
ctrl_data = test_date
assert_equal(test_data, ctrl_data)
test_data = (test_date_minus + data.Duration(**kwargs))
ctrl_data = test_date
assert_equal(test_data, ctrl_data)
ctrl_data = (my_date + datetime.timedelta(minutes=450) +
datetime.timedelta(hours=5) -
datetime.timedelta(seconds=500, weeks=5))
ctrl_data = [(ctrl_data.year, ctrl_data.month, ctrl_data.day),
(ctrl_data.hour, ctrl_data.minute, ctrl_data.second)]
test_data = (
test_date + data.Duration(minutes=450) +
data.Duration(hours=5) -
data.Duration(weeks=5, seconds=500)
)
test_data = [test_data.get_calendar_date(),
test_data.get_hour_minute_second()]
assert_equal(test_data, ctrl_data)
timedelta = datetime.timedelta(days=1)
my_date += timedelta
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestSuite)
unittest.TextTestRunner(verbosity=2).run(suite)
|
savoirfairelinux/account-financial-tools
|
refs/heads/7.0
|
account_tax_update/model/__init__.py
|
46
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2012 Therp BV (<http://therp.nl>).
# This module copyright (C) 2013 Camptocamp (<http://www.camptocamp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import update_tax_config
from . import select_taxes
from . import account_tax
|
kyleabeauchamp/fah-projects
|
refs/heads/master
|
code/analysis/strip_water_siegetank.py
|
1
|
import numpy as np
import os
import glob
import mdtraj as md
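# Descriptive note (not in the original script): "system.subset.pdb" defines a
# solute-only atom subset, assumed to be the first trj0.n_atoms atoms of the
# full system. Each full trajectory is re-saved with just those atoms, and
# only trajectories longer than min_traj_length frames are kept.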
min_traj_length = 100
trj0 = md.load("system.subset.pdb")
filenames = glob.glob("full_Trajectories/*.h5")
for in_filename in filenames:
print(in_filename)
out_filename = os.path.join("./Trajectories/", os.path.split(in_filename)[1])
trj = md.load(in_filename, atom_indices=np.arange(trj0.n_atoms))
if len(trj) > min_traj_length:
trj.save(out_filename)
|
bpsinc-native/src_third_party_webpagereplay
|
refs/heads/master
|
third_party/nbhttp/push_tcp.py
|
9
|
#!/usr/bin/env python
import traceback
__copyright__ = """\
Copyright (c) 2008-2009 Mark Nottingham
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
push-based asynchronous TCP
This is a generic library for building event-based / asynchronous
TCP servers and clients.
By default, it uses the asyncore library included with Python.
However, if the pyevent library
<http://www.monkey.org/~dugsong/pyevent/> is available, it will
use that, offering higher concurrency and, perhaps, performance.
It uses a push model; i.e., the network connection pushes data to
you (using a callback), and you push data to the network connection
(using a direct method invocation).
*** Building Clients
To connect to a server, use create_client;
> host = 'www.example.com'
> port = '80'
> push_tcp.create_client(host, port, conn_handler, error_handler)
conn_handler will be called with the tcp_conn as the argument
when the connection is made. See "Working with Connections"
below for details.
error_handler will be called if the connection can't be made for some reason.
> def error_handler(host, port, reason):
> print "can't connect to %s:%s: %s" % (host, port, reason)
*** Building Servers
To start listening, use create_server (note that this version also takes
SSL arguments: use_ssl, certfile and keyfile);
> server = push_tcp.create_server(host, port, use_ssl, certfile, keyfile, conn_handler)
conn_handler is called every time a new client connects; see
"Working with Connections" below for details.
The server object itself keeps track of all of the open connections, and
can be used to do things like idle connection management, etc.
*** Working with Connections
Every time a new connection is established -- whether as a client
or as a server -- the conn_handler given is called with tcp_conn
as its argument;
> def conn_handler(tcp_conn):
> print "connected to %s:%s" % tcp_conn.host, tcp_conn.port
> return read_cb, close_cb, pause_cb
It must return a (read_cb, close_cb, pause_cb) tuple.
read_cb will be called every time incoming data is available from
the connection;
> def read_cb(data):
> print "got some data:", data
When you want to write to the connection, just write to it:
> tcp_conn.write(data)
If you want to close the connection from your side, just call close:
> tcp_conn.close()
Note that this will flush any data already written.
If the other side closes the connection, close_cb will be called;
> def close_cb():
> print "oops, they don't like us any more..."
If you write too much data to the connection and the buffers fill up,
pause_cb will be called with True to tell you to stop sending data
temporarily;
> def pause_cb(paused):
> if paused:
> # stop sending data
> else:
> # it's OK to start again
Note that this is advisory; if you ignore it, the data will still be
buffered, but the buffer will grow.
Likewise, if you want to pause the connection because your buffers
are full, call pause;
> tcp_conn.pause(True)
but don't forget to tell it when it's OK to send data again;
> tcp_conn.pause(False)
*** Timed Events
It's often useful to schedule an event to be run some time in the future;
> push_tcp.schedule(10, cb, "foo")
This example will schedule the function 'cb' to be called with the argument
"foo" ten seconds in the future.
*** Running the loop
In all cases (clients, servers, and timed events), you'll need to start
the event loop before anything actually happens;
> push_tcp.run()
To stop it, just stop it;
> push_tcp.stop()
"""
__author__ = "Mark Nottingham <mnot@mnot.net>"
import sys
import socket
import ssl
import errno
import asyncore
import time
import bisect
import logging
import select
try:
import event # http://www.monkey.org/~dugsong/pyevent/
except ImportError:
event = None
class _TcpConnection(asyncore.dispatcher):
"Base class for a TCP connection."
write_bufsize = 16
read_bufsize = 1024 * 4
def __init__(self,
sock,
is_server,
host,
port,
use_ssl,
certfile,
keyfile,
connect_error_handler=None):
self.use_ssl = use_ssl
self.socket = sock
if is_server:
if self.use_ssl:
try:
self.socket = ssl.wrap_socket(sock,
server_side=True,
certfile=certfile,
keyfile=keyfile,
do_handshake_on_connect=False)
self.socket.do_handshake()
except ssl.SSLError, err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
select.select([self.socket], [], [])
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
select.select([], [self.socket], [])
else:
raise
self.host = host
self.port = port
self.connect_error_handler = connect_error_handler
self.read_cb = None
self.close_cb = None
self._close_cb_called = False
self.pause_cb = None
self.tcp_connected = True # always handed a connected socket (we assume)
self._paused = False # TODO: should be paused by default
self._closing = False
self._write_buffer = []
if event:
self._revent = event.read(self.socket, self.handle_read)
self._wevent = event.write(self.socket, self.handle_write)
else: # asyncore
asyncore.dispatcher.__init__(self, self.socket)
def handle_read(self):
"""
        The connection has data ready for reading; call read_cb
if appropriate.
"""
# We want to read as much data as we can, loop here until we get EAGAIN
while True:
try:
data = self.recv(self.read_bufsize)
except socket.error, why:
if why[0] in [errno.EBADF, errno.ECONNRESET, errno.EPIPE, errno.ETIMEDOUT]:
self.conn_closed()
return
elif why[0] in [errno.ECONNREFUSED, errno.ENETUNREACH] and self.connect_error_handler:
self.tcp_connected = False
self.connect_error_handler(why[0])
return
elif why[0] in [errno.EAGAIN]:
return
else:
raise
if data == "":
self.conn_closed()
return
else:
self.read_cb(data)
if event:
if self.read_cb and self.tcp_connected and not self._paused:
return self._revent
return
def handle_write(self):
"The connection is ready for writing; write any buffered data."
try:
# This write could be more efficient and coalesce multiple elements
# of the _write_buffer into a single write. However, the stock
# ssl library with python needs us to pass the same buffer back
# after a socket.send() returns 0 bytes. To fix this, we need
# to use the SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this can
# only be done on the context in the ssl.c code. So, we work
# around this problem by not coalescing buffers. Repeated calls
# to handle_write after SSL errors always hand the same buffer
# to the SSL library, and it works.
while len(self._write_buffer):
data = self._write_buffer[0]
sent = self.socket.send(self._write_buffer[0])
if sent == len(self._write_buffer[0]):
self._write_buffer = self._write_buffer[1:]
else:
# Only did a partial write.
self._write_buffer[0] = self._write_buffer[0][sent:]
except ssl.SSLError, err:
logging.error(str(self.socket) + "SSL Write error: " + str(err))
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
select.select([self.socket], [], [])
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
select.select([], [self.socket], [])
else:
raise
except socket.error, why:
if why[0] in [errno.EBADF, errno.ECONNRESET, errno.EPIPE, errno.ETIMEDOUT]:
self.conn_closed()
return
elif why[0] in [errno.ECONNREFUSED, errno.ENETUNREACH] and \
self.connect_error_handler:
self.tcp_connected = False
self.connect_error_handler(why[0])
return
elif why[0] in [errno.EAGAIN]:
pass
else:
raise
if self.pause_cb and len(self._write_buffer) < self.write_bufsize:
self.pause_cb(False)
if self._closing:
self.close()
if event:
if self.tcp_connected and (len(self._write_buffer) > 0 or self._closing):
return self._wevent
def conn_closed(self):
"""
The connection has been closed by the other side. Do local cleanup
and then call close_cb.
"""
self.socket.close()
self.tcp_connected = False
if self._close_cb_called:
return
elif self.close_cb:
self._close_cb_called = True
self.close_cb()
else:
# uncomfortable race condition here, so we try again.
# not great, but ok for now.
schedule(1, self.conn_closed)
handle_close = conn_closed # for asyncore
def write(self, data):
"Write data to the connection."
# assert not self._paused
# For SSL we want to write small chunks so that we don't end up with
# multi-packet spans of data. Multi-packet spans leads to increased
# latency because all packets must be received before any of the
# packets can be delivered to the application layer.
use_small_chunks = True
if use_small_chunks:
data_length = len(data)
start_pos = 0
while data_length > 0:
chunk_length = min(data_length, 1460)
self._write_buffer.append(data[start_pos:start_pos + chunk_length])
start_pos += chunk_length
data_length -= chunk_length
else:
self._write_buffer.append(data)
if self.pause_cb and len(self._write_buffer) > self.write_bufsize:
self.pause_cb(True)
if event:
if not self._wevent.pending():
self._wevent.add()
def pause(self, paused):
"""
Temporarily stop/start reading from the connection and pushing
it to the app.
"""
if event:
if paused:
if self._revent.pending():
self._revent.delete()
else:
if not self._revent.pending():
self._revent.add()
self._paused = paused
def close(self):
"Flush buffered data (if any) and close the connection."
self.pause(True)
if len(self._write_buffer) > 0:
self._closing = True
else:
self.socket.close()
self.tcp_connected = False
def readable(self):
"asyncore-specific readable method"
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return self.read_cb and self.tcp_connected and not self._paused
def writable(self):
"asyncore-specific writable method"
return self.tcp_connected and \
(len(self._write_buffer) > 0 or self._closing)
def handle_error(self):
"asyncore-specific error method"
if self.use_ssl:
err = sys.exc_info()
if issubclass(err[0], socket.error):
self.connect_error_handler(err[0])
else:
raise
else:
# Treat the error as a connection closed.
t, err, tb = sys.exc_info()
logging.error("TCP error!" + str(err))
self.conn_closed()
class create_server(asyncore.dispatcher):
"An asynchrnous TCP server."
def __init__(self, host, port, use_ssl, certfile, keyfile, conn_handler):
self.host = host
self.port = port
self.use_ssl = use_ssl
self.certfile = certfile
self.keyfile = keyfile
self.conn_handler = conn_handler
if event:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(socket.SOMAXCONN)
event.event(self.handle_accept, handle=sock,
evtype=event.EV_READ|event.EV_PERSIST).add()
else: # asyncore
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(socket.SOMAXCONN) # TODO: set SO_SNDBUF, SO_RCVBUF
def handle_accept(self, *args):
if event:
conn, addr = args[1].accept()
else: # asyncore
conn, addr = self.accept()
tcp_conn = _TcpConnection(conn,
True,
self.host,
self.port,
self.use_ssl,
self.certfile,
self.keyfile,
self.handle_error)
tcp_conn.read_cb, tcp_conn.close_cb, tcp_conn.pause_cb = self.conn_handler(tcp_conn)
def handle_error(self, err=None):
#raise AssertionError, "this (%s) should never happen for a server." % err
pass
class create_client(asyncore.dispatcher):
"An asynchronous TCP client."
def __init__(self, host, port, conn_handler, connect_error_handler, timeout=None):
self.host = host
self.port = port
self.conn_handler = conn_handler
self.connect_error_handler = connect_error_handler
self._timeout_ev = None
self._conn_handled = False
self._error_sent = False
# TODO: socket.getaddrinfo(); needs to be non-blocking.
if event:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(0)
event.write(sock, self.handle_connect, sock).add()
try:
err = sock.connect_ex((host, port)) # FIXME: check for DNS errors, etc.
except socket.error, why:
self.handle_error(why)
return
if err != errno.EINPROGRESS: # FIXME: others?
self.handle_error(err)
else: # asyncore
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.connect((host, port))
except socket.error, why:
self.handle_error(why[0])
if timeout:
to_err = errno.ETIMEDOUT
self._timeout_ev = schedule(timeout, self.handle_error, to_err)
def handle_connect(self, sock=None):
if self._timeout_ev:
self._timeout_ev.delete()
if sock is None: # asyncore
sock = self.socket
tcp_conn = _TcpConnection(sock,
False,
self.host,
self.port,
False, # use_ssl
                                  None,  # certfile (unused: use_ssl is False)
                                  None,  # keyfile (unused: use_ssl is False)
self.handle_error)
tcp_conn.read_cb, tcp_conn.close_cb, tcp_conn.pause_cb = self.conn_handler(tcp_conn)
def handle_write(self):
pass
def handle_error(self, err=None):
if self._timeout_ev:
self._timeout_ev.delete()
if not self._error_sent:
self._error_sent = True
if err == None:
t, err, tb = sys.exc_info()
self.connect_error_handler(self.host, self.port, err)
# adapted from Medusa
class _AsyncoreLoop:
"Asyncore main loop + event scheduling."
def __init__(self):
self.events = []
self.num_channels = 0
self.max_channels = 0
self.timeout = 0.001 # 1ms
self.granularity = 0.001 # 1ms
self.socket_map = asyncore.socket_map
def run(self):
"Start the loop."
last_event_check = 0
while self.socket_map or self.events:
now = time.time()
if (now - last_event_check) >= self.granularity:
last_event_check = now
for event in self.events:
when, what = event
if now >= when:
self.events.remove(event)
what()
else:
break
# sample the number of channels
n = len(self.socket_map)
self.num_channels = n
if n > self.max_channels:
self.max_channels = n
asyncore.poll(self.timeout)
def stop(self):
"Stop the loop."
self.socket_map = {}
self.events = []
def schedule(self, delta, callback, *args):
"Schedule callable callback to be run in delta seconds with *args."
def cb():
if callback:
callback(*args)
new_event = (time.time() + delta, cb)
events = self.events
bisect.insort(events, new_event)
class event_holder:
def __init__(self):
self._deleted = False
def delete(self):
if not self._deleted:
try:
events.remove(new_event)
self._deleted = True
except ValueError: # already gone
pass
return event_holder()
if event:
schedule = event.timeout
run = event.dispatch
stop = event.abort
else:
_loop = _AsyncoreLoop()
schedule = _loop.schedule
run = _loop.run
stop = _loop.stop
|
procoder317/scikit-learn
|
refs/heads/master
|
sklearn/datasets/tests/test_svmlight_format.py
|
228
|
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
|
shownomercy/django
|
refs/heads/master
|
django/conf/locale/de_CH/formats.py
|
504
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
from __future__ import unicode_literals
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
# these are the separators for non-monetary numbers. For monetary numbers,
# the DECIMAL_SEPARATOR is a . (decimal point) and the THOUSAND_SEPARATOR is a
# ' (single quote).
# For details, please refer to http://www.bk.admin.ch/dokumentation/sprachen/04915/05016/index.html?lang=de
# (in German) and the documentation
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
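# Illustrative note (not part of the original file): inside a configured
# Django project with USE_L10N and USE_THOUSAND_SEPARATOR enabled, these
# settings render 1234.56 as '1 234,56' (the group separator being a
# non-breaking space), e.g.
#
#   from django.utils import numberformat
#   numberformat.format(1234.56, ',', decimal_pos=2, grouping=3,
#                       thousand_sep='\xa0', force_grouping=True)
#   # -> '1\xa0234,56'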
|
IV-GII/Ingenia
|
refs/heads/master
|
Versiones antiguas/proyecto_base/pedidos/urls.py
|
3
|
from django.conf.urls import patterns, url
from pedidos import views
urlpatterns = patterns('',
url(r'^$',views.index, name='index'),
url(r'^alta_usuario/',views.alta_usuario, name='alta_usuario'),
url(r'^asignar_pedido/',views.asignar_pedido, name='asignar_pedido'),
url(r'^actualizar_pedido/',views.actualizar_pedido, name='actualizar_pedido'),
url(r'^estado_pedido/',views.estado_pedido, name='estado_pedido'),
)
|
ptemplier/ansible
|
refs/heads/devel
|
lib/ansible/modules/monitoring/sensu_subscription.py
|
29
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Anders Ingemann <aim@secoya.dk>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sensu_subscription
short_description: Manage Sensu subscriptions
version_added: 2.2
description:
- Manage which I(sensu channels) a machine should subscribe to
options:
name:
description:
- The name of the channel
required: true
state:
description:
- Whether the machine should subscribe or unsubscribe from the channel
choices: [ 'present', 'absent' ]
required: false
default: present
path:
description:
- Path to the subscriptions json file
required: false
default: /etc/sensu/conf.d/subscriptions.json
backup:
description:
- Create a backup file (if yes), including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
choices: [ 'yes', 'no' ]
required: false
default: no
requirements: [ ]
author: Anders Ingemann
'''
RETURN = '''
reasons:
    description: the reasons why the module changed or did not change something
returned: success
type: list
sample: ["channel subscription was absent and state is `present'"]
'''
EXAMPLES = '''
# Subscribe to the nginx channel
- name: subscribe to nginx checks
sensu_subscription: name=nginx
# Unsubscribe from the common checks channel
- name: unsubscribe from common checks
sensu_subscription: name=common state=absent
'''
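# Illustrative only (not part of the original examples): the path and backup
# options documented above can be combined with a subscription, e.g.
#
# - name: subscribe to nginx checks, keeping a timestamped backup of the file
#   sensu_subscription: name=nginx path=/etc/sensu/conf.d/subscriptions.json backup=yes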
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def sensu_subscription(module, path, name, state='present', backup=False):
changed = False
reasons = []
try:
import json
except ImportError:
import simplejson as json
try:
config = json.load(open(path))
except IOError as e:
        if e.errno == 2: # File not found, non-fatal
if state == 'absent':
reasons.append('file did not exist and state is `absent\'')
return changed, reasons
config = {}
else:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except ValueError:
msg = '{path} contains invalid JSON'.format(path=path)
module.fail_json(msg=msg)
if 'client' not in config:
if state == 'absent':
reasons.append('`client\' did not exist and state is `absent\'')
return changed, reasons
config['client'] = {}
changed = True
reasons.append('`client\' did not exist')
if 'subscriptions' not in config['client']:
if state == 'absent':
reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
return changed, reasons
config['client']['subscriptions'] = []
changed = True
reasons.append('`client.subscriptions\' did not exist')
if name not in config['client']['subscriptions']:
if state == 'absent':
reasons.append('channel subscription was absent')
return changed, reasons
config['client']['subscriptions'].append(name)
changed = True
reasons.append('channel subscription was absent and state is `present\'')
else:
if state == 'absent':
config['client']['subscriptions'].remove(name)
changed = True
reasons.append('channel subscription was present and state is `absent\'')
if changed and not module.check_mode:
if backup:
module.backup_local(path)
try:
            with open(path, 'w') as subscriptions_file:
                subscriptions_file.write(json.dumps(config, indent=2) + '\n')
except IOError as e:
module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)),
exception=traceback.format_exc())
return changed, reasons
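# Illustrative sketch (not part of the module above): the in-memory effect of
# the subscribe path in sensu_subscription(), using an assumed channel name.
def _example_subscribe(config, name):
    """Ensure `name` is present under client.subscriptions, as the task does."""
    subscriptions = config.setdefault('client', {}).setdefault('subscriptions', [])
    if name not in subscriptions:
        subscriptions.append(name)
    return config
# e.g. _example_subscribe({}, 'nginx') -> {'client': {'subscriptions': ['nginx']}}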
def main():
arg_spec = {'name': {'type': 'str', 'required': True},
'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
'backup': {'type': 'bool', 'default': 'no'},
}
module = AnsibleModule(argument_spec=arg_spec,
supports_check_mode=True)
path = module.params['path']
name = module.params['name']
state = module.params['state']
backup = module.params['backup']
changed, reasons = sensu_subscription(module, path, name, state, backup)
module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons)
if __name__ == '__main__':
main()
|
hungtt57/matchmaker
|
refs/heads/master
|
lib/python2.7/site-packages/pyasn1/type/constraint.py
|
382
|
#
# ASN.1 subtype constraints classes.
#
# Constraints are relatively rare, but every ASN.1 object
# checks all the time whether it has any constraints and
# whether they are applicable to the object.
#
# What we're going to do is define objects/functions that
# can be called unconditionally if they are present, and that
# are simply not present if there are no constraints.
#
# Original concept and code by Mike C. Fletcher.
#
import sys
from pyasn1.type import error
class AbstractConstraint:
"""Abstract base-class for constraint objects
Constraints should be stored in a simple sequence in the
namespace of their client Asn1Item sub-classes.
"""
def __init__(self, *values):
self._valueMap = {}
self._setValues(values)
self.__hashedValues = None
def __call__(self, value, idx=None):
try:
self._testValue(value, idx)
except error.ValueConstraintError:
raise error.ValueConstraintError(
'%s failed at: \"%s\"' % (self, sys.exc_info()[1])
)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join([repr(x) for x in self._values])
)
def __eq__(self, other):
return self is other and True or self._values == other
def __ne__(self, other): return self._values != other
def __lt__(self, other): return self._values < other
def __le__(self, other): return self._values <= other
def __gt__(self, other): return self._values > other
def __ge__(self, other): return self._values >= other
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._values)
else:
def __bool__(self): return bool(self._values)
def __hash__(self):
if self.__hashedValues is None:
self.__hashedValues = hash((self.__class__.__name__, self._values))
return self.__hashedValues
def _setValues(self, values): self._values = values
def _testValue(self, value, idx):
raise error.ValueConstraintError(value)
# Constraints derivation logic
def getValueMap(self): return self._valueMap
def isSuperTypeOf(self, otherConstraint):
return self in otherConstraint.getValueMap() or \
otherConstraint is self or otherConstraint == self
def isSubTypeOf(self, otherConstraint):
return otherConstraint in self._valueMap or \
otherConstraint is self or otherConstraint == self
class SingleValueConstraint(AbstractConstraint):
"""Value must be part of defined values constraint"""
def _testValue(self, value, idx):
# XXX index vals for performance?
if value not in self._values:
raise error.ValueConstraintError(value)
class ContainedSubtypeConstraint(AbstractConstraint):
"""Value must satisfy all of defined set of constraints"""
def _testValue(self, value, idx):
for c in self._values:
c(value, idx)
class ValueRangeConstraint(AbstractConstraint):
"""Value must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
if value < self.start or value > self.stop:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 2:
raise error.PyAsn1Error(
'%s: bad constraint values' % (self.__class__.__name__,)
)
self.start, self.stop = values
if self.start > self.stop:
raise error.PyAsn1Error(
'%s: screwed constraint values (start > stop): %s > %s' % (
self.__class__.__name__,
self.start, self.stop
)
)
AbstractConstraint._setValues(self, values)
class ValueSizeConstraint(ValueRangeConstraint):
"""len(value) must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
l = len(value)
if l < self.start or l > self.stop:
raise error.ValueConstraintError(value)
class PermittedAlphabetConstraint(SingleValueConstraint):
def _setValues(self, values):
self._values = ()
for v in values:
self._values = self._values + tuple(v)
def _testValue(self, value, idx):
for v in value:
if v not in self._values:
raise error.ValueConstraintError(value)
# This is a bit kludgy, meaning two op modes within a single constraint
class InnerTypeConstraint(AbstractConstraint):
"""Value must satisfy type and presense constraints"""
def _testValue(self, value, idx):
if self.__singleTypeConstraint:
self.__singleTypeConstraint(value)
elif self.__multipleTypeConstraint:
if idx not in self.__multipleTypeConstraint:
raise error.ValueConstraintError(value)
constraint, status = self.__multipleTypeConstraint[idx]
            if status == 'ABSENT': # XXX presence is not checked!
raise error.ValueConstraintError(value)
constraint(value)
def _setValues(self, values):
self.__multipleTypeConstraint = {}
self.__singleTypeConstraint = None
for v in values:
if isinstance(v, tuple):
self.__multipleTypeConstraint[v[0]] = v[1], v[2]
else:
self.__singleTypeConstraint = v
AbstractConstraint._setValues(self, values)
# Boolean ops on constraints
class ConstraintsExclusion(AbstractConstraint):
"""Value must not fit the single constraint"""
def _testValue(self, value, idx):
try:
self._values[0](value, idx)
except error.ValueConstraintError:
return
else:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 1:
raise error.PyAsn1Error('Single constraint expected')
AbstractConstraint._setValues(self, values)
class AbstractConstraintSet(AbstractConstraint):
"""Value must not satisfy the single constraint"""
def __getitem__(self, idx): return self._values[idx]
def __add__(self, value): return self.__class__(self, value)
def __radd__(self, value): return self.__class__(self, value)
def __len__(self): return len(self._values)
# Constraints inclusion in sets
def _setValues(self, values):
self._values = values
for v in values:
self._valueMap[v] = 1
self._valueMap.update(v.getValueMap())
class ConstraintsIntersection(AbstractConstraintSet):
"""Value must satisfy all constraints"""
def _testValue(self, value, idx):
for v in self._values:
v(value, idx)
class ConstraintsUnion(AbstractConstraintSet):
"""Value must satisfy at least one constraint"""
def _testValue(self, value, idx):
for v in self._values:
try:
v(value, idx)
except error.ValueConstraintError:
pass
else:
return
raise error.ValueConstraintError(
'all of %s failed for \"%s\"' % (self._values, value)
)
# XXX
# add tests for type check
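# Illustrative usage sketch (not part of pyasn1 itself): composing the classes
# above. The sample values 0, 10 and 5 are assumptions for the example.
def _example_constraint_composition():
    """A value must lie in [0, 10] but must not equal 5."""
    spec = ConstraintsIntersection(
        ValueRangeConstraint(0, 10),
        ConstraintsExclusion(SingleValueConstraint(5)),
    )
    spec(3)      # passes silently
    try:
        spec(5)  # violates the exclusion
    except error.ValueConstraintError:
        pass
    return spec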
|
rhelmer/socorro-lib
|
refs/heads/master
|
socorro/external/es/supersearch.py
|
1
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import re
from elasticsearch_dsl import Search, F, Q
from elasticsearch.exceptions import NotFoundError
from socorro.external import (
BadArgumentError,
)
from socorro.external.es.super_search_fields import SuperSearchFields
from socorro.lib import datetimeutil
from socorro.lib.search_common import SearchBase
BAD_INDEX_REGEX = re.compile(r'\[\[(.*)\] missing\]')
class SuperSearch(SearchBase):
def __init__(self, *args, **kwargs):
self.config = kwargs.get('config')
self.es_context = self.config.elasticsearch.elasticsearch_class(
self.config.elasticsearch
)
self.all_fields = SuperSearchFields(config=self.config).get_fields()
# Create a map to associate a field's name in the database to its
# exposed name (in the results and facets).
self.database_name_to_field_name_map = dict(
(x['in_database_name'], x['name'])
for x in self.all_fields.values()
)
kwargs.update(fields=self.all_fields)
super(SuperSearch, self).__init__(
*args, **kwargs
)
def get_connection(self):
with self.es_context() as conn:
return conn
def generate_list_of_indices(self, from_date, to_date, es_index=None):
"""Return the list of indices to query to access all the crash reports
that were processed between from_date and to_date.
        The naming pattern for indices in elasticsearch is configurable; it is
        possible to have an index per day, per week, per month...
Parameters:
* from_date datetime object
* to_date datetime object
"""
if es_index is None:
es_index = self.config.elasticsearch_index
indices = []
current_date = from_date
while current_date <= to_date:
index = current_date.strftime(es_index)
            # Make sure no index appears twice in the list
# (for weekly or monthly indices for example)
if index not in indices:
indices.append(index)
current_date += datetime.timedelta(days=1)
return indices
def get_indices(self, dates):
"""Return the list of indices to use for given dates. """
start_date = None
end_date = None
for date in dates:
if '>' in date.operator:
start_date = date.value
if '<' in date.operator:
end_date = date.value
return self.generate_list_of_indices(start_date, end_date)
def format_field_names(self, hit):
"""Return a hit with each field's database name replaced by its
exposed name. """
new_hit = {}
for field in hit:
new_field = field
if '.' in new_field:
# Remove the prefix ("processed_crash." or "raw_crash.").
new_field = new_field.split('.')[-1]
new_field = self.database_name_to_field_name_map.get(
new_field, new_field
)
new_hit[new_field] = hit[field]
return new_hit
def format_fields(self, hit):
"""Return a well formatted document.
Elasticsearch returns values as lists when using the `fields` option.
This function removes the list when it contains zero or one element.
It also calls `format_field_names` to correct all the field names.
"""
hit = self.format_field_names(hit)
for field in hit:
if isinstance(hit[field], (list, tuple)):
if len(hit[field]) == 0:
hit[field] = None
elif len(hit[field]) == 1:
hit[field] = hit[field][0]
return hit
def format_aggregations(self, aggregations):
"""Return aggregations in a form that looks like facets.
We used to expose the Elasticsearch facets directly. This is thus
needed for backwards compatibility.
"""
aggs = aggregations.to_dict()
for agg in aggs:
for i, row in enumerate(aggs[agg]['buckets']):
aggs[agg]['buckets'][i] = {
'term': row['key'],
'count': row['doc_count'],
}
aggs[agg] = aggs[agg]['buckets']
return aggs
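    # Illustrative sketch of the conversion above (values are assumed examples):
    # a raw terms aggregation such as
    #     {'product': {'buckets': [{'key': 'Firefox', 'doc_count': 42}]}}
    # becomes the facet-style form
    #     {'product': [{'term': 'Firefox', 'count': 42}]}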
def get(self, **kwargs):
"""Return a list of results and aggregations based on parameters.
The list of accepted parameters (with types and default values) is in
the database and can be accessed with the super_search_fields service.
"""
# Filter parameters and raise potential errors.
params = self.get_parameters(**kwargs)
# Find the indices to use to optimize the elasticsearch query.
indices = self.get_indices(params['date'])
# Create and configure the search object.
search = Search(
using=self.get_connection(),
index=indices,
doc_type=self.config.elasticsearch.elasticsearch_doctype,
)
# Create filters.
filters = None
for field, sub_params in params.items():
sub_filters = None
for param in sub_params:
if param.name.startswith('_'):
if param.name == '_results_offset':
results_from = param.value[0]
elif param.name == '_results_number':
results_number = param.value[0]
# Don't use meta parameters in the query.
continue
field_data = self.all_fields[param.name]
name = '%s.%s' % (
field_data['namespace'],
field_data['in_database_name']
)
if param.data_type in ('date', 'datetime'):
param.value = datetimeutil.date_to_string(param.value)
elif param.data_type == 'enum':
param.value = [x.lower() for x in param.value]
elif param.data_type == 'str' and not param.operator:
param.value = [x.lower() for x in param.value]
args = {}
filter_type = 'term'
filter_value = None
if not param.operator:
# contains one of the terms
if len(param.value) == 1:
val = param.value[0]
if not isinstance(val, basestring) or (
isinstance(val, basestring) and ' ' not in val
):
filter_value = val
# If the term contains white spaces, we want to perform
# a phrase query. Thus we do nothing here and let this
# value be handled later.
else:
filter_type = 'terms'
filter_value = param.value
elif param.operator == '=':
# is exactly
if field_data['has_full_version']:
name = '%s.full' % name
filter_value = param.value
elif param.operator == '>':
# greater than
filter_type = 'range'
filter_value = {
'gt': param.value
}
elif param.operator == '<':
# lower than
filter_type = 'range'
filter_value = {
'lt': param.value
}
elif param.operator == '>=':
# greater than or equal to
filter_type = 'range'
filter_value = {
'gte': param.value
}
elif param.operator == '<=':
# lower than or equal to
filter_type = 'range'
filter_value = {
'lte': param.value
}
elif param.operator == '__null__':
# is null
filter_type = 'missing'
args['field'] = name
if filter_value is not None:
args[name] = filter_value
if args:
if param.operator_not:
new_filter = ~F(filter_type, **args)
else:
new_filter = F(filter_type, **args)
if sub_filters is None:
sub_filters = new_filter
elif param.data_type == 'enum':
sub_filters |= new_filter
else:
sub_filters &= new_filter
continue
# These use a wildcard and thus need to be in a query
# instead of a filter.
operator_wildcards = {
'~': '*%s*', # contains
'$': '%s*', # starts with
'^': '*%s' # ends with
}
if param.operator in operator_wildcards:
if field_data['has_full_version']:
name = '%s.full' % name
query_type = 'wildcard'
args[name] = (
operator_wildcards[param.operator] % param.value
)
elif not param.operator:
# This is a phrase that was passed down.
query_type = 'simple_query_string'
args['query'] = param.value[0]
args['fields'] = [name]
args['default_operator'] = 'and'
if args:
query = Q(query_type, **args)
if param.operator_not:
query = ~query
search = search.query(query)
else:
# If we reach this point, that means the operator is
# not supported, and we should raise an error about that.
raise NotImplementedError(
'Operator %s is not supported' % param.operator
)
if filters is None:
filters = sub_filters
elif sub_filters is not None:
filters &= sub_filters
search = search.filter(filters)
# Pagination.
results_to = results_from + results_number
search = search[results_from:results_to]
# Create facets.
for param in params['_facets']:
for value in param.value:
try:
field_ = self.all_fields[value]
except KeyError:
# That is not a known field, we can't facet on it.
raise BadArgumentError(
value,
msg='Unknown field "%s", cannot facet on it' % value
)
field_name = '%s.%s' % (
field_['namespace'],
field_['in_database_name']
)
if field_['has_full_version']:
# If the param has a full version, that means what matters
# is the full string, and not its individual terms.
field_name += '.full'
search.aggs.bucket(
value,
'terms',
field=field_name,
size=self.config.facets_max_number
)
# Query and compute results.
hits = []
fields = [
'%s.%s' % (x['namespace'], x['in_database_name'])
for x in self.all_fields.values()
if x['is_returned']
]
search = search.fields(*fields)
if params['_return_query'][0].value[0]:
# Return only the JSON query that would be sent to elasticsearch.
return {
'query': search.to_dict(),
'indices': indices,
}
# We call elasticsearch with a computed list of indices, based on
# the date range. However, if that list contains indices that do not
# exist in elasticsearch, an error will be raised. We thus want to
# remove all failing indices until we either have a valid list, or
# an empty list in which case we return no result.
while True:
try:
results = search.execute()
for hit in results:
hits.append(self.format_fields(hit.to_dict()))
total = search.count()
aggregations = self.format_aggregations(results.aggregations)
break # Yay! Results!
            except NotFoundError as e:
missing_index = re.findall(BAD_INDEX_REGEX, e.error)[0]
if missing_index in indices:
del indices[indices.index(missing_index)]
else:
# Wait what? An error caused by an index that was not
# in the request? That should never happen, but in case
# it does, better know it.
raise
if indices:
# Update the list of indices and try again.
# Note: we need to first empty the list of indices before
# updating it, otherwise the removed indices never get
# actually removed.
search = search.index().index(*indices)
else:
# There is no index left in the list, return an empty
# result.
hits = []
total = 0
aggregations = {}
break
return {
'hits': hits,
'total': total,
'facets': aggregations,
}
# For backwards compatibility with the previous elasticsearch module.
# All those methods used to live in this file, but have been moved to
# the super_search_fields.py file now. Since the configuration of the
# middleware expect those to still be here, we bind them for now.
def get_fields(self, **kwargs):
return SuperSearchFields(config=self.config).get_fields(**kwargs)
def create_field(self, **kwargs):
return SuperSearchFields(config=self.config).create_field(**kwargs)
def update_field(self, **kwargs):
return SuperSearchFields(config=self.config).update_field(**kwargs)
def delete_field(self, **kwargs):
return SuperSearchFields(config=self.config).delete_field(**kwargs)
def get_missing_fields(self):
return SuperSearchFields(config=self.config).get_missing_fields()
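# Illustrative sketch (not part of the class above): how the daily walk in
# generate_list_of_indices() deduplicates weekly indices. The index template
# 'socorro%Y%W' and the dates are assumed example values.
def _example_weekly_indices():
    indices = []
    current = datetime.date(2015, 1, 5)
    while current <= datetime.date(2015, 1, 20):
        index = current.strftime('socorro%Y%W')
        if index not in indices:
            indices.append(index)
        current += datetime.timedelta(days=1)
    return indices  # ['socorro201501', 'socorro201502', 'socorro201503']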
|
itsjeyd/edx-platform
|
refs/heads/master
|
lms/djangoapps/grades/module_grades.py
|
14
|
"""
Functionality for module-level grades.
"""
# TODO The score computation in this file is not accurate
# since it is summing percentages instead of computing a
# final percentage of the individual sums.
# Regardless, this file and its code should be removed soon
# as part of TNL-5062.
from django.test.client import RequestFactory
from courseware.model_data import FieldDataCache, ScoresClient
from courseware.module_render import get_module_for_descriptor
from opaque_keys.edx.locator import BlockUsageLocator
from util.module_utils import yield_dynamic_descriptor_descendants
def _get_mock_request(student):
"""
Make a fake request because grading code expects to be able to look at
the request. We have to attach the correct user to the request before
grading that student.
"""
request = RequestFactory().get('/')
request.user = student
return request
def _calculate_score_for_modules(user_id, course, modules):
"""
Calculates the cumulative score (percent) of the given modules
"""
    # Strip branch and version from each exam module's locator; otherwise the
    # student module lookup would not return scores, since the usage keys
    # would not match.
    modules = list(modules)
locations = [
BlockUsageLocator(
course_key=course.id,
block_type=module.location.block_type,
block_id=module.location.block_id
)
if isinstance(module.location, BlockUsageLocator) and module.location.version
else module.location
for module in modules
]
scores_client = ScoresClient(course.id, user_id)
scores_client.fetch_scores(locations)
# Iterate over all of the exam modules to get score percentage of user for each of them
module_percentages = []
ignore_categories = ['course', 'chapter', 'sequential', 'vertical', 'randomize', 'library_content']
for index, module in enumerate(modules):
if module.category not in ignore_categories and (module.graded or module.has_score):
module_score = scores_client.get(locations[index])
if module_score:
correct = module_score.correct or 0
total = module_score.total or 1
module_percentages.append(correct / total)
return sum(module_percentages) / float(len(module_percentages)) if module_percentages else 0
def get_module_score(user, course, module):
"""
Collects all children of the given module and calculates the cumulative
score for this set of modules for the given user.
Arguments:
user (User): The user
course (CourseModule): The course
module (XBlock): The module
Returns:
float: The cumulative score
"""
def inner_get_module(descriptor):
"""
Delegate to get_module_for_descriptor
"""
field_data_cache = FieldDataCache([descriptor], course.id, user)
return get_module_for_descriptor(
user,
_get_mock_request(user),
descriptor,
field_data_cache,
course.id,
course=course
)
modules = yield_dynamic_descriptor_descendants(
module,
user.id,
inner_get_module
)
return _calculate_score_for_modules(user.id, course, modules)
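# Illustrative sketch (not part of the grading code above): the averaging of
# per-module percentages done in _calculate_score_for_modules, using assumed
# example (correct, total) score pairs.
def _example_cumulative_score():
    scores = [(1.0, 2.0), (3.0, 4.0)]  # 50% and 75%
    percentages = [correct / total for correct, total in scores]
    return sum(percentages) / float(len(percentages))  # 0.625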
|
nvoron23/brython
|
refs/heads/master
|
site/tests/unittests/test/test_re.py
|
24
|
from test.support import verbose, run_unittest, gc_collect, bigmemtest, _2G, \
cpython_only
import io
import re
from re import Scanner
import sre_constants
import sys
import string
import traceback
from weakref import proxy
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
# what you're doing. Some of these tests were carefully modeled to
# cover most of the code.
import unittest
class ReTests(unittest.TestCase):
def test_keep_buffer(self):
# See bug 14212
b = bytearray(b'x')
it = re.finditer(b'a', b)
with self.assertRaises(BufferError):
b.extend(b'x'*400)
list(it)
del it
gc_collect()
b.extend(b'x'*400)
def test_weakref(self):
s = 'QabbbcR'
x = re.compile('ab+c')
y = proxy(x)
self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR'))
def test_search_star_plus(self):
self.assertEqual(re.search('x*', 'axx').span(0), (0, 0))
self.assertEqual(re.search('x*', 'axx').span(), (0, 0))
self.assertEqual(re.search('x+', 'axx').span(0), (1, 3))
self.assertEqual(re.search('x+', 'axx').span(), (1, 3))
self.assertEqual(re.search('x', 'aaa'), None)
self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0))
self.assertEqual(re.match('a*', 'xxx').span(), (0, 0))
self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3))
self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3))
self.assertEqual(re.match('a+', 'xxx'), None)
def bump_num(self, matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
def test_basic_re_sub(self):
self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
'9.3 -3 24x100y')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
'9.3 -3 23x99y')
self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n')
self.assertEqual(re.sub('.', r"\n", 'x'), '\n')
s = r"\1\1"
self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
'\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
(chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
def test_bug_449964(self):
# fails for group followed by other escape
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
'xx\bxx\b')
def test_bug_449000(self):
# Test for sub() on escaped characters
self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
def test_bug_1661(self):
# Verify that flags do not get silently ignored with compiled patterns
pattern = re.compile('.')
self.assertRaises(ValueError, re.match, pattern, 'A', re.I)
self.assertRaises(ValueError, re.search, pattern, 'A', re.I)
self.assertRaises(ValueError, re.findall, pattern, 'A', re.I)
self.assertRaises(ValueError, re.compile, pattern, re.I)
def test_bug_3629(self):
# A regex that triggered a bug in the sre-code validator
re.compile("(?P<quote>)(?(quote))")
def test_sub_template_numeric_escape(self):
# bug 776311 and friends
self.assertEqual(re.sub('x', r'\0', 'x'), '\0')
self.assertEqual(re.sub('x', r'\000', 'x'), '\000')
self.assertEqual(re.sub('x', r'\001', 'x'), '\001')
self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\111', 'x'), '\111')
self.assertEqual(re.sub('x', r'\117', 'x'), '\117')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1')
self.assertEqual(re.sub('x', r'\00', 'x'), '\x00')
self.assertEqual(re.sub('x', r'\07', 'x'), '\x07')
self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a')
self.assertEqual(re.sub('x', r'\400', 'x'), '\0')
self.assertEqual(re.sub('x', r'\777', 'x'), '\377')
self.assertRaises(re.error, re.sub, 'x', r'\1', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\8', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\9', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\11', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\18', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\90', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\99', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8'
self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1'
self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0'
# in python2.3 (etc), these loop endlessly in sre_parser.py
self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
'xz8')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
'xza')
def test_qualified_re_sub(self):
self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb')
self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa')
def test_bug_114660(self):
self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
'hello there')
def test_bug_462270(self):
# Test for empty sub() behaviour, see SF bug #462270
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
def test_symbolic_groups(self):
re.compile('(?P<a>x)(?P=a)(?(a)y)')
re.compile('(?P<a1>x)(?P=a1)(?(a1)y)')
self.assertRaises(re.error, re.compile, '(?P<a>)(?P<a>)')
self.assertRaises(re.error, re.compile, '(?Px)')
self.assertRaises(re.error, re.compile, '(?P=)')
self.assertRaises(re.error, re.compile, '(?P=1)')
self.assertRaises(re.error, re.compile, '(?P=a)')
self.assertRaises(re.error, re.compile, '(?P=a1)')
self.assertRaises(re.error, re.compile, '(?P=a.)')
self.assertRaises(re.error, re.compile, '(?P<)')
self.assertRaises(re.error, re.compile, '(?P<>)')
self.assertRaises(re.error, re.compile, '(?P<1>)')
self.assertRaises(re.error, re.compile, '(?P<a.>)')
self.assertRaises(re.error, re.compile, '(?())')
self.assertRaises(re.error, re.compile, '(?(a))')
self.assertRaises(re.error, re.compile, '(?(1a))')
self.assertRaises(re.error, re.compile, '(?(a.))')
# New valid/invalid identifiers in Python 3
re.compile('(?P<µ>x)(?P=µ)(?(µ)y)')
re.compile('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)(?P=𝔘𝔫𝔦𝔠𝔬𝔡𝔢)(?(𝔘𝔫𝔦𝔠𝔬𝔡𝔢)y)')
self.assertRaises(re.error, re.compile, '(?P<©>x)')
def test_symbolic_refs(self):
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx')
self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx')
# New valid/invalid identifiers in Python 3
self.assertEqual(re.sub('(?P<µ>x)', r'\g<µ>', 'xx'), 'xx')
self.assertEqual(re.sub('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)', r'\g<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>', 'xx'), 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', r'\g<©>', 'xx')
def test_re_subn(self):
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0))
self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
def test_re_split(self):
self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:*)", ":a:b::c"),
['', ':', 'a', ':', 'b', '::', 'c'])
self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:)*", ":a:b::c"),
['', ':', 'a', ':', 'b', ':', 'c'])
self.assertEqual(re.split("([b:]+)", ":a:b::c"),
['', ':', 'a', ':b::', 'c'])
self.assertEqual(re.split("(b)|(:+)", ":a:b::c"),
['', None, ':', 'a', None, ':', '', 'b', None, '',
None, '::', 'c'])
self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"),
['', 'a', '', '', 'c'])
def test_qualified_re_split(self):
self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
self.assertEqual(re.split("(:)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
self.assertEqual(re.split("(:*)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
def test_re_findall(self):
self.assertEqual(re.findall(":+", "abc"), [])
self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""),
(":", ":"),
(":", "::")])
def test_bug_117612(self):
self.assertEqual(re.findall(r"(a|(b))", "aba"),
[("a", ""),("b", "b"),("a", "")])
def test_re_match(self):
self.assertEqual(re.match('a', 'a').groups(), ())
self.assertEqual(re.match('(a)', 'a').groups(), ('a',))
self.assertEqual(re.match(r'(a)', 'a').group(0), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
pat = re.compile('((a)|(b))(c)?')
self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None))
self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None))
self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c'))
self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c'))
self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c'))
# A single group
m = re.match('(a)', 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(1), 'a')
self.assertEqual(m.group(1, 1), ('a', 'a'))
pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'),
(None, 'b', None))
self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
def test_re_groupref_exists(self):
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
('(', 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None)
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None)
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
('a', 'b'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(),
('a', ''))
# Tests for bug #1177831: exercise groups other than the first group
p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
self.assertEqual(p.match('abc').groups(),
('a', 'b', 'c'))
self.assertEqual(p.match('ad').groups(),
('a', None, 'd'))
self.assertEqual(p.match('abd'), None)
self.assertEqual(p.match('ac'), None)
def test_re_groupref(self):
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(),
('|', 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None)
self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(),
('a', 'a'))
self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(),
(None, None))
def test_groupdict(self):
self.assertEqual(re.match('(?P<first>first) (?P<second>second)',
'first second').groupdict(),
{'first':'first', 'second':'second'})
def test_expand(self):
self.assertEqual(re.match("(?P<first>first) (?P<second>second)",
"first second")
.expand(r"\2 \1 \g<second> \g<first>"),
"second first second first")
def test_repeat_minmax(self):
self.assertEqual(re.match("^(\w){1}$", "abc"), None)
self.assertEqual(re.match("^(\w){1}?$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None)
self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^x{1}$", "xxx"), None)
self.assertEqual(re.match("^x{1}?$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertEqual(re.match("^x{}$", "xxx"), None)
self.assertNotEqual(re.match("^x{}$", "x{}"), None)
def test_getattr(self):
self.assertEqual(re.compile("(?i)(a)(b)").pattern, "(?i)(a)(b)")
self.assertEqual(re.compile("(?i)(a)(b)").flags, re.I | re.U)
self.assertEqual(re.compile("(?i)(a)(b)").groups, 2)
self.assertEqual(re.compile("(?i)(a)(b)").groupindex, {})
self.assertEqual(re.compile("(?i)(?P<first>a)(?P<other>b)").groupindex,
{'first': 1, 'other': 2})
self.assertEqual(re.match("(a)", "a").pos, 0)
self.assertEqual(re.match("(a)", "a").endpos, 1)
self.assertEqual(re.match("(a)", "a").string, "a")
self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1)))
self.assertNotEqual(re.match("(a)", "a").re, None)
def test_special_escapes(self):
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a").group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.LOCALE).group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
def test_string_boundaries(self):
# See http://bugs.python.org/issue10713
self.assertEqual(re.search(r"\b(abc)\b", "abc").group(1),
"abc")
# There's a word boundary at the start of a string.
self.assertTrue(re.match(r"\b", "abc"))
# A non-empty string includes a non-boundary zero-length match.
self.assertTrue(re.search(r"\B", "abc"))
# There is no non-boundary match at the start of a string.
self.assertFalse(re.match(r"\B", "abc"))
# However, an empty string contains no word boundaries, and also no
# non-boundaries.
self.assertEqual(re.search(r"\B", ""), None)
# This one is questionable and different from the perlre behaviour,
# but describes current behavior.
self.assertEqual(re.search(r"\b", ""), None)
# A single word-character string has two boundaries, but no
# non-boundary gaps.
self.assertEqual(len(re.findall(r"\b", "a")), 2)
self.assertEqual(len(re.findall(r"\B", "a")), 0)
# If there are no words, there are no boundaries
self.assertEqual(len(re.findall(r"\b", " ")), 0)
self.assertEqual(len(re.findall(r"\b", " ")), 0)
# Can match around the whitespace.
self.assertEqual(len(re.findall(r"\B", " ")), 2)
def test_bigcharset(self):
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222").group(1), "\u2222")
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222", re.UNICODE).group(1), "\u2222")
r = '[%s]' % ''.join(map(chr, range(256, 2**16, 255)))
self.assertEqual(re.match(r,
"\uff01", re.UNICODE).group(), "\uff01")
def test_big_codesize(self):
# Issue #1160
r = re.compile('|'.join(('%d'%x for x in range(10000))))
self.assertIsNotNone(r.match('1000'))
self.assertIsNotNone(r.match('9999'))
def test_anyall(self):
self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
"a\nb")
self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0),
"a\n\nb")
def test_non_consuming(self):
self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a")
def test_ignore_case(self):
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa")
self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa")
def test_category(self):
self.assertEqual(re.match(r"(\s)", " ").group(1), " ")
def test_getlower(self):
import _sre
self.assertEqual(_sre.getlower(ord('A'), 0), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a'))
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
def test_not_literal(self):
self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb")
def test_search_coverage(self):
self.assertEqual(re.search("\s(b)", " b").group(1), "b")
self.assertEqual(re.search("a\s", "a ").group(0), "a ")
def assertMatch(self, pattern, text, match=None, span=None,
matcher=re.match):
if match is None and span is None:
# the pattern matches the whole text
match = text
span = (0, len(text))
elif match is None or span is None:
raise ValueError('If match is not None, span should be specified '
'(and vice versa).')
m = matcher(pattern, text)
self.assertTrue(m)
self.assertEqual(m.group(), match)
self.assertEqual(m.span(), span)
def test_re_escape(self):
alnum_chars = string.ascii_letters + string.digits + '_'
p = ''.join(chr(i) for i in range(256))
for c in p:
if c in alnum_chars:
self.assertEqual(re.escape(c), c)
elif c == '\x00':
self.assertEqual(re.escape(c), '\\000')
else:
self.assertEqual(re.escape(c), '\\' + c)
self.assertMatch(re.escape(c), c)
self.assertMatch(re.escape(p), p)
def test_re_escape_byte(self):
alnum_chars = (string.ascii_letters + string.digits + '_').encode('ascii')
p = bytes(range(256))
for i in p:
b = bytes([i])
if b in alnum_chars:
self.assertEqual(re.escape(b), b)
elif i == 0:
self.assertEqual(re.escape(b), b'\\000')
else:
self.assertEqual(re.escape(b), b'\\' + b)
self.assertMatch(re.escape(b), b)
self.assertMatch(re.escape(p), p)
def test_re_escape_non_ascii(self):
s = 'xxx\u2620\u2620\u2620xxx'
s_escaped = re.escape(s)
self.assertEqual(s_escaped, 'xxx\\\u2620\\\u2620\\\u2620xxx')
self.assertMatch(s_escaped, s)
self.assertMatch('.%s+.' % re.escape('\u2620'), s,
'x\u2620\u2620\u2620x', (2, 7), re.search)
def test_re_escape_non_ascii_bytes(self):
b = 'y\u2620y\u2620y'.encode('utf-8')
b_escaped = re.escape(b)
self.assertEqual(b_escaped, b'y\\\xe2\\\x98\\\xa0y\\\xe2\\\x98\\\xa0y')
self.assertMatch(b_escaped, b)
res = re.findall(re.escape('\u2620'.encode('utf-8')), b)
self.assertEqual(len(res), 2)
def pickle_test(self, pickle):
oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
s = pickle.dumps(oldpat)
newpat = pickle.loads(s)
self.assertEqual(oldpat, newpat)
def test_constants(self):
self.assertEqual(re.I, re.IGNORECASE)
self.assertEqual(re.L, re.LOCALE)
self.assertEqual(re.M, re.MULTILINE)
self.assertEqual(re.S, re.DOTALL)
self.assertEqual(re.X, re.VERBOSE)
def test_flags(self):
for flag in [re.I, re.M, re.X, re.S, re.L]:
self.assertNotEqual(re.compile('^pattern$', flag), None)
def test_sre_character_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]:
if i < 256:
self.assertIsNotNone(re.match(r"\%03o" % i, chr(i)))
self.assertIsNotNone(re.match(r"\%03o0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\%03o8" % i, chr(i)+"8"))
self.assertIsNotNone(re.match(r"\x%02x" % i, chr(i)))
self.assertIsNotNone(re.match(r"\x%02x0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\x%02xz" % i, chr(i)+"z"))
if i < 0x10000:
self.assertIsNotNone(re.match(r"\u%04x" % i, chr(i)))
self.assertIsNotNone(re.match(r"\u%04x0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\u%04xz" % i, chr(i)+"z"))
self.assertIsNotNone(re.match(r"\U%08x" % i, chr(i)))
self.assertIsNotNone(re.match(r"\U%08x0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\U%08xz" % i, chr(i)+"z"))
self.assertIsNotNone(re.match(r"\0", "\000"))
self.assertIsNotNone(re.match(r"\08", "\0008"))
self.assertIsNotNone(re.match(r"\01", "\001"))
self.assertIsNotNone(re.match(r"\018", "\0018"))
self.assertIsNotNone(re.match(r"\567", chr(0o167)))
self.assertRaises(re.error, re.match, r"\911", "")
self.assertRaises(re.error, re.match, r"\x1", "")
self.assertRaises(re.error, re.match, r"\x1z", "")
self.assertRaises(re.error, re.match, r"\u123", "")
self.assertRaises(re.error, re.match, r"\u123z", "")
self.assertRaises(re.error, re.match, r"\U0001234", "")
self.assertRaises(re.error, re.match, r"\U0001234z", "")
self.assertRaises(re.error, re.match, r"\U00110000", "")
def test_sre_character_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]:
if i < 256:
self.assertIsNotNone(re.match(r"[\%o]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%o8]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%03o]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%03o0]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%03o8]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\x%02x]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\x%02x0]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\x%02xz]" % i, chr(i)))
if i < 0x10000:
self.assertIsNotNone(re.match(r"[\u%04x]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\u%04x0]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\u%04xz]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\U%08x]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\U%08x0]" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"[\U%08xz]" % i, chr(i)+"z"))
self.assertIsNotNone(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e"))
self.assertRaises(re.error, re.match, r"[\911]", "")
self.assertRaises(re.error, re.match, r"[\x1z]", "")
self.assertRaises(re.error, re.match, r"[\u123z]", "")
self.assertRaises(re.error, re.match, r"[\U0001234z]", "")
self.assertRaises(re.error, re.match, r"[\U00110000]", "")
def test_sre_byte_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertIsNotNone(re.match((r"\%03o" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"\%03o0" % i).encode(), bytes([i])+b"0"))
self.assertIsNotNone(re.match((r"\%03o8" % i).encode(), bytes([i])+b"8"))
self.assertIsNotNone(re.match((r"\x%02x" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"\x%02x0" % i).encode(), bytes([i])+b"0"))
self.assertIsNotNone(re.match((r"\x%02xz" % i).encode(), bytes([i])+b"z"))
self.assertIsNotNone(re.match(br"\u", b'u'))
self.assertIsNotNone(re.match(br"\U", b'U'))
self.assertIsNotNone(re.match(br"\0", b"\000"))
self.assertIsNotNone(re.match(br"\08", b"\0008"))
self.assertIsNotNone(re.match(br"\01", b"\001"))
self.assertIsNotNone(re.match(br"\018", b"\0018"))
self.assertIsNotNone(re.match(br"\567", bytes([0o167])))
self.assertRaises(re.error, re.match, br"\911", b"")
self.assertRaises(re.error, re.match, br"\x1", b"")
self.assertRaises(re.error, re.match, br"\x1z", b"")
def test_sre_byte_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertIsNotNone(re.match((r"[\%o]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%o8]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o0]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o8]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02x]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02x0]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02xz]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match(br"[\u]", b'u'))
self.assertIsNotNone(re.match(br"[\U]", b'U'))
self.assertRaises(re.error, re.match, br"[\911]", "")
self.assertRaises(re.error, re.match, br"[\x1z]", "")
def test_bug_113254(self):
self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1))
def test_bug_527371(self):
# bug described in patches 527371/672491
self.assertEqual(re.match(r'(a)?a','a').lastindex, None)
self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a')
self.assertEqual(re.match("((a))", "a").lastindex, 1)
def test_bug_545855(self):
# bug 545855 -- This pattern failed to cause a compile error as it
# should, instead provoking a TypeError.
self.assertRaises(re.error, re.compile, 'foo[a-')
def test_bug_418626(self):
        # bugs 418626 et al. -- Testing Greg Chapman's addition of op code
# SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of
# pattern '*?' on a long string.
self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001)
self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0),
20003)
self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001)
# non-simple '*?' still used to hit the recursion limit, before the
# non-recursive scheme was implemented.
self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001)
def test_bug_612074(self):
pat="["+re.escape("\u2039")+"]"
self.assertEqual(re.compile(pat) and 1, 1)
def test_stack_overflow(self):
# nasty cases that used to overflow the straightforward recursive
# implementation of repeated groups.
self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x')
self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x')
self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x')
def test_unlimited_zero_width_repeat(self):
# Issue #9669
self.assertIsNone(re.match(r'(?:a?)*y', 'z'))
self.assertIsNone(re.match(r'(?:a?)+y', 'z'))
self.assertIsNone(re.match(r'(?:a?){2,}y', 'z'))
self.assertIsNone(re.match(r'(?:a?)*?y', 'z'))
self.assertIsNone(re.match(r'(?:a?)+?y', 'z'))
self.assertIsNone(re.match(r'(?:a?){2,}?y', 'z'))
def test_scanner(self):
def s_ident(scanner, token): return token
def s_operator(scanner, token): return "op%s" % token
def s_float(scanner, token): return float(token)
def s_int(scanner, token): return int(token)
scanner = Scanner([
(r"[a-zA-Z_]\w*", s_ident),
(r"\d+\.\d*", s_float),
(r"\d+", s_int),
(r"=|\+|-|\*|/", s_operator),
(r"\s+", None),
])
self.assertNotEqual(scanner.scanner.scanner("").pattern, None)
self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"),
(['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5,
'op+', 'bar'], ''))
def test_bug_448951(self):
# bug 448951 (similar to 429357, but with single char match)
# (Also test greedy matches.)
for op in '','?','*':
self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(),
(None, None))
self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(),
('a:', 'a'))
def test_bug_725106(self):
# capturing groups in alternatives in repeats
self.assertEqual(re.match('^((a)|b)*', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(),
('b', None))
def test_bug_725149(self):
# mark_stack_base restoring before restoring marks
self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(),
('a', None))
self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(),
('a', None, None))
def test_bug_764548(self):
# bug 764548, re.compile() barfs on str/unicode subclasses
class my_unicode(str): pass
pat = re.compile(my_unicode("abc"))
self.assertEqual(pat.match("xyz"), None)
def test_finditer(self):
iter = re.finditer(r":+", "a:b::c:::d")
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", 1, 10)
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", pos=1, endpos=10)
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", endpos=10, pos=1)
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", pos=3, endpos=8)
self.assertEqual([item.group(0) for item in iter],
["::", "::"])
def test_bug_926075(self):
self.assertTrue(re.compile('bug_926075') is not
re.compile(b'bug_926075'))
def test_bug_931848(self):
pattern = eval('"[\u002E\u3002\uFF0E\uFF61]"')
self.assertEqual(re.compile(pattern).split("a.b.c"),
['a','b','c'])
def test_bug_581080(self):
iter = re.finditer(r"\s", "a b")
self.assertEqual(next(iter).span(), (1,2))
self.assertRaises(StopIteration, next, iter)
scanner = re.compile(r"\s").scanner("a b")
self.assertEqual(scanner.search().span(), (1, 2))
self.assertEqual(scanner.search(), None)
def test_bug_817234(self):
iter = re.finditer(r".*", "asdf")
self.assertEqual(next(iter).span(), (0, 4))
self.assertEqual(next(iter).span(), (4, 4))
self.assertRaises(StopIteration, next, iter)
def test_bug_6561(self):
# '\d' should match characters in Unicode category 'Nd'
# (Number, Decimal Digit), but not those in 'Nl' (Number,
# Letter) or 'No' (Number, Other).
decimal_digits = [
'\u0037', # '\N{DIGIT SEVEN}', category 'Nd'
'\u0e58', # '\N{THAI DIGIT SIX}', category 'Nd'
'\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd'
]
for x in decimal_digits:
self.assertEqual(re.match('^\d$', x).group(0), x)
not_decimal_digits = [
'\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl'
'\u3039', # '\N{HANGZHOU NUMERAL TWENTY}', category 'Nl'
'\u2082', # '\N{SUBSCRIPT TWO}', category 'No'
'\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No'
]
for x in not_decimal_digits:
self.assertIsNone(re.match('^\d$', x))
def test_empty_array(self):
        # SF bug 1647541
import array
for typecode in 'bBuhHiIlLfd':
a = array.array(typecode)
self.assertEqual(re.compile(b"bla").match(a), None)
self.assertEqual(re.compile(b"").match(a).groups(), ())
def test_inline_flags(self):
# Bug #1700
        upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Below
        lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Below
p = re.compile(upper_char, re.I | re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile(lower_char, re.I | re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + upper_char, re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + lower_char, re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + upper_char)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + lower_char)
q = p.match(upper_char)
self.assertNotEqual(q, None)
def test_dollar_matches_twice(self):
"$ matches the end of string, and just before the terminating \n"
pattern = re.compile('$')
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
pattern = re.compile('$', re.MULTILINE)
self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#' )
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
def test_bytes_str_mixing(self):
# Mixing str and bytes is disallowed
pat = re.compile('.')
bpat = re.compile(b'.')
self.assertRaises(TypeError, pat.match, b'b')
self.assertRaises(TypeError, bpat.match, 'b')
self.assertRaises(TypeError, pat.sub, b'b', 'c')
self.assertRaises(TypeError, pat.sub, 'b', b'c')
self.assertRaises(TypeError, pat.sub, b'b', b'c')
self.assertRaises(TypeError, bpat.sub, b'b', 'c')
self.assertRaises(TypeError, bpat.sub, 'b', b'c')
self.assertRaises(TypeError, bpat.sub, 'b', 'c')
def test_ascii_and_unicode_flag(self):
# String patterns
for flags in (0, re.UNICODE):
pat = re.compile('\xc0', flags | re.IGNORECASE)
self.assertNotEqual(pat.match('\xe0'), None)
pat = re.compile('\w', flags)
self.assertNotEqual(pat.match('\xe0'), None)
pat = re.compile('\xc0', re.ASCII | re.IGNORECASE)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('(?a)\xc0', re.IGNORECASE)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('\w', re.ASCII)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('(?a)\w')
self.assertEqual(pat.match('\xe0'), None)
# Bytes patterns
for flags in (0, re.ASCII):
pat = re.compile(b'\xc0', re.IGNORECASE)
self.assertEqual(pat.match(b'\xe0'), None)
pat = re.compile(b'\w')
self.assertEqual(pat.match(b'\xe0'), None)
# Incompatibilities
self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE)
self.assertRaises(ValueError, re.compile, b'(?u)\w')
self.assertRaises(ValueError, re.compile, '\w', re.UNICODE | re.ASCII)
self.assertRaises(ValueError, re.compile, '(?u)\w', re.ASCII)
self.assertRaises(ValueError, re.compile, '(?a)\w', re.UNICODE)
self.assertRaises(ValueError, re.compile, '(?au)\w')
def test_bug_6509(self):
# Replacement strings of both types must parse properly.
# all strings
pat = re.compile('a(\w)')
self.assertEqual(pat.sub('b\\1', 'ac'), 'bc')
pat = re.compile('a(.)')
self.assertEqual(pat.sub('b\\1', 'a\u1234'), 'b\u1234')
pat = re.compile('..')
self.assertEqual(pat.sub(lambda m: 'str', 'a5'), 'str')
# all bytes
pat = re.compile(b'a(\w)')
self.assertEqual(pat.sub(b'b\\1', b'ac'), b'bc')
pat = re.compile(b'a(.)')
self.assertEqual(pat.sub(b'b\\1', b'a\xCD'), b'b\xCD')
pat = re.compile(b'..')
self.assertEqual(pat.sub(lambda m: b'bytes', b'a5'), b'bytes')
def test_dealloc(self):
# issue 3299: check for segfault in debug build
import _sre
# the overflow limit is different on wide and narrow builds and it
# depends on the definition of SRE_CODE (see sre.h).
# 2**128 should be big enough to overflow on both. For smaller values
# a RuntimeError is raised instead of OverflowError.
long_overflow = 2**128
self.assertRaises(TypeError, re.finditer, "a", {})
self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow])
self.assertRaises(TypeError, _sre.compile, {}, 0, [])
def test_search_dot_unicode(self):
self.assertIsNotNone(re.search("123.*-", '123abc-'))
self.assertIsNotNone(re.search("123.*-", '123\xe9-'))
self.assertIsNotNone(re.search("123.*-", '123\u20ac-'))
self.assertIsNotNone(re.search("123.*-", '123\U0010ffff-'))
self.assertIsNotNone(re.search("123.*-", '123\xe9\u20ac\U0010ffff-'))
def test_compile(self):
# Test return value when given string and pattern as parameter
pattern = re.compile('random pattern')
self.assertIsInstance(pattern, re._pattern_type)
same_pattern = re.compile(pattern)
self.assertIsInstance(same_pattern, re._pattern_type)
self.assertIs(same_pattern, pattern)
# Test behaviour when not given a string or pattern as parameter
self.assertRaises(TypeError, re.compile, 0)
def test_bug_13899(self):
# Issue #13899: re pattern r"[\A]" should work like "A" but matches
# nothing. Ditto B and Z.
self.assertEqual(re.findall(r'[\A\B\b\C\Z]', 'AB\bCZ'),
['A', 'B', '\b', 'C', 'Z'])
@bigmemtest(size=_2G, memuse=1)
def test_large_search(self, size):
# Issue #10182: indices were 32-bit-truncated.
s = 'a' * size
m = re.search('$', s)
self.assertIsNotNone(m)
self.assertEqual(m.start(), size)
self.assertEqual(m.end(), size)
# The huge memuse is because of re.sub() using a list and a join()
# to create the replacement result.
@bigmemtest(size=_2G, memuse=16 + 2)
def test_large_subn(self, size):
# Issue #10182: indices were 32-bit-truncated.
s = 'a' * size
r, n = re.subn('', '', s)
self.assertEqual(r, s)
self.assertEqual(n, size + 1)
def test_bug_16688(self):
# Issue 16688: Backreferences make case-insensitive regex fail on
# non-ASCII strings.
self.assertEqual(re.findall(r"(?i)(a)\1", "aa \u0100"), ['a'])
self.assertEqual(re.match(r"(?s).{1,3}", "\u0100\u0100").span(), (0, 2))
def test_repeat_minmax_overflow(self):
# Issue #13169
string = "x" * 100000
self.assertEqual(re.match(r".{65535}", string).span(), (0, 65535))
self.assertEqual(re.match(r".{,65535}", string).span(), (0, 65535))
self.assertEqual(re.match(r".{65535,}?", string).span(), (0, 65535))
self.assertEqual(re.match(r".{65536}", string).span(), (0, 65536))
self.assertEqual(re.match(r".{,65536}", string).span(), (0, 65536))
self.assertEqual(re.match(r".{65536,}?", string).span(), (0, 65536))
# 2**128 should be big enough to overflow both SRE_CODE and Py_ssize_t.
self.assertRaises(OverflowError, re.compile, r".{%d}" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{,%d}" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{%d,}?" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{%d,%d}" % (2**129, 2**128))
@cpython_only
def test_repeat_minmax_overflow_maxrepeat(self):
try:
from _sre import MAXREPEAT
except ImportError:
self.skipTest('requires _sre.MAXREPEAT constant')
string = "x" * 100000
self.assertIsNone(re.match(r".{%d}" % (MAXREPEAT - 1), string))
self.assertEqual(re.match(r".{,%d}" % (MAXREPEAT - 1), string).span(),
(0, 100000))
self.assertIsNone(re.match(r".{%d,}?" % (MAXREPEAT - 1), string))
self.assertRaises(OverflowError, re.compile, r".{%d}" % MAXREPEAT)
self.assertRaises(OverflowError, re.compile, r".{,%d}" % MAXREPEAT)
self.assertRaises(OverflowError, re.compile, r".{%d,}?" % MAXREPEAT)
def test_backref_group_name_in_exception(self):
# Issue 17341: Poor error message when compiling invalid regex
with self.assertRaisesRegex(sre_constants.error, '<foo>'):
re.compile('(?P=<foo>)')
def test_group_name_in_exception(self):
# Issue 17341: Poor error message when compiling invalid regex
with self.assertRaisesRegex(sre_constants.error, '\?foo'):
re.compile('(?P<?foo>)')
def test_issue17998(self):
for reps in '*', '+', '?', '{1}':
for mod in '', '?':
pattern = '.' + reps + mod + 'yz'
self.assertEqual(re.compile(pattern, re.S).findall('xyz'),
['xyz'], msg=pattern)
pattern = pattern.encode()
self.assertEqual(re.compile(pattern, re.S).findall(b'xyz'),
[b'xyz'], msg=pattern)
def test_bug_2537(self):
# issue 2537: empty submatches
for outer_op in ('{0,}', '*', '+', '{1,187}'):
for inner_op in ('{0,}', '*', '?'):
r = re.compile("^((x|y)%s)%s" % (inner_op, outer_op))
m = r.match("xyyzy")
self.assertEqual(m.group(0), "xyy")
self.assertEqual(m.group(1), "")
self.assertEqual(m.group(2), "y")
def run_re_tests():
from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR
if verbose:
print('Running re_tests test suite')
else:
# To save time, only run the first and last 10 tests
#tests = tests[:10] + tests[-10:]
pass
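    # Each test tuple is either (pattern, string, outcome) or
    # (pattern, string, outcome, replacement_expr, expected_result).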
for t in tests:
sys.stdout.flush()
pattern = s = outcome = repl = expected = None
if len(t) == 5:
pattern, s, outcome, repl, expected = t
elif len(t) == 3:
pattern, s, outcome = t
else:
raise ValueError('Test tuples should have 3 or 5 fields', t)
try:
obj = re.compile(pattern)
except re.error:
if outcome == SYNTAX_ERROR: pass # Expected a syntax error
else:
print('=== Syntax error:', t)
except KeyboardInterrupt: raise KeyboardInterrupt
except:
print('*** Unexpected error ***', t)
if verbose:
traceback.print_exc(file=sys.stdout)
else:
try:
result = obj.search(s)
except re.error as msg:
print('=== Unexpected exception', t, repr(msg))
if outcome == SYNTAX_ERROR:
# This should have been a syntax error; forget it.
pass
elif outcome == FAIL:
if result is None: pass # No match, as expected
else: print('=== Succeeded incorrectly', t)
elif outcome == SUCCEED:
if result is not None:
# Matched, as expected, so now we compute the
# result string and compare it to our expected result.
start, end = result.span(0)
vardict={'found': result.group(0),
'groups': result.group(),
'flags': result.re.flags}
for i in range(1, 100):
try:
gi = result.group(i)
# Special hack because else the string concat fails:
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict['g%d' % i] = gi
for i in result.re.groupindex.keys():
try:
gi = result.group(i)
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict[i] = gi
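                    # The expected-result field is a Python expression; evaluate
                    # it with the captured groups exposed as 'found', 'groups',
                    # 'flags', g1..g99 and any named groups.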
repl = eval(repl, vardict)
if repl != expected:
print('=== grouping error', t, end=' ')
print(repr(repl) + ' should be ' + repr(expected))
else:
print('=== Failed incorrectly', t)
# Try the match with both pattern and string converted to
# bytes, and check that it still succeeds.
try:
bpat = bytes(pattern, "ascii")
bs = bytes(s, "ascii")
except UnicodeEncodeError:
# skip non-ascii tests
pass
else:
try:
bpat = re.compile(bpat)
except Exception:
print('=== Fails on bytes pattern compile', t)
if verbose:
traceback.print_exc(file=sys.stdout)
else:
bytes_result = bpat.search(bs)
if bytes_result is None:
print('=== Fails on bytes pattern match', t)
# Try the match with the search area limited to the extent
# of the match and see if it still succeeds. \B will
# break (because it won't match at the end or start of a
# string), so we'll ignore patterns that feature it.
if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
and result is not None:
obj = re.compile(pattern)
result = obj.search(s, result.start(0), result.end(0) + 1)
if result is None:
print('=== Failed on range-limited match', t)
# Try the match with IGNORECASE enabled, and check that it
# still succeeds.
obj = re.compile(pattern, re.IGNORECASE)
result = obj.search(s)
if result is None:
print('=== Fails on case-insensitive match', t)
# Try the match with LOCALE enabled, and check that it
# still succeeds.
if '(?u)' not in pattern:
obj = re.compile(pattern, re.LOCALE)
result = obj.search(s)
if result is None:
print('=== Fails on locale-sensitive match', t)
# Try the match with UNICODE locale enabled, and check
# that it still succeeds.
obj = re.compile(pattern, re.UNICODE)
result = obj.search(s)
if result is None:
print('=== Fails on unicode-sensitive match', t)
def test_main():
run_unittest(ReTests)
run_re_tests()
if __name__ == "__main__":
test_main()
|
hTrap/junction
|
refs/heads/master
|
wsgi.py
|
9
|
"""
WSGI config for junction project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from django.core.wsgi import get_wsgi_application # noqa
application = get_wsgi_application()
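# Usage sketch (an assumption, not part of the project docs): the callable
# above would typically be served by a WSGI server, for example
#     gunicorn wsgi:application
# run from a directory where this module is importable.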
|
0k/OpenUpgrade
|
refs/heads/8.0
|
addons/hr_applicant_document/__openerp__.py
|
312
|
# -*- coding: utf-8 -*-
{
'name': 'Applicant Resumes and Letters',
'version': '1.0',
'category': 'Human Resources',
'sequence': 25,
'summary': 'Search job applications by Index content.',
'description': """This module allows you to search job applications by content
of resumes and letters.""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/recruitment',
'depends': [
'hr_recruitment',
'document'
],
'data': [
'views/hr_applicant.xml'
],
'demo': [
'demo/hr_applicant.xml'
],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
cculianu/bitcoin-abc
|
refs/heads/master
|
test/functional/wallet_keypool.py
|
1
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet keypool and interaction with wallet encryption/locking."""
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class KeyPoolTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
nodes = self.nodes
addr_before_encrypting = nodes[0].getnewaddress()
addr_before_encrypting_data = nodes[
0].getaddressinfo(addr_before_encrypting)
wallet_info_old = nodes[0].getwalletinfo()
assert addr_before_encrypting_data[
'hdseedid'] == wallet_info_old['hdseedid']
# Encrypt wallet and wait to terminate
nodes[0].encryptwallet('test')
# Keep creating keys
addr = nodes[0].getnewaddress()
addr_data = nodes[0].getaddressinfo(addr)
wallet_info = nodes[0].getwalletinfo()
assert addr_before_encrypting_data[
'hdseedid'] != wallet_info['hdseedid']
assert addr_data['hdseedid'] == wallet_info['hdseedid']
assert_raises_rpc_error(
-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
        # refill the keypool to six keys per chain (six external plus six
        # internal keys)
nodes[0].walletpassphrase('test', 12000)
nodes[0].keypoolrefill(6)
nodes[0].walletlock()
wi = nodes[0].getwalletinfo()
assert_equal(wi['keypoolsize_hd_internal'], 6)
assert_equal(wi['keypoolsize'], 6)
# drain the internal keys
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
addr = set()
# the next one should fail
assert_raises_rpc_error(-12, "Keypool ran out",
nodes[0].getrawchangeaddress)
# drain the external keys
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
assert len(addr) == 6
# the next one should fail
assert_raises_rpc_error(
-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
# refill keypool with three new addresses
nodes[0].walletpassphrase('test', 1)
nodes[0].keypoolrefill(3)
# test walletpassphrase timeout
time.sleep(1.1)
assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)
# drain the keypool
for _ in range(3):
nodes[0].getnewaddress()
assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getnewaddress)
nodes[0].walletpassphrase('test', 100)
nodes[0].keypoolrefill(100)
wi = nodes[0].getwalletinfo()
assert_equal(wi['keypoolsize_hd_internal'], 100)
assert_equal(wi['keypoolsize'], 100)
if __name__ == '__main__':
KeyPoolTest().main()
|
Arno-Nymous/pyload
|
refs/heads/stable
|
module/plugins/crypter/RSLayerCom.py
|
7
|
# -*- coding: utf-8 -*-
from ..internal.DeadCrypter import DeadCrypter
class RSLayerCom(DeadCrypter):
__name__ = "RSLayerCom"
__type__ = "crypter"
__version__ = "0.26"
__status__ = "stable"
__pattern__ = r'http://(?:www\.)?rs-layer\.com/directory-'
__config__ = [("activated", "bool", "Activated", True)]
__description__ = """RS-Layer.com decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("hzpz", None)]
|