repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
kracwarlock/neon
neon/metrics/tests/test_roc.py
13
3158
#!/usr/bin/env python # ---------------------------------------------------------------------------- # Copyright 2014 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- from neon.backends.cpu import CPUTensor from neon.metrics.roc import AUC class TestROC(object): def test_auc_add_binary(self): auc = AUC() assert auc.num_pos == 0 assert auc.num_neg == 0 refs = CPUTensor([[0, 1, 0, 0]]) preds = CPUTensor([[1, 1, 0, 1]]) auc.add(refs, preds) assert auc.num_pos == 1 assert auc.num_neg == 3 assert len(auc.probs) == 4 assert len(auc.labels) == 4 def test_auc_add_mixed(self): auc = AUC() assert auc.num_pos == 0 assert auc.num_neg == 0 refs = CPUTensor([[0, 1, 0]]) preds = CPUTensor([[0.00, 1, 0], [0.09, 0.0, 0.75], [0.01, 0, 0.15], [0.90, 0, 0.10]]) auc.add(refs, preds) assert auc.num_pos == 1 assert auc.num_neg == 2 assert len(auc.probs) == 3 assert len(auc.labels) == 3 def test_auc_add_probs(self): auc = AUC() assert auc.num_pos == 0 assert auc.num_neg == 0 refs = CPUTensor([[0.03, 0.80, 0.01], [0.20, 0.02, 0.80], [0.31, 0.08, 0.01], [0.46, 0.10, 0.03]]) preds = CPUTensor([[0.00, 1, 0], [0.09, 0.0, 0.75], [0.01, 0, 0.15], [0.90, 0, 0.10]]) auc.add(refs, preds) assert auc.num_pos == 1 assert auc.num_neg == 2 assert len(auc.probs) == 3 assert len(auc.labels) == 3 def test_auc_unique_ranks(self): auc = AUC() assert auc.get_ranks([0.1, 0.8, 0.4, 0.5]) == [1.0, 4.0, 2.0, 3.0] def 
test_auc_tied_ranks(self): auc = AUC() assert auc.get_ranks([0.1, 0.8, 0.4, 0.5, 0.4]) == [1.0, 5.0, 2.5, 4.0, 2.5] def test_auc_report_binary(self): auc = AUC() refs = CPUTensor([[0, 1, 0, 0]]) preds = CPUTensor([[1, 1, 0, 1]]) auc.add(refs, preds) assert auc.report() == (2.0 / 3.0) def test_auc_report_probs(self): auc = AUC() refs = CPUTensor([[0, 0, 1, 1]]) preds = CPUTensor([[0.1, 0.4, 0.35, 0.8]]) auc.add(refs, preds) assert auc.report() == .75
apache-2.0
xxd3vin/spp-sdk
opt/Python27/Lib/site-packages/numpy/distutils/tests/test_npy_pkg_config.py
37
3003
import os from tempfile import mkstemp from numpy.testing import * from numpy.distutils.npy_pkg_config import read_config, parse_flags simple = """\ [meta] Name = foo Description = foo lib Version = 0.1 [default] cflags = -I/usr/include libs = -L/usr/lib """ simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', 'version': '0.1', 'name': 'foo'} simple_variable = """\ [meta] Name = foo Description = foo lib Version = 0.1 [variables] prefix = /foo/bar libdir = ${prefix}/lib includedir = ${prefix}/include [default] cflags = -I${includedir} libs = -L${libdir} """ simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', 'version': '0.1', 'name': 'foo'} class TestLibraryInfo(TestCase): def test_simple(self): fd, filename = mkstemp('foo.ini') try: pkg = os.path.splitext(filename)[0] try: os.write(fd, simple.encode('ascii')) finally: os.close(fd) out = read_config(pkg) self.assertTrue(out.cflags() == simple_d['cflags']) self.assertTrue(out.libs() == simple_d['libflags']) self.assertTrue(out.name == simple_d['name']) self.assertTrue(out.version == simple_d['version']) finally: os.remove(filename) def test_simple_variable(self): fd, filename = mkstemp('foo.ini') try: pkg = os.path.splitext(filename)[0] try: os.write(fd, simple_variable.encode('ascii')) finally: os.close(fd) out = read_config(pkg) self.assertTrue(out.cflags() == simple_variable_d['cflags']) self.assertTrue(out.libs() == simple_variable_d['libflags']) self.assertTrue(out.name == simple_variable_d['name']) self.assertTrue(out.version == simple_variable_d['version']) out.vars['prefix'] = '/Users/david' self.assertTrue(out.cflags() == '-I/Users/david/include') finally: os.remove(filename) class TestParseFlags(TestCase): def test_simple_cflags(self): d = parse_flags("-I/usr/include") self.assertTrue(d['include_dirs'] == ['/usr/include']) d = parse_flags("-I/usr/include -DFOO") self.assertTrue(d['include_dirs'] == ['/usr/include']) self.assertTrue(d['macros'] == ['FOO']) d = 
parse_flags("-I /usr/include -DFOO") self.assertTrue(d['include_dirs'] == ['/usr/include']) self.assertTrue(d['macros'] == ['FOO']) def test_simple_lflags(self): d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib']) self.assertTrue(d['libraries'] == ['foo', 'bar']) d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib']) self.assertTrue(d['libraries'] == ['foo', 'bar'])
mit
zyzyis/monetdb
clients/examples/python/sqlsample.py
1
1132
#!/usr/bin/env python # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # # Copyright 2008-2015 MonetDB B.V. import monetdb.sql import sys dbh = monetdb.sql.Connection(port=int(sys.argv[1]),database=sys.argv[2],hostname=sys.argv[3],autocommit=True) cursor = dbh.cursor(); cursor.execute('select 1;') print(cursor.fetchall()) cursor = dbh.cursor(); cursor.execute('select 2;') print(cursor.fetchone()) # deliberately executing a wrong SQL statement: try: cursor.execute('( xyz 1);') except monetdb.sql.OperationalError as e: print(e) cursor.execute('create table python_table (i smallint,s string);'); cursor.execute('insert into python_table values ( 3, \'three\');'); cursor.execute('insert into python_table values ( 7, \'seven\');'); cursor.execute('select * from python_table;'); print(cursor.fetchall()) s = ((0, 'row1'), (1, 'row2')) x = cursor.executemany("insert into python_table VALUES (%s, %s);", s) print(x); cursor.execute('drop table python_table;');
mpl-2.0
enatheme/ducking-robot
main.py
1
3845
import os, sys, re nbLine = 0 nbFolder = 0 nbCfile = 0 target_folder = "" header_folder = "" #get arguments def read_args(argv): #var number_args = len(argv) - 1 ret = 0 #we have arguments if (number_args > 0): ret = 1 target_folder = argv[1] print("Target folder = %s" % (target_folder)) if(number_args > 1): header_folder = argv[2] print("Header folder = %s" % (header_folder)) return(ret) #read_config function def read_config(): #var global header_folder global target_folder re_header_folder = re.compile("^header_folder = ") re_target_folder = re.compile("^target_folder = ") #we try to open the config file try: config_file = open(".ducking-robot.cfg", "r") for line in config_file: if re_header_folder.match(line): temp = line.split("= ")[1] header_folder = temp[:len(temp) - 1] print("Header folder = %s" % (header_folder)) if re_target_folder.match(line): temp = line.split("= ")[1] target_folder = temp[:len(temp) - 1] print("Target folder = %s" % (target_folder)) return(1) config_file.close() except (IOError, OSError) as e: print("Error : %s" % (e)) return(0) #main class, scan folders and use parsingCodeFile def main(folder): #var global nbCfile global nbFolder cmpt=1 cmd = os.popen("ls -F " + folder) cmd = cmd.readlines() iterator = 0 #add here your extension listExtension = ["\.c", "\.c\*"] reArray = [] #re for tmp in listExtension: reArray.append(re.compile("([^ ]+)" + tmp + "$")) reFolder = re.compile("([^ ]+)/$") #recuperation of all files #file checking for line in cmd: iterator = 0 while (iterator < len(reArray)): if (reArray[iterator].match(line)): #call parsing of code file if (iterator == 0): parsingCfile(folder + line[:len(line) - 1]) if (iterator == 1): parsingCfile(folder + line[:len(line) - 2]) nbCfile += 1 iterator += 1 #folder detected, recursivity if (reFolder.match(line)): main(folder + line[:len(line) - 2] + '/') nbFolder += 1 #parsing for C file def parsingCfile (nameFile): global nbLine #var fEntry = open(nameFile, 'r') name_exit = 
(nameFile[:len(nameFile) - 2] + ".h") #if we have a header folder: if(header_folder != ""): temp_name_exit = name_exit.split("/") name_exit = header_folder + temp_name_exit[len(temp_name_exit) - 1] fExit = open(name_exit, 'w') listFunction= ["int", "void", "char", "double"] isIn = 0 lineTemp = "" listTemp = [] declaration = [] reArray = [] iterator2 = 0 #re for tmp in listFunction: reArray.append(re.compile("^" + tmp)) reInclude = re.compile("^#include+[(^ )]") #parsing of the file for line in fEntry: iterator = 0 nbLine += 1 #detection of include if (reInclude.match(line)): declaration += line #if a reArray is ok if (isIn == 1): #detection of function if ("{" in line): for car in lineTemp: listTemp.append(car) listTemp[len(listTemp) - 1] = ';' declaration += "".join(listTemp) + '\n' listTemp = [] isIn = 0 #if a reArray is not ok if (isIn == 0): iterator2 = 0 #test of reArray while (iterator < len(listFunction)): while (iterator2 < len(reArray)): if (reArray[iterator2].match(line)): isIn = 1 iterator2 += 1 iterator += 1 lineTemp = line for line in declaration: fExit.write(line) fEntry.close() fExit.close() got_config = 0 if(read_config() == 0): print("No config file found, read arguments") #we don't have a config file, so we read argument line if(read_args(sys.argv) == 0): #no args line, print the error message print("Usage: python main.py target_folder [header_folder]") else: got_config = 1 else: got_config = 1 if(got_config == 1): main(target_folder) print ("%s folder(s), %s C file and %s line(s)" % (nbFolder,nbCfile,nbLine))
gpl-2.0
Geoion/pili-sdk-python
pili/auth.py
3
2016
""" Auth provide class Auth for authentication account. You can use decorator auth_interface to create a function with auto generated authentication. """ import pili.conf as conf from urlparse import urlparse from .utils import send_and_decode, __hmac_sha1__ class Credentials(object): def __init__(self, access_key, secret_key): if not (access_key and secret_key): raise ValueError('invalid key') self.__auth__ = Auth(access_key, secret_key) class Auth(object): """ class Auth store the access_key and secret_key for authentication. """ def __init__(self, access_key, secret_key): if not (access_key and secret_key): raise ValueError('invalid key') self.__access_key, self.__secret_key = access_key, secret_key def auth_interface_str(self, raw_str): """ generate sign str. """ encoded = __hmac_sha1__(raw_str, self.__secret_key) return 'Qiniu {0}:{1}'.format(self.__access_key, encoded) def auth_interface(method): """ decorator takes func(**args) return req and change it to func(auth, **args) return json result. Args: func(**args) -> Request Returns: func(**args) -> dict (decoded json) """ def authed(auth, **args): """ send request and decode response. Return the result in python format. """ req = method(**args) parsed = urlparse(req.get_full_url()) raw_str = '%s %s' % (req.get_method(), parsed.path) if parsed.query: raw_str += '?%s' % (parsed.query) raw_str += '\nHost: %s' % (parsed.netloc) if req.has_data(): raw_str += '\nContent-Type: application/json' raw_str+="\n\n" if req.has_data(): raw_str += req.get_data() req.add_header('Content-Type', 'application/json') req.add_header('Authorization', auth.auth_interface_str(raw_str)) return send_and_decode(req) return authed
mit
UIKit0/marsyas
scripts/Python/batch.py
7
1475
import os from glob import glob inputDirectory = "../../../../Databases/taslp/"; outputDirectory = "../../../output3 "; testCommand = " "; #testCommand = " -q 1 "; beginCommand = "../../bin/release/peakClustering "; beginCommand = "..\\..\\bin\\release\\peakClustering.exe "; endCommand = " -P -f -S 0 -r -k 2 -c 3 -N music -i 250_2500 -o "+outputDirectory; execStyle=[ #hwps "-T 1 -s 20 -t hoabfb ", "-T 10 -s 20 -t hoabfb ", "-T 1 -s 20 -t hoabfb -u ", "-T 10 -s 20 -t hoabfb -u ", #virtanen "-T 1 -s 20 -t voabfb ", "-T 10 -s 20 -t voabfb ", "-T 1 -s 20 -t voabfb -u ", "-T 10 -s 20 -t voabfb -u ", #srinivasan criterion "-T 1 -s 20 -t soabfb ", "-T 10 -s 20 -t soabfb ", "-T 1 -s 20 -t soabfb -u ", "-T 10 -s 20 -t soabfb -u ", # amplitude only "-T 1 -s 20 -t abfb ", "-T 1 -s 20 -t abfb -u ", # harmonicity only "-T 1 -s 20 -t ho ", "-T 1 -s 20 -t ho -u ", "-T 1 -s 20 -t vo ", "-T 1 -s 20 -t vo -u ", "-T 1 -s 20 -t so ", "-T 1 -s 20 -t so -u ", # srinivasan algo " -s 1024 -npp -u -T 1 -t soabfb ", "-s 1024 -npp -u -T 10 -t soabfb "]; for style in execStyle: for name in glob(inputDirectory+"*V*.wav"): command = beginCommand+style+testCommand+endCommand+name print command os.system(command)
gpl-2.0
hfeeki/transifex
transifex/txcommon/management/commands/txmakemessages.py
3
3273
import os import glob from django.core.management.base import CommandError, BaseCommand from optparse import make_option from django.core.management.commands.makemessages import (make_messages, handle_extensions) class Command(BaseCommand): option_list = BaseCommand.option_list + ( make_option('--locale', '-l', default=None, dest='locale', help='Creates or updates the message files only for the given locale (e.g. pt_BR).'), make_option('--domain', '-d', default='django', dest='domain', help='The domain of the message files (default: "django").'), make_option('--all', '-a', action='store_true', dest='all', default=False, help='Reexamines all source code and templates for new translation strings and updates all message files for all available languages.'), make_option('--extension', '-e', dest='extensions', help='The file extension(s) to examine (default: ".html", separate multiple extensions with commas, or use -e multiple times)', action='append'), ) help = "Runs over the entire source tree of the current directory and pulls out all strings marked for translation. It creates (or updates) a message file in the 'locale' directory of Transifex." 
requires_model_validation = False can_import_settings = False def handle(self, *args, **options): if len(args) != 0: raise CommandError("Command doesn't accept any arguments") locale = options.get('locale') domain = options.get('domain') verbosity = int(options.get('verbosity')) process_all = options.get('all') extensions = options.get('extensions') or ['html'] if domain == 'djangojs': extensions = [] else: extensions = handle_extensions(extensions) if '.js' in extensions: raise CommandError("JavaScript files should be examined by using the special 'djangojs' domain only.") # The hacking part is here if process_all: if os.path.isdir(os.path.join('conf', 'locale')): localedir = os.path.abspath(os.path.join('conf', 'locale')) elif os.path.isdir('locale'): localedir = os.path.abspath('locale') else: raise CommandError("This script should be run from the Transifex project tree.") # Only for directories under the locale dir, make_messages locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % localedir)) for locale_dir in locale_dirs: locale = os.path.basename(locale_dir) make_messages(locale, domain, verbosity, False, extensions) else: make_messages(locale, domain, verbosity, process_all, extensions) # Backwards compatibility # http://code.djangoproject.com/changeset/9110 if not [opt for opt in Command.option_list if opt.dest=='verbosity']: Command.option_list += ( make_option('--verbosity', '-v', action="store", dest="verbosity", default='1', type='choice', choices=['0', '1', '2'], help="Verbosity level; 0=minimal output, 1=normal output, 2=all output"), )
gpl-2.0
5t111111/markdown-preview.vim
markdownpreview_lib/markdown/inlinepatterns.py
46
16802
""" INLINE PATTERNS ============================================================================= Inline patterns such as *emphasis* are handled by means of auxiliary objects, one per pattern. Pattern objects must be instances of classes that extend markdown.Pattern. Each pattern object uses a single regular expression and needs support the following methods: pattern.getCompiledRegExp() # returns a regular expression pattern.handleMatch(m) # takes a match object and returns # an ElementTree element or just plain text All of python markdown's built-in patterns subclass from Pattern, but you can add additional patterns that don't. Also note that all the regular expressions used by inline must capture the whole block. For this reason, they all start with '^(.*)' and end with '(.*)!'. In case with built-in expression Pattern takes care of adding the "^(.*)" and "(.*)!". Finally, the order in which regular expressions are applied is very important - e.g. if we first replace http://.../ links with <a> tags and _then_ try to replace inline html, we would end up with a mess. So, we apply the expressions in the following order: * escape and backticks have to go before everything else, so that we can preempt any markdown patterns by escaping them. * then we handle auto-links (must be done before inline html) * then we handle inline HTML. At this point we will simply replace all inline HTML strings with a placeholder and add the actual HTML to a hash. * then inline images (must be done before links) * then bracketed links, first regular then reference-style * finally we apply strong and emphasis """ from __future__ import absolute_import from __future__ import unicode_literals from . import util from . 
import odict import re try: from urllib.parse import urlparse, urlunparse except ImportError: from urlparse import urlparse, urlunparse try: from html import entities except ImportError: import htmlentitydefs as entities def build_inlinepatterns(md_instance, **kwargs): """ Build the default set of inline patterns for Markdown. """ inlinePatterns = odict.OrderedDict() inlinePatterns["backtick"] = BacktickPattern(BACKTICK_RE) inlinePatterns["escape"] = EscapePattern(ESCAPE_RE, md_instance) inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance) inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance) inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance) inlinePatterns["image_reference"] = \ ImageReferencePattern(IMAGE_REFERENCE_RE, md_instance) inlinePatterns["short_reference"] = \ ReferencePattern(SHORT_REF_RE, md_instance) inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance) inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance) inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br') if md_instance.safeMode != 'escape': inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance) inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance) inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE) inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'strong,em') inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong') inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em') if md_instance.smart_emphasis: inlinePatterns["emphasis2"] = SimpleTagPattern(SMART_EMPHASIS_RE, 'em') else: inlinePatterns["emphasis2"] = SimpleTagPattern(EMPHASIS_2_RE, 'em') return inlinePatterns """ The actual regular expressions for patterns ----------------------------------------------------------------------------- """ NOBRACKET = r'[^\]\[]*' BRK = ( r'\[(' + (NOBRACKET + r'(\[')*6 + (NOBRACKET+ r'\])*')*6 + NOBRACKET + r')\]' ) NOIMG = r'(?<!\!)' BACKTICK_RE = 
r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")`` ESCAPE_RE = r'\\(.)' # \< EMPHASIS_RE = r'(\*)([^\*]+)\2' # *emphasis* STRONG_RE = r'(\*{2}|_{2})(.+?)\2' # **strong** STRONG_EM_RE = r'(\*{3}|_{3})(.+?)\2' # ***strong*** SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\2(?!\w)' # _smart_emphasis_ EMPHASIS_2_RE = r'(_)(.+?)\2' # _emphasis_ LINK_RE = NOIMG + BRK + \ r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)''' # [text](url) or [text](<url>) or [text](url "title") IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)' # ![alttxt](http://x.com/) or ![alttxt](<http://x.com/>) REFERENCE_RE = NOIMG + BRK+ r'\s?\[([^\]]*)\]' # [Google][3] SHORT_REF_RE = NOIMG + r'\[([^\]]+)\]' # [Google] IMAGE_REFERENCE_RE = r'\!' + BRK + '\s?\[([^\]]*)\]' # ![alt text][2] NOT_STRONG_RE = r'((^| )(\*|_)( |$))' # stand-alone * or _ AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>' # <http://www.123.com> AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <me@example.com> HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...> ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &amp; LINE_BREAK_RE = r' \n' # two spaces at end of line def dequote(string): """Remove quotes from around a string.""" if ( ( string.startswith('"') and string.endswith('"')) or (string.startswith("'") and string.endswith("'")) ): return string[1:-1] else: return string ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123} def handleAttributes(text, parent): """Set values of an element based on attribute definitions ({@id=123}).""" def attributeCallback(match): parent.set(match.group(1), match.group(2).replace('\n', ' ')) return ATTR_RE.sub(attributeCallback, text) """ The pattern classes ----------------------------------------------------------------------------- """ class Pattern(object): """Base class that inline patterns subclass. """ def __init__(self, pattern, markdown_instance=None): """ Create an instant of an inline pattern. 
Keyword arguments: * pattern: A regular expression that matches a pattern """ self.pattern = pattern self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, re.DOTALL | re.UNICODE) # Api for Markdown to pass safe_mode into instance self.safe_mode = False if markdown_instance: self.markdown = markdown_instance def getCompiledRegExp(self): """ Return a compiled regular expression. """ return self.compiled_re def handleMatch(self, m): """Return a ElementTree element from the given match. Subclasses should override this method. Keyword arguments: * m: A re match object containing a match of the pattern. """ pass def type(self): """ Return class name, to define pattern type """ return self.__class__.__name__ def unescape(self, text): """ Return unescaped text given text with an inline placeholder. """ try: stash = self.markdown.treeprocessors['inline'].stashed_nodes except KeyError: return text def itertext(el): ' Reimplement Element.itertext for older python versions ' tag = el.tag if not isinstance(tag, util.string_type) and tag is not None: return if el.text: yield el.text for e in el: for s in itertext(e): yield s if e.tail: yield e.tail def get_stash(m): id = m.group(1) if id in stash: value = stash.get(id) if isinstance(value, util.string_type): return value else: # An etree Element - return text content only return ''.join(itertext(value)) return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text) class SimpleTextPattern(Pattern): """ Return a simple text of group(2) of a Pattern. """ def handleMatch(self, m): text = m.group(2) if text == util.INLINE_PLACEHOLDER_PREFIX: return None return text class EscapePattern(Pattern): """ Return an escaped character. """ def handleMatch(self, m): char = m.group(2) if char in self.markdown.ESCAPED_CHARS: return '%s%s%s' % (util.STX, ord(char), util.ETX) else: return '\\%s' % char class SimpleTagPattern(Pattern): """ Return element of type `tag` with a text attribute of group(3) of a Pattern. 
""" def __init__ (self, pattern, tag): Pattern.__init__(self, pattern) self.tag = tag def handleMatch(self, m): el = util.etree.Element(self.tag) el.text = m.group(3) return el class SubstituteTagPattern(SimpleTagPattern): """ Return an element of type `tag` with no children. """ def handleMatch (self, m): return util.etree.Element(self.tag) class BacktickPattern(Pattern): """ Return a `<code>` element containing the matching text. """ def __init__ (self, pattern): Pattern.__init__(self, pattern) self.tag = "code" def handleMatch(self, m): el = util.etree.Element(self.tag) el.text = util.AtomicString(m.group(3).strip()) return el class DoubleTagPattern(SimpleTagPattern): """Return a ElementTree element nested in tag2 nested in tag1. Useful for strong emphasis etc. """ def handleMatch(self, m): tag1, tag2 = self.tag.split(",") el1 = util.etree.Element(tag1) el2 = util.etree.SubElement(el1, tag2) el2.text = m.group(3) return el1 class HtmlPattern(Pattern): """ Store raw inline html and return a placeholder. """ def handleMatch (self, m): rawhtml = self.unescape(m.group(2)) place_holder = self.markdown.htmlStash.store(rawhtml) return place_holder def unescape(self, text): """ Return unescaped text given text with an inline placeholder. """ try: stash = self.markdown.treeprocessors['inline'].stashed_nodes except KeyError: return text def get_stash(m): id = m.group(1) value = stash.get(id) if value is not None: try: return self.markdown.serializer(value) except: return '\%s' % value return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text) class LinkPattern(Pattern): """ Return a link element from the given match. 
""" def handleMatch(self, m): el = util.etree.Element("a") el.text = m.group(2) title = m.group(13) href = m.group(9) if href: if href[0] == "<": href = href[1:-1] el.set("href", self.sanitize_url(self.unescape(href.strip()))) else: el.set("href", "") if title: title = dequote(self.unescape(title)) el.set("title", title) return el def sanitize_url(self, url): """ Sanitize a url against xss attacks in "safe_mode". Rather than specifically blacklisting `javascript:alert("XSS")` and all its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known safe url formats. Most urls contain a network location, however some are known not to (i.e.: mailto links). Script urls do not contain a location. Additionally, for `javascript:...`, the scheme would be "javascript" but some aliases will appear to `urlparse()` to have no scheme. On top of that relative links (i.e.: "foo/bar.html") have no scheme. Therefore we must check "path", "parameters", "query" and "fragment" for any literal colons. We don't check "scheme" for colons because it *should* never have any and "netloc" must allow the form: `username:password@host:port`. """ url = url.replace(' ', '%20') if not self.markdown.safeMode: # Return immediately bipassing parsing. return url try: scheme, netloc, path, params, query, fragment = url = urlparse(url) except ValueError: # Bad url - so bad it couldn't be parsed. return '' locless_schemes = ['', 'mailto', 'news'] allowed_schemes = locless_schemes + ['http', 'https', 'ftp', 'ftps'] if scheme not in allowed_schemes: # Not a known (allowed) scheme. Not safe. return '' if netloc == '' and scheme not in locless_schemes: # This should not happen. Treat as suspect. return '' for part in url[2:]: if ":" in part: # A colon in "path", "parameters", "query" or "fragment" is suspect. return '' # Url passes all tests. Return url as-is. return urlunparse(url) class ImagePattern(LinkPattern): """ Return a img element from the given match. 
""" def handleMatch(self, m): el = util.etree.Element("img") src_parts = m.group(9).split() if src_parts: src = src_parts[0] if src[0] == "<" and src[-1] == ">": src = src[1:-1] el.set('src', self.sanitize_url(self.unescape(src))) else: el.set('src', "") if len(src_parts) > 1: el.set('title', dequote(self.unescape(" ".join(src_parts[1:])))) if self.markdown.enable_attributes: truealt = handleAttributes(m.group(2), el) else: truealt = m.group(2) el.set('alt', self.unescape(truealt)) return el class ReferencePattern(LinkPattern): """ Match to a stored reference and return link element. """ NEWLINE_CLEANUP_RE = re.compile(r'[ ]?\n', re.MULTILINE) def handleMatch(self, m): try: id = m.group(9).lower() except IndexError: id = None if not id: # if we got something like "[Google][]" or "[Goggle]" # we'll use "google" as the id id = m.group(2).lower() # Clean up linebreaks in id id = self.NEWLINE_CLEANUP_RE.sub(' ', id) if not id in self.markdown.references: # ignore undefined refs return None href, title = self.markdown.references[id] text = m.group(2) return self.makeTag(href, title, text) def makeTag(self, href, title, text): el = util.etree.Element('a') el.set('href', self.sanitize_url(href)) if title: el.set('title', title) el.text = text return el class ImageReferencePattern(ReferencePattern): """ Match to a stored reference and return img element. """ def makeTag(self, href, title, text): el = util.etree.Element("img") el.set("src", self.sanitize_url(href)) if title: el.set("title", title) if self.markdown.enable_attributes: text = handleAttributes(text, el) el.set("alt", self.unescape(text)) return el class AutolinkPattern(Pattern): """ Return a link Element given an autolink (`<http://example/com>`). """ def handleMatch(self, m): el = util.etree.Element("a") el.set('href', self.unescape(m.group(2))) el.text = util.AtomicString(m.group(2)) return el class AutomailPattern(Pattern): """ Return a mailto link Element given an automail link (`<foo@example.com>`). 
""" def handleMatch(self, m): el = util.etree.Element('a') email = self.unescape(m.group(2)) if email.startswith("mailto:"): email = email[len("mailto:"):] def codepoint2name(code): """Return entity definition by code, or the code if not defined.""" entity = entities.codepoint2name.get(code) if entity: return "%s%s;" % (util.AMP_SUBSTITUTE, entity) else: return "%s#%d;" % (util.AMP_SUBSTITUTE, code) letters = [codepoint2name(ord(letter)) for letter in email] el.text = util.AtomicString(''.join(letters)) mailto = "mailto:" + email mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' % ord(letter) for letter in mailto]) el.set('href', mailto) return el
lgpl-2.1
leeseulstack/openstack
neutron/debug/commands.py
4
5254
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cliff import lister
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as client
from neutronclient.neutron.v2_0 import port

from neutron.i18n import _LI
from neutron.openstack.common import log as logging


class ProbeCommand(client.NeutronCommand):
    """Base class for probe commands; exposes the shared debug agent."""

    log = logging.getLogger(__name__ + '.ProbeCommand')

    def get_debug_agent(self):
        # The debug agent is attached to the CLI application object.
        return self.app.debug_agent

    def run(self, parsed_args):
        # Lazy %-style logging args: the message is only rendered when
        # the record is actually emitted.
        self.log.debug('run(%s)', parsed_args)
        self.log.info(_('Unimplemented commands'))


class CreateProbe(ProbeCommand):
    """Create probe port and interface, then plug it in."""

    log = logging.getLogger(__name__ + '.CreateProbe')

    def get_parser(self, prog_name):
        parser = super(CreateProbe, self).get_parser(prog_name)
        parser.add_argument(
            'id', metavar='network_id',
            help=_('ID of network to probe'))
        parser.add_argument(
            '--device-owner',
            default='network', choices=['network', 'compute'],
            help=_('Owner type of the device: network/compute'))
        return parser

    def run(self, parsed_args):
        # Idiom fix: pass parsed_args as a lazy logging argument instead
        # of eager %-interpolation.
        self.log.debug('run(%s)', parsed_args)
        debug_agent = self.get_debug_agent()
        probe_port = debug_agent.create_probe(parsed_args.id,
                                              parsed_args.device_owner)
        self.log.info(_('Probe created : %s '), probe_port.id)


class DeleteProbe(ProbeCommand):
    """Delete probe - delete port then uplug."""

    log = logging.getLogger(__name__ + '.DeleteProbe')

    def get_parser(self, prog_name):
        parser = super(DeleteProbe, self).get_parser(prog_name)
        parser.add_argument(
            'id', metavar='port_id',
            help=_('ID of probe port to delete'))
        return parser

    def run(self, parsed_args):
        self.log.debug('run(%s)', parsed_args)
        debug_agent = self.get_debug_agent()
        debug_agent.delete_probe(parsed_args.id)
        self.log.info(_('Probe %s deleted'), parsed_args.id)


class ListProbe(client.NeutronCommand, lister.Lister):
    """List probes."""

    log = logging.getLogger(__name__ + '.ListProbe')
    _formatters = {'fixed_ips': port._format_fixed_ips, }

    def get_debug_agent(self):
        return self.app.debug_agent

    def get_data(self, parsed_args):
        debug_agent = self.get_debug_agent()
        info = debug_agent.list_probes()
        # Column order is derived from the first probe's keys (all probes
        # are assumed to share the same keys).
        columns = sorted(info[0].keys()) if info else []
        return (columns, (utils.get_item_properties(
            s, columns, formatters=self._formatters, )
            for s in info), )


class ClearProbe(ProbeCommand):
    """Clear All probes."""

    log = logging.getLogger(__name__ + '.ClearProbe')

    def run(self, parsed_args):
        self.log.debug('run(%s)', parsed_args)
        debug_agent = self.get_debug_agent()
        cleared_probes_count = debug_agent.clear_probes()
        # BUG FIX: the count was %-interpolated into the translated string
        # eagerly; pass it as a lazy logging argument instead.
        self.log.info(_LI('%d probe(s) deleted'), cleared_probes_count)


class ExecProbe(ProbeCommand):
    """Exec commands on the namespace of the probe."""

    log = logging.getLogger(__name__ + '.ExecProbe')

    def get_parser(self, prog_name):
        parser = super(ExecProbe, self).get_parser(prog_name)
        parser.add_argument(
            'id', metavar='port_id',
            help=_('ID of probe port to execute command'))
        parser.add_argument(
            'command', metavar='command',
            nargs='?',
            default=None,
            help=_('Command to execute'))
        return parser

    def run(self, parsed_args):
        self.log.debug('run(%s)', parsed_args)
        debug_agent = self.get_debug_agent()
        result = debug_agent.exec_command(parsed_args.id, parsed_args.command)
        self.app.stdout.write(result + '\n')


class PingAll(ProbeCommand):
    """Ping all fixed_ip."""

    # BUG FIX: the logger name was copy-pasted from ExecProbe
    # ('.ExecProbe'); it now correctly identifies this command.
    log = logging.getLogger(__name__ + '.PingAll')

    def get_parser(self, prog_name):
        parser = super(PingAll, self).get_parser(prog_name)
        parser.add_argument(
            '--timeout', metavar='<timeout>',
            default=10,
            help=_('Ping timeout'))
        parser.add_argument(
            '--id', metavar='network_id',
            default=None,
            help=_('ID of network'))
        return parser

    def run(self, parsed_args):
        self.log.debug('run(%s)', parsed_args)
        debug_agent = self.get_debug_agent()
        result = debug_agent.ping_all(parsed_args.id,
                                      timeout=parsed_args.timeout)
        self.app.stdout.write(result + '\n')
apache-2.0
cryptobanana/ansible
lib/ansible/modules/identity/cyberark/cyberark_authentication.py
83
10846
#!/usr/bin/python # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cyberark_authentication short_description: Module for CyberArk Vault Authentication using PAS Web Services SDK author: Edward Nunez @ CyberArk BizDev (@enunez-cyberark, @cyberark-bizdev, @erasmix) version_added: 2.4 description: - Authenticates to CyberArk Vault using Privileged Account Security Web Services SDK and creates a session fact that can be used by other modules. It returns an Ansible fact called I(cyberark_session). Every module can use this fact as C(cyberark_session) parameter. options: state: default: present choices: [present, absent] description: - Specifies if an authentication logon/logoff and a cyberark_session should be added/removed. username: description: - The name of the user who will logon to the Vault. password: description: - The password of the user. new_password: description: - The new password of the user. This parameter is optional, and enables you to change a password. api_base_url: description: - A string containing the base URL of the server hosting CyberArk's Privileged Account Security Web Services SDK. validate_certs: type: bool default: 'yes' description: - If C(false), SSL certificates will not be validated. This should only set to C(false) used on personally controlled sites using self-signed certificates. use_shared_logon_authentication: type: bool default: 'no' description: - Whether or not Shared Logon Authentication will be used. use_radius_authentication: type: bool default: 'no' description: - Whether or not users will be authenticated via a RADIUS server. Valid values are true/false. 
cyberark_session: description: - Dictionary set by a CyberArk authentication containing the different values to perform actions on a logged-on CyberArk session. ''' EXAMPLES = ''' - name: Logon to CyberArk Vault using PAS Web Services SDK - use_shared_logon_authentication cyberark_authentication: api_base_url: "{{ web_services_base_url }}" use_shared_logon_authentication: yes - name: Logon to CyberArk Vault using PAS Web Services SDK - Not use_shared_logon_authentication cyberark_authentication: api_base_url: "{{ web_services_base_url }}" username: "{{ password_object.password }}" password: "{{ password_object.passprops.username }}" use_shared_logon_authentication: no - name: Logoff from CyberArk Vault cyberark_authentication: state: absent cyberark_session: "{{ cyberark_session }}" ''' RETURN = ''' cyberark_session: description: Authentication facts. returned: success type: dict sample: api_base_url: description: Base URL for API calls. Returned in the cyberark_session, so it can be used in subsequent calls. type: string returned: always token: description: The token that identifies the session, encoded in BASE 64. type: string returned: always use_shared_logon_authentication: description: Whether or not Shared Logon Authentication was used to establish the session. type: bool returned: always validate_certs: description: Whether or not SSL certificates should be validated. 
type: bool returned: always ''' from ansible.module_utils._text import to_text from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import open_url from ansible.module_utils.six.moves.urllib.error import HTTPError import json try: import httplib except ImportError: # Python 3 import http.client as httplib def processAuthentication(module): # Getting parameters from module api_base_url = module.params["api_base_url"] validate_certs = module.params["validate_certs"] username = module.params["username"] password = module.params["password"] new_password = module.params["new_password"] use_shared_logon_authentication = module.params[ "use_shared_logon_authentication"] use_radius_authentication = module.params["use_radius_authentication"] state = module.params["state"] cyberark_session = module.params["cyberark_session"] # if in check mode it will not perform password changes if module.check_mode and new_password is not None: new_password = None # Defining initial values for open_url call headers = {'Content-Type': 'application/json'} payload = "" if state == "present": # Logon Action # Different end_points based on the use of shared logon authentication if use_shared_logon_authentication: end_point = "/PasswordVault/WebServices/auth/Shared/RestfulAuthenticationService.svc/Logon" else: end_point = "/PasswordVault/WebServices/auth/Cyberark/CyberArkAuthenticationService.svc/Logon" # The payload will contain username, password # and optionally use_radius_authentication and new_password payload_dict = {"username": username, "password": password} if use_radius_authentication: payload_dict["useRadiusAuthentication"] = use_radius_authentication if new_password is not None: payload_dict["newPassword"] = new_password payload = json.dumps(payload_dict) else: # Logoff Action # Get values from cyberark_session already established api_base_url = cyberark_session["api_base_url"] validate_certs = cyberark_session["validate_certs"] 
use_shared_logon_authentication = cyberark_session[ "use_shared_logon_authentication"] headers["Authorization"] = cyberark_session["token"] # Different end_points based on the use of shared logon authentication if use_shared_logon_authentication: end_point = "/PasswordVault/WebServices/auth/Shared/RestfulAuthenticationService.svc/Logoff" else: end_point = "/PasswordVault/WebServices/auth/Cyberark/CyberArkAuthenticationService.svc/Logoff" result = None changed = False response = None try: response = open_url( api_base_url + end_point, method="POST", headers=headers, data=payload, validate_certs=validate_certs) except (HTTPError, httplib.HTTPException) as http_exception: module.fail_json( msg=("Error while performing authentication." "Please validate parameters provided, and ability to logon to CyberArk." "\n*** end_point=%s%s\n ==> %s" % (api_base_url, end_point, to_text(http_exception))), payload=payload, headers=headers, status_code=http_exception.code) except Exception as unknown_exception: module.fail_json( msg=("Unknown error while performing authentication." 
"\n*** end_point=%s%s\n%s" % (api_base_url, end_point, to_text(unknown_exception))), payload=payload, headers=headers, status_code=-1) if response.getcode() == 200: # Success if state == "present": # Logon Action # Result token from REST Api uses a different key based # the use of shared logon authentication token = None try: if use_shared_logon_authentication: token = json.loads(response.read())["LogonResult"] else: token = json.loads(response.read())["CyberArkLogonResult"] except Exception as e: module.fail_json( msg="Error obtaining token\n%s" % (to_text(e)), payload=payload, headers=headers, status_code=-1) # Preparing result of the module result = { "cyberark_session": { "token": token, "api_base_url": api_base_url, "validate_certs": validate_certs, "use_shared_logon_authentication": use_shared_logon_authentication}, } if new_password is not None: # Only marks change if new_password was received resulting # in a password change changed = True else: # Logoff Action clears cyberark_session result = { "cyberark_session": {} } return (changed, result, response.getcode()) else: module.fail_json( msg="error in end_point=>" + end_point, headers=headers) def main(): fields = { "api_base_url": {"type": "str"}, "validate_certs": {"type": "bool", "default": "true"}, "username": {"type": "str"}, "password": {"type": "str", "no_log": True}, "new_password": {"type": "str", "no_log": True}, "use_shared_logon_authentication": {"default": False, "type": "bool"}, "use_radius_authentication": {"default": False, "type": "bool"}, "state": {"type": "str", "choices": ["present", "absent"], "default": "present"}, "cyberark_session": {"type": "dict"}, } mutually_exclusive = [ ["use_shared_logon_authentication", "use_radius_authentication"], ["use_shared_logon_authentication", "new_password"], ["api_base_url", "cyberark_session"], ["cyberark_session", "username", "use_shared_logon_authentication"] ] required_if = [ ("state", "present", ["api_base_url"]), ("state", "absent", 
["cyberark_session"]) ] required_together = [ ["username", "password"] ] module = AnsibleModule( argument_spec=fields, mutually_exclusive=mutually_exclusive, required_if=required_if, required_together=required_together, supports_check_mode=True) (changed, result, status_code) = processAuthentication(module) module.exit_json( changed=changed, ansible_facts=result, status_code=status_code) if __name__ == '__main__': main()
gpl-3.0
dtuchsch/rpi-linux-preempt_rt
tools/perf/scripts/python/futex-contention.py
1997
1508
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
#  http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] +
                '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from Util import *

# Per-thread state while a thread is blocked in FUTEX_WAIT.
thread_thislock = {}   # tid -> futex uaddr currently waited on
thread_blocktime = {}  # tid -> timestamp (ns) when the wait began

lock_waits = {}     # long-lived stats on (tid,lock) blockage elapsed time
# BUG FIX: process_names was initialized twice; keep a single definition.
process_names = {}  # long-lived pid-to-execname mapping


def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    cmd = op & FUTEX_CMD_MASK
    if cmd != FUTEX_WAIT:
        return  # we don't care about originators of WAKE events

    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)


def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
                             nr, ret):
    # BUG FIX: dict.has_key() was removed in Python 3; 'in' works on both.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]


def trace_begin():
    # print() with a single argument is valid on Python 2 and 3 alike.
    print("Press control+C to stop and show the summary")


def trace_end():
    for (tid, lock) in lock_waits:
        # Renamed to avoid shadowing the min/max builtins.
        wait_min, wait_max, wait_avg, wait_count = lock_waits[tid, lock]
        print("%s[%d] lock %x contended %d times, %d avg ns" %
              (process_names[tid], tid, lock, wait_count, wait_avg))
gpl-2.0
anryko/ansible
test/units/modules/storage/netapp/test_na_elementsw_cluster_snmp.py
38
7481
# (c) 2019, NetApp, Inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ''' unit test for Ansible module: na_elementsw_cluster_snmp.py ''' from __future__ import print_function import json import pytest from units.compat import unittest from units.compat.mock import patch from ansible.module_utils import basic from ansible.module_utils._text import to_bytes import ansible.module_utils.netapp as netapp_utils if not netapp_utils.has_sf_sdk(): pytestmark = pytest.mark.skip('skipping as missing required SolidFire Python SDK') from ansible.modules.storage.netapp.na_elementsw_cluster_snmp \ import ElementSWClusterSnmp as my_module # module under test def set_module_args(args): """prepare arguments so that they will be picked up during module creation""" args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access class AnsibleExitJson(Exception): """Exception class to be raised by module.exit_json and caught by the test case""" pass class AnsibleFailJson(Exception): """Exception class to be raised by module.fail_json and caught by the test case""" pass def exit_json(*args, **kwargs): # pylint: disable=unused-argument """function to patch over exit_json; package return data into an exception""" if 'changed' not in kwargs: kwargs['changed'] = False raise AnsibleExitJson(kwargs) def fail_json(*args, **kwargs): # pylint: disable=unused-argument """function to patch over fail_json; package return data into an exception""" kwargs['failed'] = True raise AnsibleFailJson(kwargs) GET_ERROR = 'some_error_in_get_snmp_info' class MockSFConnection(object): ''' mock connection to ElementSW host ''' class Bunch(object): # pylint: disable=too-few-public-methods ''' create object with arbitrary attributes ''' def __init__(self, **kw): ''' called with (k1=v1, k2=v2), creates obj.k1, obj.k2 with values v1, v2 ''' setattr(self, '__dict__', kw) def __init__(self, force_error=False, 
where=None): ''' save arguments ''' self.force_error = force_error self.where = where class TestMyModule(unittest.TestCase): ''' a group of related Unit Tests ''' def setUp(self): self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) def set_default_args(self): return dict({ 'hostname': '10.117.78.131', 'username': 'admin', 'password': 'netapp1!', }) @patch('ansible.module_utils.netapp.create_sf_connection') def test_module_fail_when_required_args_missing(self, mock_create_sf_connection): ''' required arguments are reported as errors ''' with pytest.raises(AnsibleFailJson) as exc: set_module_args({}) my_module() print('Info: %s' % exc.value) @patch('ansible.module_utils.netapp.create_sf_connection') def test_ensure_enable_snmp_called(self, mock_create_sf_connection): ''' test if enable_snmp is called ''' module_args = {} module_args.update(self.set_default_args()) module_args.update({'snmp_v3_enabled': True, 'state': 'present'}) module_args.update({'usm_users': {'access': 'rouser', 'name': 'TestUser', 'password': 'ChangeMe@123', 'passphrase': 'ChangeMe@123', 'secLevel': 'auth', }}) module_args.update({'networks': {'access': 'ro', 'cidr': 24, 'community': 'TestNetwork', 'network': '192.168.0.1', }}) set_module_args(module_args) my_obj = my_module() with pytest.raises(AnsibleExitJson) as exc: my_obj.apply() print('Info: test_if_enable_snmp_called: %s' % repr(exc.value)) assert exc.value @patch('ansible.module_utils.netapp.create_sf_connection') def test_ensure_configure_snmp_from_version_3_TO_version_2_called(self, mock_create_sf_connection): ''' test if configure snmp from version_3 to version_2''' module_args = {} module_args.update(self.set_default_args()) module_args.update({'snmp_v3_enabled': False, 'state': 'present'}) module_args.update({'usm_users': {'access': 'rouser', 'name': 'TestUser', 'password': 'ChangeMe@123', 'passphrase': 
'ChangeMe@123', 'secLevel': 'auth', }}) module_args.update({'networks': {'access': 'ro', 'cidr': 24, 'community': 'TestNetwork', 'network': '192.168.0.1', }}) set_module_args(module_args) my_obj = my_module() with pytest.raises(AnsibleExitJson) as exc: my_obj.apply() print('Info: test_ensure_configure_snmp_from_version_3_TO_version_2_called: %s' % repr(exc.value)) assert exc.value @patch('ansible.module_utils.netapp.create_sf_connection') def test_ensure_configure_snmp_from_version_2_TO_version_3_called(self, mock_create_sf_connection): ''' test if configure snmp from version_2 to version_3''' module_args = {} module_args.update(self.set_default_args()) module_args.update({'snmp_v3_enabled': True, 'state': 'present'}) module_args.update({'usm_users': {'access': 'rouser', 'name': 'TestUser_sample', 'password': 'ChangeMe@123', 'passphrase': 'ChangeMe@123', 'secLevel': 'auth', }}) module_args.update({'networks': {'access': 'ro', 'cidr': 24, 'community': 'TestNetwork', 'network': '192.168.0.1', }}) set_module_args(module_args) my_obj = my_module() with pytest.raises(AnsibleExitJson) as exc: my_obj.apply() print('Info: test_ensure_configure_snmp_from_version_2_TO_version_3_called: %s' % repr(exc.value)) assert exc.value @patch('ansible.module_utils.netapp.create_sf_connection') def test_ensure_disable_snmp_called(self, mock_create_sf_connection): ''' test if disable_snmp is called ''' module_args = {} module_args.update(self.set_default_args()) module_args.update({'state': 'absent'}) set_module_args(module_args) my_obj = my_module() with pytest.raises(AnsibleExitJson) as exc: my_obj.apply() print('Info: test_if_disable_snmp_called: %s' % repr(exc.value)) assert exc.value
gpl-3.0
menegon/geonode
geonode/services/models.py
29
5369
import logging

from django.conf import settings
from django.db import models
from geoserver.catalog import FailedRequestError, Catalog

from geonode.base.models import ResourceBase
from geonode.services.enumerations import SERVICE_TYPES, SERVICE_METHODS, GXP_PTYPES
from geonode.layers.models import Layer
from django.utils.translation import ugettext_lazy as _
from django.db.models import signals
from geonode.people.enumerations import ROLE_VALUES
from geonode.security.models import remove_object_permissions

STATUS_VALUES = [
    'pending',
    'failed',
    'process'
]

logger = logging.getLogger("geonode.services")

"""
geonode.services
"""


class Service(ResourceBase):
    """A remote geospatial web service registered with this GeoNode."""

    type = models.CharField(max_length=4, choices=SERVICE_TYPES)
    method = models.CharField(max_length=1, choices=SERVICE_METHODS)
    # with service, version and request etc stripped off
    base_url = models.URLField(unique=True, db_index=True)
    version = models.CharField(max_length=10, null=True, blank=True)
    # Should force to slug?
    name = models.CharField(max_length=255, unique=True, db_index=True)
    description = models.CharField(max_length=255, null=True, blank=True)
    # NOTE(review): the positional ``False`` lands in URLField's first
    # positional slot (verbose_name) — looks like a leftover
    # ``verify_exists`` argument from an older Django; confirm before
    # touching, since changing it alters the migration state.
    online_resource = models.URLField(False, null=True, blank=True)
    fees = models.CharField(max_length=1000, null=True, blank=True)
    access_constraints = models.CharField(max_length=255, null=True, blank=True)
    connection_params = models.TextField(null=True, blank=True)
    username = models.CharField(max_length=50, null=True, blank=True)
    password = models.CharField(max_length=50, null=True, blank=True)
    api_key = models.CharField(max_length=255, null=True, blank=True)
    workspace_ref = models.URLField(False, null=True, blank=True)
    store_ref = models.URLField(null=True, blank=True)
    resources_ref = models.URLField(null=True, blank=True)
    profiles = models.ManyToManyField(
        settings.AUTH_USER_MODEL, through='ServiceProfileRole')
    created = models.DateTimeField(auto_now_add=True)
    last_updated = models.DateTimeField(auto_now=True)
    first_noanswer = models.DateTimeField(null=True, blank=True)
    noanswer_retries = models.PositiveIntegerField(null=True, blank=True)
    external_id = models.IntegerField(null=True, blank=True)
    parent = models.ForeignKey(
        'services.Service', null=True, blank=True, related_name='service_set')

    # Supported Capabilities

    def __unicode__(self):
        return self.name

    @property
    def ptype(self):
        # Return the gxp ptype that should be used to display layers
        return GXP_PTYPES[self.type]

    def get_absolute_url(self):
        return '/services/%i' % self.id

    class Meta(ResourceBase.Meta):
        pass


class ServiceProfileRole(models.Model):
    """Intermediate model binding Profiles to Services with a role."""

    profiles = models.ForeignKey(settings.AUTH_USER_MODEL)
    service = models.ForeignKey(Service)
    role = models.CharField(choices=ROLE_VALUES, max_length=255, help_text=_(
        'function performed by the responsible party'))


class ServiceLayer(models.Model):
    """A layer published by a registered remote Service."""

    service = models.ForeignKey(Service)
    layer = models.ForeignKey(Layer, null=True)
    typename = models.CharField(_("Layer Name"), max_length=255)
    title = models.CharField(_("Layer Title"), max_length=512)
    description = models.TextField(_("Layer Description"), null=True)
    styles = models.TextField(_("Layer Styles"), null=True)


class WebServiceHarvestLayersJob(models.Model):
    """Queued job that harvests the layers of a registered service."""

    service = models.ForeignKey(Service, blank=False, null=False, unique=True)
    status = models.CharField(choices=[(
        x, x) for x in STATUS_VALUES], max_length=10, blank=False,
        null=False, default='pending')


class WebServiceRegistrationJob(models.Model):
    """Queued job that registers a remote service given its base URL."""

    base_url = models.URLField(unique=True)
    type = models.CharField(max_length=4, choices=SERVICE_TYPES)
    status = models.CharField(choices=[(
        x, x) for x in STATUS_VALUES], max_length=10, blank=False,
        null=False, default='pending')


def post_save_service(instance, sender, created, **kwargs):
    # Freshly registered services start with default permissions.
    if created:
        instance.set_default_permissions()


def pre_delete_service(instance, sender, **kwargs):
    # Remove the layers originating from this service first.
    for layer in instance.layer_set.all():
        layer.delete()
    # if instance.method == 'H':
    #     gn = Layer.objects.gn_catalog
    #     gn.control_harvesting_task('stop', [instance.external_id])
    #     gn.control_harvesting_task('remove', [instance.external_id])
    if instance.method == 'C':
        # Cascaded services also keep a WMS store in GeoServer; drop it.
        try:
            ogc_user = settings.OGC_SERVER['default']['USER']
            ogc_password = settings.OGC_SERVER['default']['PASSWORD']
            catalog = Catalog(
                settings.OGC_SERVER['default']['LOCATION'] + "rest",
                ogc_user, ogc_password)
            cascade_store = catalog.get_store(
                instance.name, settings.CASCADE_WORKSPACE)
            catalog.delete(cascade_store, recurse=True)
        except FailedRequestError:
            logger.error(
                "Could not delete cascading WMS Store for %s - maybe already gone" % instance.name)
    remove_object_permissions(instance.get_self_resource())


signals.pre_delete.connect(pre_delete_service, sender=Service)
signals.post_save.connect(post_save_service, sender=Service)
gpl-3.0
TheTypoMaster/chromium-crosswalk
tools/perf/page_sets/alexa1-10000.py
35
1340
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import json
import os

from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story

__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))

# Generated on 2013-09-03 13:59:53.459117 by rmistry using
# create_page_set.py.
_TOP_10000_ALEXA_FILE = os.path.join(__location__, 'alexa1-10000-urls.json')


class Alexa1To10000Page(page_module.Page):
  """One Alexa top-10000 page, exercised with a single page scroll."""

  def __init__(self, url, page_set):
    super(Alexa1To10000Page, self).__init__(
        url=url, page_set=page_set,
        shared_page_state_class=shared_page_state.SharedDesktopPageState)

  def RunPageInteractions(self, action_runner):
    # Wrap the scroll in a gesture interaction record so the benchmark
    # can attribute the work to it.
    with action_runner.CreateGestureInteraction('ScrollAction'):
      action_runner.ScrollPage()


class Alexa1To10000PageSet(story.StorySet):
  """ Top 1-10000 Alexa global.

  Generated on 2013-09-03 13:59:53.459117 by rmistry using
  create_page_set.py.
  """

  def __init__(self):
    super(Alexa1To10000PageSet, self).__init__()

    with open(_TOP_10000_ALEXA_FILE) as url_file:
      urls = json.load(url_file)
    for page_url in urls:
      self.AddStory(Alexa1To10000Page(page_url, self))
bsd-3-clause
shootsoft/practice
LeetCode/python/031-060/054-spiral-matrix/spiral.py
1
1308
class Solution:
    # @param matrix, a list of lists of integers
    # @return a list of integers
    def spiralOrder(self, matrix):
        """Return the elements of *matrix* in clockwise spiral order.

        Replaces the original direction-state machine with the standard
        boundary-shrinking spiral: peel the outer ring (top row, right
        column, bottom row, left column), then move the four boundaries
        inward.  Handles empty matrices, single rows/columns and
        non-square shapes.  O(m*n) time, O(1) extra space.
        """
        result = []
        if not matrix or not matrix[0]:
            return result
        top, bottom = 0, len(matrix) - 1
        left, right = 0, len(matrix[0]) - 1
        while top <= bottom and left <= right:
            # Top edge, left -> right.
            for col in range(left, right + 1):
                result.append(matrix[top][col])
            top += 1
            # Right edge, top -> bottom.
            for row in range(top, bottom + 1):
                result.append(matrix[row][right])
            right -= 1
            # Bottom edge, right -> left (guard against a single
            # remaining row that was already consumed by the top edge).
            if top <= bottom:
                for col in range(right, left - 1, -1):
                    result.append(matrix[bottom][col])
                bottom -= 1
            # Left edge, bottom -> top (guard against a single
            # remaining column already consumed by the right edge).
            if left <= right:
                for row in range(bottom, top - 1, -1):
                    result.append(matrix[row][left])
                left += 1
        return result


s = Solution()
# print() form works on both Python 2 and 3 (original used the
# Python-2-only print statement).
print(s.spiralOrder([
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9]
]))
apache-2.0
ZellMechanik-Dresden/dclab
dclab/features/emodulus/pxcorr.py
1
4837
"""Pixelation correction definitions""" import numpy as np def corr_deform_with_area_um(area_um, px_um=0.34): """Deformation correction for area_um-deform data The contour in RT-DC measurements is computed on a pixelated grid. Due to sampling problems, the measured deformation is overestimated and must be corrected. The correction formula is described in :cite:`Herold2017`. Parameters ---------- area_um: float or ndarray Apparent (2D image) area in µm² of the event(s) px_um: float The detector pixel size in µm. Returns ------- deform_delta: float or ndarray Error of the deformation of the event(s) that must be subtracted from `deform`. deform_corr = deform - deform_delta """ # A triple-exponential decay can be used to correct for pixelation # for apparent cell areas between 10 and 1250µm². # For 99 different radii between 0.4 μm and 20 μm circular objects were # simulated on a pixel grid with the pixel resolution of 340 nm/pix. At # each radius 1000 random starting points were created and the # obtained contours were analyzed in the same fashion as RT-DC data. # A convex hull on the contour was used to calculate the size (as area) # and the deformation. # The pixel size correction `pxscale` takes into account the pixel size # in the pixelation correction formula. pxscale = (.34 / px_um)**2 offs = 0.0012 exp1 = 0.020 * np.exp(-area_um * pxscale / 7.1) exp2 = 0.010 * np.exp(-area_um * pxscale / 38.6) exp3 = 0.005 * np.exp(-area_um * pxscale / 296) delta = offs + exp1 + exp2 + exp3 return delta def corr_deform_with_volume(volume, px_um=0.34): """Deformation correction for volume-deform data The contour in RT-DC measurements is computed on a pixelated grid. Due to sampling problems, the measured deformation is overestimated and must be corrected. The correction is derived in scripts/pixelation_correction.py. Parameters ---------- volume: float or ndarray The "volume" feature (rotation of raw contour) [µm³] px_um: float The detector pixel size in µm. 
Returns ------- deform_delta: float or ndarray Error of the deformation of the event(s) that must be subtracted from `deform`. deform_corr = deform - deform_delta """ pxscalev = (.34 / px_um)**3 offs = 0.0013 exp1 = 0.0172 * np.exp(-volume * pxscalev / 40) exp2 = 0.0070 * np.exp(-volume * pxscalev / 450) exp3 = 0.0032 * np.exp(-volume * pxscalev / 6040) delta = offs + exp1 + exp2 + exp3 return delta def get_pixelation_delta_pair(feat1, feat2, data1, data2, px_um=0.34): """Convenience function that returns pixelation correction pair""" # determine feature that defines abscissa feat_absc = feat1 if feat1 in ["area_um", "volume"] else feat2 data_absc = data1 if feat_absc == feat1 else data2 # first compute all the possible pixelation offsets delt1 = get_pixelation_delta( feat_corr=feat1, feat_absc=feat_absc, data_absc=data_absc, px_um=px_um) delt2 = get_pixelation_delta( feat_corr=feat2, feat_absc=feat_absc, data_absc=data_absc, px_um=px_um) return delt1, delt2 def get_pixelation_delta(feat_corr, feat_absc, data_absc, px_um=0.34): """Convenience function for obtaining pixelation correction Parameters ---------- feat_corr: str Feature for which to compute the pixelation correction (e.g. "deform") feat_absc: str Feature with which to compute the correction (e.g. 
"area_um"); data_absc: ndarray or float Corresponding data for `feat_absc` px_um: float Detector pixel size [µm] """ if feat_corr == "deform" and feat_absc == "area_um": delt = corr_deform_with_area_um(data_absc, px_um=px_um) elif feat_corr == "circ" and feat_absc == "area_um": delt = -corr_deform_with_area_um(data_absc, px_um=px_um) elif feat_corr == "deform" and feat_absc == "volume": delt = corr_deform_with_volume(data_absc, px_um=px_um) elif feat_corr == "circ" and feat_absc == "volume": delt = -corr_deform_with_volume(data_absc, px_um=px_um) elif feat_corr == "area_um": # no correction for area delt = np.zeros_like(data_absc, dtype=float) elif feat_corr == "volume": # no correction for volume delt = np.zeros_like(data_absc, dtype=float) elif feat_corr == feat_absc: raise ValueError("Input feature names are identical!") else: raise KeyError( "No rule for feature '{}' with abscissa ".format(feat_corr) + "'{}'!".format(feat_absc)) return delt
gpl-2.0
dfalt974/SickRage
lib/bs4/tests/test_tree.py
20
78279
# -*- coding: utf-8 -*- """Tests for Beautiful Soup's tree traversal methods. The tree traversal methods are the main advantage of using Beautiful Soup over just using a parser. Different parsers will build different Beautiful Soup trees given the same markup, but all Beautiful Soup trees can be traversed with the methods tested here. """ from pdb import set_trace import copy import pickle import re import warnings from bs4 import BeautifulSoup from bs4.builder import ( builder_registry, HTMLParserTreeBuilder, ) from bs4.element import ( PY3K, CData, Comment, Declaration, Doctype, NavigableString, SoupStrainer, Tag, ) from bs4.testing import ( SoupTest, skipIf, ) XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None) LXML_PRESENT = (builder_registry.lookup("lxml") is not None) class TreeTest(SoupTest): def assertSelects(self, tags, should_match): """Make sure that the given tags have the correct text. This is used in tests that define a bunch of tags, each containing a single string, and then select certain strings by some mechanism. """ self.assertEqual([tag.string for tag in tags], should_match) def assertSelectsIDs(self, tags, should_match): """Make sure that the given tags have the correct IDs. This is used in tests that define a bunch of tags, each containing a single string, and then select certain strings by some mechanism. """ self.assertEqual([tag['id'] for tag in tags], should_match) class TestFind(TreeTest): """Basic tests of the find() method. find() just calls find_all() with limit=1, so it's not tested all that thouroughly here. 
""" def test_find_tag(self): soup = self.soup("<a>1</a><b>2</b><a>3</a><b>4</b>") self.assertEqual(soup.find("b").string, "2") def test_unicode_text_find(self): soup = self.soup(u'<h1>Räksmörgås</h1>') self.assertEqual(soup.find(string=u'Räksmörgås'), u'Räksmörgås') def test_unicode_attribute_find(self): soup = self.soup(u'<h1 id="Räksmörgås">here it is</h1>') str(soup) self.assertEqual("here it is", soup.find(id=u'Räksmörgås').text) def test_find_everything(self): """Test an optimization that finds all tags.""" soup = self.soup("<a>foo</a><b>bar</b>") self.assertEqual(2, len(soup.find_all())) def test_find_everything_with_name(self): """Test an optimization that finds all tags with a given name.""" soup = self.soup("<a>foo</a><b>bar</b><a>baz</a>") self.assertEqual(2, len(soup.find_all('a'))) class TestFindAll(TreeTest): """Basic tests of the find_all() method.""" def test_find_all_text_nodes(self): """You can search the tree for text nodes.""" soup = self.soup("<html>Foo<b>bar</b>\xbb</html>") # Exact match. self.assertEqual(soup.find_all(string="bar"), [u"bar"]) self.assertEqual(soup.find_all(text="bar"), [u"bar"]) # Match any of a number of strings. self.assertEqual( soup.find_all(text=["Foo", "bar"]), [u"Foo", u"bar"]) # Match a regular expression. self.assertEqual(soup.find_all(text=re.compile('.*')), [u"Foo", u"bar", u'\xbb']) # Match anything. self.assertEqual(soup.find_all(text=True), [u"Foo", u"bar", u'\xbb']) def test_find_all_limit(self): """You can limit the number of items returned by find_all.""" soup = self.soup("<a>1</a><a>2</a><a>3</a><a>4</a><a>5</a>") self.assertSelects(soup.find_all('a', limit=3), ["1", "2", "3"]) self.assertSelects(soup.find_all('a', limit=1), ["1"]) self.assertSelects( soup.find_all('a', limit=10), ["1", "2", "3", "4", "5"]) # A limit of 0 means no limit. 
self.assertSelects( soup.find_all('a', limit=0), ["1", "2", "3", "4", "5"]) def test_calling_a_tag_is_calling_findall(self): soup = self.soup("<a>1</a><b>2<a id='foo'>3</a></b>") self.assertSelects(soup('a', limit=1), ["1"]) self.assertSelects(soup.b(id="foo"), ["3"]) def test_find_all_with_self_referential_data_structure_does_not_cause_infinite_recursion(self): soup = self.soup("<a></a>") # Create a self-referential list. l = [] l.append(l) # Without special code in _normalize_search_value, this would cause infinite # recursion. self.assertEqual([], soup.find_all(l)) def test_find_all_resultset(self): """All find_all calls return a ResultSet""" soup = self.soup("<a></a>") result = soup.find_all("a") self.assertTrue(hasattr(result, "source")) result = soup.find_all(True) self.assertTrue(hasattr(result, "source")) result = soup.find_all(text="foo") self.assertTrue(hasattr(result, "source")) class TestFindAllBasicNamespaces(TreeTest): def test_find_by_namespaced_name(self): soup = self.soup('<mathml:msqrt>4</mathml:msqrt><a svg:fill="red">') self.assertEqual("4", soup.find("mathml:msqrt").string) self.assertEqual("a", soup.find(attrs= { "svg:fill" : "red" }).name) class TestFindAllByName(TreeTest): """Test ways of finding tags by tag name.""" def setUp(self): super(TreeTest, self).setUp() self.tree = self.soup("""<a>First tag.</a> <b>Second tag.</b> <c>Third <a>Nested tag.</a> tag.</c>""") def test_find_all_by_tag_name(self): # Find all the <a> tags. self.assertSelects( self.tree.find_all('a'), ['First tag.', 'Nested tag.']) def test_find_all_by_name_and_text(self): self.assertSelects( self.tree.find_all('a', text='First tag.'), ['First tag.']) self.assertSelects( self.tree.find_all('a', text=True), ['First tag.', 'Nested tag.']) self.assertSelects( self.tree.find_all('a', text=re.compile("tag")), ['First tag.', 'Nested tag.']) def test_find_all_on_non_root_element(self): # You can call find_all on any node, not just the root. 
self.assertSelects(self.tree.c.find_all('a'), ['Nested tag.']) def test_calling_element_invokes_find_all(self): self.assertSelects(self.tree('a'), ['First tag.', 'Nested tag.']) def test_find_all_by_tag_strainer(self): self.assertSelects( self.tree.find_all(SoupStrainer('a')), ['First tag.', 'Nested tag.']) def test_find_all_by_tag_names(self): self.assertSelects( self.tree.find_all(['a', 'b']), ['First tag.', 'Second tag.', 'Nested tag.']) def test_find_all_by_tag_dict(self): self.assertSelects( self.tree.find_all({'a' : True, 'b' : True}), ['First tag.', 'Second tag.', 'Nested tag.']) def test_find_all_by_tag_re(self): self.assertSelects( self.tree.find_all(re.compile('^[ab]$')), ['First tag.', 'Second tag.', 'Nested tag.']) def test_find_all_with_tags_matching_method(self): # You can define an oracle method that determines whether # a tag matches the search. def id_matches_name(tag): return tag.name == tag.get('id') tree = self.soup("""<a id="a">Match 1.</a> <a id="1">Does not match.</a> <b id="b">Match 2.</a>""") self.assertSelects( tree.find_all(id_matches_name), ["Match 1.", "Match 2."]) def test_find_with_multi_valued_attribute(self): soup = self.soup( "<div class='a b'>1</div><div class='a c'>2</div><div class='a d'>3</div>" ) r1 = soup.find('div', 'a d'); r2 = soup.find('div', re.compile(r'a d')); r3, r4 = soup.find_all('div', ['a b', 'a d']); self.assertEqual('3', r1.string) self.assertEqual('3', r2.string) self.assertEqual('1', r3.string) self.assertEqual('3', r4.string) class TestFindAllByAttribute(TreeTest): def test_find_all_by_attribute_name(self): # You can pass in keyword arguments to find_all to search by # attribute. tree = self.soup(""" <a id="first">Matching a.</a> <a id="second"> Non-matching <b id="first">Matching b.</b>a. 
</a>""") self.assertSelects(tree.find_all(id='first'), ["Matching a.", "Matching b."]) def test_find_all_by_utf8_attribute_value(self): peace = u"םולש".encode("utf8") data = u'<a title="םולש"></a>'.encode("utf8") soup = self.soup(data) self.assertEqual([soup.a], soup.find_all(title=peace)) self.assertEqual([soup.a], soup.find_all(title=peace.decode("utf8"))) self.assertEqual([soup.a], soup.find_all(title=[peace, "something else"])) def test_find_all_by_attribute_dict(self): # You can pass in a dictionary as the argument 'attrs'. This # lets you search for attributes like 'name' (a fixed argument # to find_all) and 'class' (a reserved word in Python.) tree = self.soup(""" <a name="name1" class="class1">Name match.</a> <a name="name2" class="class2">Class match.</a> <a name="name3" class="class3">Non-match.</a> <name1>A tag called 'name1'.</name1> """) # This doesn't do what you want. self.assertSelects(tree.find_all(name='name1'), ["A tag called 'name1'."]) # This does what you want. self.assertSelects(tree.find_all(attrs={'name' : 'name1'}), ["Name match."]) self.assertSelects(tree.find_all(attrs={'class' : 'class2'}), ["Class match."]) def test_find_all_by_class(self): tree = self.soup(""" <a class="1">Class 1.</a> <a class="2">Class 2.</a> <b class="1">Class 1.</b> <c class="3 4">Class 3 and 4.</c> """) # Passing in the class_ keyword argument will search against # the 'class' attribute. self.assertSelects(tree.find_all('a', class_='1'), ['Class 1.']) self.assertSelects(tree.find_all('c', class_='3'), ['Class 3 and 4.']) self.assertSelects(tree.find_all('c', class_='4'), ['Class 3 and 4.']) # Passing in a string to 'attrs' will also search the CSS class. 
self.assertSelects(tree.find_all('a', '1'), ['Class 1.']) self.assertSelects(tree.find_all(attrs='1'), ['Class 1.', 'Class 1.']) self.assertSelects(tree.find_all('c', '3'), ['Class 3 and 4.']) self.assertSelects(tree.find_all('c', '4'), ['Class 3 and 4.']) def test_find_by_class_when_multiple_classes_present(self): tree = self.soup("<gar class='foo bar'>Found it</gar>") f = tree.find_all("gar", class_=re.compile("o")) self.assertSelects(f, ["Found it"]) f = tree.find_all("gar", class_=re.compile("a")) self.assertSelects(f, ["Found it"]) # If the search fails to match the individual strings "foo" and "bar", # it will be tried against the combined string "foo bar". f = tree.find_all("gar", class_=re.compile("o b")) self.assertSelects(f, ["Found it"]) def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self): soup = self.soup("<a class='bar'>Found it</a>") self.assertSelects(soup.find_all("a", re.compile("ba")), ["Found it"]) def big_attribute_value(value): return len(value) > 3 self.assertSelects(soup.find_all("a", big_attribute_value), []) def small_attribute_value(value): return len(value) <= 3 self.assertSelects( soup.find_all("a", small_attribute_value), ["Found it"]) def test_find_all_with_string_for_attrs_finds_multiple_classes(self): soup = self.soup('<a class="foo bar"></a><a class="foo"></a>') a, a2 = soup.find_all("a") self.assertEqual([a, a2], soup.find_all("a", "foo")) self.assertEqual([a], soup.find_all("a", "bar")) # If you specify the class as a string that contains a # space, only that specific value will be found. 
self.assertEqual([a], soup.find_all("a", class_="foo bar")) self.assertEqual([a], soup.find_all("a", "foo bar")) self.assertEqual([], soup.find_all("a", "bar foo")) def test_find_all_by_attribute_soupstrainer(self): tree = self.soup(""" <a id="first">Match.</a> <a id="second">Non-match.</a>""") strainer = SoupStrainer(attrs={'id' : 'first'}) self.assertSelects(tree.find_all(strainer), ['Match.']) def test_find_all_with_missing_attribute(self): # You can pass in None as the value of an attribute to find_all. # This will match tags that do not have that attribute set. tree = self.soup("""<a id="1">ID present.</a> <a>No ID present.</a> <a id="">ID is empty.</a>""") self.assertSelects(tree.find_all('a', id=None), ["No ID present."]) def test_find_all_with_defined_attribute(self): # You can pass in None as the value of an attribute to find_all. # This will match tags that have that attribute set to any value. tree = self.soup("""<a id="1">ID present.</a> <a>No ID present.</a> <a id="">ID is empty.</a>""") self.assertSelects( tree.find_all(id=True), ["ID present.", "ID is empty."]) def test_find_all_with_numeric_attribute(self): # If you search for a number, it's treated as a string. tree = self.soup("""<a id=1>Unquoted attribute.</a> <a id="1">Quoted attribute.</a>""") expected = ["Unquoted attribute.", "Quoted attribute."] self.assertSelects(tree.find_all(id=1), expected) self.assertSelects(tree.find_all(id="1"), expected) def test_find_all_with_list_attribute_values(self): # You can pass a list of attribute values instead of just one, # and you'll get tags that match any of the values. tree = self.soup("""<a id="1">1</a> <a id="2">2</a> <a id="3">3</a> <a>No ID.</a>""") self.assertSelects(tree.find_all(id=["1", "3", "4"]), ["1", "3"]) def test_find_all_with_regular_expression_attribute_value(self): # You can pass a regular expression as an attribute value, and # you'll get tags whose values for that attribute match the # regular expression. 
tree = self.soup("""<a id="a">One a.</a> <a id="aa">Two as.</a> <a id="ab">Mixed as and bs.</a> <a id="b">One b.</a> <a>No ID.</a>""") self.assertSelects(tree.find_all(id=re.compile("^a+$")), ["One a.", "Two as."]) def test_find_by_name_and_containing_string(self): soup = self.soup("<b>foo</b><b>bar</b><a>foo</a>") a = soup.a self.assertEqual([a], soup.find_all("a", text="foo")) self.assertEqual([], soup.find_all("a", text="bar")) self.assertEqual([], soup.find_all("a", text="bar")) def test_find_by_name_and_containing_string_when_string_is_buried(self): soup = self.soup("<a>foo</a><a><b><c>foo</c></b></a>") self.assertEqual(soup.find_all("a"), soup.find_all("a", text="foo")) def test_find_by_attribute_and_containing_string(self): soup = self.soup('<b id="1">foo</b><a id="2">foo</a>') a = soup.a self.assertEqual([a], soup.find_all(id=2, text="foo")) self.assertEqual([], soup.find_all(id=1, text="bar")) class TestIndex(TreeTest): """Test Tag.index""" def test_index(self): tree = self.soup("""<div> <a>Identical</a> <b>Not identical</b> <a>Identical</a> <c><d>Identical with child</d></c> <b>Also not identical</b> <c><d>Identical with child</d></c> </div>""") div = tree.div for i, element in enumerate(div.contents): self.assertEqual(i, div.index(element)) self.assertRaises(ValueError, tree.index, 1) class TestParentOperations(TreeTest): """Test navigation and searching through an element's parents.""" def setUp(self): super(TestParentOperations, self).setUp() self.tree = self.soup('''<ul id="empty"></ul> <ul id="top"> <ul id="middle"> <ul id="bottom"> <b>Start here</b> </ul> </ul>''') self.start = self.tree.b def test_parent(self): self.assertEqual(self.start.parent['id'], 'bottom') self.assertEqual(self.start.parent.parent['id'], 'middle') self.assertEqual(self.start.parent.parent.parent['id'], 'top') def test_parent_of_top_tag_is_soup_object(self): top_tag = self.tree.contents[0] self.assertEqual(top_tag.parent, self.tree) def test_soup_object_has_no_parent(self): 
self.assertEqual(None, self.tree.parent) def test_find_parents(self): self.assertSelectsIDs( self.start.find_parents('ul'), ['bottom', 'middle', 'top']) self.assertSelectsIDs( self.start.find_parents('ul', id="middle"), ['middle']) def test_find_parent(self): self.assertEqual(self.start.find_parent('ul')['id'], 'bottom') self.assertEqual(self.start.find_parent('ul', id='top')['id'], 'top') def test_parent_of_text_element(self): text = self.tree.find(text="Start here") self.assertEqual(text.parent.name, 'b') def test_text_element_find_parent(self): text = self.tree.find(text="Start here") self.assertEqual(text.find_parent('ul')['id'], 'bottom') def test_parent_generator(self): parents = [parent['id'] for parent in self.start.parents if parent is not None and 'id' in parent.attrs] self.assertEqual(parents, ['bottom', 'middle', 'top']) class ProximityTest(TreeTest): def setUp(self): super(TreeTest, self).setUp() self.tree = self.soup( '<html id="start"><head></head><body><b id="1">One</b><b id="2">Two</b><b id="3">Three</b></body></html>') class TestNextOperations(ProximityTest): def setUp(self): super(TestNextOperations, self).setUp() self.start = self.tree.b def test_next(self): self.assertEqual(self.start.next_element, "One") self.assertEqual(self.start.next_element.next_element['id'], "2") def test_next_of_last_item_is_none(self): last = self.tree.find(text="Three") self.assertEqual(last.next_element, None) def test_next_of_root_is_none(self): # The document root is outside the next/previous chain. 
self.assertEqual(self.tree.next_element, None) def test_find_all_next(self): self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"]) self.start.find_all_next(id=3) self.assertSelects(self.start.find_all_next(id=3), ["Three"]) def test_find_next(self): self.assertEqual(self.start.find_next('b')['id'], '2') self.assertEqual(self.start.find_next(text="Three"), "Three") def test_find_next_for_text_element(self): text = self.tree.find(text="One") self.assertEqual(text.find_next("b").string, "Two") self.assertSelects(text.find_all_next("b"), ["Two", "Three"]) def test_next_generator(self): start = self.tree.find(text="Two") successors = [node for node in start.next_elements] # There are two successors: the final <b> tag and its text contents. tag, contents = successors self.assertEqual(tag['id'], '3') self.assertEqual(contents, "Three") class TestPreviousOperations(ProximityTest): def setUp(self): super(TestPreviousOperations, self).setUp() self.end = self.tree.find(text="Three") def test_previous(self): self.assertEqual(self.end.previous_element['id'], "3") self.assertEqual(self.end.previous_element.previous_element, "Two") def test_previous_of_first_item_is_none(self): first = self.tree.find('html') self.assertEqual(first.previous_element, None) def test_previous_of_root_is_none(self): # The document root is outside the next/previous chain. # XXX This is broken! #self.assertEqual(self.tree.previous_element, None) pass def test_find_all_previous(self): # The <b> tag containing the "Three" node is the predecessor # of the "Three" node itself, which is why "Three" shows up # here. 
self.assertSelects( self.end.find_all_previous('b'), ["Three", "Two", "One"]) self.assertSelects(self.end.find_all_previous(id=1), ["One"]) def test_find_previous(self): self.assertEqual(self.end.find_previous('b')['id'], '3') self.assertEqual(self.end.find_previous(text="One"), "One") def test_find_previous_for_text_element(self): text = self.tree.find(text="Three") self.assertEqual(text.find_previous("b").string, "Three") self.assertSelects( text.find_all_previous("b"), ["Three", "Two", "One"]) def test_previous_generator(self): start = self.tree.find(text="One") predecessors = [node for node in start.previous_elements] # There are four predecessors: the <b> tag containing "One" # the <body> tag, the <head> tag, and the <html> tag. b, body, head, html = predecessors self.assertEqual(b['id'], '1') self.assertEqual(body.name, "body") self.assertEqual(head.name, "head") self.assertEqual(html.name, "html") class SiblingTest(TreeTest): def setUp(self): super(SiblingTest, self).setUp() markup = '''<html> <span id="1"> <span id="1.1"></span> </span> <span id="2"> <span id="2.1"></span> </span> <span id="3"> <span id="3.1"></span> </span> <span id="4"></span> </html>''' # All that whitespace looks good but makes the tests more # difficult. Get rid of it. markup = re.compile("\n\s*").sub("", markup) self.tree = self.soup(markup) class TestNextSibling(SiblingTest): def setUp(self): super(TestNextSibling, self).setUp() self.start = self.tree.find(id="1") def test_next_sibling_of_root_is_none(self): self.assertEqual(self.tree.next_sibling, None) def test_next_sibling(self): self.assertEqual(self.start.next_sibling['id'], '2') self.assertEqual(self.start.next_sibling.next_sibling['id'], '3') # Note the difference between next_sibling and next_element. 
self.assertEqual(self.start.next_element['id'], '1.1') def test_next_sibling_may_not_exist(self): self.assertEqual(self.tree.html.next_sibling, None) nested_span = self.tree.find(id="1.1") self.assertEqual(nested_span.next_sibling, None) last_span = self.tree.find(id="4") self.assertEqual(last_span.next_sibling, None) def test_find_next_sibling(self): self.assertEqual(self.start.find_next_sibling('span')['id'], '2') def test_next_siblings(self): self.assertSelectsIDs(self.start.find_next_siblings("span"), ['2', '3', '4']) self.assertSelectsIDs(self.start.find_next_siblings(id='3'), ['3']) def test_next_sibling_for_text_element(self): soup = self.soup("Foo<b>bar</b>baz") start = soup.find(text="Foo") self.assertEqual(start.next_sibling.name, 'b') self.assertEqual(start.next_sibling.next_sibling, 'baz') self.assertSelects(start.find_next_siblings('b'), ['bar']) self.assertEqual(start.find_next_sibling(text="baz"), "baz") self.assertEqual(start.find_next_sibling(text="nonesuch"), None) class TestPreviousSibling(SiblingTest): def setUp(self): super(TestPreviousSibling, self).setUp() self.end = self.tree.find(id="4") def test_previous_sibling_of_root_is_none(self): self.assertEqual(self.tree.previous_sibling, None) def test_previous_sibling(self): self.assertEqual(self.end.previous_sibling['id'], '3') self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2') # Note the difference between previous_sibling and previous_element. 
        self.assertEqual(self.end.previous_element['id'], '3.1')

    def test_previous_sibling_may_not_exist(self):
        self.assertEqual(self.tree.html.previous_sibling, None)

        nested_span = self.tree.find(id="1.1")
        self.assertEqual(nested_span.previous_sibling, None)

        first_span = self.tree.find(id="1")
        self.assertEqual(first_span.previous_sibling, None)

    def test_find_previous_sibling(self):
        self.assertEqual(self.end.find_previous_sibling('span')['id'], '3')

    def test_previous_siblings(self):
        self.assertSelectsIDs(self.end.find_previous_siblings("span"),
                              ['3', '2', '1'])

        self.assertSelectsIDs(self.end.find_previous_siblings(id='1'), ['1'])

    def test_previous_sibling_for_text_element(self):
        soup = self.soup("Foo<b>bar</b>baz")
        start = soup.find(text="baz")
        self.assertEqual(start.previous_sibling.name, 'b')
        self.assertEqual(start.previous_sibling.previous_sibling, 'Foo')

        self.assertSelects(start.find_previous_siblings('b'), ['bar'])
        self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo")
        self.assertEqual(start.find_previous_sibling(text="nonesuch"), None)

class TestTagCreation(SoupTest):
    """Test the ability to create new tags."""
    def test_new_tag(self):
        soup = self.soup("")
        new_tag = soup.new_tag("foo", bar="baz")
        self.assertTrue(isinstance(new_tag, Tag))
        self.assertEqual("foo", new_tag.name)
        self.assertEqual(dict(bar="baz"), new_tag.attrs)
        self.assertEqual(None, new_tag.parent)

    def test_tag_inherits_self_closing_rules_from_builder(self):
        if XML_BUILDER_PRESENT:
            xml_soup = BeautifulSoup("", "lxml-xml")
            xml_br = xml_soup.new_tag("br")
            xml_p = xml_soup.new_tag("p")

            # Both the <br> and <p> tag are empty-element, just because
            # they have no contents.
            self.assertEqual(b"<br/>", xml_br.encode())
            self.assertEqual(b"<p/>", xml_p.encode())

        html_soup = BeautifulSoup("", "html.parser")
        html_br = html_soup.new_tag("br")
        html_p = html_soup.new_tag("p")

        # The HTML builder uses HTML's rules about which tags are
        # empty-element tags, and the new tags reflect these rules.
self.assertEqual(b"<br/>", html_br.encode()) self.assertEqual(b"<p></p>", html_p.encode()) def test_new_string_creates_navigablestring(self): soup = self.soup("") s = soup.new_string("foo") self.assertEqual("foo", s) self.assertTrue(isinstance(s, NavigableString)) def test_new_string_can_create_navigablestring_subclass(self): soup = self.soup("") s = soup.new_string("foo", Comment) self.assertEqual("foo", s) self.assertTrue(isinstance(s, Comment)) class TestTreeModification(SoupTest): def test_attribute_modification(self): soup = self.soup('<a id="1"></a>') soup.a['id'] = 2 self.assertEqual(soup.decode(), self.document_for('<a id="2"></a>')) del(soup.a['id']) self.assertEqual(soup.decode(), self.document_for('<a></a>')) soup.a['id2'] = 'foo' self.assertEqual(soup.decode(), self.document_for('<a id2="foo"></a>')) def test_new_tag_creation(self): builder = builder_registry.lookup('html')() soup = self.soup("<body></body>", builder=builder) a = Tag(soup, builder, 'a') ol = Tag(soup, builder, 'ol') a['href'] = 'http://foo.com/' soup.body.insert(0, a) soup.body.insert(1, ol) self.assertEqual( soup.body.encode(), b'<body><a href="http://foo.com/"></a><ol></ol></body>') def test_append_to_contents_moves_tag(self): doc = """<p id="1">Don't leave me <b>here</b>.</p> <p id="2">Don\'t leave!</p>""" soup = self.soup(doc) second_para = soup.find(id='2') bold = soup.b # Move the <b> tag to the end of the second paragraph. soup.find(id='2').append(soup.b) # The <b> tag is now a child of the second paragraph. 
self.assertEqual(bold.parent, second_para) self.assertEqual( soup.decode(), self.document_for( '<p id="1">Don\'t leave me .</p>\n' '<p id="2">Don\'t leave!<b>here</b></p>')) def test_replace_with_returns_thing_that_was_replaced(self): text = "<a></a><b><c></c></b>" soup = self.soup(text) a = soup.a new_a = a.replace_with(soup.c) self.assertEqual(a, new_a) def test_unwrap_returns_thing_that_was_replaced(self): text = "<a><b></b><c></c></a>" soup = self.soup(text) a = soup.a new_a = a.unwrap() self.assertEqual(a, new_a) def test_replace_with_and_unwrap_give_useful_exception_when_tag_has_no_parent(self): soup = self.soup("<a><b>Foo</b></a><c>Bar</c>") a = soup.a a.extract() self.assertEqual(None, a.parent) self.assertRaises(ValueError, a.unwrap) self.assertRaises(ValueError, a.replace_with, soup.c) def test_replace_tag_with_itself(self): text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>" soup = self.soup(text) c = soup.c soup.c.replace_with(c) self.assertEqual(soup.decode(), self.document_for(text)) def test_replace_tag_with_its_parent_raises_exception(self): text = "<a><b></b></a>" soup = self.soup(text) self.assertRaises(ValueError, soup.b.replace_with, soup.a) def test_insert_tag_into_itself_raises_exception(self): text = "<a><b></b></a>" soup = self.soup(text) self.assertRaises(ValueError, soup.a.insert, 0, soup.a) def test_replace_with_maintains_next_element_throughout(self): soup = self.soup('<p><a>one</a><b>three</b></p>') a = soup.a b = a.contents[0] # Make it so the <a> tag has two text children. a.insert(1, "two") # Now replace each one with the empty string. left, right = a.contents left.replaceWith('') right.replaceWith('') # The <b> tag is still connected to the tree. 
        self.assertEqual("three", soup.b.string)

    def test_replace_final_node(self):
        soup = self.soup("<b>Argh!</b>")
        soup.find(text="Argh!").replace_with("Hooray!")
        new_text = soup.find(text="Hooray!")
        b = soup.b
        self.assertEqual(new_text.previous_element, b)
        self.assertEqual(new_text.parent, b)
        self.assertEqual(new_text.previous_element.next_element, new_text)
        self.assertEqual(new_text.next_element, None)

    def test_consecutive_text_nodes(self):
        # A builder should never create two consecutive text nodes,
        # but if you insert one next to another, Beautiful Soup will
        # handle it correctly.
        soup = self.soup("<a><b>Argh!</b><c></c></a>")
        soup.b.insert(1, "Hooray!")

        self.assertEqual(
            soup.decode(), self.document_for(
                "<a><b>Argh!Hooray!</b><c></c></a>"))

        new_text = soup.find(text="Hooray!")
        self.assertEqual(new_text.previous_element, "Argh!")
        self.assertEqual(new_text.previous_element.next_element, new_text)

        self.assertEqual(new_text.previous_sibling, "Argh!")
        self.assertEqual(new_text.previous_sibling.next_sibling, new_text)

        self.assertEqual(new_text.next_sibling, None)
        self.assertEqual(new_text.next_element, soup.c)

    def test_insert_string(self):
        soup = self.soup("<a></a>")
        soup.a.insert(0, "bar")
        soup.a.insert(0, "foo")
        # The strings were added to the tag.
        self.assertEqual(["foo", "bar"], soup.a.contents)
        # And they were converted to NavigableStrings.
        self.assertEqual(soup.a.contents[0].next_element, "bar")

    def test_insert_tag(self):
        builder = self.default_builder
        soup = self.soup(
            "<a><b>Find</b><c>lady!</c><d></d></a>", builder=builder)
        magic_tag = Tag(soup, builder, 'magictag')
        magic_tag.insert(0, "the")
        soup.a.insert(1, magic_tag)

        self.assertEqual(
            soup.decode(), self.document_for(
                "<a><b>Find</b><magictag>the</magictag><c>lady!</c><d></d></a>"))

        # Make sure all the relationships are hooked up correctly.
b_tag = soup.b self.assertEqual(b_tag.next_sibling, magic_tag) self.assertEqual(magic_tag.previous_sibling, b_tag) find = b_tag.find(text="Find") self.assertEqual(find.next_element, magic_tag) self.assertEqual(magic_tag.previous_element, find) c_tag = soup.c self.assertEqual(magic_tag.next_sibling, c_tag) self.assertEqual(c_tag.previous_sibling, magic_tag) the = magic_tag.find(text="the") self.assertEqual(the.parent, magic_tag) self.assertEqual(the.next_element, c_tag) self.assertEqual(c_tag.previous_element, the) def test_append_child_thats_already_at_the_end(self): data = "<a><b></b></a>" soup = self.soup(data) soup.a.append(soup.b) self.assertEqual(data, soup.decode()) def test_move_tag_to_beginning_of_parent(self): data = "<a><b></b><c></c><d></d></a>" soup = self.soup(data) soup.a.insert(0, soup.d) self.assertEqual("<a><d></d><b></b><c></c></a>", soup.decode()) def test_insert_works_on_empty_element_tag(self): # This is a little strange, since most HTML parsers don't allow # markup like this to come through. But in general, we don't # know what the parser would or wouldn't have allowed, so # I'm letting this succeed for now. 
soup = self.soup("<br/>") soup.br.insert(1, "Contents") self.assertEqual(str(soup.br), "<br>Contents</br>") def test_insert_before(self): soup = self.soup("<a>foo</a><b>bar</b>") soup.b.insert_before("BAZ") soup.a.insert_before("QUUX") self.assertEqual( soup.decode(), self.document_for("QUUX<a>foo</a>BAZ<b>bar</b>")) soup.a.insert_before(soup.b) self.assertEqual( soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ")) def test_insert_after(self): soup = self.soup("<a>foo</a><b>bar</b>") soup.b.insert_after("BAZ") soup.a.insert_after("QUUX") self.assertEqual( soup.decode(), self.document_for("<a>foo</a>QUUX<b>bar</b>BAZ")) soup.b.insert_after(soup.a) self.assertEqual( soup.decode(), self.document_for("QUUX<b>bar</b><a>foo</a>BAZ")) def test_insert_after_raises_exception_if_after_has_no_meaning(self): soup = self.soup("") tag = soup.new_tag("a") string = soup.new_string("") self.assertRaises(ValueError, string.insert_after, tag) self.assertRaises(NotImplementedError, soup.insert_after, tag) self.assertRaises(ValueError, tag.insert_after, tag) def test_insert_before_raises_notimplementederror_if_before_has_no_meaning(self): soup = self.soup("") tag = soup.new_tag("a") string = soup.new_string("") self.assertRaises(ValueError, string.insert_before, tag) self.assertRaises(NotImplementedError, soup.insert_before, tag) self.assertRaises(ValueError, tag.insert_before, tag) def test_replace_with(self): soup = self.soup( "<p>There's <b>no</b> business like <b>show</b> business</p>") no, show = soup.find_all('b') show.replace_with(no) self.assertEqual( soup.decode(), self.document_for( "<p>There's business like <b>no</b> business</p>")) self.assertEqual(show.parent, None) self.assertEqual(no.parent, soup.p) self.assertEqual(no.next_element, "no") self.assertEqual(no.next_sibling, " business") def test_replace_first_child(self): data = "<a><b></b><c></c></a>" soup = self.soup(data) soup.b.replace_with(soup.c) self.assertEqual("<a><c></c></a>", soup.decode()) def 
test_replace_last_child(self): data = "<a><b></b><c></c></a>" soup = self.soup(data) soup.c.replace_with(soup.b) self.assertEqual("<a><b></b></a>", soup.decode()) def test_nested_tag_replace_with(self): soup = self.soup( """<a>We<b>reserve<c>the</c><d>right</d></b></a><e>to<f>refuse</f><g>service</g></e>""") # Replace the entire <b> tag and its contents ("reserve the # right") with the <f> tag ("refuse"). remove_tag = soup.b move_tag = soup.f remove_tag.replace_with(move_tag) self.assertEqual( soup.decode(), self.document_for( "<a>We<f>refuse</f></a><e>to<g>service</g></e>")) # The <b> tag is now an orphan. self.assertEqual(remove_tag.parent, None) self.assertEqual(remove_tag.find(text="right").next_element, None) self.assertEqual(remove_tag.previous_element, None) self.assertEqual(remove_tag.next_sibling, None) self.assertEqual(remove_tag.previous_sibling, None) # The <f> tag is now connected to the <a> tag. self.assertEqual(move_tag.parent, soup.a) self.assertEqual(move_tag.previous_element, "We") self.assertEqual(move_tag.next_element.next_element, soup.e) self.assertEqual(move_tag.next_sibling, None) # The gap where the <f> tag used to be has been mended, and # the word "to" is now connected to the <g> tag. 
to_text = soup.find(text="to") g_tag = soup.g self.assertEqual(to_text.next_element, g_tag) self.assertEqual(to_text.next_sibling, g_tag) self.assertEqual(g_tag.previous_element, to_text) self.assertEqual(g_tag.previous_sibling, to_text) def test_unwrap(self): tree = self.soup(""" <p>Unneeded <em>formatting</em> is unneeded</p> """) tree.em.unwrap() self.assertEqual(tree.em, None) self.assertEqual(tree.p.text, "Unneeded formatting is unneeded") def test_wrap(self): soup = self.soup("I wish I was bold.") value = soup.string.wrap(soup.new_tag("b")) self.assertEqual(value.decode(), "<b>I wish I was bold.</b>") self.assertEqual( soup.decode(), self.document_for("<b>I wish I was bold.</b>")) def test_wrap_extracts_tag_from_elsewhere(self): soup = self.soup("<b></b>I wish I was bold.") soup.b.next_sibling.wrap(soup.b) self.assertEqual( soup.decode(), self.document_for("<b>I wish I was bold.</b>")) def test_wrap_puts_new_contents_at_the_end(self): soup = self.soup("<b>I like being bold.</b>I wish I was bold.") soup.b.next_sibling.wrap(soup.b) self.assertEqual(2, len(soup.b.contents)) self.assertEqual( soup.decode(), self.document_for( "<b>I like being bold.I wish I was bold.</b>")) def test_extract(self): soup = self.soup( '<html><body>Some content. <div id="nav">Nav crap</div> More content.</body></html>') self.assertEqual(len(soup.body.contents), 3) extracted = soup.find(id="nav").extract() self.assertEqual( soup.decode(), "<html><body>Some content. More content.</body></html>") self.assertEqual(extracted.decode(), '<div id="nav">Nav crap</div>') # The extracted tag is now an orphan. self.assertEqual(len(soup.body.contents), 2) self.assertEqual(extracted.parent, None) self.assertEqual(extracted.previous_element, None) self.assertEqual(extracted.next_element.next_element, None) # The gap where the extracted tag used to be has been mended. content_1 = soup.find(text="Some content. 
") content_2 = soup.find(text=" More content.") self.assertEqual(content_1.next_element, content_2) self.assertEqual(content_1.next_sibling, content_2) self.assertEqual(content_2.previous_element, content_1) self.assertEqual(content_2.previous_sibling, content_1) def test_extract_distinguishes_between_identical_strings(self): soup = self.soup("<a>foo</a><b>bar</b>") foo_1 = soup.a.string bar_1 = soup.b.string foo_2 = soup.new_string("foo") bar_2 = soup.new_string("bar") soup.a.append(foo_2) soup.b.append(bar_2) # Now there are two identical strings in the <a> tag, and two # in the <b> tag. Let's remove the first "foo" and the second # "bar". foo_1.extract() bar_2.extract() self.assertEqual(foo_2, soup.a.string) self.assertEqual(bar_2, soup.b.string) def test_extract_multiples_of_same_tag(self): soup = self.soup(""" <html> <head> <script>foo</script> </head> <body> <script>bar</script> <a></a> </body> <script>baz</script> </html>""") [soup.script.extract() for i in soup.find_all("script")] self.assertEqual("<body>\n\n<a></a>\n</body>", unicode(soup.body)) def test_extract_works_when_element_is_surrounded_by_identical_strings(self): soup = self.soup( '<html>\n' '<body>hi</body>\n' '</html>') soup.find('body').extract() self.assertEqual(None, soup.find('body')) def test_clear(self): """Tag.clear()""" soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>") # clear using extract() a = soup.a soup.p.clear() self.assertEqual(len(soup.p.contents), 0) self.assertTrue(hasattr(a, "contents")) # clear using decompose() em = a.em a.clear(decompose=True) self.assertEqual(0, len(em.contents)) def test_string_set(self): """Tag.string = 'string'""" soup = self.soup("<a></a> <b><c></c></b>") soup.a.string = "foo" self.assertEqual(soup.a.contents, ["foo"]) soup.b.string = "bar" self.assertEqual(soup.b.contents, ["bar"]) def test_string_set_does_not_affect_original_string(self): soup = self.soup("<a><b>foo</b><c>bar</c>") soup.b.string = soup.c.string 
        self.assertEqual(soup.a.encode(), b"<a><b>bar</b><c>bar</c></a>")

    def test_set_string_preserves_class_of_string(self):
        soup = self.soup("<a></a>")
        cdata = CData("foo")
        soup.a.string = cdata
        self.assertTrue(isinstance(soup.a.string, CData))

class TestElementObjects(SoupTest):
    """Test various features of element objects."""

    def test_len(self):
        """The length of an element is its number of children."""
        soup = self.soup("<top>1<b>2</b>3</top>")

        # The BeautifulSoup object itself contains one element: the
        # <top> tag.
        self.assertEqual(len(soup.contents), 1)
        self.assertEqual(len(soup), 1)

        # The <top> tag contains three elements: the text node "1", the
        # <b> tag, and the text node "3".
        self.assertEqual(len(soup.top), 3)
        self.assertEqual(len(soup.top.contents), 3)

    def test_member_access_invokes_find(self):
        """Accessing a Python member .foo invokes find('foo')"""
        soup = self.soup('<b><i></i></b>')
        self.assertEqual(soup.b, soup.find('b'))
        self.assertEqual(soup.b.i, soup.find('b').find('i'))
        self.assertEqual(soup.a, None)

    def test_deprecated_member_access(self):
        soup = self.soup('<b><i></i></b>')
        with warnings.catch_warnings(record=True) as w:
            tag = soup.bTag
        self.assertEqual(soup.b, tag)
        self.assertEqual(
            '.bTag is deprecated, use .find("b") instead.',
            str(w[0].message))

    def test_has_attr(self):
        """has_attr() checks for the presence of an attribute.

        Please note: has_attr() is different from the `in` operator.
        has_attr() checks the tag's attributes and `in` checks the
        tag's children.
        """
        soup = self.soup("<foo attr='bar'>")
        self.assertTrue(soup.foo.has_attr('attr'))
        self.assertFalse(soup.foo.has_attr('attr2'))

    def test_attributes_come_out_in_alphabetical_order(self):
        markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
        self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')

    def test_string(self):
        # A tag that contains only a text node makes that node
        # available as .string.
soup = self.soup("<b>foo</b>") self.assertEqual(soup.b.string, 'foo') def test_empty_tag_has_no_string(self): # A tag with no children has no .stirng. soup = self.soup("<b></b>") self.assertEqual(soup.b.string, None) def test_tag_with_multiple_children_has_no_string(self): # A tag with no children has no .string. soup = self.soup("<a>foo<b></b><b></b></b>") self.assertEqual(soup.b.string, None) soup = self.soup("<a>foo<b></b>bar</b>") self.assertEqual(soup.b.string, None) # Even if all the children are strings, due to trickery, # it won't work--but this would be a good optimization. soup = self.soup("<a>foo</b>") soup.a.insert(1, "bar") self.assertEqual(soup.a.string, None) def test_tag_with_recursive_string_has_string(self): # A tag with a single child which has a .string inherits that # .string. soup = self.soup("<a><b>foo</b></a>") self.assertEqual(soup.a.string, "foo") self.assertEqual(soup.string, "foo") def test_lack_of_string(self): """Only a tag containing a single text node has a .string.""" soup = self.soup("<b>f<i>e</i>o</b>") self.assertFalse(soup.b.string) soup = self.soup("<b></b>") self.assertFalse(soup.b.string) def test_all_text(self): """Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated""" soup = self.soup("<a>a<b>r</b> <r> t </r></a>") self.assertEqual(soup.a.text, "ar t ") self.assertEqual(soup.a.get_text(strip=True), "art") self.assertEqual(soup.a.get_text(","), "a,r, , t ") self.assertEqual(soup.a.get_text(",", strip=True), "a,r,t") def test_get_text_ignores_comments(self): soup = self.soup("foo<!--IGNORE-->bar") self.assertEqual(soup.get_text(), "foobar") self.assertEqual( soup.get_text(types=(NavigableString, Comment)), "fooIGNOREbar") self.assertEqual( soup.get_text(types=None), "fooIGNOREbar") def test_all_strings_ignores_comments(self): soup = self.soup("foo<!--IGNORE-->bar") self.assertEqual(['foo', 'bar'], list(soup.strings)) class TestCDAtaListAttributes(SoupTest): """Testing cdata-list attributes like 'class'. 
""" def test_single_value_becomes_list(self): soup = self.soup("<a class='foo'>") self.assertEqual(["foo"],soup.a['class']) def test_multiple_values_becomes_list(self): soup = self.soup("<a class='foo bar'>") self.assertEqual(["foo", "bar"], soup.a['class']) def test_multiple_values_separated_by_weird_whitespace(self): soup = self.soup("<a class='foo\tbar\nbaz'>") self.assertEqual(["foo", "bar", "baz"],soup.a['class']) def test_attributes_joined_into_string_on_output(self): soup = self.soup("<a class='foo\tbar'>") self.assertEqual(b'<a class="foo bar"></a>', soup.a.encode()) def test_get_attribute_list(self): soup = self.soup("<a id='abc def'>") self.assertEqual(['abc def'], soup.a.get_attribute_list('id')) def test_accept_charset(self): soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">') self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset']) def test_cdata_attribute_applying_only_to_one_tag(self): data = '<a accept-charset="ISO-8859-1 UTF-8"></a>' soup = self.soup(data) # We saw in another test that accept-charset is a cdata-list # attribute for the <form> tag. But it's not a cdata-list # attribute for any other tag. self.assertEqual('ISO-8859-1 UTF-8', soup.a['accept-charset']) def test_string_has_immutable_name_property(self): string = self.soup("s").string self.assertEqual(None, string.name) def t(): string.name = 'foo' self.assertRaises(AttributeError, t) class TestPersistence(SoupTest): "Testing features like pickle and deepcopy." 
def setUp(self): super(TestPersistence, self).setUp() self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/transitional.dtd"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>Beautiful Soup: We called him Tortoise because he taught us.</title> <link rev="made" href="mailto:leonardr@segfault.org"> <meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping."> <meta name="generator" content="Markov Approximation 1.4 (module: leonardr)"> <meta name="author" content="Leonard Richardson"> </head> <body> <a href="foo">foo</a> <a href="foo"><b>bar</b></a> </body> </html>""" self.tree = self.soup(self.page) def test_pickle_and_unpickle_identity(self): # Pickling a tree, then unpickling it, yields a tree identical # to the original. dumped = pickle.dumps(self.tree, 2) loaded = pickle.loads(dumped) self.assertEqual(loaded.__class__, BeautifulSoup) self.assertEqual(loaded.decode(), self.tree.decode()) def test_deepcopy_identity(self): # Making a deepcopy of a tree yields an identical tree. copied = copy.deepcopy(self.tree) self.assertEqual(copied.decode(), self.tree.decode()) def test_copy_preserves_encoding(self): soup = BeautifulSoup(b'<p>&nbsp;</p>', 'html.parser') encoding = soup.original_encoding copy = soup.__copy__() self.assertEqual(u"<p> </p>", unicode(copy)) self.assertEqual(encoding, copy.original_encoding) def test_unicode_pickle(self): # A tree containing Unicode characters can be pickled. 
html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL) loaded = pickle.loads(dumped) self.assertEqual(loaded.decode(), soup.decode()) def test_copy_navigablestring_is_not_attached_to_tree(self): html = u"<b>Foo<a></a></b><b>Bar</b>" soup = self.soup(html) s1 = soup.find(string="Foo") s2 = copy.copy(s1) self.assertEqual(s1, s2) self.assertEqual(None, s2.parent) self.assertEqual(None, s2.next_element) self.assertNotEqual(None, s1.next_sibling) self.assertEqual(None, s2.next_sibling) self.assertEqual(None, s2.previous_element) def test_copy_navigablestring_subclass_has_same_type(self): html = u"<b><!--Foo--></b>" soup = self.soup(html) s1 = soup.string s2 = copy.copy(s1) self.assertEqual(s1, s2) self.assertTrue(isinstance(s2, Comment)) def test_copy_entire_soup(self): html = u"<div><b>Foo<a></a></b><b>Bar</b></div>end" soup = self.soup(html) soup_copy = copy.copy(soup) self.assertEqual(soup, soup_copy) def test_copy_tag_copies_contents(self): html = u"<div><b>Foo<a></a></b><b>Bar</b></div>end" soup = self.soup(html) div = soup.div div_copy = copy.copy(div) # The two tags look the same, and evaluate to equal. self.assertEqual(unicode(div), unicode(div_copy)) self.assertEqual(div, div_copy) # But they're not the same object. self.assertFalse(div is div_copy) # And they don't have the same relation to the parse tree. The # copy is not associated with a parse tree at all. self.assertEqual(None, div_copy.parent) self.assertEqual(None, div_copy.previous_element) self.assertEqual(None, div_copy.find(string='Bar').next_element) self.assertNotEqual(None, div.find(string='Bar').next_element) class TestSubstitutions(SoupTest): def test_default_formatter_is_minimal(self): markup = u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>" soup = self.soup(markup) decoded = soup.decode(formatter="minimal") # The < is converted back into &lt; but the e-with-acute is left alone. 
self.assertEqual( decoded, self.document_for( u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>")) def test_formatter_html(self): markup = u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>" soup = self.soup(markup) decoded = soup.decode(formatter="html") self.assertEqual( decoded, self.document_for("<b>&lt;&lt;Sacr&eacute; bleu!&gt;&gt;</b>")) def test_formatter_minimal(self): markup = u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>" soup = self.soup(markup) decoded = soup.decode(formatter="minimal") # The < is converted back into &lt; but the e-with-acute is left alone. self.assertEqual( decoded, self.document_for( u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>")) def test_formatter_null(self): markup = u"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>" soup = self.soup(markup) decoded = soup.decode(formatter=None) # Neither the angle brackets nor the e-with-acute are converted. # This is not valid HTML, but it's what the user wanted. self.assertEqual(decoded, self.document_for(u"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>")) def test_formatter_custom(self): markup = u"<b>&lt;foo&gt;</b><b>bar</b>" soup = self.soup(markup) decoded = soup.decode(formatter = lambda x: x.upper()) # Instead of normal entity conversion code, the custom # callable is called on every string. 
self.assertEqual( decoded, self.document_for(u"<b><FOO></b><b>BAR</b>")) def test_formatter_is_run_on_attribute_values(self): markup = u'<a href="http://a.com?a=b&c=é">e</a>' soup = self.soup(markup) a = soup.a expect_minimal = u'<a href="http://a.com?a=b&amp;c=é">e</a>' self.assertEqual(expect_minimal, a.decode()) self.assertEqual(expect_minimal, a.decode(formatter="minimal")) expect_html = u'<a href="http://a.com?a=b&amp;c=&eacute;">e</a>' self.assertEqual(expect_html, a.decode(formatter="html")) self.assertEqual(markup, a.decode(formatter=None)) expect_upper = u'<a href="HTTP://A.COM?A=B&C=É">E</a>' self.assertEqual(expect_upper, a.decode(formatter=lambda x: x.upper())) def test_formatter_skips_script_tag_for_html_documents(self): doc = """ <script type="text/javascript"> console.log("< < hey > > "); </script> """ encoded = BeautifulSoup(doc, 'html.parser').encode() self.assertTrue(b"< < hey > >" in encoded) def test_formatter_skips_style_tag_for_html_documents(self): doc = """ <style type="text/css"> console.log("< < hey > > "); </style> """ encoded = BeautifulSoup(doc, 'html.parser').encode() self.assertTrue(b"< < hey > >" in encoded) def test_prettify_leaves_preformatted_text_alone(self): soup = self.soup("<div> foo <pre> \tbar\n \n </pre> baz ") # Everything outside the <pre> tag is reformatted, but everything # inside is left alone. 
self.assertEqual( u'<div>\n foo\n <pre> \tbar\n \n </pre>\n baz\n</div>', soup.div.prettify()) def test_prettify_accepts_formatter(self): soup = BeautifulSoup("<html><body>foo</body></html>", 'html.parser') pretty = soup.prettify(formatter = lambda x: x.upper()) self.assertTrue("FOO" in pretty) def test_prettify_outputs_unicode_by_default(self): soup = self.soup("<a></a>") self.assertEqual(unicode, type(soup.prettify())) def test_prettify_can_encode_data(self): soup = self.soup("<a></a>") self.assertEqual(bytes, type(soup.prettify("utf-8"))) def test_html_entity_substitution_off_by_default(self): markup = u"<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>" soup = self.soup(markup) encoded = soup.b.encode("utf-8") self.assertEqual(encoded, markup.encode('utf-8')) def test_encoding_substitution(self): # Here's the <meta> tag saying that a document is # encoded in Shift-JIS. meta_tag = ('<meta content="text/html; charset=x-sjis" ' 'http-equiv="Content-type"/>') soup = self.soup(meta_tag) # Parse the document, and the charset apprears unchanged. self.assertEqual(soup.meta['content'], 'text/html; charset=x-sjis') # Encode the document into some encoding, and the encoding is # substituted into the meta tag. utf_8 = soup.encode("utf-8") self.assertTrue(b"charset=utf-8" in utf_8) euc_jp = soup.encode("euc_jp") self.assertTrue(b"charset=euc_jp" in euc_jp) shift_jis = soup.encode("shift-jis") self.assertTrue(b"charset=shift-jis" in shift_jis) utf_16_u = soup.encode("utf-16").decode("utf-16") self.assertTrue("charset=utf-16" in utf_16_u) def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self): markup = ('<head><meta content="text/html; charset=x-sjis" ' 'http-equiv="Content-type"/></head><pre>foo</pre>') # Beautiful Soup used to try to rewrite the meta tag even if the # meta tag got filtered out by the strainer. This test makes # sure that doesn't happen. 
strainer = SoupStrainer('pre') soup = self.soup(markup, parse_only=strainer) self.assertEqual(soup.contents[0].name, 'pre') class TestEncoding(SoupTest): """Test the ability to encode objects into strings.""" def test_unicode_string_can_be_encoded(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual(soup.b.string.encode("utf-8"), u"\N{SNOWMAN}".encode("utf-8")) def test_tag_containing_unicode_string_can_be_encoded(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual( soup.b.encode("utf-8"), html.encode("utf-8")) def test_encoding_substitutes_unrecognized_characters_by_default(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual(soup.b.encode("ascii"), b"<b>&#9731;</b>") def test_encoding_can_be_made_strict(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertRaises( UnicodeEncodeError, soup.encode, "ascii", errors="strict") def test_decode_contents(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual(u"\N{SNOWMAN}", soup.b.decode_contents()) def test_encode_contents(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual( u"\N{SNOWMAN}".encode("utf8"), soup.b.encode_contents( encoding="utf8")) def test_deprecated_renderContents(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) self.assertEqual( u"\N{SNOWMAN}".encode("utf8"), soup.b.renderContents()) def test_repr(self): html = u"<b>\N{SNOWMAN}</b>" soup = self.soup(html) if PY3K: self.assertEqual(html, repr(soup)) else: self.assertEqual(b'<b>\\u2603</b>', repr(soup)) class TestNavigableStringSubclasses(SoupTest): def test_cdata(self): # None of the current builders turn CDATA sections into CData # objects, but you can create them manually. 
soup = self.soup("") cdata = CData("foo") soup.insert(1, cdata) self.assertEqual(str(soup), "<![CDATA[foo]]>") self.assertEqual(soup.find(text="foo"), "foo") self.assertEqual(soup.contents[0], "foo") def test_cdata_is_never_formatted(self): """Text inside a CData object is passed into the formatter. But the return value is ignored. """ self.count = 0 def increment(*args): self.count += 1 return "BITTER FAILURE" soup = self.soup("") cdata = CData("<><><>") soup.insert(1, cdata) self.assertEqual( b"<![CDATA[<><><>]]>", soup.encode(formatter=increment)) self.assertEqual(1, self.count) def test_doctype_ends_in_newline(self): # Unlike other NavigableString subclasses, a DOCTYPE always ends # in a newline. doctype = Doctype("foo") soup = self.soup("") soup.insert(1, doctype) self.assertEqual(soup.encode(), b"<!DOCTYPE foo>\n") def test_declaration(self): d = Declaration("foo") self.assertEqual("<?foo?>", d.output_ready()) class TestSoupSelector(TreeTest): HTML = """ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> <html> <head> <title>The title</title> <link rel="stylesheet" href="blah.css" type="text/css" id="l1"> </head> <body> <custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag> <div id="main" class="fancy"> <div id="inner"> <h1 id="header1">An H1</h1> <p>Some text</p> <p class="onep" id="p1">Some more text</p> <h2 id="header2">An H2</h2> <p class="class1 class2 class3" id="pmulti">Another</p> <a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a> <h2 id="header3">Another H2</h2> <a id="me" href="http://simonwillison.net/" rel="me">me</a> <span class="s1"> <a href="#" id="s1a1">span1a1</a> <a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a> <span class="span2"> <a href="#" id="s2a1">span2a1</a> </span> <span class="span3"></span> <custom-dashed-tag class="dashed" id="dash2"/> <div data-tag="dashedvalue" id="data1"/> </span> </div> <x id="xid"> <z id="zida"/> <z id="zidab"/> <z 
id="zidac"/> </x> <y id="yid"> <z id="zidb"/> </y> <p lang="en" id="lang-en">English</p> <p lang="en-gb" id="lang-en-gb">English UK</p> <p lang="en-us" id="lang-en-us">English US</p> <p lang="fr" id="lang-fr">French</p> </div> <div id="footer"> </div> """ def setUp(self): self.soup = BeautifulSoup(self.HTML, 'html.parser') def assertSelects(self, selector, expected_ids, **kwargs): el_ids = [el['id'] for el in self.soup.select(selector, **kwargs)] el_ids.sort() expected_ids.sort() self.assertEqual(expected_ids, el_ids, "Selector %s, expected [%s], got [%s]" % ( selector, ', '.join(expected_ids), ', '.join(el_ids) ) ) assertSelect = assertSelects def assertSelectMultiple(self, *tests): for selector, expected_ids in tests: self.assertSelect(selector, expected_ids) def test_one_tag_one(self): els = self.soup.select('title') self.assertEqual(len(els), 1) self.assertEqual(els[0].name, 'title') self.assertEqual(els[0].contents, [u'The title']) def test_one_tag_many(self): els = self.soup.select('div') self.assertEqual(len(els), 4) for div in els: self.assertEqual(div.name, 'div') el = self.soup.select_one('div') self.assertEqual('main', el['id']) def test_select_one_returns_none_if_no_match(self): match = self.soup.select_one('nonexistenttag') self.assertEqual(None, match) def test_tag_in_tag_one(self): els = self.soup.select('div div') self.assertSelects('div div', ['inner', 'data1']) def test_tag_in_tag_many(self): for selector in ('html div', 'html body div', 'body div'): self.assertSelects(selector, ['data1', 'main', 'inner', 'footer']) def test_limit(self): self.assertSelects('html div', ['main'], limit=1) self.assertSelects('html body div', ['inner', 'main'], limit=2) self.assertSelects('body div', ['data1', 'main', 'inner', 'footer'], limit=10) def test_tag_no_match(self): self.assertEqual(len(self.soup.select('del')), 0) def test_invalid_tag(self): self.assertRaises(ValueError, self.soup.select, 'tag%t') def test_select_dashed_tag_ids(self): 
self.assertSelects('custom-dashed-tag', ['dash1', 'dash2']) def test_select_dashed_by_id(self): dashed = self.soup.select('custom-dashed-tag[id=\"dash2\"]') self.assertEqual(dashed[0].name, 'custom-dashed-tag') self.assertEqual(dashed[0]['id'], 'dash2') def test_dashed_tag_text(self): self.assertEqual(self.soup.select('body > custom-dashed-tag')[0].text, u'Hello there.') def test_select_dashed_matches_find_all(self): self.assertEqual(self.soup.select('custom-dashed-tag'), self.soup.find_all('custom-dashed-tag')) def test_header_tags(self): self.assertSelectMultiple( ('h1', ['header1']), ('h2', ['header2', 'header3']), ) def test_class_one(self): for selector in ('.onep', 'p.onep', 'html p.onep'): els = self.soup.select(selector) self.assertEqual(len(els), 1) self.assertEqual(els[0].name, 'p') self.assertEqual(els[0]['class'], ['onep']) def test_class_mismatched_tag(self): els = self.soup.select('div.onep') self.assertEqual(len(els), 0) def test_one_id(self): for selector in ('div#inner', '#inner', 'div div#inner'): self.assertSelects(selector, ['inner']) def test_bad_id(self): els = self.soup.select('#doesnotexist') self.assertEqual(len(els), 0) def test_items_in_id(self): els = self.soup.select('div#inner p') self.assertEqual(len(els), 3) for el in els: self.assertEqual(el.name, 'p') self.assertEqual(els[1]['class'], ['onep']) self.assertFalse(els[0].has_attr('class')) def test_a_bunch_of_emptys(self): for selector in ('div#main del', 'div#main div.oops', 'div div#main'): self.assertEqual(len(self.soup.select(selector)), 0) def test_multi_class_support(self): for selector in ('.class1', 'p.class1', '.class2', 'p.class2', '.class3', 'p.class3', 'html p.class2', 'div#inner .class2'): self.assertSelects(selector, ['pmulti']) def test_multi_class_selection(self): for selector in ('.class1.class3', '.class3.class2', '.class1.class2.class3'): self.assertSelects(selector, ['pmulti']) def test_child_selector(self): self.assertSelects('.s1 > a', ['s1a1', 's1a2']) 
self.assertSelects('.s1 > a span', ['s1a2s1']) def test_child_selector_id(self): self.assertSelects('.s1 > a#s1a2 span', ['s1a2s1']) def test_attribute_equals(self): self.assertSelectMultiple( ('p[class="onep"]', ['p1']), ('p[id="p1"]', ['p1']), ('[class="onep"]', ['p1']), ('[id="p1"]', ['p1']), ('link[rel="stylesheet"]', ['l1']), ('link[type="text/css"]', ['l1']), ('link[href="blah.css"]', ['l1']), ('link[href="no-blah.css"]', []), ('[rel="stylesheet"]', ['l1']), ('[type="text/css"]', ['l1']), ('[href="blah.css"]', ['l1']), ('[href="no-blah.css"]', []), ('p[href="no-blah.css"]', []), ('[href="no-blah.css"]', []), ) def test_attribute_tilde(self): self.assertSelectMultiple( ('p[class~="class1"]', ['pmulti']), ('p[class~="class2"]', ['pmulti']), ('p[class~="class3"]', ['pmulti']), ('[class~="class1"]', ['pmulti']), ('[class~="class2"]', ['pmulti']), ('[class~="class3"]', ['pmulti']), ('a[rel~="friend"]', ['bob']), ('a[rel~="met"]', ['bob']), ('[rel~="friend"]', ['bob']), ('[rel~="met"]', ['bob']), ) def test_attribute_startswith(self): self.assertSelectMultiple( ('[rel^="style"]', ['l1']), ('link[rel^="style"]', ['l1']), ('notlink[rel^="notstyle"]', []), ('[rel^="notstyle"]', []), ('link[rel^="notstyle"]', []), ('link[href^="bla"]', ['l1']), ('a[href^="http://"]', ['bob', 'me']), ('[href^="http://"]', ['bob', 'me']), ('[id^="p"]', ['pmulti', 'p1']), ('[id^="m"]', ['me', 'main']), ('div[id^="m"]', ['main']), ('a[id^="m"]', ['me']), ('div[data-tag^="dashed"]', ['data1']) ) def test_attribute_endswith(self): self.assertSelectMultiple( ('[href$=".css"]', ['l1']), ('link[href$=".css"]', ['l1']), ('link[id$="1"]', ['l1']), ('[id$="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1', 'dash1']), ('div[id$="1"]', ['data1']), ('[id$="noending"]', []), ) def test_attribute_contains(self): self.assertSelectMultiple( # From test_attribute_startswith ('[rel*="style"]', ['l1']), ('link[rel*="style"]', ['l1']), ('notlink[rel*="notstyle"]', []), ('[rel*="notstyle"]', 
[]), ('link[rel*="notstyle"]', []), ('link[href*="bla"]', ['l1']), ('[href*="http://"]', ['bob', 'me']), ('[id*="p"]', ['pmulti', 'p1']), ('div[id*="m"]', ['main']), ('a[id*="m"]', ['me']), # From test_attribute_endswith ('[href*=".css"]', ['l1']), ('link[href*=".css"]', ['l1']), ('link[id*="1"]', ['l1']), ('[id*="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1', 'dash1']), ('div[id*="1"]', ['data1']), ('[id*="noending"]', []), # New for this test ('[href*="."]', ['bob', 'me', 'l1']), ('a[href*="."]', ['bob', 'me']), ('link[href*="."]', ['l1']), ('div[id*="n"]', ['main', 'inner']), ('div[id*="nn"]', ['inner']), ('div[data-tag*="edval"]', ['data1']) ) def test_attribute_exact_or_hypen(self): self.assertSelectMultiple( ('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']), ('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']), ('p[lang|="fr"]', ['lang-fr']), ('p[lang|="gb"]', []), ) def test_attribute_exists(self): self.assertSelectMultiple( ('[rel]', ['l1', 'bob', 'me']), ('link[rel]', ['l1']), ('a[rel]', ['bob', 'me']), ('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']), ('p[class]', ['p1', 'pmulti']), ('[blah]', []), ('p[blah]', []), ('div[data-tag]', ['data1']) ) def test_quoted_space_in_selector_name(self): html = """<div style="display: wrong">nope</div> <div style="display: right">yes</div> """ soup = BeautifulSoup(html, 'html.parser') [chosen] = soup.select('div[style="display: right"]') self.assertEqual("yes", chosen.string) def test_unsupported_pseudoclass(self): self.assertRaises( NotImplementedError, self.soup.select, "a:no-such-pseudoclass") self.assertRaises( NotImplementedError, self.soup.select, "a:nth-of-type(a)") def test_nth_of_type(self): # Try to select first paragraph els = self.soup.select('div#inner p:nth-of-type(1)') self.assertEqual(len(els), 1) self.assertEqual(els[0].string, u'Some text') # Try to select third paragraph els = self.soup.select('div#inner p:nth-of-type(3)') 
self.assertEqual(len(els), 1) self.assertEqual(els[0].string, u'Another') # Try to select (non-existent!) fourth paragraph els = self.soup.select('div#inner p:nth-of-type(4)') self.assertEqual(len(els), 0) # Pass in an invalid value. self.assertRaises( ValueError, self.soup.select, 'div p:nth-of-type(0)') def test_nth_of_type_direct_descendant(self): els = self.soup.select('div#inner > p:nth-of-type(1)') self.assertEqual(len(els), 1) self.assertEqual(els[0].string, u'Some text') def test_id_child_selector_nth_of_type(self): self.assertSelects('#inner > p:nth-of-type(2)', ['p1']) def test_select_on_element(self): # Other tests operate on the tree; this operates on an element # within the tree. inner = self.soup.find("div", id="main") selected = inner.select("div") # The <div id="inner"> tag was selected. The <div id="footer"> # tag was not. self.assertSelectsIDs(selected, ['inner', 'data1']) def test_overspecified_child_id(self): self.assertSelects(".fancy #inner", ['inner']) self.assertSelects(".normal #inner", []) def test_adjacent_sibling_selector(self): self.assertSelects('#p1 + h2', ['header2']) self.assertSelects('#p1 + h2 + p', ['pmulti']) self.assertSelects('#p1 + #header2 + .class1', ['pmulti']) self.assertEqual([], self.soup.select('#p1 + p')) def test_general_sibling_selector(self): self.assertSelects('#p1 ~ h2', ['header2', 'header3']) self.assertSelects('#p1 ~ #header2', ['header2']) self.assertSelects('#p1 ~ h2 + a', ['me']) self.assertSelects('#p1 ~ h2 + [rel="me"]', ['me']) self.assertEqual([], self.soup.select('#inner ~ h2')) def test_dangling_combinator(self): self.assertRaises(ValueError, self.soup.select, 'h1 >') def test_sibling_combinator_wont_select_same_tag_twice(self): self.assertSelects('p[lang] ~ p', ['lang-en-gb', 'lang-en-us', 'lang-fr']) # Test the selector grouping operator (the comma) def test_multiple_select(self): self.assertSelects('x, y', ['xid', 'yid']) def test_multiple_select_with_no_space(self): self.assertSelects('x,y', 
['xid', 'yid']) def test_multiple_select_with_more_space(self): self.assertSelects('x, y', ['xid', 'yid']) def test_multiple_select_duplicated(self): self.assertSelects('x, x', ['xid']) def test_multiple_select_sibling(self): self.assertSelects('x, y ~ p[lang=fr]', ['xid', 'lang-fr']) def test_multiple_select_tag_and_direct_descendant(self): self.assertSelects('x, y > z', ['xid', 'zidb']) def test_multiple_select_direct_descendant_and_tags(self): self.assertSelects('div > x, y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac']) def test_multiple_select_indirect_descendant(self): self.assertSelects('div x,y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac']) def test_invalid_multiple_select(self): self.assertRaises(ValueError, self.soup.select, ',x, y') self.assertRaises(ValueError, self.soup.select, 'x,,y') def test_multiple_select_attrs(self): self.assertSelects('p[lang=en], p[lang=en-gb]', ['lang-en', 'lang-en-gb']) def test_multiple_select_ids(self): self.assertSelects('x, y > z[id=zida], z[id=zidab], z[id=zidb]', ['xid', 'zidb', 'zidab']) def test_multiple_select_nested(self): self.assertSelects('body > div > x, y > z', ['xid', 'zidb'])
gpl-3.0
secynic/nfsinkhole
nfsinkhole/tests/test_utils.py
1
1282
import logging from nfsinkhole.exceptions import SubprocessError from nfsinkhole.tests import TestCommon from nfsinkhole.utils import (popen_wrapper, get_default_interface, get_interface_addr, set_system_timezone) LOG_FORMAT = ('[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)s] ' '[%(funcName)s()] %(message)s') logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) log = logging.getLogger(__name__) class TestIPTablesSinkhole(TestCommon): def test_popen_wrapper(self): # Argument checks self.assertRaises(ValueError, popen_wrapper) self.assertRaises(TypeError, popen_wrapper, 'notalist') self.assertNotEqual(popen_wrapper(['ls'], log_stdout_line=False), None) self.assertNotEqual(get_interface_addr('eth0'), None) self.assertEqual(get_interface_addr('asdasd'), None) # raise_err test self.assertRaises(SubprocessError, popen_wrapper, **dict( cmd_arr=['asdasd'], raise_err=True )) def test_get_default_interface(self): self.assertNotEqual(get_default_interface(), None) def test_set_system_timezone(self): set_system_timezone('UTC') set_system_timezone('UTC', skip_timedatectl=True)
bsd-2-clause
Krossom/python-for-android
python3-alpha/python3-src/Lib/ctypes/test/test_anon.py
264
2051
import unittest from ctypes import * class AnonTest(unittest.TestCase): def test_anon(self): class ANON(Union): _fields_ = [("a", c_int), ("b", c_int)] class Y(Structure): _fields_ = [("x", c_int), ("_", ANON), ("y", c_int)] _anonymous_ = ["_"] self.assertEqual(Y.a.offset, sizeof(c_int)) self.assertEqual(Y.b.offset, sizeof(c_int)) self.assertEqual(ANON.a.offset, 0) self.assertEqual(ANON.b.offset, 0) def test_anon_nonseq(self): # TypeError: _anonymous_ must be a sequence self.assertRaises(TypeError, lambda: type(Structure)("Name", (Structure,), {"_fields_": [], "_anonymous_": 42})) def test_anon_nonmember(self): # AttributeError: type object 'Name' has no attribute 'x' self.assertRaises(AttributeError, lambda: type(Structure)("Name", (Structure,), {"_fields_": [], "_anonymous_": ["x"]})) def test_nested(self): class ANON_S(Structure): _fields_ = [("a", c_int)] class ANON_U(Union): _fields_ = [("_", ANON_S), ("b", c_int)] _anonymous_ = ["_"] class Y(Structure): _fields_ = [("x", c_int), ("_", ANON_U), ("y", c_int)] _anonymous_ = ["_"] self.assertEqual(Y.x.offset, 0) self.assertEqual(Y.a.offset, sizeof(c_int)) self.assertEqual(Y.b.offset, sizeof(c_int)) self.assertEqual(Y._.offset, sizeof(c_int)) self.assertEqual(Y.y.offset, sizeof(c_int) * 2) if __name__ == "__main__": unittest.main()
apache-2.0
TFenby/python-mode
pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/imports.py
17
15940
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE). # http://www.logilab.fr/ -- mailto:contact@logilab.fr # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """imports checkers for Python code""" import sys from logilab.common.graph import get_cycles, DotBackend from logilab.common.modutils import get_module_part, is_standard_module from logilab.common.ureports import VerbatimText, Paragraph import astroid from astroid import are_exclusive from pylint.interfaces import IAstroidChecker from pylint.utils import EmptyReport from pylint.checkers import BaseChecker from pylint.checkers.utils import check_messages def get_first_import(node, context, name, base, level): """return the node where [base.]<name> is imported or None if not found """ fullname = '%s.%s' % (base, name) if base else name first = None found = False for first in context.body: if first is node: continue if first.scope() is node.scope() and first.fromlineno > node.fromlineno: continue if isinstance(first, astroid.Import): if any(fullname == iname[0] for iname in first.names): found = True break elif isinstance(first, astroid.From): if level == first.level and any( fullname == '%s.%s' % (first.modname, iname[0]) for iname in first.names): found = True break if found and not are_exclusive(first, node): return first # utilities to represents import 
dependencies as tree and dot graph ########### def make_tree_defs(mod_files_list): """get a list of 2-uple (module, list_of_files_which_import_this_module), it will return a dictionary to represent this as a tree """ tree_defs = {} for mod, files in mod_files_list: node = (tree_defs, ()) for prefix in mod.split('.'): node = node[0].setdefault(prefix, [{}, []]) node[1] += files return tree_defs def repr_tree_defs(data, indent_str=None): """return a string which represents imports as a tree""" lines = [] nodes = data.items() for i, (mod, (sub, files)) in enumerate(sorted(nodes, key=lambda x: x[0])): if not files: files = '' else: files = '(%s)' % ','.join(files) if indent_str is None: lines.append('%s %s' % (mod, files)) sub_indent_str = ' ' else: lines.append(r'%s\-%s %s' % (indent_str, mod, files)) if i == len(nodes)-1: sub_indent_str = '%s ' % indent_str else: sub_indent_str = '%s| ' % indent_str if sub: lines.append(repr_tree_defs(sub, sub_indent_str)) return '\n'.join(lines) def dependencies_graph(filename, dep_info): """write dependencies as a dot (graphviz) file """ done = {} printer = DotBackend(filename[:-4], rankdir='LR') printer.emit('URL="." 
node[shape="box"]') for modname, dependencies in sorted(dep_info.iteritems()): done[modname] = 1 printer.emit_node(modname) for modname in dependencies: if modname not in done: done[modname] = 1 printer.emit_node(modname) for depmodname, dependencies in sorted(dep_info.iteritems()): for modname in dependencies: printer.emit_edge(modname, depmodname) printer.generate(filename) def make_graph(filename, dep_info, sect, gtype): """generate a dependencies graph and add some information about it in the report's section """ dependencies_graph(filename, dep_info) sect.append(Paragraph('%simports graph has been written to %s' % (gtype, filename))) # the import checker itself ################################################### MSGS = { 'F0401': ('Unable to import %s', 'import-error', 'Used when pylint has been unable to import a module.'), 'R0401': ('Cyclic import (%s)', 'cyclic-import', 'Used when a cyclic import between two or more modules is \ detected.'), 'W0401': ('Wildcard import %s', 'wildcard-import', 'Used when `from module import *` is detected.'), 'W0402': ('Uses of a deprecated module %r', 'deprecated-module', 'Used a module marked as deprecated is imported.'), 'W0403': ('Relative import %r, should be %r', 'relative-import', 'Used when an import relative to the package directory is \ detected.'), 'W0404': ('Reimport %r (imported line %s)', 'reimported', 'Used when a module is reimported multiple times.'), 'W0406': ('Module import itself', 'import-self', 'Used when a module is importing itself.'), 'W0410': ('__future__ import is not the first non docstring statement', 'misplaced-future', 'Python 2.5 and greater require __future__ import to be the \ first non docstring statement in the module.', {'maxversion': (3, 0)}), } class ImportsChecker(BaseChecker): """checks for * external modules dependencies * relative / wildcard imports * cyclic imports * uses of deprecated modules """ __implements__ = IAstroidChecker name = 'imports' msgs = MSGS priority = -2 if 
sys.version_info < (3,): deprecated_modules = ('regsub', 'TERMIOS', 'Bastion', 'rexec') else: deprecated_modules = ('stringprep', 'optparse') options = (('deprecated-modules', {'default' : deprecated_modules, 'type' : 'csv', 'metavar' : '<modules>', 'help' : 'Deprecated modules which should not be used, \ separated by a comma'} ), ('import-graph', {'default' : '', 'type' : 'string', 'metavar' : '<file.dot>', 'help' : 'Create a graph of every (i.e. internal and \ external) dependencies in the given file (report RP0402 must not be disabled)'} ), ('ext-import-graph', {'default' : '', 'type' : 'string', 'metavar' : '<file.dot>', 'help' : 'Create a graph of external dependencies in the \ given file (report RP0402 must not be disabled)'} ), ('int-import-graph', {'default' : '', 'type' : 'string', 'metavar' : '<file.dot>', 'help' : 'Create a graph of internal dependencies in the \ given file (report RP0402 must not be disabled)'} ), ) def __init__(self, linter=None): BaseChecker.__init__(self, linter) self.stats = None self.import_graph = None self.__int_dep_info = self.__ext_dep_info = None self.reports = (('RP0401', 'External dependencies', self.report_external_dependencies), ('RP0402', 'Modules dependencies graph', self.report_dependencies_graph), ) def open(self): """called before visiting project (i.e set of modules)""" self.linter.add_stats(dependencies={}) self.linter.add_stats(cycles=[]) self.stats = self.linter.stats self.import_graph = {} def close(self): """called before visiting project (i.e set of modules)""" # don't try to compute cycles if the associated message is disabled if self.linter.is_message_enabled('cyclic-import'): for cycle in get_cycles(self.import_graph): self.add_message('cyclic-import', args=' -> '.join(cycle)) def visit_import(self, node): """triggered when an import statement is seen""" modnode = node.root() for name, _ in node.names: importedmodnode = self.get_imported_module(modnode, node, name) if importedmodnode is None: continue 
self._check_relative_import(modnode, node, importedmodnode, name) self._add_imported_module(node, importedmodnode.name) self._check_deprecated_module(node, name) self._check_reimport(node, name) # TODO This appears to be the list of all messages of the checker... # @check_messages('W0410', 'W0401', 'W0403', 'W0402', 'W0404', 'W0406', 'F0401') @check_messages(*(MSGS.keys())) def visit_from(self, node): """triggered when a from statement is seen""" basename = node.modname if basename == '__future__': # check if this is the first non-docstring statement in the module prev = node.previous_sibling() if prev: # consecutive future statements are possible if not (isinstance(prev, astroid.From) and prev.modname == '__future__'): self.add_message('misplaced-future', node=node) return for name, _ in node.names: if name == '*': self.add_message('wildcard-import', args=basename, node=node) modnode = node.root() importedmodnode = self.get_imported_module(modnode, node, basename) if importedmodnode is None: return self._check_relative_import(modnode, node, importedmodnode, basename) self._check_deprecated_module(node, basename) for name, _ in node.names: if name != '*': self._add_imported_module(node, '%s.%s' % (importedmodnode.name, name)) self._check_reimport(node, name, basename, node.level) def get_imported_module(self, modnode, importnode, modname): try: return importnode.do_import_module(modname) except astroid.InferenceError, ex: if str(ex) != modname: args = '%r (%s)' % (modname, ex) else: args = repr(modname) self.add_message("import-error", args=args, node=importnode) def _check_relative_import(self, modnode, importnode, importedmodnode, importedasname): """check relative import. node is either an Import or From node, modname the imported module name. 
""" if not self.linter.is_message_enabled('relative-import'): return if importedmodnode.file is None: return False # built-in module if modnode is importedmodnode: return False # module importing itself if modnode.absolute_import_activated() or getattr(importnode, 'level', None): return False if importedmodnode.name != importedasname: # this must be a relative import... self.add_message('relative-import', args=(importedasname, importedmodnode.name), node=importnode) def _add_imported_module(self, node, importedmodname): """notify an imported module, used to analyze dependencies""" importedmodname = get_module_part(importedmodname) context_name = node.root().name if context_name == importedmodname: # module importing itself ! self.add_message('import-self', node=node) elif not is_standard_module(importedmodname): # handle dependencies importedmodnames = self.stats['dependencies'].setdefault( importedmodname, set()) if not context_name in importedmodnames: importedmodnames.add(context_name) # update import graph mgraph = self.import_graph.setdefault(context_name, set()) if not importedmodname in mgraph: mgraph.add(importedmodname) def _check_deprecated_module(self, node, mod_path): """check if the module is deprecated""" for mod_name in self.config.deprecated_modules: if mod_path == mod_name or mod_path.startswith(mod_name + '.'): self.add_message('deprecated-module', node=node, args=mod_path) def _check_reimport(self, node, name, basename=None, level=None): """check if the import is necessary (i.e. 
not already done)""" if not self.linter.is_message_enabled('reimported'): return frame = node.frame() root = node.root() contexts = [(frame, level)] if root is not frame: contexts.append((root, None)) for context, level in contexts: first = get_first_import(node, context, name, basename, level) if first is not None: self.add_message('reimported', node=node, args=(name, first.fromlineno)) def report_external_dependencies(self, sect, _, dummy): """return a verbatim layout for displaying dependencies""" dep_info = make_tree_defs(self._external_dependencies_info().iteritems()) if not dep_info: raise EmptyReport() tree_str = repr_tree_defs(dep_info) sect.append(VerbatimText(tree_str)) def report_dependencies_graph(self, sect, _, dummy): """write dependencies as a dot (graphviz) file""" dep_info = self.stats['dependencies'] if not dep_info or not (self.config.import_graph or self.config.ext_import_graph or self.config.int_import_graph): raise EmptyReport() filename = self.config.import_graph if filename: make_graph(filename, dep_info, sect, '') filename = self.config.ext_import_graph if filename: make_graph(filename, self._external_dependencies_info(), sect, 'external ') filename = self.config.int_import_graph if filename: make_graph(filename, self._internal_dependencies_info(), sect, 'internal ') def _external_dependencies_info(self): """return cached external dependencies information or build and cache them """ if self.__ext_dep_info is None: package = self.linter.base_name self.__ext_dep_info = result = {} for importee, importers in self.stats['dependencies'].iteritems(): if not importee.startswith(package): result[importee] = importers return self.__ext_dep_info def _internal_dependencies_info(self): """return cached internal dependencies information or build and cache them """ if self.__int_dep_info is None: package = self.linter.base_name self.__int_dep_info = result = {} for importee, importers in self.stats['dependencies'].iteritems(): if 
importee.startswith(package): result[importee] = importers return self.__int_dep_info def register(linter): """required method to auto register this checker """ linter.register_checker(ImportsChecker(linter))
lgpl-3.0
PearsonIOKI/compose-forum
askbot/deps/django_authopenid/forms.py
1
19635
# -*- coding: utf-8 -*- # Copyright (c) 2007, 2008, Benoît Chesneau # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # * notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # * notice, this list of conditions and the following disclaimer in the # * documentation and/or other materials provided with the # * distribution. Neither the name of the <ORGANIZATION> nor the names # * of its contributors may be used to endorse or promote products # * derived from this software without specific prior written # * permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, # OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import logging import cgi from django import forms from django.contrib.auth.models import User from django.utils.translation import ugettext as _ from django.utils.translation import ugettext_lazy from django.conf import settings as django_settings from askbot.conf import settings as askbot_settings from askbot import const as askbot_const from django.utils.safestring import mark_safe from recaptcha_works.fields import RecaptchaField from askbot.utils.forms import NextUrlField, UserNameField, UserEmailField, SetPasswordForm from askbot.utils.loading import load_module # needed for some linux distributions like debian try: from openid.yadis import xri except ImportError: from yadis import xri from askbot.deps.django_authopenid import util __all__ = [ 'OpenidSigninForm','OpenidRegisterForm', 'ClassicRegisterForm', 'ChangePasswordForm', 'ChangeEmailForm', 'EmailPasswordForm', 'DeleteForm', 'ChangeOpenidForm' ] class LoginProviderField(forms.CharField): """char field where value must be one of login providers """ widget = forms.widgets.HiddenInput() def __init__(self, *args, **kwargs): kwargs['max_length'] = 64 super(LoginProviderField, self).__init__(*args, **kwargs) def clean(self, value): """makes sure that login provider name exists is in the list of accepted providers """ providers = util.get_enabled_login_providers() if value in providers: return value else: error_message = 'unknown provider name %s' % cgi.escape(value) logging.critical(error_message) raise forms.ValidationError(error_message) class PasswordLoginProviderField(LoginProviderField): """char field where value must be one of login providers using username/password method for authentication """ def clean(self, value): """make sure that value is name of one of the known password login providers """ value = super(PasswordLoginProviderField, self).clean(value) providers = util.get_enabled_login_providers() if providers[value]['type'] != 'password': raise forms.ValidationError( 'provider %s must accept 
password' % value ) return value class OpenidSigninForm(forms.Form): """ signin form """ openid_url = forms.CharField(max_length=255, widget=forms.widgets.TextInput(attrs={'class': 'openid-login-input', 'size':80})) next = NextUrlField() def clean_openid_url(self): """ test if openid is accepted """ if 'openid_url' in self.cleaned_data: openid_url = self.cleaned_data['openid_url'] if xri.identifierScheme(openid_url) == 'XRI' and getattr( django_settings, 'OPENID_DISALLOW_INAMES', False ): raise forms.ValidationError(_('i-names are not supported')) return self.cleaned_data['openid_url'] class LoginForm(forms.Form): """All-inclusive login form. handles the following: * password login * change of password * openid login (of all types - direct, usename, generic url-based) * oauth login * facebook login (javascript-based facebook's sdk) """ next = NextUrlField() login_provider_name = LoginProviderField() openid_login_token = forms.CharField( max_length=256, required = False, ) username = UserNameField(required=False, skip_clean=True) password = forms.CharField( max_length=128, widget=forms.widgets.PasswordInput( attrs={'class':'required login'} ), required=False ) password_action = forms.CharField( max_length=32, required=False, widget=forms.widgets.HiddenInput() ) new_password = forms.CharField( max_length=128, widget=forms.widgets.PasswordInput( attrs={'class':'required login'} ), required=False ) new_password_retyped = forms.CharField( max_length=128, widget=forms.widgets.PasswordInput( attrs={'class':'required login'} ), required=False ) def set_error_if_missing(self, field_name, error_message): """set's error message on a field if the field is not present in the cleaned_data dictionary """ if field_name not in self.cleaned_data: self._errors[field_name] = self.error_class([error_message]) def set_password_login_error(self): """sets a parameter flagging that login with password had failed """ #add monkey-patch parameter #this is used in the signin.html template 
self.password_login_failed = True def set_password_change_error(self): """sets a parameter flagging that password change failed """ #add monkey-patch parameter #this is used in the signin.html template self.password_change_failed = True def clean(self): """besides input data takes data from the login provider settings and stores final digested data into the cleaned_data the idea is that cleaned data can be used directly to enact the signin action, without post-processing of the data contents of cleaned_data depends on the type of login """ providers = util.get_enabled_login_providers() if 'login_provider_name' in self.cleaned_data: provider_name = self.cleaned_data['login_provider_name'] else: raise forms.ValidationError('no login provider specified') provider_data = providers[provider_name] provider_type = provider_data['type'] if provider_type == 'password': self.do_clean_password_fields() self.cleaned_data['login_type'] = 'password' elif provider_type.startswith('openid'): self.do_clean_openid_fields(provider_data) self.cleaned_data['login_type'] = 'openid' elif provider_type == 'oauth': self.cleaned_data['login_type'] = 'oauth' elif provider_type == 'oauth2': self.cleaned_data['login_type'] = 'oauth2' elif provider_type == 'facebook': self.cleaned_data['login_type'] = 'facebook' #self.do_clean_oauth_fields() elif provider_type == 'wordpress_site': self.cleaned_data['login_type'] = 'wordpress_site' return self.cleaned_data def do_clean_openid_fields(self, provider_data): """returns fake openid_url value created based on provider_type (subtype of openid) and the """ openid_endpoint = provider_data['openid_endpoint'] openid_type = provider_data['type'] if openid_type == 'openid-direct': openid_url = openid_endpoint else: error_message = _('Please enter your %(username_token)s') % \ {'username_token': provider_data['extra_token_name']} self.set_error_if_missing('openid_login_token', error_message) if 'openid_login_token' in self.cleaned_data: openid_login_token = 
self.cleaned_data['openid_login_token'] if openid_type == 'openid-username': openid_url = openid_endpoint % {'username': openid_login_token} elif openid_type == 'openid-generic': openid_url = openid_login_token else: raise ValueError('unknown openid type %s' % openid_type) self.cleaned_data['openid_url'] = openid_url def do_clean_password_fields(self): """cleans password fields appropriate for the selected password_action, which can be either "login" or "change_password" new password is checked for minimum length and match to initial entry """ password_action = self.cleaned_data.get('password_action', None) if password_action == 'login': #if it's login with password - password and user name are required self.set_error_if_missing( 'username', _('Please, enter your user name') ) self.set_error_if_missing( 'password', _('Please, enter your password') ) elif password_action == 'change_password': #if it's change password - new_password and new_password_retyped self.set_error_if_missing( 'new_password', _('Please, enter your new password') ) self.set_error_if_missing( 'new_password_retyped', _('Please, enter your new password') ) field_set = set(('new_password', 'new_password_retyped')) if field_set.issubset(self.cleaned_data.keys()): new_password = self.cleaned_data[ 'new_password' ].strip() new_password_retyped = self.cleaned_data[ 'new_password_retyped' ].strip() if new_password != new_password_retyped: error_message = _('Passwords did not match') error = self.error_class([error_message]) self._errors['new_password_retyped'] = error self.set_password_change_error() del self.cleaned_data['new_password'] del self.cleaned_data['new_password_retyped'] else: #validate password if len(new_password) < askbot_const.PASSWORD_MIN_LENGTH: del self.cleaned_data['new_password'] del self.cleaned_data['new_password_retyped'] error_message = _( 'choose password > %(len)s characters' ) % {'len': askbot_const.PASSWORD_MIN_LENGTH} error = self.error_class([error_message]) 
self._errors['new_password'] = error self.set_password_change_error() else: error_message = 'unknown password action' logging.critical(error_message) self._errors['password_action'] = self.error_class([error_message]) raise forms.ValidationError(error_message) class OpenidRegisterForm(forms.Form): """ openid signin form """ next = NextUrlField() username = UserNameField(widget_attrs={'tabindex': 0}) email = UserEmailField() class ClassicRegisterForm(SetPasswordForm): """ legacy registration form """ next = NextUrlField() username = UserNameField(widget_attrs={'tabindex': 0}) email = UserEmailField() login_provider = PasswordLoginProviderField() #fields password1 and password2 are inherited class SafeClassicRegisterForm(ClassicRegisterForm): """this form uses recaptcha in addition to the base register form """ recaptcha = RecaptchaField( private_key = askbot_settings.RECAPTCHA_SECRET, public_key = askbot_settings.RECAPTCHA_KEY ) class ChangePasswordForm(forms.Form): """ change password form """ new_password = forms.CharField( widget=forms.PasswordInput(), error_messages = { 'required': _('password is required'), } ) new_password_retyped = forms.CharField( widget=forms.PasswordInput(), error_messages = { 'required': _('retype your password'), } ) def clean_new_password(self): if 'new_password' in self.cleaned_data: password = self.cleaned_data['new_password'] min_len = askbot_const.PASSWORD_MIN_LENGTH if len(password) < min_len: error = _('choose password > %(len)s characters') % \ {'len': min_len} raise forms.ValidationError(error) return password def clean(self): expected_keys = set(['new_password', 'new_password_retyped']) if set(self.cleaned_data.keys()) == expected_keys: pw1 = self.cleaned_data['new_password'] pw2 = self.cleaned_data['new_password_retyped'] if pw1 != pw2: error = _('entered passwords did not match, please try again') raise forms.ValidationError(error) return self.cleaned_data class ChangeEmailForm(forms.Form): """ change email form """ email = 
UserEmailField(skip_clean=True) def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, \ initial=None, user=None): super(ChangeEmailForm, self).__init__(data, files, auto_id, prefix, initial) self.user = user def clean_email(self): """ check if email don't exist """ if 'email' in self.cleaned_data: try: user = User.objects.get(email = self.cleaned_data['email']) if self.user and self.user == user: return self.cleaned_data['email'] except User.DoesNotExist: return self.cleaned_data['email'] except User.MultipleObjectsReturned: raise forms.ValidationError(u'There is already more than one \ account registered with that e-mail address. Please try \ another.') raise forms.ValidationError(u'This email is already registered \ in our database. Please choose another.') class AccountRecoveryForm(forms.Form): """with this form user enters email address and receives an account recovery link in email this form merely checks that entered email """ email = forms.EmailField() def clean_email(self): """check if email exists in the database and if so, populate 'user' field in the cleaned data with the user object """ if 'email' in self.cleaned_data: email = self.cleaned_data['email'] try: user = User.objects.filter(email__iexact=email)[0] self.cleaned_data['user'] = user except IndexError: del self.cleaned_data['email'] message = _('Sorry, we don\'t have this email address in the database') raise forms.ValidationError(message) class ChangeopenidForm(forms.Form): """ change openid form """ openid_url = forms.CharField(max_length=255, widget=forms.TextInput(attrs={'class': "required" })) def __init__(self, data=None, user=None, *args, **kwargs): if user is None: raise TypeError("Keyword argument 'user' must be supplied") super(ChangeopenidForm, self).__init__(data, *args, **kwargs) self.user = user class DeleteForm(forms.Form): """ confirm form to delete an account """ #todo: i think this form is not used confirm = 
forms.CharField(widget=forms.CheckboxInput(attrs={'class':'required'})) password = forms.CharField(widget=forms.PasswordInput(attrs={'class':'required'})) def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, user=None): super(DeleteForm, self).__init__(data, files, auto_id, prefix, initial) self.test_openid = False self.user = user def clean_password(self): """ check if we have to test a legacy account or not """ if 'password' in self.cleaned_data: if not self.user.check_password(self.cleaned_data['password']): self.test_openid = True return self.cleaned_data['password'] class EmailPasswordForm(forms.Form): """ send new password form """ username = UserNameField( skip_clean=True, label=mark_safe( ugettext_lazy('Your user name (<i>required</i>)') ) ) def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None): super(EmailPasswordForm, self).__init__(data, files, auto_id, prefix, initial) self.user_cache = None def clean_username(self): """ get user for this username """ if 'username' in self.cleaned_data: try: self.user_cache = User.objects.get( username = self.cleaned_data['username']) except: raise forms.ValidationError(_("sorry, there is no such user name")) return self.cleaned_data['username'] def get_registration_form_class(): """returns class for the user registration form user has a chance to specify the form via setting `REGISTRATION_FORM` """ custom_class = getattr(django_settings, 'REGISTRATION_FORM', None) if custom_class: return load_module(custom_class) else: return OpenidRegisterForm
gpl-3.0
paweljasinski/ironpython3
Src/StdLib/Lib/sched.py
88
6354
"""A generally useful event scheduler class. Each instance of this class manages its own queue. No multi-threading is implied; you are supposed to hack that yourself, or use a single instance per application. Each instance is parametrized with two functions, one that is supposed to return the current time, one that is supposed to implement a delay. You can implement real-time scheduling by substituting time and sleep from built-in module time, or you can implement simulated time by writing your own functions. This can also be used to integrate scheduling with STDWIN events; the delay function is allowed to modify the queue. Time can be expressed as integers or floating point numbers, as long as it is consistent. Events are specified by tuples (time, priority, action, argument, kwargs). As in UNIX, lower priority numbers mean higher priority; in this way the queue can be maintained as a priority queue. Execution of the event means calling the action function, passing it the argument sequence in "argument" (remember that in Python, multiple function arguments are be packed in a sequence) and keyword parameters in "kwargs". The action function may be an instance method so it has another way to reference private data (besides global variables). """ # XXX The timefunc and delayfunc should have been defined as methods # XXX so you can define new kinds of schedulers using subclassing # XXX instead of having to define a module or class just to hold # XXX the global state of your particular time and delay functions. 
import time import heapq from collections import namedtuple try: import threading except ImportError: import dummy_threading as threading try: from time import monotonic as _time except ImportError: from time import time as _time __all__ = ["scheduler"] class Event(namedtuple('Event', 'time, priority, action, argument, kwargs')): def __eq__(s, o): return (s.time, s.priority) == (o.time, o.priority) def __ne__(s, o): return (s.time, s.priority) != (o.time, o.priority) def __lt__(s, o): return (s.time, s.priority) < (o.time, o.priority) def __le__(s, o): return (s.time, s.priority) <= (o.time, o.priority) def __gt__(s, o): return (s.time, s.priority) > (o.time, o.priority) def __ge__(s, o): return (s.time, s.priority) >= (o.time, o.priority) _sentinel = object() class scheduler: def __init__(self, timefunc=_time, delayfunc=time.sleep): """Initialize a new instance, passing the time and delay functions""" self._queue = [] self._lock = threading.RLock() self.timefunc = timefunc self.delayfunc = delayfunc def enterabs(self, time, priority, action, argument=(), kwargs=_sentinel): """Enter a new event in the queue at an absolute time. Returns an ID for the event which can be used to remove it, if necessary. """ if kwargs is _sentinel: kwargs = {} event = Event(time, priority, action, argument, kwargs) with self._lock: heapq.heappush(self._queue, event) return event # The ID def enter(self, delay, priority, action, argument=(), kwargs=_sentinel): """A variant that specifies the time as a relative time. This is actually the more commonly used interface. """ time = self.timefunc() + delay return self.enterabs(time, priority, action, argument, kwargs) def cancel(self, event): """Remove an event from the queue. This must be presented the ID as returned by enter(). If the event is not in the queue, this raises ValueError. 
""" with self._lock: self._queue.remove(event) heapq.heapify(self._queue) def empty(self): """Check whether the queue is empty.""" with self._lock: return not self._queue def run(self, blocking=True): """Execute events until the queue is empty. If blocking is False executes the scheduled events due to expire soonest (if any) and then return the deadline of the next scheduled call in the scheduler. When there is a positive delay until the first event, the delay function is called and the event is left in the queue; otherwise, the event is removed from the queue and executed (its action function is called, passing it the argument). If the delay function returns prematurely, it is simply restarted. It is legal for both the delay function and the action function to modify the queue or to raise an exception; exceptions are not caught but the scheduler's state remains well-defined so run() may be called again. A questionable hack is added to allow other threads to run: just after an event is executed, a delay of 0 is executed, to avoid monopolizing the CPU when other threads are also runnable. """ # localize variable access to minimize overhead # and to improve thread safety lock = self._lock q = self._queue delayfunc = self.delayfunc timefunc = self.timefunc pop = heapq.heappop while True: with lock: if not q: break time, priority, action, argument, kwargs = q[0] now = timefunc() if time > now: delay = True else: delay = False pop(q) if delay: if not blocking: return time - now delayfunc(time - now) else: action(*argument, **kwargs) delayfunc(0) # Let other threads run @property def queue(self): """An ordered list of upcoming events. Events are named tuples with fields for: time, priority, action, arguments, kwargs """ # Use heapq to sort the queue rather than using 'sorted(self._queue)'. # With heapq, two events scheduled at the same time will show in # the actual order they would be retrieved. 
with self._lock: events = self._queue[:] return list(map(heapq.heappop, [events]*len(events)))
apache-2.0
fatihzkaratana/AutobahnPython
autobahn/autobahn/resource.py
17
6104
############################################################################### ## ## Copyright 2012-2013 Tavendo GmbH ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. ## ############################################################################### __all__ = ("WebSocketResource", "HTTPChannelHixie76Aware", "WSGIRootResource",) from zope.interface import implements from twisted.python import log from twisted.protocols.policies import ProtocolWrapper try: from twisted.web.error import NoResource except: ## starting from Twisted 12.2, NoResource has moved from twisted.web.resource import NoResource from twisted.web.error import UnsupportedMethod from twisted.web.resource import IResource, Resource from twisted.web.server import NOT_DONE_YET from twisted.web.http import HTTPChannel from websocket import WebSocketServerFactory, WebSocketServerProtocol class HTTPChannelHixie76Aware(HTTPChannel): """ Hixie-76 is deadly broken. It includes 8 bytes of body, but then does not set content-length header. This hacked HTTPChannel injects the missing HTTP header upon detecting Hixie-76. We need this since otherwise Twisted Web will silently ignore the body. To use this, set `protocol = HTTPChannelHixie76Aware` on your `twisted.web.server.Site <http://twistedmatrix.com/documents/current/api/twisted.web.server.Site.html>`_ instance. 
See: * `Autobahn Twisted Web site example <https://github.com/tavendo/AutobahnPython/tree/master/examples/websocket/echo_site>`_ """ def headerReceived(self, line): header = line.split(':')[0].lower() if header == "sec-websocket-key1" and not self._transferDecoder: HTTPChannel.headerReceived(self, "Content-Length: 8") HTTPChannel.headerReceived(self, line) class WSGIRootResource(Resource): """ Root resource when you want a WSGI resource be the default serving resource for a Twisted Web site, but have subpaths served by different resources. This is a hack needed since `twisted.web.wsgi.WSGIResource <http://twistedmatrix.com/documents/current/api/twisted.web.wsgi.WSGIResource.html>`_. does not provide a `putChild()` method. See also: * `Autobahn Twisted Web WSGI example <https://github.com/tavendo/AutobahnPython/tree/master/examples/websocket/echo_wsgi>`_ * `Original hack <http://blog.vrplumber.com/index.php?/archives/2426-Making-your-Twisted-resources-a-url-sub-tree-of-your-WSGI-resource....html>`_ """ def __init__(self, wsgiResource, children): """ Creates a Twisted Web root resource. :param wsgiResource: :type wsgiResource: Instance of `twisted.web.wsgi.WSGIResource <http://twistedmatrix.com/documents/current/api/twisted.web.wsgi.WSGIResource.html>`_. :param children: A dictionary with string keys constituting URL subpaths, and Twisted Web resources as values. :type children: dict """ Resource.__init__(self) self._wsgiResource = wsgiResource self.children = children def getChild(self, path, request): request.prepath.pop() request.postpath.insert(0, path) return self._wsgiResource class WebSocketResource(object): """ A Twisted Web resource for WebSocket. This resource needs to be instantiated with a factory derived from WebSocketServerFactory. """ implements(IResource) isLeaf = True def __init__(self, factory): """ Ctor. :param factory: An instance of WebSocketServerFactory. 
:type factory: obj """ self._factory = factory def getChildWithDefault(self, name, request): """ This resource cannot have children, hence this will always fail. """ return NoResource("No such child resource.") def putChild(self, path, child): """ This resource cannot have children, hence this is always ignored. """ pass def render(self, request): """ Render the resource. This will takeover the transport underlying the request, create a WebSocketServerProtocol and let that do any subsequent communication. """ ## Create Autobahn WebSocket protocol. ## protocol = self._factory.buildProtocol(request.transport.getPeer()) if not protocol: ## If protocol creation fails, we signal "internal server error" request.setResponseCode(500) return "" ## Take over the transport from Twisted Web ## transport, request.transport = request.transport, None ## Connect the transport to our protocol. Once #3204 is fixed, there ## may be a cleaner way of doing this. ## http://twistedmatrix.com/trac/ticket/3204 ## if isinstance(transport, ProtocolWrapper): ## i.e. TLS is a wrapping protocol transport.wrappedProtocol = protocol else: transport.protocol = protocol protocol.makeConnection(transport) ## We recreate the request and forward the raw data. This is somewhat ## silly (since Twisted Web already did the HTTP request parsing ## which we will do a 2nd time), but it's totally non-invasive to our ## code. Maybe improve this. ## data = "%s %s HTTP/1.1\x0d\x0a" % (request.method, request.path) for h in request.requestHeaders.getAllRawHeaders(): data += "%s: %s\x0d\x0a" % (h[0], ",".join(h[1])) data += "\x0d\x0a" data += request.content.read() # we need this for Hixie-76 protocol.dataReceived(data) return NOT_DONE_YET
apache-2.0
wtsi-hgi/python-sequencescape-client
sequencescape/tests/_json_converters_test_factory.py
3
4050
import json import unittest from typing import Callable, Tuple, Sequence from sequencescape.models import Model class _TestJSONEncoder(unittest.TestCase): """ Tests for custom JSON encoders. """ def __init__(self, model_factory: Callable[[], Model], expected_json_properties: Sequence[str], encoder_type: type, *args, **kwargs): super().__init__(*args, **kwargs) self.model_factory = model_factory self.expected_json_properties = expected_json_properties self.encoder_type = encoder_type def setUp(self): self.model = self.model_factory() def test_default(self): encoded_as_dict = self.encoder_type().default(self.model) for property in self.expected_json_properties: self.assertIn(property, encoded_as_dict) self.assertEqual(len(encoded_as_dict), len(self.expected_json_properties)) def test_with_json_dumps(self): encoded_as_string = json.dumps(self.model, cls=self.encoder_type) encoded_as_dict = json.loads(encoded_as_string) for property in self.expected_json_properties: self.assertIn(property, encoded_as_dict) self.assertEqual(len(encoded_as_dict), len(self.expected_json_properties)) class _TestJSONDecoder(unittest.TestCase): """ Tests for custom JSON decoders. 
""" def __init__(self, model_factory: Callable[[], Model], encoder_type: type, decoder_type: type, *args, **kwargs): super().__init__(*args, **kwargs) self.model_factory = model_factory self.encoder_type = encoder_type self.decoder_type = decoder_type def setUp(self): self.model = self.model_factory() def test_decode(self): encoded_as_string = json.dumps(self.model, cls=self.encoder_type) decoded = self.decoder_type().decode(encoded_as_string) self.assertEqual(decoded, self.model) def test_decode_parsed(self): encoded_as_dict = self.encoder_type().default(self.model) decoded = self.decoder_type().decode_parsed(encoded_as_dict) self.assertEqual(decoded, self.model) def test_with_json_loads(self): encoded_as_string = json.dumps(self.model, cls=self.encoder_type) decoded = json.loads(encoded_as_string, cls=self.decoder_type) self.assertEqual(decoded, self.model) def create_json_converter_test(model_factory: Callable[[], Model], expected_json_properties: Sequence[str], encoder_type: type, decoder_type: type) -> Tuple[unittest.TestCase, unittest.TestCase]: """ Creates a unit tests for testing a JSON converter. 
:param model_factory: factory that produces models that the converter deals with :param expected_json_properties: the properties that are expected to be in the JSON :param encoder_type: the JSON encoder type to test :param decoder_type: the JSON decoder type to test :return: tuple where the first element is a unit test for the encoder and the second is a unit test for the decoder """ encoder_test_class_name = "Test%s" % encoder_type.__name__ decoder_test_class_name = "Test%s" % decoder_type.__name__ def init(self, *args, **kwargs): if "transplant_class" in str(type(self)): # Fix for nosetest, which does some strange subclassing that results in the wrong super been used self_type = type(self).mro()[1] else: self_type = type(self) super(self_type, self).__init__(*self._SETUP, *args, **kwargs) encoder_test_class = type( encoder_test_class_name, (_TestJSONEncoder, ), { "_SETUP": (model_factory, expected_json_properties, encoder_type), "__init__": init } ) decoder_test_class = type( decoder_test_class_name, (_TestJSONDecoder, ), { "_SETUP": (model_factory, encoder_type, decoder_type), "__init__": init } ) return encoder_test_class, decoder_test_class
gpl-3.0
YuhangSong/GTN
main.py
1
20478
import copy import glob import os import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler from arguments import get_args from common.vec_env.subproc_vec_env import SubprocVecEnvMt from envs import make_env from kfac import KFACOptimizer from model import CNNPolicy, MLPPolicy from storage import RolloutStorage from visualize import visdom_plot from arguments import debugging, gtn_M from arguments import exp, title, title_html is_use_afs = True args = get_args() assert args.algo in ['a2c', 'ppo', 'acktr'] if args.algo == 'ppo': assert args.num_processes * args.num_steps % args.batch_size == 0 '''num_frames: number of frames to train (default: 10e6) num_steps: agent every time updata need steps ''' num_updates = int(args.num_frames) // args.num_steps // args.num_processes torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) mt_env_id_dic_all = { 'mt test pong':[ 'PongNoFrameskip-v4', 'BreakoutNoFrameskip-v4', ], 'mt high performance':[ 'BeamRiderNoFrameskip-v4', 'BreakoutNoFrameskip-v4', 'PongNoFrameskip-v4', 'QbertNoFrameskip-v4', 'SpaceInvadersNoFrameskip-v4', 'SeaquestNoFrameskip-v4', ], 'mt_as_ewc_test':[ 'CrazyClimberNoFrameskip-v4', 'RiverraidNoFrameskip-v4', 'BreakoutNoFrameskip-v4', 'PongNoFrameskip-v4', 'StarGunnerNoFrameskip-v4', 'DemonAttackNoFrameskip-v4', 'AsteroidsNoFrameskip-v4', 'SpaceInvadersNoFrameskip-v4', ], 'mt shooting':[ 'BeamRiderNoFrameskip-v4', 'PhoenixNoFrameskip-v4', 'AtlantisNoFrameskip-v4', 'CentipedeNoFrameskip-v4', 'RiverraidNoFrameskip-v4', 'DemonAttackNoFrameskip-v4', 'GravitarNoFrameskip-v4', 'SeaquestNoFrameskip-v4', 'ChopperCommandNoFrameskip-v4', 'AssaultNoFrameskip-v4', 'AsteroidsNoFrameskip-v4', 'SpaceInvadersNoFrameskip-v4', 'YarsRevengeNoFrameskip-v4', 'CarnivalNoFrameskip-v4', 'CrazyClimberNoFrameskip-v4', 'ZaxxonNoFrameskip-v4', 
'PooyanNoFrameskip-v4', 'StarGunnerNoFrameskip-v4', ], 'mt all atari':[ 'CarnivalNoFrameskip-v4', 'AlienNoFrameskip-v4', 'AmidarNoFrameskip-v4', 'BankHeistNoFrameskip-v4', 'MsPacmanNoFrameskip-v4', 'TutankhamNoFrameskip-v4', 'VentureNoFrameskip-v4', 'WizardOfWorNoFrameskip-v4', 'AssaultNoFrameskip-v4', 'AsteroidsNoFrameskip-v4', 'BeamRiderNoFrameskip-v4', 'CentipedeNoFrameskip-v4', 'ChopperCommandNoFrameskip-v4', 'CrazyClimberNoFrameskip-v4', 'DemonAttackNoFrameskip-v4', 'AtlantisNoFrameskip-v4', 'GravitarNoFrameskip-v4', 'PhoenixNoFrameskip-v4', 'PooyanNoFrameskip-v4', 'RiverraidNoFrameskip-v4', 'SeaquestNoFrameskip-v4', 'SpaceInvadersNoFrameskip-v4', 'StarGunnerNoFrameskip-v4', 'TimePilotNoFrameskip-v4', 'ZaxxonNoFrameskip-v4', 'YarsRevengeNoFrameskip-v4', 'AsterixNoFrameskip-v4', 'ElevatorActionNoFrameskip-v4', 'BerzerkNoFrameskip-v4', 'FreewayNoFrameskip-v4', 'FrostbiteNoFrameskip-v4', 'JourneyEscapeNoFrameskip-v4', 'KangarooNoFrameskip-v4', 'KrullNoFrameskip-v4', 'PitfallNoFrameskip-v4', 'SkiingNoFrameskip-v4', 'UpNDownNoFrameskip-v4', 'QbertNoFrameskip-v4', 'RoadRunnerNoFrameskip-v4', 'DoubleDunkNoFrameskip-v4', 'IceHockeyNoFrameskip-v4', 'MontezumaRevengeNoFrameskip-v4', 'GopherNoFrameskip-v4', 'BreakoutNoFrameskip-v4', 'PongNoFrameskip-v4', 'PrivateEyeNoFrameskip-v4', 'TennisNoFrameskip-v4', 'VideoPinballNoFrameskip-v4', 'FishingDerbyNoFrameskip-v4', 'NameThisGameNoFrameskip-v4', 'BowlingNoFrameskip-v4', 'BattleZoneNoFrameskip-v4', 'BoxingNoFrameskip-v4', 'JamesbondNoFrameskip-v4', 'RobotankNoFrameskip-v4', 'SolarisNoFrameskip-v4', 'EnduroNoFrameskip-v4', 'KungFuMasterNoFrameskip-v4', ], } mt_env_id_dic_selected = mt_env_id_dic_all[args.env_name] for env_id in mt_env_id_dic_selected: log_dir = args.log_dir+env_id+'/' try: os.makedirs(log_dir) except OSError: files = glob.glob(os.path.join(log_dir, '*.monitor.json')) for f in files: os.remove(f) afs_offset = [0.0, 0.0, 0.0, 0.0, 0.0] reward_dict={} def rec_last_100_epi_reward(reward,done_list): # num = 0 ''' 
arguments statement: reward :episode reward done_list: the finished signal from env ''' for index,done in enumerate(done_list): env_name = mt_env_id_dic_selected[index // args.num_processes] # print (env_name) if done: try: reward_dict["{}_entire".format(env_name)].append(reward[index]) try: reward_dict["{}_average".format(env_name)].append(np.mean(np.asarray(reward_dict["{}_entire".format(env_name)]))) except: reward_dict["{}_average".format(env_name)] =[] if len(reward_dict["{}_entire".format(env_name)])>100: try: reward_dict["{}_last_100".format(env_name)].append(np.mean(np.asarray(reward_dict["{}_entire".format(env_name)][-100:]))) except: reward_dict["{}_last_100".format(env_name)]=[] except Exception as e: reward_dict["{}_entire".format(env_name)]=[] reward_dict["{}_average".format(env_name)] =[] reward[index] = 0 return reward def break_line_html(string): for x in range(0,len(string),40): string = string[:x] + '<br>' + string[x:] return string def main(): print("#######") print("WARNING: All rewards are clipped so you need to use a monitor (see envs.py) or visdom plot to get true rewards") print("#######") os.environ['OMP_NUM_THREADS'] = '1' if args.vis: from visdom import Visdom viz = Visdom() win = [] win_dic ={} for i in range(len(mt_env_id_dic_selected)): win += [None] win_afs_per_m = None win_afs_loss = None win_basic_loss = None plot_dic = {} envs = [] ''' Because the oral program has only one game per model, so Song add loop i So whatever you wanna run , just put in SubprocVecEnvMt! 
''' for i in range(len(mt_env_id_dic_selected)): log_dir = args.log_dir+mt_env_id_dic_selected[i]+'/' for j in range(args.num_processes): envs += [make_env(mt_env_id_dic_selected[i], args.seed, j, log_dir)] ''' This envs is an intergration of all the running env''' envs = SubprocVecEnvMt(envs) num_processes_total = args.num_processes * len(mt_env_id_dic_selected) '''(1,128,128)''' obs_shape = envs.observation_space.shape #num_stack :number of frames to stack obs_shape = (obs_shape[0] * args.num_stack, *obs_shape[1:]) from arguments import is_restore if is_restore and args.save_dir: load_path = os.path.join(args.save_dir, args.algo) actor_critic =torch.load(os.path.join(load_path, args.env_name + ".pt")) # print ("restored previous model!") # print (actor_critic.Variable) # print (sss) else: if len(envs.observation_space.shape) == 3: actor_critic = CNNPolicy(obs_shape[0], envs.action_space) else: actor_critic = MLPPolicy(obs_shape[0], envs.action_space) if envs.action_space.__class__.__name__ == "Discrete": action_shape = 1 else: action_shape = envs.action_space.shape[0] if args.cuda: actor_critic.cuda() if args.algo == 'a2c': optimizer = optim.RMSprop(actor_critic.parameters(), args.lr, eps=args.eps, alpha=args.alpha) elif args.algo == 'ppo': optimizer = optim.Adam(actor_critic.parameters(), args.lr, eps=args.eps) elif args.algo == 'acktr': optimizer = KFACOptimizer(actor_critic) #'args.num_steps: number of forward steps in A2C #rollouts is an intergration of state\ reward\ next state\action and so on rollouts = RolloutStorage(args.num_steps, num_processes_total, obs_shape, envs.action_space) current_state = torch.zeros(num_processes_total, *obs_shape) ''' not sure about it''' def update_current_state(state): shape_dim0 = envs.observation_space.shape[0] # print (shape_dim0) # print (sss) state = torch.from_numpy(state).float() if args.num_stack > 1: current_state[:, :-shape_dim0] = current_state[:, shape_dim0:] current_state[:, -shape_dim0:] = state state = 
envs.reset() update_current_state(state) rollouts.states[0].copy_(current_state) # These variables are used to compute average rewards for all processes. episode_rewards = torch.zeros([num_processes_total, 1]) final_rewards = torch.zeros([num_processes_total, 1]) if args.cuda: current_state = current_state.cuda() rollouts.cuda() if args.algo == 'ppo': old_model = copy.deepcopy(actor_critic) from arguments import ewc, ewc_lambda, ewc_interval afs_per_m = [] afs_offset = [0.0]*gtn_M afs_loss_list = [] basic_loss_list = [] episode_reward_rec = 0.0 one = torch.FloatTensor([1]).cuda() mone = one * -1 '''for one whole game ''' for j in range(num_updates): for step in range(args.num_steps): if ewc == 1: try: states_store = torch.cat([states_store, rollouts.states[step].clone()], 0) except Exception as e: states_store = rollouts.states[step].clone() # Sample actions '''act fun refer to "observe it!"''' value, action = actor_critic.act(Variable(rollouts.states[step], volatile=True)) cpu_actions = action.data.squeeze(1).cpu().numpy() # Obser reward and next state state, reward, done = envs.step(cpu_actions) '''record the last 100 episodes rewards''' episode_reward_rec += reward episode_reward_rec = rec_last_100_epi_reward(episode_reward_rec,done) reward = torch.from_numpy(np.expand_dims(np.stack(reward), 1)).float() '''reward is shape of process_num_total, not batch-size''' # print ((reward).size()) # print (done) # print (sss) episode_rewards += reward ################ # rec_last_100_epi_reward(reward,done) # episode_reward_ppo += reward[0] # If done then clean the history of observations. 
final_rewards is used for compute after one whole num_step masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) final_rewards *= masks final_rewards += (1 - masks) * episode_rewards episode_rewards *= masks if args.cuda: masks = masks.cuda() if current_state.dim() == 4: current_state *= masks.unsqueeze(2).unsqueeze(2) else: current_state *= masks update_current_state(state) rollouts.insert(step, current_state, action.data, value.data, reward, masks) next_value = actor_critic(Variable(rollouts.states[-1], volatile=True))[0].data if hasattr(actor_critic, 'obs_filter'): actor_critic.obs_filter.update(rollouts.states[:-1].view(-1, *obs_shape)) rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau) if args.algo in ['a2c', 'acktr']: # reset gradient optimizer.zero_grad() # forward values, action_log_probs, dist_entropy, conv_list = actor_critic.evaluate_actions(Variable(rollouts.states[:-1].view(-1, *obs_shape)), Variable(rollouts.actions.view(-1, action_shape))) # pre-process values = values.view(args.num_steps, num_processes_total, 1) action_log_probs = action_log_probs.view(args.num_steps, num_processes_total, 1) # compute afs loss afs_per_m_temp, afs_loss = actor_critic.get_afs_per_m( action_log_probs=action_log_probs, conv_list=conv_list, ) if len(afs_per_m_temp)>0: afs_per_m += [afs_per_m_temp] if (afs_loss is not None) and (afs_loss.data.cpu().numpy()[0]!=0.0): afs_loss.backward(mone, retain_graph=True) afs_loss_list += [afs_loss.data.cpu().numpy()[0]] advantages = Variable(rollouts.returns[:-1]) - values value_loss = advantages.pow(2).mean() action_loss = -(Variable(advantages.data) * action_log_probs).mean() final_loss_basic = value_loss * args.value_loss_coef + action_loss - dist_entropy * args.entropy_coef ewc_loss = None if j != 0: if ewc == 1: ewc_loss = actor_critic.get_ewc_loss(lam=ewc_lambda) if ewc_loss is None: final_loss = final_loss_basic else: final_loss = final_loss_basic + ewc_loss # print 
(final_loss_basic.data.cpu().numpy()[0]) # final_loss_basic basic_loss_list += [final_loss_basic.data.cpu().numpy()[0]] final_loss.backward() if args.algo == 'a2c': nn.utils.clip_grad_norm(actor_critic.parameters(), args.max_grad_norm) optimizer.step() elif args.algo == 'ppo': advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1] advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5) old_model.load_state_dict(actor_critic.state_dict()) if hasattr(actor_critic, 'obs_filter'): old_model.obs_filter = actor_critic.obs_filter for _ in range(args.ppo_epoch): sampler = BatchSampler(SubsetRandomSampler(range(num_processes_total * args.num_steps)), args.batch_size * num_processes_total, drop_last=False) for indices in sampler: indices = torch.LongTensor(indices) if args.cuda: indices = indices.cuda() states_batch = rollouts.states[:-1].view(-1, *obs_shape)[indices] actions_batch = rollouts.actions.view(-1, action_shape)[indices] return_batch = rollouts.returns[:-1].view(-1, 1)[indices] # Reshape to do in a single forward pass for all steps values, action_log_probs, dist_entropy, conv_list = actor_critic.evaluate_actions(Variable(states_batch), Variable(actions_batch)) _, old_action_log_probs, _, old_conv_list= old_model.evaluate_actions(Variable(states_batch, volatile=True), Variable(actions_batch, volatile=True)) ratio = torch.exp(action_log_probs - Variable(old_action_log_probs.data)) adv_targ = Variable(advantages.view(-1, 1)[indices]) surr1 = ratio * adv_targ surr2 = torch.clamp(ratio, 1.0 - args.clip_param, 1.0 + args.clip_param) * adv_targ action_loss = -torch.min(surr1, surr2).mean() # PPO's pessimistic surrogate (L^CLIP) value_loss = (Variable(return_batch) - values).pow(2).mean() optimizer.zero_grad() final_loss_basic = (value_loss + action_loss - dist_entropy * args.entropy_coef) basic_loss_list += [final_loss_basic.data.cpu().numpy()[0]] final_loss_basic.backward() optimizer.step() rollouts.states[0].copy_(rollouts.states[-1]) # if j % 
int(num_updates/2-10) == 0 and args.save_dir != "": if j % args.save_interval == 0 and args.save_dir != "": save_path = os.path.join(args.save_dir, args.algo) try: os.makedirs(save_path) except OSError: pass # A really ugly way to save a model to CPU save_model = actor_critic if args.cuda: save_model = copy.deepcopy(actor_critic).cpu() torch.save(save_model, os.path.join(save_path, args.env_name + ".pt")) import pickle with open(os.path.join(save_path, args.env_name + "_last_100_reward"), "wb") as f: pickle.dump(reward_dict, f) if j % args.log_interval == 0: print("Updates {}, num frames {}, mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}, entropy {:.5f}, value loss {:.5f}, policy loss {:.5f}". format(j, (j + 1) * args.num_processes * args.num_steps, final_rewards.mean(), final_rewards.median(), final_rewards.min(), final_rewards.max(), -dist_entropy.data[0], value_loss.data[0], action_loss.data[0])) try: print("ewc loss {:.5f}". format(ewc_loss.data.cpu().numpy()[0])) except Exception as e: pass if j > 5 and j % args.vis_interval == 0 and args.vis: ''' load from the folder''' for ii in range(len(mt_env_id_dic_selected)): log_dir = args.log_dir+mt_env_id_dic_selected[ii]+'/' win[ii] = visdom_plot(viz, win[ii], log_dir, mt_env_id_dic_selected[ii], args.algo) plot_dic = reward_dict for plot_name in plot_dic.keys(): # if plot_name not in win_dic: # win_dic[plot_name] = None if plot_name in win_dic.keys(): if len(plot_dic[plot_name]) > 0: win_dic[plot_name] = viz.line( torch.from_numpy(np.asarray(plot_dic[plot_name])), win=win_dic[plot_name], opts=dict(title=break_line_html(exp+'>>'+plot_name)) ) else: win_dic[plot_name] = None if len(afs_per_m)>0: win_afs_per_m = viz.line( torch.from_numpy(np.asarray(afs_per_m)), win=win_afs_per_m, opts=dict(title=title_html+'>>afs') ) # print (basic_loss_list) '''a2c:len(basic_loss_list) is vis_interval+1. 
because j start from 0 ppo:len(basic_loss_list) is (vis_interval+1)*ppo_epoch_4*len(BatchSampler) ''' # print (len(basic_loss_list)) # print (ss) win_basic_loss = viz.line( torch.from_numpy(np.asarray(basic_loss_list)), win=win_basic_loss, opts=dict(title=title_html+'>>basic_loss') ) if len(afs_loss_list) > 0: win_afs_loss = viz.line( torch.from_numpy(np.asarray(afs_loss_list)), win=win_afs_loss, opts=dict(title=title_html+'>>afs_loss') ) from arguments import parameter_noise, parameter_noise_interval if parameter_noise == 1: if j % parameter_noise_interval == 0: actor_critic.parameter_noise() if ewc == 1: if j % ewc_interval == 0 or j==0: actor_critic.compute_fisher(states_store) states_store = None actor_critic.star() if __name__ == "__main__": main()
mit
adist/drunken-sansa
openerp/addons/base/ir/ir_fields.py
4
18108
# -*- coding: utf-8 -*- import datetime import functools import operator import itertools import time import psycopg2 import pytz from openerp.osv import orm from openerp.tools.translate import _ from openerp.tools.misc import DEFAULT_SERVER_DATE_FORMAT,\ DEFAULT_SERVER_DATETIME_FORMAT REFERENCING_FIELDS = set([None, 'id', '.id']) def only_ref_fields(record): return dict((k, v) for k, v in record.iteritems() if k in REFERENCING_FIELDS) def exclude_ref_fields(record): return dict((k, v) for k, v in record.iteritems() if k not in REFERENCING_FIELDS) CREATE = lambda values: (0, False, values) UPDATE = lambda id, values: (1, id, values) DELETE = lambda id: (2, id, False) FORGET = lambda id: (3, id, False) LINK_TO = lambda id: (4, id, False) DELETE_ALL = lambda: (5, False, False) REPLACE_WITH = lambda ids: (6, False, ids) class ConversionNotFound(ValueError): pass class ColumnWrapper(object): def __init__(self, column, cr, uid, pool, fromtype, context=None): self._converter = None self._column = column if column._obj: self._pool = pool self._converter_args = { 'cr': cr, 'uid': uid, 'model': pool[column._obj], 'fromtype': fromtype, 'context': context } @property def converter(self): if not self._converter: self._converter = self._pool['ir.fields.converter'].for_model( **self._converter_args) return self._converter def __getattr__(self, item): return getattr(self._column, item) class ir_fields_converter(orm.Model): _name = 'ir.fields.converter' def for_model(self, cr, uid, model, fromtype=str, context=None): """ Returns a converter object for the model. A converter is a callable taking a record-ish (a dictionary representing an openerp record with values of typetag ``fromtype``) and returning a converted records matching what :meth:`openerp.osv.orm.Model.write` expects. 
:param model: :class:`openerp.osv.orm.Model` for the conversion base :returns: a converter callable :rtype: (record: dict, logger: (field, error) -> None) -> dict """ columns = dict( (k, ColumnWrapper(v.column, cr, uid, self.pool, fromtype, context)) for k, v in model._all_columns.iteritems()) converters = dict( (k, self.to_field(cr, uid, model, column, fromtype, context)) for k, column in columns.iteritems()) def fn(record, log): converted = {} for field, value in record.iteritems(): if field in (None, 'id', '.id'): continue if not value: converted[field] = False continue try: converted[field], ws = converters[field](value) for w in ws: if isinstance(w, basestring): # wrap warning string in an ImportWarning for # uniform handling w = ImportWarning(w) log(field, w) except ValueError, e: log(field, e) return converted return fn def to_field(self, cr, uid, model, column, fromtype=str, context=None): """ Fetches a converter for the provided column object, from the specified type. A converter is simply a callable taking a value of type ``fromtype`` (or a composite of ``fromtype``, e.g. list or dict) and returning a value acceptable for a write() on the column ``column``. By default, tries to get a method on itself with a name matching the pattern ``_$fromtype_to_$column._type`` and returns it. Converter callables can either return a value and a list of warnings to their caller or raise ``ValueError``, which will be interpreted as a validation & conversion failure. ValueError can have either one or two parameters. The first parameter is mandatory, **must** be a unicode string and will be used as the user-visible message for the error (it should be translatable and translated). It can contain a ``field`` named format placeholder so the caller can inject the field's translated, user-facing name (@string). The second parameter is optional and, if provided, must be a mapping. This mapping will be merged into the error dictionary returned to the client. 
If a converter can perform its function but has to make assumptions about the data, it can send a warning to the user through adding an instance of :class:`~openerp.osv.orm.ImportWarning` to the second value it returns. The handling of a warning at the upper levels is the same as ``ValueError`` above. :param column: column object to generate a value for :type column: :class:`fields._column` :param type fromtype: type to convert to something fitting for ``column`` :param context: openerp request context :return: a function (fromtype -> column.write_type), if a converter is found :rtype: Callable | None """ # FIXME: return None converter = getattr( self, '_%s_to_%s' % (fromtype.__name__, column._type), None) if not converter: return None return functools.partial( converter, cr, uid, model, column, context=context) def _str_to_boolean(self, cr, uid, model, column, value, context=None): # all translatables used for booleans true, yes, false, no = _(u"true"), _(u"yes"), _(u"false"), _(u"no") # potentially broken casefolding? What about locales? trues = set(word.lower() for word in itertools.chain( [u'1', u"true", u"yes"], # don't use potentially translated values self._get_translations(cr, uid, ['code'], u"true", context=context), self._get_translations(cr, uid, ['code'], u"yes", context=context), )) if value.lower() in trues: return True, [] # potentially broken casefolding? What about locales? 
falses = set(word.lower() for word in itertools.chain( [u'', u"0", u"false", u"no"], self._get_translations(cr, uid, ['code'], u"false", context=context), self._get_translations(cr, uid, ['code'], u"no", context=context), )) if value.lower() in falses: return False, [] return True, [orm.ImportWarning( _(u"Unknown value '%s' for boolean field '%%(field)s', assuming '%s'") % (value, yes), { 'moreinfo': _(u"Use '1' for yes and '0' for no") })] def _str_to_integer(self, cr, uid, model, column, value, context=None): try: return int(value), [] except ValueError: raise ValueError( _(u"'%s' does not seem to be an integer for field '%%(field)s'") % value) def _str_to_float(self, cr, uid, model, column, value, context=None): try: return float(value), [] except ValueError: raise ValueError( _(u"'%s' does not seem to be a number for field '%%(field)s'") % value) def _str_id(self, cr, uid, model, column, value, context=None): return value, [] _str_to_reference = _str_to_char = _str_to_text = _str_to_binary = _str_id def _str_to_date(self, cr, uid, model, column, value, context=None): try: time.strptime(value, DEFAULT_SERVER_DATE_FORMAT) return value, [] except ValueError: raise ValueError( _(u"'%s' does not seem to be a valid date for field '%%(field)s'") % value, { 'moreinfo': _(u"Use the format '%s'") % u"2012-12-31" }) def _input_tz(self, cr, uid, context): # if there's a tz in context, try to use that if context.get('tz'): try: return pytz.timezone(context['tz']) except pytz.UnknownTimeZoneError: pass # if the current user has a tz set, try to use that user = self.pool['res.users'].read( cr, uid, [uid], ['tz'], context=context)[0] if user['tz']: try: return pytz.timezone(user['tz']) except pytz.UnknownTimeZoneError: pass # fallback if no tz in context or on user: UTC return pytz.UTC def _str_to_datetime(self, cr, uid, model, column, value, context=None): if context is None: context = {} try: parsed_value = datetime.datetime.strptime( value, DEFAULT_SERVER_DATETIME_FORMAT) 
except ValueError: raise ValueError( _(u"'%s' does not seem to be a valid datetime for field '%%(field)s'") % value, { 'moreinfo': _(u"Use the format '%s'") % u"2012-12-31 23:59:59" }) input_tz = self._input_tz(cr, uid, context)# Apply input tz to the parsed naive datetime dt = input_tz.localize(parsed_value, is_dst=False) # And convert to UTC before reformatting for writing return dt.astimezone(pytz.UTC).strftime(DEFAULT_SERVER_DATETIME_FORMAT), [] def _get_translations(self, cr, uid, types, src, context): types = tuple(types) # Cache translations so they don't have to be reloaded from scratch on # every row of the file tnx_cache = cr.cache.setdefault(self._name, {}) if tnx_cache.setdefault(types, {}) and src in tnx_cache[types]: return tnx_cache[types][src] Translations = self.pool['ir.translation'] tnx_ids = Translations.search( cr, uid, [('type', 'in', types), ('src', '=', src)], context=context) tnx = Translations.read(cr, uid, tnx_ids, ['value'], context=context) result = tnx_cache[types][src] = map(operator.itemgetter('value'), tnx) return result def _str_to_selection(self, cr, uid, model, column, value, context=None): selection = column.selection if not isinstance(selection, (tuple, list)): # FIXME: Don't pass context to avoid translations? # Or just copy context & remove lang? selection = selection(model, cr, uid, context=None) for item, label in selection: labels = self._get_translations( cr, uid, ('selection', 'model', 'code'), label, context=context) labels.append(label) if value == unicode(item) or value in labels: return item, [] raise ValueError( _(u"Value '%s' not found in selection field '%%(field)s'") % ( value), { 'moreinfo': [label or unicode(item) for item, label in selection if label or item] }) def db_id_for(self, cr, uid, model, column, subfield, value, context=None): """ Finds a database id for the reference ``value`` in the referencing subfield ``subfield`` of the provided column of the provided model. 
:param model: model to which the column belongs :param column: relational column for which references are provided :param subfield: a relational subfield allowing building of refs to existing records: ``None`` for a name_get/name_search, ``id`` for an external id and ``.id`` for a database id :param value: value of the reference to match to an actual record :param context: OpenERP request context :return: a pair of the matched database identifier (if any), the translated user-readable name for the field and the list of warnings :rtype: (ID|None, unicode, list) """ if context is None: context = {} id = None warnings = [] action = {'type': 'ir.actions.act_window', 'target': 'new', 'view_mode': 'tree,form', 'view_type': 'form', 'views': [(False, 'tree'), (False, 'form')], 'help': _(u"See all possible values")} if subfield is None: action['res_model'] = column._obj elif subfield in ('id', '.id'): action['res_model'] = 'ir.model.data' action['domain'] = [('model', '=', column._obj)] RelatedModel = self.pool[column._obj] if subfield == '.id': field_type = _(u"database id") try: tentative_id = int(value) except ValueError: tentative_id = value try: if RelatedModel.search(cr, uid, [('id', '=', tentative_id)], context=context): id = tentative_id except psycopg2.DataError: # type error raise ValueError( _(u"Invalid database id '%s' for the field '%%(field)s'") % value, {'moreinfo': action}) elif subfield == 'id': field_type = _(u"external id") if '.' 
in value: module, xid = value.split('.', 1) else: module, xid = context.get('_import_current_module', ''), value ModelData = self.pool['ir.model.data'] try: _model, id = ModelData.get_object_reference( cr, uid, module, xid) except ValueError: pass # leave id is None elif subfield is None: field_type = _(u"name") ids = RelatedModel.name_search( cr, uid, name=value, operator='=', context=context) if ids: if len(ids) > 1: warnings.append(orm.ImportWarning( _(u"Found multiple matches for field '%%(field)s' (%d matches)") % (len(ids)))) id, _name = ids[0] else: raise Exception(_(u"Unknown sub-field '%s'") % subfield) if id is None: raise ValueError( _(u"No matching record found for %(field_type)s '%(value)s' in field '%%(field)s'") % {'field_type': field_type, 'value': value}, {'moreinfo': action}) return id, field_type, warnings def _referencing_subfield(self, record): """ Checks the record for the subfields allowing referencing (an existing record in an other table), errors out if it finds potential conflicts (multiple referencing subfields) or non-referencing subfields returns the name of the correct subfield. 
:param record: :return: the record subfield to use for referencing and a list of warnings :rtype: str, list """ # Can import by name_get, external id or database id fieldset = set(record.iterkeys()) if fieldset - REFERENCING_FIELDS: raise ValueError( _(u"Can not create Many-To-One records indirectly, import the field separately")) if len(fieldset) > 1: raise ValueError( _(u"Ambiguous specification for field '%(field)s', only provide one of name, external id or database id")) # only one field left possible, unpack [subfield] = fieldset return subfield, [] def _str_to_many2one(self, cr, uid, model, column, values, context=None): # Should only be one record, unpack [record] = values subfield, w1 = self._referencing_subfield(record) reference = record[subfield] id, subfield_type, w2 = self.db_id_for( cr, uid, model, column, subfield, reference, context=context) return id, w1 + w2 def _str_to_many2many(self, cr, uid, model, column, value, context=None): [record] = value subfield, warnings = self._referencing_subfield(record) ids = [] for reference in record[subfield].split(','): id, subfield_type, ws = self.db_id_for( cr, uid, model, column, subfield, reference, context=context) ids.append(id) warnings.extend(ws) return [REPLACE_WITH(ids)], warnings def _str_to_one2many(self, cr, uid, model, column, records, context=None): commands = [] warnings = [] if len(records) == 1 and exclude_ref_fields(records[0]) == {}: # only one row with only ref field, field=ref1,ref2,ref3 as in # m2o/m2m record = records[0] subfield, ws = self._referencing_subfield(record) warnings.extend(ws) # transform [{subfield:ref1,ref2,ref3}] into # [{subfield:ref1},{subfield:ref2},{subfield:ref3}] records = ({subfield:item} for item in record[subfield].split(',')) def log(_, e): if not isinstance(e, Warning): raise e warnings.append(e) for record in records: id = None refs = only_ref_fields(record) # there are ref fields in the record if refs: subfield, w1 = self._referencing_subfield(refs) 
warnings.extend(w1) reference = record[subfield] id, subfield_type, w2 = self.db_id_for( cr, uid, model, column, subfield, reference, context=context) warnings.extend(w2) writable = column.converter(exclude_ref_fields(record), log) if id: commands.append(LINK_TO(id)) commands.append(UPDATE(id, writable)) else: commands.append(CREATE(writable)) return commands, warnings
agpl-3.0
nzavagli/UnrealPy
UnrealPyEmbed/Source/Python/Lib/python27/sqlite3/test/py25tests.py
127
2736
#-*- coding: ISO-8859-1 -*- # pysqlite2/test/regression.py: pysqlite regression tests # # Copyright (C) 2007 Gerhard Häring <gh@ghaering.de> # # This file is part of pysqlite. # # This software is provided 'as-is', without any express or implied # warranty. In no event will the authors be held liable for any damages # arising from the use of this software. # # Permission is granted to anyone to use this software for any purpose, # including commercial applications, and to alter it and redistribute it # freely, subject to the following restrictions: # # 1. The origin of this software must not be misrepresented; you must not # claim that you wrote the original software. If you use this software # in a product, an acknowledgment in the product documentation would be # appreciated but is not required. # 2. Altered source versions must be plainly marked as such, and must not be # misrepresented as being the original software. # 3. This notice may not be removed or altered from any source distribution. 
from __future__ import with_statement import unittest import sqlite3 as sqlite did_rollback = False class MyConnection(sqlite.Connection): def rollback(self): global did_rollback did_rollback = True sqlite.Connection.rollback(self) class ContextTests(unittest.TestCase): def setUp(self): global did_rollback self.con = sqlite.connect(":memory:", factory=MyConnection) self.con.execute("create table test(c unique)") did_rollback = False def tearDown(self): self.con.close() def CheckContextManager(self): """Can the connection be used as a context manager at all?""" with self.con: pass def CheckContextManagerCommit(self): """Is a commit called in the context manager?""" with self.con: self.con.execute("insert into test(c) values ('foo')") self.con.rollback() count = self.con.execute("select count(*) from test").fetchone()[0] self.assertEqual(count, 1) def CheckContextManagerRollback(self): """Is a rollback called in the context manager?""" global did_rollback self.assertEqual(did_rollback, False) try: with self.con: self.con.execute("insert into test(c) values (4)") self.con.execute("insert into test(c) values (4)") except sqlite.IntegrityError: pass self.assertEqual(did_rollback, True) def suite(): ctx_suite = unittest.makeSuite(ContextTests, "Check") return unittest.TestSuite((ctx_suite,)) def test(): runner = unittest.TextTestRunner() runner.run(suite()) if __name__ == "__main__": test()
mit
teriyakichild/ansible-modules-extras
cloud/webfaction/webfaction_domain.py
153
5304
#!/usr/bin/python # # Create Webfaction domains and subdomains using Ansible and the Webfaction API # # ------------------------------------------ # # (c) Quentin Stafford-Fraser 2015 # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # DOCUMENTATION = ''' --- module: webfaction_domain short_description: Add or remove domains and subdomains on Webfaction description: - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted. - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info. 
options: name: description: - The name of the domain required: true state: description: - Whether the domain should exist required: false choices: ['present', 'absent'] default: "present" subdomains: description: - Any subdomains to create. required: false default: null login_name: description: - The webfaction account to use required: true login_password: description: - The webfaction password to use required: true ''' EXAMPLES = ''' - name: Create a test domain webfaction_domain: name: mydomain.com state: present subdomains: - www - blog login_name: "{{webfaction_user}}" login_password: "{{webfaction_passwd}}" - name: Delete test domain and any subdomains webfaction_domain: name: mydomain.com state: absent login_name: "{{webfaction_user}}" login_password: "{{webfaction_passwd}}" ''' import socket import xmlrpclib webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), state = dict(required=False, choices=['present', 'absent'], default='present'), subdomains = dict(required=False, default=[]), login_name = dict(required=True), login_password = dict(required=True), ), supports_check_mode=True ) domain_name = module.params['name'] domain_state = module.params['state'] domain_subdomains = module.params['subdomains'] session_id, account = webfaction.login( module.params['login_name'], module.params['login_password'] ) domain_list = webfaction.list_domains(session_id) domain_map = dict([(i['domain'], i) for i in domain_list]) existing_domain = domain_map.get(domain_name) result = {} # Here's where the real stuff happens if domain_state == 'present': # Does an app with this name already exist? if existing_domain: if set(existing_domain['subdomains']) >= set(domain_subdomains): # If it exists with the right subdomains, we don't change anything. 
module.exit_json( changed = False, ) positional_args = [session_id, domain_name] + domain_subdomains if not module.check_mode: # If this isn't a dry run, create the app # print positional_args result.update( webfaction.create_domain( *positional_args ) ) elif domain_state == 'absent': # If the app's already not there, nothing changed. if not existing_domain: module.exit_json( changed = False, ) positional_args = [session_id, domain_name] + domain_subdomains if not module.check_mode: # If this isn't a dry run, delete the app result.update( webfaction.delete_domain(*positional_args) ) else: module.fail_json(msg="Unknown state specified: {}".format(domain_state)) module.exit_json( changed = True, result = result ) from ansible.module_utils.basic import * main()
gpl-3.0
TimsTechDev/android_kernel_samsung_kanas3gnfcxx
tools/perf/util/setup.py
2079
1438
#!/usr/bin/python2 from distutils.core import setup, Extension from os import getenv from distutils.command.build_ext import build_ext as _build_ext from distutils.command.install_lib import install_lib as _install_lib class build_ext(_build_ext): def finalize_options(self): _build_ext.finalize_options(self) self.build_lib = build_lib self.build_temp = build_tmp class install_lib(_install_lib): def finalize_options(self): _install_lib.finalize_options(self) self.build_dir = build_lib cflags = ['-fno-strict-aliasing', '-Wno-write-strings'] cflags += getenv('CFLAGS', '').split() build_lib = getenv('PYTHON_EXTBUILD_LIB') build_tmp = getenv('PYTHON_EXTBUILD_TMP') libtraceevent = getenv('LIBTRACEEVENT') liblk = getenv('LIBLK') ext_sources = [f.strip() for f in file('util/python-ext-sources') if len(f.strip()) > 0 and f[0] != '#'] perf = Extension('perf', sources = ext_sources, include_dirs = ['util/include'], extra_compile_args = cflags, extra_objects = [libtraceevent, liblk], ) setup(name='perf', version='0.1', description='Interface with the Linux profiling infrastructure', author='Arnaldo Carvalho de Melo', author_email='acme@redhat.com', license='GPLv2', url='http://perf.wiki.kernel.org', ext_modules=[perf], cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
gpl-2.0
ME-ICA/me-ica
meica.libs/mdp/test/test_NeuralGasNode.py
2
2420
from _tools import * def _uniform(min_, max_, dims): return uniform(dims)*(max_-min_)+min_ def test_NeuralGasNode(): ### test 1D distribution in a 10D space # line coefficients dim = 10 npoints = 1000 const = _uniform(-100,100,[dim]) dir = _uniform(-1,1,[dim]) dir /= utils.norm2(dir) x = _uniform(-1,1,[npoints]) data = numx.outer(x, dir)+const # train the ng network num_nodes = 10 ng = mdp.nodes.NeuralGasNode(start_poss=[data[n,:] for n in range(num_nodes)], max_epochs=10) ng.train(data) ng.stop_training() # control that the nodes in the graph lie on the line poss = ng.get_nodes_position()-const norms = numx.sqrt(numx.sum(poss*poss, axis=1)) poss = (poss.T/norms).T assert max(numx.minimum(numx.sum(abs(poss-dir),axis=1), numx.sum(abs(poss+dir),axis=1))) < 1e-7, \ 'At least one node of the graph does lies out of the line.' # check that the graph is linear (no additional branches) # get a topological sort of the graph topolist = ng.graph.topological_sort() deg = numx.asarray(map(lambda n: n.degree(), topolist)) idx = deg.argsort() deg = deg[idx] assert_equal(deg[:2],[1,1]) assert_array_equal(deg[2:], [2 for i in xrange(len(deg)-2)]) # check the distribution of the nodes' position is uniform # this node is at one of the extrema of the graph x0 = numx.outer(numx.amin(x, axis=0), dir)+const x1 = numx.outer(numx.amax(x, axis=0), dir)+const linelen = utils.norm2(x0-x1) # this is the mean distance the node should have dist = linelen / poss.shape[0] # sort the node, depth first nodes = ng.graph.undirected_dfs(topolist[idx[0]]) poss = numx.array(map(lambda n: n.data.pos, nodes)) dists = numx.sqrt(numx.sum((poss[:-1,:]-poss[1:,:])**2, axis=1)) assert_almost_equal(dist, mean(dists), 1) def test_NeuralGasNode_nearest_neighbor(): # test the nearest_neighbor function start_poss = [numx.asarray([2.,0]), numx.asarray([-2.,0])] ng = mdp.nodes.NeuralGasNode(start_poss=start_poss, max_epochs=4) x = numx.asarray([[2.,0]]) ng.train(x) nodes, dists = 
ng.nearest_neighbor(numx.asarray([[3.,0]])) assert_almost_equal(dists[0], 1., 7) assert_almost_equal(nodes[0].data.pos, numx.asarray([2., 0.]), 7)
lgpl-2.1
eleonrk/SickRage
lib/past/types/olddict.py
62
2735
""" A dict subclass for Python 3 that behaves like Python 2's dict Example use: >>> from past.builtins import dict >>> d1 = dict() # instead of {} for an empty dict >>> d2 = dict(key1='value1', key2='value2') The keys, values and items methods now return lists on Python 3.x and there are methods for iterkeys, itervalues, iteritems, and viewkeys etc. >>> for d in (d1, d2): ... assert isinstance(d.keys(), list) ... assert isinstance(d.values(), list) ... assert isinstance(d.items(), list) """ import sys from past.utils import with_metaclass _builtin_dict = dict ver = sys.version_info[:2] class BaseOldDict(type): def __instancecheck__(cls, instance): return isinstance(instance, _builtin_dict) class olddict(with_metaclass(BaseOldDict, _builtin_dict)): """ A backport of the Python 3 dict object to Py2 """ iterkeys = _builtin_dict.keys viewkeys = _builtin_dict.keys def keys(self): return list(super(olddict, self).keys()) itervalues = _builtin_dict.values viewvalues = _builtin_dict.values def values(self): return list(super(olddict, self).values()) iteritems = _builtin_dict.items viewitems = _builtin_dict.items def items(self): return list(super(olddict, self).items()) def has_key(self, k): """ D.has_key(k) -> True if D has a key k, else False """ return k in self # def __new__(cls, *args, **kwargs): # """ # dict() -> new empty dictionary # dict(mapping) -> new dictionary initialized from a mapping object's # (key, value) pairs # dict(iterable) -> new dictionary initialized as if via: # d = {} # for k, v in iterable: # d[k] = v # dict(**kwargs) -> new dictionary initialized with the name=value pairs # in the keyword argument list. For example: dict(one=1, two=2) # """ # # if len(args) == 0: # return super(olddict, cls).__new__(cls) # # Was: elif isinstance(args[0], newbytes): # # We use type() instead of the above because we're redefining # # this to be True for all unicode string subclasses. Warning: # # This may render newstr un-subclassable. 
# elif type(args[0]) == olddict: # return args[0] # # elif isinstance(args[0], _builtin_dict): # # value = args[0] # else: # value = args[0] # return super(olddict, cls).__new__(cls, value) def __native__(self): """ Hook for the past.utils.native() function """ return super(oldbytes, self) __all__ = ['olddict']
gpl-3.0
bearstech/ansible
lib/ansible/modules/windows/win_environment.py
29
3774
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: win_environment version_added: "2.0" short_description: Modifies environment variables on windows hosts. description: - Uses .net Environment to set or remove environment variables and can set at User, Machine or Process level. - User level environment variables will be set, but not available until the user has logged off and on again. options: state: description: - present to ensure environment variable is set, or absent to ensure it is removed required: false default: present choices: - present - absent name: description: - The name of the environment variable required: true default: no default value: description: - The value to store in the environment variable. Can be omitted for state=absent required: false default: no default level: description: - The level at which to set the environment variable. - Use 'machine' to set for all users. - Use 'user' to set for the current user that ansible is connected as. - Use 'process' to set for the current process. Probably not that useful. 
required: true default: no default choices: - machine - process - user author: "Jon Hawkesworth (@jhawkesworth)" notes: - This module is best-suited for setting the entire value of an environment variable. For safe element-based management of path-like environment vars, use the M(win_path) module. - This module does not broadcast change events. This means that the minority of windows applications which can have their environment changed without restarting will not be notified and therefore will need restarting to pick up new environment settings. User level environment variables will require the user to log out and in again before they become available. ''' EXAMPLES = r''' - name: Set an environment variable for all users win_environment: state: present name: TestVariable value: Test value level: machine - name: Remove an environment variable for the current user win_environment: state: absent name: TestVariable level: user ''' RETURN = r''' before_value: description: - the value of the environment key before a change, this is null if it didn't exist returned: always type: string sample: C:\Windows\System32 level: description: the level set when calling the module returned: always type: string sample: machine name: description: the name of the environment key the module checked returned: always type: string sample: JAVA_HOME value: description: the value the environment key has been set to returned: always type: string sample: C:\Program Files\jdk1.8 '''
gpl-3.0
akashsinghal/Speech-Memorization-App
Python_Backend/lib/itsdangerous.py
626
31840
# -*- coding: utf-8 -*- """ itsdangerous ~~~~~~~~~~~~ A module that implements various functions to deal with untrusted sources. Mainly useful for web applications. :copyright: (c) 2014 by Armin Ronacher and the Django Software Foundation. :license: BSD, see LICENSE for more details. """ import sys import hmac import zlib import time import base64 import hashlib import operator from datetime import datetime PY2 = sys.version_info[0] == 2 if PY2: from itertools import izip text_type = unicode int_to_byte = chr number_types = (int, long, float) else: from functools import reduce izip = zip text_type = str int_to_byte = operator.methodcaller('to_bytes', 1, 'big') number_types = (int, float) try: import simplejson as json except ImportError: import json class _CompactJSON(object): """Wrapper around simplejson that strips whitespace. """ def loads(self, payload): return json.loads(payload) def dumps(self, obj): return json.dumps(obj, separators=(',', ':')) compact_json = _CompactJSON() # 2011/01/01 in UTC EPOCH = 1293840000 def want_bytes(s, encoding='utf-8', errors='strict'): if isinstance(s, text_type): s = s.encode(encoding, errors) return s def is_text_serializer(serializer): """Checks wheather a serializer generates text or binary.""" return isinstance(serializer.dumps({}), text_type) # Starting with 3.3 the standard library has a c-implementation for # constant time string compares. _builtin_constant_time_compare = getattr(hmac, 'compare_digest', None) def constant_time_compare(val1, val2): """Returns True if the two strings are equal, False otherwise. The time taken is independent of the number of characters that match. Do not use this function for anything else than comparision with known length targets. This is should be implemented in C in order to get it completely right. 
""" if _builtin_constant_time_compare is not None: return _builtin_constant_time_compare(val1, val2) len_eq = len(val1) == len(val2) if len_eq: result = 0 left = val1 else: result = 1 left = val2 for x, y in izip(bytearray(left), bytearray(val2)): result |= x ^ y return result == 0 class BadData(Exception): """Raised if bad data of any sort was encountered. This is the base for all exceptions that itsdangerous is currently using. .. versionadded:: 0.15 """ message = None def __init__(self, message): Exception.__init__(self, message) self.message = message def __str__(self): return text_type(self.message) if PY2: __unicode__ = __str__ def __str__(self): return self.__unicode__().encode('utf-8') class BadPayload(BadData): """This error is raised in situations when payload is loaded without checking the signature first and an exception happend as a result of that. The original exception that caused that will be stored on the exception as :attr:`original_error`. This can also happen with a :class:`JSONWebSignatureSerializer` that is subclassed and uses a different serializer for the payload than the expected one. .. versionadded:: 0.15 """ def __init__(self, message, original_error=None): BadData.__init__(self, message) #: If available, the error that indicates why the payload #: was not valid. This might be `None`. self.original_error = original_error class BadSignature(BadData): """This error is raised if a signature does not match. As of itsdangerous 0.14 there are helpful attributes on the exception instances. You can also catch down the baseclass :exc:`BadData`. """ def __init__(self, message, payload=None): BadData.__init__(self, message) #: The payload that failed the signature test. In some #: situations you might still want to inspect this, even if #: you know it was tampered with. #: #: .. versionadded:: 0.14 self.payload = payload class BadTimeSignature(BadSignature): """Raised for time based signatures that fail. 
This is a subclass of :class:`BadSignature` so you can catch those down as well. """ def __init__(self, message, payload=None, date_signed=None): BadSignature.__init__(self, message, payload) #: If the signature expired this exposes the date of when the #: signature was created. This can be helpful in order to #: tell the user how long a link has been gone stale. #: #: .. versionadded:: 0.14 self.date_signed = date_signed class BadHeader(BadSignature): """Raised if a signed header is invalid in some form. This only happens for serializers that have a header that goes with the signature. .. versionadded:: 0.24 """ def __init__(self, message, payload=None, header=None, original_error=None): BadSignature.__init__(self, message, payload) #: If the header is actually available but just malformed it #: might be stored here. self.header = header #: If available, the error that indicates why the payload #: was not valid. This might be `None`. self.original_error = original_error class SignatureExpired(BadTimeSignature): """Signature timestamp is older than required max_age. This is a subclass of :exc:`BadTimeSignature` so you can use the baseclass for catching the error. """ def base64_encode(string): """base64 encodes a single bytestring (and is tolerant to getting called with a unicode string). The resulting bytestring is safe for putting into URLs. """ string = want_bytes(string) return base64.urlsafe_b64encode(string).strip(b'=') def base64_decode(string): """base64 decodes a single bytestring (and is tolerant to getting called with a unicode string). The result is also a bytestring. 
""" string = want_bytes(string, encoding='ascii', errors='ignore') return base64.urlsafe_b64decode(string + b'=' * (-len(string) % 4)) def int_to_bytes(num): assert num >= 0 rv = [] while num: rv.append(int_to_byte(num & 0xff)) num >>= 8 return b''.join(reversed(rv)) def bytes_to_int(bytestr): return reduce(lambda a, b: a << 8 | b, bytearray(bytestr), 0) class SigningAlgorithm(object): """Subclasses of `SigningAlgorithm` have to implement `get_signature` to provide signature generation functionality. """ def get_signature(self, key, value): """Returns the signature for the given key and value""" raise NotImplementedError() def verify_signature(self, key, value, sig): """Verifies the given signature matches the expected signature""" return constant_time_compare(sig, self.get_signature(key, value)) class NoneAlgorithm(SigningAlgorithm): """This class provides a algorithm that does not perform any signing and returns an empty signature. """ def get_signature(self, key, value): return b'' class HMACAlgorithm(SigningAlgorithm): """This class provides signature generation using HMACs.""" #: The digest method to use with the MAC algorithm. This defaults to sha1 #: but can be changed for any other function in the hashlib module. default_digest_method = staticmethod(hashlib.sha1) def __init__(self, digest_method=None): if digest_method is None: digest_method = self.default_digest_method self.digest_method = digest_method def get_signature(self, key, value): mac = hmac.new(key, msg=value, digestmod=self.digest_method) return mac.digest() class Signer(object): """This class can sign bytes and unsign it and validate the signature provided. Salt can be used to namespace the hash, so that a signed string is only valid for a given namespace. Leaving this at the default value or re-using a salt value across different parts of your application where the same signed value in one part can mean something different in another part is a security risk. 
See :ref:`the-salt` for an example of what the salt is doing and how you can utilize it. .. versionadded:: 0.14 `key_derivation` and `digest_method` were added as arguments to the class constructor. .. versionadded:: 0.18 `algorithm` was added as an argument to the class constructor. """ #: The digest method to use for the signer. This defaults to sha1 but can #: be changed for any other function in the hashlib module. #: #: .. versionchanged:: 0.14 default_digest_method = staticmethod(hashlib.sha1) #: Controls how the key is derived. The default is Django style #: concatenation. Possible values are ``concat``, ``django-concat`` #: and ``hmac``. This is used for deriving a key from the secret key #: with an added salt. #: #: .. versionadded:: 0.14 default_key_derivation = 'django-concat' def __init__(self, secret_key, salt=None, sep='.', key_derivation=None, digest_method=None, algorithm=None): self.secret_key = want_bytes(secret_key) self.sep = sep self.salt = 'itsdangerous.Signer' if salt is None else salt if key_derivation is None: key_derivation = self.default_key_derivation self.key_derivation = key_derivation if digest_method is None: digest_method = self.default_digest_method self.digest_method = digest_method if algorithm is None: algorithm = HMACAlgorithm(self.digest_method) self.algorithm = algorithm def derive_key(self): """This method is called to derive the key. If you're unhappy with the default key derivation choices you can override them here. Keep in mind that the key derivation in itsdangerous is not intended to be used as a security method to make a complex key out of a short password. Instead you should use large random secret keys. 
""" salt = want_bytes(self.salt) if self.key_derivation == 'concat': return self.digest_method(salt + self.secret_key).digest() elif self.key_derivation == 'django-concat': return self.digest_method(salt + b'signer' + self.secret_key).digest() elif self.key_derivation == 'hmac': mac = hmac.new(self.secret_key, digestmod=self.digest_method) mac.update(salt) return mac.digest() elif self.key_derivation == 'none': return self.secret_key else: raise TypeError('Unknown key derivation method') def get_signature(self, value): """Returns the signature for the given value""" value = want_bytes(value) key = self.derive_key() sig = self.algorithm.get_signature(key, value) return base64_encode(sig) def sign(self, value): """Signs the given string.""" return value + want_bytes(self.sep) + self.get_signature(value) def verify_signature(self, value, sig): """Verifies the signature for the given value.""" key = self.derive_key() try: sig = base64_decode(sig) except Exception: return False return self.algorithm.verify_signature(key, value, sig) def unsign(self, signed_value): """Unsigns the given string.""" signed_value = want_bytes(signed_value) sep = want_bytes(self.sep) if sep not in signed_value: raise BadSignature('No %r found in value' % self.sep) value, sig = signed_value.rsplit(sep, 1) if self.verify_signature(value, sig): return value raise BadSignature('Signature %r does not match' % sig, payload=value) def validate(self, signed_value): """Just validates the given signed value. Returns `True` if the signature exists and is valid, `False` otherwise.""" try: self.unsign(signed_value) return True except BadSignature: return False class TimestampSigner(Signer): """Works like the regular :class:`Signer` but also records the time of the signing and can be used to expire signatures. The unsign method can rause a :exc:`SignatureExpired` method if the unsigning failed because the signature is expired. This exception is a subclass of :exc:`BadSignature`. 
""" def get_timestamp(self): """Returns the current timestamp. This implementation returns the seconds since 1/1/2011. The function must return an integer. """ return int(time.time() - EPOCH) def timestamp_to_datetime(self, ts): """Used to convert the timestamp from `get_timestamp` into a datetime object. """ return datetime.utcfromtimestamp(ts + EPOCH) def sign(self, value): """Signs the given string and also attaches a time information.""" value = want_bytes(value) timestamp = base64_encode(int_to_bytes(self.get_timestamp())) sep = want_bytes(self.sep) value = value + sep + timestamp return value + sep + self.get_signature(value) def unsign(self, value, max_age=None, return_timestamp=False): """Works like the regular :meth:`~Signer.unsign` but can also validate the time. See the base docstring of the class for the general behavior. If `return_timestamp` is set to `True` the timestamp of the signature will be returned as naive :class:`datetime.datetime` object in UTC. """ try: result = Signer.unsign(self, value) sig_error = None except BadSignature as e: sig_error = e result = e.payload or b'' sep = want_bytes(self.sep) # If there is no timestamp in the result there is something # seriously wrong. In case there was a signature error, we raise # that one directly, otherwise we have a weird situation in which # we shouldn't have come except someone uses a time-based serializer # on non-timestamp data, so catch that. if not sep in result: if sig_error: raise sig_error raise BadTimeSignature('timestamp missing', payload=result) value, timestamp = result.rsplit(sep, 1) try: timestamp = bytes_to_int(base64_decode(timestamp)) except Exception: timestamp = None # Signature is *not* okay. Raise a proper error now that we have # split the value and the timestamp. if sig_error is not None: raise BadTimeSignature(text_type(sig_error), payload=value, date_signed=timestamp) # Signature was okay but the timestamp is actually not there or # malformed. Should not happen, but well. 
We handle it nonetheless if timestamp is None: raise BadTimeSignature('Malformed timestamp', payload=value) # Check timestamp is not older than max_age if max_age is not None: age = self.get_timestamp() - timestamp if age > max_age: raise SignatureExpired( 'Signature age %s > %s seconds' % (age, max_age), payload=value, date_signed=self.timestamp_to_datetime(timestamp)) if return_timestamp: return value, self.timestamp_to_datetime(timestamp) return value def validate(self, signed_value, max_age=None): """Just validates the given signed value. Returns `True` if the signature exists and is valid, `False` otherwise.""" try: self.unsign(signed_value, max_age=max_age) return True except BadSignature: return False class Serializer(object): """This class provides a serialization interface on top of the signer. It provides a similar API to json/pickle and other modules but is slightly differently structured internally. If you want to change the underlying implementation for parsing and loading you have to override the :meth:`load_payload` and :meth:`dump_payload` functions. This implementation uses simplejson if available for dumping and loading and will fall back to the standard library's json module if it's not available. Starting with 0.14 you do not need to subclass this class in order to switch out or customer the :class:`Signer`. You can instead also pass a different class to the constructor as well as keyword arguments as dictionary that should be forwarded:: s = Serializer(signer_kwargs={'key_derivation': 'hmac'}) .. versionchanged:: 0.14: The `signer` and `signer_kwargs` parameters were added to the constructor. """ #: If a serializer module or class is not passed to the constructor #: this one is picked up. This currently defaults to :mod:`json`. default_serializer = json #: The default :class:`Signer` class that is being used by this #: serializer. #: #: .. 
versionadded:: 0.14 default_signer = Signer def __init__(self, secret_key, salt=b'itsdangerous', serializer=None, signer=None, signer_kwargs=None): self.secret_key = want_bytes(secret_key) self.salt = want_bytes(salt) if serializer is None: serializer = self.default_serializer self.serializer = serializer self.is_text_serializer = is_text_serializer(serializer) if signer is None: signer = self.default_signer self.signer = signer self.signer_kwargs = signer_kwargs or {} def load_payload(self, payload, serializer=None): """Loads the encoded object. This function raises :class:`BadPayload` if the payload is not valid. The `serializer` parameter can be used to override the serializer stored on the class. The encoded payload is always byte based. """ if serializer is None: serializer = self.serializer is_text = self.is_text_serializer else: is_text = is_text_serializer(serializer) try: if is_text: payload = payload.decode('utf-8') return serializer.loads(payload) except Exception as e: raise BadPayload('Could not load the payload because an ' 'exception occurred on unserializing the data', original_error=e) def dump_payload(self, obj): """Dumps the encoded object. The return value is always a bytestring. If the internal serializer is text based the value will automatically be encoded to utf-8. """ return want_bytes(self.serializer.dumps(obj)) def make_signer(self, salt=None): """A method that creates a new instance of the signer to be used. The default implementation uses the :class:`Signer` baseclass. """ if salt is None: salt = self.salt return self.signer(self.secret_key, salt=salt, **self.signer_kwargs) def dumps(self, obj, salt=None): """Returns a signed string serialized with the internal serializer. The return value can be either a byte or unicode string depending on the format of the internal serializer. 
""" payload = want_bytes(self.dump_payload(obj)) rv = self.make_signer(salt).sign(payload) if self.is_text_serializer: rv = rv.decode('utf-8') return rv def dump(self, obj, f, salt=None): """Like :meth:`dumps` but dumps into a file. The file handle has to be compatible with what the internal serializer expects. """ f.write(self.dumps(obj, salt)) def loads(self, s, salt=None): """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the signature validation fails. """ s = want_bytes(s) return self.load_payload(self.make_signer(salt).unsign(s)) def load(self, f, salt=None): """Like :meth:`loads` but loads from a file.""" return self.loads(f.read(), salt) def loads_unsafe(self, s, salt=None): """Like :meth:`loads` but without verifying the signature. This is potentially very dangerous to use depending on how your serializer works. The return value is ``(signature_okay, payload)`` instead of just the payload. The first item will be a boolean that indicates if the signature is okay (``True``) or if it failed. This function never fails. Use it for debugging only and if you know that your serializer module is not exploitable (eg: do not use it with a pickle serializer). .. versionadded:: 0.15 """ return self._loads_unsafe_impl(s, salt) def _loads_unsafe_impl(self, s, salt, load_kwargs=None, load_payload_kwargs=None): """Lowlevel helper function to implement :meth:`loads_unsafe` in serializer subclasses. """ try: return True, self.loads(s, salt=salt, **(load_kwargs or {})) except BadSignature as e: if e.payload is None: return False, None try: return False, self.load_payload(e.payload, **(load_payload_kwargs or {})) except BadPayload: return False, None def load_unsafe(self, f, *args, **kwargs): """Like :meth:`loads_unsafe` but loads from a file. .. versionadded:: 0.15 """ return self.loads_unsafe(f.read(), *args, **kwargs) class TimedSerializer(Serializer): """Uses the :class:`TimestampSigner` instead of the default :meth:`Signer`. 
""" default_signer = TimestampSigner def loads(self, s, max_age=None, return_timestamp=False, salt=None): """Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the signature validation fails. If a `max_age` is provided it will ensure the signature is not older than that time in seconds. In case the signature is outdated, :exc:`SignatureExpired` is raised which is a subclass of :exc:`BadSignature`. All arguments are forwarded to the signer's :meth:`~TimestampSigner.unsign` method. """ base64d, timestamp = self.make_signer(salt) \ .unsign(s, max_age, return_timestamp=True) payload = self.load_payload(base64d) if return_timestamp: return payload, timestamp return payload def loads_unsafe(self, s, max_age=None, salt=None): load_kwargs = {'max_age': max_age} load_payload_kwargs = {} return self._loads_unsafe_impl(s, salt, load_kwargs, load_payload_kwargs) class JSONWebSignatureSerializer(Serializer): """This serializer implements JSON Web Signature (JWS) support. Only supports the JWS Compact Serialization. """ jws_algorithms = { 'HS256': HMACAlgorithm(hashlib.sha256), 'HS384': HMACAlgorithm(hashlib.sha384), 'HS512': HMACAlgorithm(hashlib.sha512), 'none': NoneAlgorithm(), } #: The default algorithm to use for signature generation default_algorithm = 'HS256' default_serializer = compact_json def __init__(self, secret_key, salt=None, serializer=None, signer=None, signer_kwargs=None, algorithm_name=None): Serializer.__init__(self, secret_key, salt, serializer, signer, signer_kwargs) if algorithm_name is None: algorithm_name = self.default_algorithm self.algorithm_name = algorithm_name self.algorithm = self.make_algorithm(algorithm_name) def load_payload(self, payload, return_header=False): payload = want_bytes(payload) if b'.' not in payload: raise BadPayload('No "." 
found in value') base64d_header, base64d_payload = payload.split(b'.', 1) try: json_header = base64_decode(base64d_header) except Exception as e: raise BadHeader('Could not base64 decode the header because of ' 'an exception', original_error=e) try: json_payload = base64_decode(base64d_payload) except Exception as e: raise BadPayload('Could not base64 decode the payload because of ' 'an exception', original_error=e) try: header = Serializer.load_payload(self, json_header, serializer=json) except BadData as e: raise BadHeader('Could not unserialize header because it was ' 'malformed', original_error=e) if not isinstance(header, dict): raise BadHeader('Header payload is not a JSON object', header=header) payload = Serializer.load_payload(self, json_payload) if return_header: return payload, header return payload def dump_payload(self, header, obj): base64d_header = base64_encode(self.serializer.dumps(header)) base64d_payload = base64_encode(self.serializer.dumps(obj)) return base64d_header + b'.' + base64d_payload def make_algorithm(self, algorithm_name): try: return self.jws_algorithms[algorithm_name] except KeyError: raise NotImplementedError('Algorithm not supported') def make_signer(self, salt=None, algorithm=None): if salt is None: salt = self.salt key_derivation = 'none' if salt is None else None if algorithm is None: algorithm = self.algorithm return self.signer(self.secret_key, salt=salt, sep='.', key_derivation=key_derivation, algorithm=algorithm) def make_header(self, header_fields): header = header_fields.copy() if header_fields else {} header['alg'] = self.algorithm_name return header def dumps(self, obj, salt=None, header_fields=None): """Like :meth:`~Serializer.dumps` but creates a JSON Web Signature. It also allows for specifying additional fields to be included in the JWS Header. 
""" header = self.make_header(header_fields) signer = self.make_signer(salt, self.algorithm) return signer.sign(self.dump_payload(header, obj)) def loads(self, s, salt=None, return_header=False): """Reverse of :meth:`dumps`. If requested via `return_header` it will return a tuple of payload and header. """ payload, header = self.load_payload( self.make_signer(salt, self.algorithm).unsign(want_bytes(s)), return_header=True) if header.get('alg') != self.algorithm_name: raise BadHeader('Algorithm mismatch', header=header, payload=payload) if return_header: return payload, header return payload def loads_unsafe(self, s, salt=None, return_header=False): kwargs = {'return_header': return_header} return self._loads_unsafe_impl(s, salt, kwargs, kwargs) class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer): """Works like the regular :class:`JSONWebSignatureSerializer` but also records the time of the signing and can be used to expire signatures. JWS currently does not specify this behavior but it mentions a possibility extension like this in the spec. Expiry date is encoded into the header similarily as specified in `draft-ietf-oauth-json-web-token <http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#expDef`_. The unsign method can raise a :exc:`SignatureExpired` method if the unsigning failed because the signature is expired. This exception is a subclass of :exc:`BadSignature`. 
""" DEFAULT_EXPIRES_IN = 3600 def __init__(self, secret_key, expires_in=None, **kwargs): JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs) if expires_in is None: expires_in = self.DEFAULT_EXPIRES_IN self.expires_in = expires_in def make_header(self, header_fields): header = JSONWebSignatureSerializer.make_header(self, header_fields) iat = self.now() exp = iat + self.expires_in header['iat'] = iat header['exp'] = exp return header def loads(self, s, salt=None, return_header=False): payload, header = JSONWebSignatureSerializer.loads( self, s, salt, return_header=True) if 'exp' not in header: raise BadSignature('Missing expiry date', payload=payload) if not (isinstance(header['exp'], number_types) and header['exp'] > 0): raise BadSignature('expiry date is not an IntDate', payload=payload) if header['exp'] < self.now(): raise SignatureExpired('Signature expired', payload=payload, date_signed=self.get_issue_date(header)) if return_header: return payload, header return payload def get_issue_date(self, header): rv = header.get('iat') if isinstance(rv, number_types): return datetime.utcfromtimestamp(int(rv)) def now(self): return int(time.time()) class URLSafeSerializerMixin(object): """Mixed in with a regular serializer it will attempt to zlib compress the string to make it shorter if necessary. It will also base64 encode the string so that it can safely be placed in a URL. 
""" def load_payload(self, payload): decompress = False if payload.startswith(b'.'): payload = payload[1:] decompress = True try: json = base64_decode(payload) except Exception as e: raise BadPayload('Could not base64 decode the payload because of ' 'an exception', original_error=e) if decompress: try: json = zlib.decompress(json) except Exception as e: raise BadPayload('Could not zlib decompress the payload before ' 'decoding the payload', original_error=e) return super(URLSafeSerializerMixin, self).load_payload(json) def dump_payload(self, obj): json = super(URLSafeSerializerMixin, self).dump_payload(obj) is_compressed = False compressed = zlib.compress(json) if len(compressed) < (len(json) - 1): json = compressed is_compressed = True base64d = base64_encode(json) if is_compressed: base64d = b'.' + base64d return base64d class URLSafeSerializer(URLSafeSerializerMixin, Serializer): """Works like :class:`Serializer` but dumps and loads into a URL safe string consisting of the upper and lowercase character of the alphabet as well as ``'_'``, ``'-'`` and ``'.'``. """ default_serializer = compact_json class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer): """Works like :class:`TimedSerializer` but dumps and loads into a URL safe string consisting of the upper and lowercase character of the alphabet as well as ``'_'``, ``'-'`` and ``'.'``. """ default_serializer = compact_json
apache-2.0
laszlocsomor/tensorflow
tensorflow/python/kernel_tests/as_string_op_test.py
119
8652
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for as_string_op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import string_ops from tensorflow.python.platform import test class AsStringOpTest(test.TestCase): def testFloat(self): float_inputs_ = [ 0, 1, -1, 0.5, 0.25, 0.125, float("INF"), float("NAN"), float("-INF") ] with self.test_session(): for dtype in (dtypes.float32, dtypes.float64): input_ = array_ops.placeholder(dtype) output = string_ops.as_string(input_, shortest=True) result = output.eval(feed_dict={input_: float_inputs_}) s = lambda strs: [x.decode("ascii") for x in strs] self.assertAllEqual(s(result), ["%g" % x for x in float_inputs_]) output = string_ops.as_string(input_, scientific=True) result = output.eval(feed_dict={input_: float_inputs_}) self.assertAllEqual(s(result), ["%e" % x for x in float_inputs_]) output = string_ops.as_string(input_) result = output.eval(feed_dict={input_: float_inputs_}) self.assertAllEqual(s(result), ["%f" % x for x in float_inputs_]) output = string_ops.as_string(input_, width=3) result = output.eval(feed_dict={input_: float_inputs_}) self.assertAllEqual(s(result), ["%3f" % x for x in 
float_inputs_]) output = string_ops.as_string(input_, width=3, fill="0") result = output.eval(feed_dict={input_: float_inputs_}) self.assertAllEqual(s(result), ["%03f" % x for x in float_inputs_]) output = string_ops.as_string(input_, width=3, fill="0", shortest=True) result = output.eval(feed_dict={input_: float_inputs_}) self.assertAllEqual(s(result), ["%03g" % x for x in float_inputs_]) output = string_ops.as_string(input_, precision=10, width=3) result = output.eval(feed_dict={input_: float_inputs_}) self.assertAllEqual(s(result), ["%03.10f" % x for x in float_inputs_]) output = string_ops.as_string( input_, precision=10, width=3, fill="0", shortest=True) result = output.eval(feed_dict={input_: float_inputs_}) self.assertAllEqual(s(result), ["%03.10g" % x for x in float_inputs_]) with self.assertRaisesOpError("Cannot select both"): output = string_ops.as_string(input_, scientific=True, shortest=True) output.eval(feed_dict={input_: float_inputs_}) with self.assertRaisesOpError("Fill string must be one or fewer"): output = string_ops.as_string(input_, fill="ab") output.eval(feed_dict={input_: float_inputs_}) def testInt(self): # Cannot use values outside -128..127 for test, because we're also # testing int8 int_inputs_ = [0, -1, 1, -128, 127, -101, 101, -0] s = lambda strs: [x.decode("ascii") for x in strs] with self.test_session(): for dtype in (dtypes.int32, dtypes.int64, dtypes.int8): input_ = array_ops.placeholder(dtype) output = string_ops.as_string(input_) result = output.eval(feed_dict={input_: int_inputs_}) self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_]) output = string_ops.as_string(input_, width=3) result = output.eval(feed_dict={input_: int_inputs_}) self.assertAllEqual(s(result), ["%3d" % x for x in int_inputs_]) output = string_ops.as_string(input_, width=3, fill="0") result = output.eval(feed_dict={input_: int_inputs_}) self.assertAllEqual(s(result), ["%03d" % x for x in int_inputs_]) with self.assertRaisesOpError("scientific and 
shortest"): output = string_ops.as_string(input_, scientific=True) output.eval(feed_dict={input_: int_inputs_}) with self.assertRaisesOpError("scientific and shortest"): output = string_ops.as_string(input_, shortest=True) output.eval(feed_dict={input_: int_inputs_}) with self.assertRaisesOpError("precision not supported"): output = string_ops.as_string(input_, precision=0) output.eval(feed_dict={input_: int_inputs_}) def testLargeInt(self): # Cannot use values outside -128..127 for test, because we're also # testing int8 s = lambda strs: [x.decode("ascii") for x in strs] with self.test_session(): input_ = array_ops.placeholder(dtypes.int32) int_inputs_ = [np.iinfo(np.int32).min, np.iinfo(np.int32).max] output = string_ops.as_string(input_) result = output.eval(feed_dict={input_: int_inputs_}) self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_]) input_ = array_ops.placeholder(dtypes.int64) int_inputs_ = [np.iinfo(np.int64).min, np.iinfo(np.int64).max] output = string_ops.as_string(input_) result = output.eval(feed_dict={input_: int_inputs_}) self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_]) def testBool(self): bool_inputs_ = [False, True] s = lambda strs: [x.decode("ascii") for x in strs] with self.test_session(): for dtype in (dtypes.bool,): input_ = array_ops.placeholder(dtype) output = string_ops.as_string(input_) result = output.eval(feed_dict={input_: bool_inputs_}) self.assertAllEqual(s(result), ["false", "true"]) def testComplex(self): float_inputs_ = [ 0, 1, -1, 0.5, 0.25, 0.125, complex("INF"), complex("NAN"), complex("-INF") ] complex_inputs_ = [(x + (x + 1) * 1j) for x in float_inputs_] with self.test_session(): for dtype in (dtypes.complex64,): input_ = array_ops.placeholder(dtype) def clean_nans(s_l): return [s.decode("ascii").replace("-nan", "nan") for s in s_l] output = string_ops.as_string(input_, shortest=True) result = output.eval(feed_dict={input_: complex_inputs_}) self.assertAllEqual( clean_nans(result), ["(%g,%g)" % 
(x.real, x.imag) for x in complex_inputs_]) output = string_ops.as_string(input_, scientific=True) result = output.eval(feed_dict={input_: complex_inputs_}) self.assertAllEqual( clean_nans(result), ["(%e,%e)" % (x.real, x.imag) for x in complex_inputs_]) output = string_ops.as_string(input_) result = output.eval(feed_dict={input_: complex_inputs_}) self.assertAllEqual( clean_nans(result), ["(%f,%f)" % (x.real, x.imag) for x in complex_inputs_]) output = string_ops.as_string(input_, width=3) result = output.eval(feed_dict={input_: complex_inputs_}) self.assertAllEqual( clean_nans(result), ["(%03f,%03f)" % (x.real, x.imag) for x in complex_inputs_]) output = string_ops.as_string(input_, width=3, fill="0", shortest=True) result = output.eval(feed_dict={input_: complex_inputs_}) self.assertAllEqual( clean_nans(result), ["(%03g,%03g)" % (x.real, x.imag) for x in complex_inputs_]) output = string_ops.as_string(input_, precision=10, width=3) result = output.eval(feed_dict={input_: complex_inputs_}) self.assertAllEqual( clean_nans(result), ["(%03.10f,%03.10f)" % (x.real, x.imag) for x in complex_inputs_]) output = string_ops.as_string( input_, precision=10, width=3, fill="0", shortest=True) result = output.eval(feed_dict={input_: complex_inputs_}) self.assertAllEqual( clean_nans(result), ["(%03.10g,%03.10g)" % (x.real, x.imag) for x in complex_inputs_]) with self.assertRaisesOpError("Cannot select both"): output = string_ops.as_string(input_, scientific=True, shortest=True) output.eval(feed_dict={input_: complex_inputs_}) if __name__ == "__main__": test.main()
apache-2.0
HLFH/CouchPotatoServer
couchpotato/core/media/movie/searcher.py
43
19822
from datetime import date import random import re import time import traceback from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent, fireEventAsync from couchpotato.core.helpers.encoding import simplifyString from couchpotato.core.helpers.variable import getTitle, possibleTitles, getImdb, getIdentifier, tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.searcher.base import SearcherBase from couchpotato.core.media.movie import MovieTypeBase from couchpotato.environment import Env log = CPLog(__name__) autoload = 'MovieSearcher' class MovieSearcher(SearcherBase, MovieTypeBase): in_progress = False def __init__(self): super(MovieSearcher, self).__init__() addEvent('movie.searcher.all', self.searchAll) addEvent('movie.searcher.all_view', self.searchAllView) addEvent('movie.searcher.single', self.single) addEvent('movie.searcher.try_next_release', self.tryNextRelease) addEvent('movie.searcher.could_be_released', self.couldBeReleased) addEvent('searcher.correct_release', self.correctRelease) addEvent('searcher.get_search_title', self.getSearchTitle) addApiView('movie.searcher.try_next', self.tryNextReleaseView, docs = { 'desc': 'Marks the snatched results as ignored and try the next best release', 'params': { 'media_id': {'desc': 'The id of the media'}, }, }) addApiView('movie.searcher.full_search', self.searchAllView, docs = { 'desc': 'Starts a full search for all wanted movies', }) addApiView('movie.searcher.progress', self.getProgress, docs = { 'desc': 'Get the progress of current full search', 'return': {'type': 'object', 'example': """{ 'progress': False || object, total & to_go, }"""}, }) if self.conf('run_on_launch'): addEvent('app.load', self.searchAll) def searchAllView(self, **kwargs): fireEventAsync('movie.searcher.all', manual = True) return { 'success': not self.in_progress } def searchAll(self, manual = False): if self.in_progress: log.info('Search 
already in progress') fireEvent('notify.frontend', type = 'movie.searcher.already_started', data = True, message = 'Full search already in progress') return self.in_progress = True fireEvent('notify.frontend', type = 'movie.searcher.started', data = True, message = 'Full search started') medias = [x['_id'] for x in fireEvent('media.with_status', 'active', types = 'movie', with_doc = False, single = True)] random.shuffle(medias) total = len(medias) self.in_progress = { 'total': total, 'to_go': total, } try: search_protocols = fireEvent('searcher.protocols', single = True) for media_id in medias: media = fireEvent('media.get', media_id, single = True) if not media: continue try: self.single(media, search_protocols, manual = manual) except IndexError: log.error('Forcing library update for %s, if you see this often, please report: %s', (getIdentifier(media), traceback.format_exc())) fireEvent('movie.update', media_id) except: log.error('Search failed for %s: %s', (getIdentifier(media), traceback.format_exc())) self.in_progress['to_go'] -= 1 # Break if CP wants to shut down if self.shuttingDown(): break except SearchSetupError: pass self.in_progress = False def single(self, movie, search_protocols = None, manual = False, force_download = False): # Find out search type try: if not search_protocols: search_protocols = fireEvent('searcher.protocols', single = True) except SearchSetupError: return if not movie['profile_id'] or (movie['status'] == 'done' and not manual): log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.') fireEvent('media.restatus', movie['_id'], single = True) return default_title = getTitle(movie) if not default_title: log.error('No proper info found for movie, removing it from library to stop it from causing more issues.') fireEvent('media.delete', movie['_id'], single = True) return # Update media status and check if it is still not done (due to the stop searching after feature if fireEvent('media.restatus', movie['_id'], 
single = True) == 'done': log.debug('No better quality found, marking movie %s as done.', default_title) pre_releases = fireEvent('quality.pre_releases', single = True) release_dates = fireEvent('movie.update_release_dates', movie['_id'], merge = True) found_releases = [] previous_releases = movie.get('releases', []) too_early_to_search = [] outside_eta_results = 0 always_search = self.conf('always_search') ignore_eta = manual total_result_count = 0 fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'_id': movie['_id']}, message = 'Searching for "%s"' % default_title) # Ignore eta once every 7 days if not always_search: prop_name = 'last_ignored_eta.%s' % movie['_id'] last_ignored_eta = float(Env.prop(prop_name, default = 0)) if last_ignored_eta < time.time() - 604800: ignore_eta = True Env.prop(prop_name, value = time.time()) db = get_db() profile = db.get('id', movie['profile_id']) ret = False for index, q_identifier in enumerate(profile.get('qualities', [])): quality_custom = { 'index': index, 'quality': q_identifier, 'finish': profile['finish'][index], 'wait_for': tryInt(profile['wait_for'][index]), '3d': profile['3d'][index] if profile.get('3d') else False, 'minimum_score': profile.get('minimum_score', 1), } could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year']) if not always_search and could_not_be_released: too_early_to_search.append(q_identifier) # Skip release, if ETA isn't ignored if not ignore_eta: continue has_better_quality = 0 # See if better quality is available for release in movie.get('releases', []): if release['status'] not in ['available', 'ignored', 'failed']: is_higher = fireEvent('quality.ishigher', \ {'identifier': q_identifier, 'is_3d': quality_custom.get('3d', 0)}, \ {'identifier': release['quality'], 'is_3d': release.get('is_3d', 0)}, \ profile, single = True) if is_higher != 'higher': has_better_quality += 1 # Don't search for quality lower then already 
available. if has_better_quality > 0: log.info('Better quality (%s) already available or snatched for %s', (q_identifier, default_title)) fireEvent('media.restatus', movie['_id'], single = True) break quality = fireEvent('quality.single', identifier = q_identifier, single = True) log.info('Search for %s in %s%s', (default_title, quality['label'], ' ignoring ETA' if always_search or ignore_eta else '')) # Extend quality with profile customs quality['custom'] = quality_custom results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or [] # Check if movie isn't deleted while searching if not fireEvent('media.get', movie.get('_id'), single = True): break # Add them to this movie releases list found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True) results_count = len(found_releases) total_result_count += results_count if results_count == 0: log.debug('Nothing found for %s in %s', (default_title, quality['label'])) # Keep track of releases found outside ETA window outside_eta_results += results_count if could_not_be_released else 0 # Don't trigger download, but notify user of available releases if could_not_be_released and results_count > 0: log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title)) # Try find a valid result and download it if (force_download or not could_not_be_released or always_search) and fireEvent('release.try_download_result', results, movie, quality_custom, single = True): ret = True # Remove releases that aren't found anymore temp_previous_releases = [] for release in previous_releases: if release.get('status') == 'available' and release.get('identifier') not in found_releases: fireEvent('release.delete', release.get('_id'), single = True) else: temp_previous_releases.append(release) previous_releases = temp_previous_releases del temp_previous_releases # Break if CP wants to shut down if self.shuttingDown() or ret: break if 
total_result_count > 0: fireEvent('media.tag', movie['_id'], 'recent', update_edited = True, single = True) if len(too_early_to_search) > 0: log.info2('Too early to search for %s, %s', (too_early_to_search, default_title)) if outside_eta_results > 0: message = 'Found %s releases for "%s" before ETA. Select and download via the dashboard.' % (outside_eta_results, default_title) log.info(message) if not manual: fireEvent('media.available', message = message, data = {}) fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'_id': movie['_id']}) return ret def correctRelease(self, nzb = None, media = None, quality = None, **kwargs): if media.get('type') != 'movie': return media_title = fireEvent('searcher.get_search_title', media, single = True) imdb_results = kwargs.get('imdb_results', False) retention = Env.setting('retention', section = 'nzb') if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0): log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name'])) return False # Check for required and ignored words if not fireEvent('searcher.correct_words', nzb['name'], media, single = True): return False preferred_quality = quality if quality else fireEvent('quality.single', identifier = quality['identifier'], single = True) # Contains lower quality string contains_other = fireEvent('searcher.contains_other_quality', nzb, movie_year = media['info']['year'], preferred_quality = preferred_quality, single = True) if contains_other and isinstance(contains_other, dict): log.info2('Wrong: %s, looking for %s, found %s', (nzb['name'], quality['label'], [x for x in contains_other] if contains_other else 'no quality')) return False # Contains lower quality string if not fireEvent('searcher.correct_3d', nzb, preferred_quality = preferred_quality, single = True): log.info2('Wrong: %s, %slooking for %s in 3D', (nzb['name'], ('' if preferred_quality['custom'].get('3d') else 'NOT '), quality['label'])) return 
False # File to small if nzb['size'] and tryInt(preferred_quality['size_min']) > tryInt(nzb['size']): log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min'])) return False # File to large if nzb['size'] and tryInt(preferred_quality['size_max']) < tryInt(nzb['size']): log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max'])) return False # Provider specific functions get_more = nzb.get('get_more_info') if get_more: get_more(nzb) extra_check = nzb.get('extra_check') if extra_check and not extra_check(nzb): return False if imdb_results: return True # Check if nzb contains imdb link if getImdb(nzb.get('description', '')) == getIdentifier(media): return True for raw_title in media['info']['titles']: for movie_title in possibleTitles(raw_title): movie_words = re.split('\W+', simplifyString(movie_title)) if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True): # if no IMDB link, at least check year range 1 if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 1, single = True): return True # if no IMDB link, at least check year if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 0, single = True): return True log.info("Wrong: %s, undetermined naming. 
Looking for '%s (%s)'", (nzb['name'], media_title, media['info']['year'])) return False def couldBeReleased(self, is_pre_release, dates, year = None): now = int(time.time()) now_year = date.today().year now_month = date.today().month if (year is None or year < now_year - 1 or (year <= now_year - 1 and now_month > 4)) and (not dates or (dates.get('theater', 0) == 0 and dates.get('dvd', 0) == 0)): return True else: # Don't allow movies with years to far in the future add_year = 1 if now_month > 10 else 0 # Only allow +1 year if end of the year if year is not None and year > (now_year + add_year): return False # For movies before 1972 if not dates or dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0: return True if is_pre_release: # Prerelease 1 week before theaters if dates.get('theater') - 604800 < now: return True else: # 12 weeks after theater release if dates.get('theater') > 0 and dates.get('theater') + 7257600 < now: return True if dates.get('dvd') > 0: # 4 weeks before dvd release if dates.get('dvd') - 2419200 < now: return True # Dvd should be released if dates.get('dvd') < now: return True return False def tryNextReleaseView(self, media_id = None, **kwargs): trynext = self.tryNextRelease(media_id, manual = True, force_download = True) return { 'success': trynext } def tryNextRelease(self, media_id, manual = False, force_download = False): try: rels = fireEvent('release.for_media', media_id, single = True) for rel in rels: if rel.get('status') in ['snatched', 'done']: fireEvent('release.update_status', rel.get('_id'), status = 'ignored') media = fireEvent('media.get', media_id, single = True) if media: log.info('Trying next release for: %s', getTitle(media)) self.single(media, manual = manual, force_download = force_download) return True return False except: log.error('Failed searching for next release: %s', traceback.format_exc()) return False def getSearchTitle(self, media): if media['type'] == 'movie': return getTitle(media) class 
SearchSetupError(Exception): pass config = [{ 'name': 'moviesearcher', 'order': 20, 'groups': [ { 'tab': 'searcher', 'name': 'movie_searcher', 'label': 'Movie search', 'description': 'Search options for movies', 'advanced': True, 'options': [ { 'name': 'always_search', 'default': False, 'migrate_from': 'searcher', 'type': 'bool', 'label': 'Always search', 'description': 'Search for movies even before there is a ETA. Enabling this will probably get you a lot of fakes.', }, { 'name': 'run_on_launch', 'migrate_from': 'searcher', 'label': 'Run on launch', 'advanced': True, 'default': 0, 'type': 'bool', 'description': 'Force run the searcher after (re)start.', }, { 'name': 'search_on_add', 'label': 'Search after add', 'advanced': True, 'default': 1, 'type': 'bool', 'description': 'Disable this to only search for movies on cron.', }, { 'name': 'cron_day', 'migrate_from': 'searcher', 'label': 'Day', 'advanced': True, 'default': '*', 'type': 'string', 'description': '<strong>*</strong>: Every day, <strong>*/2</strong>: Every 2 days, <strong>1</strong>: Every first of the month. See <a href="http://packages.python.org/APScheduler/cronschedule.html">APScheduler</a> for details.', }, { 'name': 'cron_hour', 'migrate_from': 'searcher', 'label': 'Hour', 'advanced': True, 'default': random.randint(0, 23), 'type': 'string', 'description': '<strong>*</strong>: Every hour, <strong>*/8</strong>: Every 8 hours, <strong>3</strong>: At 3, midnight.', }, { 'name': 'cron_minute', 'migrate_from': 'searcher', 'label': 'Minute', 'advanced': True, 'default': random.randint(0, 59), 'type': 'string', 'description': "Just keep it random, so the providers don't get DDOSed by every CP user on a 'full' hour." }, ], }, ], }]
gpl-3.0
BitFunnel/BitFunnel
NativeJIT/googletest/googletest/test/gtest_shuffle_test.py
3023
12549
#!/usr/bin/env python # # Copyright 2009 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Verifies that test shuffling works.""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import gtest_test_utils # Command to run the gtest_shuffle_test_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_') # The environment variables for test sharding. 
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' TEST_FILTER = 'A*.A:A*.B:C*' ALL_TESTS = [] ACTIVE_TESTS = [] FILTERED_TESTS = [] SHARDED_TESTS = [] SHUFFLED_ALL_TESTS = [] SHUFFLED_ACTIVE_TESTS = [] SHUFFLED_FILTERED_TESTS = [] SHUFFLED_SHARDED_TESTS = [] def AlsoRunDisabledTestsFlag(): return '--gtest_also_run_disabled_tests' def FilterFlag(test_filter): return '--gtest_filter=%s' % (test_filter,) def RepeatFlag(n): return '--gtest_repeat=%s' % (n,) def ShuffleFlag(): return '--gtest_shuffle' def RandomSeedFlag(n): return '--gtest_random_seed=%s' % (n,) def RunAndReturnOutput(extra_env, args): """Runs the test program and returns its output.""" environ_copy = os.environ.copy() environ_copy.update(extra_env) return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output def GetTestsForAllIterations(extra_env, args): """Runs the test program and returns a list of test lists. Args: extra_env: a map from environment variables to their values args: command line flags to pass to gtest_shuffle_test_ Returns: A list where the i-th element is the list of tests run in the i-th test iteration. """ test_iterations = [] for line in RunAndReturnOutput(extra_env, args).split('\n'): if line.startswith('----'): tests = [] test_iterations.append(tests) elif line.strip(): tests.append(line.strip()) # 'TestCaseName.TestName' return test_iterations def GetTestCases(tests): """Returns a list of test cases in the given full test names. Args: tests: a list of full test names Returns: A list of test cases from 'tests', in their original order. Consecutive duplicates are removed. 
""" test_cases = [] for test in tests: test_case = test.split('.')[0] if not test_case in test_cases: test_cases.append(test_case) return test_cases def CalculateTestLists(): """Calculates the list of tests run under different flags.""" if not ALL_TESTS: ALL_TESTS.extend( GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0]) if not ACTIVE_TESTS: ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0]) if not FILTERED_TESTS: FILTERED_TESTS.extend( GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0]) if not SHARDED_TESTS: SHARDED_TESTS.extend( GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'}, [])[0]) if not SHUFFLED_ALL_TESTS: SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations( {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0]) if not SHUFFLED_ACTIVE_TESTS: SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(1)])[0]) if not SHUFFLED_FILTERED_TESTS: SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0]) if not SHUFFLED_SHARDED_TESTS: SHUFFLED_SHARDED_TESTS.extend( GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'}, [ShuffleFlag(), RandomSeedFlag(1)])[0]) class GTestShuffleUnitTest(gtest_test_utils.TestCase): """Tests test shuffling.""" def setUp(self): CalculateTestLists() def testShufflePreservesNumberOfTests(self): self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS)) self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS)) self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS)) self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS)) def testShuffleChangesTestOrder(self): self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS) self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS) self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS, SHUFFLED_FILTERED_TESTS) self.assert_(SHUFFLED_SHARDED_TESTS != 
SHARDED_TESTS, SHUFFLED_SHARDED_TESTS) def testShuffleChangesTestCaseOrder(self): self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS), GetTestCases(SHUFFLED_ALL_TESTS)) self.assert_( GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS), GetTestCases(SHUFFLED_ACTIVE_TESTS)) self.assert_( GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS), GetTestCases(SHUFFLED_FILTERED_TESTS)) self.assert_( GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS), GetTestCases(SHUFFLED_SHARDED_TESTS)) def testShuffleDoesNotRepeatTest(self): for test in SHUFFLED_ALL_TESTS: self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test), '%s appears more than once' % (test,)) for test in SHUFFLED_ACTIVE_TESTS: self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test), '%s appears more than once' % (test,)) for test in SHUFFLED_FILTERED_TESTS: self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test), '%s appears more than once' % (test,)) for test in SHUFFLED_SHARDED_TESTS: self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test), '%s appears more than once' % (test,)) def testShuffleDoesNotCreateNewTest(self): for test in SHUFFLED_ALL_TESTS: self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,)) for test in SHUFFLED_ACTIVE_TESTS: self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,)) for test in SHUFFLED_FILTERED_TESTS: self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,)) for test in SHUFFLED_SHARDED_TESTS: self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,)) def testShuffleIncludesAllTests(self): for test in ALL_TESTS: self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,)) for test in ACTIVE_TESTS: self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,)) for test in FILTERED_TESTS: self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,)) for test in SHARDED_TESTS: self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,)) 
def testShuffleLeavesDeathTestsAtFront(self): non_death_test_found = False for test in SHUFFLED_ACTIVE_TESTS: if 'DeathTest.' in test: self.assert_(not non_death_test_found, '%s appears after a non-death test' % (test,)) else: non_death_test_found = True def _VerifyTestCasesDoNotInterleave(self, tests): test_cases = [] for test in tests: [test_case, _] = test.split('.') if test_cases and test_cases[-1] != test_case: test_cases.append(test_case) self.assertEqual(1, test_cases.count(test_case), 'Test case %s is not grouped together in %s' % (test_case, tests)) def testShuffleDoesNotInterleaveTestCases(self): self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS) self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS) self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS) self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS) def testShuffleRestoresOrderAfterEachIteration(self): # Get the test lists in all 3 iterations, using random seed 1, 2, # and 3 respectively. Google Test picks a different seed in each # iteration, and this test depends on the current implementation # picking successive numbers. This dependency is not ideal, but # makes the test much easier to write. [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = ( GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)])) # Make sure running the tests with random seed 1 gets the same # order as in iteration 1 above. [tests_with_seed1] = GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(1)]) self.assertEqual(tests_in_iteration1, tests_with_seed1) # Make sure running the tests with random seed 2 gets the same # order as in iteration 2 above. Success means that Google Test # correctly restores the test order before re-shuffling at the # beginning of iteration 2. 
[tests_with_seed2] = GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(2)]) self.assertEqual(tests_in_iteration2, tests_with_seed2) # Make sure running the tests with random seed 3 gets the same # order as in iteration 3 above. Success means that Google Test # correctly restores the test order before re-shuffling at the # beginning of iteration 3. [tests_with_seed3] = GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(3)]) self.assertEqual(tests_in_iteration3, tests_with_seed3) def testShuffleGeneratesNewOrderInEachIteration(self): [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = ( GetTestsForAllIterations( {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)])) self.assert_(tests_in_iteration1 != tests_in_iteration2, tests_in_iteration1) self.assert_(tests_in_iteration1 != tests_in_iteration3, tests_in_iteration1) self.assert_(tests_in_iteration2 != tests_in_iteration3, tests_in_iteration2) def testShuffleShardedTestsPreservesPartition(self): # If we run M tests on N shards, the same M tests should be run in # total, regardless of the random seeds used by the shards. [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '0'}, [ShuffleFlag(), RandomSeedFlag(1)]) [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'}, [ShuffleFlag(), RandomSeedFlag(20)]) [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '2'}, [ShuffleFlag(), RandomSeedFlag(25)]) sorted_sharded_tests = tests1 + tests2 + tests3 sorted_sharded_tests.sort() sorted_active_tests = [] sorted_active_tests.extend(ACTIVE_TESTS) sorted_active_tests.sort() self.assertEqual(sorted_active_tests, sorted_sharded_tests) if __name__ == '__main__': gtest_test_utils.Main()
mit
CodeDJ/qt5-hidpi
qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py
121
4542
# Copyright (C) 2009 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from webkitpy.thirdparty.mock import Mock
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.bot.queueengine import QueueEngine
from webkitpy.tool.commands.earlywarningsystem import *
from webkitpy.tool.commands.queuestest import QueuesTest
from webkitpy.tool.mocktool import MockTool, MockOptions


class AbstractEarlyWarningSystemTest(QueuesTest):
    """Unit tests for AbstractEarlyWarningSystem that do not require a concrete EWS subclass."""

    def test_failing_tests_message(self):
        """Verify that _failing_tests_message lists unexpected failures sorted alphabetically."""
        # Needed to define port_name, used in AbstractEarlyWarningSystem.__init__
        class TestEWS(AbstractEarlyWarningSystem):
            port_name = "win"  # Needs to be a port which port/factory understands.

        ews = TestEWS()
        ews.bind_to_tool(MockTool())
        ews._options = MockOptions(port=None, confirm=False)
        OutputCapture().assert_outputs(self, ews.begin_work_queue, expected_logs=self._default_begin_work_queue_logs(ews.name))
        # Stub the failure oracle so the message content is deterministic regardless
        # of what the mock task/patch would actually produce.
        ews._expected_failures.unexpected_failures_observed = lambda results: set(["foo.html", "bar.html"])
        task = Mock()
        # 10000 is the canonical attachment id used throughout the mock bug tracker.
        patch = ews._tool.bugs.fetch_attachment(10000)
        # Expected output is sorted: "bar.html" before "foo.html".
        self.assertMultiLineEqual(ews._failing_tests_message(task, patch), "New failing tests:\nbar.html\nfoo.html")


class EarlyWarningSystemTest(QueuesTest):
    """Runs the generic queue test against every registered EWS class."""

    def _default_expected_logs(self, ews):
        """Build the expected log output for one EWS, parameterized by its name/port.

        The "process_work_item" entry mirrors the exact sequence of webkit-patch
        subcommands the EWS runs for each patch; the build-and-test line is only
        present when the EWS is configured to run tests.
        """
        string_replacements = {
            "name": ews.name,
            "port": ews.port_name,
        }
        if ews.run_tests:
            run_tests_line = "Running: webkit-patch --status-host=example.com build-and-test --no-clean --no-update --test --non-interactive --port=%(port)s\n" % string_replacements
        else:
            run_tests_line = ""
        string_replacements['run_tests_line'] = run_tests_line
        expected_logs = {
            "begin_work_queue": self._default_begin_work_queue_logs(ews.name),
            "process_work_item": """Running: webkit-patch --status-host=example.com clean --port=%(port)s
Running: webkit-patch --status-host=example.com update --port=%(port)s
Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000 --port=%(port)s
Running: webkit-patch --status-host=example.com build --no-clean --no-update --build-style=release --port=%(port)s
%(run_tests_line)sMOCK: update_status: %(name)s Pass
MOCK: release_work_item: %(name)s 10000
""" % string_replacements,
            "handle_unexpected_error": "Mock error message\n",
            "handle_script_error": "ScriptError error message\n\nMOCK output\n",
        }
        return expected_logs

    def _test_ews(self, ews):
        """Exercise a single EWS instance end-to-end against the mock tool."""
        ews.bind_to_tool(MockTool())
        options = Mock()
        options.port = None
        options.run_tests = ews.run_tests
        self.assert_queue_outputs(ews, expected_logs=self._default_expected_logs(ews), options=options)

    def test_ewses(self):
        """Every EWS class discovered via load_ews_classes must pass the queue test."""
        classes = AbstractEarlyWarningSystem.load_ews_classes()
        self.assertTrue(classes)
        # Show full diffs on mismatch; the expected log strings are long.
        self.maxDiff = None
        for ews_class in classes:
            self._test_ews(ews_class())
lgpl-2.1
winking324/pyactivemq
src/test/test_openwire_async.py
11
2696
# Copyright 2007 Albert Strasheim <fullung@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from util import *
set_local_path()
import pyactivemq
restore_path()

from test_async import _test_async
import unittest
import Queue


class test_openwire_async(_test_async, unittest.TestCase):
    """Asynchronous-consumer tests run over the OpenWire protocol.

    NOTE(review): requires a live ActiveMQ broker listening on
    localhost:61616 — these are integration tests, not pure unit tests.
    """

    def setUp(self):
        # Select the OpenWire wire format explicitly via the connection URL.
        self.url = 'tcp://localhost:61616?wireFormat=openwire'
        from pyactivemq import ActiveMQConnectionFactory
        f = ActiveMQConnectionFactory(self.url)
        self.conn = f.createConnection()

    def tearDown(self):
        self.conn.close()
        del self.conn

    def test_selectors(self):
        """Fan one stream of messages out to consumers with divisibility selectors.

        Consumer i (1-based) subscribes with selector 'int1%i=0', so it should
        receive exactly the messages whose 'int1' property is divisible by i.
        """
        session = self.conn.createSession()
        topic = self.random_topic(session)
        producer = session.createProducer(topic)
        nconsumers = 7
        consumers = []
        for i in xrange(1, nconsumers + 1):
            # Each consumer gets its own session; a session serializes delivery.
            session = self.conn.createSession()
            # '%%' escapes the literal modulo operator in the JMS selector string.
            selector = 'int1%%%d=0' % i
            consumer = session.createConsumer(topic, selector)
            consumer.messageListener = self.QueueMessageListener()
            consumers.append(consumer)
        messagecounts = [0] * nconsumers
        self.conn.start()
        textMessage = session.createTextMessage()
        nmessages = 200
        for i in xrange(nmessages):
            textMessage.setIntProperty('int1', i)
            producer.send(textMessage)
            # Track, per consumer, how many messages its selector should match.
            for j in xrange(1, nconsumers + 1):
                if i % j == 0:
                    messagecounts[j - 1] += 1
        listeners = map(lambda x: x.messageListener, consumers)
        for i, (messagecount, listener) in enumerate(zip(messagecounts, listeners)):
            try:
                for j in xrange(messagecount):
                    # Block up to 5s per message; Queue.Empty means delivery failed.
                    message = listener.queue.get(block=True, timeout=5)
                    int1 = message.getIntProperty('int1')
                    # Every delivered message must satisfy this consumer's selector.
                    self.assertEqual(0, int1 % (i + 1))
            except Queue.Empty:
                msg = 'Expected %d messages for consumer %d, got %d'
                self.assert_(False, msg % (messagecount, i, j))
            # No consumer may receive more messages than its selector matches.
            self.assert_(listener.queue.empty())


if __name__ == '__main__':
    import sys
    unittest.main(argv=sys.argv)
apache-2.0
visualputty/Landing-Lights
django/db/backends/dummy/base.py
5
1611
""" Dummy database backend for Django. Django uses this if the database ENGINE setting is empty (None or empty string). Each of these API functions, except connection.close(), raises ImproperlyConfigured. """ from django.core.exceptions import ImproperlyConfigured from django.db.backends import * from django.db.backends.creation import BaseDatabaseCreation def complain(*args, **kwargs): raise ImproperlyConfigured("You haven't set the database ENGINE setting yet.") def ignore(*args, **kwargs): pass class DatabaseError(Exception): pass class IntegrityError(DatabaseError): pass class DatabaseOperations(BaseDatabaseOperations): quote_name = complain class DatabaseClient(BaseDatabaseClient): runshell = complain class DatabaseIntrospection(BaseDatabaseIntrospection): get_table_list = complain get_table_description = complain get_relations = complain get_indexes = complain class DatabaseWrapper(object): operators = {} cursor = complain _commit = complain _rollback = ignore def __init__(self, settings_dict, alias, *args, **kwargs): self.features = BaseDatabaseFeatures(self) self.ops = DatabaseOperations() self.client = DatabaseClient(self) self.creation = BaseDatabaseCreation(self) self.introspection = DatabaseIntrospection(self) self.validation = BaseDatabaseValidation(self) self.settings_dict = settings_dict self.alias = alias self.transaction_state = [] self.savepoint_state = 0 self.dirty = None def close(self): pass
bsd-3-clause
wujuguang/tornado
tornado/test/web_test.py
4
115285
from tornado.concurrent import Future from tornado import gen from tornado.escape import ( json_decode, utf8, to_unicode, recursive_unicode, native_str, to_basestring, ) from tornado.httpclient import HTTPClientError from tornado.httputil import format_timestamp from tornado.iostream import IOStream from tornado import locale from tornado.locks import Event from tornado.log import app_log, gen_log from tornado.simple_httpclient import SimpleAsyncHTTPClient from tornado.template import DictLoader from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test from tornado.util import ObjectDict, unicode_type from tornado.web import ( Application, RequestHandler, StaticFileHandler, RedirectHandler as WebRedirectHandler, HTTPError, MissingArgumentError, ErrorHandler, authenticated, url, _create_signature_v1, create_signed_value, decode_signed_value, get_signature_key_version, UIModule, Finish, stream_request_body, removeslash, addslash, GZipContentEncoding, ) import binascii import contextlib import copy import datetime import email.utils import gzip from io import BytesIO import itertools import logging import os import re import socket import typing # noqa: F401 import unittest import urllib.parse def relpath(*a): return os.path.join(os.path.dirname(__file__), *a) class WebTestCase(AsyncHTTPTestCase): """Base class for web tests that also supports WSGI mode. Override get_handlers and get_app_kwargs instead of get_app. This class is deprecated since WSGI mode is no longer supported. """ def get_app(self): self.app = Application(self.get_handlers(), **self.get_app_kwargs()) return self.app def get_handlers(self): raise NotImplementedError() def get_app_kwargs(self): return {} class SimpleHandlerTestCase(WebTestCase): """Simplified base class for tests that work with a single handler class. To use, define a nested class named ``Handler``. 
""" def get_handlers(self): return [("/", self.Handler)] class HelloHandler(RequestHandler): def get(self): self.write("hello") class CookieTestRequestHandler(RequestHandler): # stub out enough methods to make the secure_cookie functions work def __init__(self, cookie_secret="0123456789", key_version=None): # don't call super.__init__ self._cookies = {} # type: typing.Dict[str, bytes] if key_version is None: self.application = ObjectDict(settings=dict(cookie_secret=cookie_secret)) else: self.application = ObjectDict( settings=dict(cookie_secret=cookie_secret, key_version=key_version) ) def get_cookie(self, name): return self._cookies.get(name) def set_cookie(self, name, value, expires_days=None): self._cookies[name] = value # See SignedValueTest below for more. class SecureCookieV1Test(unittest.TestCase): def test_round_trip(self): handler = CookieTestRequestHandler() handler.set_secure_cookie("foo", b"bar", version=1) self.assertEqual(handler.get_secure_cookie("foo", min_version=1), b"bar") def test_cookie_tampering_future_timestamp(self): handler = CookieTestRequestHandler() # this string base64-encodes to '12345678' handler.set_secure_cookie("foo", binascii.a2b_hex(b"d76df8e7aefc"), version=1) cookie = handler._cookies["foo"] match = re.match(br"12345678\|([0-9]+)\|([0-9a-f]+)", cookie) assert match is not None timestamp = match.group(1) sig = match.group(2) self.assertEqual( _create_signature_v1( handler.application.settings["cookie_secret"], "foo", "12345678", timestamp, ), sig, ) # shifting digits from payload to timestamp doesn't alter signature # (this is not desirable behavior, just confirming that that's how it # works) self.assertEqual( _create_signature_v1( handler.application.settings["cookie_secret"], "foo", "1234", b"5678" + timestamp, ), sig, ) # tamper with the cookie handler._cookies["foo"] = utf8( "1234|5678%s|%s" % (to_basestring(timestamp), to_basestring(sig)) ) # it gets rejected with ExpectLog(gen_log, "Cookie timestamp in future"): 
self.assertTrue(handler.get_secure_cookie("foo", min_version=1) is None) def test_arbitrary_bytes(self): # Secure cookies accept arbitrary data (which is base64 encoded). # Note that normal cookies accept only a subset of ascii. handler = CookieTestRequestHandler() handler.set_secure_cookie("foo", b"\xe9", version=1) self.assertEqual(handler.get_secure_cookie("foo", min_version=1), b"\xe9") # See SignedValueTest below for more. class SecureCookieV2Test(unittest.TestCase): KEY_VERSIONS = {0: "ajklasdf0ojaisdf", 1: "aslkjasaolwkjsdf"} def test_round_trip(self): handler = CookieTestRequestHandler() handler.set_secure_cookie("foo", b"bar", version=2) self.assertEqual(handler.get_secure_cookie("foo", min_version=2), b"bar") def test_key_version_roundtrip(self): handler = CookieTestRequestHandler( cookie_secret=self.KEY_VERSIONS, key_version=0 ) handler.set_secure_cookie("foo", b"bar") self.assertEqual(handler.get_secure_cookie("foo"), b"bar") def test_key_version_roundtrip_differing_version(self): handler = CookieTestRequestHandler( cookie_secret=self.KEY_VERSIONS, key_version=1 ) handler.set_secure_cookie("foo", b"bar") self.assertEqual(handler.get_secure_cookie("foo"), b"bar") def test_key_version_increment_version(self): handler = CookieTestRequestHandler( cookie_secret=self.KEY_VERSIONS, key_version=0 ) handler.set_secure_cookie("foo", b"bar") new_handler = CookieTestRequestHandler( cookie_secret=self.KEY_VERSIONS, key_version=1 ) new_handler._cookies = handler._cookies self.assertEqual(new_handler.get_secure_cookie("foo"), b"bar") def test_key_version_invalidate_version(self): handler = CookieTestRequestHandler( cookie_secret=self.KEY_VERSIONS, key_version=0 ) handler.set_secure_cookie("foo", b"bar") new_key_versions = self.KEY_VERSIONS.copy() new_key_versions.pop(0) new_handler = CookieTestRequestHandler( cookie_secret=new_key_versions, key_version=1 ) new_handler._cookies = handler._cookies self.assertEqual(new_handler.get_secure_cookie("foo"), None) class 
FinalReturnTest(WebTestCase): def get_handlers(self): test = self class FinishHandler(RequestHandler): @gen.coroutine def get(self): test.final_return = self.finish() yield test.final_return @gen.coroutine def post(self): self.write("hello,") yield self.flush() test.final_return = self.finish("world") yield test.final_return class RenderHandler(RequestHandler): def create_template_loader(self, path): return DictLoader({"foo.html": "hi"}) @gen.coroutine def get(self): test.final_return = self.render("foo.html") return [("/finish", FinishHandler), ("/render", RenderHandler)] def get_app_kwargs(self): return dict(template_path="FinalReturnTest") def test_finish_method_return_future(self): response = self.fetch(self.get_url("/finish")) self.assertEqual(response.code, 200) self.assertIsInstance(self.final_return, Future) self.assertTrue(self.final_return.done()) response = self.fetch(self.get_url("/finish"), method="POST", body=b"") self.assertEqual(response.code, 200) self.assertIsInstance(self.final_return, Future) self.assertTrue(self.final_return.done()) def test_render_method_return_future(self): response = self.fetch(self.get_url("/render")) self.assertEqual(response.code, 200) self.assertIsInstance(self.final_return, Future) class CookieTest(WebTestCase): def get_handlers(self): class SetCookieHandler(RequestHandler): def get(self): # Try setting cookies with different argument types # to ensure that everything gets encoded correctly self.set_cookie("str", "asdf") self.set_cookie("unicode", u"qwer") self.set_cookie("bytes", b"zxcv") class GetCookieHandler(RequestHandler): def get(self): self.write(self.get_cookie("foo", "default")) class SetCookieDomainHandler(RequestHandler): def get(self): # unicode domain and path arguments shouldn't break things # either (see bug #285) self.set_cookie("unicode_args", "blah", domain=u"foo.com", path=u"/foo") class SetCookieSpecialCharHandler(RequestHandler): def get(self): self.set_cookie("equals", "a=b") 
self.set_cookie("semicolon", "a;b") self.set_cookie("quote", 'a"b') class SetCookieOverwriteHandler(RequestHandler): def get(self): self.set_cookie("a", "b", domain="example.com") self.set_cookie("c", "d", domain="example.com") # A second call with the same name clobbers the first. # Attributes from the first call are not carried over. self.set_cookie("a", "e") class SetCookieMaxAgeHandler(RequestHandler): def get(self): self.set_cookie("foo", "bar", max_age=10) class SetCookieExpiresDaysHandler(RequestHandler): def get(self): self.set_cookie("foo", "bar", expires_days=10) class SetCookieFalsyFlags(RequestHandler): def get(self): self.set_cookie("a", "1", secure=True) self.set_cookie("b", "1", secure=False) self.set_cookie("c", "1", httponly=True) self.set_cookie("d", "1", httponly=False) return [ ("/set", SetCookieHandler), ("/get", GetCookieHandler), ("/set_domain", SetCookieDomainHandler), ("/special_char", SetCookieSpecialCharHandler), ("/set_overwrite", SetCookieOverwriteHandler), ("/set_max_age", SetCookieMaxAgeHandler), ("/set_expires_days", SetCookieExpiresDaysHandler), ("/set_falsy_flags", SetCookieFalsyFlags), ] def test_set_cookie(self): response = self.fetch("/set") self.assertEqual( sorted(response.headers.get_list("Set-Cookie")), ["bytes=zxcv; Path=/", "str=asdf; Path=/", "unicode=qwer; Path=/"], ) def test_get_cookie(self): response = self.fetch("/get", headers={"Cookie": "foo=bar"}) self.assertEqual(response.body, b"bar") response = self.fetch("/get", headers={"Cookie": 'foo="bar"'}) self.assertEqual(response.body, b"bar") response = self.fetch("/get", headers={"Cookie": "/=exception;"}) self.assertEqual(response.body, b"default") def test_set_cookie_domain(self): response = self.fetch("/set_domain") self.assertEqual( response.headers.get_list("Set-Cookie"), ["unicode_args=blah; Domain=foo.com; Path=/foo"], ) def test_cookie_special_char(self): response = self.fetch("/special_char") headers = sorted(response.headers.get_list("Set-Cookie")) 
self.assertEqual(len(headers), 3) self.assertEqual(headers[0], 'equals="a=b"; Path=/') self.assertEqual(headers[1], 'quote="a\\"b"; Path=/') # python 2.7 octal-escapes the semicolon; older versions leave it alone self.assertTrue( headers[2] in ('semicolon="a;b"; Path=/', 'semicolon="a\\073b"; Path=/'), headers[2], ) data = [ ("foo=a=b", "a=b"), ('foo="a=b"', "a=b"), ('foo="a;b"', '"a'), # even quoted, ";" is a delimiter ("foo=a\\073b", "a\\073b"), # escapes only decoded in quotes ('foo="a\\073b"', "a;b"), ('foo="a\\"b"', 'a"b'), ] for header, expected in data: logging.debug("trying %r", header) response = self.fetch("/get", headers={"Cookie": header}) self.assertEqual(response.body, utf8(expected)) def test_set_cookie_overwrite(self): response = self.fetch("/set_overwrite") headers = response.headers.get_list("Set-Cookie") self.assertEqual( sorted(headers), ["a=e; Path=/", "c=d; Domain=example.com; Path=/"] ) def test_set_cookie_max_age(self): response = self.fetch("/set_max_age") headers = response.headers.get_list("Set-Cookie") self.assertEqual(sorted(headers), ["foo=bar; Max-Age=10; Path=/"]) def test_set_cookie_expires_days(self): response = self.fetch("/set_expires_days") header = response.headers.get("Set-Cookie") match = re.match("foo=bar; expires=(?P<expires>.+); Path=/", header) assert match is not None expires = datetime.datetime.utcnow() + datetime.timedelta(days=10) parsed = email.utils.parsedate(match.groupdict()["expires"]) assert parsed is not None header_expires = datetime.datetime(*parsed[:6]) self.assertTrue(abs((expires - header_expires).total_seconds()) < 10) def test_set_cookie_false_flags(self): response = self.fetch("/set_falsy_flags") headers = sorted(response.headers.get_list("Set-Cookie")) # The secure and httponly headers are capitalized in py35 and # lowercase in older versions. 
self.assertEqual(headers[0].lower(), "a=1; path=/; secure") self.assertEqual(headers[1].lower(), "b=1; path=/") self.assertEqual(headers[2].lower(), "c=1; httponly; path=/") self.assertEqual(headers[3].lower(), "d=1; path=/") class AuthRedirectRequestHandler(RequestHandler): def initialize(self, login_url): self.login_url = login_url def get_login_url(self): return self.login_url @authenticated def get(self): # we'll never actually get here because the test doesn't follow redirects self.send_error(500) class AuthRedirectTest(WebTestCase): def get_handlers(self): return [ ("/relative", AuthRedirectRequestHandler, dict(login_url="/login")), ( "/absolute", AuthRedirectRequestHandler, dict(login_url="http://example.com/login"), ), ] def test_relative_auth_redirect(self): response = self.fetch(self.get_url("/relative"), follow_redirects=False) self.assertEqual(response.code, 302) self.assertEqual(response.headers["Location"], "/login?next=%2Frelative") def test_absolute_auth_redirect(self): response = self.fetch(self.get_url("/absolute"), follow_redirects=False) self.assertEqual(response.code, 302) self.assertTrue( re.match( r"http://example.com/login\?next=http%3A%2F%2F127.0.0.1%3A[0-9]+%2Fabsolute", response.headers["Location"], ), response.headers["Location"], ) class ConnectionCloseHandler(RequestHandler): def initialize(self, test): self.test = test @gen.coroutine def get(self): self.test.on_handler_waiting() yield self.test.cleanup_event.wait() def on_connection_close(self): self.test.on_connection_close() class ConnectionCloseTest(WebTestCase): def get_handlers(self): self.cleanup_event = Event() return [("/", ConnectionCloseHandler, dict(test=self))] def test_connection_close(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) s.connect(("127.0.0.1", self.get_http_port())) self.stream = IOStream(s) self.stream.write(b"GET / HTTP/1.0\r\n\r\n") self.wait() # Let the hanging coroutine clean up after itself self.cleanup_event.set() 
        # One zero-length sleep lets the released coroutine finish before
        # the test tears down the IOLoop.
        self.io_loop.run_sync(lambda: gen.sleep(0))

    def on_handler_waiting(self):
        # Called by the handler once its coroutine is parked; closing our
        # side of the socket should trigger on_connection_close server-side.
        logging.debug("handler waiting")
        self.stream.close()

    def on_connection_close(self):
        logging.debug("connection closed")
        self.stop()


class EchoHandler(RequestHandler):
    """Echoes path/arguments back as JSON, asserting argument types en route."""

    def get(self, *path_args):
        # Type checks: web.py interfaces convert argument values to
        # unicode strings (by default, but see also decode_argument).
        # In httpserver.py (i.e. self.request.arguments), they're left
        # as bytes.  Keys are always native strings.
        # NOTE: the exact type(...) != ... comparisons are deliberate here
        # (isinstance would accept subclasses and weaken the check).
        for key in self.request.arguments:
            if type(key) != str:
                raise Exception("incorrect type for key: %r" % type(key))
            for value in self.request.arguments[key]:
                if type(value) != bytes:
                    raise Exception("incorrect type for value: %r" % type(value))
            for value in self.get_arguments(key):
                if type(value) != unicode_type:
                    raise Exception("incorrect type for value: %r" % type(value))
        for arg in path_args:
            if type(arg) != unicode_type:
                raise Exception("incorrect type for path arg: %r" % type(arg))
        self.write(
            dict(
                path=self.request.path,
                path_args=path_args,
                args=recursive_unicode(self.request.arguments),
            )
        )


class RequestEncodingTest(WebTestCase):
    """Exercises url-decoding of path components and query arguments."""

    def get_handlers(self):
        return [("/group/(.*)", EchoHandler), ("/slashes/([^/]*)/([^/]*)", EchoHandler)]

    def fetch_json(self, path):
        # Convenience: fetch and decode the EchoHandler's JSON response.
        return json_decode(self.fetch(path).body)

    def test_group_question_mark(self):
        # Ensure that url-encoded question marks are handled properly
        self.assertEqual(
            self.fetch_json("/group/%3F"),
            dict(path="/group/%3F", path_args=["?"], args={}),
        )
        self.assertEqual(
            self.fetch_json("/group/%3F?%3F=%3F"),
            dict(path="/group/%3F", path_args=["?"], args={"?": ["?"]}),
        )

    def test_group_encoding(self):
        # Path components and query arguments should be decoded the same way
        self.assertEqual(
            self.fetch_json("/group/%C3%A9?arg=%C3%A9"),
            {
                u"path": u"/group/%C3%A9",
                u"path_args": [u"\u00e9"],
                u"args": {u"arg": [u"\u00e9"]},
            },
        )

    def test_slashes(self):
        # Slashes may be escaped to appear as a single "directory" in the path,
        # but they
        # are then unescaped when passed to the get() method.
        self.assertEqual(
            self.fetch_json("/slashes/foo/bar"),
            dict(path="/slashes/foo/bar", path_args=["foo", "bar"], args={}),
        )
        self.assertEqual(
            self.fetch_json("/slashes/a%2Fb/c%2Fd"),
            dict(path="/slashes/a%2Fb/c%2Fd", path_args=["a/b", "c/d"], args={}),
        )

    def test_error(self):
        # Percent signs (encoded as %25) should not mess up printf-style
        # messages in logs
        with ExpectLog(gen_log, ".*Invalid unicode"):
            self.fetch("/group/?arg=%25%e9")


class TypeCheckHandler(RequestHandler):
    """Records any type mismatches seen in RequestHandler accessors.

    ``prepare`` probes the various accessor methods and stores mismatches
    in ``self.errors``; ``get``/``post`` write that dict back so the test
    can assert it is empty.
    """

    def prepare(self):
        # name -> "expected X, got Y" for each failed check
        self.errors = {}  # type: typing.Dict[str, str]

        self.check_type("status", self.get_status(), int)

        # get_argument is an exception from the general rule of using
        # type str for non-body data mainly for historical reasons.
        self.check_type("argument", self.get_argument("foo"), unicode_type)
        self.check_type("cookie_key", list(self.cookies.keys())[0], str)
        self.check_type("cookie_value", list(self.cookies.values())[0].value, str)
        # Secure cookies return bytes because they can contain arbitrary
        # data, but regular cookies are native strings.
        # The checks above assume exactly one cookie named "asdf" was sent;
        # fail loudly if the test sent anything else.
        if list(self.cookies.keys()) != ["asdf"]:
            raise Exception(
                "unexpected values for cookie keys: %r" % self.cookies.keys()
            )
        self.check_type("get_secure_cookie", self.get_secure_cookie("asdf"), bytes)
        self.check_type("get_cookie", self.get_cookie("asdf"), str)

        self.check_type("xsrf_token", self.xsrf_token, bytes)
        self.check_type("xsrf_form_html", self.xsrf_form_html(), str)

        self.check_type("reverse_url", self.reverse_url("typecheck", "foo"), str)

        self.check_type("request_summary", self._request_summary(), str)

    def get(self, path_component):
        # path_component uses type unicode instead of str for consistency
        # with get_argument()
        self.check_type("path_component", path_component, unicode_type)
        self.write(self.errors)

    def post(self, path_component):
        self.check_type("path_component", path_component, unicode_type)
        self.write(self.errors)

    def check_type(self, name, obj, expected_type):
        # Exact type comparison on purpose: subclasses should be reported.
        actual_type = type(obj)
        if expected_type != actual_type:
            self.errors[name] = "expected %s, got %s" % (expected_type, actual_type)


class DecodeArgHandler(RequestHandler):
    """Overrides decode_argument to decode with a caller-chosen encoding."""

    def decode_argument(self, value, name=None):
        # Arguments always arrive as bytes before decoding.
        if type(value) != bytes:
            raise Exception("unexpected type for value: %r" % type(value))
        # use self.request.arguments directly to avoid recursion
        if "encoding" in self.request.arguments:
            return value.decode(to_unicode(self.request.arguments["encoding"][0]))
        else:
            return value

    def get(self, arg):
        def describe(s):
            # Tag each value with its runtime type so tests can assert
            # whether decoding happened (bytes are hex-encoded for JSON).
            if type(s) == bytes:
                return ["bytes", native_str(binascii.b2a_hex(s))]
            elif type(s) == unicode_type:
                return ["unicode", s]
            raise Exception("unknown type")

        self.write({"path": describe(arg), "query": describe(self.get_argument("foo"))})


class LinkifyHandler(RequestHandler):
    """Renders the linkify UI module template."""

    def get(self):
        self.render("linkify.html", message="http://example.com")


class UIModuleResourceHandler(RequestHandler):
    """Renders a page whose UI modules register css/js resources."""

    def get(self):
        self.render("page.html", entries=[1, 2])


class OptionalPathHandler(RequestHandler):
    """Echoes an optional path capture group (may be None)."""

    def get(self, path):
        self.write({"path": path})


class MultiHeaderHandler(RequestHandler):
    def get(self):
        # set_header overwrites case-insensitively; add_header appends.
        # Non-string values (2, 3) exercise set/add_header's conversion.
        self.set_header("x-overwrite", "1")
        self.set_header("X-Overwrite", 2)
        self.add_header("x-multi", 3)
        self.add_header("X-Multi", "4")


class RedirectHandler(RequestHandler):
    """Redirects to "/" with permanent/status taken from query arguments."""

    def get(self):
        if self.get_argument("permanent", None) is not None:
            # int(...) deliberately passes 0/1 rather than a bool.
            self.redirect("/", permanent=int(self.get_argument("permanent")))
        elif self.get_argument("status", None) is not None:
            self.redirect("/", status=int(self.get_argument("status")))
        else:
            raise Exception("didn't get permanent or status arguments")


class EmptyFlushCallbackHandler(RequestHandler):
    @gen.coroutine
    def get(self):
        # Ensure that yielding self.flush() resolves whether or not there
        # was any output to send (empty flushes must not hang the coroutine).
        yield self.flush()  # "empty" flush, but writes headers
        yield self.flush()  # empty flush
        self.write("o")
        yield self.flush()  # flushes the "o"
        yield self.flush()  # empty flush
        self.finish("k")


class HeaderInjectionHandler(RequestHandler):
    """Confirms CRLF injection into header values raises ValueError."""

    def get(self):
        try:
            self.set_header("X-Foo", "foo\r\nX-Bar: baz")
            raise Exception("Didn't get expected exception")
        except ValueError as e:
            if "Unsafe header value" in str(e):
                self.finish(b"ok")
            else:
                raise


class GetArgumentHandler(RequestHandler):
    """Returns one argument, selected from query, body, or merged sources."""

    def prepare(self):
        # The "source" argument chooses which accessor to exercise.
        if self.get_argument("source", None) == "query":
            method = self.get_query_argument
        elif self.get_argument("source", None) == "body":
            method = self.get_body_argument
        else:
            method = self.get_argument
        self.finish(method("foo", "default"))


class GetArgumentsHandler(RequestHandler):
    """Returns the plural argument accessors side-by-side for comparison."""

    def prepare(self):
        self.finish(
            dict(
                default=self.get_arguments("foo"),
                query=self.get_query_arguments("foo"),
                body=self.get_body_arguments("foo"),
            )
        )


# This test was shared with wsgi_test.py; now the name is meaningless.
class WSGISafeWebTest(WebTestCase): COOKIE_SECRET = "WebTest.COOKIE_SECRET" def get_app_kwargs(self): loader = DictLoader( { "linkify.html": "{% module linkify(message) %}", "page.html": """\ <html><head></head><body> {% for e in entries %} {% module Template("entry.html", entry=e) %} {% end %} </body></html>""", "entry.html": """\ {{ set_resources(embedded_css=".entry { margin-bottom: 1em; }", embedded_javascript="js_embed()", css_files=["/base.css", "/foo.css"], javascript_files="/common.js", html_head="<meta>", html_body='<script src="/analytics.js"/>') }} <div class="entry">...</div>""", } ) return dict( template_loader=loader, autoescape="xhtml_escape", cookie_secret=self.COOKIE_SECRET, ) def tearDown(self): super(WSGISafeWebTest, self).tearDown() RequestHandler._template_loaders.clear() def get_handlers(self): urls = [ url("/typecheck/(.*)", TypeCheckHandler, name="typecheck"), url("/decode_arg/(.*)", DecodeArgHandler, name="decode_arg"), url("/decode_arg_kw/(?P<arg>.*)", DecodeArgHandler), url("/linkify", LinkifyHandler), url("/uimodule_resources", UIModuleResourceHandler), url("/optional_path/(.+)?", OptionalPathHandler), url("/multi_header", MultiHeaderHandler), url("/redirect", RedirectHandler), url( "/web_redirect_permanent", WebRedirectHandler, {"url": "/web_redirect_newpath"}, ), url( "/web_redirect", WebRedirectHandler, {"url": "/web_redirect_newpath", "permanent": False}, ), url( "//web_redirect_double_slash", WebRedirectHandler, {"url": "/web_redirect_newpath"}, ), url("/header_injection", HeaderInjectionHandler), url("/get_argument", GetArgumentHandler), url("/get_arguments", GetArgumentsHandler), ] return urls def fetch_json(self, *args, **kwargs): response = self.fetch(*args, **kwargs) response.rethrow() return json_decode(response.body) def test_types(self): cookie_value = to_unicode( create_signed_value(self.COOKIE_SECRET, "asdf", "qwer") ) response = self.fetch( "/typecheck/asdf?foo=bar", headers={"Cookie": "asdf=" + cookie_value} ) data = 
json_decode(response.body) self.assertEqual(data, {}) response = self.fetch( "/typecheck/asdf?foo=bar", method="POST", headers={"Cookie": "asdf=" + cookie_value}, body="foo=bar", ) def test_decode_argument(self): # These urls all decode to the same thing urls = [ "/decode_arg/%C3%A9?foo=%C3%A9&encoding=utf-8", "/decode_arg/%E9?foo=%E9&encoding=latin1", "/decode_arg_kw/%E9?foo=%E9&encoding=latin1", ] for req_url in urls: response = self.fetch(req_url) response.rethrow() data = json_decode(response.body) self.assertEqual( data, {u"path": [u"unicode", u"\u00e9"], u"query": [u"unicode", u"\u00e9"]}, ) response = self.fetch("/decode_arg/%C3%A9?foo=%C3%A9") response.rethrow() data = json_decode(response.body) self.assertEqual( data, {u"path": [u"bytes", u"c3a9"], u"query": [u"bytes", u"c3a9"]} ) def test_decode_argument_invalid_unicode(self): # test that invalid unicode in URLs causes 400, not 500 with ExpectLog(gen_log, ".*Invalid unicode.*"): response = self.fetch("/typecheck/invalid%FF") self.assertEqual(response.code, 400) response = self.fetch("/typecheck/invalid?foo=%FF") self.assertEqual(response.code, 400) def test_decode_argument_plus(self): # These urls are all equivalent. 
urls = [ "/decode_arg/1%20%2B%201?foo=1%20%2B%201&encoding=utf-8", "/decode_arg/1%20+%201?foo=1+%2B+1&encoding=utf-8", ] for req_url in urls: response = self.fetch(req_url) response.rethrow() data = json_decode(response.body) self.assertEqual( data, {u"path": [u"unicode", u"1 + 1"], u"query": [u"unicode", u"1 + 1"]}, ) def test_reverse_url(self): self.assertEqual(self.app.reverse_url("decode_arg", "foo"), "/decode_arg/foo") self.assertEqual(self.app.reverse_url("decode_arg", 42), "/decode_arg/42") self.assertEqual(self.app.reverse_url("decode_arg", b"\xe9"), "/decode_arg/%E9") self.assertEqual( self.app.reverse_url("decode_arg", u"\u00e9"), "/decode_arg/%C3%A9" ) self.assertEqual( self.app.reverse_url("decode_arg", "1 + 1"), "/decode_arg/1%20%2B%201" ) def test_uimodule_unescaped(self): response = self.fetch("/linkify") self.assertEqual( response.body, b'<a href="http://example.com">http://example.com</a>' ) def test_uimodule_resources(self): response = self.fetch("/uimodule_resources") self.assertEqual( response.body, b"""\ <html><head><link href="/base.css" type="text/css" rel="stylesheet"/><link href="/foo.css" type="text/css" rel="stylesheet"/> <style type="text/css"> .entry { margin-bottom: 1em; } </style> <meta> </head><body> <div class="entry">...</div> <div class="entry">...</div> <script src="/common.js" type="text/javascript"></script> <script type="text/javascript"> //<![CDATA[ js_embed() //]]> </script> <script src="/analytics.js"/> </body></html>""", # noqa: E501 ) def test_optional_path(self): self.assertEqual(self.fetch_json("/optional_path/foo"), {u"path": u"foo"}) self.assertEqual(self.fetch_json("/optional_path/"), {u"path": None}) def test_multi_header(self): response = self.fetch("/multi_header") self.assertEqual(response.headers["x-overwrite"], "2") self.assertEqual(response.headers.get_list("x-multi"), ["3", "4"]) def test_redirect(self): response = self.fetch("/redirect?permanent=1", follow_redirects=False) self.assertEqual(response.code, 
301) response = self.fetch("/redirect?permanent=0", follow_redirects=False) self.assertEqual(response.code, 302) response = self.fetch("/redirect?status=307", follow_redirects=False) self.assertEqual(response.code, 307) def test_web_redirect(self): response = self.fetch("/web_redirect_permanent", follow_redirects=False) self.assertEqual(response.code, 301) self.assertEqual(response.headers["Location"], "/web_redirect_newpath") response = self.fetch("/web_redirect", follow_redirects=False) self.assertEqual(response.code, 302) self.assertEqual(response.headers["Location"], "/web_redirect_newpath") def test_web_redirect_double_slash(self): response = self.fetch("//web_redirect_double_slash", follow_redirects=False) self.assertEqual(response.code, 301) self.assertEqual(response.headers["Location"], "/web_redirect_newpath") def test_header_injection(self): response = self.fetch("/header_injection") self.assertEqual(response.body, b"ok") def test_get_argument(self): response = self.fetch("/get_argument?foo=bar") self.assertEqual(response.body, b"bar") response = self.fetch("/get_argument?foo=") self.assertEqual(response.body, b"") response = self.fetch("/get_argument") self.assertEqual(response.body, b"default") # Test merging of query and body arguments. # In singular form, body arguments take precedence over query arguments. body = urllib.parse.urlencode(dict(foo="hello")) response = self.fetch("/get_argument?foo=bar", method="POST", body=body) self.assertEqual(response.body, b"hello") # In plural methods they are merged. response = self.fetch("/get_arguments?foo=bar", method="POST", body=body) self.assertEqual( json_decode(response.body), dict(default=["bar", "hello"], query=["bar"], body=["hello"]), ) def test_get_query_arguments(self): # send as a post so we can ensure the separation between query # string and body arguments. 
body = urllib.parse.urlencode(dict(foo="hello")) response = self.fetch( "/get_argument?source=query&foo=bar", method="POST", body=body ) self.assertEqual(response.body, b"bar") response = self.fetch( "/get_argument?source=query&foo=", method="POST", body=body ) self.assertEqual(response.body, b"") response = self.fetch("/get_argument?source=query", method="POST", body=body) self.assertEqual(response.body, b"default") def test_get_body_arguments(self): body = urllib.parse.urlencode(dict(foo="bar")) response = self.fetch( "/get_argument?source=body&foo=hello", method="POST", body=body ) self.assertEqual(response.body, b"bar") body = urllib.parse.urlencode(dict(foo="")) response = self.fetch( "/get_argument?source=body&foo=hello", method="POST", body=body ) self.assertEqual(response.body, b"") body = urllib.parse.urlencode(dict()) response = self.fetch( "/get_argument?source=body&foo=hello", method="POST", body=body ) self.assertEqual(response.body, b"default") def test_no_gzip(self): response = self.fetch("/get_argument") self.assertNotIn("Accept-Encoding", response.headers.get("Vary", "")) self.assertNotIn("gzip", response.headers.get("Content-Encoding", "")) class NonWSGIWebTests(WebTestCase): def get_handlers(self): return [("/empty_flush", EmptyFlushCallbackHandler)] def test_empty_flush(self): response = self.fetch("/empty_flush") self.assertEqual(response.body, b"ok") class ErrorResponseTest(WebTestCase): def get_handlers(self): class DefaultHandler(RequestHandler): def get(self): if self.get_argument("status", None): raise HTTPError(int(self.get_argument("status"))) 1 / 0 class WriteErrorHandler(RequestHandler): def get(self): if self.get_argument("status", None): self.send_error(int(self.get_argument("status"))) else: 1 / 0 def write_error(self, status_code, **kwargs): self.set_header("Content-Type", "text/plain") if "exc_info" in kwargs: self.write("Exception: %s" % kwargs["exc_info"][0].__name__) else: self.write("Status: %d" % status_code) class 
FailedWriteErrorHandler(RequestHandler): def get(self): 1 / 0 def write_error(self, status_code, **kwargs): raise Exception("exception in write_error") return [ url("/default", DefaultHandler), url("/write_error", WriteErrorHandler), url("/failed_write_error", FailedWriteErrorHandler), ] def test_default(self): with ExpectLog(app_log, "Uncaught exception"): response = self.fetch("/default") self.assertEqual(response.code, 500) self.assertTrue(b"500: Internal Server Error" in response.body) response = self.fetch("/default?status=503") self.assertEqual(response.code, 503) self.assertTrue(b"503: Service Unavailable" in response.body) response = self.fetch("/default?status=435") self.assertEqual(response.code, 435) self.assertTrue(b"435: Unknown" in response.body) def test_write_error(self): with ExpectLog(app_log, "Uncaught exception"): response = self.fetch("/write_error") self.assertEqual(response.code, 500) self.assertEqual(b"Exception: ZeroDivisionError", response.body) response = self.fetch("/write_error?status=503") self.assertEqual(response.code, 503) self.assertEqual(b"Status: 503", response.body) def test_failed_write_error(self): with ExpectLog(app_log, "Uncaught exception"): response = self.fetch("/failed_write_error") self.assertEqual(response.code, 500) self.assertEqual(b"", response.body) class StaticFileTest(WebTestCase): # The expected MD5 hash of robots.txt, used in tests that call # StaticFileHandler.get_version robots_txt_hash = b"f71d20196d4caf35b6a670db8c70b03d" static_dir = os.path.join(os.path.dirname(__file__), "static") def get_handlers(self): class StaticUrlHandler(RequestHandler): def get(self, path): with_v = int(self.get_argument("include_version", 1)) self.write(self.static_url(path, include_version=with_v)) class AbsoluteStaticUrlHandler(StaticUrlHandler): include_host = True class OverrideStaticUrlHandler(RequestHandler): def get(self, path): do_include = bool(self.get_argument("include_host")) self.include_host = not do_include 
regular_url = self.static_url(path) override_url = self.static_url(path, include_host=do_include) if override_url == regular_url: return self.write(str(False)) protocol = self.request.protocol + "://" protocol_length = len(protocol) check_regular = regular_url.find(protocol, 0, protocol_length) check_override = override_url.find(protocol, 0, protocol_length) if do_include: result = check_override == 0 and check_regular == -1 else: result = check_override == -1 and check_regular == 0 self.write(str(result)) return [ ("/static_url/(.*)", StaticUrlHandler), ("/abs_static_url/(.*)", AbsoluteStaticUrlHandler), ("/override_static_url/(.*)", OverrideStaticUrlHandler), ("/root_static/(.*)", StaticFileHandler, dict(path="/")), ] def get_app_kwargs(self): return dict(static_path=relpath("static")) def test_static_files(self): response = self.fetch("/robots.txt") self.assertTrue(b"Disallow: /" in response.body) response = self.fetch("/static/robots.txt") self.assertTrue(b"Disallow: /" in response.body) self.assertEqual(response.headers.get("Content-Type"), "text/plain") def test_static_compressed_files(self): response = self.fetch("/static/sample.xml.gz") self.assertEqual(response.headers.get("Content-Type"), "application/gzip") response = self.fetch("/static/sample.xml.bz2") self.assertEqual( response.headers.get("Content-Type"), "application/octet-stream" ) # make sure the uncompressed file still has the correct type response = self.fetch("/static/sample.xml") self.assertTrue( response.headers.get("Content-Type") in set(("text/xml", "application/xml")) ) def test_static_url(self): response = self.fetch("/static_url/robots.txt") self.assertEqual(response.body, b"/static/robots.txt?v=" + self.robots_txt_hash) def test_absolute_static_url(self): response = self.fetch("/abs_static_url/robots.txt") self.assertEqual( response.body, (utf8(self.get_url("/")) + b"static/robots.txt?v=" + self.robots_txt_hash), ) def test_relative_version_exclusion(self): response = 
self.fetch("/static_url/robots.txt?include_version=0") self.assertEqual(response.body, b"/static/robots.txt") def test_absolute_version_exclusion(self): response = self.fetch("/abs_static_url/robots.txt?include_version=0") self.assertEqual(response.body, utf8(self.get_url("/") + "static/robots.txt")) def test_include_host_override(self): self._trigger_include_host_check(False) self._trigger_include_host_check(True) def _trigger_include_host_check(self, include_host): path = "/override_static_url/robots.txt?include_host=%s" response = self.fetch(path % int(include_host)) self.assertEqual(response.body, utf8(str(True))) def get_and_head(self, *args, **kwargs): """Performs a GET and HEAD request and returns the GET response. Fails if any ``Content-*`` headers returned by the two requests differ. """ head_response = self.fetch(*args, method="HEAD", **kwargs) get_response = self.fetch(*args, method="GET", **kwargs) content_headers = set() for h in itertools.chain(head_response.headers, get_response.headers): if h.startswith("Content-"): content_headers.add(h) for h in content_headers: self.assertEqual( head_response.headers.get(h), get_response.headers.get(h), "%s differs between GET (%s) and HEAD (%s)" % (h, head_response.headers.get(h), get_response.headers.get(h)), ) return get_response def test_static_304_if_modified_since(self): response1 = self.get_and_head("/static/robots.txt") response2 = self.get_and_head( "/static/robots.txt", headers={"If-Modified-Since": response1.headers["Last-Modified"]}, ) self.assertEqual(response2.code, 304) self.assertTrue("Content-Length" not in response2.headers) self.assertTrue("Last-Modified" not in response2.headers) def test_static_304_if_none_match(self): response1 = self.get_and_head("/static/robots.txt") response2 = self.get_and_head( "/static/robots.txt", headers={"If-None-Match": response1.headers["Etag"]} ) self.assertEqual(response2.code, 304) def test_static_304_etag_modified_bug(self): response1 = 
self.get_and_head("/static/robots.txt") response2 = self.get_and_head( "/static/robots.txt", headers={ "If-None-Match": '"MISMATCH"', "If-Modified-Since": response1.headers["Last-Modified"], }, ) self.assertEqual(response2.code, 200) def test_static_if_modified_since_pre_epoch(self): # On windows, the functions that work with time_t do not accept # negative values, and at least one client (processing.js) seems # to use if-modified-since 1/1/1960 as a cache-busting technique. response = self.get_and_head( "/static/robots.txt", headers={"If-Modified-Since": "Fri, 01 Jan 1960 00:00:00 GMT"}, ) self.assertEqual(response.code, 200) def test_static_if_modified_since_time_zone(self): # Instead of the value from Last-Modified, make requests with times # chosen just before and after the known modification time # of the file to ensure that the right time zone is being used # when parsing If-Modified-Since. stat = os.stat(relpath("static/robots.txt")) response = self.get_and_head( "/static/robots.txt", headers={"If-Modified-Since": format_timestamp(stat.st_mtime - 1)}, ) self.assertEqual(response.code, 200) response = self.get_and_head( "/static/robots.txt", headers={"If-Modified-Since": format_timestamp(stat.st_mtime + 1)}, ) self.assertEqual(response.code, 304) def test_static_etag(self): response = self.get_and_head("/static/robots.txt") self.assertEqual( utf8(response.headers.get("Etag")), b'"' + self.robots_txt_hash + b'"' ) def test_static_with_range(self): response = self.get_and_head( "/static/robots.txt", headers={"Range": "bytes=0-9"} ) self.assertEqual(response.code, 206) self.assertEqual(response.body, b"User-agent") self.assertEqual( utf8(response.headers.get("Etag")), b'"' + self.robots_txt_hash + b'"' ) self.assertEqual(response.headers.get("Content-Length"), "10") self.assertEqual(response.headers.get("Content-Range"), "bytes 0-9/26") def test_static_with_range_full_file(self): response = self.get_and_head( "/static/robots.txt", headers={"Range": "bytes=0-"} ) 
# Note: Chrome refuses to play audio if it gets an HTTP 206 in response # to ``Range: bytes=0-`` :( self.assertEqual(response.code, 200) robots_file_path = os.path.join(self.static_dir, "robots.txt") with open(robots_file_path) as f: self.assertEqual(response.body, utf8(f.read())) self.assertEqual(response.headers.get("Content-Length"), "26") self.assertEqual(response.headers.get("Content-Range"), None) def test_static_with_range_full_past_end(self): response = self.get_and_head( "/static/robots.txt", headers={"Range": "bytes=0-10000000"} ) self.assertEqual(response.code, 200) robots_file_path = os.path.join(self.static_dir, "robots.txt") with open(robots_file_path) as f: self.assertEqual(response.body, utf8(f.read())) self.assertEqual(response.headers.get("Content-Length"), "26") self.assertEqual(response.headers.get("Content-Range"), None) def test_static_with_range_partial_past_end(self): response = self.get_and_head( "/static/robots.txt", headers={"Range": "bytes=1-10000000"} ) self.assertEqual(response.code, 206) robots_file_path = os.path.join(self.static_dir, "robots.txt") with open(robots_file_path) as f: self.assertEqual(response.body, utf8(f.read()[1:])) self.assertEqual(response.headers.get("Content-Length"), "25") self.assertEqual(response.headers.get("Content-Range"), "bytes 1-25/26") def test_static_with_range_end_edge(self): response = self.get_and_head( "/static/robots.txt", headers={"Range": "bytes=22-"} ) self.assertEqual(response.body, b": /\n") self.assertEqual(response.headers.get("Content-Length"), "4") self.assertEqual(response.headers.get("Content-Range"), "bytes 22-25/26") def test_static_with_range_neg_end(self): response = self.get_and_head( "/static/robots.txt", headers={"Range": "bytes=-4"} ) self.assertEqual(response.body, b": /\n") self.assertEqual(response.headers.get("Content-Length"), "4") self.assertEqual(response.headers.get("Content-Range"), "bytes 22-25/26") def test_static_with_range_neg_past_start(self): response = 
self.get_and_head( "/static/robots.txt", headers={"Range": "bytes=-1000000"} ) self.assertEqual(response.code, 200) robots_file_path = os.path.join(self.static_dir, "robots.txt") with open(robots_file_path) as f: self.assertEqual(response.body, utf8(f.read())) self.assertEqual(response.headers.get("Content-Length"), "26") self.assertEqual(response.headers.get("Content-Range"), None) def test_static_invalid_range(self): response = self.get_and_head("/static/robots.txt", headers={"Range": "asdf"}) self.assertEqual(response.code, 200) def test_static_unsatisfiable_range_zero_suffix(self): response = self.get_and_head( "/static/robots.txt", headers={"Range": "bytes=-0"} ) self.assertEqual(response.headers.get("Content-Range"), "bytes */26") self.assertEqual(response.code, 416) def test_static_unsatisfiable_range_invalid_start(self): response = self.get_and_head( "/static/robots.txt", headers={"Range": "bytes=26"} ) self.assertEqual(response.code, 416) self.assertEqual(response.headers.get("Content-Range"), "bytes */26") def test_static_unsatisfiable_range_end_less_than_start(self): response = self.get_and_head( "/static/robots.txt", headers={"Range": "bytes=10-3"} ) self.assertEqual(response.code, 416) self.assertEqual(response.headers.get("Content-Range"), "bytes */26") def test_static_head(self): response = self.fetch("/static/robots.txt", method="HEAD") self.assertEqual(response.code, 200) # No body was returned, but we did get the right content length. 
self.assertEqual(response.body, b"") self.assertEqual(response.headers["Content-Length"], "26") self.assertEqual( utf8(response.headers["Etag"]), b'"' + self.robots_txt_hash + b'"' ) def test_static_head_range(self): response = self.fetch( "/static/robots.txt", method="HEAD", headers={"Range": "bytes=1-4"} ) self.assertEqual(response.code, 206) self.assertEqual(response.body, b"") self.assertEqual(response.headers["Content-Length"], "4") self.assertEqual( utf8(response.headers["Etag"]), b'"' + self.robots_txt_hash + b'"' ) def test_static_range_if_none_match(self): response = self.get_and_head( "/static/robots.txt", headers={ "Range": "bytes=1-4", "If-None-Match": b'"' + self.robots_txt_hash + b'"', }, ) self.assertEqual(response.code, 304) self.assertEqual(response.body, b"") self.assertTrue("Content-Length" not in response.headers) self.assertEqual( utf8(response.headers["Etag"]), b'"' + self.robots_txt_hash + b'"' ) def test_static_404(self): response = self.get_and_head("/static/blarg") self.assertEqual(response.code, 404) def test_path_traversal_protection(self): # curl_httpclient processes ".." on the client side, so we # must test this with simple_httpclient. self.http_client.close() self.http_client = SimpleAsyncHTTPClient() with ExpectLog(gen_log, ".*not in root static directory"): response = self.get_and_head("/static/../static_foo.txt") # Attempted path traversal should result in 403, not 200 # (which means the check failed and the file was served) # or 404 (which means that the file didn't exist and # is probably a packaging error). self.assertEqual(response.code, 403) @unittest.skipIf(os.name != "posix", "non-posix OS") def test_root_static_path(self): # Sometimes people set the StaticFileHandler's path to '/' # to disable Tornado's path validation (in conjunction with # their own validation in get_absolute_path). Make sure # that the stricter validation in 4.2.1 doesn't break them. 
path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "static/robots.txt" ) response = self.get_and_head("/root_static" + urllib.parse.quote(path)) self.assertEqual(response.code, 200) class StaticDefaultFilenameTest(WebTestCase): def get_app_kwargs(self): return dict( static_path=relpath("static"), static_handler_args=dict(default_filename="index.html"), ) def get_handlers(self): return [] def test_static_default_filename(self): response = self.fetch("/static/dir/", follow_redirects=False) self.assertEqual(response.code, 200) self.assertEqual(b"this is the index\n", response.body) def test_static_default_redirect(self): response = self.fetch("/static/dir", follow_redirects=False) self.assertEqual(response.code, 301) self.assertTrue(response.headers["Location"].endswith("/static/dir/")) class StaticFileWithPathTest(WebTestCase): def get_app_kwargs(self): return dict( static_path=relpath("static"), static_handler_args=dict(default_filename="index.html"), ) def get_handlers(self): return [("/foo/(.*)", StaticFileHandler, {"path": relpath("templates/")})] def test_serve(self): response = self.fetch("/foo/utf8.html") self.assertEqual(response.body, b"H\xc3\xa9llo\n") class CustomStaticFileTest(WebTestCase): def get_handlers(self): class MyStaticFileHandler(StaticFileHandler): @classmethod def make_static_url(cls, settings, path): version_hash = cls.get_version(settings, path) extension_index = path.rindex(".") before_version = path[:extension_index] after_version = path[(extension_index + 1) :] return "/static/%s.%s.%s" % ( before_version, version_hash, after_version, ) def parse_url_path(self, url_path): extension_index = url_path.rindex(".") version_index = url_path.rindex(".", 0, extension_index) return "%s%s" % (url_path[:version_index], url_path[extension_index:]) @classmethod def get_absolute_path(cls, settings, path): return "CustomStaticFileTest:" + path def validate_absolute_path(self, root, absolute_path): return absolute_path @classmethod def 
get_content(self, path, start=None, end=None): assert start is None and end is None if path == "CustomStaticFileTest:foo.txt": return b"bar" raise Exception("unexpected path %r" % path) def get_content_size(self): if self.absolute_path == "CustomStaticFileTest:foo.txt": return 3 raise Exception("unexpected path %r" % self.absolute_path) def get_modified_time(self): return None @classmethod def get_version(cls, settings, path): return "42" class StaticUrlHandler(RequestHandler): def get(self, path): self.write(self.static_url(path)) self.static_handler_class = MyStaticFileHandler return [("/static_url/(.*)", StaticUrlHandler)] def get_app_kwargs(self): return dict(static_path="dummy", static_handler_class=self.static_handler_class) def test_serve(self): response = self.fetch("/static/foo.42.txt") self.assertEqual(response.body, b"bar") def test_static_url(self): with ExpectLog(gen_log, "Could not open static file", required=False): response = self.fetch("/static_url/foo.txt") self.assertEqual(response.body, b"/static/foo.42.txt") class HostMatchingTest(WebTestCase): class Handler(RequestHandler): def initialize(self, reply): self.reply = reply def get(self): self.write(self.reply) def get_handlers(self): return [("/foo", HostMatchingTest.Handler, {"reply": "wildcard"})] def test_host_matching(self): self.app.add_handlers( "www.example.com", [("/foo", HostMatchingTest.Handler, {"reply": "[0]"})] ) self.app.add_handlers( r"www\.example\.com", [("/bar", HostMatchingTest.Handler, {"reply": "[1]"})] ) self.app.add_handlers( "www.example.com", [("/baz", HostMatchingTest.Handler, {"reply": "[2]"})] ) self.app.add_handlers( "www.e.*e.com", [("/baz", HostMatchingTest.Handler, {"reply": "[3]"})] ) response = self.fetch("/foo") self.assertEqual(response.body, b"wildcard") response = self.fetch("/bar") self.assertEqual(response.code, 404) response = self.fetch("/baz") self.assertEqual(response.code, 404) response = self.fetch("/foo", headers={"Host": "www.example.com"}) 
self.assertEqual(response.body, b"[0]") response = self.fetch("/bar", headers={"Host": "www.example.com"}) self.assertEqual(response.body, b"[1]") response = self.fetch("/baz", headers={"Host": "www.example.com"}) self.assertEqual(response.body, b"[2]") response = self.fetch("/baz", headers={"Host": "www.exe.com"}) self.assertEqual(response.body, b"[3]") class DefaultHostMatchingTest(WebTestCase): def get_handlers(self): return [] def get_app_kwargs(self): return {"default_host": "www.example.com"} def test_default_host_matching(self): self.app.add_handlers( "www.example.com", [("/foo", HostMatchingTest.Handler, {"reply": "[0]"})] ) self.app.add_handlers( r"www\.example\.com", [("/bar", HostMatchingTest.Handler, {"reply": "[1]"})] ) self.app.add_handlers( "www.test.com", [("/baz", HostMatchingTest.Handler, {"reply": "[2]"})] ) response = self.fetch("/foo") self.assertEqual(response.body, b"[0]") response = self.fetch("/bar") self.assertEqual(response.body, b"[1]") response = self.fetch("/baz") self.assertEqual(response.code, 404) response = self.fetch("/foo", headers={"X-Real-Ip": "127.0.0.1"}) self.assertEqual(response.code, 404) self.app.default_host = "www.test.com" response = self.fetch("/baz") self.assertEqual(response.body, b"[2]") class NamedURLSpecGroupsTest(WebTestCase): def get_handlers(self): class EchoHandler(RequestHandler): def get(self, path): self.write(path) return [ ("/str/(?P<path>.*)", EchoHandler), (u"/unicode/(?P<path>.*)", EchoHandler), ] def test_named_urlspec_groups(self): response = self.fetch("/str/foo") self.assertEqual(response.body, b"foo") response = self.fetch("/unicode/bar") self.assertEqual(response.body, b"bar") class ClearHeaderTest(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): self.set_header("h1", "foo") self.set_header("h2", "bar") self.clear_header("h1") self.clear_header("nonexistent") def test_clear_header(self): response = self.fetch("/") self.assertTrue("h1" not in response.headers) 
self.assertEqual(response.headers["h2"], "bar") class Header204Test(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): self.set_status(204) self.finish() def test_204_headers(self): response = self.fetch("/") self.assertEqual(response.code, 204) self.assertNotIn("Content-Length", response.headers) self.assertNotIn("Transfer-Encoding", response.headers) class Header304Test(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): self.set_header("Content-Language", "en_US") self.write("hello") def test_304_headers(self): response1 = self.fetch("/") self.assertEqual(response1.headers["Content-Length"], "5") self.assertEqual(response1.headers["Content-Language"], "en_US") response2 = self.fetch( "/", headers={"If-None-Match": response1.headers["Etag"]} ) self.assertEqual(response2.code, 304) self.assertTrue("Content-Length" not in response2.headers) self.assertTrue("Content-Language" not in response2.headers) # Not an entity header, but should not be added to 304s by chunking self.assertTrue("Transfer-Encoding" not in response2.headers) class StatusReasonTest(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): reason = self.request.arguments.get("reason", []) self.set_status( int(self.get_argument("code")), reason=reason[0] if reason else None ) def get_http_client(self): # simple_httpclient only: curl doesn't expose the reason string return SimpleAsyncHTTPClient() def test_status(self): response = self.fetch("/?code=304") self.assertEqual(response.code, 304) self.assertEqual(response.reason, "Not Modified") response = self.fetch("/?code=304&reason=Foo") self.assertEqual(response.code, 304) self.assertEqual(response.reason, "Foo") response = self.fetch("/?code=682&reason=Bar") self.assertEqual(response.code, 682) self.assertEqual(response.reason, "Bar") response = self.fetch("/?code=682") self.assertEqual(response.code, 682) self.assertEqual(response.reason, "Unknown") class DateHeaderTest(SimpleHandlerTestCase): class 
Handler(RequestHandler): def get(self): self.write("hello") def test_date_header(self): response = self.fetch("/") parsed = email.utils.parsedate(response.headers["Date"]) assert parsed is not None header_date = datetime.datetime(*parsed[:6]) self.assertTrue( header_date - datetime.datetime.utcnow() < datetime.timedelta(seconds=2) ) class RaiseWithReasonTest(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): raise HTTPError(682, reason="Foo") def get_http_client(self): # simple_httpclient only: curl doesn't expose the reason string return SimpleAsyncHTTPClient() def test_raise_with_reason(self): response = self.fetch("/") self.assertEqual(response.code, 682) self.assertEqual(response.reason, "Foo") self.assertIn(b"682: Foo", response.body) def test_httperror_str(self): self.assertEqual(str(HTTPError(682, reason="Foo")), "HTTP 682: Foo") def test_httperror_str_from_httputil(self): self.assertEqual(str(HTTPError(682)), "HTTP 682: Unknown") class ErrorHandlerXSRFTest(WebTestCase): def get_handlers(self): # note that if the handlers list is empty we get the default_host # redirect fallback instead of a 404, so test with both an # explicitly defined error handler and an implicit 404. return [("/error", ErrorHandler, dict(status_code=417))] def get_app_kwargs(self): return dict(xsrf_cookies=True) def test_error_xsrf(self): response = self.fetch("/error", method="POST", body="") self.assertEqual(response.code, 417) def test_404_xsrf(self): response = self.fetch("/404", method="POST", body="") self.assertEqual(response.code, 404) class GzipTestCase(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): for v in self.get_arguments("vary"): self.add_header("Vary", v) # Must write at least MIN_LENGTH bytes to activate compression. self.write("hello world" + ("!" 
* GZipContentEncoding.MIN_LENGTH)) def get_app_kwargs(self): return dict( gzip=True, static_path=os.path.join(os.path.dirname(__file__), "static") ) def assert_compressed(self, response): # simple_httpclient renames the content-encoding header; # curl_httpclient doesn't. self.assertEqual( response.headers.get( "Content-Encoding", response.headers.get("X-Consumed-Content-Encoding") ), "gzip", ) def test_gzip(self): response = self.fetch("/") self.assert_compressed(response) self.assertEqual(response.headers["Vary"], "Accept-Encoding") def test_gzip_static(self): # The streaming responses in StaticFileHandler have subtle # interactions with the gzip output so test this case separately. response = self.fetch("/robots.txt") self.assert_compressed(response) self.assertEqual(response.headers["Vary"], "Accept-Encoding") def test_gzip_not_requested(self): response = self.fetch("/", use_gzip=False) self.assertNotIn("Content-Encoding", response.headers) self.assertEqual(response.headers["Vary"], "Accept-Encoding") def test_vary_already_present(self): response = self.fetch("/?vary=Accept-Language") self.assert_compressed(response) self.assertEqual( [s.strip() for s in response.headers["Vary"].split(",")], ["Accept-Language", "Accept-Encoding"], ) def test_vary_already_present_multiple(self): # Regression test for https://github.com/tornadoweb/tornado/issues/1670 response = self.fetch("/?vary=Accept-Language&vary=Cookie") self.assert_compressed(response) self.assertEqual( [s.strip() for s in response.headers["Vary"].split(",")], ["Accept-Language", "Cookie", "Accept-Encoding"], ) class PathArgsInPrepareTest(WebTestCase): class Handler(RequestHandler): def prepare(self): self.write(dict(args=self.path_args, kwargs=self.path_kwargs)) def get(self, path): assert path == "foo" self.finish() def get_handlers(self): return [("/pos/(.*)", self.Handler), ("/kw/(?P<path>.*)", self.Handler)] def test_pos(self): response = self.fetch("/pos/foo") response.rethrow() data = 
json_decode(response.body) self.assertEqual(data, {"args": ["foo"], "kwargs": {}}) def test_kw(self): response = self.fetch("/kw/foo") response.rethrow() data = json_decode(response.body) self.assertEqual(data, {"args": [], "kwargs": {"path": "foo"}}) class ClearAllCookiesTest(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): self.clear_all_cookies() self.write("ok") def test_clear_all_cookies(self): response = self.fetch("/", headers={"Cookie": "foo=bar; baz=xyzzy"}) set_cookies = sorted(response.headers.get_list("Set-Cookie")) # Python 3.5 sends 'baz="";'; older versions use 'baz=;' self.assertTrue( set_cookies[0].startswith("baz=;") or set_cookies[0].startswith('baz="";') ) self.assertTrue( set_cookies[1].startswith("foo=;") or set_cookies[1].startswith('foo="";') ) class PermissionError(Exception): pass class ExceptionHandlerTest(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): exc = self.get_argument("exc") if exc == "http": raise HTTPError(410, "no longer here") elif exc == "zero": 1 / 0 elif exc == "permission": raise PermissionError("not allowed") def write_error(self, status_code, **kwargs): if "exc_info" in kwargs: typ, value, tb = kwargs["exc_info"] if isinstance(value, PermissionError): self.set_status(403) self.write("PermissionError") return RequestHandler.write_error(self, status_code, **kwargs) def log_exception(self, typ, value, tb): if isinstance(value, PermissionError): app_log.warning("custom logging for PermissionError: %s", value.args[0]) else: RequestHandler.log_exception(self, typ, value, tb) def test_http_error(self): # HTTPErrors are logged as warnings with no stack trace. # TODO: extend ExpectLog to test this more precisely with ExpectLog(gen_log, ".*no longer here"): response = self.fetch("/?exc=http") self.assertEqual(response.code, 410) def test_unknown_error(self): # Unknown errors are logged as errors with a stack trace. 
with ExpectLog(app_log, "Uncaught exception"): response = self.fetch("/?exc=zero") self.assertEqual(response.code, 500) def test_known_error(self): # log_exception can override logging behavior, and write_error # can override the response. with ExpectLog(app_log, "custom logging for PermissionError: not allowed"): response = self.fetch("/?exc=permission") self.assertEqual(response.code, 403) class BuggyLoggingTest(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): 1 / 0 def log_exception(self, typ, value, tb): 1 / 0 def test_buggy_log_exception(self): # Something gets logged even though the application's # logger is broken. with ExpectLog(app_log, ".*"): self.fetch("/") class UIMethodUIModuleTest(SimpleHandlerTestCase): """Test that UI methods and modules are created correctly and associated with the handler. """ class Handler(RequestHandler): def get(self): self.render("foo.html") def value(self): return self.get_argument("value") def get_app_kwargs(self): def my_ui_method(handler, x): return "In my_ui_method(%s) with handler value %s." % (x, handler.value()) class MyModule(UIModule): def render(self, x): return "In MyModule(%s) with handler value %s." % ( x, self.handler.value(), ) loader = DictLoader( {"foo.html": "{{ my_ui_method(42) }} {% module MyModule(123) %}"} ) return dict( template_loader=loader, ui_methods={"my_ui_method": my_ui_method}, ui_modules={"MyModule": MyModule}, ) def tearDown(self): super(UIMethodUIModuleTest, self).tearDown() # TODO: fix template loader caching so this isn't necessary. RequestHandler._template_loaders.clear() def test_ui_method(self): response = self.fetch("/?value=asdf") self.assertEqual( response.body, b"In my_ui_method(42) with handler value asdf. 
" b"In MyModule(123) with handler value asdf.", ) class GetArgumentErrorTest(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): try: self.get_argument("foo") self.write({}) except MissingArgumentError as e: self.write({"arg_name": e.arg_name, "log_message": e.log_message}) def test_catch_error(self): response = self.fetch("/") self.assertEqual( json_decode(response.body), {"arg_name": "foo", "log_message": "Missing argument foo"}, ) class SetLazyPropertiesTest(SimpleHandlerTestCase): class Handler(RequestHandler): def prepare(self): self.current_user = "Ben" self.locale = locale.get("en_US") def get_user_locale(self): raise NotImplementedError() def get_current_user(self): raise NotImplementedError() def get(self): self.write("Hello %s (%s)" % (self.current_user, self.locale.code)) def test_set_properties(self): # Ensure that current_user can be assigned to normally for apps # that want to forgo the lazy get_current_user property response = self.fetch("/") self.assertEqual(response.body, b"Hello Ben (en_US)") class GetCurrentUserTest(WebTestCase): def get_app_kwargs(self): class WithoutUserModule(UIModule): def render(self): return "" class WithUserModule(UIModule): def render(self): return str(self.current_user) loader = DictLoader( { "without_user.html": "", "with_user.html": "{{ current_user }}", "without_user_module.html": "{% module WithoutUserModule() %}", "with_user_module.html": "{% module WithUserModule() %}", } ) return dict( template_loader=loader, ui_modules={ "WithUserModule": WithUserModule, "WithoutUserModule": WithoutUserModule, }, ) def tearDown(self): super(GetCurrentUserTest, self).tearDown() RequestHandler._template_loaders.clear() def get_handlers(self): class CurrentUserHandler(RequestHandler): def prepare(self): self.has_loaded_current_user = False def get_current_user(self): self.has_loaded_current_user = True return "" class WithoutUserHandler(CurrentUserHandler): def get(self): self.render_string("without_user.html") 
self.finish(str(self.has_loaded_current_user)) class WithUserHandler(CurrentUserHandler): def get(self): self.render_string("with_user.html") self.finish(str(self.has_loaded_current_user)) class CurrentUserModuleHandler(CurrentUserHandler): def get_template_namespace(self): # If RequestHandler.get_template_namespace is called, then # get_current_user is evaluated. Until #820 is fixed, this # is a small hack to circumvent the issue. return self.ui class WithoutUserModuleHandler(CurrentUserModuleHandler): def get(self): self.render_string("without_user_module.html") self.finish(str(self.has_loaded_current_user)) class WithUserModuleHandler(CurrentUserModuleHandler): def get(self): self.render_string("with_user_module.html") self.finish(str(self.has_loaded_current_user)) return [ ("/without_user", WithoutUserHandler), ("/with_user", WithUserHandler), ("/without_user_module", WithoutUserModuleHandler), ("/with_user_module", WithUserModuleHandler), ] @unittest.skip("needs fix") def test_get_current_user_is_lazy(self): # TODO: Make this test pass. See #820. 
response = self.fetch("/without_user") self.assertEqual(response.body, b"False") def test_get_current_user_works(self): response = self.fetch("/with_user") self.assertEqual(response.body, b"True") def test_get_current_user_from_ui_module_is_lazy(self): response = self.fetch("/without_user_module") self.assertEqual(response.body, b"False") def test_get_current_user_from_ui_module_works(self): response = self.fetch("/with_user_module") self.assertEqual(response.body, b"True") class UnimplementedHTTPMethodsTest(SimpleHandlerTestCase): class Handler(RequestHandler): pass def test_unimplemented_standard_methods(self): for method in ["HEAD", "GET", "DELETE", "OPTIONS"]: response = self.fetch("/", method=method) self.assertEqual(response.code, 405) for method in ["POST", "PUT"]: response = self.fetch("/", method=method, body=b"") self.assertEqual(response.code, 405) class UnimplementedNonStandardMethodsTest(SimpleHandlerTestCase): class Handler(RequestHandler): def other(self): # Even though this method exists, it won't get called automatically # because it is not in SUPPORTED_METHODS. self.write("other") def test_unimplemented_patch(self): # PATCH is recently standardized; Tornado supports it by default # but wsgiref.validate doesn't like it. 
response = self.fetch("/", method="PATCH", body=b"") self.assertEqual(response.code, 405) def test_unimplemented_other(self): response = self.fetch("/", method="OTHER", allow_nonstandard_methods=True) self.assertEqual(response.code, 405) class AllHTTPMethodsTest(SimpleHandlerTestCase): class Handler(RequestHandler): def method(self): self.write(self.request.method) get = delete = options = post = put = method # type: ignore def test_standard_methods(self): response = self.fetch("/", method="HEAD") self.assertEqual(response.body, b"") for method in ["GET", "DELETE", "OPTIONS"]: response = self.fetch("/", method=method) self.assertEqual(response.body, utf8(method)) for method in ["POST", "PUT"]: response = self.fetch("/", method=method, body=b"") self.assertEqual(response.body, utf8(method)) class PatchMethodTest(SimpleHandlerTestCase): class Handler(RequestHandler): SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ( # type: ignore "OTHER", ) def patch(self): self.write("patch") def other(self): self.write("other") def test_patch(self): response = self.fetch("/", method="PATCH", body=b"") self.assertEqual(response.body, b"patch") def test_other(self): response = self.fetch("/", method="OTHER", allow_nonstandard_methods=True) self.assertEqual(response.body, b"other") class FinishInPrepareTest(SimpleHandlerTestCase): class Handler(RequestHandler): def prepare(self): self.finish("done") def get(self): # It's difficult to assert for certain that a method did not # or will not be called in an asynchronous context, but this # will be logged noisily if it is reached. raise Exception("should not reach this method") def test_finish_in_prepare(self): response = self.fetch("/") self.assertEqual(response.body, b"done") class Default404Test(WebTestCase): def get_handlers(self): # If there are no handlers at all a default redirect handler gets added. 
return [("/foo", RequestHandler)] def test_404(self): response = self.fetch("/") self.assertEqual(response.code, 404) self.assertEqual( response.body, b"<html><title>404: Not Found</title>" b"<body>404: Not Found</body></html>", ) class Custom404Test(WebTestCase): def get_handlers(self): return [("/foo", RequestHandler)] def get_app_kwargs(self): class Custom404Handler(RequestHandler): def get(self): self.set_status(404) self.write("custom 404 response") return dict(default_handler_class=Custom404Handler) def test_404(self): response = self.fetch("/") self.assertEqual(response.code, 404) self.assertEqual(response.body, b"custom 404 response") class DefaultHandlerArgumentsTest(WebTestCase): def get_handlers(self): return [("/foo", RequestHandler)] def get_app_kwargs(self): return dict( default_handler_class=ErrorHandler, default_handler_args=dict(status_code=403), ) def test_403(self): response = self.fetch("/") self.assertEqual(response.code, 403) class HandlerByNameTest(WebTestCase): def get_handlers(self): # All three are equivalent. return [ ("/hello1", HelloHandler), ("/hello2", "tornado.test.web_test.HelloHandler"), url("/hello3", "tornado.test.web_test.HelloHandler"), ] def test_handler_by_name(self): resp = self.fetch("/hello1") self.assertEqual(resp.body, b"hello") resp = self.fetch("/hello2") self.assertEqual(resp.body, b"hello") resp = self.fetch("/hello3") self.assertEqual(resp.body, b"hello") class StreamingRequestBodyTest(WebTestCase): def get_handlers(self): @stream_request_body class StreamingBodyHandler(RequestHandler): def initialize(self, test): self.test = test def prepare(self): self.test.prepared.set_result(None) def data_received(self, data): self.test.data.set_result(data) def get(self): self.test.finished.set_result(None) self.write({}) @stream_request_body class EarlyReturnHandler(RequestHandler): def prepare(self): # If we finish the response in prepare, it won't continue to # the (non-existent) data_received. 
raise HTTPError(401) @stream_request_body class CloseDetectionHandler(RequestHandler): def initialize(self, test): self.test = test def on_connection_close(self): super(CloseDetectionHandler, self).on_connection_close() self.test.close_future.set_result(None) return [ ("/stream_body", StreamingBodyHandler, dict(test=self)), ("/early_return", EarlyReturnHandler), ("/close_detection", CloseDetectionHandler, dict(test=self)), ] def connect(self, url, connection_close): # Use a raw connection so we can control the sending of data. s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) s.connect(("127.0.0.1", self.get_http_port())) stream = IOStream(s) stream.write(b"GET " + url + b" HTTP/1.1\r\n") if connection_close: stream.write(b"Connection: close\r\n") stream.write(b"Transfer-Encoding: chunked\r\n\r\n") return stream @gen_test def test_streaming_body(self): self.prepared = Future() # type: Future[None] self.data = Future() # type: Future[bytes] self.finished = Future() # type: Future[None] stream = self.connect(b"/stream_body", connection_close=True) yield self.prepared stream.write(b"4\r\nasdf\r\n") # Ensure the first chunk is received before we send the second. data = yield self.data self.assertEqual(data, b"asdf") self.data = Future() stream.write(b"4\r\nqwer\r\n") data = yield self.data self.assertEquals(data, b"qwer") stream.write(b"0\r\n\r\n") yield self.finished data = yield stream.read_until_close() # This would ideally use an HTTP1Connection to read the response. 
self.assertTrue(data.endswith(b"{}")) stream.close() @gen_test def test_early_return(self): stream = self.connect(b"/early_return", connection_close=False) data = yield stream.read_until_close() self.assertTrue(data.startswith(b"HTTP/1.1 401")) @gen_test def test_early_return_with_data(self): stream = self.connect(b"/early_return", connection_close=False) stream.write(b"4\r\nasdf\r\n") data = yield stream.read_until_close() self.assertTrue(data.startswith(b"HTTP/1.1 401")) @gen_test def test_close_during_upload(self): self.close_future = Future() # type: Future[None] stream = self.connect(b"/close_detection", connection_close=False) stream.close() yield self.close_future # Each method in this handler returns a yieldable object and yields to the # IOLoop so the future is not immediately ready. Ensure that the # yieldables are respected and no method is called before the previous # one has completed. @stream_request_body class BaseFlowControlHandler(RequestHandler): def initialize(self, test): self.test = test self.method = None self.methods = [] # type: typing.List[str] @contextlib.contextmanager def in_method(self, method): if self.method is not None: self.test.fail("entered method %s while in %s" % (method, self.method)) self.method = method self.methods.append(method) try: yield finally: self.method = None @gen.coroutine def prepare(self): # Note that asynchronous prepare() does not block data_received, # so we don't use in_method here. self.methods.append("prepare") yield gen.moment @gen.coroutine def post(self): with self.in_method("post"): yield gen.moment self.write(dict(methods=self.methods)) class BaseStreamingRequestFlowControlTest(object): def get_httpserver_options(self): # Use a small chunk size so flow control is relevant even though # all the data arrives at once. return dict(chunk_size=10, decompress_request=True) def get_http_client(self): # simple_httpclient only: curl doesn't support body_producer. 
return SimpleAsyncHTTPClient() # Test all the slightly different code paths for fixed, chunked, etc bodies. def test_flow_control_fixed_body(self): response = self.fetch("/", body="abcdefghijklmnopqrstuvwxyz", method="POST") response.rethrow() self.assertEqual( json_decode(response.body), dict( methods=[ "prepare", "data_received", "data_received", "data_received", "post", ] ), ) def test_flow_control_chunked_body(self): chunks = [b"abcd", b"efgh", b"ijkl"] @gen.coroutine def body_producer(write): for i in chunks: yield write(i) response = self.fetch("/", body_producer=body_producer, method="POST") response.rethrow() self.assertEqual( json_decode(response.body), dict( methods=[ "prepare", "data_received", "data_received", "data_received", "post", ] ), ) def test_flow_control_compressed_body(self): bytesio = BytesIO() gzip_file = gzip.GzipFile(mode="w", fileobj=bytesio) gzip_file.write(b"abcdefghijklmnopqrstuvwxyz") gzip_file.close() compressed_body = bytesio.getvalue() response = self.fetch( "/", body=compressed_body, method="POST", headers={"Content-Encoding": "gzip"}, ) response.rethrow() self.assertEqual( json_decode(response.body), dict( methods=[ "prepare", "data_received", "data_received", "data_received", "post", ] ), ) class DecoratedStreamingRequestFlowControlTest( BaseStreamingRequestFlowControlTest, WebTestCase ): def get_handlers(self): class DecoratedFlowControlHandler(BaseFlowControlHandler): @gen.coroutine def data_received(self, data): with self.in_method("data_received"): yield gen.moment return [("/", DecoratedFlowControlHandler, dict(test=self))] class NativeStreamingRequestFlowControlTest( BaseStreamingRequestFlowControlTest, WebTestCase ): def get_handlers(self): class NativeFlowControlHandler(BaseFlowControlHandler): async def data_received(self, data): with self.in_method("data_received"): import asyncio await asyncio.sleep(0) return [("/", NativeFlowControlHandler, dict(test=self))] class IncorrectContentLengthTest(SimpleHandlerTestCase): 
def get_handlers(self): test = self self.server_error = None # Manually set a content-length that doesn't match the actual content. class TooHigh(RequestHandler): def get(self): self.set_header("Content-Length", "42") try: self.finish("ok") except Exception as e: test.server_error = e raise class TooLow(RequestHandler): def get(self): self.set_header("Content-Length", "2") try: self.finish("hello") except Exception as e: test.server_error = e raise return [("/high", TooHigh), ("/low", TooLow)] def test_content_length_too_high(self): # When the content-length is too high, the connection is simply # closed without completing the response. An error is logged on # the server. with ExpectLog(app_log, "(Uncaught exception|Exception in callback)"): with ExpectLog( gen_log, "(Cannot send error response after headers written" "|Failed to flush partial response)", ): with self.assertRaises(HTTPClientError): self.fetch("/high", raise_error=True) self.assertEqual( str(self.server_error), "Tried to write 40 bytes less than Content-Length" ) def test_content_length_too_low(self): # When the content-length is too low, the connection is closed # without writing the last chunk, so the client never sees the request # complete (which would be a framing error). with ExpectLog(app_log, "(Uncaught exception|Exception in callback)"): with ExpectLog( gen_log, "(Cannot send error response after headers written" "|Failed to flush partial response)", ): with self.assertRaises(HTTPClientError): self.fetch("/low", raise_error=True) self.assertEqual( str(self.server_error), "Tried to write more data than Content-Length" ) class ClientCloseTest(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): if self.request.version.startswith("HTTP/1"): # Simulate a connection closed by the client during # request processing. 
The client will see an error, but the # server should respond gracefully (without logging errors # because we were unable to write out as many bytes as # Content-Length said we would) self.request.connection.stream.close() self.write("hello") else: # TODO: add a HTTP2-compatible version of this test. self.write("requires HTTP/1.x") def test_client_close(self): with self.assertRaises((HTTPClientError, unittest.SkipTest)): response = self.fetch("/", raise_error=True) if response.body == b"requires HTTP/1.x": self.skipTest("requires HTTP/1.x") self.assertEqual(response.code, 599) class SignedValueTest(unittest.TestCase): SECRET = "It's a secret to everybody" SECRET_DICT = {0: "asdfbasdf", 1: "12312312", 2: "2342342"} def past(self): return self.present() - 86400 * 32 def present(self): return 1300000000 def test_known_values(self): signed_v1 = create_signed_value( SignedValueTest.SECRET, "key", "value", version=1, clock=self.present ) self.assertEqual( signed_v1, b"dmFsdWU=|1300000000|31c934969f53e48164c50768b40cbd7e2daaaa4f" ) signed_v2 = create_signed_value( SignedValueTest.SECRET, "key", "value", version=2, clock=self.present ) self.assertEqual( signed_v2, b"2|1:0|10:1300000000|3:key|8:dmFsdWU=|" b"3d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e152", ) signed_default = create_signed_value( SignedValueTest.SECRET, "key", "value", clock=self.present ) self.assertEqual(signed_default, signed_v2) decoded_v1 = decode_signed_value( SignedValueTest.SECRET, "key", signed_v1, min_version=1, clock=self.present ) self.assertEqual(decoded_v1, b"value") decoded_v2 = decode_signed_value( SignedValueTest.SECRET, "key", signed_v2, min_version=2, clock=self.present ) self.assertEqual(decoded_v2, b"value") def test_name_swap(self): signed1 = create_signed_value( SignedValueTest.SECRET, "key1", "value", clock=self.present ) signed2 = create_signed_value( SignedValueTest.SECRET, "key2", "value", clock=self.present ) # Try decoding each string with the other's "name" 
decoded1 = decode_signed_value( SignedValueTest.SECRET, "key2", signed1, clock=self.present ) self.assertIs(decoded1, None) decoded2 = decode_signed_value( SignedValueTest.SECRET, "key1", signed2, clock=self.present ) self.assertIs(decoded2, None) def test_expired(self): signed = create_signed_value( SignedValueTest.SECRET, "key1", "value", clock=self.past ) decoded_past = decode_signed_value( SignedValueTest.SECRET, "key1", signed, clock=self.past ) self.assertEqual(decoded_past, b"value") decoded_present = decode_signed_value( SignedValueTest.SECRET, "key1", signed, clock=self.present ) self.assertIs(decoded_present, None) def test_payload_tampering(self): # These cookies are variants of the one in test_known_values. sig = "3d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e152" def validate(prefix): return b"value" == decode_signed_value( SignedValueTest.SECRET, "key", prefix + sig, clock=self.present ) self.assertTrue(validate("2|1:0|10:1300000000|3:key|8:dmFsdWU=|")) # Change key version self.assertFalse(validate("2|1:1|10:1300000000|3:key|8:dmFsdWU=|")) # length mismatch (field too short) self.assertFalse(validate("2|1:0|10:130000000|3:key|8:dmFsdWU=|")) # length mismatch (field too long) self.assertFalse(validate("2|1:0|10:1300000000|3:keey|8:dmFsdWU=|")) def test_signature_tampering(self): prefix = "2|1:0|10:1300000000|3:key|8:dmFsdWU=|" def validate(sig): return b"value" == decode_signed_value( SignedValueTest.SECRET, "key", prefix + sig, clock=self.present ) self.assertTrue( validate("3d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e152") ) # All zeros self.assertFalse(validate("0" * 32)) # Change one character self.assertFalse( validate("4d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e152") ) # Change another character self.assertFalse( validate("3d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e153") ) # Truncate self.assertFalse( validate("3d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e15") ) 
# Lengthen self.assertFalse( validate( "3d4e60b996ff9c5d5788e333a0cba6f238a22c6c0f94788870e1a9ecd482e1538" ) ) def test_non_ascii(self): value = b"\xe9" signed = create_signed_value( SignedValueTest.SECRET, "key", value, clock=self.present ) decoded = decode_signed_value( SignedValueTest.SECRET, "key", signed, clock=self.present ) self.assertEqual(value, decoded) def test_key_versioning_read_write_default_key(self): value = b"\xe9" signed = create_signed_value( SignedValueTest.SECRET_DICT, "key", value, clock=self.present, key_version=0 ) decoded = decode_signed_value( SignedValueTest.SECRET_DICT, "key", signed, clock=self.present ) self.assertEqual(value, decoded) def test_key_versioning_read_write_non_default_key(self): value = b"\xe9" signed = create_signed_value( SignedValueTest.SECRET_DICT, "key", value, clock=self.present, key_version=1 ) decoded = decode_signed_value( SignedValueTest.SECRET_DICT, "key", signed, clock=self.present ) self.assertEqual(value, decoded) def test_key_versioning_invalid_key(self): value = b"\xe9" signed = create_signed_value( SignedValueTest.SECRET_DICT, "key", value, clock=self.present, key_version=0 ) newkeys = SignedValueTest.SECRET_DICT.copy() newkeys.pop(0) decoded = decode_signed_value(newkeys, "key", signed, clock=self.present) self.assertEqual(None, decoded) def test_key_version_retrieval(self): value = b"\xe9" signed = create_signed_value( SignedValueTest.SECRET_DICT, "key", value, clock=self.present, key_version=1 ) key_version = get_signature_key_version(signed) self.assertEqual(1, key_version) class XSRFTest(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): version = int(self.get_argument("version", "2")) # This would be a bad idea in a real app, but in this test # it's fine. 
self.settings["xsrf_cookie_version"] = version self.write(self.xsrf_token) def post(self): self.write("ok") def get_app_kwargs(self): return dict(xsrf_cookies=True) def setUp(self): super(XSRFTest, self).setUp() self.xsrf_token = self.get_token() def get_token(self, old_token=None, version=None): if old_token is not None: headers = self.cookie_headers(old_token) else: headers = None response = self.fetch( "/" if version is None else ("/?version=%d" % version), headers=headers ) response.rethrow() return native_str(response.body) def cookie_headers(self, token=None): if token is None: token = self.xsrf_token return {"Cookie": "_xsrf=" + token} def test_xsrf_fail_no_token(self): with ExpectLog(gen_log, ".*'_xsrf' argument missing"): response = self.fetch("/", method="POST", body=b"") self.assertEqual(response.code, 403) def test_xsrf_fail_body_no_cookie(self): with ExpectLog(gen_log, ".*XSRF cookie does not match POST"): response = self.fetch( "/", method="POST", body=urllib.parse.urlencode(dict(_xsrf=self.xsrf_token)), ) self.assertEqual(response.code, 403) def test_xsrf_fail_argument_invalid_format(self): with ExpectLog(gen_log, ".*'_xsrf' argument has invalid format"): response = self.fetch( "/", method="POST", headers=self.cookie_headers(), body=urllib.parse.urlencode(dict(_xsrf="3|")), ) self.assertEqual(response.code, 403) def test_xsrf_fail_cookie_invalid_format(self): with ExpectLog(gen_log, ".*XSRF cookie does not match POST"): response = self.fetch( "/", method="POST", headers=self.cookie_headers(token="3|"), body=urllib.parse.urlencode(dict(_xsrf=self.xsrf_token)), ) self.assertEqual(response.code, 403) def test_xsrf_fail_cookie_no_body(self): with ExpectLog(gen_log, ".*'_xsrf' argument missing"): response = self.fetch( "/", method="POST", body=b"", headers=self.cookie_headers() ) self.assertEqual(response.code, 403) def test_xsrf_success_short_token(self): response = self.fetch( "/", method="POST", body=urllib.parse.urlencode(dict(_xsrf="deadbeef")), 
headers=self.cookie_headers(token="deadbeef"), ) self.assertEqual(response.code, 200) def test_xsrf_success_non_hex_token(self): response = self.fetch( "/", method="POST", body=urllib.parse.urlencode(dict(_xsrf="xoxo")), headers=self.cookie_headers(token="xoxo"), ) self.assertEqual(response.code, 200) def test_xsrf_success_post_body(self): response = self.fetch( "/", method="POST", body=urllib.parse.urlencode(dict(_xsrf=self.xsrf_token)), headers=self.cookie_headers(), ) self.assertEqual(response.code, 200) def test_xsrf_success_query_string(self): response = self.fetch( "/?" + urllib.parse.urlencode(dict(_xsrf=self.xsrf_token)), method="POST", body=b"", headers=self.cookie_headers(), ) self.assertEqual(response.code, 200) def test_xsrf_success_header(self): response = self.fetch( "/", method="POST", body=b"", headers=dict( {"X-Xsrftoken": self.xsrf_token}, # type: ignore **self.cookie_headers() ), ) self.assertEqual(response.code, 200) def test_distinct_tokens(self): # Every request gets a distinct token. NUM_TOKENS = 10 tokens = set() for i in range(NUM_TOKENS): tokens.add(self.get_token()) self.assertEqual(len(tokens), NUM_TOKENS) def test_cross_user(self): token2 = self.get_token() # Each token can be used to authenticate its own request. for token in (self.xsrf_token, token2): response = self.fetch( "/", method="POST", body=urllib.parse.urlencode(dict(_xsrf=token)), headers=self.cookie_headers(token), ) self.assertEqual(response.code, 200) # Sending one in the cookie and the other in the body is not allowed. for cookie_token, body_token in ( (self.xsrf_token, token2), (token2, self.xsrf_token), ): with ExpectLog(gen_log, ".*XSRF cookie does not match POST"): response = self.fetch( "/", method="POST", body=urllib.parse.urlencode(dict(_xsrf=body_token)), headers=self.cookie_headers(cookie_token), ) self.assertEqual(response.code, 403) def test_refresh_token(self): token = self.xsrf_token tokens_seen = set([token]) # A user's token is stable over time. 
Refreshing the page in one tab # might update the cookie while an older tab still has the old cookie # in its DOM. Simulate this scenario by passing a constant token # in the body and re-querying for the token. for i in range(5): token = self.get_token(token) # Tokens are encoded uniquely each time tokens_seen.add(token) response = self.fetch( "/", method="POST", body=urllib.parse.urlencode(dict(_xsrf=self.xsrf_token)), headers=self.cookie_headers(token), ) self.assertEqual(response.code, 200) self.assertEqual(len(tokens_seen), 6) def test_versioning(self): # Version 1 still produces distinct tokens per request. self.assertNotEqual(self.get_token(version=1), self.get_token(version=1)) # Refreshed v1 tokens are all identical. v1_token = self.get_token(version=1) for i in range(5): self.assertEqual(self.get_token(v1_token, version=1), v1_token) # Upgrade to a v2 version of the same token v2_token = self.get_token(v1_token) self.assertNotEqual(v1_token, v2_token) # Each v1 token can map to many v2 tokens. self.assertNotEqual(v2_token, self.get_token(v1_token)) # The tokens are cross-compatible. 
for cookie_token, body_token in ((v1_token, v2_token), (v2_token, v1_token)): response = self.fetch( "/", method="POST", body=urllib.parse.urlencode(dict(_xsrf=body_token)), headers=self.cookie_headers(cookie_token), ) self.assertEqual(response.code, 200) class XSRFCookieKwargsTest(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): self.write(self.xsrf_token) def get_app_kwargs(self): return dict( xsrf_cookies=True, xsrf_cookie_kwargs=dict(httponly=True, expires_days=2) ) def test_xsrf_httponly(self): response = self.fetch("/") self.assertIn("httponly;", response.headers["Set-Cookie"].lower()) self.assertIn("expires=", response.headers["Set-Cookie"].lower()) header = response.headers.get("Set-Cookie") match = re.match(".*; expires=(?P<expires>.+);.*", header) assert match is not None expires = datetime.datetime.utcnow() + datetime.timedelta(days=2) parsed = email.utils.parsedate(match.groupdict()["expires"]) assert parsed is not None header_expires = datetime.datetime(*parsed[:6]) self.assertTrue(abs((expires - header_expires).total_seconds()) < 10) class FinishExceptionTest(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): self.set_status(401) self.set_header("WWW-Authenticate", 'Basic realm="something"') if self.get_argument("finish_value", ""): raise Finish("authentication required") else: self.write("authentication required") raise Finish() def test_finish_exception(self): for u in ["/", "/?finish_value=1"]: response = self.fetch(u) self.assertEqual(response.code, 401) self.assertEqual( 'Basic realm="something"', response.headers.get("WWW-Authenticate") ) self.assertEqual(b"authentication required", response.body) class DecoratorTest(WebTestCase): def get_handlers(self): class RemoveSlashHandler(RequestHandler): @removeslash def get(self): pass class AddSlashHandler(RequestHandler): @addslash def get(self): pass return [("/removeslash/", RemoveSlashHandler), ("/addslash", AddSlashHandler)] def test_removeslash(self): 
response = self.fetch("/removeslash/", follow_redirects=False) self.assertEqual(response.code, 301) self.assertEqual(response.headers["Location"], "/removeslash") response = self.fetch("/removeslash/?foo=bar", follow_redirects=False) self.assertEqual(response.code, 301) self.assertEqual(response.headers["Location"], "/removeslash?foo=bar") def test_addslash(self): response = self.fetch("/addslash", follow_redirects=False) self.assertEqual(response.code, 301) self.assertEqual(response.headers["Location"], "/addslash/") response = self.fetch("/addslash?foo=bar", follow_redirects=False) self.assertEqual(response.code, 301) self.assertEqual(response.headers["Location"], "/addslash/?foo=bar") class CacheTest(WebTestCase): def get_handlers(self): class EtagHandler(RequestHandler): def get(self, computed_etag): self.write(computed_etag) def compute_etag(self): return self._write_buffer[0] return [("/etag/(.*)", EtagHandler)] def test_wildcard_etag(self): computed_etag = '"xyzzy"' etags = "*" self._test_etag(computed_etag, etags, 304) def test_strong_etag_match(self): computed_etag = '"xyzzy"' etags = '"xyzzy"' self._test_etag(computed_etag, etags, 304) def test_multiple_strong_etag_match(self): computed_etag = '"xyzzy1"' etags = '"xyzzy1", "xyzzy2"' self._test_etag(computed_etag, etags, 304) def test_strong_etag_not_match(self): computed_etag = '"xyzzy"' etags = '"xyzzy1"' self._test_etag(computed_etag, etags, 200) def test_multiple_strong_etag_not_match(self): computed_etag = '"xyzzy"' etags = '"xyzzy1", "xyzzy2"' self._test_etag(computed_etag, etags, 200) def test_weak_etag_match(self): computed_etag = '"xyzzy1"' etags = 'W/"xyzzy1"' self._test_etag(computed_etag, etags, 304) def test_multiple_weak_etag_match(self): computed_etag = '"xyzzy2"' etags = 'W/"xyzzy1", W/"xyzzy2"' self._test_etag(computed_etag, etags, 304) def test_weak_etag_not_match(self): computed_etag = '"xyzzy2"' etags = 'W/"xyzzy1"' self._test_etag(computed_etag, etags, 200) def 
test_multiple_weak_etag_not_match(self): computed_etag = '"xyzzy3"' etags = 'W/"xyzzy1", W/"xyzzy2"' self._test_etag(computed_etag, etags, 200) def _test_etag(self, computed_etag, etags, status_code): response = self.fetch( "/etag/" + computed_etag, headers={"If-None-Match": etags} ) self.assertEqual(response.code, status_code) class RequestSummaryTest(SimpleHandlerTestCase): class Handler(RequestHandler): def get(self): # remote_ip is optional, although it's set by # both HTTPServer and WSGIAdapter. # Clobber it to make sure it doesn't break logging. self.request.remote_ip = None self.finish(self._request_summary()) def test_missing_remote_ip(self): resp = self.fetch("/") self.assertEqual(resp.body, b"GET / (None)") class HTTPErrorTest(unittest.TestCase): def test_copy(self): e = HTTPError(403, reason="Go away") e2 = copy.copy(e) self.assertIsNot(e, e2) self.assertEqual(e.status_code, e2.status_code) self.assertEqual(e.reason, e2.reason) class ApplicationTest(AsyncTestCase): def test_listen(self): app = Application([]) server = app.listen(0, address="127.0.0.1") server.stop() class URLSpecReverseTest(unittest.TestCase): def test_reverse(self): self.assertEqual("/favicon.ico", url(r"/favicon\.ico", None).reverse()) self.assertEqual("/favicon.ico", url(r"^/favicon\.ico$", None).reverse()) def test_non_reversible(self): # URLSpecs are non-reversible if they include non-constant # regex features outside capturing groups. Currently, this is # only strictly enforced for backslash-escaped character # classes. paths = [r"^/api/v\d+/foo/(\w+)$"] for path in paths: # A URLSpec can still be created even if it cannot be reversed. url_spec = url(path, None) try: result = url_spec.reverse() self.fail( "did not get expected exception when reversing %s. 
" "result: %s" % (path, result) ) except ValueError: pass def test_reverse_arguments(self): self.assertEqual( "/api/v1/foo/bar", url(r"^/api/v1/foo/(\w+)$", None).reverse("bar") ) class RedirectHandlerTest(WebTestCase): def get_handlers(self): return [ ("/src", WebRedirectHandler, {"url": "/dst"}), ("/src2", WebRedirectHandler, {"url": "/dst2?foo=bar"}), (r"/(.*?)/(.*?)/(.*)", WebRedirectHandler, {"url": "/{1}/{0}/{2}"}), ] def test_basic_redirect(self): response = self.fetch("/src", follow_redirects=False) self.assertEqual(response.code, 301) self.assertEqual(response.headers["Location"], "/dst") def test_redirect_with_argument(self): response = self.fetch("/src?foo=bar", follow_redirects=False) self.assertEqual(response.code, 301) self.assertEqual(response.headers["Location"], "/dst?foo=bar") def test_redirect_with_appending_argument(self): response = self.fetch("/src2?foo2=bar2", follow_redirects=False) self.assertEqual(response.code, 301) self.assertEqual(response.headers["Location"], "/dst2?foo=bar&foo2=bar2") def test_redirect_pattern(self): response = self.fetch("/a/b/c", follow_redirects=False) self.assertEqual(response.code, 301) self.assertEqual(response.headers["Location"], "/b/a/c")
apache-2.0
LecomteEmerick/Essentia-build
.waf-1.7.9-16e1644c17ba46b94844133bac6e2a8c/waflib/Tools/xlcxx.py
330
1222
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file from waflib.Tools import ccroot,ar from waflib.Configure import conf @conf def find_xlcxx(conf): cxx=conf.find_program(['xlc++_r','xlc++'],var='CXX') cxx=conf.cmd_to_list(cxx) conf.get_xlc_version(cxx) conf.env.CXX_NAME='xlc++' conf.env.CXX=cxx @conf def xlcxx_common_flags(conf): v=conf.env v['CXX_SRC_F']=[] v['CXX_TGT_F']=['-c','-o'] if not v['LINK_CXX']:v['LINK_CXX']=v['CXX'] v['CXXLNK_SRC_F']=[] v['CXXLNK_TGT_F']=['-o'] v['CPPPATH_ST']='-I%s' v['DEFINES_ST']='-D%s' v['LIB_ST']='-l%s' v['LIBPATH_ST']='-L%s' v['STLIB_ST']='-l%s' v['STLIBPATH_ST']='-L%s' v['RPATH_ST']='-Wl,-rpath,%s' v['SONAME_ST']=[] v['SHLIB_MARKER']=[] v['STLIB_MARKER']=[] v['LINKFLAGS_cxxprogram']=['-Wl,-brtl'] v['cxxprogram_PATTERN']='%s' v['CXXFLAGS_cxxshlib']=['-fPIC'] v['LINKFLAGS_cxxshlib']=['-G','-Wl,-brtl,-bexpfull'] v['cxxshlib_PATTERN']='lib%s.so' v['LINKFLAGS_cxxstlib']=[] v['cxxstlib_PATTERN']='lib%s.a' def configure(conf): conf.find_xlcxx() conf.find_ar() conf.xlcxx_common_flags() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags()
agpl-3.0
Dandandan/wikiprogramming
jsrepl/build/extern/python/unclosured/lib/python2.7/unittest/runner.py
109
6502
"""Running tests""" import sys import time from . import result from .signals import registerResult __unittest = True class _WritelnDecorator(object): """Used to decorate file-like objects with a handy 'writeln' method""" def __init__(self,stream): self.stream = stream def __getattr__(self, attr): if attr in ('stream', '__getstate__'): raise AttributeError(attr) return getattr(self.stream,attr) def writeln(self, arg=None): if arg: self.write(arg) self.write('\n') # text-mode streams translate to \r\n if needed class TextTestResult(result.TestResult): """A test result class that can print formatted text results to a stream. Used by TextTestRunner. """ separator1 = '=' * 70 separator2 = '-' * 70 def __init__(self, stream, descriptions, verbosity): super(TextTestResult, self).__init__() self.stream = stream self.showAll = verbosity > 1 self.dots = verbosity == 1 self.descriptions = descriptions def getDescription(self, test): doc_first_line = test.shortDescription() if self.descriptions and doc_first_line: return '\n'.join((str(test), doc_first_line)) else: return str(test) def startTest(self, test): super(TextTestResult, self).startTest(test) if self.showAll: self.stream.write(self.getDescription(test)) self.stream.write(" ... 
") self.stream.flush() def addSuccess(self, test): super(TextTestResult, self).addSuccess(test) if self.showAll: self.stream.writeln("ok") elif self.dots: self.stream.write('.') self.stream.flush() def addError(self, test, err): super(TextTestResult, self).addError(test, err) if self.showAll: self.stream.writeln("ERROR") elif self.dots: self.stream.write('E') self.stream.flush() def addFailure(self, test, err): super(TextTestResult, self).addFailure(test, err) if self.showAll: self.stream.writeln("FAIL") elif self.dots: self.stream.write('F') self.stream.flush() def addSkip(self, test, reason): super(TextTestResult, self).addSkip(test, reason) if self.showAll: self.stream.writeln("skipped {0!r}".format(reason)) elif self.dots: self.stream.write("s") self.stream.flush() def addExpectedFailure(self, test, err): super(TextTestResult, self).addExpectedFailure(test, err) if self.showAll: self.stream.writeln("expected failure") elif self.dots: self.stream.write("x") self.stream.flush() def addUnexpectedSuccess(self, test): super(TextTestResult, self).addUnexpectedSuccess(test) if self.showAll: self.stream.writeln("unexpected success") elif self.dots: self.stream.write("u") self.stream.flush() def printErrors(self): if self.dots or self.showAll: self.stream.writeln() self.printErrorList('ERROR', self.errors) self.printErrorList('FAIL', self.failures) def printErrorList(self, flavour, errors): for test, err in errors: self.stream.writeln(self.separator1) self.stream.writeln("%s: %s" % (flavour,self.getDescription(test))) self.stream.writeln(self.separator2) self.stream.writeln("%s" % err) class TextTestRunner(object): """A test runner class that displays results in textual form. It prints out the names of tests as they are run, errors as they occur, and a summary of the results at the end of the test run. 
""" resultclass = TextTestResult def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1, failfast=False, buffer=False, resultclass=None): self.stream = _WritelnDecorator(stream) self.descriptions = descriptions self.verbosity = verbosity self.failfast = failfast self.buffer = buffer if resultclass is not None: self.resultclass = resultclass def _makeResult(self): return self.resultclass(self.stream, self.descriptions, self.verbosity) def run(self, test): "Run the given test case or test suite." result = self._makeResult() registerResult(result) result.failfast = self.failfast result.buffer = self.buffer startTime = time.time() startTestRun = getattr(result, 'startTestRun', None) if startTestRun is not None: startTestRun() try: test(result) finally: stopTestRun = getattr(result, 'stopTestRun', None) if stopTestRun is not None: stopTestRun() stopTime = time.time() timeTaken = stopTime - startTime result.printErrors() if hasattr(result, 'separator2'): self.stream.writeln(result.separator2) run = result.testsRun self.stream.writeln("Ran %d test%s in %.3fs" % (run, run != 1 and "s" or "", timeTaken)) self.stream.writeln() expectedFails = unexpectedSuccesses = skipped = 0 try: results = map(len, (result.expectedFailures, result.unexpectedSuccesses, result.skipped)) except AttributeError: pass else: expectedFails, unexpectedSuccesses, skipped = results infos = [] if not result.wasSuccessful(): self.stream.write("FAILED") failed, errored = map(len, (result.failures, result.errors)) if failed: infos.append("failures=%d" % failed) if errored: infos.append("errors=%d" % errored) else: self.stream.write("OK") if skipped: infos.append("skipped=%d" % skipped) if expectedFails: infos.append("expected failures=%d" % expectedFails) if unexpectedSuccesses: infos.append("unexpected successes=%d" % unexpectedSuccesses) if infos: self.stream.writeln(" (%s)" % (", ".join(infos),)) else: self.stream.write("\n") return result
mit
NejcZupec/ggrc-core
src/ggrc_basic_permissions/migrations/versions/20131210004352_1a22bb208258_auditors_have_docume.py
7
1448
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>

"""Auditors have document and meeting permissions in audit context.

Revision ID: 1a22bb208258
Revises: 1f865f61312
Create Date: 2013-12-10 00:43:52.151598

"""

# revision identifiers, used by Alembic.
revision = '1a22bb208258'
down_revision = '1f865f61312'

import json

import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import table, column, select

# Minimal description of the ``roles`` table: only the columns this
# migration reads and writes.
roles_table = table('roles',
    column('id', sa.Integer),
    column('name', sa.String),
    column('permissions_json', sa.String)
    )

# Resource types the Auditor role gains read access to in this revision.
_NEW_READABLE = ('Document', 'Meeting')


def get_role_permissions(role):
    """Return the stored permissions mapping for the named role.

    Args:
        role: role name to look up in the ``roles`` table.

    Returns:
        The decoded ``permissions_json`` dict for that role.
    """
    connection = op.get_bind()
    row = connection.execute(
        select([roles_table.c.permissions_json])\
            .where(roles_table.c.name == role)).fetchone()
    return json.loads(row.permissions_json)


def update_role_permissions(role, permissions):
    """Persist ``permissions`` (JSON-encoded) for the named role."""
    op.execute(roles_table\
        .update()\
        .values(permissions_json=json.dumps(permissions))\
        .where(roles_table.c.name == role))


def upgrade():
    """Grant the Auditor role read access to Document and Meeting."""
    permissions = get_role_permissions('Auditor')
    # Append only resources not already present so re-running the upgrade
    # does not accumulate duplicate entries in the read list.
    permissions['read'].extend(
        resource for resource in _NEW_READABLE
        if resource not in permissions['read'])
    update_role_permissions('Auditor', permissions)


def downgrade():
    """Revoke the Document/Meeting read access granted by upgrade()."""
    permissions = get_role_permissions('Auditor')
    for resource in _NEW_READABLE:
        # Guard each removal: a bare list.remove() raises ValueError when the
        # entry is missing (e.g. after a partially applied upgrade), which
        # would abort the downgrade midway.
        if resource in permissions['read']:
            permissions['read'].remove(resource)
    update_role_permissions('Auditor', permissions)
apache-2.0
defionscode/ansible
test/units/modules/network/exos/test_exos_config.py
30
10656
# # (c) 2018 Extreme Networks Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type from units.compat.mock import patch from ansible.modules.network.exos import exos_config from units.modules.utils import set_module_args from .exos_module import TestExosModule, load_fixture class TestExosConfigModule(TestExosModule): module = exos_config def setUp(self): super(TestExosConfigModule, self).setUp() self.mock_get_config = patch('ansible.modules.network.exos.exos_config.get_config') self.get_config = self.mock_get_config.start() self.mock_load_config = patch('ansible.modules.network.exos.exos_config.load_config') self.load_config = self.mock_load_config.start() self.mock_run_commands = patch('ansible.modules.network.exos.exos_config.run_commands') self.run_commands = self.mock_run_commands.start() def tearDown(self): super(TestExosConfigModule, self).tearDown() self.mock_get_config.stop() self.mock_load_config.stop() self.mock_run_commands.stop() def load_fixtures(self, commands=None): config_file = 'exos_config_config.cfg' self.get_config.return_value = load_fixture(config_file) self.load_config.return_value = None def test_exos_config_unchanged(self): src = load_fixture('exos_config_config.cfg') set_module_args(dict(src=src)) self.execute_module() def test_exos_config_src(self): src = 
load_fixture('exos_config_src.cfg') set_module_args(dict(src=src)) commands = ['configure ports 1 description-string "IDS"', 'configure snmp sysName "marble"'] self.execute_module(changed=True, commands=commands) def test_exos_config_backup(self): set_module_args(dict(backup=True)) result = self.execute_module() self.assertIn('__backup__', result) def test_exos_config_save_always(self): self.run_commands.return_value = 'configure snmp sysName "marble"' set_module_args(dict(save_when='always')) self.execute_module(changed=True) self.assertEqual(self.run_commands.call_count, 1) self.assertEqual(self.get_config.call_count, 0) self.assertEqual(self.load_config.call_count, 0) args = self.run_commands.call_args[0][1] self.assertIn('save configuration', args['command']) def test_exos_config_save_changed_true(self): src = load_fixture('exos_config_src.cfg') set_module_args(dict(src=src, save_when='changed')) commands = ['configure ports 1 description-string "IDS"', 'configure snmp sysName "marble"'] self.execute_module(changed=True, commands=commands) self.assertEqual(self.run_commands.call_count, 1) self.assertEqual(self.get_config.call_count, 1) self.assertEqual(self.load_config.call_count, 1) args = self.run_commands.call_args[0][1] self.assertIn('save configuration', args['command']) def test_exos_config_save_changed_true_check_mode(self): src = load_fixture('exos_config_src.cfg') set_module_args(dict(src=src, save_when='changed', _ansible_check_mode=True)) commands = ['configure ports 1 description-string "IDS"', 'configure snmp sysName "marble"'] self.execute_module(changed=True, commands=commands) self.assertEqual(self.run_commands.call_count, 0) self.assertEqual(self.get_config.call_count, 1) self.assertEqual(self.load_config.call_count, 0) def test_exos_config_save_changed_false(self): set_module_args(dict(save_when='changed')) self.execute_module(changed=False) self.assertEqual(self.run_commands.call_count, 0) self.assertEqual(self.get_config.call_count, 0) 
self.assertEqual(self.load_config.call_count, 0) def test_exos_config_save_modified_false(self): mock_get_startup_config_text = patch('ansible.modules.network.exos.exos_config.get_startup_config_text') get_startup_config_text = mock_get_startup_config_text.start() get_startup_config_text.return_value = load_fixture('exos_config_config.cfg') set_module_args(dict(save_when='modified')) self.execute_module(changed=False) self.assertEqual(self.run_commands.call_count, 0) self.assertEqual(self.get_config.call_count, 1) self.assertEqual(get_startup_config_text.call_count, 1) self.assertEqual(self.load_config.call_count, 0) mock_get_startup_config_text.stop() def test_exos_config_save_modified_true(self): mock_get_startup_config_text = patch('ansible.modules.network.exos.exos_config.get_startup_config_text') get_startup_config_text = mock_get_startup_config_text.start() get_startup_config_text.return_value = load_fixture('exos_config_modified.cfg') set_module_args(dict(save_when='modified')) self.execute_module(changed=True) self.assertEqual(self.run_commands.call_count, 1) self.assertTrue(self.get_config.call_count > 0) self.assertEqual(get_startup_config_text.call_count, 1) self.assertEqual(self.load_config.call_count, 0) mock_get_startup_config_text.stop() def test_exos_config_lines(self): set_module_args(dict(lines=['configure snmp sysName "marble"'])) commands = ['configure snmp sysName "marble"'] self.execute_module(changed=True, commands=commands) def test_exos_config_before(self): set_module_args(dict(lines=['configure snmp sysName "marble"'], before=['test1', 'test2'])) commands = ['test1', 'test2', 'configure snmp sysName "marble"'] self.execute_module(changed=True, commands=commands, sort=False) def test_exos_config_after(self): set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2'])) commands = ['hostname foo', 'test1', 'test2'] set_module_args(dict(lines=['configure snmp sysName "marble"'], after=['test1', 'test2'])) commands = ['configure snmp 
sysName "marble"', 'test1', 'test2'] self.execute_module(changed=True, commands=commands, sort=False) def test_exos_config_before_after_no_change(self): set_module_args(dict(lines=['configure snmp sysName "x870"'], before=['test1', 'test2'], after=['test3', 'test4'])) self.execute_module() def test_exos_config_config(self): config = 'hostname localhost' set_module_args(dict(lines=['configure snmp sysName "x870"'], config=config)) commands = ['configure snmp sysName "x870"'] self.execute_module(changed=True, commands=commands) def test_exos_config_match_none(self): lines = ['configure snmp sysName "x870"'] set_module_args(dict(lines=lines, match='none')) self.execute_module(changed=True, commands=lines) def test_exos_config_src_and_lines_fails(self): args = dict(src='foo', lines='foo') set_module_args(args) self.execute_module(failed=True) def test_exos_config_match_exact_requires_lines(self): args = dict(match='exact') set_module_args(args) self.execute_module(failed=True) def test_exos_config_match_strict_requires_lines(self): args = dict(match='strict') set_module_args(args) self.execute_module(failed=True) def test_exos_config_replace_block_requires_lines(self): args = dict(replace='block') set_module_args(args) self.execute_module(failed=True) def test_exos_config_replace_config_requires_src(self): args = dict(replace='config') set_module_args(args) self.execute_module(failed=True) def test_exos_diff_running_unchanged(self): args = dict(diff_against='running', _ansible_diff=True) set_module_args(args) self.execute_module(changed=False) def test_exos_diff_running_unchanged_check(self): args = dict(diff_against='running', _ansible_diff=True, _ansible_check_mode=True) set_module_args(args) self.execute_module(changed=False) def test_exos_diff_startup_unchanged(self): mock_get_startup_config_text = patch('ansible.modules.network.exos.exos_config.get_startup_config_text') get_startup_config_text = mock_get_startup_config_text.start() 
get_startup_config_text.return_value = load_fixture('exos_config_config.cfg') args = dict(diff_against='startup', _ansible_diff=True) set_module_args(args) self.execute_module(changed=False) self.assertEqual(get_startup_config_text.call_count, 1) mock_get_startup_config_text.stop() def test_exos_diff_startup_changed(self): mock_get_startup_config_text = patch('ansible.modules.network.exos.exos_config.get_startup_config_text') get_startup_config_text = mock_get_startup_config_text.start() get_startup_config_text.return_value = load_fixture('exos_config_modified.cfg') args = dict(diff_against='startup', _ansible_diff=True) set_module_args(args) self.execute_module(changed=True) self.assertEqual(get_startup_config_text.call_count, 1) mock_get_startup_config_text.stop() def test_exos_diff_intended_unchanged(self): args = dict(diff_against='intended', intended_config=load_fixture('exos_config_config.cfg'), _ansible_diff=True) set_module_args(args) self.execute_module(changed=False) def test_exos_diff_intended_modified(self): args = dict(diff_against='intended', intended_config=load_fixture('exos_config_modified.cfg'), _ansible_diff=True) set_module_args(args) self.execute_module(changed=True)
gpl-3.0
spininertia/graph-mining-rdbms
src/phase-2/src/eigenvalue/lanczos.py
1
3785
from common.basic_operation import * from eigen_quodratic import * def lanczos(A, b, n, m, conn): """ A: n X n matrix b: initial vector m: number of steps """ beta = "beta" v = ["v%s" % i for i in range(m+2)] # TODO: why this fixed? v_tmp = "v_tmp" alpha = "alpha" create_vector_or_matrix(beta, conn) # just empty for v_table in v: create_vector_or_matrix(v_table, conn) # just empty create_vector_or_matrix(v_tmp, conn) # just empty create_vector_or_matrix(alpha, conn) # just empty initilizat_vector(beta, m+1, conn) # beta_0 = 0 initilizat_vector(v[0], n, conn) # v_0 = {0} initilizat_vector(alpha, m+1, conn) create_vector_or_matrix(v[1], conn) # empty v1 assign_to(b, v[1], conn) # v_1 = b normalize_vector(v[1], conn) # v_1 = b/|b| for i in range(1, m+1): print "Iteration: %s" % i matrix_multiply_vector_overwrite(A, v[i], v_tmp, conn) # v = A * v_i alpha_i = vector_dot_product(v[i], v_tmp, conn) # alpha_i = v_i * v set_matrix(alpha, i, 0, alpha_i, conn) cur = conn.cursor() s = cur.mogrify(""" update %s set value = value - (select value from %s where row = %s) * (select value from %s where row = %s.row) - (select value from %s where row = %s) * (select value from %s where row = %s.row)""" % \ (v_tmp, beta, i-1, v[i-1], v_tmp, alpha, i, v[i], v_tmp)) print s cur.execute(""" update %s set value = value - (select value from %s where row = %s) * (select value from %s where row = %s.row) - (select value from %s where row = %s) * (select value from %s where row = %s.row)""" % \ (v_tmp, beta, i-1, v[i-1], v_tmp, alpha, i, v[i], v_tmp)) vl = vector_length(v_tmp, conn) # |v| print "-> %s\n" % (vl) set_matrix(beta, i, 0, vl, conn) # beta_i = |v| if (vl == 0): break create_vector_or_matrix(v[i+1], conn) # just empty assign_to(v_tmp, v[i+1], conn) # v_i+1 = v cur = conn.cursor() cur.execute("update %s set value = value / (select value from %s where row = %s)" % (v[i+1], beta, i)) # copy v into a matrix, they are still separate vector now # seems like V is useless, so I would not 
bother transform it. ritz_vector(alpha, beta, m, conn) def ritz_vector(alpha, beta, m, conn): """ solve eigenvector for a smaller martix """ t = "t" build_tridiagonal_matrix(alpha, beta, m, t, conn) eigen_quodratic(t, 'eigenvec', 'eigenval', m, conn) print "Eigenvector calculated, they are stored in %s and %s" % ('eigenval', 'eigenvec') def build_tridiagonal_matrix(alpha, beta, m, t, conn): create_vector_or_matrix(t, conn) # just empty cur = conn.cursor() for i in range(1, m+1): for j in range(1, m+1): if j == i: # print cur.mogrify("insert into %s select %s, %s, value from %s where row = %s" % (t, i-1, i-1, alpha, i)) # T[i, i] <- alpha_i cur.execute("insert into %s select %s, %s, value from %s where row = %s" % (t, i-1, i-1, alpha, i)) # T[i, i] <- alpha_i elif j == i + 1: cur.execute("insert into %s select %s, %s, value from %s where row = %s" % (t, i-1, j-1, beta, i)) elif i - 1 >= 1 and j == i - 1: cur.execute("insert into %s select %s, %s, value from %s where row = %s" % (t, i-1, i-2, beta, i-1)) else: # print cur.mogrify("insert into %s values (%s, %s, 0.0)" % (t, i-1, j-1)) cur.execute("insert into %s values (%s, %s, 0.0)" % (t, i-1, j-1)) conn.commit()
mit
chemelnucfin/tensorflow
tensorflow/contrib/eager/python/examples/revnet/cifar_tfrecords.py
32
5089
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Read CIFAR data from pickled numpy arrays and writes TFRecords. Generates tf.train.Example protos and writes them to TFRecord files from the python version of the CIFAR dataset downloaded from https://www.cs.toronto.edu/~kriz/cifar.html. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import tarfile from absl import flags from six.moves import cPickle as pickle from six.moves import urllib import tensorflow as tf BASE_URL = 'https://www.cs.toronto.edu/~kriz/' CIFAR_FILE_NAMES = ['cifar-10-python.tar.gz', 'cifar-100-python.tar.gz'] CIFAR_DOWNLOAD_URLS = [BASE_URL + name for name in CIFAR_FILE_NAMES] CIFAR_LOCAL_FOLDERS = ['cifar-10', 'cifar-100'] EXTRACT_FOLDERS = ['cifar-10-batches-py', 'cifar-100-python'] def download_and_extract(data_dir, file_name, url): """Download CIFAR if not already downloaded.""" filepath = os.path.join(data_dir, file_name) if tf.gfile.Exists(filepath): return filepath if not tf.gfile.Exists(data_dir): tf.gfile.MakeDirs(data_dir) urllib.request.urlretrieve(url, filepath) tarfile.open(os.path.join(filepath), 'r:gz').extractall(data_dir) return filepath def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _bytes_feature(value): return 
tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _get_file_names(folder): """Returns the file names expected to exist in the input_dir.""" assert folder in ['cifar-10', 'cifar-100'] file_names = {} if folder == 'cifar-10': file_names['train'] = ['data_batch_%d' % i for i in range(1, 5)] file_names['validation'] = ['data_batch_5'] file_names['train_all'] = ['data_batch_%d' % i for i in range(1, 6)] file_names['test'] = ['test_batch'] else: file_names['train_all'] = ['train'] file_names['test'] = ['test'] # Split in `convert_to_tfrecord` function file_names['train'] = ['train'] file_names['validation'] = ['train'] return file_names def read_pickle_from_file(filename): with tf.gfile.Open(filename, 'rb') as f: if sys.version_info >= (3, 0): data_dict = pickle.load(f, encoding='bytes') else: data_dict = pickle.load(f) return data_dict def convert_to_tfrecord(input_files, output_file, folder): """Converts files with pickled data to TFRecords.""" assert folder in ['cifar-10', 'cifar-100'] print('Generating %s' % output_file) with tf.python_io.TFRecordWriter(output_file) as record_writer: for input_file in input_files: data_dict = read_pickle_from_file(input_file) data = data_dict[b'data'] try: labels = data_dict[b'labels'] except KeyError: labels = data_dict[b'fine_labels'] if folder == 'cifar-100' and input_file.endswith('train.tfrecords'): data = data[:40000] labels = labels[:40000] elif folder == 'cifar-100' and input_file.endswith( 'validation.tfrecords'): data = data[40000:] labels = labels[40000:] num_entries_in_batch = len(labels) for i in range(num_entries_in_batch): example = tf.train.Example( features=tf.train.Features( feature={ 'image': _bytes_feature(data[i].tobytes()), 'label': _int64_feature(labels[i]) })) record_writer.write(example.SerializeToString()) def main(_): for file_name, url, folder, extract_folder in zip( CIFAR_FILE_NAMES, CIFAR_DOWNLOAD_URLS, CIFAR_LOCAL_FOLDERS, EXTRACT_FOLDERS): print('Download from {} and 
extract.'.format(url)) data_dir = os.path.join(FLAGS.data_dir, folder) download_and_extract(data_dir, file_name, url) file_names = _get_file_names(folder) input_dir = os.path.join(data_dir, extract_folder) for mode, files in file_names.items(): input_files = [os.path.join(input_dir, f) for f in files] output_file = os.path.join(data_dir, mode + '.tfrecords') try: os.remove(output_file) except OSError: pass convert_to_tfrecord(input_files, output_file, folder) print('Done!') if __name__ == '__main__': FLAGS = flags.FLAGS flags.DEFINE_string( 'data_dir', default=None, help='Directory to download, extract and store TFRecords.') tf.app.run(main)
apache-2.0
AkA84/edx-platform
lms/djangoapps/certificates/migrations/0019_auto__add_certificatehtmlviewconfiguration.py
101
7978
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'CertificateHtmlViewConfiguration' db.create_table('certificates_certificatehtmlviewconfiguration', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)), ('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)), ('configuration', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal('certificates', ['CertificateHtmlViewConfiguration']) def backwards(self, orm): # Deleting model 'CertificateHtmlViewConfiguration' db.delete_table('certificates_certificatehtmlviewconfiguration') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 
'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'certificates.certificategenerationconfiguration': { 'Meta': {'object_name': 'CertificateGenerationConfiguration'}, 'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}), 'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'certificates.certificatehtmlviewconfiguration': { 'Meta': {'object_name': 'CertificateHtmlViewConfiguration'}, 'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}), 'configuration': ('django.db.models.fields.TextField', [], {}), 'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'certificates.certificatewhitelist': { 'Meta': {'object_name': 'CertificateWhitelist'}, 'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'whitelist': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'certificates.generatedcertificate': { 'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'}, 'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}), 'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}), 'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}), 'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}), 'error_reason': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}), 'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}), 'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '32'}), 'modified_date': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['certificates']
agpl-3.0
glass-bead-labs/data-connector
github-api/issue-tracker.py
2
3064
import requests import json from datetime import datetime import time GITHUB_USERNAME = 'glass-bead-labs' GITHUB_REPO = 'sensor-group' with open('GitHubAuthKey.txt') as f: key = f.readline().strip() print(key) #Runs updateASBase every 10 seconds def main(): while True: updateASBase(); time.sleep(10); """ Grabs issues from the GITHUB_REPO repository under GITHUB_USERNAME and posts them on ASbase. """ def updateASBase(): last_posted_timestamp = getLastPostTime('https://github.com/repos/' + GITHUB_USERNAME + '/' + GITHUB_REPO) updated_issues = getUpdatedIssues(last_posted_timestamp) #Retrieve repository information for issues repo_request_url = 'https://api.github.com/repos/' + GITHUB_USERNAME + '/' + GITHUB_REPO repo_response = requests.get(url=repo_request_url, auth=(key, '')).json() # repo_response = requests.get(repo_request_url).json() for issue in updated_issues: verb = getVerb(issue) body = {'published': issue['created_at'], 'verb': verb, 'timestamp' : str(datetime.now())} body['actor'] = { 'url' : issue['user']['html_url'], 'objectType' : issue['user']['type'], 'id' : issue['user']['id'], 'displayName' : issue['user']['login'] } body['object'] = { 'url' : issue['html_url'], 'id' : issue['id'] } body['target'] = { 'url' : 'https://github.com/repos/' + GITHUB_USERNAME + '/' + GITHUB_REPO, 'objectType' : 'repository', 'id': repo_response['id'], 'displayName': repo_response['full_name'] } converted = json.dumps(body) asbase_url = 'http://russet.ischool.berkeley.edu:8080/activities' requests.post(url=asbase_url, data=converted) """ Goes to ASBase and gets a timestamp of the last time this repo was updated. 
""" def getLastPostTime(repo_url): query = { "target.url" : { "$in" : [ repo_url ] } } converted_query = json.dumps(query) #print(converted_query) response = requests.post(url='http://russet.ischool.berkeley.edu:8080/query', data=converted_query).json() #print('response: ' + str(response)) if response['totalItems'] == 0: return None else: #print(response['items'][0]['timestamp']) return response['items'][0]['timestamp'] """ Goes to Github and grabs issues that have been updated since the timestamp provided. """ def getUpdatedIssues(timestamp): if not timestamp: issue_request_url = 'https://api.github.com/repos/' + GITHUB_USERNAME + '/' + GITHUB_REPO + '/issues' return requests.get(url=issue_request_url, auth=(key, '')).json() # return requests.get(issue_request_url).json() else: issue_request_url = 'https://api.github.com/repos/' + GITHUB_USERNAME + '/' + GITHUB_REPO + '/issues?since='+ timestamp + '&state=all' return requests.get(url=issue_request_url, auth=(key, '')).json() # return requests.get(issue_request_url).json() """ Sets the verb attribute of the ASBase record based on whether the issue was created, updated, or closed. """ def getVerb(issue): if issue['state'] == "closed": return "close" elif issue['updated_at'] != issue['created_at']: return "update" else: return "create" main()
isc
canhhs91/greenpointtrees
src/oscar/apps/shipping/migrations/0001_initial.py
119
4281
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import oscar.models.fields.autoslugfield from decimal import Decimal import django.core.validators class Migration(migrations.Migration): dependencies = [ ('address', '0001_initial'), ] operations = [ migrations.CreateModel( name='OrderAndItemCharges', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('code', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Slug', max_length=128, editable=False, blank=True)), ('name', models.CharField(unique=True, max_length=128, verbose_name='Name')), ('description', models.TextField(verbose_name='Description', blank=True)), ('price_per_order', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Price per order')), ('price_per_item', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Price per item')), ('free_shipping_threshold', models.DecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Free Shipping', null=True)), ('countries', models.ManyToManyField(blank=True, verbose_name='Countries', to='address.Country', null=True)), ], options={ 'ordering': ['name'], 'verbose_name_plural': 'Order and Item Charges', 'verbose_name': 'Order and Item Charge', 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='WeightBand', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('upper_limit', models.DecimalField(verbose_name='Upper Limit', decimal_places=3, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], help_text='Enter upper limit of this weight band in kg. 
The lower limit will be determined by the other weight bands.', max_digits=12)), ('charge', models.DecimalField(max_digits=12, decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], verbose_name='Charge')), ], options={ 'ordering': ['method', 'upper_limit'], 'verbose_name_plural': 'Weight Bands', 'verbose_name': 'Weight Band', 'abstract': False, }, bases=(models.Model,), ), migrations.CreateModel( name='WeightBased', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('code', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Slug', max_length=128, editable=False, blank=True)), ('name', models.CharField(unique=True, max_length=128, verbose_name='Name')), ('description', models.TextField(verbose_name='Description', blank=True)), ('default_weight', models.DecimalField(validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], verbose_name='Default Weight', default=Decimal('0.000'), max_digits=12, decimal_places=3, help_text='Default product weight in kg when no weight attribute is defined')), ('countries', models.ManyToManyField(blank=True, verbose_name='Countries', to='address.Country', null=True)), ], options={ 'ordering': ['name'], 'verbose_name_plural': 'Weight-based Shipping Methods', 'verbose_name': 'Weight-based Shipping Method', 'abstract': False, }, bases=(models.Model,), ), migrations.AddField( model_name='weightband', name='method', field=models.ForeignKey(verbose_name='Method', related_name='bands', to='shipping.WeightBased'), preserve_default=True, ), ]
mit
technologiescollege/Blockly-rduino-communication
scripts_XP/Lib/site-packages/pygments/lexers/smalltalk.py
31
7215
# -*- coding: utf-8 -*- """ pygments.lexers.smalltalk ~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for Smalltalk and related languages. :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, bygroups, default from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation __all__ = ['SmalltalkLexer', 'NewspeakLexer'] class SmalltalkLexer(RegexLexer): """ For `Smalltalk <http://www.smalltalk.org/>`_ syntax. Contributed by Stefan Matthias Aust. Rewritten by Nils Winter. .. versionadded:: 0.10 """ name = 'Smalltalk' filenames = ['*.st'] aliases = ['smalltalk', 'squeak', 'st'] mimetypes = ['text/x-smalltalk'] tokens = { 'root': [ (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)), include('squeak fileout'), include('whitespaces'), include('method definition'), (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)), include('objects'), (r'\^|\:=|\_', Operator), # temporaries (r'[\]({}.;!]', Text), ], 'method definition': [ # Not perfect can't allow whitespaces at the beginning and the # without breaking everything (r'([a-zA-Z]+\w*:)(\s*)(\w+)', bygroups(Name.Function, Text, Name.Variable)), (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)), (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$', bygroups(Name.Function, Text, Name.Variable, Text)), ], 'blockvariables': [ include('whitespaces'), (r'(:)(\s*)(\w+)', bygroups(Operator, Text, Name.Variable)), (r'\|', Operator, '#pop'), default('#pop'), # else pop ], 'literals': [ (r"'(''|[^'])*'", String, 'afterobject'), (r'\$.', String.Char, 'afterobject'), (r'#\(', String.Symbol, 'parenth'), (r'\)', Text, 'afterobject'), (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'), ], '_parenth_helper': [ include('whitespaces'), (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number), (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol), # literals (r"'(''|[^'])*'", String), (r'\$.', String.Char), 
(r'#*\(', String.Symbol, 'inner_parenth'), ], 'parenth': [ # This state is a bit tricky since # we can't just pop this state (r'\)', String.Symbol, ('root', 'afterobject')), include('_parenth_helper'), ], 'inner_parenth': [ (r'\)', String.Symbol, '#pop'), include('_parenth_helper'), ], 'whitespaces': [ # skip whitespace and comments (r'\s+', Text), (r'"(""|[^"])*"', Comment), ], 'objects': [ (r'\[', Text, 'blockvariables'), (r'\]', Text, 'afterobject'), (r'\b(self|super|true|false|nil|thisContext)\b', Name.Builtin.Pseudo, 'afterobject'), (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'), (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'), (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)', String.Symbol, 'afterobject'), include('literals'), ], 'afterobject': [ (r'! !$', Keyword, '#pop'), # squeak chunk delimiter include('whitespaces'), (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)', Name.Builtin, '#pop'), (r'\b(new\b(?!:))', Name.Builtin), (r'\:=|\_', Operator, '#pop'), (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'), (r'\b[a-zA-Z]+\w*', Name.Function), (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'), (r'\.', Punctuation, '#pop'), (r';', Punctuation), (r'[\])}]', Text), (r'[\[({]', Text, '#pop'), ], 'squeak fileout': [ # Squeak fileout format (optional) (r'^"(""|[^"])*"!', Keyword), (r"^'(''|[^'])*'!", Keyword), (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)', bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)), (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)", bygroups(Keyword, Name.Class, Keyword, String, Keyword)), (r'^(\w+)( subclass: )(#\w+)' r'(\s+instanceVariableNames: )(.*?)' r'(\s+classVariableNames: )(.*?)' r'(\s+poolDictionaries: )(.*?)' r'(\s+category: )(.*?)(!)', bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword, String, Keyword, String, Keyword, String, Keyword)), (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)', bygroups(Name.Class, Keyword, String, Keyword)), 
(r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)), (r'! !$', Keyword), ], } class NewspeakLexer(RegexLexer): """ For `Newspeak <http://newspeaklanguage.org/>` syntax. .. versionadded:: 1.1 """ name = 'Newspeak' filenames = ['*.ns2'] aliases = ['newspeak', ] mimetypes = ['text/x-newspeak'] tokens = { 'root': [ (r'\b(Newsqueak2)\b', Keyword.Declaration), (r"'[^']*'", String), (r'\b(class)(\s+)(\w+)(\s*)', bygroups(Keyword.Declaration, Text, Name.Class, Text)), (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b', Keyword), (r'(\w+\:)(\s*)([a-zA-Z_]\w+)', bygroups(Name.Function, Text, Name.Variable)), (r'(\w+)(\s*)(=)', bygroups(Name.Attribute, Text, Operator)), (r'<\w+>', Comment.Special), include('expressionstat'), include('whitespace') ], 'expressionstat': [ (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'\d+', Number.Integer), (r':\w+', Name.Variable), (r'(\w+)(::)', bygroups(Name.Variable, Operator)), (r'\w+:', Name.Function), (r'\w+', Name.Variable), (r'\(|\)', Punctuation), (r'\[|\]', Punctuation), (r'\{|\}', Punctuation), (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator), (r'\.|;', Punctuation), include('whitespace'), include('literals'), ], 'literals': [ (r'\$.', String), (r"'[^']*'", String), (r"#'[^']*'", String.Symbol), (r"#\w+:?", String.Symbol), (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol) ], 'whitespace': [ (r'\s+', Text), (r'"[^"]*"', Comment) ], }
gpl-3.0
tequa/ammisoft
ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/pygments/formatters/svg.py
31
5840
# -*- coding: utf-8 -*- """ pygments.formatters.svg ~~~~~~~~~~~~~~~~~~~~~~~ Formatter for SVG output. :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.formatter import Formatter from pygments.util import get_bool_opt, get_int_opt __all__ = ['SvgFormatter'] def escape_html(text): """Escape &, <, > as well as single and double quotes for HTML.""" return text.replace('&', '&amp;'). \ replace('<', '&lt;'). \ replace('>', '&gt;'). \ replace('"', '&quot;'). \ replace("'", '&#39;') class2style = {} class SvgFormatter(Formatter): """ Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles. By default, this formatter outputs a full SVG document including doctype declaration and the ``<svg>`` root element. .. versionadded:: 0.9 Additional options accepted: `nowrap` Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and don't add a XML declaration and a doctype. If true, the `fontfamily` and `fontsize` options are ignored. Defaults to ``False``. `fontfamily` The value to give the wrapping ``<g>`` element's ``font-family`` attribute, defaults to ``"monospace"``. `fontsize` The value to give the wrapping ``<g>`` element's ``font-size`` attribute, defaults to ``"14px"``. `xoffset` Starting offset in X direction, defaults to ``0``. `yoffset` Starting offset in Y direction, defaults to the font size if it is given in pixels, or ``20`` else. (This is necessary since text coordinates refer to the text baseline, not the top edge.) `ystep` Offset to add to the Y coordinate for each subsequent line. This should roughly be the text size plus 5. It defaults to that value if the text size is given in pixels, or ``25`` else. `spacehack` Convert spaces in the source to ``&#160;``, which are non-breaking spaces. 
SVG provides the ``xml:space`` attribute to control how whitespace inside tags is handled, in theory, the ``preserve`` value could be used to keep all whitespace as-is. However, many current SVG viewers don't obey that rule, so this option is provided as a workaround and defaults to ``True``. """ name = 'SVG' aliases = ['svg'] filenames = ['*.svg'] def __init__(self, **options): Formatter.__init__(self, **options) self.nowrap = get_bool_opt(options, 'nowrap', False) self.fontfamily = options.get('fontfamily', 'monospace') self.fontsize = options.get('fontsize', '14px') self.xoffset = get_int_opt(options, 'xoffset', 0) fs = self.fontsize.strip() if fs.endswith('px'): fs = fs[:-2].strip() try: int_fs = int(fs) except: int_fs = 20 self.yoffset = get_int_opt(options, 'yoffset', int_fs) self.ystep = get_int_opt(options, 'ystep', int_fs + 5) self.spacehack = get_bool_opt(options, 'spacehack', True) self._stylecache = {} def format_unencoded(self, tokensource, outfile): """ Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. For our implementation we put all lines in their own 'line group'. 
""" x = self.xoffset y = self.yoffset if not self.nowrap: if self.encoding: outfile.write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding) else: outfile.write('<?xml version="1.0"?>\n') outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" ' '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/' 'svg10.dtd">\n') outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n') outfile.write('<g font-family="%s" font-size="%s">\n' % (self.fontfamily, self.fontsize)) outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y)) for ttype, value in tokensource: style = self._get_style(ttype) tspan = style and '<tspan' + style + '>' or '' tspanend = tspan and '</tspan>' or '' value = escape_html(value) if self.spacehack: value = value.expandtabs().replace(' ', '&#160;') parts = value.split('\n') for part in parts[:-1]: outfile.write(tspan + part + tspanend) y += self.ystep outfile.write('</text>\n<text x="%s" y="%s" ' 'xml:space="preserve">' % (x, y)) outfile.write(tspan + parts[-1] + tspanend) outfile.write('</text>') if not self.nowrap: outfile.write('</g></svg>\n') def _get_style(self, tokentype): if tokentype in self._stylecache: return self._stylecache[tokentype] otokentype = tokentype while not self.style.styles_token(tokentype): tokentype = tokentype.parent value = self.style.style_for_token(tokentype) result = '' if value['color']: result = ' fill="#' + value['color'] + '"' if value['bold']: result += ' font-weight="bold"' if value['italic']: result += ' font-style="italic"' self._stylecache[otokentype] = result return result
bsd-3-clause
kenshay/ImageScripter
ProgramData/SystemFiles/Python/Lib/site-packages/spyderlib/utils/external/rope/base/fscommands.py
29
7524
"""Project file system commands. This modules implements file system operations used by rope. Different version control systems can be supported by implementing the interface provided by `FileSystemCommands` class. See `SubversionCommands` and `MercurialCommands` for example. """ import os, re import shutil import subprocess def create_fscommands(root): dirlist = os.listdir(root) commands = {'.hg': MercurialCommands, '.svn': SubversionCommands, '.git': GITCommands, '_svn': SubversionCommands, '_darcs': DarcsCommands} for key in commands: if key in dirlist: try: return commands[key](root) except (ImportError, OSError): pass return FileSystemCommands() class FileSystemCommands(object): def create_file(self, path): open(path, 'w').close() def create_folder(self, path): os.mkdir(path) def move(self, path, new_location): shutil.move(path, new_location) def remove(self, path): if os.path.isfile(path): os.remove(path) else: shutil.rmtree(path) def write(self, path, data): file_ = open(path, 'wb') try: file_.write(data) finally: file_.close() class SubversionCommands(object): def __init__(self, *args): self.normal_actions = FileSystemCommands() import pysvn self.client = pysvn.Client() def create_file(self, path): self.normal_actions.create_file(path) self.client.add(path, force=True) def create_folder(self, path): self.normal_actions.create_folder(path) self.client.add(path, force=True) def move(self, path, new_location): self.client.move(path, new_location, force=True) def remove(self, path): self.client.remove(path, force=True) def write(self, path, data): self.normal_actions.write(path, data) class MercurialCommands(object): def __init__(self, root): self.hg = self._import_mercurial() self.normal_actions = FileSystemCommands() try: self.ui = self.hg.ui.ui( verbose=False, debug=False, quiet=True, interactive=False, traceback=False, report_untrusted=False) except: self.ui = self.hg.ui.ui() self.ui.setconfig('ui', 'interactive', 'no') self.ui.setconfig('ui', 'debug', 
'no') self.ui.setconfig('ui', 'traceback', 'no') self.ui.setconfig('ui', 'verbose', 'no') self.ui.setconfig('ui', 'report_untrusted', 'no') self.ui.setconfig('ui', 'quiet', 'yes') self.repo = self.hg.hg.repository(self.ui, root) def _import_mercurial(self): import mercurial.commands import mercurial.hg import mercurial.ui return mercurial def create_file(self, path): self.normal_actions.create_file(path) self.hg.commands.add(self.ui, self.repo, path) def create_folder(self, path): self.normal_actions.create_folder(path) def move(self, path, new_location): self.hg.commands.rename(self.ui, self.repo, path, new_location, after=False) def remove(self, path): self.hg.commands.remove(self.ui, self.repo, path) def write(self, path, data): self.normal_actions.write(path, data) class GITCommands(object): def __init__(self, root): self.root = root self._do(['version']) self.normal_actions = FileSystemCommands() def create_file(self, path): self.normal_actions.create_file(path) self._do(['add', self._in_dir(path)]) def create_folder(self, path): self.normal_actions.create_folder(path) def move(self, path, new_location): self._do(['mv', self._in_dir(path), self._in_dir(new_location)]) def remove(self, path): self._do(['rm', self._in_dir(path)]) def write(self, path, data): # XXX: should we use ``git add``? 
self.normal_actions.write(path, data) def _do(self, args): _execute(['git'] + args, cwd=self.root) def _in_dir(self, path): if path.startswith(self.root): return path[len(self.root) + 1:] return self.root class DarcsCommands(object): def __init__(self, root): self.root = root self.normal_actions = FileSystemCommands() def create_file(self, path): self.normal_actions.create_file(path) self._do(['add', path]) def create_folder(self, path): self.normal_actions.create_folder(path) self._do(['add', path]) def move(self, path, new_location): self._do(['mv', path, new_location]) def remove(self, path): self.normal_actions.remove(path) def write(self, path, data): self.normal_actions.write(path, data) def _do(self, args): _execute(['darcs'] + args, cwd=self.root) def _execute(args, cwd=None): process = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE) process.wait() return process.returncode def unicode_to_file_data(contents, encoding=None): if not isinstance(contents, str): return contents if encoding is None: encoding = read_str_coding(contents) if encoding is not None: return contents.encode(encoding) try: return contents.encode() except UnicodeEncodeError: return contents.encode('utf-8') def file_data_to_unicode(data, encoding=None): result = _decode_data(data, encoding) if '\r' in result: result = result.replace('\r\n', '\n').replace('\r', '\n') return result def _decode_data(data, encoding): if isinstance(data, str): return data if encoding is None: encoding = read_str_coding(data) if encoding is None: # there is no encoding tip, we need to guess. # PEP263 says that "encoding not explicitly defined" means it is ascii, # but we will use utf8 instead since utf8 fully covers ascii and btw is # the only non-latin sane encoding. 
encoding = 'utf-8' try: return data.decode(encoding) except (UnicodeError, LookupError): # fallback to utf-8: it should never fail return data.decode('utf-8') def read_file_coding(path): file = open(path, 'b') count = 0 result = [] buffsize = 10 while True: current = file.read(10) if not current: break count += current.count('\n') result.append(current) file.close() return _find_coding(''.join(result)) def read_str_coding(source): if not isinstance(source, str): source = source.decode("utf-8", "ignore") #TODO: change it to precompiled version mex = re.search("\-\*\-\s+coding:\s+(.*?)\s+\-\*\-", source) if mex: return mex.group(1) return "utf-8" def _find_coding(text): coding = 'coding' try: start = text.index(coding) + len(coding) if text[start] not in '=:': return start += 1 while start < len(text) and text[start].isspace(): start += 1 end = start while end < len(text): c = text[end] if not c.isalnum() and c not in '-_': break end += 1 return text[start:end] except ValueError: pass
gpl-3.0
AnishShah/tensorflow
tensorflow/contrib/tpu/python/tpu/tpu_context.py
1
26214
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =================================================================== """TPU system metadata and associated tooling.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from contextlib import contextmanager import copy from tensorflow.contrib.tpu.python.tpu import device_assignment as tpu_device_assignment from tensorflow.contrib.tpu.python.tpu import tpu_config from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib from tensorflow.python.estimator import model_fn as model_fn_lib from tensorflow.python.platform import tf_logging as logging _DEFAULT_JOB_NAME = 'tpu_worker' _DEFAULT_COORDINATOR_JOB_NAME = 'coordinator' _LOCAL_MASTERS = ('', 'local') _NUM_CORES_TO_COMPUTATION_SHAPE = { 1: [1, 1, 1], 2: [1, 1, 2], 4: [1, 2, 2], 8: [2, 2, 2] } class TPUContext(object): """The context of current input_fn invocation.""" def __init__(self, internal_ctx, input_device=None, invocation_index=None, call_from_input_fn=True): self._internal_ctx = internal_ctx self._input_device = input_device self._invocation_index = invocation_index self._call_from_input_fn = call_from_input_fn def current_input_fn_deployment(self): """The configuration of the current input_fn invocation. The configuration depends on `TPUConfig.per_host_input_for_training`. See `TPUConfig` for details. 
Only set in params dict of input_fn Returns: A tuple of 1. Device spec string: String, is the current CPU host where the input_fn is invoked. 2. Current invocation index: Int, 0-based index of the input_fn invocation. See next item for details. 3. Total invocation count: Int, the total number of times to invoke the input_fn on all CPU hosts. Each invocation will be passed with a new `TPUContext` instance with current invocation index set properly. 4. Total number of replicas consumed by current_invocation: Int, the number of replicas fed by the data returned by current input_fn. For example, for per_core input pipeline deployment and non-model-parallelism, total invocation count is equal to the number of cores in the system and num replicas consumed by current invocation is 1. For per-host v2 input pipeline deployment, total invocation count is equal to the number of hosts in the system and num replicas consumed by current invocation is equal to number of cores per host. Raises: RuntimeError: If this method must not be called from input_fn. """ if not self._call_from_input_fn: raise RuntimeError('This TPUContext instance must not be called from' ' model_fn.') if self._internal_ctx.is_input_sharded_per_core(): total_invocation_count = (self._internal_ctx.num_hosts * self._internal_ctx.num_of_replicas_per_host) replicas_consumed = 1 elif self._internal_ctx.is_input_broadcast_with_iterators(): total_invocation_count = 1 replicas_consumed = self._internal_ctx.num_replicas else: total_invocation_count = self._internal_ctx.num_hosts replicas_consumed = self._internal_ctx.num_of_replicas_per_host return (self._input_device, self._invocation_index, total_invocation_count, replicas_consumed) @property def num_replicas(self): """The total number of replicas. For non-model-parallelism, num_replicas should be the total num of TPU cores in the system. Returns: The number of replicas. 
""" return self._internal_ctx.num_replicas @property def num_hosts(self): """The number of hosts for the TPU system.""" return self._internal_ctx.num_hosts @property def num_of_replicas_per_host(self): """The number of replicas for each host.""" if self._internal_ctx.model_parallelism_enabled: raise ValueError( 'num_of_replicas_per_host is not supported for model_parallelism') return self._internal_ctx.num_of_replicas_per_host @property def device_assignment(self): """Returns device_assignment object.""" if self._call_from_input_fn: raise RuntimeError('This TPUContext instance must not be called from' ' input_fn.') return self._internal_ctx.device_assignment def device_for_replica(self, replica_id): """Returns the tuple of (CPU device and device ordinal) for replica. This should be used for full replicate for non-model-parallelism. Args: replica_id: Int, the replica index. Returns: A tuple of device spec for CPU device and int device ordinal. """ # Note that: For the non-model parallelism, the mapping could be # a random permutation. The order should not matter in most cases # as far as model is replicated to all cores in the system. return self._internal_ctx.device_for_replica(replica_id) class _InternalTPUContext(object): """A context holds immutable states of TPU computation. This immutable object holds TPUEstimator config, train/eval batch size, and `TPUEstimator.use_tpu`, which is expected to be passed around. It also provides utility functions, based on the current state, to determine other information commonly required by TPU computation, such as TPU device names, TPU hosts, shard batch size, etc. if eval_on_tpu is False, then execution of eval on TPU is disabled. if eval_on_tpu is True, but use_tpu is False, a warning is issued, and TPU execution is disabled for all modes. N.B. 
As `mode` is not immutable state in Estimator, but essential to distinguish between TPU training and evaluation, a common usage for _InternalTPUContext with `mode` is as follows: ``` with _ctx.with_mode(mode) as ctx: if ctx.is_running_on_cpu(): ... ``` """ def __init__(self, config, train_batch_size, eval_batch_size, predict_batch_size, use_tpu, eval_on_tpu=True): self._config = config self._train_batch_size = train_batch_size self._eval_batch_size = eval_batch_size self._predict_batch_size = predict_batch_size self._use_tpu = use_tpu logging.info('_TPUContext: eval_on_tpu %s', eval_on_tpu) if not use_tpu and eval_on_tpu: logging.warning('eval_on_tpu ignored because use_tpu is False.') self._eval_on_tpu = eval_on_tpu self._model_parallelism_enabled = ( use_tpu and config.tpu_config.num_cores_per_replica) self._mode = None num_cores_per_replica = config.tpu_config.num_cores_per_replica if num_cores_per_replica: self._computation_shape = _NUM_CORES_TO_COMPUTATION_SHAPE[ num_cores_per_replica] else: self._computation_shape = None self._lazy_tpu_system_metadata_dict = {} # key by master address self._lazy_device_assignment_dict = {} # key by master address self._lazy_validation_dict = {} # key by ModeKeys def _assert_mode(self): if self._mode is None: raise RuntimeError( '`mode` needs to be set via contextmanager `with_mode`.') return self._mode @contextmanager def with_mode(self, mode): # NOTE(xiejw): Shallow copy is enough. It will share he lazy dictionaries, # such as _lazy_tpu_system_metadata_dict between new copy and the original # one. Note that all lazy states stored in properties _lazy_foo are sort of # immutable as they should be same for the process lifetime. 
new_ctx = copy.copy(self) new_ctx._mode = mode # pylint: disable=protected-access yield new_ctx @property def mode(self): return self._assert_mode() def _get_master_address(self): mode = self._assert_mode() config = self._config master = ( config.master if mode != model_fn_lib.ModeKeys.EVAL else config.evaluation_master) return master def _get_tpu_system_metadata(self): """Gets the (maybe cached) TPU system metadata.""" master = self._get_master_address() tpu_system_metadata = self._lazy_tpu_system_metadata_dict.get(master) if tpu_system_metadata is not None: return tpu_system_metadata cluster_def = None if (self._config.session_config and self._config.session_config.cluster_def.job): cluster_def = self._config.session_config.cluster_def # pylint: disable=protected-access tpu_system_metadata = ( tpu_system_metadata_lib._query_tpu_system_metadata( master, cluster_def=cluster_def, query_topology=self.model_parallelism_enabled)) self._lazy_tpu_system_metadata_dict[master] = tpu_system_metadata return tpu_system_metadata def _get_device_assignment(self): """Gets the (maybe cached) TPU device assignment.""" master = self._get_master_address() device_assignment = self._lazy_device_assignment_dict.get(master) if device_assignment is not None: return device_assignment tpu_system_metadata = self._get_tpu_system_metadata() device_assignment = tpu_device_assignment.device_assignment( tpu_system_metadata.topology, computation_shape=self._computation_shape, num_replicas=self.num_replicas) logging.info('num_cores_per_replica: %s', str(self._config.tpu_config.num_cores_per_replica)) logging.info('computation_shape: %s', str(self._computation_shape)) logging.info('num_replicas: %d', self.num_replicas) logging.info('device_assignment.topology.device_coordinates: %s', str(device_assignment.topology.device_coordinates)) logging.info('device_assignment.core_assignment: %s', str(device_assignment.core_assignment)) self._lazy_device_assignment_dict[master] = device_assignment return 
device_assignment @property def model_parallelism_enabled(self): return self._model_parallelism_enabled @property def input_partition_dims(self): return self._config.tpu_config.input_partition_dims @property def device_assignment(self): return (self._get_device_assignment() if self._model_parallelism_enabled else None) @property def num_of_cores_per_host(self): metadata = self._get_tpu_system_metadata() return metadata.num_of_cores_per_host @property def num_cores(self): metadata = self._get_tpu_system_metadata() return metadata.num_cores @property def num_of_replicas_per_host(self): if self.model_parallelism_enabled: return self.num_replicas // self.num_hosts else: return self.num_of_cores_per_host @property def num_replicas(self): num_cores_in_system = self.num_cores if self.model_parallelism_enabled: num_cores_per_replica = self._config.tpu_config.num_cores_per_replica if num_cores_per_replica > num_cores_in_system: raise ValueError( 'The num of cores required by the model parallelism, specified by ' 'TPUConfig.num_cores_per_replica, is larger than the total num of ' 'TPU cores in the system. num_cores_per_replica: {}, num cores ' 'in the system: {}'.format(num_cores_per_replica, num_cores_in_system)) if num_cores_in_system % num_cores_per_replica != 0: raise RuntimeError( 'The num of cores in the system ({}) is not divisible by the num ' 'of cores ({}) required by the model parallelism, specified by ' 'TPUConfig.num_cores_per_replica. 
This should never happen!'.format( num_cores_in_system, num_cores_per_replica)) return num_cores_in_system // num_cores_per_replica else: return num_cores_in_system @property def num_hosts(self): metadata = self._get_tpu_system_metadata() return metadata.num_hosts @property def config(self): return self._config def is_input_sharded_per_core(self): """Return true if input_fn is invoked per-core (other than per-host).""" mode = self._assert_mode() return (mode == model_fn_lib.ModeKeys.TRAIN and (self._config.tpu_config.per_host_input_for_training is tpu_config.InputPipelineConfig.PER_SHARD_V1)) def is_input_per_host_with_iterators(self): """Return true if input_fn should be run in the per-host v2 config.""" return (self._config.tpu_config.per_host_input_for_training is tpu_config.InputPipelineConfig.PER_HOST_V2) def is_input_broadcast_with_iterators(self): """Return true if input_fn should be run in the full_replicae config.""" return (self._config.tpu_config.per_host_input_for_training is tpu_config.InputPipelineConfig.BROADCAST) def is_running_on_cpu(self, is_export_mode=False): """Determines whether the input_fn and model_fn should be invoked on CPU. This API also validates user provided configuration, such as batch size, according the lazy initialized TPU system metadata. Args: is_export_mode: Indicates whether the current mode is for exporting the model, when mode == PREDICT. Only with this bool, we could tell whether user is calling the Estimator.predict or Estimator.export_savedmodel, which are running on TPU and CPU respectively. Parent class Estimator does not distinguish these two. Returns: bool, whether current input_fn or model_fn should be running on CPU. Raises: ValueError: any configuration is invalid. 
""" is_running_on_cpu = self._is_running_on_cpu(is_export_mode) if not is_running_on_cpu: self._validate_tpu_configuration() return is_running_on_cpu def _is_running_on_cpu(self, is_export_mode): """Determines whether the input_fn and model_fn should be invoked on CPU.""" mode = self._assert_mode() if not self._use_tpu: return True if mode == model_fn_lib.ModeKeys.EVAL and not self._eval_on_tpu: logging.info('_is_running_on_cpu: eval_on_tpu disabled') return True if is_export_mode: return True return False @property def global_batch_size(self): mode = self._assert_mode() if mode == model_fn_lib.ModeKeys.TRAIN: return self._train_batch_size elif mode == model_fn_lib.ModeKeys.EVAL: return self._eval_batch_size elif mode == model_fn_lib.ModeKeys.PREDICT: return self._predict_batch_size else: return None @property def batch_size_for_input_fn(self): """Returns the shard batch size for `input_fn`.""" global_batch_size = self.global_batch_size if (self.is_running_on_cpu() or self.is_input_broadcast_with_iterators()): return global_batch_size # On TPU if self.is_input_sharded_per_core() or ( self.is_input_per_host_with_iterators()): return global_batch_size // self.num_replicas else: return global_batch_size // self.num_hosts @property def batch_size_for_model_fn(self): """Returns the shard batch size for `model_fn`.""" global_batch_size = self.global_batch_size if (self.is_running_on_cpu() or self.is_input_broadcast_with_iterators()): return global_batch_size # On TPU. always sharded per shard. return global_batch_size // self.num_replicas @property def master_job(self): """Returns the job name to use to place TPU computations on. Returns: A string containing the job name, or None if no job should be specified. Raises: ValueError: If the user needs to specify a tpu_job_name, because we are unable to infer the job name automatically, or if the user-specified job names are inappropriate. """ run_config = self._config # If the user specifies the tpu_job_name, use that. 
if run_config.tpu_config.tpu_job_name: return run_config.tpu_config.tpu_job_name # The tpu job is determined by the run_config. Right now, this method is # required as tpu_config is not part of the RunConfig. mode = self._assert_mode() master = ( run_config.evaluation_master if mode == model_fn_lib.ModeKeys.EVAL else run_config.master) if master in _LOCAL_MASTERS: return None if (not run_config.session_config or not run_config.session_config.cluster_def.job): return _DEFAULT_JOB_NAME cluster_def = run_config.session_config.cluster_def job_names = set([job.name for job in cluster_def.job]) if _DEFAULT_JOB_NAME in job_names: # b/37868888 tracks allowing ClusterSpec propagation to reuse job names. raise ValueError('Currently, tpu_worker is not an allowed job name.') if len(job_names) == 1: return cluster_def.job[0].name if len(job_names) == 2: if _DEFAULT_COORDINATOR_JOB_NAME in job_names: job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME) return job_names.pop() # TODO(b/67716447): Include more sophisticated heuristics. raise ValueError( 'Could not infer TPU job name. 
Please specify a tpu_job_name as part ' 'of your TPUConfig.') @property def tpu_host_placement_function(self): """Returns the TPU host place function.""" master = self.master_job def _placement_function(_sentinal=None, replica_id=None, host_id=None): # pylint: disable=invalid-name """Return the host device given replica_id or host_id.""" assert _sentinal is None if replica_id is not None and host_id is not None: raise RuntimeError( 'replica_id and host_id can have only one non-None value.') if master is None: return '/replica:0/task:0/device:CPU:0' else: if replica_id is not None: if self.model_parallelism_enabled: return self.device_assignment.host_device( replica=replica_id, job=master) else: host_id = replica_id / self.num_of_cores_per_host return '/job:%s/task:%d/device:CPU:0' % (master, host_id) return _placement_function @property def tpu_device_placement_function(self): """Returns a TPU device placement Fn.""" master = self.master_job job_device = '' if master is None else ('/job:%s' % master) def _placement_function(i): if self.model_parallelism_enabled: return self.device_assignment.tpu_device(replica=i, job=master) else: num_of_cores_per_host = self.num_of_cores_per_host host_id = i / num_of_cores_per_host ordinal_id = i % num_of_cores_per_host return '%s/task:%d/device:TPU:%d' % (job_device, host_id, ordinal_id) return _placement_function def tpu_ordinal_function(self, host_id): """Returns the TPU ordinal fn.""" def _tpu_ordinal_function(shard_index_in_host): """Return the TPU ordinal associated with a shard. Required because the enqueue ops are placed on CPU. Args: shard_index_in_host: the shard index Returns: The ordinal of the TPU device the shard's infeed should be placed on. """ if self.model_parallelism_enabled: # We put both enqueue/dequeue ops at tpu.core(0) in each replica. 
replica = self.device_assignment.lookup_replicas( host_id, (0, 0, 0))[shard_index_in_host] return self.device_assignment.tpu_ordinal(replica=replica) else: return shard_index_in_host % self.num_of_cores_per_host return _tpu_ordinal_function def _validate_tpu_configuration(self): """Validates the configuration based on the TPU system metadata.""" mode = self._assert_mode() if self._lazy_validation_dict.get(mode): return # All following information is obtained from TPU system metadata. num_cores = self.num_cores num_replicas = self.num_replicas num_hosts = self.num_hosts if not num_cores: tpu_system_metadata = self._get_tpu_system_metadata() raise RuntimeError( 'Cannot find any TPU cores in the system. Please double check ' 'Tensorflow master address and TPU worker(s). Available devices ' 'are {}.'.format(tpu_system_metadata.devices)) if self._config.tpu_config.num_shards: user_provided_num_replicas = self._config.tpu_config.num_shards if user_provided_num_replicas != num_replicas: message = ( 'TPUConfig.num_shards is not set correctly. According to TPU ' 'system metadata for Tensorflow master ({}): num_replicas should ' 'be ({}), got ({}). For non-model-parallelism, num_replicas should ' 'be the total num of TPU cores in the system. For ' 'model-parallelism, the total number of TPU cores should be ' 'num_cores_per_replica * num_replicas. 
Please set it ' 'accordingly or leave it as `None`'.format( self._get_master_address(), num_replicas, user_provided_num_replicas)) raise ValueError(message) if mode == model_fn_lib.ModeKeys.TRAIN: if (self._train_batch_size % num_replicas != 0 and not self.is_input_broadcast_with_iterators()): raise ValueError( 'train batch size {} must be divisible by number of replicas {}' .format(self._train_batch_size, num_replicas)) elif mode == model_fn_lib.ModeKeys.EVAL: if self._eval_batch_size is None: raise ValueError( 'eval_batch_size in TPUEstimator constructor cannot be `None`' 'if .evaluate is running on TPU.') if (self._eval_batch_size % num_replicas != 0 and not self.is_input_broadcast_with_iterators()): raise ValueError( 'eval batch size {} must be divisible by number of replicas {}' .format(self._eval_batch_size, num_replicas)) if num_hosts > 1 and not self.is_input_broadcast_with_iterators(): raise ValueError( 'TPUEstimator.evaluate should be running on single TPU worker. ' 'got {}.'.format(num_hosts)) else: assert mode == model_fn_lib.ModeKeys.PREDICT if self._predict_batch_size is None: raise ValueError( 'predict_batch_size in TPUEstimator constructor should not be ' '`None` if .predict is running on TPU.') if (self._predict_batch_size % num_replicas != 0 and not self.is_input_broadcast_with_iterators()): raise ValueError( 'predict batch size {} must be divisible by number of replicas {}' .format(self._predict_batch_size, num_replicas)) if num_hosts > 1 and not self.is_input_broadcast_with_iterators(): raise ValueError( 'TPUEstimator.predict should be running on single TPU worker. ' 'got {}.'.format(num_hosts)) # Record the state "validated" into lazy dictionary. self._lazy_validation_dict[mode] = True def device_for_replica(self, replica_id): """Returns the tuple of (CPU device and device ordinal) for replica. This should be used for full replicate for non-model-parallelism. Args: replica_id: Int, the replica index. 
Returns: A tuple of device spec for CPU device and int device ordinal. """ master = self.master_job if self.model_parallelism_enabled: return (self.device_assignment.host_device( replica=replica_id, job=master), self.device_assignment.tpu_ordinal(replica=replica_id)) job_device = '' if master is None else ('/job:%s' % master) num_of_replicas_per_host = self.num_of_replicas_per_host host_id = replica_id / num_of_replicas_per_host ordinal_id = replica_id % num_of_replicas_per_host host_device = '%s/task:%d/device:CPU:0' % (job_device, host_id) return (host_device, ordinal_id) class _OneCoreTPUContext(_InternalTPUContext): """Special _InternalTPUContext for one core usage.""" def __init__(self, config, train_batch_size, eval_batch_size, predict_batch_size, use_tpu): super(_OneCoreTPUContext, self).__init__( config, train_batch_size, eval_batch_size, predict_batch_size, use_tpu) def _get_tpu_system_metadata(self): """Gets the (maybe cached) TPU system metadata.""" master = self._get_master_address() tpu_system_metadata = self._lazy_tpu_system_metadata_dict.get(master) if tpu_system_metadata is not None: return tpu_system_metadata tpu_system_metadata = ( tpu_system_metadata_lib._TPUSystemMetadata( # pylint: disable=protected-access num_cores=1, num_hosts=1, num_of_cores_per_host=1, topology=None, devices=[])) self._lazy_tpu_system_metadata_dict[master] = tpu_system_metadata return tpu_system_metadata def _get_tpu_context(config, train_batch_size, eval_batch_size, predict_batch_size, use_tpu, eval_on_tpu): """Returns an instance of `_InternalTPUContext`.""" if (config.tpu_config.num_shards == 1 and config.tpu_config.num_cores_per_replica is None): logging.warning( 'Setting TPUConfig.num_shards==1 is an unsupported behavior. 
' 'Please fix as soon as possible (leaving num_shards as None.') return _OneCoreTPUContext(config, train_batch_size, eval_batch_size, predict_batch_size, use_tpu) return _InternalTPUContext(config, train_batch_size, eval_batch_size, predict_batch_size, use_tpu, eval_on_tpu)
apache-2.0
kenshay/ImageScript
ProgramData/Android/ADB/platform-tools/systrace/catapult/telemetry/third_party/web-page-replay/third_party/dns/rdataset.py
215
11527
# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS rdatasets (an rdataset is a set of rdatas of a given type and class)""" import random import StringIO import struct import dns.exception import dns.rdatatype import dns.rdataclass import dns.rdata import dns.set # define SimpleSet here for backwards compatibility SimpleSet = dns.set.Set class DifferingCovers(dns.exception.DNSException): """Raised if an attempt is made to add a SIG/RRSIG whose covered type is not the same as that of the other rdatas in the rdataset.""" pass class IncompatibleTypes(dns.exception.DNSException): """Raised if an attempt is made to add rdata of an incompatible type.""" pass class Rdataset(dns.set.Set): """A DNS rdataset. @ivar rdclass: The class of the rdataset @type rdclass: int @ivar rdtype: The type of the rdataset @type rdtype: int @ivar covers: The covered type. Usually this value is dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or dns.rdatatype.RRSIG, then the covers value will be the rdata type the SIG/RRSIG covers. The library treats the SIG and RRSIG types as if they were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). 
This makes RRSIGs much easier to work with than if RRSIGs covering different rdata types were aggregated into a single RRSIG rdataset. @type covers: int @ivar ttl: The DNS TTL (Time To Live) value @type ttl: int """ __slots__ = ['rdclass', 'rdtype', 'covers', 'ttl'] def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE): """Create a new rdataset of the specified class and type. @see: the description of the class instance variables for the meaning of I{rdclass} and I{rdtype}""" super(Rdataset, self).__init__() self.rdclass = rdclass self.rdtype = rdtype self.covers = covers self.ttl = 0 def _clone(self): obj = super(Rdataset, self)._clone() obj.rdclass = self.rdclass obj.rdtype = self.rdtype obj.covers = self.covers obj.ttl = self.ttl return obj def update_ttl(self, ttl): """Set the TTL of the rdataset to be the lesser of the set's current TTL or the specified TTL. If the set contains no rdatas, set the TTL to the specified TTL. @param ttl: The TTL @type ttl: int""" if len(self) == 0: self.ttl = ttl elif ttl < self.ttl: self.ttl = ttl def add(self, rd, ttl=None): """Add the specified rdata to the rdataset. If the optional I{ttl} parameter is supplied, then self.update_ttl(ttl) will be called prior to adding the rdata. @param rd: The rdata @type rd: dns.rdata.Rdata object @param ttl: The TTL @type ttl: int""" # # If we're adding a signature, do some special handling to # check that the signature covers the same type as the # other rdatas in this rdataset. If this is the first rdata # in the set, initialize the covers field. 
# if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype: raise IncompatibleTypes if not ttl is None: self.update_ttl(ttl) if self.rdtype == dns.rdatatype.RRSIG or \ self.rdtype == dns.rdatatype.SIG: covers = rd.covers() if len(self) == 0 and self.covers == dns.rdatatype.NONE: self.covers = covers elif self.covers != covers: raise DifferingCovers if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0: self.clear() super(Rdataset, self).add(rd) def union_update(self, other): self.update_ttl(other.ttl) super(Rdataset, self).union_update(other) def intersection_update(self, other): self.update_ttl(other.ttl) super(Rdataset, self).intersection_update(other) def update(self, other): """Add all rdatas in other to self. @param other: The rdataset from which to update @type other: dns.rdataset.Rdataset object""" self.update_ttl(other.ttl) super(Rdataset, self).update(other) def __repr__(self): if self.covers == 0: ctext = '' else: ctext = '(' + dns.rdatatype.to_text(self.covers) + ')' return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \ dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>' def __str__(self): return self.to_text() def __eq__(self, other): """Two rdatasets are equal if they have the same class, type, and covers, and contain the same rdata. @rtype: bool""" if not isinstance(other, Rdataset): return False if self.rdclass != other.rdclass or \ self.rdtype != other.rdtype or \ self.covers != other.covers: return False return super(Rdataset, self).__eq__(other) def __ne__(self, other): return not self.__eq__(other) def to_text(self, name=None, origin=None, relativize=True, override_rdclass=None, **kw): """Convert the rdataset into DNS master file format. @see: L{dns.name.Name.choose_relativity} for more information on how I{origin} and I{relativize} determine the way names are emitted. Any additional keyword arguments are passed on to the rdata to_text() method. @param name: If name is not None, emit a RRs with I{name} as the owner name. 
@type name: dns.name.Name object @param origin: The origin for relative names, or None. @type origin: dns.name.Name object @param relativize: True if names should names be relativized @type relativize: bool""" if not name is None: name = name.choose_relativity(origin, relativize) ntext = str(name) pad = ' ' else: ntext = '' pad = '' s = StringIO.StringIO() if not override_rdclass is None: rdclass = override_rdclass else: rdclass = self.rdclass if len(self) == 0: # # Empty rdatasets are used for the question section, and in # some dynamic updates, so we don't need to print out the TTL # (which is meaningless anyway). # print >> s, '%s%s%s %s' % (ntext, pad, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(self.rdtype)) else: for rd in self: print >> s, '%s%s%d %s %s %s' % \ (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(self.rdtype), rd.to_text(origin=origin, relativize=relativize, **kw)) # # We strip off the final \n for the caller's convenience in printing # return s.getvalue()[:-1] def to_wire(self, name, file, compress=None, origin=None, override_rdclass=None, want_shuffle=True): """Convert the rdataset to wire format. @param name: The owner name of the RRset that will be emitted @type name: dns.name.Name object @param file: The file to which the wire format data will be appended @type file: file @param compress: The compression table to use; the default is None. @type compress: dict @param origin: The origin to be appended to any relative names when they are emitted. The default is None. 
@returns: the number of records emitted @rtype: int """ if not override_rdclass is None: rdclass = override_rdclass want_shuffle = False else: rdclass = self.rdclass file.seek(0, 2) if len(self) == 0: name.to_wire(file, compress, origin) stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0) file.write(stuff) return 1 else: if want_shuffle: l = list(self) random.shuffle(l) else: l = self for rd in l: name.to_wire(file, compress, origin) stuff = struct.pack("!HHIH", self.rdtype, rdclass, self.ttl, 0) file.write(stuff) start = file.tell() rd.to_wire(file, compress, origin) end = file.tell() assert end - start < 65536 file.seek(start - 2) stuff = struct.pack("!H", end - start) file.write(stuff) file.seek(0, 2) return len(self) def match(self, rdclass, rdtype, covers): """Returns True if this rdataset matches the specified class, type, and covers""" if self.rdclass == rdclass and \ self.rdtype == rdtype and \ self.covers == covers: return True return False def from_text_list(rdclass, rdtype, ttl, text_rdatas): """Create an rdataset with the specified class, type, and TTL, and with the specified list of rdatas in text format. @rtype: dns.rdataset.Rdataset object """ if isinstance(rdclass, str): rdclass = dns.rdataclass.from_text(rdclass) if isinstance(rdtype, str): rdtype = dns.rdatatype.from_text(rdtype) r = Rdataset(rdclass, rdtype) r.update_ttl(ttl) for t in text_rdatas: rd = dns.rdata.from_text(r.rdclass, r.rdtype, t) r.add(rd) return r def from_text(rdclass, rdtype, ttl, *text_rdatas): """Create an rdataset with the specified class, type, and TTL, and with the specified rdatas in text format. @rtype: dns.rdataset.Rdataset object """ return from_text_list(rdclass, rdtype, ttl, text_rdatas) def from_rdata_list(ttl, rdatas): """Create an rdataset with the specified TTL, and with the specified list of rdata objects. 
@rtype: dns.rdataset.Rdataset object """ if len(rdatas) == 0: raise ValueError("rdata list must not be empty") r = None for rd in rdatas: if r is None: r = Rdataset(rd.rdclass, rd.rdtype) r.update_ttl(ttl) first_time = False r.add(rd) return r def from_rdata(ttl, *rdatas): """Create an rdataset with the specified TTL, and with the specified rdata objects. @rtype: dns.rdataset.Rdataset object """ return from_rdata_list(ttl, rdatas)
gpl-3.0
WillisXChen/django-oscar
oscar/lib/python2.7/site-packages/whoosh/analysis/tokenizers.py
93
12678
# Copyright 2007 Matt Chaput. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, # OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are # those of the authors and should not be interpreted as representing official # policies, either expressed or implied, of Matt Chaput. from whoosh.compat import u, text_type from whoosh.analysis.acore import Composable, Token from whoosh.util.text import rcompile default_pattern = rcompile(r"\w+(\.?\w+)*") # Tokenizers class Tokenizer(Composable): """Base class for Tokenizers. """ def __eq__(self, other): return other and self.__class__ is other.__class__ class IDTokenizer(Tokenizer): """Yields the entire input string as a single token. For use in indexed but untokenized fields, such as a document's path. 
>>> idt = IDTokenizer() >>> [token.text for token in idt("/a/b 123 alpha")] ["/a/b 123 alpha"] """ def __call__(self, value, positions=False, chars=False, keeporiginal=False, removestops=True, start_pos=0, start_char=0, mode='', **kwargs): assert isinstance(value, text_type), "%r is not unicode" % value t = Token(positions, chars, removestops=removestops, mode=mode, **kwargs) t.text = value t.boost = 1.0 if keeporiginal: t.original = value if positions: t.pos = start_pos + 1 if chars: t.startchar = start_char t.endchar = start_char + len(value) yield t class RegexTokenizer(Tokenizer): """ Uses a regular expression to extract tokens from text. >>> rex = RegexTokenizer() >>> [token.text for token in rex(u("hi there 3.141 big-time under_score"))] ["hi", "there", "3.141", "big", "time", "under_score"] """ def __init__(self, expression=default_pattern, gaps=False): """ :param expression: A regular expression object or string. Each match of the expression equals a token. Group 0 (the entire matched text) is used as the text of the token. If you require more complicated handling of the expression match, simply write your own tokenizer. :param gaps: If True, the tokenizer *splits* on the expression, rather than matching on the expression. """ self.expression = rcompile(expression) self.gaps = gaps def __eq__(self, other): if self.__class__ is other.__class__: if self.expression.pattern == other.expression.pattern: return True return False def __call__(self, value, positions=False, chars=False, keeporiginal=False, removestops=True, start_pos=0, start_char=0, tokenize=True, mode='', **kwargs): """ :param value: The unicode string to tokenize. :param positions: Whether to record token positions in the token. :param chars: Whether to record character offsets in the token. :param start_pos: The position number of the first token. For example, if you set start_pos=2, the tokens will be numbered 2,3,4,... instead of 0,1,2,... 
:param start_char: The offset of the first character of the first token. For example, if you set start_char=2, the text "aaa bbb" will have chars (2,5),(6,9) instead (0,3),(4,7). :param tokenize: if True, the text should be tokenized. """ assert isinstance(value, text_type), "%s is not unicode" % repr(value) t = Token(positions, chars, removestops=removestops, mode=mode, **kwargs) if not tokenize: t.original = t.text = value t.boost = 1.0 if positions: t.pos = start_pos if chars: t.startchar = start_char t.endchar = start_char + len(value) yield t elif not self.gaps: # The default: expression matches are used as tokens for pos, match in enumerate(self.expression.finditer(value)): t.text = match.group(0) t.boost = 1.0 if keeporiginal: t.original = t.text t.stopped = False if positions: t.pos = start_pos + pos if chars: t.startchar = start_char + match.start() t.endchar = start_char + match.end() yield t else: # When gaps=True, iterate through the matches and # yield the text between them. prevend = 0 pos = start_pos for match in self.expression.finditer(value): start = prevend end = match.start() text = value[start:end] if text: t.text = text t.boost = 1.0 if keeporiginal: t.original = t.text t.stopped = False if positions: t.pos = pos pos += 1 if chars: t.startchar = start_char + start t.endchar = start_char + end yield t prevend = match.end() # If the last "gap" was before the end of the text, # yield the last bit of text as a final token. if prevend < len(value): t.text = value[prevend:] t.boost = 1.0 if keeporiginal: t.original = t.text t.stopped = False if positions: t.pos = pos if chars: t.startchar = prevend t.endchar = len(value) yield t class CharsetTokenizer(Tokenizer): """Tokenizes and translates text according to a character mapping object. Characters that map to None are considered token break characters. For all other characters the map is used to translate the character. This is useful for case and accent folding. 
This tokenizer loops character-by-character and so will likely be much slower than :class:`RegexTokenizer`. One way to get a character mapping object is to convert a Sphinx charset table file using :func:`whoosh.support.charset.charset_table_to_dict`. >>> from whoosh.support.charset import charset_table_to_dict >>> from whoosh.support.charset import default_charset >>> charmap = charset_table_to_dict(default_charset) >>> chtokenizer = CharsetTokenizer(charmap) >>> [t.text for t in chtokenizer(u'Stra\\xdfe ABC')] [u'strase', u'abc'] The Sphinx charset table format is described at http://www.sphinxsearch.com/docs/current.html#conf-charset-table. """ __inittype__ = dict(charmap=str) def __init__(self, charmap): """ :param charmap: a mapping from integer character numbers to unicode characters, as used by the unicode.translate() method. """ self.charmap = charmap def __eq__(self, other): return (other and self.__class__ is other.__class__ and self.charmap == other.charmap) def __call__(self, value, positions=False, chars=False, keeporiginal=False, removestops=True, start_pos=0, start_char=0, tokenize=True, mode='', **kwargs): """ :param value: The unicode string to tokenize. :param positions: Whether to record token positions in the token. :param chars: Whether to record character offsets in the token. :param start_pos: The position number of the first token. For example, if you set start_pos=2, the tokens will be numbered 2,3,4,... instead of 0,1,2,... :param start_char: The offset of the first character of the first token. For example, if you set start_char=2, the text "aaa bbb" will have chars (2,5),(6,9) instead (0,3),(4,7). :param tokenize: if True, the text should be tokenized. 
""" assert isinstance(value, text_type), "%r is not unicode" % value t = Token(positions, chars, removestops=removestops, mode=mode, **kwargs) if not tokenize: t.original = t.text = value t.boost = 1.0 if positions: t.pos = start_pos if chars: t.startchar = start_char t.endchar = start_char + len(value) yield t else: text = u("") charmap = self.charmap pos = start_pos startchar = currentchar = start_char for char in value: tchar = charmap[ord(char)] if tchar: text += tchar else: if currentchar > startchar: t.text = text t.boost = 1.0 if keeporiginal: t.original = t.text if positions: t.pos = pos pos += 1 if chars: t.startchar = startchar t.endchar = currentchar yield t startchar = currentchar + 1 text = u("") currentchar += 1 if currentchar > startchar: t.text = value[startchar:currentchar] t.boost = 1.0 if keeporiginal: t.original = t.text if positions: t.pos = pos if chars: t.startchar = startchar t.endchar = currentchar yield t def SpaceSeparatedTokenizer(): """Returns a RegexTokenizer that splits tokens by whitespace. >>> sst = SpaceSeparatedTokenizer() >>> [token.text for token in sst("hi there big-time, what's up")] ["hi", "there", "big-time,", "what's", "up"] """ return RegexTokenizer(r"[^ \t\r\n]+") def CommaSeparatedTokenizer(): """Splits tokens by commas. Note that the tokenizer calls unicode.strip() on each match of the regular expression. >>> cst = CommaSeparatedTokenizer() >>> [token.text for token in cst("hi there, what's , up")] ["hi there", "what's", "up"] """ from whoosh.analysis.filters import StripFilter return RegexTokenizer(r"[^,]+") | StripFilter() class PathTokenizer(Tokenizer): """A simple tokenizer that given a string ``"/a/b/c"`` yields tokens ``["/a", "/a/b", "/a/b/c"]``. 
""" def __init__(self, expression="[^/]+"): self.expr = rcompile(expression) def __call__(self, value, positions=False, start_pos=0, **kwargs): assert isinstance(value, text_type), "%r is not unicode" % value token = Token(positions, **kwargs) pos = start_pos for match in self.expr.finditer(value): token.text = value[:match.end()] if positions: token.pos = pos pos += 1 yield token
bsd-3-clause
atsolakid/edx-platform
common/lib/xmodule/xmodule/modulestore/tests/sample_courses.py
100
9544
# encoding: utf-8 """ The data type and use of it for declaratively creating test courses. """ # used to create course subtrees in ModuleStoreTestCase.create_test_course # adds to self properties w/ the given block_id which hold the UsageKey for easy retrieval. # fields is a dictionary of keys and values. sub_tree is a collection of BlockInfo from collections import namedtuple import datetime BlockInfo = namedtuple('BlockInfo', 'block_id, category, fields, sub_tree') # pylint: disable=invalid-name default_block_info_tree = [ # pylint: disable=invalid-name BlockInfo( 'chapter_x', 'chapter', {}, [ BlockInfo( 'sequential_x1', 'sequential', {}, [ BlockInfo( 'vertical_x1a', 'vertical', {}, [ BlockInfo('problem_x1a_1', 'problem', {}, []), BlockInfo('problem_x1a_2', 'problem', {}, []), BlockInfo('problem_x1a_3', 'problem', {}, []), BlockInfo('html_x1a_1', 'html', {}, []), ] ) ] ) ] ), BlockInfo( 'chapter_y', 'chapter', {}, [ BlockInfo( 'sequential_y1', 'sequential', {}, [ BlockInfo( 'vertical_y1a', 'vertical', {}, [ BlockInfo('problem_y1a_1', 'problem', {}, []), BlockInfo('problem_y1a_2', 'problem', {}, []), BlockInfo('problem_y1a_3', 'problem', {}, []), ] ) ] ) ] ) ] # equivalent to toy course in xml TOY_BLOCK_INFO_TREE = [ BlockInfo( 'Overview', "chapter", {"display_name": "Overview"}, [ BlockInfo( "Toy_Videos", "videosequence", { "xml_attributes": {"filename": ["", None]}, "display_name": "Toy Videos", "format": "Lecture Sequence" }, [ BlockInfo( "secret:toylab", "html", { "data": "<b>Lab 2A: Superposition Experiment</b>\n\n\n<p>Isn't the toy course great?</p>\n\n<p>Let's add some markup that uses non-ascii characters.\n'For example, we should be able to write words like encyclop&aelig;dia, or foreign words like fran&ccedil;ais.\nLooking beyond latin-1, we should handle math symbols: &pi;r&sup2 &le; &#8734.\nAnd it shouldn't matter if we use entities or numeric codes &mdash; &Omega; &ne; &pi; &equiv; &#937; &#8800; &#960;.\n</p>\n\n", # pylint: disable=line-too-long 
"xml_attributes": {"filename": ["html/secret/toylab.xml", "html/secret/toylab.xml"]}, "display_name": "Toy lab" }, [] ), BlockInfo( "toyjumpto", "html", { "data": "<a href=\"/jump_to_id/vertical_test\">This is a link to another page and some Chinese 四節比分和七年前</a> <p>Some more Chinese 四節比分和七年前</p>\n", "xml_attributes": {"filename": ["html/toyjumpto.xml", "html/toyjumpto.xml"]} }, []), BlockInfo( "toyhtml", "html", { "data": "<a href='/static/handouts/sample_handout.txt'>Sample</a>", "xml_attributes": {"filename": ["html/toyhtml.xml", "html/toyhtml.xml"]} }, []), BlockInfo( "nonportable", "html", { "data": "<a href=\"/static/foo.jpg\">link</a>\n", "xml_attributes": {"filename": ["html/nonportable.xml", "html/nonportable.xml"]} }, []), BlockInfo( "nonportable_link", "html", { "data": "<a href=\"/jump_to_id/nonportable_link\">link</a>\n\n", "xml_attributes": {"filename": ["html/nonportable_link.xml", "html/nonportable_link.xml"]} }, []), BlockInfo( "badlink", "html", { "data": "<img src=\"/static//file.jpg\" />\n", "xml_attributes": {"filename": ["html/badlink.xml", "html/badlink.xml"]} }, []), BlockInfo( "with_styling", "html", { "data": "<p style=\"font:italic bold 72px/30px Georgia, serif; color: red; \">Red text here</p>", "xml_attributes": {"filename": ["html/with_styling.xml", "html/with_styling.xml"]} }, []), BlockInfo( "just_img", "html", { "data": "<img src=\"/static/foo_bar.jpg\" />", "xml_attributes": {"filename": ["html/just_img.xml", "html/just_img.xml"]} }, []), BlockInfo( "Video_Resources", "video", { "youtube_id_1_0": "1bK-WdDi6Qw", "display_name": "Video Resources" }, []), ]), BlockInfo( "Welcome", "video", {"data": "", "youtube_id_1_0": "p2Q6BrNhdh8", "display_name": "Welcome"}, [] ), BlockInfo( "video_123456789012", "video", {"data": "", "youtube_id_1_0": "p2Q6BrNhdh8", "display_name": "Test Video"}, [] ), BlockInfo( "video_4f66f493ac8f", "video", {"youtube_id_1_0": "p2Q6BrNhdh8"}, [] ) ] ), BlockInfo( "secret:magic", "chapter", { "xml_attributes": 
{"filename": ["chapter/secret/magic.xml", "chapter/secret/magic.xml"]} }, [ BlockInfo( "toyvideo", "video", {"youtube_id_1_0": "OEoXaMPEzfMA", "display_name": "toyvideo"}, [] ) ] ), BlockInfo( "poll_test", "chapter", {}, [ BlockInfo( "T1_changemind_poll_foo", "poll_question", { "question": "<p>Have you changed your mind? ’</p>", "answers": [{"text": "Yes", "id": "yes"}, {"text": "No", "id": "no"}], "xml_attributes": {"reset": "false", "filename": ["", None]}, "display_name": "Change your answer" }, [])] ), BlockInfo( "vertical_container", "chapter", { "xml_attributes": {"filename": ["chapter/vertical_container.xml", "chapter/vertical_container.xml"]} }, [ BlockInfo("vertical_sequential", "sequential", {}, [ BlockInfo("vertical_test", "vertical", { "xml_attributes": {"filename": ["vertical/vertical_test.xml", "vertical_test"]} }, [ BlockInfo( "sample_video", "video", { "youtube_id_1_25": "AKqURZnYqpk", "youtube_id_0_75": "JMD_ifUUfsU", "youtube_id_1_0": "OEoXaMPEzfM", "display_name": "default", "youtube_id_1_5": "DYpADpL7jAY" }, []), BlockInfo( "separate_file_video", "video", { "youtube_id_1_25": "AKqURZnYqpk", "youtube_id_0_75": "JMD_ifUUfsU", "youtube_id_1_0": "OEoXaMPEzfM", "display_name": "default", "youtube_id_1_5": "DYpADpL7jAY" }, []), BlockInfo( "video_with_end_time", "video", { "youtube_id_1_25": "AKqURZnYqpk", "display_name": "default", "youtube_id_1_0": "OEoXaMPEzfM", "end_time": datetime.timedelta(seconds=10), "youtube_id_1_5": "DYpADpL7jAY", "youtube_id_0_75": "JMD_ifUUfsU" }, []), BlockInfo( "T1_changemind_poll_foo_2", "poll_question", { "question": "<p>Have you changed your mind?</p>", "answers": [{"text": "Yes", "id": "yes"}, {"text": "No", "id": "no"}], "xml_attributes": {"reset": "false", "filename": ["", None]}, "display_name": "Change your answer" }, []), ]), BlockInfo("unicode", "html", { "data": "…", "xml_attributes": {"filename": ["", None]} }, []) ]), ] ), BlockInfo( "handout_container", "chapter", { "xml_attributes": {"filename": 
["chapter/handout_container.xml", "chapter/handout_container.xml"]} }, [ BlockInfo( "html_7e5578f25f79", "html", { "data": "<a href=\"/static/handouts/sample_handout.txt\"> handouts</a>", "xml_attributes": {"filename": ["", None]} }, [] ), ] ) ]
agpl-3.0
emedinaa/contentbox
third_party/django/utils/datetime_safe.py
179
2747
# Python's builtin datetime.strftime refuses dates before 1900. The date and
# datetime subclasses below route strftime() through the module-level helper
# so formatting works across the full "proleptic Gregorian" range.
#
# Based on code submitted to comp.lang.python by Andrew Dalke
#
# >>> datetime_safe.date(1850, 8, 2).strftime("%Y/%m/%d was a %A")
# '1850/08/02 was a Friday'

from datetime import date as real_date, datetime as real_datetime
import re
import time


class date(real_date):
    """Drop-in replacement for datetime.date with pre-1900 strftime support."""

    def strftime(self, fmt):
        # Delegate to the module-level strftime() helper, which emulates
        # formatting for years the C library cannot handle.
        return strftime(self, fmt)


class datetime(real_datetime):
    """Drop-in replacement for datetime.datetime with pre-1900 strftime support."""

    def strftime(self, fmt):
        # Same delegation as the date subclass above.
        return strftime(self, fmt)

    @classmethod
    def combine(cls, date, time):
        # Re-implemented so the combined value is an instance of this safe
        # subclass rather than the builtin datetime.
        return cls(date.year, date.month, date.day,
                   time.hour, time.minute, time.second,
                   time.microsecond, time.tzinfo)

    def date(self):
        # Return the safe date subclass, not the builtin one.
        return date(self.year, self.month, self.day)


def new_date(d):
    "Generate a safe date from a datetime.date object."
    return date(d.year, d.month, d.day)


def new_datetime(d):
    """
    Generate a safe datetime from a datetime.date or datetime.datetime object.
    """
    args = (d.year, d.month, d.day)
    if isinstance(d, real_datetime):
        # A real datetime carries a time component and tzinfo; preserve them.
        args += (d.hour, d.minute, d.second, d.microsecond, d.tzinfo)
    return datetime(*args)


# This library does not support strftime's "%s" or "%y" format strings.
# Allowed if there's an even number of "%"s because they are escaped.
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])") def _findall(text, substr): # Also finds overlaps sites = [] i = 0 while 1: j = text.find(substr, i) if j == -1: break sites.append(j) i=j+1 return sites def strftime(dt, fmt): if dt.year >= 1900: return super(type(dt), dt).strftime(fmt) illegal_formatting = _illegal_formatting.search(fmt) if illegal_formatting: raise TypeError("strftime of dates before 1900 does not handle" + illegal_formatting.group(0)) year = dt.year # For every non-leap year century, advance by # 6 years to get into the 28-year repeat cycle delta = 2000 - year off = 6 * (delta // 100 + delta // 400) year = year + off # Move to around the year 2000 year = year + ((2000 - year) // 28) * 28 timetuple = dt.timetuple() s1 = time.strftime(fmt, (year,) + timetuple[1:]) sites1 = _findall(s1, str(year)) s2 = time.strftime(fmt, (year+28,) + timetuple[1:]) sites2 = _findall(s2, str(year+28)) sites = [] for site in sites1: if site in sites2: sites.append(site) s = s1 syear = "%04d" % (dt.year,) for site in sites: s = s[:site] + syear + s[site+4:] return s
apache-2.0
Lujeni/ansible
lib/ansible/modules/monitoring/sensu/sensu_subscription.py
36
4897
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2014, Anders Ingemann <aim@secoya.dk> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: sensu_subscription short_description: Manage Sensu subscriptions version_added: 2.2 description: - Manage which I(sensu channels) a machine should subscribe to options: name: description: - The name of the channel required: true state: description: - Whether the machine should subscribe or unsubscribe from the channel choices: [ 'present', 'absent' ] required: false default: present path: description: - Path to the subscriptions json file required: false default: /etc/sensu/conf.d/subscriptions.json backup: description: - Create a backup file (if yes), including the timestamp information so you - can get the original file back if you somehow clobbered it incorrectly. 
type: bool required: false default: no requirements: [ ] author: Anders Ingemann (@andsens) ''' RETURN = ''' reasons: description: the reasons why the module changed or did not change something returned: success type: list sample: ["channel subscription was absent and state is `present'"] ''' EXAMPLES = ''' # Subscribe to the nginx channel - name: subscribe to nginx checks sensu_subscription: name=nginx # Unsubscribe from the common checks channel - name: unsubscribe from common checks sensu_subscription: name=common state=absent ''' import json import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native def sensu_subscription(module, path, name, state='present', backup=False): changed = False reasons = [] try: config = json.load(open(path)) except IOError as e: if e.errno == 2: # File not found, non-fatal if state == 'absent': reasons.append('file did not exist and state is `absent\'') return changed, reasons config = {} else: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) except ValueError: msg = '{path} contains invalid JSON'.format(path=path) module.fail_json(msg=msg) if 'client' not in config: if state == 'absent': reasons.append('`client\' did not exist and state is `absent\'') return changed, reasons config['client'] = {} changed = True reasons.append('`client\' did not exist') if 'subscriptions' not in config['client']: if state == 'absent': reasons.append('`client.subscriptions\' did not exist and state is `absent\'') return changed, reasons config['client']['subscriptions'] = [] changed = True reasons.append('`client.subscriptions\' did not exist') if name not in config['client']['subscriptions']: if state == 'absent': reasons.append('channel subscription was absent') return changed, reasons config['client']['subscriptions'].append(name) changed = True reasons.append('channel subscription was absent and state is `present\'') else: if state == 'absent': 
config['client']['subscriptions'].remove(name) changed = True reasons.append('channel subscription was present and state is `absent\'') if changed and not module.check_mode: if backup: module.backup_local(path) try: open(path, 'w').write(json.dumps(config, indent=2) + '\n') except IOError as e: module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)), exception=traceback.format_exc()) return changed, reasons def main(): arg_spec = {'name': {'type': 'str', 'required': True}, 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'}, 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, 'backup': {'type': 'bool', 'default': 'no'}, } module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) path = module.params['path'] name = module.params['name'] state = module.params['state'] backup = module.params['backup'] changed, reasons = sensu_subscription(module, path, name, state, backup) module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons) if __name__ == '__main__': main()
gpl-3.0
insomnia-lab/calibre
src/calibre/devices/folder_device/driver.py
13
3191
'''
Created on 15 May 2010

@author: charles
'''

import os

from calibre.devices.usbms.driver import USBMS, BookList
from calibre.ebooks import BOOK_EXTENSIONS

# This class is added to the standard device plugin chain, so that it can
# be configured. It has invalid vendor_id etc, so it will never match a
# device. The 'real' FOLDER_DEVICE will use the config from it.
class FOLDER_DEVICE_FOR_CONFIG(USBMS):
    # Configuration-only placeholder: the 0xffff ids below cannot match any
    # real USB hardware, so this plugin exists purely to own the settings
    # that FOLDER_DEVICE reads back via settings()/config_widget().
    name           = 'Folder Device Interface'
    gui_name       = 'Folder Device'
    description    = _('Use an arbitrary folder as a device.')
    author         = 'John Schember/Charles Haley'
    supported_platforms = ['windows', 'osx', 'linux']
    FORMATS     = list(BOOK_EXTENSIONS)
    VENDOR_ID   = [0xffff]
    PRODUCT_ID  = [0xffff]
    BCD         = [0xffff]
    DEVICE_PLUGBOARD_NAME = 'FOLDER_DEVICE'
    SUPPORTS_SUB_DIRS = True


class FOLDER_DEVICE(USBMS):
    '''Device driver that exposes an ordinary filesystem folder as if it
    were a connected e-book reader.'''
    type = _('Device Interface')

    name           = 'Folder Device Interface'
    gui_name       = 'Folder Device'
    description    = _('Use an arbitrary folder as a device.')
    author         = 'John Schember/Charles Haley'
    supported_platforms = ['windows', 'osx', 'linux']
    # Share the format list with the config placeholder so both stay in sync.
    FORMATS = FOLDER_DEVICE_FOR_CONFIG.FORMATS

    VENDOR_ID   = [0xffff]
    PRODUCT_ID  = [0xffff]
    BCD         = [0xffff]
    DEVICE_PLUGBOARD_NAME = 'FOLDER_DEVICE'

    THUMBNAIL_HEIGHT = 68  # Height for thumbnails on device

    CAN_SET_METADATA = ['title', 'authors']
    SUPPORTS_SUB_DIRS = True

    #: Icon for this device
    icon = I('devices/folder.png')
    METADATA_CACHE = '.metadata.calibre'
    DRIVEINFO = '.driveinfo.calibre'

    _main_prefix = ''
    _card_a_prefix = None
    _card_b_prefix = None

    is_connected = False

    def __init__(self, path):
        # Only an existing directory is acceptable as the backing store.
        if not os.path.isdir(path):
            raise IOError, 'Path is not a folder'
        path = USBMS.normalize_path(path)
        # The prefix must always end with the OS path separator so later
        # path joins are simple concatenations.
        if path.endswith(os.sep):
            self._main_prefix = path
        else:
            self._main_prefix = path + os.sep
        self.booklist_class = BookList
        self.is_connected = True

    def reset(self, key='-1', log_packets=False, report_progress=None,
              detected_device=None):
        # Nothing to reset for a plain folder.
        pass

    def unmount_device(self):
        # Forget the folder and mark ourselves disconnected.
        self._main_prefix = ''
        self.is_connected = False

    def is_usb_connected(self, devices_on_system, debug=False,
                         only_presence=False):
        # A folder "device" is connected for as long as __init__ succeeded
        # and eject()/unmount_device() has not been called.
        return self.is_connected, self

    def open(self, connected_device, library_uuid):
        self.current_library_uuid = library_uuid
        if not self._main_prefix:
            return False
        return True

    def set_progress_reporter(self, report_progress):
        self.report_progress = report_progress

    def card_prefix(self, end_session=True):
        # A folder device never has memory cards.
        return (None, None)

    def eject(self):
        self.is_connected = False

    @classmethod
    def settings(self):
        # Delegate to the configuration placeholder plugin (see above).
        return FOLDER_DEVICE_FOR_CONFIG._config().parse()

    @classmethod
    def config_widget(cls):
        return FOLDER_DEVICE_FOR_CONFIG.config_widget()

    @classmethod
    def save_settings(cls, config_widget):
        return FOLDER_DEVICE_FOR_CONFIG.save_settings(config_widget)
gpl-3.0
subutai/htmresearch
projects/location_layer/location_module_experiment/three_layer_tracing.py
4
10150
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

"""
Connect the Grid2DLocationExperiment to the locationModuleInference.js
visualization.
"""

from __future__ import print_function
from collections import defaultdict
import json

import numpy as np

from grid_2d_location_experiment import Grid2DLocationExperimentMonitor


class Grid2DLocationExperimentVisualizer(Grid2DLocationExperimentMonitor):
  """
  Logs the state of the world and the state of each layer to a file.

  NOTE: the output is a line-oriented protocol consumed by
  locationModuleInference.js -- the order and grouping of the print()
  calls below is part of the contract with that consumer.
  """

  def __init__(self, exp, out, includeSynapses=True):
    # exp: the experiment being monitored; out: a writable text stream;
    # includeSynapses: when True, per-segment synapse detail is logged too.
    self.exp = exp
    self.out = out
    self.includeSynapses = includeSynapses

    self.locationRepresentations = exp.locationRepresentations
    self.inputRepresentations = exp.inputRepresentations
    self.objectRepresentations = exp.objectRepresentations

    self.locationModules = exp.locationModules
    self.inputLayer = exp.inputLayer
    self.objectLayer = exp.objectLayer

    # Register for experiment callbacks; token is needed to unsubscribe.
    self.subscriberToken = exp.addMonitor(self)

    # World dimensions
    print(json.dumps({"width": exp.worldDimensions[1],
                      "height": exp.worldDimensions[0]}),
          file=self.out)

    # Feature name -> display color for the visualization.
    print(json.dumps({"A": "red", "B": "blue", "C": "gray"}), file=self.out)

    print(json.dumps(exp.objects), file=self.out)

    # Static description of each grid-cell module.
    print(json.dumps([{"cellDimensions": module.cellDimensions.tolist(),
                       "moduleMapDimensions": module.moduleMapDimensions.tolist(),
                       "orientation": module.orientation}
                      for module in exp.locationModules]),
          file=self.out)

    print("objectPlacements", file=self.out)
    print(json.dumps(exp.objectPlacements), file=self.out)

  def __enter__(self, *args):
    pass

  def __exit__(self, *args):
    self.unsubscribe()

  def unsubscribe(self):
    """Stop receiving experiment callbacks."""
    self.exp.removeMonitor(self.subscriberToken)
    self.subscriberToken = None

  def beforeSense(self, featureSDR):
    """Log the sensed feature SDR and the feature names it fully matches."""
    print("sense", file=self.out)
    print(json.dumps(featureSDR.tolist()), file=self.out)
    print(json.dumps(
      [k for k, sdr in self.exp.features.iteritems()
       if np.intersect1d(featureSDR, sdr).size == sdr.size]),
          file=self.out)

  def beforeMove(self, deltaLocation):
    print("move", file=self.out)
    print(json.dumps(list(deltaLocation)), file=self.out)

  def afterReset(self):
    print("reset", file=self.out)

  def markSensoryRepetition(self):
    print("sensoryRepetition", file=self.out)

  def afterWorldLocationChanged(self, locationInWorld):
    print("locationInWorld", file=self.out)
    print(json.dumps(locationInWorld), file=self.out)

  def afterLocationShift(self, displacement):
    """Log active location cells/phases and decoded locations after a shift."""
    print("shift", file=self.out)
    cellsByModule = [module.getActiveCells().tolist()
                     for module in self.locationModules]
    print(json.dumps(cellsByModule), file=self.out)

    phasesByModule = []
    for module in self.locationModules:
      phasesByModule.append(module.activePhases.tolist())
    print(json.dumps(phasesByModule), file=self.out)

    activeLocationCells = self.exp.getActiveLocationCells()

    # For every known (object, location) SDR, report what fraction of it is
    # contained in the currently active location cells.
    decodings = []
    for (objectName, location), sdr in self.locationRepresentations.iteritems():
      amountContained = (np.intersect1d(sdr, activeLocationCells).size /
                         float(sdr.size))
      decodings.append([objectName, location[0], location[1], amountContained])
    print(json.dumps(decodings), file=self.out)

  def afterLocationAnchor(self, anchorInput, **kwargs):
    """Log location-layer state (optionally with active synapses) after
    anchoring on sensory input."""
    print("locationLayer", file=self.out)

    cellsByModule = []
    for module in self.locationModules:
      activeCells = module.getActiveCells()
      if self.includeSynapses:
        # For each active cell, collect the active synapses of each of its
        # active segments (synapses connected to cells in anchorInput).
        segmentsForActiveCellsDict = defaultdict(list)
        activeSegments = module.connections.filterSegmentsByCell(
          module.activeSegments, activeCells)
        cellForActiveSegments = (
          module.connections.mapSegmentsToCells(activeSegments))
        for i, segment in enumerate(activeSegments):
          connectedSynapses = np.where(
            module.connections.matrix.getRow(segment)
            >= module.connectedPermanence)[0]
          activeSynapses = np.intersect1d(connectedSynapses, anchorInput)
          segmentsForActiveCellsDict[cellForActiveSegments[i]].append(
            activeSynapses.tolist())
        segmentsForActiveCells = [segmentsForActiveCellsDict[cell]
                                  for cell in activeCells]
        cellsByModule.append([activeCells.tolist(),
                              {"inputLayer": segmentsForActiveCells}])
      else:
        cellsByModule.append([activeCells.tolist()])
    print(json.dumps(cellsByModule), file=self.out)

    phasesByModule = []
    for module in self.locationModules:
      phasesByModule.append(module.activePhases.tolist())
    print(json.dumps(phasesByModule), file=self.out)

    activeLocationCells = self.exp.getActiveLocationCells()

    decodings = []
    for (objectName, location), sdr in self.locationRepresentations.iteritems():
      amountContained = (np.intersect1d(sdr, activeLocationCells).size /
                         float(sdr.size))
      decodings.append([objectName, location[0], location[1],
                        amountContained])
    print(json.dumps(decodings), file=self.out)

  def getInputSegments(self, cells, basalInput, apicalInput):
    """Return, for each cell, the active synapses of its active basal and
    apical segments (keyed by the layer each segment listens to)."""
    basalSegmentsForCellDict = defaultdict(list)
    basalSegments = self.inputLayer.basalConnections.filterSegmentsByCell(
      self.inputLayer.activeBasalSegments, cells)
    cellForBasalSegment = self.inputLayer.basalConnections.mapSegmentsToCells(
      basalSegments)
    for i, segment in enumerate(basalSegments):
      connectedSynapses = np.where(
        self.inputLayer.basalConnections.matrix.getRow(
          segment) >= self.inputLayer.connectedPermanence)[0]
      activeSynapses = np.intersect1d(connectedSynapses, basalInput)
      basalSegmentsForCellDict[cellForBasalSegment[i]].append(
        activeSynapses.tolist())

    apicalSegmentsForCellDict = defaultdict(list)
    apicalSegments = self.inputLayer.apicalConnections.filterSegmentsByCell(
      self.inputLayer.activeApicalSegments, cells)
    cellForApicalSegment = (
      self.inputLayer.apicalConnections.mapSegmentsToCells(apicalSegments))
    for i, segment in enumerate(apicalSegments):
      connectedSynapses = np.where(
        self.inputLayer.apicalConnections.matrix.getRow(segment)
        >= self.inputLayer.connectedPermanence)[0]
      activeSynapses = np.intersect1d(connectedSynapses, apicalInput)
      apicalSegmentsForCellDict[cellForApicalSegment[i]].append(
        activeSynapses.tolist())

    return {
      "locationLayer": [basalSegmentsForCellDict[cell] for cell in cells],
      "objectLayer": [apicalSegmentsForCellDict[cell] for cell in cells],
    }

  def getInputDecodings(self, activeCells):
    """Return [objectName, x, y, fractionContained] for every known input
    representation, measured against activeCells."""
    decodings = []
    for (objectName, location, feature), sdr in self.inputRepresentations.iteritems():
      amountContained = (np.intersect1d(sdr, activeCells).size /
                         float(sdr.size))
      decodings.append([objectName, location[0], location[1], amountContained])
    return decodings

  def afterInputCompute(self, activeColumns, basalInput, apicalInput, **kwargs):
    """Log input-layer active/predicted cells (optionally with synapses)
    and their decodings."""
    activeCells = self.inputLayer.getActiveCells().tolist()
    predictedCells = self.inputLayer.getPredictedCells().tolist()
    print("inputLayer", file=self.out)
    if self.includeSynapses:
      segmentsForActiveCells = self.getInputSegments(activeCells, basalInput,
                                                     apicalInput)
      segmentsForPredictedCells = self.getInputSegments(predictedCells,
                                                        basalInput,
                                                        apicalInput)
      print(json.dumps(
        [activeCells, predictedCells,
         segmentsForActiveCells, segmentsForPredictedCells]),
            file=self.out)
    else:
      print(json.dumps(
        [activeCells, predictedCells]),
            file=self.out)

    print(json.dumps({
      "activeCellDecodings": self.getInputDecodings(activeCells),
      "predictedCellDecodings": self.getInputDecodings(predictedCells)
    }), file=self.out)

  def afterObjectCompute(self, feedforwardInput, **kwargs):
    """Log object-layer active cells, their proximal synapses onto the
    input layer, and the fully-matched object names."""
    activeCells = self.objectLayer.getActiveCells()
    print("objectLayer", file=self.out)
    if self.includeSynapses:
      segmentsForActiveCells = [[] for _ in activeCells]
      for i, cell in enumerate(activeCells):
        connectedSynapses = np.where(
          self.objectLayer.proximalPermanences.getRow(cell)
          >= self.objectLayer.connectedPermanenceProximal)[0]
        activeSynapses = np.intersect1d(connectedSynapses, feedforwardInput)
        segmentsForActiveCells[i].append(activeSynapses.tolist())
      print(json.dumps([activeCells.tolist(),
                        {"inputLayer": segmentsForActiveCells}]),
            file=self.out)
    else:
      print(json.dumps([activeCells.tolist()]), file=self.out)

    decodings = [k for k, sdr in self.objectRepresentations.iteritems()
                 if np.intersect1d(activeCells, sdr).size == sdr.size]
    print(json.dumps(decodings), file=self.out)
agpl-3.0
cherusk/ansible
lib/ansible/modules/windows/win_say.py
45
4641
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: win_say version_added: "2.3" short_description: Text to speech module for Windows to speak messages and optionally play sounds description: - Uses .NET libraries to convert text to speech and optionally play .wav sounds. Audio Service needs to be running and some kind of speakers or headphones need to be attached to the windows target(s) for the speech to be audible. options: msg: description: - The text to be spoken. Use either msg or msg_file. Optional so that you can use this module just to play sounds. required: false default: none msg_file: description: - Full path to a windows format text file containing the text to be spokend. Use either msg or msg_file. Optional so that you can use this module just to play sounds. required: false default: none voice: description: - Which voice to use. See notes for how to discover installed voices. If the requested voice is not available the default voice will be used. 
Example voice names from Windows 10 are 'Microsoft Zira Desktop' and 'Microsoft Hazel Desktop'. required: false default: system default voice speech_speed: description: - How fast or slow to speak the text. Must be an integer value in the range -10 to 10. -10 is slowest, 10 is fastest. required: false default: 0 start_sound_path: description: - Full path to a C(.wav) file containing a sound to play before the text is spoken. Useful on conference calls to alert other speakers that ansible has something to say. required: false default: null end_sound_path: description: - Full path to a C(.wav) file containing a sound to play after the text has been spoken. Useful on conference calls to alert other speakers that ansible has finished speaking. required: false default: null author: "Jon Hawkesworth (@jhawkesworth)" notes: - Needs speakers or headphones to do anything useful. - To find which voices are installed, run the following powershell Add-Type -AssemblyName System.Speech $speech = New-Object -TypeName System.Speech.Synthesis.SpeechSynthesizer $speech.GetInstalledVoices() | ForEach-Object { $_.VoiceInfo } $speech.Dispose() - Speech can be surprisingly slow, so its best to keep message text short. ''' EXAMPLES = r''' # Warn of impending deployment - win_say: msg: Warning, deployment commencing in 5 minutes, please log out. # Using a different voice and a start sound - win_say: start_sound_path: C:\Windows\Media\ding.wav msg: Warning, deployment commencing in 5 minutes, please log out. 
    voice: Microsoft Hazel Desktop

# example with start and end sound
- win_say:
    start_sound_path: C:\Windows\Media\Windows Balloon.wav
    msg: New software installed
    end_sound_path: C:\Windows\Media\chimes.wav

# text from file example
- win_say:
    start_sound_path: C:\Windows\Media\Windows Balloon.wav
    msg_file: AppData\Local\Temp\morning_report.txt
    end_sound_path: C:\Windows\Media\chimes.wav
'''

RETURN = r'''
message_text:
    description: the text that the module attempted to speak
    returned: success
    type: string
    sample: "Warning, deployment commencing in 5 minutes."
voice:
    description: the voice used to speak the text.
    returned: success
    type: string
    sample: Microsoft Hazel Desktop
voice_info:
    description: a message explaining why the requested voice could not be loaded and which voice was used instead.
    returned: when requested voice could not be loaded
    type: string
    sample: Could not load voice TestVoice, using system default voice
'''
gpl-3.0
nubark/odoo
addons/account_budget/wizard/account_budget_analytic.py
47
1099
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

import time

from openerp.osv import fields, osv


class account_budget_analytic(osv.osv_memory):
    """Transient wizard that collects a date range and launches the
    analytic-account budget report for the currently selected records."""

    _name = 'account.budget.analytic'
    _description = 'Account Budget report for analytic account'

    _columns = {
        'date_from': fields.date('Start of period', required=True),
        'date_to': fields.date('End of period', required=True),
    }
    _defaults = {
        # Default period: January 1st of the current year through today.
        'date_from': lambda *args: time.strftime('%Y-01-01'),
        'date_to': lambda *args: time.strftime('%Y-%m-%d'),
    }

    def check_report(self, cr, uid, ids, context=None):
        """Build the report payload from the wizard values and return the
        report action for the analytic accounts selected by the caller."""
        if context is None:
            context = {}
        wizard_values = self.read(cr, uid, ids, context=context)[0]
        active_ids = context.get('active_ids', [])
        report_data = {
            'ids': active_ids,
            'model': 'account.analytic.account',
            'form': wizard_values,
        }
        # The report engine expects the target ids inside the form as well.
        report_data['form']['ids'] = active_ids
        return self.pool['report'].get_action(
            cr, uid, [], 'account_budget.report_analyticaccountbudget',
            data=report_data, context=context)
gpl-3.0
vidyacraghav/zerorpc-python
zerorpc/patterns.py
103
3937
# -*- coding: utf-8 -*- # Open Source Initiative OSI - The MIT License (MIT):Licensing # # The MIT License (MIT) # Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com) # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is furnished to do # so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
class ReqRep:
    """Request/reply messaging pattern: one request event in, one 'OK'
    reply event out."""

    def process_call(self, context, bufchan, req_event, functor):
        # Server side: run the requested function and reply with its result,
        # invoking the before/after server hooks around the call.
        context.hook_server_before_exec(req_event)
        result = functor(*req_event.args)
        rep_event = bufchan.create_event('OK', (result,),
                                         context.hook_get_task_context())
        context.hook_server_after_exec(req_event, rep_event)
        bufchan.emit_event(rep_event)

    def accept_answer(self, event):
        # Any single reply event answers a req/rep call.
        return True

    def process_answer(self, context, bufchan, req_event, rep_event,
                       handle_remote_error):
        # Client side: raise remote errors locally, otherwise unwrap the
        # single result value and close the channel.
        if rep_event.name == 'ERR':
            exception = handle_remote_error(rep_event)
            context.hook_client_after_request(req_event, rep_event, exception)
            raise exception
        context.hook_client_after_request(req_event, rep_event)
        bufchan.close()
        result = rep_event.args[0]
        return result


class ReqStream:
    """Request/stream messaging pattern: one request in, a sequence of
    'STREAM' events out, terminated by 'STREAM_DONE'."""

    def process_call(self, context, bufchan, req_event, functor):
        # Server side: iterate the functor's result, emitting one STREAM
        # event per item, then a STREAM_DONE sentinel.
        context.hook_server_before_exec(req_event)
        xheader = context.hook_get_task_context()
        for result in iter(functor(*req_event.args)):
            bufchan.emit('STREAM', result, xheader)
        done_event = bufchan.create_event('STREAM_DONE', None, xheader)
        # NOTE: "We" made the choice to call the hook once the stream is done,
        # the other choice was to call it at each iteration. I don't think that
        # one choice is better than the other, so I'm fine with changing this
        # or adding the server_after_iteration and client_after_iteration hooks.
        context.hook_server_after_exec(req_event, done_event)
        bufchan.emit_event(done_event)

    def accept_answer(self, event):
        return event.name in ('STREAM', 'STREAM_DONE')

    def process_answer(self, context, bufchan, req_event, rep_event,
                       handle_remote_error):
        # Client side: return a generator yielding each STREAM payload; the
        # channel is closed lazily once STREAM_DONE (or an error) arrives.

        def is_stream_done(rep_event):
            return rep_event.name == 'STREAM_DONE'
        bufchan.on_close_if = is_stream_done

        def iterator(req_event, rep_event):
            while rep_event.name == 'STREAM':
                # Like in process_call, we made the choice to call the
                # after_exec hook only when the stream is done.
                yield rep_event.args
                rep_event = bufchan.recv()
            if rep_event.name == 'ERR':
                exception = handle_remote_error(rep_event)
                context.hook_client_after_request(req_event, rep_event,
                                                  exception)
                raise exception
            context.hook_client_after_request(req_event, rep_event)
            bufchan.close()
        return iterator(req_event, rep_event)


# Ordered by specificity: accept_answer() is tried against each pattern in
# turn, so the stream pattern (which matches only STREAM events) must come
# before the catch-all req/rep pattern.
patterns_list = [ReqStream(), ReqRep()]
mit
ueno/ibus
setup/enginecombobox.py
2
5814
# vim:set et sts=4 sw=4:
#
# ibus - The Input Bus
#
# Copyright (c) 2007-2010 Peng Huang <shawn.p.huang@gmail.com>
# Copyright (c) 2007-2010 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA  02111-1307  USA

import locale

from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import IBus
from gi.repository import Pango

from icon import load_icon
from i18n import _, N_


class EngineComboBox(Gtk.ComboBox):
    """Combo box listing input-method engines grouped by language.

    The backing TreeStore holds a single object column whose rows are
    either: the int 0 (the "Select an input method" placeholder), a
    language-name string (a group header), or an engine object (a leaf).
    The cell-data callbacks below dispatch on those three types.
    """
    __gtype_name__ = 'EngineComboBox'
    __gproperties__ = {
        'active-engine' : (
            object,
            'selected engine',
            'selected engine',
            GObject.ParamFlags.READABLE)
    }

    def __init__(self):
        super(EngineComboBox, self).__init__()
        self.connect("notify::active", self.__notify_active_cb)

        self.__model = None

        # Icon column (engine rows only).
        renderer = Gtk.CellRendererPixbuf()
        renderer.set_property("xalign", 0)
        renderer.set_property("xpad", 2)
        self.pack_start(renderer, False)
        self.set_cell_data_func(renderer, self.__icon_cell_data_cb, None)

        # Text column (placeholder, language headers and engine names).
        renderer = Gtk.CellRendererText()
        renderer.set_property("xalign", 0)
        renderer.set_property("xpad", 2)
        self.pack_start(renderer, True)
        self.set_cell_data_func(renderer, self.__name_cell_data_cb, None)

    def set_engines(self, engines):
        """Rebuild the model from *engines*, grouped by language name."""
        self.__model = Gtk.TreeStore(object)

        # Row 0 is the int 0: the "Select an input method" placeholder.
        iter1 = self.__model.append(None)
        self.__model.set(iter1, 0, 0)
        langs = {}
        for e in engines:
            l = IBus.get_language_name(e.get_language())
            if l == None:
                l = ""
            if l not in langs:
                langs[l] = []
            langs[l].append(e)

        keys = langs.keys()
        keys.sort(locale.strcoll)
        loc = locale.getlocale()[0]
        # None on C locale
        if loc == None:
            loc = 'en_US'
        current_lang = IBus.get_language_name(loc)
        # move current language to the first place
        if current_lang in keys:
            keys.remove(current_lang)
            keys.insert(0, current_lang)

        #add "Others" to the end of the combo box
        if IBus.get_language_name("Other") in keys:
            keys.remove(IBus.get_language_name("Other"))
            keys += [IBus.get_language_name("Other")]
        for l in keys:
            iter1 = self.__model.append(None)
            self.__model.set(iter1, 0, l)
            def cmp_engine(a, b):
                # Higher rank first; equal ranks fall back to locale-aware
                # alphabetical order of the long name.
                if a.get_rank() == b.get_rank():
                    return locale.strcoll(a.get_longname(), b.get_longname())
                return int(b.get_rank() - a.get_rank())
            langs[l].sort(cmp_engine)
            for e in langs[l]:
                iter2 = self.__model.append(iter1)
                self.__model.set(iter2, 0, e)

        self.set_model(self.__model)
        self.set_active(0)

    def __icon_cell_data_cb(self, celllayout, renderer, model, iter, data):
        # Show an icon only for engine rows; headers (str/unicode) and the
        # placeholder (int) get no pixbuf.
        engine = self.__model.get_value(iter, 0)

        if isinstance(engine, str) or isinstance (engine, unicode):
            renderer.set_property("visible", False)
            renderer.set_property("sensitive", False)
        elif isinstance(engine, int):
            renderer.set_property("visible", False)
            renderer.set_property("sensitive", False)
        else:
            renderer.set_property("visible", True)
            renderer.set_property("sensitive", True)
            pixbuf = load_icon(engine.get_icon(), Gtk.IconSize.LARGE_TOOLBAR)
            renderer.set_property("pixbuf", pixbuf)

    def __name_cell_data_cb(self, celllayout, renderer, model, iter, data):
        # Language headers are insensitive plain text; the placeholder shows
        # a prompt; engines show their long name, bold when ranked.
        engine = self.__model.get_value(iter, 0)

        if isinstance (engine, str) or isinstance (engine, unicode):
            renderer.set_property("sensitive", False)
            renderer.set_property("text", engine)
            renderer.set_property("weight", Pango.Weight.NORMAL)
        elif isinstance(engine, int):
            renderer.set_property("sensitive", True)
            renderer.set_property("text", _("Select an input method"))
            renderer.set_property("weight", Pango.Weight.NORMAL)
        else:
            renderer.set_property("sensitive", True)
            renderer.set_property("text", engine.get_longname())
            renderer.set_property("weight",
                Pango.Weight.BOLD if engine.get_rank() > 0 else Pango.Weight.NORMAL)

    def __notify_active_cb(self, combobox, property):
        # Re-expose the GTK 'active' change as our 'active-engine' property.
        self.notify("active-engine")

    def do_get_property(self, property):
        if property.name == "active-engine":
            i = self.get_active()
            # 0 is the placeholder row, -1 means no selection.
            if i == 0 or i == -1:
                return None
            iter = self.get_active_iter()
            return self.get_model()[iter][0]
        else:
            raise AttributeError, 'unknown property %s' % property.name

    def get_active_engine(self):
        """Return the selected engine object, or None if none is selected."""
        return self.get_property("active-engine")


if __name__ == "__main__":
    # Minimal manual test: show the combo with a single Chinese engine.
    combo = EngineComboBox()
    combo.set_engines([IBus.EngineDesc(language="zh")])
    w = Gtk.Window()
    w.add(combo)
    w.show_all()
    Gtk.main()
lgpl-2.1
zhumingliang1209/Ardupilot
ardupilot/Tools/LogAnalyzer/tests/TestIMUMatch.py
100
4071
from LogAnalyzer import Test,TestResult
import DataflashLog

from math import sqrt


class TestIMUMatch(Test):
    '''test for consistency between the primary and secondary IMU, when a second IMU is logged'''

    def __init__(self):
        Test.__init__(self)
        self.name = "IMU Mismatch"

    def run(self, logdata, verbose):
        # Tuning parameters: filtered accel disagreement (m/s/s) above which
        # we warn or fail, and the low-pass filter time constant (seconds)
        # used to smooth the instantaneous per-axis differences.
        warn_threshold = .75
        fail_threshold = 1.5
        filter_tc = 5.0

        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD

        # A single-IMU log is not a failure -- the test simply doesn't apply.
        if ("IMU" in logdata.channels) and (not "IMU2" in logdata.channels):
            self.result.status = TestResult.StatusType.NA
            self.result.statusMessage = "No IMU2"
            return

        if (not "IMU" in logdata.channels) or (not "IMU2" in logdata.channels):
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No IMU log data"
            return

        imu1 = logdata.channels["IMU"]
        imu2 = logdata.channels["IMU2"]

        # Determine which timestamp field this log format uses.
        # NOTE(review): the probe is done against the GPS channel, as in the
        # original code -- presumably all channels share the same time field
        # name; confirm against DataflashLog.
        timeLabel = None
        for i in 'TimeMS','TimeUS','Time':
            if i in logdata.channels["GPS"]:
                timeLabel = i
                break

        imu1_timems = imu1[timeLabel].listData
        imu1_accx = imu1["AccX"].listData
        imu1_accy = imu1["AccY"].listData
        imu1_accz = imu1["AccZ"].listData

        imu2_timems = imu2[timeLabel].listData
        imu2_accx = imu2["AccX"].listData
        imu2_accy = imu2["AccY"].listData
        imu2_accz = imu2["AccZ"].listData

        # Timestamp scale factor to seconds (ms by default, us for TimeUS).
        imu_multiplier = 1.0E-3
        if timeLabel == 'TimeUS':
            imu_multiplier = 1.0E-6

        imu1 = []
        imu2 = []

        for i in range(len(imu1_timems)):
            imu1.append({'t': imu1_timems[i][1]*imu_multiplier,
                         'x': imu1_accx[i][1],
                         'y': imu1_accy[i][1],
                         'z': imu1_accz[i][1]})

        for i in range(len(imu2_timems)):
            imu2.append({'t': imu2_timems[i][1]*imu_multiplier,
                         'x': imu2_accx[i][1],
                         'y': imu2_accy[i][1],
                         'z': imu2_accz[i][1]})

        imu1.sort(key=lambda x: x['t'])
        imu2.sort(key=lambda x: x['t'])

        imu2_index = 0
        last_t = None
        xdiff_filtered = 0
        ydiff_filtered = 0
        zdiff_filtered = 0
        max_diff_filtered = 0

        for i in range(len(imu1)):
            # Find the imu2 sample closest in time to imu1[i].
            t = imu1[i]['t']
            dt = 0 if last_t is None else t-last_t
            dt = min(dt, .1)

            next_imu2 = None
            # BUGFIX: the inner loop previously reused the loop variable 'i',
            # clobbering the outer index so that the differences below were
            # computed against the wrong imu1 sample.
            for j in range(imu2_index, len(imu2)):
                next_imu2 = imu2[j]
                imu2_index = j
                if next_imu2['t'] >= t:
                    break
            prev_imu2 = imu2[imu2_index-1]
            closest_imu2 = next_imu2 if abs(next_imu2['t']-t) < abs(prev_imu2['t']-t) else prev_imu2

            xdiff = imu1[i]['x']-closest_imu2['x']
            ydiff = imu1[i]['y']-closest_imu2['y']
            zdiff = imu1[i]['z']-closest_imu2['z']

            # First-order low-pass filter of the per-axis differences.
            xdiff_filtered += (xdiff-xdiff_filtered)*dt/filter_tc
            ydiff_filtered += (ydiff-ydiff_filtered)*dt/filter_tc
            zdiff_filtered += (zdiff-zdiff_filtered)*dt/filter_tc

            # BUGFIX: only 'from math import sqrt' is imported, so the
            # original 'math.sqrt(...)' raised NameError; call sqrt directly.
            diff_filtered = sqrt(xdiff_filtered**2+ydiff_filtered**2+zdiff_filtered**2)
            max_diff_filtered = max(max_diff_filtered, diff_filtered)
            last_t = t

        if max_diff_filtered > fail_threshold:
            self.result.statusMessage = "Check vibration or accelerometer calibration. (Mismatch: %.2f, WARN: %.2f, FAIL: %.2f)" % (max_diff_filtered,warn_threshold,fail_threshold)
            self.result.status = TestResult.StatusType.FAIL
        elif max_diff_filtered > warn_threshold:
            self.result.statusMessage = "Check vibration or accelerometer calibration. (Mismatch: %.2f, WARN: %.2f, FAIL: %.2f)" % (max_diff_filtered,warn_threshold,fail_threshold)
            self.result.status = TestResult.StatusType.WARN
        else:
            self.result.statusMessage = "(Mismatch: %.2f, WARN: %.2f, FAIL: %.2f)" % (max_diff_filtered,warn_threshold, fail_threshold)
gpl-3.0
qwefi/nova
nova/tests/api/openstack/compute/plugins/v3/test_server_diagnostics.py
3
2790
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from lxml import etree

from nova.api.openstack import compute
from nova.api.openstack.compute.plugins.v3 import server_diagnostics
from nova.api.openstack import wsgi
from nova.compute import api as compute_api
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes


# Instance uuid that the fake compute API below accepts.
UUID = 'abc'


def fake_get_diagnostics(self, _context, instance_uuid):
    # Stub for compute_api.API.get_diagnostics: fixed payload, no hypervisor.
    return {'data': 'Some diagnostic info'}


def fake_instance_get(self, _context, instance_uuid):
    # Stub for compute_api.API.get: only knows the UUID above.
    if instance_uuid != UUID:
        raise Exception("Invalid UUID")
    return {'uuid': instance_uuid}


class ServerDiagnosticsTest(test.TestCase):
    """End-to-end test of the v3 os-server-diagnostics API resource."""

    def setUp(self):
        super(ServerDiagnosticsTest, self).setUp()
        # Replace the compute API calls the extension makes with the fakes
        # above, and build a router exposing only the extension under test.
        self.stubs.Set(compute_api.API, 'get_diagnostics',
                       fake_get_diagnostics)
        self.stubs.Set(compute_api.API, 'get', fake_instance_get)
        self.router = compute.APIRouterV3(init_only=('servers',
                                                     'os-server-diagnostics'))

    def test_get_diagnostics(self):
        # GET on the resource should pass through the fake diagnostics dict.
        req = fakes.HTTPRequestV3.blank(
            '/servers/%s/os-server-diagnostics' % UUID)
        res = req.get_response(self.router)
        output = jsonutils.loads(res.body)
        self.assertEqual(output, {'data': 'Some diagnostic info'})


class TestServerDiagnosticsXMLSerializer(test.TestCase):
    """Tests for the XML serialization of the diagnostics response."""
    namespace = wsgi.XMLNS_V11

    def _tag(self, elem):
        # Verify the element lives in the expected namespace and return
        # its local (namespace-stripped) tag name.
        tagname = elem.tag
        self.assertEqual(tagname[0], '{')
        tmp = tagname.partition('}')
        namespace = tmp[0][1:]
        self.assertEqual(namespace, self.namespace)
        return tmp[2]

    def test_index_serializer(self):
        # Each key/value of the diagnostics dict becomes a child element
        # under a namespaced <diagnostics> root.
        serializer = server_diagnostics.ServerDiagnosticsTemplate()
        exemplar = dict(diag1='foo', diag2='bar')
        text = serializer.serialize(exemplar)

        tree = etree.fromstring(text)

        self.assertEqual('diagnostics', self._tag(tree))
        self.assertEqual(len(tree), len(exemplar))
        for child in tree:
            tag = self._tag(child)
            self.assertTrue(tag in exemplar)
            self.assertEqual(child.text, exemplar[tag])
apache-2.0
tersmitten/ansible
lib/ansible/modules/cloud/ovirt/ovirt_tag_facts.py
55
4848
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}


DOCUMENTATION = '''
---
module: ovirt_tag_facts
short_description: Retrieve facts about one or more oVirt/RHV tags
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
    - "Retrieve facts about one or more oVirt/RHV tags."
notes:
    - "This module creates a new top-level C(ovirt_tags) fact, which
       contains a list of tags"
options:
    name:
      description:
        - "Name of the tag which should be listed."
    vm:
      description:
        - "Name of the VM, which tags should be listed."
    host:
      description:
        - "Name of the host, which tags should be listed."
extends_documentation_fragment: ovirt_facts
'''

EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:

# Gather facts about all tags, which names start with C(tag):
- ovirt_tag_facts:
    name: tag*
- debug:
    var: tags

# Gather facts about all tags, which are assigned to VM C(postgres):
- ovirt_tag_facts:
    vm: postgres
- debug:
    var: tags

# Gather facts about all tags, which are assigned to host C(west):
- ovirt_tag_facts:
    host: west
- debug:
    var: tags
'''

RETURN = '''
ovirt_tags:
    description: "List of dictionaries describing the tags. Tags attributes are mapped to dictionary keys,
                  all tags attributes can be found at following url:
                  http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
    returned: On success.
    type: list
'''

import fnmatch
import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
    check_sdk,
    create_connection,
    get_dict_of_struct,
    ovirt_facts_full_argument_spec,
    search_by_name,
)


def main():
    """Collect oVirt/RHV tags (optionally filtered by name glob, host or VM)
    and expose them as the C(ovirt_tags) fact."""
    argument_spec = ovirt_facts_full_argument_spec(
        name=dict(default=None),
        host=dict(default=None),
        vm=dict(default=None),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # BUGFIX: pop 'auth' and initialize 'connection' BEFORE the try block.
    # Previously, if create_connection() (or the auth lookup) raised, the
    # finally clause referenced unbound names and raised NameError, masking
    # the real failure reported to the user.
    auth = module.params.pop('auth')
    connection = None
    try:
        connection = create_connection(auth)
        tags_service = connection.system_service().tags_service()
        tags = []
        all_tags = tags_service.list()

        # Filter by name glob (fnmatch pattern), if requested.
        if module.params['name']:
            tags.extend([
                t for t in all_tags
                if fnmatch.fnmatch(t.name, module.params['name'])
            ])

        # Add tags assigned to the given host, if requested.
        if module.params['host']:
            hosts_service = connection.system_service().hosts_service()
            host = search_by_name(hosts_service, module.params['host'])
            if host is None:
                raise Exception("Host '%s' was not found." % module.params['host'])
            tags.extend([
                tag for tag in hosts_service.host_service(host.id).tags_service().list()
            ])

        # Add tags assigned to the given VM, if requested.
        if module.params['vm']:
            vms_service = connection.system_service().vms_service()
            vm = search_by_name(vms_service, module.params['vm'])
            if vm is None:
                raise Exception("Vm '%s' was not found." % module.params['vm'])
            tags.extend([
                tag for tag in vms_service.vm_service(vm.id).tags_service().list()
            ])

        # With no filter at all, report every tag in the system.
        if not (module.params['vm'] or module.params['host'] or module.params['name']):
            tags = all_tags

        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_tags=[
                    get_dict_of_struct(
                        struct=t,
                        connection=connection,
                        fetch_nested=module.params['fetch_nested'],
                        attributes=module.params['nested_attributes'],
                    ) for t in tags
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close (and optionally log out) if the connection was created.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)


if __name__ == '__main__':
    main()
gpl-3.0
hand-iemura/lightpng
boost_1_53_0/tools/build/v2/test/indirect_conditional.py
20
1389
#!/usr/bin/python # Copyright (C) Vladimir Prus 2006. # Distributed under the Boost Software License, Version 1.0. (See # accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) import BoostBuild t = BoostBuild.Tester() t.write("jamroot.jam", """ exe a1 : a1.cpp : <conditional>@a1-rule ; rule a1-rule ( properties * ) { if <variant>debug in $(properties) { return <define>OK ; } } exe a2 : a2.cpp : <conditional>@$(__name__).a2-rule <variant>debug:<optimization>speed ; rule a2-rule ( properties * ) { if <optimization>speed in $(properties) { return <define>OK ; } } exe a3 : a3.cpp : <conditional>@$(__name__).a3-rule-1 <conditional>@$(__name__).a3-rule-2 ; rule a3-rule-1 ( properties * ) { if <optimization>speed in $(properties) { return <define>OK ; } } rule a3-rule-2 ( properties * ) { if <variant>debug in $(properties) { return <optimization>speed ; } } """) t.write("a1.cpp", """ #ifdef OK int main() {} #endif """) t.write("a2.cpp", """ #ifdef OK int main() {} #endif """) t.write("a3.cpp", """ #ifdef OK int main() {} #endif """) t.run_build_system() t.expect_addition("bin/$toolset/debug/a1.exe") t.expect_addition("bin/$toolset/debug/optimization-speed/a2.exe") t.expect_addition("bin/$toolset/debug/optimization-speed/a3.exe") t.cleanup()
mit
kasioumis/invenio
invenio/modules/workflows/models.py
9
33638
# -*- coding: utf-8 -*- # This file is part of Invenio. # Copyright (C) 2012, 2013, 2014, 2015 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Models for BibWorkflow Objects.""" import base64 import logging import os import tempfile from datetime import datetime from invenio.base.globals import cfg from invenio.base.utils import classproperty from invenio.ext.logging import deprecated from invenio.ext.sqlalchemy import db from invenio.ext.sqlalchemy.utils import session_manager from six import callable, iteritems from six.moves import cPickle from sqlalchemy import desc from sqlalchemy.orm.exc import NoResultFound from .logger import BibWorkflowLogHandler, get_logger class ObjectVersion(object): """Specify the different versions possible.""" INITIAL = 0 COMPLETED = 1 HALTED = 2 RUNNING = 3 WAITING = 4 ERROR = 5 MAPPING = {"New": 0, "Done": 1, "Need action": 2, "In process": 3, "Waiting": 4, "Error": 5} @classproperty @deprecated("Please use ObjectVersion.COMPLETED " "instead of ObjectVersion.FINAL") def FINAL(cls): return cls.COMPLETED @classmethod def name_from_version(cls, version): try: return cls.MAPPING.keys()[cls.MAPPING.values().index(version)] except ValueError: return None def get_default_data(): """Return the base64 representation of the data default value.""" data_default = {} return 
base64.b64encode(cPickle.dumps(data_default)) def get_default_extra_data(): """Return the base64 representation of the extra_data default value.""" extra_data_default = {"_tasks_results": {}, "owner": {}, "_task_counter": {}, "_error_msg": None, "_last_task_name": "", "latest_object": -1, "_action": None, "redis_search": {}, "source": "", "_task_history": []} return base64.b64encode(cPickle.dumps(extra_data_default)) class Workflow(db.Model): """Represents a workflow instance. Used by BibWorkflowEngine to store the state of the workflow. """ __tablename__ = "bwlWORKFLOW" uuid = db.Column(db.String(36), primary_key=True, nullable=False) name = db.Column(db.String(255), default="Default workflow", nullable=False) created = db.Column(db.DateTime, default=datetime.now, nullable=False) modified = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now, nullable=False) id_user = db.Column(db.Integer, default=0, nullable=False) _extra_data = db.Column(db.LargeBinary, nullable=False, default=get_default_extra_data()) status = db.Column(db.Integer, default=0, nullable=False) current_object = db.Column(db.Integer, default="0", nullable=False) objects = db.relationship("BibWorkflowObject", backref='bwlWORKFLOW', cascade="all, delete, delete-orphan") counter_initial = db.Column(db.Integer, default=0, nullable=False) counter_halted = db.Column(db.Integer, default=0, nullable=False) counter_error = db.Column(db.Integer, default=0, nullable=False) counter_finished = db.Column(db.Integer, default=0, nullable=False) module_name = db.Column(db.String(64), nullable=False) child_logs = db.relationship("BibWorkflowEngineLog", backref='bwlWORKFLOW', cascade="all, delete, delete-orphan") def __repr__(self): """Represent a workflow object.""" return "<Workflow(name: %s, module: %s, cre: %s, mod: %s," \ "id_user: %s, status: %s)>" % \ (str(self.name), str(self.module_name), str(self.created), str(self.modified), str(self.id_user), str(self.status)) def __str__(self): """Print a 
workflow object.""" return """Workflow: Uuid: %s Name: %s User id: %s Module name: %s Created: %s Modified: %s Status: %s Current object: %s Counters: initial=%s, halted=%s, error=%s, finished=%s Extra data: %s""" % (str(self.uuid), str(self.name), str(self.id_user), str(self.module_name), str(self.created), str(self.modified), str(self.status), str(self.current_object), str(self.counter_initial), str(self.counter_halted), str(self.counter_error), str(self.counter_finished), str(self._extra_data)) @classmethod def get(cls, *criteria, **filters): """Wrapper to get a specified object. A wrapper for the filter and filter_by functions of sqlalchemy. Define a dict with which columns should be filtered by which values. .. code-block:: python Workflow.get(uuid=uuid) Workflow.get(Workflow.uuid != uuid) The function supports also "hybrid" arguments. .. code-block:: python Workflow.get(Workflow.module_name != 'i_hate_this_module', user_id=user_id) See also SQLAlchemy BaseQuery's filter and filter_by documentation. """ return cls.query.filter(*criteria).filter_by(**filters) @classmethod def get_status(cls, uuid=None): """Return the status of the workflow.""" return cls.get(Workflow.uuid == uuid).one().status @classmethod def get_most_recent(cls, *criteria, **filters): """Return the most recently modified workflow.""" most_recent = cls.get(*criteria, **filters). \ order_by(desc(Workflow.modified)).first() if most_recent is None: raise NoResultFound else: return most_recent @classmethod def get_objects(cls, uuid=None): """Return the objects of the workflow.""" return cls.get(Workflow.uuid == uuid).one().objects def get_extra_data(self, user_id=0, uuid=None, key=None, getter=None): """Get the extra_data for the object. Returns a JSON of the column extra_data or if any of the other arguments are defined, a specific value. You can define either the key or the getter function. 
:param key: the key to access the desirable value :param getter: a callable that takes a dict as param and returns a value """ extra_data = Workflow.get(Workflow.id_user == self.id_user, Workflow.uuid == self.uuid).one()._extra_data extra_data = cPickle.loads(base64.b64decode(extra_data)) if key: return extra_data[key] elif callable(getter): return getter(extra_data) elif not key: return extra_data def set_extra_data(self, user_id=0, uuid=None, key=None, value=None, setter=None): """Replace extra_data. Modifies the JSON of the column extra_data or if any of the other arguments are defined, a specific value. You can define either the key, value or the setter function. :param key: the key to access the desirable value :param value: the new value :param setter: a callable that takes a dict as param and modifies it """ extra_data = Workflow.get(Workflow.id_user == user_id, Workflow.uuid == uuid).one()._extra_data extra_data = cPickle.loads(base64.b64decode(extra_data)) if key is not None and value is not None: extra_data[key] = value elif callable(setter): setter(extra_data) Workflow.get(Workflow.uuid == self.uuid).update( {'_extra_data': base64.b64encode(cPickle.dumps(extra_data))} ) @classmethod @session_manager def delete(cls, uuid=None): """Delete a workflow.""" cls.get(Workflow.uuid == uuid).delete() @session_manager def save(self, status): """Save object to persistent storage.""" self.modified = datetime.now() if status is not None: self.status = status db.session.add(self) class BibWorkflowObject(db.Model): """Data model for wrapping data being run in the workflows. Main object being passed around in the workflows module when using the workflows API. It can be instantiated like this: .. code-block:: python obj = BibWorkflowObject() obj.save() Or, like this: .. code-block:: python obj = BibWorkflowObject.create_object() BibWorkflowObject provides some handy functions such as: .. code-block:: python obj.set_data("<xml ..... />") obj.get_data() == "<xml ..... 
/>" obj.set_extra_data({"param": value}) obj.get_extra_data() == {"param": value} obj.add_task_result("myresult", {"result": 1}) Then to finally save the object .. code-block:: python obj.save() Now you can for example run it in a workflow: .. code-block:: python obj.start_workflow("sample_workflow") """ # db table definition __tablename__ = "bwlOBJECT" id = db.Column(db.Integer, primary_key=True) # Our internal data column. Default is encoded dict. _data = db.Column(db.LargeBinary, nullable=False, default=get_default_data()) _extra_data = db.Column(db.LargeBinary, nullable=False, default=get_default_extra_data()) id_workflow = db.Column(db.String(36), db.ForeignKey("bwlWORKFLOW.uuid"), nullable=True) version = db.Column(db.Integer(3), default=ObjectVersion.INITIAL, nullable=False) id_parent = db.Column(db.Integer, db.ForeignKey("bwlOBJECT.id"), default=None) child_objects = db.relationship("BibWorkflowObject", remote_side=[id_parent]) created = db.Column(db.DateTime, default=datetime.now, nullable=False) modified = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now, nullable=False) status = db.Column(db.String(255), default="", nullable=False) data_type = db.Column(db.String(150), default="", nullable=True) uri = db.Column(db.String(500), default="") id_user = db.Column(db.Integer, default=0, nullable=False) child_logs = db.relationship("BibWorkflowObjectLog", backref='bibworkflowobject', cascade="all, delete, delete-orphan") workflow = db.relationship( Workflow, foreign_keys=[id_workflow], remote_side=Workflow.uuid, ) _log = None @property def log(self): """Access logger object for this instance.""" if not self._log: db_handler_obj = BibWorkflowLogHandler(BibWorkflowObjectLog, "id") self._log = get_logger(logger_name="object.%s" % (self.id,), db_handler_obj=db_handler_obj, loglevel=logging.DEBUG, obj=self) return self._log def get_data(self): """Get data saved in the object.""" return cPickle.loads(base64.b64decode(self._data)) def set_data(self, 
value): """Save data to the object.""" self._data = base64.b64encode(cPickle.dumps(value)) def get_extra_data(self): """Get extra data saved to the object.""" return cPickle.loads(base64.b64decode(self._extra_data)) def set_extra_data(self, value): """Save extra data to the object. :param value: what you want to replace extra_data with. :type value: dict """ self._extra_data = base64.b64encode(cPickle.dumps(value)) def get_workflow_name(self): """Return the workflow name for this object.""" try: if self.id_workflow: return Workflow.query.get(self.id_workflow).name except AttributeError: # Workflow non-existent pass return def update_task_history(self, last_task): """Append last task to task history.""" from .utils import get_func_info if "_task_history" not in self.extra_data: self.extra_data["_task_history"] = [] if hasattr(last_task, 'branch') and last_task.branch: return elif hasattr(last_task, 'hide') and last_task.hide: return else: self.extra_data["_task_history"].append(get_func_info(last_task)) def get_formatted_data(self, of="hd"): """Get the formatted representation for this object.""" from .registry import workflows try: name = self.get_workflow_name() if not name: return "" workflow_definition = workflows[name] formatted_data = workflow_definition.formatter( self, of=of ) except (KeyError, AttributeError): # Somehow the workflow or formatter does not exist from invenio.ext.logging import register_exception register_exception(alert_admin=True) formatted_data = "" return formatted_data def __repr__(self): """Represent a BibWorkflowObject.""" return "<BibWorkflowObject(id = %s, data = %s, id_workflow = %s, " \ "version = %s, id_parent = %s, created = %s, extra_data = %s)" \ % (str(self.id), str(self.get_data()), str(self.id_workflow), str(self.version), str(self.id_parent), str(self.created), str(self.get_extra_data())) def __eq__(self, other): """Enable equal operators on BibWorkflowObjects.""" if isinstance(other, BibWorkflowObject): if self._data == 
other._data and \ self._extra_data == other._extra_data and \ self.id_workflow == other.id_workflow and \ self.version == other.version and \ self.id_parent == other.id_parent and \ isinstance(self.created, datetime) and \ isinstance(self.modified, datetime): return True else: return False return NotImplemented def __ne__(self, other): """Enable equal operators on BibWorkflowObjects.""" return not self.__eq__(other) def add_task_result(self, name, result, template="workflows/results/default.html"): """Add a new task result defined by name. The name is the dictionary key used to group similar types of results as well as a possible label for the result. The result is a dictionary given as context to the template when rendered. The result given here is added to a list of results for this name. .. code-block:: python obj = BibWorkflowObject() # or BibWorkflowObject.query.get(id) obj.add_task_result("foo", my_result, "path/to/template") :param name: The name of the task in human friendly way. It is used as a key and label for the result rendering. :type name: string :param result: The result to store - passed to render_template(). :type result: dict :param template: The location of the template to render the result. :type template: string """ extra_data = getattr(self, "extra_data", self.get_extra_data()) task_result = { "name": name, "result": result, "template": template } if name in extra_data["_tasks_results"]: extra_data["_tasks_results"][name].append(task_result) else: extra_data["_tasks_results"][name] = [task_result] self.set_extra_data(extra_data) def update_task_results(self, name, results): """Update tasks results by name. The name is the dictionary key used to group similar types of results as well as a possible label for the result. This functions allows you to update (replace) the list of results associated with a name where each result is structured like this: .. 
code-block:: python task_result = { "name": "foo", "result": result, "template": template } obj = BibWorkflowObject() # or BibWorkflowObject.query.get(id) obj.update_task_results("foo", [task_result]) :param name: The name of the task in human friendly way. It is used as a key and label for the result rendering. :type name: string :param results: List of results to store - passed to render_template(). :type results: list :param template: The location of the template to render the result. :type template: string """ extra_data = getattr(self, "extra_data", self.get_extra_data()) extra_data["_tasks_results"][name] = results self.set_extra_data(extra_data) def get_tasks_results(self): """Return the complete set of tasks results. The result is given as a dictionary where each result is structured like: .. code-block:: python task_result = { "name": name, "result": result, "template": template } :return: dictionary of results as {name: [result, ..], ..} """ return self.get_extra_data()["_tasks_results"] def set_action(self, action, message): """Set the action to be taken for this object. Assign an special "action" to this object to be taken in consideration in Holding Pen. The widget is referred to by a string with the filename minus extension. A message is also needed to tell the user the action required in a textual way. :param action: name of the action to add (i.e. "approval") :type action: string :param message: message to show to the user :type message: string """ extra_data = self.get_extra_data() extra_data["_action"] = action extra_data["_message"] = message self.set_extra_data(extra_data) def get_action(self): """Retrieve the currently assigned action, if any. 
:return: name of action assigned as string, or None """ try: return self.get_extra_data()["_action"] except KeyError: # No widget, try old _widget extra_data = self.get_extra_data() if "_widget" in extra_data: import warnings warnings.warn("Widget's are now stored in '_action'", DeprecationWarning) # Migrate to new naming extra_data["_action"] = extra_data['_widget'] del extra_data["_widget"] self.set_extra_data(extra_data) return extra_data["_action"] return None def get_action_message(self): """Retrieve the currently assigned widget, if any.""" try: return self.get_extra_data()["_message"] except KeyError: # No widget return "" def set_error_message(self, msg): """Set an error message.""" extra_data = self.get_extra_data() extra_data["_error_msg"] = msg self.set_extra_data(extra_data) def reset_error_message(self): """Reset the error message.""" extra_data = self.get_extra_data() if "_error_msg" in extra_data: del extra_data["_error_msg"] self.set_extra_data(extra_data) def get_error_message(self): """Retrieve the error message, if any.""" if "error_msg" in self.get_extra_data(): # Backwards compatibility extra_data = self.get_extra_data() msg = extra_data["error_msg"] del extra_data["error_msg"] self.set_extra_data(extra_data) self.set_error_message(msg) try: return self.get_extra_data()["_error_msg"] except KeyError: # No message return "" def remove_action(self): """Remove the currently assigned action.""" extra_data = self.get_extra_data() extra_data["_action"] = None extra_data["_message"] = "" if "_widget" in extra_data: del extra_data["_widget"] self.set_extra_data(extra_data) def start_workflow(self, workflow_name, delayed=False, **kwargs): """Run the workflow specified on the object. Will start workflows execution for the object using :py:func:`.api.start` (or :py:func:`.api.start_delayed` if `delayed=True`). :param workflow_name: name of workflow to run :type workflow_name: str :param delayed: should the workflow run asynchronously? 
:type delayed: bool :return: BibWorkflowEngine (or AsynchronousResultWrapper). """ if delayed: from .api import start_delayed as start_func else: from .api import start as start_func self.save() return start_func(workflow_name, data=[self], **kwargs) def continue_workflow(self, start_point="continue_next", delayed=False, **kwargs): """Continue the workflow for this object. Will continue a previous execution for the object using :py:func:`.api.continue_oid` (or :py:func:`.api.continue_oid_delayed` if `delayed=True`). The parameter `start_point` allows you to specify the point of where the workflow shall continue: * restart_prev: will restart from the previous task * continue_next: will continue to the next task * restart_task: will restart the current task :param start_point: where should the workflow start from? :type start_point: str :param delayed: should the workflow run asynchronously? :type delayed: bool :return: BibWorkflowEngine (or AsynchronousResultWrapper). """ from .errors import WorkflowAPIError self.save() if not self.id_workflow: raise WorkflowAPIError("No workflow associated with object: %r" % (repr(self),)) if delayed: from .api import continue_oid_delayed as continue_func else: from .api import continue_oid as continue_func return continue_func(self.id, start_point, **kwargs) def change_status(self, message): """Change the status.""" self.status = message def get_current_task(self): """Return the current task from the workflow engine for this object.""" extra_data = self.get_extra_data() try: return extra_data["_task_counter"] except KeyError: # Assume old version "task_counter" return extra_data["task_counter"] def save_to_file(self, directory=None, prefix="workflow_object_data_", suffix=".obj"): """Save the contents of self.data['data'] to file. Returns path to saved file. Warning: Currently assumes non-binary content. 
""" if directory is None: directory = cfg['CFG_TMPSHAREDIR'] tmp_fd, filename = tempfile.mkstemp(dir=directory, prefix=prefix, suffix=suffix) os.write(tmp_fd, self.get_data()) os.close(tmp_fd) return filename def get_log(self, *criteria, **filters): """Return a list of log entries from BibWorkflowObjectLog. You can specify additional filters following the SQLAlchemy syntax. Get all the logs for the object: .. code-block:: python b = BibWorkflowObject.query.get(1) b.get_log() Get all the logs for the object labeled as ERROR. .. code-block:: python b = BibWorkflowObject.query.get(1) b.get_log(BibWorkflowObjectLog.log_type == logging.ERROR) :return: list of BibWorkflowObjectLog """ criterions = [BibWorkflowObjectLog.id_object == self.id] + list(criteria) res = BibWorkflowObjectLog.query.filter( *criterions ).filter_by(**filters) return res.all() def __getstate__(self): """Return internal dict.""" return self.__dict__ def __setstate__(self, state): """Update interal dict with given state.""" self.__dict__ = state def copy(self, other): """Copy data and metadata except id and id_workflow.""" self._data = other._data self._extra_data = other._extra_data self.version = other.version self.id_parent = other.id_parent self.created = other.created self.modified = other.modified self.status = other.status self.data_type = other.data_type self.uri = other.uri @session_manager def save(self, version=None, task_counter=None, id_workflow=None): """Save object to persistent storage.""" if task_counter is not None: if isinstance(task_counter, list): self.log.debug("Saving task counter: %s" % (task_counter,)) extra_data = self.get_extra_data() extra_data["_task_counter"] = task_counter self.set_extra_data(extra_data) else: raise ValueError("Task counter must be a list!") if version is not None: if version != self.version: self.modified = datetime.now() self.version = version if id_workflow is not None: self.id_workflow = id_workflow db.session.add(self) if self.id is not None: 
self.log.debug("Saving object: %s" % (self.id or "new",)) @classmethod def get(cls, *criteria, **filters): """Wrapper of SQLAlchemy to get a BibWorkflowObject. A wrapper for the filter and filter_by functions of SQLAlchemy. Define a dict with which columns should be filtered by which values. .. code-block:: python Workflow.get(uuid=uuid) Workflow.get(Workflow.uuid != uuid) The function supports also "hybrid" arguments. .. code-block:: python Workflow.get(Workflow.module_name != 'i_hate_this_module', user_id=user_id) See also SQLAlchemy BaseQuery's filter and filter_by documentation. """ return cls.query.filter(*criteria).filter_by(**filters) @classmethod @session_manager def delete(cls, oid): """Delete a BibWorkflowObject.""" cls.get(BibWorkflowObject.id == oid).delete() @classmethod @session_manager def create_object(cls, **kwargs): """Create a new Workflow Object with given content.""" obj = BibWorkflowObject(**kwargs) db.session.add(obj) return obj @classmethod @session_manager def create_object_revision(cls, old_obj, version, **kwargs): """Create a Workflow Object copy with customized values.""" # Create new object and copy it obj = BibWorkflowObject(**kwargs) obj.copy(old_obj) # Overwrite some changes obj.version = version obj.created = datetime.now() obj.modified = datetime.now() for key, value in iteritems(kwargs): setattr(obj, key, value) db.session.add(obj) return obj class BibWorkflowObjectLog(db.Model): """Represents a log entry for BibWorkflowObjects. This class represent a record of a log emit by an object into the database. The object must be saved before using this class as it requires the object id. 
""" __tablename__ = 'bwlOBJECTLOGGING' id = db.Column(db.Integer, primary_key=True) id_object = db.Column(db.Integer(255), db.ForeignKey('bwlOBJECT.id'), nullable=False) log_type = db.Column(db.Integer, default=0, nullable=False) created = db.Column(db.DateTime, default=datetime.now) message = db.Column(db.TEXT, default="", nullable=False) def __str__(self): """Print a log.""" return "%(severity)s: %(created)s - %(message)s" % { "severity": self.log_type, "created": self.created, "message": self.message } def __repr__(self): """Represent a log message.""" return "BibWorkflowObjectLog(%s)" % (", ".join([ "log_type='%s'" % self.log_type, "created='%s'" % self.created, "message='%s'" % self.message, "id_object='%s'" % self.id_object, ])) @classmethod def get(cls, *criteria, **filters): """SQLAlchemy wrapper to get BibworkflowLogs. A wrapper for the filter and filter_by functions of SQLAlchemy. Define a dict with which columns should be filtered by which values. See also SQLAlchemy BaseQuery's filter and filter_by documentation. """ return cls.query.filter(*criteria).filter_by(**filters) @classmethod def get_most_recent(cls, *criteria, **filters): """Return the most recently created log.""" most_recent = cls.get(*criteria, **filters).order_by( desc(BibWorkflowObjectLog.created)).first() if most_recent is None: raise NoResultFound else: return most_recent @classmethod def delete(cls, id=None): """Delete an instance in database.""" cls.get(BibWorkflowObjectLog.id == id).delete() db.session.commit() class BibWorkflowEngineLog(db.Model): """Represents a log entry for BibWorkflowEngine. This class represent a record of a log emit by an object into the database. The object must be saved before using this class as it requires the object id. 
""" __tablename__ = "bwlWORKFLOWLOGGING" id = db.Column(db.Integer, primary_key=True) id_object = db.Column(db.String(255), db.ForeignKey('bwlWORKFLOW.uuid'), nullable=False) log_type = db.Column(db.Integer, default=0, nullable=False) created = db.Column(db.DateTime, default=datetime.now) message = db.Column(db.TEXT, default="", nullable=False) def __str__(self): """Print a log.""" return "%(severity)s: %(created)s - %(message)s" % { "severity": self.log_type, "created": self.created, "message": self.message } def __repr__(self): """Represent a log message.""" return "BibWorkflowEngineLog(%s)" % (", ".join([ "log_type='%s'" % self.log_type, "created='%s'" % self.created, "message='%s'" % self.message, "id_object='%s'" % self.id_object ])) @classmethod def get(cls, *criteria, **filters): """Sqlalchemy wrapper to get BibWorkflowEngineLog. A wrapper for the filter and filter_by functions of sqlalchemy. Define a dict with which columns should be filtered by which values. look up also sqalchemy BaseQuery's filter and filter_by documentation """ return cls.query.filter(*criteria).filter_by(**filters) @classmethod def get_most_recent(cls, *criteria, **filters): """Return the most recently created log.""" most_recent = cls.get(*criteria, **filters).order_by( desc(BibWorkflowEngineLog.created)).first() if most_recent is None: raise NoResultFound else: return most_recent @classmethod def delete(cls, uuid=None): """Delete an instance in database.""" cls.get(BibWorkflowEngineLog.id == uuid).delete() db.session.commit() __all__ = ('Workflow', 'BibWorkflowObject', 'BibWorkflowObjectLog', 'BibWorkflowEngineLog')
gpl-2.0
Storj/bitcointalkbot
crawler.py
1
4595
#!/usr/bin/env python3
# Written by Jonathon Vogel, 2014
#
# Watches the BitcoinTalk RSS feed for new posts mentioning any of a set of
# keywords and reports matches (with a little surrounding context) to a
# Slack channel.

import bs4
import feedparser
import requests
import slack
import slack.chat
import time
import traceback
import urllib.parse

# Keywords to search each post body for; on overlapping matches the longest
# keyword starting at a position wins.
KEYWORDS = ['Storj', 'Storj Labs', 'SJCX', 'Storjcoin X', 'Storjcoin']

PING_TIME = 2  # how many seconds to wait between checking BitcoinTalk

KEYWORD_FORMAT = '_*{}*_'  # markdown bold, {} is replaced

MESSAGE_FORMAT = """Someone mentioned your organization on BitcoinTalk!
Thread - {} / {}
{}"""

slack.api_token = ''  # get one for your org. at api.slack.com
SLACK_USERNAME = 'Bitcoin-Talk-Bot'
SLACK_CHANNEL = '#general'

BITCOIN_TALK_RSS = 'https://bitcointalk.org/index.php?type=rss;action=.xml&limit=100'


def string_find_all(s, needle):
    """A generator that finds all occurences of needle in s.

    Yields the start index of every non-overlapping occurrence, left to
    right.
    """
    loc = 0
    while True:
        loc = s.find(needle, loc)
        if loc == -1:
            return
        yield loc
        loc += len(needle)


def check_and_format_string(s, kwds, each_side_context=20):
    """s is the string to check, kwds is the keywords to check for, and
    each_side_context is the number of characters of context to include on
    each side of the keyword.

    Returns a list of formatted strings, which is empty if no keywords were
    found.
    """
    keywords = {}
    for k in kwds:
        for loc in string_find_all(s, k):
            # Prefer the longest keyword that starts at a given location
            # (e.g. 'Storj Labs' over 'Storj').
            if loc not in keywords or len(k) > len(keywords[loc]):
                keywords[loc] = k
    # BUG FIX: clamp the left edge of the context window to 0.  For matches
    # within the first `each_side_context` characters the old start index
    # went negative, wrapped around to the end of the string, and silently
    # produced an empty/garbled prefix.
    return [s[max(0, loc - each_side_context):loc]
            + KEYWORD_FORMAT.format(k)
            + s[loc + len(k):loc + len(k) + each_side_context]
            for loc, k in keywords.items()]


def check_post_strings(url, kwd=KEYWORDS):
    """We need to do a *little* bit of HTML scraping, as the RSS feed only
    gives us partial summaries of posts. Luckily, this isn't too difficult,
    and it's flexible enough that BitcoinTalk redesigns shouldn't break it
    too hard.

    Returns the list of formatted keyword mentions found in the post at
    `url` (may be empty).  Raises if BitcoinTalk serves broken HTML.
    """
    html = bs4.BeautifulSoup(requests.get(url).text)
    post_id_elem = html.find('a', href=url)
    if post_id_elem is None:
        # bitcoin talk returning bad HTML
        print('Bad HTML (503?), bailing...')
        # BUG FIX: a BeautifulSoup object cannot be sliced directly
        # (Tag.__getitem__ looks up attributes); stringify it first.
        print(str(html)[:100])
        raise Exception('Bad HTML, possible 503')
    post = post_id_elem.find_next('div', {'class': 'post'})

    def walk_post_children(node):
        # Recursively yield the text fragments of the post, skipping quoted
        # blocks — we don't want quotes to double-report things.
        if isinstance(node, str):
            yield str(node)
        elif hasattr(node, 'children'):
            # BUG FIX: bs4 exposes the multi-valued `class` attribute via
            # Tag.get() as a list.  The old test (`'class' not in node`,
            # then comparing the class *list* against strings) never
            # matched, so quoted blocks were not actually excluded.
            classes = node.get('class') or []
            if 'quote' in classes or 'quoteheader' in classes:
                return
            for c in node.children:
                for s in walk_post_children(c):
                    yield s

    lines = []
    for s in walk_post_children(post):
        lines += check_and_format_string(s, kwd)
    return lines


def get_post_id(url):
    """Extract the numeric post id from a BitcoinTalk post URL fragment
    (e.g. '...#msg1234' -> 1234)."""
    return int(urllib.parse.urlparse(url).fragment.replace('msg', ''))


def check_btc_talk(last_post_checked):
    """Handler for RSS and posting to Slack.

    Scans the feed oldest-first, reports any posts newer than
    `last_post_checked`, and returns the id of the newest post processed.
    """
    t = requests.get(BITCOIN_TALK_RSS).text
    feed = feedparser.parse(t)
    #if feed['bozo']:
    #    print('WARNING: XML errors in feed')
    #    print(t[:100])
    for entry in reversed(feed['entries']):
        if 'id' not in entry or (last_post_checked is not None and
                                 get_post_id(entry['id']) <=
                                 get_post_id(last_post_checked)):
            continue
        print(entry['id'])
        try:
            mentions = check_post_strings(entry['id'], KEYWORDS)
            if len(mentions):
                print('Found a mention, posting to slack...')
                slack.chat.post_message(
                    SLACK_CHANNEL,
                    MESSAGE_FORMAT.format(entry['title'], entry['id'],
                                          '\n'.join(mentions)),
                    username=SLACK_USERNAME)
            last_post_checked = entry['id']
        except Exception:
            # NOTE: KeyboardInterrupt is not an Exception subclass in
            # Python 3, so it propagates naturally; the old explicit
            # isinstance re-raise here was dead code.
            print('Unhandled exception, retrying feed parse at exception point')
            traceback.print_exc()
            break
        time.sleep(1)  # per-post throttle, be polite to BitcoinTalk
    return last_post_checked


def main():
    """Loop and exception handling"""
    # don't spend a bunch of time parsing old comments
    last_post_checked = feedparser.parse(BITCOIN_TALK_RSS)['entries'][0]['id']
    while True:
        try:
            last_post_checked = check_btc_talk(last_post_checked)
            # BUG FIX: honor the configured poll interval; PING_TIME was
            # defined but never used (a hard-coded 1s sleep was used).
            time.sleep(PING_TIME)
        # BUG FIX: in Python 3 KeyboardInterrupt does not derive from
        # Exception, so the old `except Exception` + isinstance check never
        # caught it and the graceful-exit message was never printed.
        except KeyboardInterrupt:
            print('Being killed! Exiting...')
            break
        except Exception:
            print('Unexpected exception, trying to continue...')
            traceback.print_exc()


if __name__ == '__main__':
    main()
mit
JVillella/tensorflow
tensorflow/contrib/keras/python/keras/utils/generic_utils.py
12
11458
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python utilities required by Keras.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import marshal import sys import time import types as python_types import numpy as np import six from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_inspect _GLOBAL_CUSTOM_OBJECTS = {} class CustomObjectScope(object): """Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape. Code within a `with` statement will be able to access custom objects by name. Changes to global custom objects persist within the enclosing `with` statement. At end of the `with` statement, global custom objects are reverted to state at beginning of the `with` statement. Example: Consider a custom object `MyObject` ```python with CustomObjectScope({'MyObject':MyObject}): layer = Dense(..., kernel_regularizer='MyObject') # save, load, etc. 
will recognize custom object by name ``` """ def __init__(self, *args): self.custom_objects = args self.backup = None def __enter__(self): self.backup = _GLOBAL_CUSTOM_OBJECTS.copy() for objects in self.custom_objects: _GLOBAL_CUSTOM_OBJECTS.update(objects) return self def __exit__(self, *args, **kwargs): _GLOBAL_CUSTOM_OBJECTS.clear() _GLOBAL_CUSTOM_OBJECTS.update(self.backup) def custom_object_scope(*args): """Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape. Convenience wrapper for `CustomObjectScope`. Code within a `with` statement will be able to access custom objects by name. Changes to global custom objects persist within the enclosing `with` statement. At end of the `with` statement, global custom objects are reverted to state at beginning of the `with` statement. Example: Consider a custom object `MyObject` ```python with custom_object_scope({'MyObject':MyObject}): layer = Dense(..., kernel_regularizer='MyObject') # save, load, etc. will recognize custom object by name ``` Arguments: *args: Variable length list of dictionaries of name, class pairs to add to custom objects. Returns: Object of type `CustomObjectScope`. """ return CustomObjectScope(*args) def get_custom_objects(): """Retrieves a live reference to the global dictionary of custom objects. Updating and clearing custom objects using `custom_object_scope` is preferred, but `get_custom_objects` can be used to directly access `_GLOBAL_CUSTOM_OBJECTS`. Example: ```python get_custom_objects().clear() get_custom_objects()['MyObject'] = MyObject ``` Returns: Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`). 
""" return _GLOBAL_CUSTOM_OBJECTS def serialize_keras_object(instance): _, instance = tf_decorator.unwrap(instance) if instance is None: return None if hasattr(instance, 'get_config'): return { 'class_name': instance.__class__.__name__, 'config': instance.get_config() } if hasattr(instance, '__name__'): return instance.__name__ else: raise ValueError('Cannot serialize', instance) def deserialize_keras_object(identifier, module_objects=None, custom_objects=None, printable_module_name='object'): if isinstance(identifier, dict): # In this case we are dealing with a Keras config dictionary. config = identifier if 'class_name' not in config or 'config' not in config: raise ValueError('Improper config format: ' + str(config)) class_name = config['class_name'] if custom_objects and class_name in custom_objects: cls = custom_objects[class_name] elif class_name in _GLOBAL_CUSTOM_OBJECTS: cls = _GLOBAL_CUSTOM_OBJECTS[class_name] else: module_objects = module_objects or {} cls = module_objects.get(class_name) if cls is None: raise ValueError('Unknown ' + printable_module_name + ': ' + class_name) if hasattr(cls, 'from_config'): arg_spec = tf_inspect.getargspec(cls.from_config) custom_objects = custom_objects or {} if 'custom_objects' in arg_spec.args: return cls.from_config( config['config'], custom_objects=dict( list(_GLOBAL_CUSTOM_OBJECTS.items()) + list(custom_objects.items()))) with CustomObjectScope(custom_objects): return cls.from_config(config['config']) else: # Then `cls` may be a function returning a class. # in this case by convention `config` holds # the kwargs of the function. 
custom_objects = custom_objects or {} with CustomObjectScope(custom_objects): return cls(**config['config']) elif isinstance(identifier, six.string_types): function_name = identifier if custom_objects and function_name in custom_objects: fn = custom_objects.get(function_name) elif function_name in _GLOBAL_CUSTOM_OBJECTS: fn = _GLOBAL_CUSTOM_OBJECTS[function_name] else: fn = module_objects.get(function_name) if fn is None: raise ValueError('Unknown ' + printable_module_name + ':' + function_name) return fn else: raise ValueError('Could not interpret serialized ' + printable_module_name + ': ' + identifier) def func_dump(func): """Serializes a user defined function. Arguments: func: the function to serialize. Returns: A tuple `(code, defaults, closure)`. """ code = marshal.dumps(func.__code__).decode('raw_unicode_escape') defaults = func.__defaults__ if func.__closure__: closure = tuple(c.cell_contents for c in func.__closure__) else: closure = None return code, defaults, closure def func_load(code, defaults=None, closure=None, globs=None): """Deserializes a user defined function. Arguments: code: bytecode of the function. defaults: defaults of the function. closure: closure of the function. globs: dictionary of global objects. Returns: A function object. """ if isinstance(code, (tuple, list)): # unpack previous dump code, defaults, closure = code if isinstance(defaults, list): defaults = tuple(defaults) code = marshal.loads(code.encode('raw_unicode_escape')) if globs is None: globs = globals() return python_types.FunctionType( code, globs, name=code.co_name, argdefs=defaults, closure=closure) def has_arg(fn, name, accept_all=False): """Checks if a callable accepts a given keyword argument. Arguments: fn: Callable to inspect. name: Check if `fn` can be called with `name` as a keyword argument. accept_all: What to return if there is no parameter called `name` but the function accepts a `**kwargs` argument. Returns: bool, whether `fn` accepts a `name` keyword argument. 
""" arg_spec = tf_inspect.getargspec(fn) if accept_all and arg_spec.keywords is not None: return True return name in arg_spec.args class Progbar(object): """Displays a progress bar. Arguments: target: Total number of steps expected, None if unknown. interval: Minimum visual progress update interval (in seconds). """ def __init__(self, target, width=30, verbose=1, interval=0.05): self.width = width if target is None: target = -1 self.target = target self.sum_values = {} self.unique_values = [] self.start = time.time() self.last_update = 0 self.interval = interval self.total_width = 0 self.seen_so_far = 0 self.verbose = verbose def update(self, current, values=None, force=False): """Updates the progress bar. Arguments: current: Index of current step. values: List of tuples (name, value_for_last_step). The progress bar will display averages for these values. force: Whether to force visual progress update. """ values = values or [] for k, v in values: if k not in self.sum_values: self.sum_values[k] = [ v * (current - self.seen_so_far), current - self.seen_so_far ] self.unique_values.append(k) else: self.sum_values[k][0] += v * (current - self.seen_so_far) self.sum_values[k][1] += (current - self.seen_so_far) self.seen_so_far = current now = time.time() if self.verbose == 1: if not force and (now - self.last_update) < self.interval: return prev_total_width = self.total_width sys.stdout.write('\b' * prev_total_width) sys.stdout.write('\r') if self.target is not -1: numdigits = int(np.floor(np.log10(self.target))) + 1 barstr = '%%%dd/%%%dd [' % (numdigits, numdigits) bar = barstr % (current, self.target) prog = float(current) / self.target prog_width = int(self.width * prog) if prog_width > 0: bar += ('=' * (prog_width - 1)) if current < self.target: bar += '>' else: bar += '=' bar += ('.' 
* (self.width - prog_width)) bar += ']' sys.stdout.write(bar) self.total_width = len(bar) if current: time_per_unit = (now - self.start) / current else: time_per_unit = 0 eta = time_per_unit * (self.target - current) info = '' if current < self.target and self.target is not -1: info += ' - ETA: %ds' % eta else: info += ' - %ds' % (now - self.start) for k in self.unique_values: info += ' - %s:' % k if isinstance(self.sum_values[k], list): avg = self.sum_values[k][0] / max(1, self.sum_values[k][1]) if abs(avg) > 1e-3: info += ' %.4f' % avg else: info += ' %.4e' % avg else: info += ' %s' % self.sum_values[k] self.total_width += len(info) if prev_total_width > self.total_width: info += ((prev_total_width - self.total_width) * ' ') sys.stdout.write(info) sys.stdout.flush() if current >= self.target: sys.stdout.write('\n') if self.verbose == 2: if current >= self.target: info = '%ds' % (now - self.start) for k in self.unique_values: info += ' - %s:' % k avg = self.sum_values[k][0] / max(1, self.sum_values[k][1]) if avg > 1e-3: info += ' %.4f' % avg else: info += ' %.4e' % avg sys.stdout.write(info + '\n') self.last_update = now def add(self, n, values=None): self.update(self.seen_so_far + n, values)
apache-2.0
UOMx/edx-platform
openedx/core/djangoapps/user_api/tests/test_views.py
2
73163
"""Tests for the user API at the HTTP request level. """ import datetime import json from unittest import skipUnless, SkipTest import ddt import httpretty import mock from django.conf import settings from django.contrib.auth.models import User from django.core import mail from django.core.urlresolvers import reverse from django.test.client import RequestFactory from django.test.testcases import TransactionTestCase from django.test.utils import override_settings from opaque_keys.edx.locations import SlashSeparatedCourseKey from pytz import UTC from social.apps.django_app.default.models import UserSocialAuth from django_comment_common import models from openedx.core.lib.api.test_utils import ApiTestCase, TEST_API_KEY from student.tests.factories import UserFactory from third_party_auth.tests.testutil import simulate_running_pipeline, ThirdPartyAuthTestMixin from third_party_auth.tests.utils import ( ThirdPartyOAuthTestMixin, ThirdPartyOAuthTestMixinFacebook, ThirdPartyOAuthTestMixinGoogle ) from .test_helpers import TestCaseForm from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from ..accounts import ( NAME_MAX_LENGTH, EMAIL_MIN_LENGTH, EMAIL_MAX_LENGTH, PASSWORD_MIN_LENGTH, PASSWORD_MAX_LENGTH, USERNAME_MIN_LENGTH, USERNAME_MAX_LENGTH ) from ..accounts.api import get_account_settings from ..models import UserOrgTag from ..tests.factories import UserPreferenceFactory from ..tests.test_constants import SORTED_COUNTRIES USER_LIST_URI = "/user_api/v1/users/" USER_PREFERENCE_LIST_URI = "/user_api/v1/user_prefs/" ROLE_LIST_URI = "/user_api/v1/forum_roles/Moderator/users/" class UserAPITestCase(ApiTestCase): """ Parent test case for User API workflow coverage """ LIST_URI = USER_LIST_URI def get_uri_for_user(self, target_user): """Given a user object, get the URI for the corresponding resource""" users = self.get_json(USER_LIST_URI)["results"] for user in users: if user["id"] == 
target_user.id: return user["url"] self.fail() def get_uri_for_pref(self, target_pref): """Given a user preference object, get the URI for the corresponding resource""" prefs = self.get_json(USER_PREFERENCE_LIST_URI)["results"] for pref in prefs: if pref["user"]["id"] == target_pref.user.id and pref["key"] == target_pref.key: return pref["url"] self.fail() def assertUserIsValid(self, user): """Assert that the given user result is valid""" self.assertItemsEqual(user.keys(), ["email", "id", "name", "username", "preferences", "url"]) self.assertItemsEqual( user["preferences"].items(), [(pref.key, pref.value) for pref in self.prefs if pref.user.id == user["id"]] ) self.assertSelfReferential(user) def assertPrefIsValid(self, pref): """ Assert that the given preference is acknowledged by the system """ self.assertItemsEqual(pref.keys(), ["user", "key", "value", "url"]) self.assertSelfReferential(pref) self.assertUserIsValid(pref["user"]) class EmptyUserTestCase(UserAPITestCase): """ Test that the endpoint supports empty user result sets """ def test_get_list_empty(self): result = self.get_json(self.LIST_URI) self.assertEqual(result["count"], 0) self.assertIsNone(result["next"]) self.assertIsNone(result["previous"]) self.assertEqual(result["results"], []) class EmptyRoleTestCase(UserAPITestCase): """Test that the endpoint supports empty result sets""" course_id = SlashSeparatedCourseKey.from_deprecated_string("org/course/run") LIST_URI = ROLE_LIST_URI + "?course_id=" + course_id.to_deprecated_string() def test_get_list_empty(self): """Test that the endpoint properly returns empty result sets""" result = self.get_json(self.LIST_URI) self.assertEqual(result["count"], 0) self.assertIsNone(result["next"]) self.assertIsNone(result["previous"]) self.assertEqual(result["results"], []) class UserApiTestCase(UserAPITestCase): """ Generalized test case class for specific implementations below """ def setUp(self): super(UserApiTestCase, self).setUp() self.users = [ 
UserFactory.create( email="test{0}@test.org".format(i), profile__name="Test {0}".format(i) ) for i in range(5) ] self.prefs = [ UserPreferenceFactory.create(user=self.users[0], key="key0"), UserPreferenceFactory.create(user=self.users[0], key="key1"), UserPreferenceFactory.create(user=self.users[1], key="key0") ] class RoleTestCase(UserApiTestCase): """ Test cases covering Role-related views and their behaviors """ course_id = SlashSeparatedCourseKey.from_deprecated_string("org/course/run") LIST_URI = ROLE_LIST_URI + "?course_id=" + course_id.to_deprecated_string() def setUp(self): super(RoleTestCase, self).setUp() (role, _) = models.Role.objects.get_or_create( name=models.FORUM_ROLE_MODERATOR, course_id=self.course_id ) for user in self.users: user.roles.add(role) def test_options_list(self): self.assertAllowedMethods(self.LIST_URI, ["OPTIONS", "GET", "HEAD"]) def test_post_list_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("post", self.LIST_URI)) def test_put_list_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("put", self.LIST_URI)) def test_patch_list_not_allowed(self): raise SkipTest("Django 1.4's test client does not support patch") def test_delete_list_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("delete", self.LIST_URI)) def test_list_unauthorized(self): self.assertHttpForbidden(self.client.get(self.LIST_URI)) @override_settings(DEBUG=True) @override_settings(EDX_API_KEY=None) def test_debug_auth(self): self.assertHttpOK(self.client.get(self.LIST_URI)) @override_settings(DEBUG=False) @override_settings(EDX_API_KEY=TEST_API_KEY) def test_basic_auth(self): # ensure that having basic auth headers in the mix does not break anything self.assertHttpOK( self.request_with_auth("get", self.LIST_URI, **self.basic_auth("someuser", "somepass"))) self.assertHttpForbidden( self.client.get(self.LIST_URI, **self.basic_auth("someuser", "somepass"))) def test_get_list_nonempty(self): result = 
self.get_json(self.LIST_URI) users = result["results"] self.assertEqual(result["count"], len(self.users)) self.assertEqual(len(users), len(self.users)) self.assertIsNone(result["next"]) self.assertIsNone(result["previous"]) for user in users: self.assertUserIsValid(user) def test_required_parameter(self): response = self.request_with_auth("get", ROLE_LIST_URI) self.assertHttpBadRequest(response) def test_get_list_pagination(self): first_page = self.get_json(self.LIST_URI, data={ "page_size": 3, "course_id": self.course_id.to_deprecated_string(), }) self.assertEqual(first_page["count"], 5) first_page_next_uri = first_page["next"] self.assertIsNone(first_page["previous"]) first_page_users = first_page["results"] self.assertEqual(len(first_page_users), 3) second_page = self.get_json(first_page_next_uri) self.assertEqual(second_page["count"], 5) self.assertIsNone(second_page["next"]) second_page_prev_uri = second_page["previous"] second_page_users = second_page["results"] self.assertEqual(len(second_page_users), 2) self.assertEqual(self.get_json(second_page_prev_uri), first_page) for user in first_page_users + second_page_users: self.assertUserIsValid(user) all_user_uris = [user["url"] for user in first_page_users + second_page_users] self.assertEqual(len(set(all_user_uris)), 5) class UserViewSetTest(UserApiTestCase): """ Test cases covering the User DRF view set class and its various behaviors """ LIST_URI = USER_LIST_URI def setUp(self): super(UserViewSetTest, self).setUp() self.detail_uri = self.get_uri_for_user(self.users[0]) # List view tests def test_options_list(self): self.assertAllowedMethods(self.LIST_URI, ["OPTIONS", "GET", "HEAD"]) def test_post_list_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("post", self.LIST_URI)) def test_put_list_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("put", self.LIST_URI)) def test_patch_list_not_allowed(self): raise SkipTest("Django 1.4's test client does not support 
patch") def test_delete_list_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("delete", self.LIST_URI)) def test_list_unauthorized(self): self.assertHttpForbidden(self.client.get(self.LIST_URI)) @override_settings(DEBUG=True) @override_settings(EDX_API_KEY=None) def test_debug_auth(self): self.assertHttpOK(self.client.get(self.LIST_URI)) @override_settings(DEBUG=False) @override_settings(EDX_API_KEY=TEST_API_KEY) def test_basic_auth(self): # ensure that having basic auth headers in the mix does not break anything self.assertHttpOK( self.request_with_auth("get", self.LIST_URI, **self.basic_auth('someuser', 'somepass'))) self.assertHttpForbidden( self.client.get(self.LIST_URI, **self.basic_auth('someuser', 'somepass'))) def test_get_list_nonempty(self): result = self.get_json(self.LIST_URI) self.assertEqual(result["count"], 5) self.assertIsNone(result["next"]) self.assertIsNone(result["previous"]) users = result["results"] self.assertEqual(len(users), 5) for user in users: self.assertUserIsValid(user) def test_get_list_pagination(self): first_page = self.get_json(self.LIST_URI, data={"page_size": 3}) self.assertEqual(first_page["count"], 5) first_page_next_uri = first_page["next"] self.assertIsNone(first_page["previous"]) first_page_users = first_page["results"] self.assertEqual(len(first_page_users), 3) second_page = self.get_json(first_page_next_uri) self.assertEqual(second_page["count"], 5) self.assertIsNone(second_page["next"]) second_page_prev_uri = second_page["previous"] second_page_users = second_page["results"] self.assertEqual(len(second_page_users), 2) self.assertEqual(self.get_json(second_page_prev_uri), first_page) for user in first_page_users + second_page_users: self.assertUserIsValid(user) all_user_uris = [user["url"] for user in first_page_users + second_page_users] self.assertEqual(len(set(all_user_uris)), 5) # Detail view tests def test_options_detail(self): self.assertAllowedMethods(self.detail_uri, ["OPTIONS", "GET", 
"HEAD"]) def test_post_detail_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("post", self.detail_uri)) def test_put_detail_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("put", self.detail_uri)) def test_patch_detail_not_allowed(self): raise SkipTest("Django 1.4's test client does not support patch") def test_delete_detail_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("delete", self.detail_uri)) def test_get_detail_unauthorized(self): self.assertHttpForbidden(self.client.get(self.detail_uri)) def test_get_detail(self): user = self.users[1] uri = self.get_uri_for_user(user) self.assertEqual( self.get_json(uri), { "email": user.email, "id": user.id, "name": user.profile.name, "username": user.username, "preferences": dict([ (user_pref.key, user_pref.value) for user_pref in self.prefs if user_pref.user == user ]), "url": uri } ) class UserPreferenceViewSetTest(UserApiTestCase): """ Test cases covering the User Preference DRF view class and its various behaviors """ LIST_URI = USER_PREFERENCE_LIST_URI def setUp(self): super(UserPreferenceViewSetTest, self).setUp() self.detail_uri = self.get_uri_for_pref(self.prefs[0]) # List view tests def test_options_list(self): self.assertAllowedMethods(self.LIST_URI, ["OPTIONS", "GET", "HEAD"]) def test_put_list_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("put", self.LIST_URI)) def test_patch_list_not_allowed(self): raise SkipTest("Django 1.4's test client does not support patch") def test_delete_list_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("delete", self.LIST_URI)) def test_list_unauthorized(self): self.assertHttpForbidden(self.client.get(self.LIST_URI)) @override_settings(DEBUG=True) @override_settings(EDX_API_KEY=None) def test_debug_auth(self): self.assertHttpOK(self.client.get(self.LIST_URI)) def test_get_list_nonempty(self): result = self.get_json(self.LIST_URI) 
self.assertEqual(result["count"], 3) self.assertIsNone(result["next"]) self.assertIsNone(result["previous"]) prefs = result["results"] self.assertEqual(len(prefs), 3) for pref in prefs: self.assertPrefIsValid(pref) def test_get_list_filter_key_empty(self): result = self.get_json(self.LIST_URI, data={"key": "non-existent"}) self.assertEqual(result["count"], 0) self.assertEqual(result["results"], []) def test_get_list_filter_key_nonempty(self): result = self.get_json(self.LIST_URI, data={"key": "key0"}) self.assertEqual(result["count"], 2) prefs = result["results"] self.assertEqual(len(prefs), 2) for pref in prefs: self.assertPrefIsValid(pref) self.assertEqual(pref["key"], "key0") def test_get_list_filter_user_empty(self): def test_id(user_id): result = self.get_json(self.LIST_URI, data={"user": user_id}) self.assertEqual(result["count"], 0) self.assertEqual(result["results"], []) test_id(self.users[2].id) # TODO: If the given id does not match a user, then the filter is a no-op # test_id(42) # test_id("asdf") def test_get_list_filter_user_nonempty(self): user_id = self.users[0].id result = self.get_json(self.LIST_URI, data={"user": user_id}) self.assertEqual(result["count"], 2) prefs = result["results"] self.assertEqual(len(prefs), 2) for pref in prefs: self.assertPrefIsValid(pref) self.assertEqual(pref["user"]["id"], user_id) def test_get_list_pagination(self): first_page = self.get_json(self.LIST_URI, data={"page_size": 2}) self.assertEqual(first_page["count"], 3) first_page_next_uri = first_page["next"] self.assertIsNone(first_page["previous"]) first_page_prefs = first_page["results"] self.assertEqual(len(first_page_prefs), 2) second_page = self.get_json(first_page_next_uri) self.assertEqual(second_page["count"], 3) self.assertIsNone(second_page["next"]) second_page_prev_uri = second_page["previous"] second_page_prefs = second_page["results"] self.assertEqual(len(second_page_prefs), 1) self.assertEqual(self.get_json(second_page_prev_uri), first_page) for pref in 
first_page_prefs + second_page_prefs: self.assertPrefIsValid(pref) all_pref_uris = [pref["url"] for pref in first_page_prefs + second_page_prefs] self.assertEqual(len(set(all_pref_uris)), 3) # Detail view tests def test_options_detail(self): self.assertAllowedMethods(self.detail_uri, ["OPTIONS", "GET", "HEAD"]) def test_post_detail_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("post", self.detail_uri)) def test_put_detail_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("put", self.detail_uri)) def test_patch_detail_not_allowed(self): raise SkipTest("Django 1.4's test client does not support patch") def test_delete_detail_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("delete", self.detail_uri)) def test_detail_unauthorized(self): self.assertHttpForbidden(self.client.get(self.detail_uri)) def test_get_detail(self): pref = self.prefs[1] uri = self.get_uri_for_pref(pref) self.assertEqual( self.get_json(uri), { "user": { "email": pref.user.email, "id": pref.user.id, "name": pref.user.profile.name, "username": pref.user.username, "preferences": dict([ (user_pref.key, user_pref.value) for user_pref in self.prefs if user_pref.user == pref.user ]), "url": self.get_uri_for_user(pref.user), }, "key": pref.key, "value": pref.value, "url": uri, } ) class PreferenceUsersListViewTest(UserApiTestCase): """ Test cases covering the list viewing behavior for user preferences """ LIST_URI = "/user_api/v1/preferences/key0/users/" def test_options(self): self.assertAllowedMethods(self.LIST_URI, ["OPTIONS", "GET", "HEAD"]) def test_put_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("put", self.LIST_URI)) def test_patch_not_allowed(self): raise SkipTest("Django 1.4's test client does not support patch") def test_delete_not_allowed(self): self.assertHttpMethodNotAllowed(self.request_with_auth("delete", self.LIST_URI)) def test_unauthorized(self): 
self.assertHttpForbidden(self.client.get(self.LIST_URI)) @override_settings(DEBUG=True) @override_settings(EDX_API_KEY=None) def test_debug_auth(self): self.assertHttpOK(self.client.get(self.LIST_URI)) def test_get_basic(self): result = self.get_json(self.LIST_URI) self.assertEqual(result["count"], 2) self.assertIsNone(result["next"]) self.assertIsNone(result["previous"]) users = result["results"] self.assertEqual(len(users), 2) for user in users: self.assertUserIsValid(user) def test_get_pagination(self): first_page = self.get_json(self.LIST_URI, data={"page_size": 1}) self.assertEqual(first_page["count"], 2) first_page_next_uri = first_page["next"] self.assertIsNone(first_page["previous"]) first_page_users = first_page["results"] self.assertEqual(len(first_page_users), 1) second_page = self.get_json(first_page_next_uri) self.assertEqual(second_page["count"], 2) self.assertIsNone(second_page["next"]) second_page_prev_uri = second_page["previous"] second_page_users = second_page["results"] self.assertEqual(len(second_page_users), 1) self.assertEqual(self.get_json(second_page_prev_uri), first_page) for user in first_page_users + second_page_users: self.assertUserIsValid(user) all_user_uris = [user["url"] for user in first_page_users + second_page_users] self.assertEqual(len(set(all_user_uris)), 2) @ddt.ddt @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') class LoginSessionViewTest(UserAPITestCase): """Tests for the login end-points of the user API. 
""" USERNAME = "bob" EMAIL = "bob@example.com" PASSWORD = "password" def setUp(self): super(LoginSessionViewTest, self).setUp() self.url = reverse("user_api_login_session") @ddt.data("get", "post") def test_auth_disabled(self, method): self.assertAuthDisabled(method, self.url) def test_allowed_methods(self): self.assertAllowedMethods(self.url, ["GET", "POST", "HEAD", "OPTIONS"]) def test_put_not_allowed(self): response = self.client.put(self.url) self.assertHttpMethodNotAllowed(response) def test_delete_not_allowed(self): response = self.client.delete(self.url) self.assertHttpMethodNotAllowed(response) def test_patch_not_allowed(self): raise SkipTest("Django 1.4's test client does not support patch") def test_login_form(self): # Retrieve the login form response = self.client.get(self.url, content_type="application/json") self.assertHttpOK(response) # Verify that the form description matches what we expect form_desc = json.loads(response.content) self.assertEqual(form_desc["method"], "post") self.assertEqual(form_desc["submit_url"], self.url) self.assertEqual(form_desc["fields"], [ { "name": "email", "defaultValue": "", "type": "email", "required": True, "label": "Email", "placeholder": "username@domain.com", "instructions": "The email address you used to register with {platform_name}".format( platform_name=settings.PLATFORM_NAME ), "restrictions": { "min_length": EMAIL_MIN_LENGTH, "max_length": EMAIL_MAX_LENGTH }, "errorMessages": {}, }, { "name": "password", "defaultValue": "", "type": "password", "required": True, "label": "Password", "placeholder": "", "instructions": "", "restrictions": { "min_length": PASSWORD_MIN_LENGTH, "max_length": PASSWORD_MAX_LENGTH }, "errorMessages": {}, }, { "name": "remember", "defaultValue": False, "type": "checkbox", "required": False, "label": "Remember me", "placeholder": "", "instructions": "", "restrictions": {}, "errorMessages": {}, }, ]) def test_login(self): # Create a test user UserFactory.create(username=self.USERNAME, 
email=self.EMAIL, password=self.PASSWORD) # Login response = self.client.post(self.url, { "email": self.EMAIL, "password": self.PASSWORD, }) self.assertHttpOK(response) # Verify that we logged in successfully by accessing # a page that requires authentication. response = self.client.get(reverse("dashboard")) self.assertHttpOK(response) @ddt.data( (json.dumps(True), False), (json.dumps(False), True), (None, True), ) @ddt.unpack def test_login_remember_me(self, remember_value, expire_at_browser_close): # Create a test user UserFactory.create(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD) # Login and remember me data = { "email": self.EMAIL, "password": self.PASSWORD, } if remember_value is not None: data["remember"] = remember_value response = self.client.post(self.url, data) self.assertHttpOK(response) # Verify that the session expiration was set correctly self.assertEqual( self.client.session.get_expire_at_browser_close(), expire_at_browser_close ) def test_invalid_credentials(self): # Create a test user UserFactory.create(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD) # Invalid password response = self.client.post(self.url, { "email": self.EMAIL, "password": "invalid" }) self.assertHttpForbidden(response) # Invalid email address response = self.client.post(self.url, { "email": "invalid@example.com", "password": self.PASSWORD, }) self.assertHttpForbidden(response) def test_missing_login_params(self): # Create a test user UserFactory.create(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD) # Missing password response = self.client.post(self.url, { "email": self.EMAIL, }) self.assertHttpBadRequest(response) # Missing email response = self.client.post(self.url, { "password": self.PASSWORD, }) self.assertHttpBadRequest(response) # Missing both email and password response = self.client.post(self.url, {}) @ddt.ddt @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') class 
PasswordResetViewTest(UserAPITestCase): """Tests of the user API's password reset endpoint. """ def setUp(self): super(PasswordResetViewTest, self).setUp() self.url = reverse("user_api_password_reset") @ddt.data("get", "post") def test_auth_disabled(self, method): self.assertAuthDisabled(method, self.url) def test_allowed_methods(self): self.assertAllowedMethods(self.url, ["GET", "HEAD", "OPTIONS"]) def test_put_not_allowed(self): response = self.client.put(self.url) self.assertHttpMethodNotAllowed(response) def test_delete_not_allowed(self): response = self.client.delete(self.url) self.assertHttpMethodNotAllowed(response) def test_patch_not_allowed(self): raise SkipTest("Django 1.4's test client does not support patch") def test_password_reset_form(self): # Retrieve the password reset form response = self.client.get(self.url, content_type="application/json") self.assertHttpOK(response) # Verify that the form description matches what we expect form_desc = json.loads(response.content) self.assertEqual(form_desc["method"], "post") self.assertEqual(form_desc["submit_url"], reverse("password_change_request")) self.assertEqual(form_desc["fields"], [ { "name": "email", "defaultValue": "", "type": "email", "required": True, "label": "Email", "placeholder": "username@domain.com", "instructions": "The email address you used to register with {platform_name}".format( platform_name=settings.PLATFORM_NAME ), "restrictions": { "min_length": EMAIL_MIN_LENGTH, "max_length": EMAIL_MAX_LENGTH }, "errorMessages": {}, } ]) @ddt.ddt @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') class RegistrationViewTest(ThirdPartyAuthTestMixin, UserAPITestCase): """Tests for the registration end-points of the User API. """ maxDiff = None USERNAME = "bob" EMAIL = "bob@example.com" PASSWORD = "password" NAME = "Bob Smith" EDUCATION = "m" YEAR_OF_BIRTH = "1998" ADDRESS = "123 Fake Street" CITY = "Springfield" COUNTRY = "us" GOALS = "Learn all the things!" 
def setUp(self): super(RegistrationViewTest, self).setUp() self.url = reverse("user_api_registration") @ddt.data("get", "post") def test_auth_disabled(self, method): self.assertAuthDisabled(method, self.url) def test_allowed_methods(self): self.assertAllowedMethods(self.url, ["GET", "POST", "HEAD", "OPTIONS"]) def test_put_not_allowed(self): response = self.client.put(self.url) self.assertHttpMethodNotAllowed(response) def test_delete_not_allowed(self): response = self.client.delete(self.url) self.assertHttpMethodNotAllowed(response) def test_patch_not_allowed(self): raise SkipTest("Django 1.4's test client does not support patch") def test_register_form_default_fields(self): no_extra_fields_setting = {} self._assert_reg_field( no_extra_fields_setting, { u"name": u"email", u"type": u"email", u"required": True, u"label": u"Email", u"placeholder": u"username@domain.com", u"restrictions": { "min_length": EMAIL_MIN_LENGTH, "max_length": EMAIL_MAX_LENGTH }, } ) self._assert_reg_field( no_extra_fields_setting, { u"name": u"name", u"type": u"text", u"required": True, u"label": u"Full name", u"placeholder": u"Jane Doe", u"instructions": u"Your legal name, used for any certificates you earn.", u"restrictions": { "max_length": 255 }, } ) self._assert_reg_field( no_extra_fields_setting, { u"name": u"username", u"type": u"text", u"required": True, u"label": u"Public username", u"placeholder": u"JaneDoe", u"instructions": u"The name that will identify you in your courses - <strong>(cannot be changed later)</strong>", # pylint: disable=line-too-long u"restrictions": { "min_length": USERNAME_MIN_LENGTH, "max_length": USERNAME_MAX_LENGTH }, } ) self._assert_reg_field( no_extra_fields_setting, { u"placeholder": "", u"name": u"password", u"type": u"password", u"required": True, u"label": u"Password", u"restrictions": { 'min_length': PASSWORD_MIN_LENGTH, 'max_length': PASSWORD_MAX_LENGTH # 'min_length': account_api.PASSWORD_MIN_LENGTH, # 'max_length': account_api.PASSWORD_MAX_LENGTH 
}, } ) @override_settings(REGISTRATION_EXTENSION_FORM='openedx.core.djangoapps.user_api.tests.test_helpers.TestCaseForm') def test_extension_form_fields(self): no_extra_fields_setting = {} # Verify other fields didn't disappear for some reason. self._assert_reg_field( no_extra_fields_setting, { u"name": u"email", u"type": u"email", u"required": True, u"label": u"Email", u"placeholder": u"username@domain.com", u"restrictions": { "min_length": EMAIL_MIN_LENGTH, "max_length": EMAIL_MAX_LENGTH }, } ) self._assert_reg_field( no_extra_fields_setting, { u"name": u"favorite_editor", u"type": u"select", u"required": False, u"label": u"Favorite Editor", u"placeholder": u"cat", u"defaultValue": u"vim", u"errorMessages": { u'required': u'This field is required.', u'invalid_choice': u'Select a valid choice. %(value)s is not one of the available choices.', } } ) self._assert_reg_field( no_extra_fields_setting, { u"name": u"favorite_movie", u"type": u"text", u"required": True, u"label": u"Fav Flick", u"placeholder": None, u"defaultValue": None, u"errorMessages": { u'required': u'Please tell us your favorite movie.', u'invalid': u"We're pretty sure you made that movie up." 
}, u"restrictions": { "min_length": TestCaseForm.MOVIE_MIN_LEN, "max_length": TestCaseForm.MOVIE_MAX_LEN, } } ) def test_register_form_third_party_auth_running(self): no_extra_fields_setting = {} self.configure_google_provider(enabled=True) with simulate_running_pipeline( "openedx.core.djangoapps.user_api.views.third_party_auth.pipeline", "google-oauth2", email="bob@example.com", fullname="Bob", username="Bob123" ): # Password field should be hidden self._assert_reg_field( no_extra_fields_setting, { "name": "password", "type": "hidden", "required": False, } ) # Email should be filled in self._assert_reg_field( no_extra_fields_setting, { u"name": u"email", u"defaultValue": u"bob@example.com", u"type": u"email", u"required": True, u"label": u"Email", u"placeholder": u"username@domain.com", u"restrictions": { "min_length": EMAIL_MIN_LENGTH, "max_length": EMAIL_MAX_LENGTH }, } ) # Full name should be filled in self._assert_reg_field( no_extra_fields_setting, { u"name": u"name", u"defaultValue": u"Bob", u"type": u"text", u"required": True, u"label": u"Full name", u"placeholder": u"Jane Doe", u"instructions": u"Your legal name, used for any certificates you earn.", u"restrictions": { "max_length": NAME_MAX_LENGTH, } } ) # Username should be filled in self._assert_reg_field( no_extra_fields_setting, { u"name": u"username", u"defaultValue": u"Bob123", u"type": u"text", u"required": True, u"label": u"Public username", u"placeholder": u"JaneDoe", u"instructions": u"The name that will identify you in your courses - <strong>(cannot be changed later)</strong>", # pylint: disable=line-too-long u"restrictions": { "min_length": USERNAME_MIN_LENGTH, "max_length": USERNAME_MAX_LENGTH } } ) def test_register_form_level_of_education(self): self._assert_reg_field( {"level_of_education": "optional"}, { "name": "level_of_education", "type": "select", "required": False, "label": "Highest level of education completed", "options": [ {"value": "", "name": "--", "default": True}, {"value": 
"p", "name": "Doctorate"}, {"value": "m", "name": "Master's or professional degree"}, {"value": "b", "name": "Bachelor's degree"}, {"value": "a", "name": "Associate degree"}, {"value": "hs", "name": "Secondary/high school"}, {"value": "jhs", "name": "Junior secondary/junior high/middle school"}, {"value": "el", "name": "Elementary/primary school"}, {"value": "none", "name": "No Formal Education"}, {"value": "other", "name": "Other Education"}, ], } ) @mock.patch('openedx.core.djangoapps.user_api.views._') def test_register_form_level_of_education_translations(self, fake_gettext): fake_gettext.side_effect = lambda text: text + ' TRANSLATED' self._assert_reg_field( {"level_of_education": "optional"}, { "name": "level_of_education", "type": "select", "required": False, "label": "Highest level of education completed TRANSLATED", "options": [ {"value": "", "name": "--", "default": True}, {"value": "p", "name": "Doctorate TRANSLATED"}, {"value": "m", "name": "Master's or professional degree TRANSLATED"}, {"value": "b", "name": "Bachelor's degree TRANSLATED"}, {"value": "a", "name": "Associate degree TRANSLATED"}, {"value": "hs", "name": "Secondary/high school TRANSLATED"}, {"value": "jhs", "name": "Junior secondary/junior high/middle school TRANSLATED"}, {"value": "el", "name": "Elementary/primary school TRANSLATED"}, {"value": "none", "name": "No Formal Education TRANSLATED"}, {"value": "other", "name": "Other Education TRANSLATED"}, ], } ) def test_register_form_gender(self): self._assert_reg_field( {"gender": "optional"}, { "name": "gender", "type": "select", "required": False, "label": "Gender", "options": [ {"value": "", "name": "--", "default": True}, {"value": "m", "name": "Male"}, {"value": "f", "name": "Female"}, {"value": "o", "name": "Other/Prefer Not to Say"}, ], } ) @mock.patch('openedx.core.djangoapps.user_api.views._') def test_register_form_gender_translations(self, fake_gettext): fake_gettext.side_effect = lambda text: text + ' TRANSLATED' 
self._assert_reg_field( {"gender": "optional"}, { "name": "gender", "type": "select", "required": False, "label": "Gender TRANSLATED", "options": [ {"value": "", "name": "--", "default": True}, {"value": "m", "name": "Male TRANSLATED"}, {"value": "f", "name": "Female TRANSLATED"}, {"value": "o", "name": "Other/Prefer Not to Say TRANSLATED"}, ], } ) def test_register_form_year_of_birth(self): this_year = datetime.datetime.now(UTC).year year_options = ( [{"value": "", "name": "--", "default": True}] + [ {"value": unicode(year), "name": unicode(year)} for year in range(this_year, this_year - 120, -1) ] ) self._assert_reg_field( {"year_of_birth": "optional"}, { "name": "year_of_birth", "type": "select", "required": False, "label": "Year of birth", "options": year_options, } ) def test_registration_form_mailing_address(self): self._assert_reg_field( {"mailing_address": "optional"}, { "name": "mailing_address", "type": "textarea", "required": False, "label": "Mailing address", } ) def test_registration_form_goals(self): self._assert_reg_field( {"goals": "optional"}, { "name": "goals", "type": "textarea", "required": False, "label": "Tell us why you're interested in {platform_name}".format( platform_name=settings.PLATFORM_NAME ) } ) def test_registration_form_city(self): self._assert_reg_field( {"city": "optional"}, { "name": "city", "type": "text", "required": False, "label": "City", } ) def test_registration_form_state(self): self._assert_reg_field( {"state": "optional"}, { "name": "state", "type": "text", "required": False, "label": "State/Province/Region", } ) def test_registration_form_country(self): country_options = ( [{"name": "--", "value": "", "default": True}] + [ {"value": country_code, "name": unicode(country_name)} for country_code, country_name in SORTED_COUNTRIES ] ) self._assert_reg_field( {"country": "required"}, { "label": "Country", "name": "country", "type": "select", "required": True, "options": country_options, "errorMessages": { "required": "Please 
select your Country." }, } ) @override_settings( MKTG_URLS={"ROOT": "https://www.test.com/", "HONOR": "honor"}, ) @mock.patch.dict(settings.FEATURES, {"ENABLE_MKTG_SITE": True}) def test_registration_honor_code_mktg_site_enabled(self): link_html = '<a href=\"https://www.test.com/honor\">Terms of Service and Honor Code</a>' self._assert_reg_field( {"honor_code": "required"}, { "label": "I agree to the {platform_name} {link_html}.".format( platform_name=settings.PLATFORM_NAME, link_html=link_html ), "name": "honor_code", "defaultValue": False, "type": "checkbox", "required": True, "errorMessages": { "required": "You must agree to the {platform_name} {link_html}.".format( platform_name=settings.PLATFORM_NAME, link_html=link_html ) } } ) @override_settings(MKTG_URLS_LINK_MAP={"HONOR": "honor"}) @mock.patch.dict(settings.FEATURES, {"ENABLE_MKTG_SITE": False}) def test_registration_honor_code_mktg_site_disabled(self): link_html = '<a href=\"/honor\">Terms of Service and Honor Code</a>' self._assert_reg_field( {"honor_code": "required"}, { "label": "I agree to the {platform_name} {link_html}.".format( platform_name=settings.PLATFORM_NAME, link_html=link_html ), "name": "honor_code", "defaultValue": False, "type": "checkbox", "required": True, "errorMessages": { "required": "You must agree to the {platform_name} {link_html}.".format( platform_name=settings.PLATFORM_NAME, link_html=link_html ) } } ) @override_settings(MKTG_URLS={ "ROOT": "https://www.test.com/", "HONOR": "honor", "TOS": "tos", }) @mock.patch.dict(settings.FEATURES, {"ENABLE_MKTG_SITE": True}) def test_registration_separate_terms_of_service_mktg_site_enabled(self): # Honor code field should say ONLY honor code, # not "terms of service and honor code" link_html = '<a href=\"https://www.test.com/honor\">Honor Code</a>' self._assert_reg_field( {"honor_code": "required", "terms_of_service": "required"}, { "label": "I agree to the {platform_name} {link_html}.".format( platform_name=settings.PLATFORM_NAME, 
link_html=link_html ), "name": "honor_code", "defaultValue": False, "type": "checkbox", "required": True, "errorMessages": { "required": "You must agree to the {platform_name} {link_html}.".format( platform_name=settings.PLATFORM_NAME, link_html=link_html ) } } ) # Terms of service field should also be present link_html = '<a href=\"https://www.test.com/tos\">Terms of Service</a>' self._assert_reg_field( {"honor_code": "required", "terms_of_service": "required"}, { "label": "I agree to the {platform_name} {link_html}.".format( platform_name=settings.PLATFORM_NAME, link_html=link_html ), "name": "terms_of_service", "defaultValue": False, "type": "checkbox", "required": True, "errorMessages": { "required": "You must agree to the {platform_name} {link_html}.".format( platform_name=settings.PLATFORM_NAME, link_html=link_html ) } } ) @override_settings(MKTG_URLS_LINK_MAP={"HONOR": "honor", "TOS": "tos"}) @mock.patch.dict(settings.FEATURES, {"ENABLE_MKTG_SITE": False}) def test_registration_separate_terms_of_service_mktg_site_disabled(self): # Honor code field should say ONLY honor code, # not "terms of service and honor code" self._assert_reg_field( {"honor_code": "required", "terms_of_service": "required"}, { "label": "I agree to the {platform_name} <a href=\"/honor\">Honor Code</a>.".format( platform_name=settings.PLATFORM_NAME ), "name": "honor_code", "defaultValue": False, "type": "checkbox", "required": True, "errorMessages": { "required": "You must agree to the {platform_name} <a href=\"/honor\">Honor Code</a>.".format( platform_name=settings.PLATFORM_NAME ) } } ) # Terms of service field should also be present self._assert_reg_field( {"honor_code": "required", "terms_of_service": "required"}, { "label": "I agree to the {platform_name} <a href=\"/tos\">Terms of Service</a>.".format( platform_name=settings.PLATFORM_NAME ), "name": "terms_of_service", "defaultValue": False, "type": "checkbox", "required": True, "errorMessages": { "required": "You must agree to the 
{platform_name} <a href=\"/tos\">Terms of Service</a>.".format( platform_name=settings.PLATFORM_NAME ) } } ) @override_settings( REGISTRATION_EXTRA_FIELDS={ "level_of_education": "optional", "gender": "optional", "year_of_birth": "optional", "mailing_address": "optional", "goals": "optional", "city": "optional", "state": "optional", "country": "required", "honor_code": "required", }, REGISTRATION_EXTENSION_FORM='openedx.core.djangoapps.user_api.tests.test_helpers.TestCaseForm', ) def test_field_order(self): response = self.client.get(self.url) self.assertHttpOK(response) # Verify that all fields render in the correct order form_desc = json.loads(response.content) field_names = [field["name"] for field in form_desc["fields"]] self.assertEqual(field_names, [ "email", "name", "username", "password", "favorite_movie", "favorite_editor", "city", "state", "country", "gender", "year_of_birth", "level_of_education", "mailing_address", "goals", "honor_code", ]) def test_register(self): # Create a new registration response = self.client.post(self.url, { "email": self.EMAIL, "name": self.NAME, "username": self.USERNAME, "password": self.PASSWORD, "honor_code": "true", }) self.assertHttpOK(response) self.assertIn(settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, self.client.cookies) self.assertIn(settings.EDXMKTG_USER_INFO_COOKIE_NAME, self.client.cookies) user = User.objects.get(username=self.USERNAME) request = RequestFactory().get('/url') request.user = user account_settings = get_account_settings(request) self.assertEqual(self.USERNAME, account_settings["username"]) self.assertEqual(self.EMAIL, account_settings["email"]) self.assertFalse(account_settings["is_active"]) self.assertEqual(self.NAME, account_settings["name"]) # Verify that we've been logged in # by trying to access a page that requires authentication response = self.client.get(reverse("dashboard")) self.assertHttpOK(response) @override_settings(REGISTRATION_EXTRA_FIELDS={ "level_of_education": "optional", "gender": 
"optional", "year_of_birth": "optional", "mailing_address": "optional", "goals": "optional", "country": "required", }) def test_register_with_profile_info(self): # Register, providing lots of demographic info response = self.client.post(self.url, { "email": self.EMAIL, "name": self.NAME, "username": self.USERNAME, "password": self.PASSWORD, "level_of_education": self.EDUCATION, "mailing_address": self.ADDRESS, "year_of_birth": self.YEAR_OF_BIRTH, "goals": self.GOALS, "country": self.COUNTRY, "honor_code": "true", }) self.assertHttpOK(response) # Verify the user's account user = User.objects.get(username=self.USERNAME) request = RequestFactory().get('/url') request.user = user account_settings = get_account_settings(request) self.assertEqual(account_settings["level_of_education"], self.EDUCATION) self.assertEqual(account_settings["mailing_address"], self.ADDRESS) self.assertEqual(account_settings["year_of_birth"], int(self.YEAR_OF_BIRTH)) self.assertEqual(account_settings["goals"], self.GOALS) self.assertEqual(account_settings["country"], self.COUNTRY) @override_settings(REGISTRATION_EXTENSION_FORM='openedx.core.djangoapps.user_api.tests.test_helpers.TestCaseForm') @mock.patch('openedx.core.djangoapps.user_api.tests.test_helpers.TestCaseForm.DUMMY_STORAGE', new_callable=dict) @mock.patch( 'openedx.core.djangoapps.user_api.tests.test_helpers.DummyRegistrationExtensionModel', ) def test_with_extended_form(self, dummy_model, storage_dict): dummy_model_instance = mock.Mock() dummy_model.return_value = dummy_model_instance # Create a new registration self.assertEqual(storage_dict, {}) response = self.client.post(self.url, { "email": self.EMAIL, "name": self.NAME, "username": self.USERNAME, "password": self.PASSWORD, "honor_code": "true", "favorite_movie": "Inception", "favorite_editor": "cat", }) self.assertHttpOK(response) self.assertIn(settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, self.client.cookies) self.assertIn(settings.EDXMKTG_USER_INFO_COOKIE_NAME, self.client.cookies) 
user = User.objects.get(username=self.USERNAME) request = RequestFactory().get('/url') request.user = user account_settings = get_account_settings(request) self.assertEqual(self.USERNAME, account_settings["username"]) self.assertEqual(self.EMAIL, account_settings["email"]) self.assertFalse(account_settings["is_active"]) self.assertEqual(self.NAME, account_settings["name"]) self.assertEqual(storage_dict, {'favorite_movie': "Inception", "favorite_editor": "cat"}) self.assertEqual(dummy_model_instance.user, user) # Verify that we've been logged in # by trying to access a page that requires authentication response = self.client.get(reverse("dashboard")) self.assertHttpOK(response) def test_activation_email(self): # Register, which should trigger an activation email response = self.client.post(self.url, { "email": self.EMAIL, "name": self.NAME, "username": self.USERNAME, "password": self.PASSWORD, "honor_code": "true", }) self.assertHttpOK(response) # Verify that the activation email was sent self.assertEqual(len(mail.outbox), 1) sent_email = mail.outbox[0] self.assertEqual(sent_email.to, [self.EMAIL]) self.assertEqual(sent_email.subject, "Activate Your edX Account") self.assertIn( u"activating your {platform} account".format(platform=settings.PLATFORM_NAME), sent_email.body ) @ddt.data( {"email": ""}, {"email": "invalid"}, {"name": ""}, {"username": ""}, {"username": "a"}, {"password": ""}, ) def test_register_invalid_input(self, invalid_fields): # Initially, the field values are all valid data = { "email": self.EMAIL, "name": self.NAME, "username": self.USERNAME, "password": self.PASSWORD, } # Override the valid fields, making the input invalid data.update(invalid_fields) # Attempt to create the account, expecting an error response response = self.client.post(self.url, data) self.assertHttpBadRequest(response) @override_settings(REGISTRATION_EXTRA_FIELDS={"country": "required"}) @ddt.data("email", "name", "username", "password", "country") def 
test_register_missing_required_field(self, missing_field): data = { "email": self.EMAIL, "name": self.NAME, "username": self.USERNAME, "password": self.PASSWORD, "country": self.COUNTRY, } del data[missing_field] # Send a request missing a field response = self.client.post(self.url, data) self.assertHttpBadRequest(response) def test_register_duplicate_email(self): # Register the first user response = self.client.post(self.url, { "email": self.EMAIL, "name": self.NAME, "username": self.USERNAME, "password": self.PASSWORD, "honor_code": "true", }) self.assertHttpOK(response) # Try to create a second user with the same email address response = self.client.post(self.url, { "email": self.EMAIL, "name": "Someone Else", "username": "someone_else", "password": self.PASSWORD, "honor_code": "true", }) self.assertEqual(response.status_code, 409) response_json = json.loads(response.content) self.assertEqual( response_json, { "email": [{ "user_message": ( "It looks like {} belongs to an existing account. " "Try again with a different email address." ).format( self.EMAIL ) }] } ) def test_register_duplicate_username(self): # Register the first user response = self.client.post(self.url, { "email": self.EMAIL, "name": self.NAME, "username": self.USERNAME, "password": self.PASSWORD, "honor_code": "true", }) self.assertHttpOK(response) # Try to create a second user with the same username response = self.client.post(self.url, { "email": "someone+else@example.com", "name": "Someone Else", "username": self.USERNAME, "password": self.PASSWORD, "honor_code": "true", }) self.assertEqual(response.status_code, 409) response_json = json.loads(response.content) self.assertEqual( response_json, { "username": [{ "user_message": ( "It looks like {} belongs to an existing account. " "Try again with a different username." 
).format( self.USERNAME ) }] } ) def test_register_duplicate_username_and_email(self): # Register the first user response = self.client.post(self.url, { "email": self.EMAIL, "name": self.NAME, "username": self.USERNAME, "password": self.PASSWORD, "honor_code": "true", }) self.assertHttpOK(response) # Try to create a second user with the same username response = self.client.post(self.url, { "email": self.EMAIL, "name": "Someone Else", "username": self.USERNAME, "password": self.PASSWORD, "honor_code": "true", }) self.assertEqual(response.status_code, 409) response_json = json.loads(response.content) self.assertEqual( response_json, { "username": [{ "user_message": ( "It looks like {} belongs to an existing account. " "Try again with a different username." ).format( self.USERNAME ) }], "email": [{ "user_message": ( "It looks like {} belongs to an existing account. " "Try again with a different email address." ).format( self.EMAIL ) }] } ) @override_settings(REGISTRATION_EXTRA_FIELDS={"honor_code": "hidden", "terms_of_service": "hidden"}) def test_register_hidden_honor_code_and_terms_of_service(self): response = self.client.post(self.url, { "email": self.EMAIL, "name": self.NAME, "username": self.USERNAME, "password": self.PASSWORD, }) self.assertHttpOK(response) def test_missing_fields(self): response = self.client.post( self.url, { "email": self.EMAIL, "name": self.NAME, "honor_code": "true", } ) self.assertEqual(response.status_code, 400) response_json = json.loads(response.content) self.assertEqual( response_json, { "username": [{"user_message": "Username must be minimum of two characters long"}], "password": [{"user_message": "A valid password is required"}], } ) def _assert_reg_field(self, extra_fields_setting, expected_field): """Retrieve the registration form description from the server and verify that it contains the expected field. Args: extra_fields_setting (dict): Override the Django setting controlling which extra fields are displayed in the form. 
expected_field (dict): The field definition we expect to find in the form. Raises: AssertionError """ # Add in fields that are always present defaults = [ ("label", ""), ("instructions", ""), ("placeholder", ""), ("defaultValue", ""), ("restrictions", {}), ("errorMessages", {}), ] for key, value in defaults: if key not in expected_field: expected_field[key] = value # Retrieve the registration form description with override_settings(REGISTRATION_EXTRA_FIELDS=extra_fields_setting): response = self.client.get(self.url) self.assertHttpOK(response) # Verify that the form description matches what we'd expect form_desc = json.loads(response.content) # Search the form for this field actual_field = None for field in form_desc["fields"]: if field["name"] == expected_field["name"]: actual_field = field break self.assertIsNot( actual_field, None, msg="Could not find field {name}".format(name=expected_field["name"]) ) for key, value in expected_field.iteritems(): self.assertEqual( expected_field[key], actual_field[key], msg=u"Expected {expected} for {key} but got {actual} instead".format( key=key, expected=expected_field[key], actual=actual_field[key] ) ) @httpretty.activate @ddt.ddt class ThirdPartyRegistrationTestMixin(ThirdPartyOAuthTestMixin): """ Tests for the User API registration endpoint with 3rd party authentication. 
""" def setUp(self): super(ThirdPartyRegistrationTestMixin, self).setUp(create_user=False) self.url = reverse('user_api_registration') def data(self, user=None): """Returns the request data for the endpoint.""" return { "provider": self.BACKEND, "access_token": self.access_token, "client_id": self.client_id, "honor_code": "true", "country": "US", "username": user.username if user else "test_username", "name": user.first_name if user else "test name", "email": user.email if user else "test@test.com", } def _assert_existing_user_error(self, response): """Assert that the given response was an error with the given status_code and error code.""" self.assertEqual(response.status_code, 409) errors = json.loads(response.content) for conflict_attribute in ["username", "email"]: self.assertIn(conflict_attribute, errors) self.assertIn("belongs to an existing account", errors[conflict_attribute][0]["user_message"]) self.assertNotIn("partial_pipeline", self.client.session) def _assert_access_token_error(self, response, expected_error_message): """Assert that the given response was an error for the access_token field with the given error message.""" self.assertEqual(response.status_code, 400) response_json = json.loads(response.content) self.assertEqual( response_json, {"access_token": [{"user_message": expected_error_message}]} ) self.assertNotIn("partial_pipeline", self.client.session) def _verify_user_existence(self, user_exists, social_link_exists, user_is_active=None, username=None): """Verifies whether the user object exists.""" users = User.objects.filter(username=(username if username else "test_username")) self.assertEquals(users.exists(), user_exists) if user_exists: self.assertEquals(users[0].is_active, user_is_active) self.assertEqual( UserSocialAuth.objects.filter(user=users[0], provider=self.BACKEND).exists(), social_link_exists ) else: self.assertEquals(UserSocialAuth.objects.count(), 0) def test_success(self): self._verify_user_existence(user_exists=False, 
social_link_exists=False) self._setup_provider_response(success=True) response = self.client.post(self.url, self.data()) self.assertEqual(response.status_code, 200) self._verify_user_existence(user_exists=True, social_link_exists=True, user_is_active=False) def test_unlinked_active_user(self): user = UserFactory() response = self.client.post(self.url, self.data(user)) self._assert_existing_user_error(response) self._verify_user_existence( user_exists=True, social_link_exists=False, user_is_active=True, username=user.username ) def test_unlinked_inactive_user(self): user = UserFactory(is_active=False) response = self.client.post(self.url, self.data(user)) self._assert_existing_user_error(response) self._verify_user_existence( user_exists=True, social_link_exists=False, user_is_active=False, username=user.username ) def test_user_already_registered(self): self._setup_provider_response(success=True) user = UserFactory() UserSocialAuth.objects.create(user=user, provider=self.BACKEND, uid=self.social_uid) response = self.client.post(self.url, self.data(user)) self._assert_existing_user_error(response) self._verify_user_existence( user_exists=True, social_link_exists=True, user_is_active=True, username=user.username ) def test_social_user_conflict(self): self._setup_provider_response(success=True) user = UserFactory() UserSocialAuth.objects.create(user=user, provider=self.BACKEND, uid=self.social_uid) response = self.client.post(self.url, self.data()) self._assert_access_token_error(response, "The provided access_token is already associated with another user.") self._verify_user_existence( user_exists=True, social_link_exists=True, user_is_active=True, username=user.username ) def test_invalid_token(self): self._setup_provider_response(success=False) response = self.client.post(self.url, self.data()) self._assert_access_token_error(response, "The provided access_token is not valid.") self._verify_user_existence(user_exists=False, social_link_exists=False) def 
test_missing_token(self): data = self.data() data.pop("access_token") response = self.client.post(self.url, data) self._assert_access_token_error( response, "An access_token is required when passing value ({}) for provider.".format(self.BACKEND) ) self._verify_user_existence(user_exists=False, social_link_exists=False) @skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled") class TestFacebookRegistrationView( ThirdPartyRegistrationTestMixin, ThirdPartyOAuthTestMixinFacebook, TransactionTestCase ): """Tests the User API registration endpoint with Facebook authentication.""" def test_social_auth_exception(self): """ According to the do_auth method in social.backends.facebook.py, the Facebook API sometimes responds back a JSON with just False as value. """ self._setup_provider_response_with_body(200, json.dumps("false")) response = self.client.post(self.url, self.data()) self._assert_access_token_error(response, "The provided access_token is not valid.") self._verify_user_existence(user_exists=False, social_link_exists=False) @skipUnless(settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH"), "third party auth not enabled") class TestGoogleRegistrationView( ThirdPartyRegistrationTestMixin, ThirdPartyOAuthTestMixinGoogle, TransactionTestCase ): """Tests the User API registration endpoint with Google authentication.""" pass @ddt.ddt class UpdateEmailOptInTestCase(UserAPITestCase, SharedModuleStoreTestCase): """Tests the UpdateEmailOptInPreference view. """ USERNAME = "steve" EMAIL = "steve@isawesome.com" PASSWORD = "steveopolis" @classmethod def setUpClass(cls): super(UpdateEmailOptInTestCase, cls).setUpClass() cls.course = CourseFactory.create() cls.url = reverse("preferences_email_opt_in") def setUp(self): """ Create a course and user, then log in. 
""" super(UpdateEmailOptInTestCase, self).setUp() self.user = UserFactory.create(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD) self.client.login(username=self.USERNAME, password=self.PASSWORD) @ddt.data( (u"True", u"True"), (u"true", u"True"), (u"TrUe", u"True"), (u"Banana", u"False"), (u"strawberries", u"False"), (u"False", u"False"), ) @ddt.unpack def test_update_email_opt_in(self, opt, result): """Tests the email opt in preference""" # Register, which should trigger an activation email response = self.client.post(self.url, { "course_id": unicode(self.course.id), "email_opt_in": opt }) self.assertHttpOK(response) preference = UserOrgTag.objects.get( user=self.user, org=self.course.id.org, key="email-optin" ) self.assertEquals(preference.value, result) @ddt.data( (True, False), (False, True), (False, False) ) @ddt.unpack def test_update_email_opt_in_wrong_params(self, use_course_id, use_opt_in): """Tests the email opt in preference""" params = {} if use_course_id: params["course_id"] = unicode(self.course.id) if use_opt_in: params["email_opt_in"] = u"True" response = self.client.post(self.url, params) self.assertHttpBadRequest(response) def test_update_email_opt_in_inactive_user(self): """Test that an inactive user can still update their email optin preference.""" self.user.is_active = False self.user.save() # Register, which should trigger an activation email response = self.client.post(self.url, { "course_id": unicode(self.course.id), "email_opt_in": u"True" }) self.assertHttpOK(response) preference = UserOrgTag.objects.get( user=self.user, org=self.course.id.org, key="email-optin" ) self.assertEquals(preference.value, u"True") def test_update_email_opt_with_invalid_course_key(self): """ Test that with invalid key it returns bad request and not update their email optin preference. 
""" response = self.client.post(self.url, { "course_id": 'invalid', "email_opt_in": u"True" }) self.assertHttpBadRequest(response) with self.assertRaises(UserOrgTag.DoesNotExist): UserOrgTag.objects.get(user=self.user, org=self.course.id.org, key="email-optin")
agpl-3.0
darjeeling/django
django/template/context.py
15
9076
from contextlib import contextmanager
from copy import copy

# Hard-coded processor for easier use of CSRF protection.
_builtin_context_processors = ('django.template.context_processors.csrf',)


class ContextPopException(Exception):
    "pop() has been called more times than push()"
    pass


class ContextDict(dict):
    # A single layer of a Context's dict stack. Instantiating one appends it
    # to the owning context; using it as a context manager pops it again on
    # exit, which keeps push/pop balanced around a `with` block.
    def __init__(self, context, *args, **kwargs):
        super().__init__(*args, **kwargs)

        context.dicts.append(self)
        self.context = context

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.context.pop()


class BaseContext:
    """A stack of dicts; lookups walk from the most recent dict downward."""

    def __init__(self, dict_=None):
        self._reset_dicts(dict_)

    def _reset_dicts(self, value=None):
        # The bottom layer always exposes the template-language builtins.
        builtins = {'True': True, 'False': False, 'None': None}
        self.dicts = [builtins]
        if value is not None:
            self.dicts.append(value)

    def __copy__(self):
        # Copy via the super() proxy so subclass attributes (autoescape,
        # render_context, ...) are carried over, then share a *shallow* copy
        # of the stack: the dicts themselves are shared with the original.
        duplicate = copy(super())
        duplicate.dicts = self.dicts[:]
        return duplicate

    def __repr__(self):
        return repr(self.dicts)

    def __iter__(self):
        # Iterate layers from most-recently pushed to the builtins layer.
        yield from reversed(self.dicts)

    def push(self, *args, **kwargs):
        """Push new layer(s); BaseContext args contribute their own layers
        (minus their builtins layer)."""
        dicts = []
        for d in args:
            if isinstance(d, BaseContext):
                dicts += d.dicts[1:]
            else:
                dicts.append(d)
        return ContextDict(self, *dicts, **kwargs)

    def pop(self):
        # Never pop the builtins layer.
        if len(self.dicts) == 1:
            raise ContextPopException
        return self.dicts.pop()

    def __setitem__(self, key, value):
        "Set a variable in the current context"
        self.dicts[-1][key] = value

    def set_upward(self, key, value):
        """
        Set a variable in one of the higher contexts if it exists there,
        otherwise in the current context.
        """
        context = self.dicts[-1]
        for d in reversed(self.dicts):
            if key in d:
                context = d
                break
        context[key] = value

    def __getitem__(self, key):
        "Get a variable's value, starting at the current context and going upward"
        for d in reversed(self.dicts):
            if key in d:
                return d[key]
        raise KeyError(key)

    def __delitem__(self, key):
        "Delete a variable from the current context"
        del self.dicts[-1][key]

    def __contains__(self, key):
        for d in self.dicts:
            if key in d:
                return True
        return False

    def get(self, key, otherwise=None):
        """Like __getitem__ but return `otherwise` instead of raising."""
        for d in reversed(self.dicts):
            if key in d:
                return d[key]
        return otherwise

    def setdefault(self, key, default=None):
        """Return the existing value for `key`, setting it to `default`
        (in the current layer) when missing."""
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default

    def new(self, values=None):
        """
        Return a new context with the same properties, but with only the
        values given in 'values' stored.
        """
        new_context = copy(self)
        new_context._reset_dicts(values)
        return new_context

    def flatten(self):
        """
        Return self.dicts as one dictionary.
        """
        flat = {}
        for d in self.dicts:
            flat.update(d)
        return flat

    def __eq__(self, other):
        """
        Compare two contexts by comparing theirs 'dicts' attributes.
        """
        if isinstance(other, BaseContext):
            # because dictionaries can be put in different order
            # we have to flatten them like in templates
            return self.flatten() == other.flatten()

        # if it's not comparable return false
        return False


class Context(BaseContext):
    "A stack container for variable context"
    def __init__(self, dict_=None, autoescape=True, use_l10n=None, use_tz=None):
        self.autoescape = autoescape
        self.use_l10n = use_l10n
        self.use_tz = use_tz
        self.template_name = "unknown"
        self.render_context = RenderContext()
        # Set to the original template -- as opposed to extended or included
        # templates -- during rendering, see bind_template.
        self.template = None
        super().__init__(dict_)

    @contextmanager
    def bind_template(self, template):
        # Bind exactly one template for the duration of a render; unbinds in
        # `finally` so the context is reusable even if rendering raises.
        if self.template is not None:
            raise RuntimeError("Context is already bound to a template")
        self.template = template
        try:
            yield
        finally:
            self.template = None

    def __copy__(self):
        # The render_context must not be shared between copies, unlike the
        # variable dicts (see BaseContext.__copy__).
        duplicate = super().__copy__()
        duplicate.render_context = copy(self.render_context)
        return duplicate

    def update(self, other_dict):
        "Push other_dict to the stack of dictionaries in the Context"
        if not hasattr(other_dict, '__getitem__'):
            raise TypeError('other_dict must be a mapping (dictionary-like) object.')
        if isinstance(other_dict, BaseContext):
            other_dict = other_dict.dicts[1:].pop()
        return ContextDict(self, other_dict)


class RenderContext(BaseContext):
    """
    A stack container for storing Template state.

    RenderContext simplifies the implementation of template Nodes by providing a
    safe place to store state between invocations of a node's `render` method.

    The RenderContext also provides scoping rules that are more sensible for
    'template local' variables. The render context stack is pushed before each
    template is rendered, creating a fresh scope with nothing in it. Name
    resolution fails if a variable is not found at the top of the
    RequestContext stack. Thus, variables are local to a specific template and
    don't affect the rendering of other templates as they would if they were
    stored in the normal template context.
    """
    # Unlike BaseContext, all lookups below consult ONLY the topmost dict,
    # which is what makes render state template-local.
    template = None

    def __iter__(self):
        yield from self.dicts[-1]

    def __contains__(self, key):
        return key in self.dicts[-1]

    def get(self, key, otherwise=None):
        return self.dicts[-1].get(key, otherwise)

    def __getitem__(self, key):
        return self.dicts[-1][key]

    @contextmanager
    def push_state(self, template, isolated_context=True):
        # Swap in `template` (optionally with a fresh, isolated scope) for the
        # duration of the `with` block, restoring the previous template and
        # popping the scope afterwards even on error.
        initial = self.template
        self.template = template
        if isolated_context:
            self.push()
        try:
            yield
        finally:
            self.template = initial
            if isolated_context:
                self.pop()


class RequestContext(Context):
    """
    This subclass of template.Context automatically populates itself using
    the processors defined in the engine's configuration.
    Additional processors can be specified as a list of callables
    using the "processors" keyword argument.
    """
    def __init__(self, request, dict_=None, processors=None, use_l10n=None, use_tz=None, autoescape=True):
        super().__init__(dict_, use_l10n=use_l10n, use_tz=use_tz, autoescape=autoescape)
        self.request = request
        self._processors = () if processors is None else tuple(processors)
        # Remember which stack slot holds the processor output so
        # bind_template can fill/clear it in place.
        self._processors_index = len(self.dicts)

        # placeholder for context processors output
        self.update({})

        # empty dict for any new modifications
        # (so that context processors don't overwrite them)
        self.update({})

    @contextmanager
    def bind_template(self, template):
        if self.template is not None:
            raise RuntimeError("Context is already bound to a template")

        self.template = template
        # Set context processors according to the template engine's settings.
        processors = (template.engine.template_context_processors +
                      self._processors)
        updates = {}
        for processor in processors:
            updates.update(processor(self.request))
        self.dicts[self._processors_index] = updates

        try:
            yield
        finally:
            self.template = None
            # Unset context processors.
            self.dicts[self._processors_index] = {}

    def new(self, values=None):
        new_context = super().new(values)
        # This is for backwards-compatibility: RequestContexts created via
        # Context.new don't include values from context processors.
        if hasattr(new_context, '_processors_index'):
            del new_context._processors_index
        return new_context


def make_context(context, request=None, **kwargs):
    """
    Create a suitable Context from a plain dict and optionally an HttpRequest.
    """
    if context is not None and not isinstance(context, dict):
        raise TypeError('context must be a dict rather than %s.' % context.__class__.__name__)
    if request is None:
        context = Context(context, **kwargs)
    else:
        # The following pattern is required to ensure values from
        # context override those from template context processors.
        original_context = context
        context = RequestContext(request, **kwargs)
        if original_context:
            context.push(original_context)
    return context
bsd-3-clause
dhruvsrivastava/OJ
flask/lib/python2.7/site-packages/coverage/pickle2json.py
93
1489
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt

"""Convert pickle to JSON for coverage.py."""

from coverage.backward import pickle
from coverage.data import CoverageData


def pickle_read_raw_data(cls_unused, file_obj):
    """Replacement for CoverageData._read_raw_data.

    Reads the legacy 3.x pickle payload instead of the 4.x JSON payload.
    `cls_unused` stands in for the CoverageData instance/class this is
    monkey-patched onto; it is ignored.
    """
    return pickle.load(file_obj)


def pickle2json(infile, outfile):
    """Convert a coverage.py 3.x pickle data file to a 4.x JSON data file.

    :param infile: path of the pickle-format ``.coverage`` file to read.
    :param outfile: path of the JSON-format data file to write.
    """
    # Save the real reader *before* entering the try block. In the original
    # code the save happened inside `try`, so a failure on that first line
    # would make the `finally` clause raise NameError, masking the real
    # error. Saving first guarantees the restore in `finally` is always valid.
    old_read_raw_data = CoverageData._read_raw_data
    try:
        # Temporarily monkey-patch CoverageData to read pickles, then let it
        # write back in its native (JSON) format.
        CoverageData._read_raw_data = pickle_read_raw_data

        covdata = CoverageData()

        with open(infile, 'rb') as inf:
            covdata.read_fileobj(inf)

        covdata.write_file(outfile)
    finally:
        # Always undo the monkey-patch, even if the conversion failed.
        CoverageData._read_raw_data = old_read_raw_data


if __name__ == "__main__":
    from optparse import OptionParser

    parser = OptionParser(usage="usage: %s [options]" % __file__)
    parser.description = "Convert .coverage files from pickle to JSON format"
    parser.add_option(
        "-i", "--input-file", action="store", default=".coverage",
        help="Name of input file. Default .coverage",
    )
    parser.add_option(
        "-o", "--output-file", action="store", default=".coverage",
        help="Name of output file. Default .coverage",
    )

    (options, args) = parser.parse_args()

    pickle2json(options.input_file, options.output_file)
bsd-3-clause
drexly/openhgsenti
lib/django/db/migrations/operations/special.py
374
7425
from __future__ import unicode_literals

from django.db import router

from .base import Operation


class SeparateDatabaseAndState(Operation):
    """
    Takes two lists of operations - ones that will be used for the database,
    and ones that will be used for the state change. This allows operations
    that don't support state change to have it applied, or have operations
    that affect the state or not the database, or so on.
    """

    serialization_expand_args = ['database_operations', 'state_operations']

    def __init__(self, database_operations=None, state_operations=None):
        self.database_operations = database_operations or []
        self.state_operations = state_operations or []

    def deconstruct(self):
        # Return (class name, args, kwargs) so the migration writer can
        # serialize this operation; empty lists are omitted from kwargs.
        kwargs = {}
        if self.database_operations:
            kwargs['database_operations'] = self.database_operations
        if self.state_operations:
            kwargs['state_operations'] = self.state_operations
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    def state_forwards(self, app_label, state):
        # Only the state_operations contribute to the in-memory project state.
        for state_operation in self.state_operations:
            state_operation.state_forwards(app_label, state)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # We calculate state separately in here since our state functions aren't useful
        # Each database operation sees a to_state derived from its own
        # state_forwards, then that becomes the from_state for the next one.
        for database_operation in self.database_operations:
            to_state = from_state.clone()
            database_operation.state_forwards(app_label, to_state)
            database_operation.database_forwards(app_label, schema_editor, from_state, to_state)
            from_state = to_state

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # We calculate state separately in here since our state functions aren't useful
        # Operations are undone in reverse order; for each one, replay the
        # state of every operation that preceded it (forwards order) on top
        # of the base state to reconstruct what it expects to see.
        base_state = to_state
        for pos, database_operation in enumerate(reversed(self.database_operations)):
            to_state = base_state.clone()
            for dbop in self.database_operations[:-(pos + 1)]:
                dbop.state_forwards(app_label, to_state)
            from_state = base_state.clone()
            database_operation.state_forwards(app_label, from_state)
            database_operation.database_backwards(app_label, schema_editor, from_state, to_state)

    def describe(self):
        return "Custom state/database change combination"


class RunSQL(Operation):
    """
    Runs some raw SQL. A reverse SQL statement may be provided.

    Also accepts a list of operations that represent the state change effected
    by this SQL change, in case it's custom column/table creation/deletion.
    """
    # Sentinel meaning "execute nothing" for sql/reverse_sql.
    noop = ''

    def __init__(self, sql, reverse_sql=None, state_operations=None, hints=None):
        self.sql = sql
        self.reverse_sql = reverse_sql
        self.state_operations = state_operations or []
        self.hints = hints or {}

    def deconstruct(self):
        # Serialization info for the migration writer; only non-default
        # attributes are included in kwargs.
        kwargs = {
            'sql': self.sql,
        }
        if self.reverse_sql is not None:
            kwargs['reverse_sql'] = self.reverse_sql
        if self.state_operations:
            kwargs['state_operations'] = self.state_operations
        if self.hints:
            kwargs['hints'] = self.hints
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    @property
    def reversible(self):
        # Reversible only when explicit reverse SQL was supplied.
        return self.reverse_sql is not None

    def state_forwards(self, app_label, state):
        for state_operation in self.state_operations:
            state_operation.state_forwards(app_label, state)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # The router can veto running this migration on this database.
        if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
            self._run_sql(schema_editor, self.sql)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        if self.reverse_sql is None:
            raise NotImplementedError("You cannot reverse this operation")
        if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
            self._run_sql(schema_editor, self.reverse_sql)

    def describe(self):
        return "Raw SQL operation"

    def _run_sql(self, schema_editor, sqls):
        # `sqls` may be a single SQL string (split into statements by the
        # backend) or a list of statements, each optionally a 2-tuple of
        # (sql, params).
        if isinstance(sqls, (list, tuple)):
            for sql in sqls:
                params = None
                if isinstance(sql, (list, tuple)):
                    elements = len(sql)
                    if elements == 2:
                        sql, params = sql
                    else:
                        raise ValueError("Expected a 2-tuple but got %d" % elements)
                schema_editor.execute(sql, params=params)
        elif sqls != RunSQL.noop:
            statements = schema_editor.connection.ops.prepare_sql_script(sqls)
            for statement in statements:
                schema_editor.execute(statement, params=None)


class RunPython(Operation):
    """
    Runs Python code in a context suitable for doing versioned ORM operations.
    """

    # Python callables can't be turned into SQL, so this operation can never
    # be collapsed by the sqlmigrate/optimizer machinery.
    reduces_to_sql = False

    def __init__(self, code, reverse_code=None, atomic=True, hints=None):
        self.atomic = atomic
        # Forwards code
        if not callable(code):
            raise ValueError("RunPython must be supplied with a callable")
        self.code = code
        # Reverse code
        if reverse_code is None:
            self.reverse_code = None
        else:
            if not callable(reverse_code):
                raise ValueError("RunPython must be supplied with callable arguments")
            self.reverse_code = reverse_code
        self.hints = hints or {}

    def deconstruct(self):
        kwargs = {
            'code': self.code,
        }
        if self.reverse_code is not None:
            kwargs['reverse_code'] = self.reverse_code
        if self.atomic is not True:
            kwargs['atomic'] = self.atomic
        if self.hints:
            kwargs['hints'] = self.hints
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    @property
    def reversible(self):
        return self.reverse_code is not None

    def state_forwards(self, app_label, state):
        # RunPython objects have no state effect. To add some, combine this
        # with SeparateDatabaseAndState.
        pass

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
            # We now execute the Python code in a context that contains a 'models'
            # object, representing the versioned models as an app registry.
            # We could try to override the global cache, but then people will still
            # use direct imports, so we go with a documentation approach instead.
            self.code(from_state.apps, schema_editor)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        if self.reverse_code is None:
            raise NotImplementedError("You cannot reverse this operation")
        if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
            self.reverse_code(from_state.apps, schema_editor)

    def describe(self):
        return "Raw Python operation"

    @staticmethod
    def noop(apps, schema_editor):
        # Usable as either `code` or `reverse_code`, e.g. to make a data
        # migration trivially reversible.
        return None
apache-2.0
shashankrao/RC6-Block-Cipher
decrypt.py
1
1816
# RC6 block decryption (Python 2 script).
# Relies on a sibling `helpers` module for blockConverter, ROL, ROR,
# generateKey and deBlocker -- their exact contracts are not visible here.
from helpers import *
import sys

def decrypt(esentence,s):
    """Decrypt one 128-bit RC6 block.

    esentence -- the encrypted input string (format defined by
                 blockConverter; presumably binary-string words -- the
                 base-2 int() conversions below depend on that).
    s         -- the expanded key schedule from generateKey.

    Returns (cipher, orgi): the four 32-bit words of the input block and
    the four decrypted 32-bit words.
    """
    # Split the input into four 32-bit words A..D (parsed as base-2 strings).
    encoded = blockConverter(esentence)
    enlength = len(encoded)
    A = long(encoded[0],2)
    B = long(encoded[1],2)
    C = long(encoded[2],2)
    D = long(encoded[3],2)
    cipher = []
    cipher.append(A)
    cipher.append(B)
    cipher.append(C)
    cipher.append(D)
    # RC6-32/12 parameters: r=12 rounds, w=32-bit words, lg(w)=5.
    r=12
    w=32
    modulo = 2**32
    lgw = 5
    # Undo the final post-whitening: C -= S[2r+3], A -= S[2r+2].
    C = (C - s[2*r+3])%modulo
    A = (A - s[2*r+2])%modulo
    # Rounds run backwards, i = r down to 1.
    for j in range(1,r+1):
        i = r+1-j
        # Inverse of the encryption's (A,B,C,D) <- (B,C,D,A) rotation.
        (A, B, C, D) = (D, A, B, C)
        # t, u are the data-dependent rotation amounts:
        # u = ROL(D*(2D+1), lg w), t = ROL(B*(2B+1), lg w).
        u_temp = (D*(2*D + 1))%modulo
        u = ROL(u_temp,lgw,32)
        t_temp = (B*(2*B + 1))%modulo
        t = ROL(t_temp,lgw,32)
        tmod=t%32
        umod=u%32
        # Inverse round: subtract round keys, rotate right, then XOR.
        C = (ROR((C-s[2*i+1])%modulo,tmod,32)  ^u)
        A = (ROR((A-s[2*i])%modulo,umod,32)   ^t)
    # Undo the initial pre-whitening: D -= S[1], B -= S[0].
    D = (D - s[1])%modulo
    B = (B - s[0])%modulo
    orgi = []
    orgi.append(A)
    orgi.append(B)
    orgi.append(C)
    orgi.append(D)
    return cipher,orgi

def main():
    """Prompt for the key, read encrypted.txt and print the decryption."""
    print "DECRYPTION: "
    #key='A WORD IS A WORD'
    key =raw_input("Enter Key(0-16 characters): ")
    # Pad short keys with spaces and truncate to exactly 16 characters.
    if len(key) <16:
        key = key + " "*(16-len(key))
    key = key[:16]
    print "UserKey: "+key
    s = generateKey(key)
    f = open("encrypted.txt","r")
    # NOTE(review): this branch is dead code -- open() raises IOError when
    # the file is missing, so `f` is never falsy here. A try/except around
    # open() would be needed for the message to ever print.
    if not f:
        print "Encrypted input not found in encrypted.txt"
        sys.exit(0)
    else:
        esentence = f.readline()
    cipher,orgi = decrypt(esentence,s)
    sentence = deBlocker(orgi)
    print "\nEncrypted String list: ",cipher
    print "Encrypted String: " + esentence
    print "Length of Encrypted String: ",len(esentence)
    print "\nDecrypted String list: ",orgi
    print "Decrypted String: " + sentence
    print "Length of Decrypted String: ",len(sentence)

if __name__ == "__main__":
    main()
gpl-2.0
infyponics/infyponics
requests/packages/urllib3/fields.py
514
5931
from __future__ import absolute_import
import email.utils
import mimetypes

from .packages import six


def guess_content_type(filename, default='application/octet-stream'):
    """
    Guess the "Content-Type" of a file.

    :param filename:
        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
    :param default:
        If no "Content-Type" can be guessed, default to `default`.
    """
    if filename:
        return mimetypes.guess_type(filename)[0] or default
    return default


def format_header_param(name, value):
    """
    Helper function to format and quote a single header parameter.

    Particularly useful for header parameters which might contain
    non-ASCII values, like file names. This follows RFC 2231, as
    suggested by RFC 2388 Section 4.4.

    :param name:
        The name of the parameter, a string expected to be ASCII only.
    :param value:
        The value of the parameter, provided as a unicode string.
    """
    # Fast path: if the value has no quote/backslash/CR/LF and is pure
    # ASCII, emit the plain `name="value"` form.
    if not any(ch in value for ch in '"\\\r\n'):
        result = '%s="%s"' % (name, value)
        try:
            result.encode('ascii')
        except (UnicodeEncodeError, UnicodeDecodeError):
            # Non-ASCII: fall through to the RFC 2231 encoding below.
            pass
        else:
            return result
    if not six.PY3 and isinstance(value, six.text_type):  # Python 2:
        value = value.encode('utf-8')
    # RFC 2231 extended-parameter form: name*=utf-8''percent-encoded-value
    value = email.utils.encode_rfc2231(value, 'utf-8')
    value = '%s*=%s' % (name, value)
    return value


class RequestField(object):
    """
    A data container for request body parameters.

    :param name:
        The name of this request field.
    :param data:
        The data/value body.
    :param filename:
        An optional filename of the request field.
    :param headers:
        An optional dict-like object of headers to initially use for the field.
    """
    def __init__(self, name, data, filename=None, headers=None):
        self._name = name
        self._filename = filename
        self.data = data
        self.headers = {}
        if headers:
            # Copy so later header mutations don't affect the caller's dict.
            self.headers = dict(headers)

    @classmethod
    def from_tuples(cls, fieldname, value):
        """
        A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.

        Supports constructing :class:`~urllib3.fields.RequestField` from
        parameter of key/value strings AND key/filetuple. A filetuple is a
        (filename, data, MIME type) tuple where the MIME type is optional.
        For example::

            'foo': 'bar',
            'fakefile': ('foofile.txt', 'contents of foofile'),
            'realfile': ('barfile.txt', open('realfile').read()),
            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
            'nonamefile': 'contents of nonamefile field',

        Field names and filenames must be unicode.
        """
        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                # 2-tuple: guess the content type from the filename.
                filename, data = value
                content_type = guess_content_type(filename)
        else:
            filename = None
            content_type = None
            data = value

        request_param = cls(fieldname, data, filename=filename)
        request_param.make_multipart(content_type=content_type)

        return request_param

    def _render_part(self, name, value):
        """
        Overridable helper function to format a single header parameter.

        :param name:
            The name of the parameter, a string expected to be ASCII only.
        :param value:
            The value of the parameter, provided as a unicode string.
        """
        return format_header_param(name, value)

    def _render_parts(self, header_parts):
        """
        Helper function to format and quote a single header.

        Useful for single headers that are composed of multiple items. E.g.,
        'Content-Disposition' fields.

        :param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
            as `k1="v1"; k2="v2"; ...`.
        """
        parts = []
        iterable = header_parts

        if isinstance(header_parts, dict):
            iterable = header_parts.items()

        for name, value in iterable:
            if value:
                # Falsy values (None, '') are skipped entirely.
                parts.append(self._render_part(name, value))

        return '; '.join(parts)

    def render_headers(self):
        """
        Renders the headers for this request field.
        """
        lines = []

        # Emit the well-known headers first, in a fixed order.
        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
        for sort_key in sort_keys:
            if self.headers.get(sort_key, False):
                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))

        # Then any remaining headers, in dict iteration order.
        for header_name, header_value in self.headers.items():
            if header_name not in sort_keys:
                if header_value:
                    lines.append('%s: %s' % (header_name, header_value))

        # Trailing '\r\n' entry yields the blank line terminating the headers.
        lines.append('\r\n')
        return '\r\n'.join(lines)

    def make_multipart(self, content_disposition=None, content_type=None,
                       content_location=None):
        """
        Makes this request field into a multipart request field.

        This method overrides "Content-Disposition", "Content-Type" and
        "Content-Location" headers to the request parameter.

        :param content_type:
            The 'Content-Type' of the request body.
        :param content_location:
            The 'Content-Location' of the request body.

        """
        self.headers['Content-Disposition'] = content_disposition or 'form-data'
        # Append '; name="..."; filename="..."' (the leading '' produces the
        # '; ' separator before the first rendered part).
        self.headers['Content-Disposition'] += '; '.join([
            '', self._render_parts(
                (('name', self._name), ('filename', self._filename))
            )
        ])
        self.headers['Content-Type'] = content_type
        self.headers['Content-Location'] = content_location
apache-2.0
CenterForOpenScience/osf.io
tasks/__init__.py
3
28875
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of commands, run ``$ invoke --list``. """ import os import sys import json import platform import subprocess import logging import sqlite3 import invoke from invoke import Collection from website import settings from .utils import pip_install, bin_prefix try: from tasks import local # noqa except ImportError: print('No tasks/local.py file found. ' 'Did you remember to copy local-dist.py to local.py?') logging.getLogger('invoke').setLevel(logging.CRITICAL) # gets the root path for all the scripts that rely on it HERE = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')) WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE') NO_TESTS_COLLECTED = 5 ns = Collection() try: from tasks import local as local_tasks ns.add_collection(Collection.from_module(local_tasks), name='local') except ImportError: pass try: from admin import tasks as admin_tasks ns.add_collection(Collection.from_module(admin_tasks), name='admin') except ImportError: pass def task(*args, **kwargs): """Behaves the same way as invoke.task. Adds the task to the root namespace. """ if len(args) == 1 and callable(args[0]): new_task = invoke.task(args[0]) ns.add_task(new_task) return new_task def decorator(f): new_task = invoke.task(f, *args, **kwargs) ns.add_task(new_task) return new_task return decorator @task def server(ctx, host=None, port=5000, debug=True, gitlogs=False): """Run the app server.""" if os.environ.get('WERKZEUG_RUN_MAIN') == 'true' or not debug: if os.environ.get('WEB_REMOTE_DEBUG', None): import pydevd # e.g. 
'127.0.0.1:5678' remote_parts = os.environ.get('WEB_REMOTE_DEBUG').split(':') pydevd.settrace(remote_parts[0], port=int(remote_parts[1]), suspend=False, stdoutToServer=True, stderrToServer=True) if gitlogs: git_logs(ctx) from website.app import init_app os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings' app = init_app(set_backends=True, routes=True) settings.API_SERVER_PORT = port else: from framework.flask import app context = None if settings.SECURE_MODE: context = (settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY) app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH], ssl_context=context) @task def git_logs(ctx, branch=None): from scripts.meta import gatherer gatherer.main(branch=branch) @task def apiserver(ctx, port=8000, wait=True, autoreload=True, host='127.0.0.1', pty=True): """Run the API server.""" env = os.environ.copy() cmd = 'DJANGO_SETTINGS_MODULE=api.base.settings {} manage.py runserver {}:{} --nothreading'\ .format(sys.executable, host, port) if not autoreload: cmd += ' --noreload' if settings.SECURE_MODE: cmd = cmd.replace('runserver', 'runsslserver') cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY) if wait: return ctx.run(cmd, echo=True, pty=pty) from subprocess import Popen return Popen(cmd, shell=True, env=env) @task def adminserver(ctx, port=8001, host='127.0.0.1', pty=True): """Run the Admin server.""" env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"' cmd = '{} python3 manage.py runserver {}:{} --nothreading'.format(env, host, port) if settings.SECURE_MODE: cmd = cmd.replace('runserver', 'runsslserver') cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY) ctx.run(cmd, echo=True, pty=pty) @task def shell(ctx, transaction=True, print_sql=False, notebook=False): cmd = 'DJANGO_SETTINGS_MODULE="api.base.settings" python3 manage.py osf_shell' if print_sql: cmd += ' --print-sql' if notebook: cmd += ' --notebook' 
if not transaction: cmd += ' --no-transaction' return ctx.run(cmd, pty=True, echo=True) @task def sharejs(ctx, host=None, port=None, db_url=None, cors_allow_origin=None): """Start a local ShareJS server.""" if host: os.environ['SHAREJS_SERVER_HOST'] = host if port: os.environ['SHAREJS_SERVER_PORT'] = port if db_url: os.environ['SHAREJS_DB_URL'] = db_url if cors_allow_origin: os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin if settings.SENTRY_DSN: os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js') ctx.run('node {0}'.format(share_server)) @task(aliases=['celery']) def celery_worker(ctx, level='debug', hostname=None, beat=False, queues=None, concurrency=None, max_tasks_per_child=None): """Run the Celery process.""" os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings' cmd = 'celery worker -A framework.celery_tasks -Ofair -l {0}'.format(level) if hostname: cmd = cmd + ' --hostname={}'.format(hostname) # beat sets up a cron like scheduler, refer to website/settings if beat: cmd = cmd + ' --beat' if queues: cmd = cmd + ' --queues={}'.format(queues) if concurrency: cmd = cmd + ' --concurrency={}'.format(concurrency) if max_tasks_per_child: cmd = cmd + ' --maxtasksperchild={}'.format(max_tasks_per_child) ctx.run(bin_prefix(cmd), pty=True) @task(aliases=['beat']) def celery_beat(ctx, level='debug', schedule=None): """Run the Celery process.""" os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings' # beat sets up a cron like scheduler, refer to website/settings cmd = 'celery beat -A framework.celery_tasks -l {0} --pidfile='.format(level) if schedule: cmd = cmd + ' --schedule={}'.format(schedule) ctx.run(bin_prefix(cmd), pty=True) @task def migrate_search(ctx, delete=True, remove=False, index=settings.ELASTIC_INDEX): """Migrate the search-enabled models.""" from website.app import init_app init_app(routes=False, set_backends=False) from website.search_migration.migrate import 
migrate # NOTE: Silence the warning: # "InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised." SILENT_LOGGERS = ['py.warnings'] for logger in SILENT_LOGGERS: logging.getLogger(logger).setLevel(logging.ERROR) migrate(delete, remove=remove, index=index) @task def rebuild_search(ctx): """Delete and recreate the index for elasticsearch""" from website.app import init_app import requests from website import settings init_app(routes=False, set_backends=True) if not settings.ELASTIC_URI.startswith('http'): protocol = 'http://' if settings.DEBUG_MODE else 'https://' else: protocol = '' url = '{protocol}{uri}/{index}'.format( protocol=protocol, uri=settings.ELASTIC_URI.rstrip('/'), index=settings.ELASTIC_INDEX, ) print('Deleting index {}'.format(settings.ELASTIC_INDEX)) print('----- DELETE {}*'.format(url)) requests.delete(url + '*') print('Creating index {}'.format(settings.ELASTIC_INDEX)) print('----- PUT {}'.format(url)) requests.put(url) migrate_search(ctx, delete=False) @task def mailserver(ctx, port=1025): """Run a SMTP test server.""" cmd = 'python3 -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port) ctx.run(bin_prefix(cmd), pty=True) @task def syntax(ctx): """Use pre-commit to run formatters and linters.""" ctx.run('pre-commit run --all-files --show-diff-on-failure', echo=True) @task(aliases=['req']) def requirements(ctx, base=False, addons=False, release=False, dev=False, all=False): """Install python dependencies. Examples: inv requirements inv requirements --all You should use --all for updating your developement environment. --all will install (in order): addons, dev and the base requirements. By default, base requirements will run. However, if any set of addons, release, or dev are chosen, base will have to be mentioned explicitly in order to run. This is to remain compatible with previous usages. Release requirements will prevent dev, and base from running. 
""" if all: base = True addons = True dev = True if not(addons or dev): base = True if release or addons: addon_requirements(ctx) # "release" takes precedence if release: req_file = os.path.join(HERE, 'requirements', 'release.txt') ctx.run( pip_install(req_file), echo=True ) else: if dev: # then dev requirements req_file = os.path.join(HERE, 'requirements', 'dev.txt') ctx.run( pip_install(req_file), echo=True ) if base: # then base requirements req_file = os.path.join(HERE, 'requirements.txt') ctx.run( pip_install(req_file), echo=True ) # fix URITemplate name conflict h/t @github ctx.run('pip3 uninstall uritemplate.py --yes || true') ctx.run('pip3 install --no-cache-dir uritemplate.py==0.3.0') @task def test_module(ctx, module=None, numprocesses=None, nocapture=False, params=None, coverage=False, testmon=False): """Helper for running tests. """ from past.builtins import basestring os.environ['DJANGO_SETTINGS_MODULE'] = 'osf_tests.settings' import pytest if not numprocesses: from multiprocessing import cpu_count numprocesses = cpu_count() numprocesses = int(numprocesses) # NOTE: Subprocess to compensate for lack of thread safety in the httpretty module. 
# https://github.com/gabrielfalcao/HTTPretty/issues/209#issue-54090252 args = [] if coverage: args.extend([ '--cov-report', 'term-missing', '--cov', 'admin', '--cov', 'addons', '--cov', 'api', '--cov', 'framework', '--cov', 'osf', '--cov', 'website', ]) if not nocapture: args += ['-s'] if numprocesses > 1: args += ['-n {}'.format(numprocesses), '--max-slave-restart=0'] modules = [module] if isinstance(module, basestring) else module args.extend(modules) if testmon: args.extend(['--testmon']) if params: params = [params] if isinstance(params, basestring) else params args.extend(params) retcode = pytest.main(args) # exit code 5 is all tests skipped which is the same as passing with testmon sys.exit(0 if retcode == NO_TESTS_COLLECTED else retcode) OSF_TESTS = [ 'osf_tests', ] WEBSITE_TESTS = [ 'tests', ] API_TESTS1 = [ 'api_tests/draft_registrations', 'api_tests/draft_nodes', 'api_tests/identifiers', 'api_tests/institutions', 'api_tests/licenses', 'api_tests/logs', 'api_tests/schemas', 'api_tests/providers', 'api_tests/preprints', 'api_tests/registrations', 'api_tests/registries_moderation', 'api_tests/users', ] API_TESTS2 = [ 'api_tests/actions', 'api_tests/chronos', 'api_tests/meetings', 'api_tests/metrics', 'api_tests/nodes', 'api_tests/osf_groups', 'api_tests/requests', 'api_tests/subscriptions', 'api_tests/waffle', 'api_tests/wb', ] API_TESTS3 = [ 'api_tests/addons_tests', 'api_tests/alerts', 'api_tests/applications', 'api_tests/banners', 'api_tests/base', 'api_tests/collections', 'api_tests/comments', 'api_tests/crossref', 'api_tests/files', 'api_tests/guids', 'api_tests/reviews', 'api_tests/regions', 'api_tests/search', 'api_tests/scopes', 'api_tests/sloan', 'api_tests/subjects', 'api_tests/taxonomies', 'api_tests/test', 'api_tests/tokens', 'api_tests/view_only_links', 'api_tests/share', 'api_tests/wikis', ] ADDON_TESTS = [ 'addons', ] ADMIN_TESTS = [ 'admin_tests', ] @task def test_osf(ctx, numprocesses=None, coverage=False, testmon=False): """Run the OSF test 
suite.""" print('Testing modules "{}"'.format(OSF_TESTS)) test_module(ctx, module=OSF_TESTS, numprocesses=numprocesses, coverage=coverage, testmon=testmon) @task def test_website(ctx, numprocesses=None, coverage=False, testmon=False): """Run the old test suite.""" print('Testing modules "{}"'.format(WEBSITE_TESTS)) test_module(ctx, module=WEBSITE_TESTS, numprocesses=numprocesses, coverage=coverage, testmon=testmon) @task def test_api1(ctx, numprocesses=None, coverage=False, testmon=False): """Run the API test suite.""" print('Testing modules "{}"'.format(API_TESTS1 + ADMIN_TESTS)) test_module(ctx, module=API_TESTS1 + ADMIN_TESTS, numprocesses=numprocesses, coverage=coverage, testmon=testmon) @task def test_api2(ctx, numprocesses=None, coverage=False, testmon=False): """Run the API test suite.""" print('Testing modules "{}"'.format(API_TESTS2)) test_module(ctx, module=API_TESTS2, numprocesses=numprocesses, coverage=coverage, testmon=testmon) @task def test_api3(ctx, numprocesses=None, coverage=False, testmon=False): """Run the API test suite.""" print('Testing modules "{}"'.format(API_TESTS3 + OSF_TESTS)) # NOTE: There may be some concurrency issues with ES test_module(ctx, module=API_TESTS3 + OSF_TESTS, numprocesses=numprocesses, coverage=coverage, testmon=testmon) @task def test_admin(ctx, numprocesses=None, coverage=False, testmon=False): """Run the Admin test suite.""" print('Testing module "admin_tests"') test_module(ctx, module=ADMIN_TESTS, numprocesses=numprocesses, coverage=coverage, testmon=testmon) @task def test_addons(ctx, numprocesses=None, coverage=False, testmon=False): """Run all the tests in the addons directory. 
""" print('Testing modules "{}"'.format(ADDON_TESTS)) test_module(ctx, module=ADDON_TESTS, numprocesses=numprocesses, coverage=coverage, testmon=testmon) @task def test(ctx, all=False, lint=False): """ Run unit tests: OSF (always), plus addons and syntax checks (optional) """ if lint: syntax(ctx) test_website(ctx) # /tests test_api1(ctx) test_api2(ctx) test_api3(ctx) # also /osf_tests if all: test_addons(ctx) # TODO: Enable admin tests test_admin(ctx) karma(ctx) @task def remove_failures_from_testmon(ctx, db_path=None): conn = sqlite3.connect(db_path) tests_decached = conn.execute("delete from node where result <> '{}'").rowcount ctx.run('echo {} failures purged from travis cache'.format(tests_decached)) @task def travis_setup(ctx): ctx.run('npm install -g bower', echo=True) with open('package.json', 'r') as fobj: package_json = json.load(fobj) ctx.run('npm install @centerforopenscience/list-of-licenses@{}'.format(package_json['dependencies']['@centerforopenscience/list-of-licenses']), echo=True) with open('bower.json', 'r') as fobj: bower_json = json.load(fobj) ctx.run('bower install {}'.format(bower_json['dependencies']['styles']), echo=True) @task def test_travis_addons(ctx, numprocesses=None, coverage=False, testmon=False): """ Run half of the tests to help travis go faster. """ #travis_setup(ctx) syntax(ctx) test_addons(ctx, numprocesses=numprocesses, coverage=coverage, testmon=testmon) @task def test_travis_website(ctx, numprocesses=None, coverage=False, testmon=False): """ Run other half of the tests to help travis go faster. 
""" #travis_setup(ctx) test_website(ctx, numprocesses=numprocesses, coverage=coverage, testmon=testmon) @task def test_travis_api1_and_js(ctx, numprocesses=None, coverage=False, testmon=False): # TODO: Uncomment when https://github.com/travis-ci/travis-ci/issues/8836 is resolved # karma(ctx) #travis_setup(ctx) test_api1(ctx, numprocesses=numprocesses, coverage=coverage, testmon=testmon) @task def test_travis_api2(ctx, numprocesses=None, coverage=False, testmon=False): #travis_setup(ctx) test_api2(ctx, numprocesses=numprocesses, coverage=coverage, testmon=testmon) @task def test_travis_api3_and_osf(ctx, numprocesses=None, coverage=False, testmon=False): #travis_setup(ctx) test_api3(ctx, numprocesses=numprocesses, coverage=coverage, testmon=testmon) @task def karma(ctx, travis=False): """Run JS tests with Karma. Requires Chrome to be installed.""" if travis: return ctx.run('yarn test-travis', echo=True) ctx.run('yarn test', echo=True) @task def wheelhouse(ctx, addons=False, release=False, dev=False, pty=True): """Build wheels for python dependencies. 
Examples: inv wheelhouse --dev inv wheelhouse --addons inv wheelhouse --release """ if release or addons: for directory in os.listdir(settings.ADDON_PATH): path = os.path.join(settings.ADDON_PATH, directory) if os.path.isdir(path): req_file = os.path.join(path, 'requirements.txt') if os.path.exists(req_file): cmd = ('pip3 wheel --find-links={} -r {} --wheel-dir={} ').format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH) ctx.run(cmd, pty=pty) if release: req_file = os.path.join(HERE, 'requirements', 'release.txt') elif dev: req_file = os.path.join(HERE, 'requirements', 'dev.txt') else: req_file = os.path.join(HERE, 'requirements.txt') cmd = 'pip3 wheel --find-links={} -r {} --wheel-dir={} '.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH) ctx.run(cmd, pty=pty) @task def addon_requirements(ctx): """Install all addon requirements.""" for directory in os.listdir(settings.ADDON_PATH): path = os.path.join(settings.ADDON_PATH, directory) requirements_file = os.path.join(path, 'requirements.txt') if os.path.isdir(path) and os.path.isfile(requirements_file): print('Installing requirements for {0}'.format(directory)) ctx.run( pip_install(requirements_file), echo=True ) print('Finished installing addon requirements') @task def travis_addon_settings(ctx): for directory in os.listdir(settings.ADDON_PATH): path = os.path.join(settings.ADDON_PATH, directory, 'settings') if os.path.isdir(path): try: open(os.path.join(path, 'local-travis.py')) ctx.run('cp {path}/local-travis.py {path}/local.py'.format(path=path)) except IOError: pass @task def copy_addon_settings(ctx): for directory in os.listdir(settings.ADDON_PATH): path = os.path.join(settings.ADDON_PATH, directory, 'settings') if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')): try: open(os.path.join(path, 'local-dist.py')) ctx.run('cp {path}/local-dist.py {path}/local.py'.format(path=path)) except IOError: pass @task def copy_settings(ctx, addons=False): # Website settings if not 
os.path.isfile('website/settings/local.py'): print('Creating local.py file') ctx.run('cp website/settings/local-dist.py website/settings/local.py') # Addon settings if addons: copy_addon_settings(ctx) @task(aliases=['bower']) def bower_install(ctx): print('Installing bower-managed packages') bower_bin = os.path.join(HERE, 'node_modules', '.bin', 'bower') ctx.run('{} prune --allow-root'.format(bower_bin), echo=True) ctx.run('{} install --allow-root'.format(bower_bin), echo=True) @task def docker_init(ctx): """Initial docker setup""" print('You will be asked for your sudo password to continue...') if platform.system() == 'Darwin': # Mac OSX ctx.run('sudo ifconfig lo0 alias 192.168.168.167') else: print('Your system is not recognized, you will have to setup docker manually') def ensure_docker_env_setup(ctx): if hasattr(os.environ, 'DOCKER_ENV_SETUP') and os.environ['DOCKER_ENV_SETUP'] == '1': pass else: os.environ['WEB_REMOTE_DEBUG'] = '192.168.168.167:11000' os.environ['API_REMOTE_DEBUG'] = '192.168.168.167:12000' os.environ['WORKER_REMOTE_DEBUG'] = '192.168.168.167:13000' os.environ['DOCKER_ENV_SETUP'] = '1' docker_init(ctx) @task def docker_requirements(ctx): ensure_docker_env_setup(ctx) ctx.run('docker-compose up requirements requirements_mfr requirements_wb') @task def docker_appservices(ctx): ensure_docker_env_setup(ctx) ctx.run('docker-compose up assets fakecas elasticsearch tokumx postgres') @task def docker_osf(ctx): ensure_docker_env_setup(ctx) ctx.run('docker-compose up mfr wb web api') @task def clear_sessions(ctx, months=1, dry_run=False): from website.app import init_app init_app(routes=False, set_backends=True) from scripts import clear_sessions clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run) # Release tasks @task def hotfix(ctx, name, finish=False, push=False): """Rename hotfix branch to hotfix/<next-patch-version> and optionally finish hotfix. 
""" print('Checking out master to calculate curent version') ctx.run('git checkout master') latest_version = latest_tag_info()['current_version'] print('Current version is: {}'.format(latest_version)) major, minor, patch = latest_version.split('.') next_patch_version = '.'.join([major, minor, str(int(patch) + 1)]) print('Bumping to next patch version: {}'.format(next_patch_version)) print('Renaming branch...') new_branch_name = 'hotfix/{}'.format(next_patch_version) ctx.run('git checkout {}'.format(name), echo=True) ctx.run('git branch -m {}'.format(new_branch_name), echo=True) if finish: ctx.run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True) if push: ctx.run('git push --follow-tags origin master', echo=True) ctx.run('git push origin develop', echo=True) @task def feature(ctx, name, finish=False, push=False): """Rename the current branch to a feature branch and optionally finish it.""" print('Renaming branch...') ctx.run('git branch -m feature/{}'.format(name), echo=True) if finish: ctx.run('git flow feature finish {}'.format(name), echo=True) if push: ctx.run('git push origin develop', echo=True) # Adapted from bumpversion def latest_tag_info(): try: # git-describe doesn't update the git-index, so we do that # subprocess.check_output(["git", "update-index", "--refresh"]) # get info about the latest tag in git describe_out = subprocess.check_output([ 'git', 'describe', '--dirty', '--tags', '--long', '--abbrev=40' ], stderr=subprocess.STDOUT ).decode().split('-') except subprocess.CalledProcessError as err: raise err # logger.warn("Error when running git describe") return {} info = {} if describe_out[-1].strip() == 'dirty': info['dirty'] = True describe_out.pop() info['commit_sha'] = describe_out.pop().lstrip('g') info['distance_to_latest_tag'] = int(describe_out.pop()) info['current_version'] = describe_out.pop().lstrip('v') # assert type(info["current_version"]) == str assert 0 == len(describe_out) return info # Tasks for generating 
and bundling SSL certificates # See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details @task def generate_key(ctx, domain, bits=2048): cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits) ctx.run(cmd) @task def generate_key_nopass(ctx, domain): cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format( domain=domain ) ctx.run(cmd) @task def generate_csr(ctx, domain): cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format( domain=domain ) ctx.run(cmd) @task def request_ssl_cert(ctx, domain): """Generate a key, a key with password removed, and a signing request for the specified domain. Usage: > invoke request_ssl_cert pizza.osf.io """ generate_key(ctx, domain) generate_key_nopass(ctx, domain) generate_csr(ctx, domain) @task def bundle_certs(ctx, domain, cert_path): """Concatenate certificates from NameCheap in the correct order. Certificate files must be in the same directory. """ cert_files = [ '{0}.crt'.format(domain), 'COMODORSADomainValidationSecureServerCA.crt', 'COMODORSAAddTrustCA.crt', 'AddTrustExternalCARoot.crt', ] certs = ' '.join( os.path.join(cert_path, cert_file) for cert_file in cert_files ) cmd = 'cat {certs} > {domain}.bundle.crt'.format( certs=certs, domain=domain, ) ctx.run(cmd) @task def clean_assets(ctx): """Remove built JS files.""" public_path = os.path.join(HERE, 'website', 'static', 'public') js_path = os.path.join(public_path, 'js') ctx.run('rm -rf {0}'.format(js_path), echo=True) @task(aliases=['pack']) def webpack(ctx, clean=False, watch=False, dev=False, colors=False): """Build static assets with webpack.""" if clean: clean_assets(ctx) args = ['yarn run webpack-{}'.format('dev' if dev else 'prod')] args += ['--progress'] if watch: args += ['--watch'] if colors: args += ['--colors'] command = ' '.join(args) ctx.run(command, echo=True) @task() def build_js_config_files(ctx): from website import settings print('Building JS config files...') with 
open(os.path.join(settings.STATIC_FOLDER, 'built', 'nodeCategories.json'), 'w') as fp: json.dump(settings.NODE_CATEGORY_MAP, fp) print('...Done.') @task() def assets(ctx, dev=False, watch=False, colors=False): """Install and build static assets.""" command = 'yarn install --frozen-lockfile' if not dev: command += ' --production' ctx.run(command, echo=True) bower_install(ctx) build_js_config_files(ctx) # Always set clean=False to prevent possible mistakes # on prod webpack(ctx, clean=False, watch=watch, dev=dev, colors=colors) @task def generate_self_signed(ctx, domain): """Generate self-signed SSL key and certificate. """ cmd = ( 'openssl req -x509 -nodes -days 365 -newkey rsa:2048' ' -keyout {0}.key -out {0}.crt' ).format(domain) ctx.run(cmd) @task def update_citation_styles(ctx): from scripts import parse_citation_styles total = parse_citation_styles.main() print('Parsed {} styles'.format(total)) @task def clean(ctx, verbose=False): ctx.run('find . -name "*.pyc" -delete', echo=True) @task(default=True) def usage(ctx): ctx.run('invoke --list') ### Maintenance Tasks ### @task def set_maintenance(ctx, message='', level=1, start=None, end=None): from website.app import setup_django setup_django() from website.maintenance import set_maintenance """Display maintenance notice across OSF applications (incl. preprints, registries, etc.) start - Start time for the maintenance period end - End time for the mainteance period NOTE: If no start or end values are provided, default to starting now and ending 24 hours from now. message - Message to display. If omitted, will be: "The site will undergo maintenance between <localized start time> and <localized end time>. Thank you for your patience." level - Severity level. Modifies the color of the displayed notice. Must be one of 1 (info), 2 (warning), 3 (danger). 
Examples: invoke set_maintenance --start 2016-03-16T15:41:00-04:00 --end 2016-03-16T15:42:00-04:00 invoke set_maintenance --message 'The OSF is experiencing issues connecting to a 3rd party service' --level 2 --start 2016-03-16T15:41:00-04:00 --end 2016-03-16T15:42:00-04:00 """ state = set_maintenance(message, level, start, end) print('Maintenance notice up {} to {}.'.format(state['start'], state['end'])) @task def unset_maintenance(ctx): from website.app import setup_django setup_django() from website.maintenance import unset_maintenance print('Taking down maintenance notice...') unset_maintenance() print('...Done.')
apache-2.0
thundernet8/WRGameVideos-API
venv/lib/python2.7/site-packages/sqlalchemy/sql/selectable.py
13
118995
# sql/selectable.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """The :class:`.FromClause` class of SQL expression elements, representing SQL tables and derived rowsets. """ from .elements import ClauseElement, TextClause, ClauseList, \ and_, Grouping, UnaryExpression, literal_column, BindParameter from .elements import _clone, \ _literal_as_text, _interpret_as_column_or_from, _expand_cloned,\ _select_iterables, _anonymous_label, _clause_element_as_expr,\ _cloned_intersection, _cloned_difference, True_, \ _literal_as_label_reference, _literal_and_labels_as_label_reference from .base import Immutable, Executable, _generative, \ ColumnCollection, ColumnSet, _from_objects, Generative from . import type_api from .. import inspection from .. import util from .. import exc from operator import attrgetter from . import operators import operator import collections from .annotation import Annotated import itertools from sqlalchemy.sql.visitors import Visitable def _interpret_as_from(element): insp = inspection.inspect(element, raiseerr=False) if insp is None: if isinstance(element, util.string_types): util.warn_limited( "Textual SQL FROM expression %(expr)r should be " "explicitly declared as text(%(expr)r), " "or use table(%(expr)r) for more specificity", {"expr": util.ellipses_string(element)}) return TextClause(util.text_type(element)) try: return insp.selectable except AttributeError: raise exc.ArgumentError("FROM expression expected") def _interpret_as_select(element): element = _interpret_as_from(element) if isinstance(element, Alias): element = element.original if not isinstance(element, SelectBase): element = element.select() return element class _OffsetLimitParam(BindParameter): @property def _limit_offset_value(self): return self.effective_value def _offset_or_limit_clause(element, name=None, 
type_=None): """Convert the given value to an "offset or limit" clause. This handles incoming integers and converts to an expression; if an expression is already given, it is passed through. """ if element is None: return None elif hasattr(element, '__clause_element__'): return element.__clause_element__() elif isinstance(element, Visitable): return element else: value = util.asint(element) return _OffsetLimitParam(name, value, type_=type_, unique=True) def _offset_or_limit_clause_asint(clause, attrname): """Convert the "offset or limit" clause of a select construct to an integer. This is only possible if the value is stored as a simple bound parameter. Otherwise, a compilation error is raised. """ if clause is None: return None try: value = clause._limit_offset_value except AttributeError: raise exc.CompileError( "This SELECT structure does not use a simple " "integer value for %s" % attrname) else: return util.asint(value) def subquery(alias, *args, **kwargs): """Return an :class:`.Alias` object derived from a :class:`.Select`. name alias name \*args, \**kwargs all other arguments are delivered to the :func:`select` function. """ return Select(*args, **kwargs).alias(alias) def alias(selectable, name=None, flat=False): """Return an :class:`.Alias` object. An :class:`.Alias` represents any :class:`.FromClause` with an alternate name assigned within SQL, typically using the ``AS`` clause when generated, e.g. ``SELECT * FROM table AS aliasname``. Similar functionality is available via the :meth:`~.FromClause.alias` method available on all :class:`.FromClause` subclasses. When an :class:`.Alias` is created from a :class:`.Table` object, this has the effect of the table being rendered as ``tablename AS aliasname`` in a SELECT statement. For :func:`.select` objects, the effect is that of creating a named subquery, i.e. ``(select ...) AS aliasname``. The ``name`` parameter is optional, and provides the name to use in the rendered SQL. 
If blank, an "anonymous" name will be deterministically generated at compile time. Deterministic means the name is guaranteed to be unique against other constructs used in the same statement, and will also be the same name for each successive compilation of the same statement object. :param selectable: any :class:`.FromClause` subclass, such as a table, select statement, etc. :param name: string name to be assigned as the alias. If ``None``, a name will be deterministically generated at compile time. :param flat: Will be passed through to if the given selectable is an instance of :class:`.Join` - see :meth:`.Join.alias` for details. .. versionadded:: 0.9.0 """ return selectable.alias(name=name, flat=flat) class Selectable(ClauseElement): """mark a class as being selectable""" __visit_name__ = 'selectable' is_selectable = True @property def selectable(self): return self class HasPrefixes(object): _prefixes = () @_generative def prefix_with(self, *expr, **kw): """Add one or more expressions following the statement keyword, i.e. SELECT, INSERT, UPDATE, or DELETE. Generative. This is used to support backend-specific prefix keywords such as those provided by MySQL. E.g.:: stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql") Multiple prefixes can be specified by multiple calls to :meth:`.prefix_with`. :param \*expr: textual or :class:`.ClauseElement` construct which will be rendered following the INSERT, UPDATE, or DELETE keyword. :param \**kw: A single keyword 'dialect' is accepted. This is an optional string dialect name which will limit rendering of this prefix to only that dialect. 
""" dialect = kw.pop('dialect', None) if kw: raise exc.ArgumentError("Unsupported argument(s): %s" % ",".join(kw)) self._setup_prefixes(expr, dialect) def _setup_prefixes(self, prefixes, dialect=None): self._prefixes = self._prefixes + tuple( [(_literal_as_text(p, warn=False), dialect) for p in prefixes]) class HasSuffixes(object): _suffixes = () @_generative def suffix_with(self, *expr, **kw): """Add one or more expressions following the statement as a whole. This is used to support backend-specific suffix keywords on certain constructs. E.g.:: stmt = select([col1, col2]).cte().suffix_with( "cycle empno set y_cycle to 1 default 0", dialect="oracle") Multiple suffixes can be specified by multiple calls to :meth:`.suffix_with`. :param \*expr: textual or :class:`.ClauseElement` construct which will be rendered following the target clause. :param \**kw: A single keyword 'dialect' is accepted. This is an optional string dialect name which will limit rendering of this suffix to only that dialect. """ dialect = kw.pop('dialect', None) if kw: raise exc.ArgumentError("Unsupported argument(s): %s" % ",".join(kw)) self._setup_suffixes(expr, dialect) def _setup_suffixes(self, suffixes, dialect=None): self._suffixes = self._suffixes + tuple( [(_literal_as_text(p, warn=False), dialect) for p in suffixes]) class FromClause(Selectable): """Represent an element that can be used within the ``FROM`` clause of a ``SELECT`` statement. The most common forms of :class:`.FromClause` are the :class:`.Table` and the :func:`.select` constructs. Key features common to all :class:`.FromClause` objects include: * a :attr:`.c` collection, which provides per-name access to a collection of :class:`.ColumnElement` objects. * a :attr:`.primary_key` attribute, which is a collection of all those :class:`.ColumnElement` objects that indicate the ``primary_key`` flag. 
* Methods to generate various derivations of a "from" clause, including :meth:`.FromClause.alias`, :meth:`.FromClause.join`, :meth:`.FromClause.select`. """ __visit_name__ = 'fromclause' named_with_column = False _hide_froms = [] _is_join = False _is_select = False _is_from_container = False _textual = False """a marker that allows us to easily distinguish a :class:`.TextAsFrom` or similar object from other kinds of :class:`.FromClause` objects.""" schema = None """Define the 'schema' attribute for this :class:`.FromClause`. This is typically ``None`` for most objects except that of :class:`.Table`, where it is taken as the value of the :paramref:`.Table.schema` argument. """ _memoized_property = util.group_expirable_memoized_property(["_columns"]) @util.dependencies("sqlalchemy.sql.functions") def count(self, functions, whereclause=None, **params): """return a SELECT COUNT generated against this :class:`.FromClause`.""" if self.primary_key: col = list(self.primary_key)[0] else: col = list(self.columns)[0] return Select( [functions.func.count(col).label('tbl_row_count')], whereclause, from_obj=[self], **params) def select(self, whereclause=None, **params): """return a SELECT of this :class:`.FromClause`. .. seealso:: :func:`~.sql.expression.select` - general purpose method which allows for arbitrary column lists. """ return Select([self], whereclause, **params) def join(self, right, onclause=None, isouter=False): """Return a :class:`.Join` from this :class:`.FromClause` to another :class:`FromClause`. E.g.:: from sqlalchemy import join j = user_table.join(address_table, user_table.c.id == address_table.c.user_id) stmt = select([user_table]).select_from(j) would emit SQL along the lines of:: SELECT user.id, user.name FROM user JOIN address ON user.id = address.user_id :param right: the right side of the join; this is any :class:`.FromClause` object such as a :class:`.Table` object, and may also be a selectable-compatible object such as an ORM-mapped class. 
:param onclause: a SQL expression representing the ON clause of the join. If left at ``None``, :meth:`.FromClause.join` will attempt to join the two tables based on a foreign key relationship. :param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN. .. seealso:: :func:`.join` - standalone function :class:`.Join` - the type of object produced """ return Join(self, right, onclause, isouter) def outerjoin(self, right, onclause=None): """Return a :class:`.Join` from this :class:`.FromClause` to another :class:`FromClause`, with the "isouter" flag set to True. E.g.:: from sqlalchemy import outerjoin j = user_table.outerjoin(address_table, user_table.c.id == address_table.c.user_id) The above is equivalent to:: j = user_table.join( address_table, user_table.c.id == address_table.c.user_id, isouter=True) :param right: the right side of the join; this is any :class:`.FromClause` object such as a :class:`.Table` object, and may also be a selectable-compatible object such as an ORM-mapped class. :param onclause: a SQL expression representing the ON clause of the join. If left at ``None``, :meth:`.FromClause.join` will attempt to join the two tables based on a foreign key relationship. .. seealso:: :meth:`.FromClause.join` :class:`.Join` """ return Join(self, right, onclause, True) def alias(self, name=None, flat=False): """return an alias of this :class:`.FromClause`. This is shorthand for calling:: from sqlalchemy import alias a = alias(self, name=name) See :func:`~.expression.alias` for details. """ return Alias(self, name) def is_derived_from(self, fromclause): """Return True if this FromClause is 'derived' from the given FromClause. An example would be an Alias of a Table is derived from that Table. """ # this is essentially an "identity" check in the base class. # Other constructs override this to traverse through # contained elements. 
return fromclause in self._cloned_set def _is_lexical_equivalent(self, other): """Return True if this FromClause and the other represent the same lexical identity. This tests if either one is a copy of the other, or if they are the same via annotation identity. """ return self._cloned_set.intersection(other._cloned_set) @util.dependencies("sqlalchemy.sql.util") def replace_selectable(self, sqlutil, old, alias): """replace all occurrences of FromClause 'old' with the given Alias object, returning a copy of this :class:`.FromClause`. """ return sqlutil.ClauseAdapter(alias).traverse(self) def correspond_on_equivalents(self, column, equivalents): """Return corresponding_column for the given column, or if None search for a match in the given dictionary. """ col = self.corresponding_column(column, require_embedded=True) if col is None and col in equivalents: for equiv in equivalents[col]: nc = self.corresponding_column(equiv, require_embedded=True) if nc: return nc return col def corresponding_column(self, column, require_embedded=False): """Given a :class:`.ColumnElement`, return the exported :class:`.ColumnElement` object from this :class:`.Selectable` which corresponds to that original :class:`~sqlalchemy.schema.Column` via a common ancestor column. :param column: the target :class:`.ColumnElement` to be matched :param require_embedded: only return corresponding columns for the given :class:`.ColumnElement`, if the given :class:`.ColumnElement` is actually present within a sub-element of this :class:`.FromClause`. Normally the column will match if it merely shares a common ancestor with one of the exported columns of this :class:`.FromClause`. 
""" def embedded(expanded_proxy_set, target_set): for t in target_set.difference(expanded_proxy_set): if not set(_expand_cloned([t]) ).intersection(expanded_proxy_set): return False return True # don't dig around if the column is locally present if self.c.contains_column(column): return column col, intersect = None, None target_set = column.proxy_set cols = self.c._all_columns for c in cols: expanded_proxy_set = set(_expand_cloned(c.proxy_set)) i = target_set.intersection(expanded_proxy_set) if i and (not require_embedded or embedded(expanded_proxy_set, target_set)): if col is None: # no corresponding column yet, pick this one. col, intersect = c, i elif len(i) > len(intersect): # 'c' has a larger field of correspondence than # 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x # matches a1.c.x->table.c.x better than # selectable.c.x->table.c.x does. col, intersect = c, i elif i == intersect: # they have the same field of correspondence. see # which proxy_set has fewer columns in it, which # indicates a closer relationship with the root # column. Also take into account the "weight" # attribute which CompoundSelect() uses to give # higher precedence to columns based on vertical # position in the compound statement, and discard # columns that have no reference to the target # column (also occurs with CompoundSelect) col_distance = util.reduce( operator.add, [sc._annotations.get('weight', 1) for sc in col.proxy_set if sc.shares_lineage(column)]) c_distance = util.reduce( operator.add, [sc._annotations.get('weight', 1) for sc in c.proxy_set if sc.shares_lineage(column)]) if c_distance < col_distance: col, intersect = c, i return col @property def description(self): """a brief description of this FromClause. Used primarily for error message formatting. 
""" return getattr(self, 'name', self.__class__.__name__ + " object") def _reset_exported(self): """delete memoized collections when a FromClause is cloned.""" self._memoized_property.expire_instance(self) @_memoized_property def columns(self): """A named-based collection of :class:`.ColumnElement` objects maintained by this :class:`.FromClause`. The :attr:`.columns`, or :attr:`.c` collection, is the gateway to the construction of SQL expressions using table-bound or other selectable-bound columns:: select([mytable]).where(mytable.c.somecolumn == 5) """ if '_columns' not in self.__dict__: self._init_collections() self._populate_column_collection() return self._columns.as_immutable() @_memoized_property def primary_key(self): """Return the collection of Column objects which comprise the primary key of this FromClause.""" self._init_collections() self._populate_column_collection() return self.primary_key @_memoized_property def foreign_keys(self): """Return the collection of ForeignKey objects which this FromClause references.""" self._init_collections() self._populate_column_collection() return self.foreign_keys c = property(attrgetter('columns'), doc="An alias for the :attr:`.columns` attribute.") _select_iterable = property(attrgetter('columns')) def _init_collections(self): assert '_columns' not in self.__dict__ assert 'primary_key' not in self.__dict__ assert 'foreign_keys' not in self.__dict__ self._columns = ColumnCollection() self.primary_key = ColumnSet() self.foreign_keys = set() @property def _cols_populated(self): return '_columns' in self.__dict__ def _populate_column_collection(self): """Called on subclasses to establish the .c collection. Each implementation has a different way of establishing this collection. """ def _refresh_for_new_column(self, column): """Given a column added to the .c collection of an underlying selectable, produce the local version of that column, assuming this selectable ultimately should proxy this column. 
this is used to "ping" a derived selectable to add a new column to its .c. collection when a Column has been added to one of the Table objects it ultimtely derives from. If the given selectable hasn't populated its .c. collection yet, it should at least pass on the message to the contained selectables, but it will return None. This method is currently used by Declarative to allow Table columns to be added to a partially constructed inheritance mapping that may have already produced joins. The method isn't public right now, as the full span of implications and/or caveats aren't yet clear. It's also possible that this functionality could be invoked by default via an event, which would require that selectables maintain a weak referencing collection of all derivations. """ if not self._cols_populated: return None elif (column.key in self.columns and self.columns[column.key] is column): return column else: return None class Join(FromClause): """represent a ``JOIN`` construct between two :class:`.FromClause` elements. The public constructor function for :class:`.Join` is the module-level :func:`.join()` function, as well as the :meth:`.FromClause.join` method of any :class:`.FromClause` (e.g. such as :class:`.Table`). .. seealso:: :func:`.join` :meth:`.FromClause.join` """ __visit_name__ = 'join' _is_join = True def __init__(self, left, right, onclause=None, isouter=False): """Construct a new :class:`.Join`. The usual entrypoint here is the :func:`~.expression.join` function or the :meth:`.FromClause.join` method of any :class:`.FromClause` object. """ self.left = _interpret_as_from(left) self.right = _interpret_as_from(right).self_group() if onclause is None: self.onclause = self._match_primaries(self.left, self.right) else: self.onclause = onclause self.isouter = isouter @classmethod def _create_outerjoin(cls, left, right, onclause=None): """Return an ``OUTER JOIN`` clause element. The returned object is an instance of :class:`.Join`. 
Similar functionality is also available via the :meth:`~.FromClause.outerjoin()` method on any :class:`.FromClause`. :param left: The left side of the join. :param right: The right side of the join. :param onclause: Optional criterion for the ``ON`` clause, is derived from foreign key relationships established between left and right otherwise. To chain joins together, use the :meth:`.FromClause.join` or :meth:`.FromClause.outerjoin` methods on the resulting :class:`.Join` object. """ return cls(left, right, onclause, isouter=True) @classmethod def _create_join(cls, left, right, onclause=None, isouter=False): """Produce a :class:`.Join` object, given two :class:`.FromClause` expressions. E.g.:: j = join(user_table, address_table, user_table.c.id == address_table.c.user_id) stmt = select([user_table]).select_from(j) would emit SQL along the lines of:: SELECT user.id, user.name FROM user JOIN address ON user.id = address.user_id Similar functionality is available given any :class:`.FromClause` object (e.g. such as a :class:`.Table`) using the :meth:`.FromClause.join` method. :param left: The left side of the join. :param right: the right side of the join; this is any :class:`.FromClause` object such as a :class:`.Table` object, and may also be a selectable-compatible object such as an ORM-mapped class. :param onclause: a SQL expression representing the ON clause of the join. If left at ``None``, :meth:`.FromClause.join` will attempt to join the two tables based on a foreign key relationship. :param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN. .. 
seealso:: :meth:`.FromClause.join` - method form, based on a given left side :class:`.Join` - the type of object produced """ return cls(left, right, onclause, isouter) @property def description(self): return "Join object on %s(%d) and %s(%d)" % ( self.left.description, id(self.left), self.right.description, id(self.right)) def is_derived_from(self, fromclause): return fromclause is self or \ self.left.is_derived_from(fromclause) or \ self.right.is_derived_from(fromclause) def self_group(self, against=None): return FromGrouping(self) @util.dependencies("sqlalchemy.sql.util") def _populate_column_collection(self, sqlutil): columns = [c for c in self.left.columns] + \ [c for c in self.right.columns] self.primary_key.extend(sqlutil.reduce_columns( (c for c in columns if c.primary_key), self.onclause)) self._columns.update((col._label, col) for col in columns) self.foreign_keys.update(itertools.chain( *[col.foreign_keys for col in columns])) def _refresh_for_new_column(self, column): col = self.left._refresh_for_new_column(column) if col is None: col = self.right._refresh_for_new_column(column) if col is not None: if self._cols_populated: self._columns[col._label] = col self.foreign_keys.add(col) if col.primary_key: self.primary_key.add(col) return col return None def _copy_internals(self, clone=_clone, **kw): self._reset_exported() self.left = clone(self.left, **kw) self.right = clone(self.right, **kw) self.onclause = clone(self.onclause, **kw) def get_children(self, **kwargs): return self.left, self.right, self.onclause def _match_primaries(self, left, right): if isinstance(left, Join): left_right = left.right else: left_right = None return self._join_condition(left, right, a_subset=left_right) @classmethod def _join_condition(cls, a, b, ignore_nonexistent_tables=False, a_subset=None, consider_as_foreign_keys=None): """create a join condition between two tables or selectables. 
e.g.:: join_condition(tablea, tableb) would produce an expression along the lines of:: tablea.c.id==tableb.c.tablea_id The join is determined based on the foreign key relationships between the two selectables. If there are multiple ways to join, or no way to join, an error is raised. :param ignore_nonexistent_tables: Deprecated - this flag is no longer used. Only resolution errors regarding the two given tables are propagated. :param a_subset: An optional expression that is a sub-component of ``a``. An attempt will be made to join to just this sub-component first before looking at the full ``a`` construct, and if found will be successful even if there are other ways to join to ``a``. This allows the "right side" of a join to be passed thereby providing a "natural join". """ constraints = cls._joincond_scan_left_right( a, a_subset, b, consider_as_foreign_keys) if len(constraints) > 1: cls._joincond_trim_constraints( a, b, constraints, consider_as_foreign_keys) if len(constraints) == 0: if isinstance(b, FromGrouping): hint = " Perhaps you meant to convert the right side to a "\ "subquery using alias()?" 
else: hint = "" raise exc.NoForeignKeysError( "Can't find any foreign key relationships " "between '%s' and '%s'.%s" % (a.description, b.description, hint)) crit = [(x == y) for x, y in list(constraints.values())[0]] if len(crit) == 1: return (crit[0]) else: return and_(*crit) @classmethod def _joincond_scan_left_right( cls, a, a_subset, b, consider_as_foreign_keys): constraints = collections.defaultdict(list) for left in (a_subset, a): if left is None: continue for fk in sorted( b.foreign_keys, key=lambda fk: fk.parent._creation_order): if consider_as_foreign_keys is not None and \ fk.parent not in consider_as_foreign_keys: continue try: col = fk.get_referent(left) except exc.NoReferenceError as nrte: if nrte.table_name == left.name: raise else: continue if col is not None: constraints[fk.constraint].append((col, fk.parent)) if left is not b: for fk in sorted( left.foreign_keys, key=lambda fk: fk.parent._creation_order): if consider_as_foreign_keys is not None and \ fk.parent not in consider_as_foreign_keys: continue try: col = fk.get_referent(b) except exc.NoReferenceError as nrte: if nrte.table_name == b.name: raise else: continue if col is not None: constraints[fk.constraint].append((col, fk.parent)) if constraints: break return constraints @classmethod def _joincond_trim_constraints( cls, a, b, constraints, consider_as_foreign_keys): # more than one constraint matched. narrow down the list # to include just those FKCs that match exactly to # "consider_as_foreign_keys". if consider_as_foreign_keys: for const in list(constraints): if set(f.parent for f in const.elements) != set( consider_as_foreign_keys): del constraints[const] # if still multiple constraints, but # they all refer to the exact same end result, use it. 
if len(constraints) > 1: dedupe = set(tuple(crit) for crit in constraints.values()) if len(dedupe) == 1: key = list(constraints)[0] constraints = {key: constraints[key]} if len(constraints) != 1: raise exc.AmbiguousForeignKeysError( "Can't determine join between '%s' and '%s'; " "tables have more than one foreign key " "constraint relationship between them. " "Please specify the 'onclause' of this " "join explicitly." % (a.description, b.description)) def select(self, whereclause=None, **kwargs): """Create a :class:`.Select` from this :class:`.Join`. The equivalent long-hand form, given a :class:`.Join` object ``j``, is:: from sqlalchemy import select j = select([j.left, j.right], **kw).\\ where(whereclause).\\ select_from(j) :param whereclause: the WHERE criterion that will be sent to the :func:`select()` function :param \**kwargs: all other kwargs are sent to the underlying :func:`select()` function. """ collist = [self.left, self.right] return Select(collist, whereclause, from_obj=[self], **kwargs) @property def bind(self): return self.left.bind or self.right.bind @util.dependencies("sqlalchemy.sql.util") def alias(self, sqlutil, name=None, flat=False): """return an alias of this :class:`.Join`. The default behavior here is to first produce a SELECT construct from this :class:`.Join`, then to produce an :class:`.Alias` from that. 
So given a join of the form:: j = table_a.join(table_b, table_a.c.id == table_b.c.a_id) The JOIN by itself would look like:: table_a JOIN table_b ON table_a.id = table_b.a_id Whereas the alias of the above, ``j.alias()``, would in a SELECT context look like:: (SELECT table_a.id AS table_a_id, table_b.id AS table_b_id, table_b.a_id AS table_b_a_id FROM table_a JOIN table_b ON table_a.id = table_b.a_id) AS anon_1 The equivalent long-hand form, given a :class:`.Join` object ``j``, is:: from sqlalchemy import select, alias j = alias( select([j.left, j.right]).\\ select_from(j).\\ with_labels(True).\\ correlate(False), name=name ) The selectable produced by :meth:`.Join.alias` features the same columns as that of the two individual selectables presented under a single name - the individual columns are "auto-labeled", meaning the ``.c.`` collection of the resulting :class:`.Alias` represents the names of the individual columns using a ``<tablename>_<columname>`` scheme:: j.c.table_a_id j.c.table_b_a_id :meth:`.Join.alias` also features an alternate option for aliasing joins which produces no enclosing SELECT and does not normally apply labels to the column names. The ``flat=True`` option will call :meth:`.FromClause.alias` against the left and right sides individually. 
Using this option, no new ``SELECT`` is produced; we instead, from a construct as below:: j = table_a.join(table_b, table_a.c.id == table_b.c.a_id) j = j.alias(flat=True) we get a result like this:: table_a AS table_a_1 JOIN table_b AS table_b_1 ON table_a_1.id = table_b_1.a_id The ``flat=True`` argument is also propagated to the contained selectables, so that a composite join such as:: j = table_a.join( table_b.join(table_c, table_b.c.id == table_c.c.b_id), table_b.c.a_id == table_a.c.id ).alias(flat=True) Will produce an expression like:: table_a AS table_a_1 JOIN ( table_b AS table_b_1 JOIN table_c AS table_c_1 ON table_b_1.id = table_c_1.b_id ) ON table_a_1.id = table_b_1.a_id The standalone :func:`~.expression.alias` function as well as the base :meth:`.FromClause.alias` method also support the ``flat=True`` argument as a no-op, so that the argument can be passed to the ``alias()`` method of any selectable. .. versionadded:: 0.9.0 Added the ``flat=True`` option to create "aliases" of joins without enclosing inside of a SELECT subquery. :param name: name given to the alias. :param flat: if True, produce an alias of the left and right sides of this :class:`.Join` and return the join of those two selectables. This produces join expression that does not include an enclosing SELECT. .. versionadded:: 0.9.0 .. 
seealso:: :func:`~.expression.alias` """ if flat: assert name is None, "Can't send name argument with flat" left_a, right_a = self.left.alias(flat=True), \ self.right.alias(flat=True) adapter = sqlutil.ClauseAdapter(left_a).\ chain(sqlutil.ClauseAdapter(right_a)) return left_a.join(right_a, adapter.traverse(self.onclause), isouter=self.isouter) else: return self.select(use_labels=True, correlate=False).alias(name) @property def _hide_froms(self): return itertools.chain(*[_from_objects(x.left, x.right) for x in self._cloned_set]) @property def _from_objects(self): return [self] + \ self.onclause._from_objects + \ self.left._from_objects + \ self.right._from_objects class Alias(FromClause): """Represents an table or selectable alias (AS). Represents an alias, as typically applied to any table or sub-select within a SQL statement using the ``AS`` keyword (or without the keyword on certain databases such as Oracle). This object is constructed from the :func:`~.expression.alias` module level function as well as the :meth:`.FromClause.alias` method available on all :class:`.FromClause` subclasses. 
""" __visit_name__ = 'alias' named_with_column = True _is_from_container = True def __init__(self, selectable, name=None): baseselectable = selectable while isinstance(baseselectable, Alias): baseselectable = baseselectable.element self.original = baseselectable self.supports_execution = baseselectable.supports_execution if self.supports_execution: self._execution_options = baseselectable._execution_options self.element = selectable if name is None: if self.original.named_with_column: name = getattr(self.original, 'name', None) name = _anonymous_label('%%(%d %s)s' % (id(self), name or 'anon')) self.name = name @property def description(self): if util.py3k: return self.name else: return self.name.encode('ascii', 'backslashreplace') def as_scalar(self): try: return self.element.as_scalar() except AttributeError: raise AttributeError("Element %s does not support " "'as_scalar()'" % self.element) def is_derived_from(self, fromclause): if fromclause in self._cloned_set: return True return self.element.is_derived_from(fromclause) def _populate_column_collection(self): for col in self.element.columns._all_columns: col._make_proxy(self) def _refresh_for_new_column(self, column): col = self.element._refresh_for_new_column(column) if col is not None: if not self._cols_populated: return None else: return col._make_proxy(self) else: return None def _copy_internals(self, clone=_clone, **kw): # don't apply anything to an aliased Table # for now. May want to drive this from # the given **kw. 
if isinstance(self.element, TableClause): return self._reset_exported() self.element = clone(self.element, **kw) baseselectable = self.element while isinstance(baseselectable, Alias): baseselectable = baseselectable.element self.original = baseselectable def get_children(self, column_collections=True, **kw): if column_collections: for c in self.c: yield c yield self.element @property def _from_objects(self): return [self] @property def bind(self): return self.element.bind class CTE(Generative, HasSuffixes, Alias): """Represent a Common Table Expression. The :class:`.CTE` object is obtained using the :meth:`.SelectBase.cte` method from any selectable. See that method for complete examples. .. versionadded:: 0.7.6 """ __visit_name__ = 'cte' def __init__(self, selectable, name=None, recursive=False, _cte_alias=None, _restates=frozenset(), _suffixes=None): self.recursive = recursive self._cte_alias = _cte_alias self._restates = _restates if _suffixes: self._suffixes = _suffixes super(CTE, self).__init__(selectable, name=name) def alias(self, name=None, flat=False): return CTE( self.original, name=name, recursive=self.recursive, _cte_alias=self, _suffixes=self._suffixes ) def union(self, other): return CTE( self.original.union(other), name=self.name, recursive=self.recursive, _restates=self._restates.union([self]), _suffixes=self._suffixes ) def union_all(self, other): return CTE( self.original.union_all(other), name=self.name, recursive=self.recursive, _restates=self._restates.union([self]), _suffixes=self._suffixes ) class FromGrouping(FromClause): """Represent a grouping of a FROM clause""" __visit_name__ = 'grouping' def __init__(self, element): self.element = element def _init_collections(self): pass @property def columns(self): return self.element.columns @property def primary_key(self): return self.element.primary_key @property def foreign_keys(self): return self.element.foreign_keys def is_derived_from(self, element): return self.element.is_derived_from(element) 
def alias(self, **kw): return FromGrouping(self.element.alias(**kw)) @property def _hide_froms(self): return self.element._hide_froms def get_children(self, **kwargs): return self.element, def _copy_internals(self, clone=_clone, **kw): self.element = clone(self.element, **kw) @property def _from_objects(self): return self.element._from_objects def __getattr__(self, attr): return getattr(self.element, attr) def __getstate__(self): return {'element': self.element} def __setstate__(self, state): self.element = state['element'] class TableClause(Immutable, FromClause): """Represents a minimal "table" construct. This is a lightweight table object that has only a name and a collection of columns, which are typically produced by the :func:`.expression.column` function:: from sqlalchemy import table, column user = table("user", column("id"), column("name"), column("description"), ) The :class:`.TableClause` construct serves as the base for the more commonly used :class:`~.schema.Table` object, providing the usual set of :class:`~.expression.FromClause` services including the ``.c.`` collection and statement generation methods. It does **not** provide all the additional schema-level services of :class:`~.schema.Table`, including constraints, references to other tables, or support for :class:`.MetaData`-level services. It's useful on its own as an ad-hoc construct used to generate quick SQL statements when a more fully fledged :class:`~.schema.Table` is not on hand. """ __visit_name__ = 'table' named_with_column = True implicit_returning = False """:class:`.TableClause` doesn't support having a primary key or column -level defaults, so implicit returning doesn't apply.""" _autoincrement_column = None """No PK or default support so no autoincrement column.""" def __init__(self, name, *columns): """Produce a new :class:`.TableClause`. 
The object returned is an instance of :class:`.TableClause`, which represents the "syntactical" portion of the schema-level :class:`~.schema.Table` object. It may be used to construct lightweight table constructs. .. versionchanged:: 1.0.0 :func:`.expression.table` can now be imported from the plain ``sqlalchemy`` namespace like any other SQL element. :param name: Name of the table. :param columns: A collection of :func:`.expression.column` constructs. """ super(TableClause, self).__init__() self.name = self.fullname = name self._columns = ColumnCollection() self.primary_key = ColumnSet() self.foreign_keys = set() for c in columns: self.append_column(c) def _init_collections(self): pass @util.memoized_property def description(self): if util.py3k: return self.name else: return self.name.encode('ascii', 'backslashreplace') def append_column(self, c): self._columns[c.key] = c c.table = self def get_children(self, column_collections=True, **kwargs): if column_collections: return [c for c in self.c] else: return [] @util.dependencies("sqlalchemy.sql.functions") def count(self, functions, whereclause=None, **params): """return a SELECT COUNT generated against this :class:`.TableClause`.""" if self.primary_key: col = list(self.primary_key)[0] else: col = list(self.columns)[0] return Select( [functions.func.count(col).label('tbl_row_count')], whereclause, from_obj=[self], **params) @util.dependencies("sqlalchemy.sql.dml") def insert(self, dml, values=None, inline=False, **kwargs): """Generate an :func:`.insert` construct against this :class:`.TableClause`. E.g.:: table.insert().values(name='foo') See :func:`.insert` for argument and usage information. """ return dml.Insert(self, values=values, inline=inline, **kwargs) @util.dependencies("sqlalchemy.sql.dml") def update( self, dml, whereclause=None, values=None, inline=False, **kwargs): """Generate an :func:`.update` construct against this :class:`.TableClause`. 
        E.g.::

            table.update().where(table.c.id==7).values(name='foo')

        See :func:`.update` for argument and usage information.

        """
        return dml.Update(self, whereclause=whereclause,
                          values=values, inline=inline, **kwargs)

    @util.dependencies("sqlalchemy.sql.dml")
    def delete(self, dml, whereclause=None, **kwargs):
        """Generate a :func:`.delete` construct against this
        :class:`.TableClause`.

        E.g.::

            table.delete().where(table.c.id==7)

        See :func:`.delete` for argument and usage information.

        """
        return dml.Delete(self, whereclause, **kwargs)

    @property
    def _from_objects(self):
        # a table is its own FROM object
        return [self]


class ForUpdateArg(ClauseElement):

    # NOTE(review): classmethod whose first argument is conventionally
    # named ``cls``; kept as-is here since only docs may change.
    @classmethod
    def parse_legacy_select(self, arg):
        """Parse the for_update argument of :func:`.select`.

        :param arg: Defines the lockmode to use.

            ``None`` - translates to no lockmode

            ``'update'`` - translates to ``FOR UPDATE``
            (standard SQL, supported by most dialects)

            ``'nowait'`` - translates to ``FOR UPDATE NOWAIT``
            (supported by Oracle, PostgreSQL 8.1 upwards)

            ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
            and ``FOR SHARE`` (for PostgreSQL)

            ``'read_nowait'`` - translates to ``FOR SHARE NOWAIT``
            (supported by PostgreSQL). ``FOR SHARE`` and
            ``FOR SHARE NOWAIT`` (PostgreSQL).

        """
        # None/False mean "no FOR UPDATE clause at all"
        if arg in (None, False):
            return None

        nowait = read = False
        if arg == 'nowait':
            nowait = True
        elif arg == 'read':
            read = True
        elif arg == 'read_nowait':
            read = nowait = True
        elif arg is not True:
            # arg is True falls through to a plain FOR UPDATE
            raise exc.ArgumentError("Unknown for_update argument: %r" % arg)

        return ForUpdateArg(read=read, nowait=nowait)

    @property
    def legacy_for_update_value(self):
        # inverse of parse_legacy_select: map flags back to the legacy
        # string (or True) value accepted by select(for_update=...)
        if self.read and not self.nowait:
            return "read"
        elif self.read and self.nowait:
            return "read_nowait"
        elif self.nowait:
            return "nowait"
        else:
            return True

    def _copy_internals(self, clone=_clone, **kw):
        # only the OF column list contains clause elements to clone
        if self.of is not None:
            self.of = [clone(col, **kw) for col in self.of]

    def __init__(self, nowait=False, read=False, of=None):
        """Represents arguments specified to
        :meth:`.Select.for_update`.

        ..
versionadded:: 0.9.0 """ self.nowait = nowait self.read = read if of is not None: self.of = [_interpret_as_column_or_from(elem) for elem in util.to_list(of)] else: self.of = None class SelectBase(Executable, FromClause): """Base class for SELECT statements. This includes :class:`.Select`, :class:`.CompoundSelect` and :class:`.TextAsFrom`. """ def as_scalar(self): """return a 'scalar' representation of this selectable, which can be used as a column expression. Typically, a select statement which has only one column in its columns clause is eligible to be used as a scalar expression. The returned object is an instance of :class:`ScalarSelect`. """ return ScalarSelect(self) def label(self, name): """return a 'scalar' representation of this selectable, embedded as a subquery with a label. .. seealso:: :meth:`~.SelectBase.as_scalar`. """ return self.as_scalar().label(name) def cte(self, name=None, recursive=False): """Return a new :class:`.CTE`, or Common Table Expression instance. Common table expressions are a SQL standard whereby SELECT statements can draw upon secondary statements specified along with the primary statement, using a clause called "WITH". Special semantics regarding UNION can also be employed to allow "recursive" queries, where a SELECT statement can draw upon the set of rows that have previously been selected. SQLAlchemy detects :class:`.CTE` objects, which are treated similarly to :class:`.Alias` objects, as special elements to be delivered to the FROM clause of the statement as well as to a WITH clause at the top of the statement. .. versionadded:: 0.7.6 :param name: name given to the common table expression. Like :meth:`._FromClause.alias`, the name can be left as ``None`` in which case an anonymous symbol will be used at query compile time. :param recursive: if ``True``, will render ``WITH RECURSIVE``. A recursive common table expression is intended to be used in conjunction with UNION ALL in order to derive rows from those already selected. 
The following examples illustrate two examples from Postgresql's documentation at http://www.postgresql.org/docs/8.4/static/queries-with.html. Example 1, non recursive:: from sqlalchemy import (Table, Column, String, Integer, MetaData, select, func) metadata = MetaData() orders = Table('orders', metadata, Column('region', String), Column('amount', Integer), Column('product', String), Column('quantity', Integer) ) regional_sales = select([ orders.c.region, func.sum(orders.c.amount).label('total_sales') ]).group_by(orders.c.region).cte("regional_sales") top_regions = select([regional_sales.c.region]).\\ where( regional_sales.c.total_sales > select([ func.sum(regional_sales.c.total_sales)/10 ]) ).cte("top_regions") statement = select([ orders.c.region, orders.c.product, func.sum(orders.c.quantity).label("product_units"), func.sum(orders.c.amount).label("product_sales") ]).where(orders.c.region.in_( select([top_regions.c.region]) )).group_by(orders.c.region, orders.c.product) result = conn.execute(statement).fetchall() Example 2, WITH RECURSIVE:: from sqlalchemy import (Table, Column, String, Integer, MetaData, select, func) metadata = MetaData() parts = Table('parts', metadata, Column('part', String), Column('sub_part', String), Column('quantity', Integer), ) included_parts = select([ parts.c.sub_part, parts.c.part, parts.c.quantity]).\\ where(parts.c.part=='our part').\\ cte(recursive=True) incl_alias = included_parts.alias() parts_alias = parts.alias() included_parts = included_parts.union_all( select([ parts_alias.c.sub_part, parts_alias.c.part, parts_alias.c.quantity ]). where(parts_alias.c.part==incl_alias.c.sub_part) ) statement = select([ included_parts.c.sub_part, func.sum(included_parts.c.quantity). label('total_quantity') ]).\\ group_by(included_parts.c.sub_part) result = conn.execute(statement).fetchall() .. seealso:: :meth:`.orm.query.Query.cte` - ORM version of :meth:`.SelectBase.cte`. 
""" return CTE(self, name=name, recursive=recursive) @_generative @util.deprecated('0.6', message="``autocommit()`` is deprecated. Use " ":meth:`.Executable.execution_options` with the " "'autocommit' flag.") def autocommit(self): """return a new selectable with the 'autocommit' flag set to True. """ self._execution_options = \ self._execution_options.union({'autocommit': True}) def _generate(self): """Override the default _generate() method to also clear out exported collections.""" s = self.__class__.__new__(self.__class__) s.__dict__ = self.__dict__.copy() s._reset_exported() return s @property def _from_objects(self): return [self] class GenerativeSelect(SelectBase): """Base class for SELECT statements where additional elements can be added. This serves as the base for :class:`.Select` and :class:`.CompoundSelect` where elements such as ORDER BY, GROUP BY can be added and column rendering can be controlled. Compare to :class:`.TextAsFrom`, which, while it subclasses :class:`.SelectBase` and is also a SELECT construct, represents a fixed textual string which cannot be altered at this level, only wrapped as a subquery. .. versionadded:: 0.9.0 :class:`.GenerativeSelect` was added to provide functionality specific to :class:`.Select` and :class:`.CompoundSelect` while allowing :class:`.SelectBase` to be used for other SELECT-like objects, e.g. :class:`.TextAsFrom`. """ _order_by_clause = ClauseList() _group_by_clause = ClauseList() _limit_clause = None _offset_clause = None _for_update_arg = None def __init__(self, use_labels=False, for_update=False, limit=None, offset=None, order_by=None, group_by=None, bind=None, autocommit=None): self.use_labels = use_labels if for_update is not False: self._for_update_arg = (ForUpdateArg. parse_legacy_select(for_update)) if autocommit is not None: util.warn_deprecated('autocommit on select() is ' 'deprecated. 
Use .execution_options(a' 'utocommit=True)') self._execution_options = \ self._execution_options.union( {'autocommit': autocommit}) if limit is not None: self._limit_clause = _offset_or_limit_clause(limit) if offset is not None: self._offset_clause = _offset_or_limit_clause(offset) self._bind = bind if order_by is not None: self._order_by_clause = ClauseList( *util.to_list(order_by), _literal_as_text=_literal_and_labels_as_label_reference) if group_by is not None: self._group_by_clause = ClauseList( *util.to_list(group_by), _literal_as_text=_literal_as_label_reference) @property def for_update(self): """Provide legacy dialect support for the ``for_update`` attribute. """ if self._for_update_arg is not None: return self._for_update_arg.legacy_for_update_value else: return None @for_update.setter def for_update(self, value): self._for_update_arg = ForUpdateArg.parse_legacy_select(value) @_generative def with_for_update(self, nowait=False, read=False, of=None): """Specify a ``FOR UPDATE`` clause for this :class:`.GenerativeSelect`. E.g.:: stmt = select([table]).with_for_update(nowait=True) On a database like Postgresql or Oracle, the above would render a statement like:: SELECT table.a, table.b FROM table FOR UPDATE NOWAIT on other backends, the ``nowait`` option is ignored and instead would produce:: SELECT table.a, table.b FROM table FOR UPDATE When called with no arguments, the statement will render with the suffix ``FOR UPDATE``. Additional arguments can then be provided which allow for common database-specific variants. :param nowait: boolean; will render ``FOR UPDATE NOWAIT`` on Oracle and Postgresql dialects. :param read: boolean; will render ``LOCK IN SHARE MODE`` on MySQL, ``FOR SHARE`` on Postgresql. On Postgresql, when combined with ``nowait``, will render ``FOR SHARE NOWAIT``. 
        :param of: SQL expression or list of SQL expression elements
          (typically :class:`.Column` objects or a compatible expression)
          which will render into a ``FOR UPDATE OF`` clause; supported
          by PostgreSQL and Oracle.  May render as a table or as a column
          depending on backend.

        .. versionadded:: 0.9.0

        """
        # applied to the generative copy produced by @_generative
        self._for_update_arg = ForUpdateArg(nowait=nowait,
                                            read=read,
                                            of=of)

    @_generative
    def apply_labels(self):
        """return a new selectable with the 'use_labels' flag set to True.

        This will result in column expressions being generated using
        labels against their table name, such as
        "SELECT somecolumn AS tablename_somecolumn". This allows
        selectables which contain multiple FROM clauses to produce a
        unique set of column names regardless of name conflicts among
        the individual FROM clauses.

        """
        self.use_labels = True

    @property
    def _limit(self):
        """Get an integer value for the limit.  This should only be used
        by code that cannot support a limit as a BindParameter or
        other custom clause as it will throw an exception if the limit
        isn't currently set to an integer.

        """
        return _offset_or_limit_clause_asint(self._limit_clause, "limit")

    @property
    def _simple_int_limit(self):
        """True if the LIMIT clause is a simple integer, False
        if it is not present or is a SQL expression.
        """
        return isinstance(self._limit_clause, _OffsetLimitParam)

    @property
    def _simple_int_offset(self):
        """True if the OFFSET clause is a simple integer, False
        if it is not present or is a SQL expression.
        """
        return isinstance(self._offset_clause, _OffsetLimitParam)

    @property
    def _offset(self):
        """Get an integer value for the offset.  This should only be used
        by code that cannot support an offset as a BindParameter or
        other custom clause as it will throw an exception if the
        offset isn't currently set to an integer.

        """
        return _offset_or_limit_clause_asint(self._offset_clause, "offset")

    @_generative
    def limit(self, limit):
        """return a new selectable with the given LIMIT criterion
        applied.
This is a numerical value which usually renders as a ``LIMIT`` expression in the resulting select. Backends that don't support ``LIMIT`` will attempt to provide similar functionality. .. versionchanged:: 1.0.0 - :meth:`.Select.limit` can now accept arbitrary SQL expressions as well as integer values. :param limit: an integer LIMIT parameter, or a SQL expression that provides an integer result. """ self._limit_clause = _offset_or_limit_clause(limit) @_generative def offset(self, offset): """return a new selectable with the given OFFSET criterion applied. This is a numeric value which usually renders as an ``OFFSET`` expression in the resulting select. Backends that don't support ``OFFSET`` will attempt to provide similar functionality. .. versionchanged:: 1.0.0 - :meth:`.Select.offset` can now accept arbitrary SQL expressions as well as integer values. :param offset: an integer OFFSET parameter, or a SQL expression that provides an integer result. """ self._offset_clause = _offset_or_limit_clause(offset) @_generative def order_by(self, *clauses): """return a new selectable with the given list of ORDER BY criterion applied. The criterion will be appended to any pre-existing ORDER BY criterion. """ self.append_order_by(*clauses) @_generative def group_by(self, *clauses): """return a new selectable with the given list of GROUP BY criterion applied. The criterion will be appended to any pre-existing GROUP BY criterion. """ self.append_group_by(*clauses) def append_order_by(self, *clauses): """Append the given ORDER BY criterion applied to this selectable. The criterion will be appended to any pre-existing ORDER BY criterion. This is an **in-place** mutation method; the :meth:`~.GenerativeSelect.order_by` method is preferred, as it provides standard :term:`method chaining`. 
""" if len(clauses) == 1 and clauses[0] is None: self._order_by_clause = ClauseList() else: if getattr(self, '_order_by_clause', None) is not None: clauses = list(self._order_by_clause) + list(clauses) self._order_by_clause = ClauseList( *clauses, _literal_as_text=_literal_and_labels_as_label_reference) def append_group_by(self, *clauses): """Append the given GROUP BY criterion applied to this selectable. The criterion will be appended to any pre-existing GROUP BY criterion. This is an **in-place** mutation method; the :meth:`~.GenerativeSelect.group_by` method is preferred, as it provides standard :term:`method chaining`. """ if len(clauses) == 1 and clauses[0] is None: self._group_by_clause = ClauseList() else: if getattr(self, '_group_by_clause', None) is not None: clauses = list(self._group_by_clause) + list(clauses) self._group_by_clause = ClauseList( *clauses, _literal_as_text=_literal_as_label_reference) @property def _label_resolve_dict(self): raise NotImplementedError() def _copy_internals(self, clone=_clone, **kw): if self._limit_clause is not None: self._limit_clause = clone(self._limit_clause, **kw) if self._offset_clause is not None: self._offset_clause = clone(self._offset_clause, **kw) class CompoundSelect(GenerativeSelect): """Forms the basis of ``UNION``, ``UNION ALL``, and other SELECT-based set operations. .. seealso:: :func:`.union` :func:`.union_all` :func:`.intersect` :func:`.intersect_all` :func:`.except` :func:`.except_all` """ __visit_name__ = 'compound_select' UNION = util.symbol('UNION') UNION_ALL = util.symbol('UNION ALL') EXCEPT = util.symbol('EXCEPT') EXCEPT_ALL = util.symbol('EXCEPT ALL') INTERSECT = util.symbol('INTERSECT') INTERSECT_ALL = util.symbol('INTERSECT ALL') _is_from_container = True def __init__(self, keyword, *selects, **kwargs): self._auto_correlate = kwargs.pop('correlate', False) self.keyword = keyword self.selects = [] numcols = None # some DBs do not like ORDER BY in the inner queries of a UNION, etc. 
for n, s in enumerate(selects): s = _clause_element_as_expr(s) if not numcols: numcols = len(s.c._all_columns) elif len(s.c._all_columns) != numcols: raise exc.ArgumentError( 'All selectables passed to ' 'CompoundSelect must have identical numbers of ' 'columns; select #%d has %d columns, select ' '#%d has %d' % (1, len(self.selects[0].c._all_columns), n + 1, len(s.c._all_columns)) ) self.selects.append(s.self_group(self)) GenerativeSelect.__init__(self, **kwargs) @property def _label_resolve_dict(self): d = dict( (c.key, c) for c in self.c ) return d, d @classmethod def _create_union(cls, *selects, **kwargs): """Return a ``UNION`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. A similar :func:`union()` method is available on all :class:`.FromClause` subclasses. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.UNION, *selects, **kwargs) @classmethod def _create_union_all(cls, *selects, **kwargs): """Return a ``UNION ALL`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. A similar :func:`union_all()` method is available on all :class:`.FromClause` subclasses. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs) @classmethod def _create_except(cls, *selects, **kwargs): """Return an ``EXCEPT`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.EXCEPT, *selects, **kwargs) @classmethod def _create_except_all(cls, *selects, **kwargs): """Return an ``EXCEPT ALL`` of multiple selectables. 
The returned object is an instance of :class:`.CompoundSelect`. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.EXCEPT_ALL, *selects, **kwargs) @classmethod def _create_intersect(cls, *selects, **kwargs): """Return an ``INTERSECT`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.INTERSECT, *selects, **kwargs) @classmethod def _create_intersect_all(cls, *selects, **kwargs): """Return an ``INTERSECT ALL`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect( CompoundSelect.INTERSECT_ALL, *selects, **kwargs) def _scalar_type(self): return self.selects[0]._scalar_type() def self_group(self, against=None): return FromGrouping(self) def is_derived_from(self, fromclause): for s in self.selects: if s.is_derived_from(fromclause): return True return False def _populate_column_collection(self): for cols in zip(*[s.c._all_columns for s in self.selects]): # this is a slightly hacky thing - the union exports a # column that resembles just that of the *first* selectable. # to get at a "composite" column, particularly foreign keys, # you have to dig through the proxies collection which we # generate below. We may want to improve upon this, such as # perhaps _make_proxy can accept a list of other columns # that are "shared" - schema.column can then copy all the # ForeignKeys in. this would allow the union() to have all # those fks too. 
proxy = cols[0]._make_proxy( self, name=cols[0]._label if self.use_labels else None, key=cols[0]._key_label if self.use_labels else None) # hand-construct the "_proxies" collection to include all # derived columns place a 'weight' annotation corresponding # to how low in the list of select()s the column occurs, so # that the corresponding_column() operation can resolve # conflicts proxy._proxies = [ c._annotate({'weight': i + 1}) for (i, c) in enumerate(cols)] def _refresh_for_new_column(self, column): for s in self.selects: s._refresh_for_new_column(column) if not self._cols_populated: return None raise NotImplementedError("CompoundSelect constructs don't support " "addition of columns to underlying " "selectables") def _copy_internals(self, clone=_clone, **kw): super(CompoundSelect, self)._copy_internals(clone, **kw) self._reset_exported() self.selects = [clone(s, **kw) for s in self.selects] if hasattr(self, '_col_map'): del self._col_map for attr in ( '_order_by_clause', '_group_by_clause', '_for_update_arg'): if getattr(self, attr) is not None: setattr(self, attr, clone(getattr(self, attr), **kw)) def get_children(self, column_collections=True, **kwargs): return (column_collections and list(self.c) or []) \ + [self._order_by_clause, self._group_by_clause] \ + list(self.selects) def bind(self): if self._bind: return self._bind for s in self.selects: e = s.bind if e: return e else: return None def _set_bind(self, bind): self._bind = bind bind = property(bind, _set_bind) class Select(HasPrefixes, HasSuffixes, GenerativeSelect): """Represents a ``SELECT`` statement. 
""" __visit_name__ = 'select' _prefixes = () _suffixes = () _hints = util.immutabledict() _statement_hints = () _distinct = False _from_cloned = None _correlate = () _correlate_except = None _memoized_property = SelectBase._memoized_property _is_select = True def __init__(self, columns=None, whereclause=None, from_obj=None, distinct=False, having=None, correlate=True, prefixes=None, suffixes=None, **kwargs): """Construct a new :class:`.Select`. Similar functionality is also available via the :meth:`.FromClause.select` method on any :class:`.FromClause`. All arguments which accept :class:`.ClauseElement` arguments also accept string arguments, which will be converted as appropriate into either :func:`text()` or :func:`literal_column()` constructs. .. seealso:: :ref:`coretutorial_selecting` - Core Tutorial description of :func:`.select`. :param columns: A list of :class:`.ColumnElement` or :class:`.FromClause` objects which will form the columns clause of the resulting statement. For those objects that are instances of :class:`.FromClause` (typically :class:`.Table` or :class:`.Alias` objects), the :attr:`.FromClause.c` collection is extracted to form a collection of :class:`.ColumnElement` objects. This parameter will also accept :class:`.Text` constructs as given, as well as ORM-mapped classes. .. note:: The :paramref:`.select.columns` parameter is not available in the method form of :func:`.select`, e.g. :meth:`.FromClause.select`. .. seealso:: :meth:`.Select.column` :meth:`.Select.with_only_columns` :param whereclause: A :class:`.ClauseElement` expression which will be used to form the ``WHERE`` clause. It is typically preferable to add WHERE criterion to an existing :class:`.Select` using method chaining with :meth:`.Select.where`. .. seealso:: :meth:`.Select.where` :param from_obj: A list of :class:`.ClauseElement` objects which will be added to the ``FROM`` clause of the resulting statement. 
This is equivalent to calling :meth:`.Select.select_from` using method chaining on an existing :class:`.Select` object. .. seealso:: :meth:`.Select.select_from` - full description of explicit FROM clause specification. :param autocommit: Deprecated. Use ``.execution_options(autocommit=<True|False>)`` to set the autocommit option. .. seealso:: :meth:`.Executable.execution_options` :param bind=None: an :class:`~.Engine` or :class:`~.Connection` instance to which the resulting :class:`.Select` object will be bound. The :class:`.Select` object will otherwise automatically bind to whatever :class:`~.base.Connectable` instances can be located within its contained :class:`.ClauseElement` members. :param correlate=True: indicates that this :class:`.Select` object should have its contained :class:`.FromClause` elements "correlated" to an enclosing :class:`.Select` object. It is typically preferable to specify correlations on an existing :class:`.Select` construct using :meth:`.Select.correlate`. .. seealso:: :meth:`.Select.correlate` - full description of correlation. :param distinct=False: when ``True``, applies a ``DISTINCT`` qualifier to the columns clause of the resulting statement. The boolean argument may also be a column expression or list of column expressions - this is a special calling form which is understood by the Postgresql dialect to render the ``DISTINCT ON (<columns>)`` syntax. ``distinct`` is also available on an existing :class:`.Select` object via the :meth:`~.Select.distinct` method. .. seealso:: :meth:`.Select.distinct` :param for_update=False: when ``True``, applies ``FOR UPDATE`` to the end of the resulting statement. .. deprecated:: 0.9.0 - use :meth:`.Select.with_for_update` to specify the structure of the ``FOR UPDATE`` clause. ``for_update`` accepts various string values interpreted by specific backends, including: * ``"read"`` - on MySQL, translates to ``LOCK IN SHARE MODE``; on Postgresql, translates to ``FOR SHARE``. 
* ``"nowait"`` - on Postgresql and Oracle, translates to ``FOR UPDATE NOWAIT``. * ``"read_nowait"`` - on Postgresql, translates to ``FOR SHARE NOWAIT``. .. seealso:: :meth:`.Select.with_for_update` - improved API for specifying the ``FOR UPDATE`` clause. :param group_by: a list of :class:`.ClauseElement` objects which will comprise the ``GROUP BY`` clause of the resulting select. This parameter is typically specified more naturally using the :meth:`.Select.group_by` method on an existing :class:`.Select`. .. seealso:: :meth:`.Select.group_by` :param having: a :class:`.ClauseElement` that will comprise the ``HAVING`` clause of the resulting select when ``GROUP BY`` is used. This parameter is typically specified more naturally using the :meth:`.Select.having` method on an existing :class:`.Select`. .. seealso:: :meth:`.Select.having` :param limit=None: a numerical value which usually renders as a ``LIMIT`` expression in the resulting select. Backends that don't support ``LIMIT`` will attempt to provide similar functionality. This parameter is typically specified more naturally using the :meth:`.Select.limit` method on an existing :class:`.Select`. .. seealso:: :meth:`.Select.limit` :param offset=None: a numeric value which usually renders as an ``OFFSET`` expression in the resulting select. Backends that don't support ``OFFSET`` will attempt to provide similar functionality. This parameter is typically specified more naturally using the :meth:`.Select.offset` method on an existing :class:`.Select`. .. seealso:: :meth:`.Select.offset` :param order_by: a scalar or list of :class:`.ClauseElement` objects which will comprise the ``ORDER BY`` clause of the resulting select. This parameter is typically specified more naturally using the :meth:`.Select.order_by` method on an existing :class:`.Select`. .. 
seealso:: :meth:`.Select.order_by` :param use_labels=False: when ``True``, the statement will be generated using labels for each column in the columns clause, which qualify each column with its parent table's (or aliases) name so that name conflicts between columns in different tables don't occur. The format of the label is <tablename>_<column>. The "c" collection of the resulting :class:`.Select` object will use these names as well for targeting column members. This parameter can also be specified on an existing :class:`.Select` object using the :meth:`.Select.apply_labels` method. .. seealso:: :meth:`.Select.apply_labels` """ self._auto_correlate = correlate if distinct is not False: if distinct is True: self._distinct = True else: self._distinct = [ _literal_as_text(e) for e in util.to_list(distinct) ] if from_obj is not None: self._from_obj = util.OrderedSet( _interpret_as_from(f) for f in util.to_list(from_obj)) else: self._from_obj = util.OrderedSet() try: cols_present = bool(columns) except TypeError: raise exc.ArgumentError("columns argument to select() must " "be a Python list or other iterable") if cols_present: self._raw_columns = [] for c in columns: c = _interpret_as_column_or_from(c) if isinstance(c, ScalarSelect): c = c.self_group(against=operators.comma_op) self._raw_columns.append(c) else: self._raw_columns = [] if whereclause is not None: self._whereclause = _literal_as_text( whereclause).self_group(against=operators._asbool) else: self._whereclause = None if having is not None: self._having = _literal_as_text( having).self_group(against=operators._asbool) else: self._having = None if prefixes: self._setup_prefixes(prefixes) if suffixes: self._setup_suffixes(suffixes) GenerativeSelect.__init__(self, **kwargs) @property def _froms(self): # would love to cache this, # but there's just enough edge cases, particularly now that # declarative encourages construction of SQL expressions # without tables present, to just regen this each time. 
        froms = []
        # dedupe on each item's clone lineage (_cloned_set), not plain
        # identity, so a table and its clones count as one FROM
        seen = set()
        translate = self._from_cloned

        for item in itertools.chain(
                _from_objects(*self._raw_columns),
                _from_objects(self._whereclause)
                if self._whereclause is not None else (),
                self._from_obj
        ):
            # a select can never be a FROM of itself
            if item is self:
                raise exc.InvalidRequestError(
                    "select() construct refers to itself as a FROM")
            if translate and item in translate:
                item = translate[item]
            if not seen.intersection(item._cloned_set):
                froms.append(item)
            seen.update(item._cloned_set)

        return froms

    def _get_display_froms(self, explicit_correlate_froms=None,
                           implicit_correlate_froms=None):
        """Return the full list of 'from' clauses to be displayed.

        Takes into account a set of existing froms which may be
        rendered in the FROM clause of enclosing selects; this Select
        may want to leave those absent if it is automatically
        correlating.

        """
        froms = self._froms

        toremove = set(itertools.chain(*[
            _expand_cloned(f._hide_froms)
            for f in froms]))
        if toremove:
            # if we're maintaining clones of froms,
            # add the copies out to the toremove list.  only include
            # clones that are lexical equivalents.
            if self._from_cloned:
                toremove.update(
                    self._from_cloned[f] for f in
                    toremove.intersection(self._from_cloned)
                    if self._from_cloned[f]._is_lexical_equivalent(f)
                )
            # filter out to FROM clauses not in the list,
            # using a list to maintain ordering
            froms = [f for f in froms if f not in toremove]

        # explicit correlate(): drop FROMs that also appear in the
        # enclosing statement AND were named in the correlate set
        if self._correlate:
            to_correlate = self._correlate
            if to_correlate:
                froms = [
                    f for f in froms if f not in
                    _cloned_intersection(
                        _cloned_intersection(
                            froms, explicit_correlate_froms or ()),
                        to_correlate
                    )
                ]

        # correlate_except(): correlate everything shared with the
        # enclosing statement EXCEPT the named FROMs
        if self._correlate_except is not None:

            froms = [
                f for f in froms if f not in
                _cloned_difference(
                    _cloned_intersection(
                        froms, explicit_correlate_froms or ()),
                    self._correlate_except
                )
            ]

        # auto-correlation: only while more than one FROM remains, so a
        # subquery never loses its sole FROM
        if self._auto_correlate and \
                implicit_correlate_froms and \
                len(froms) > 1:

            froms = [
                f for f in froms if f not in
                _cloned_intersection(froms, implicit_correlate_froms)
            ]

            if not len(froms):
                raise exc.InvalidRequestError("Select statement '%s"
                                              "' returned no FROM clauses "
                                              "due to auto-correlation; "
                                              "specify correlate(<tables>) "
                                              "to control correlation "
                                              "manually." % self)

        return froms

    def _scalar_type(self):
        # type of the first column of the first columns-clause entry
        elem = self._raw_columns[0]
        cols = list(elem._select_iterable)
        return cols[0].type

    @property
    def froms(self):
        """Return the displayed list of FromClause elements."""

        return self._get_display_froms()

    def with_statement_hint(self, text, dialect_name='*'):
        """add a statement hint to this :class:`.Select`.

        This method is similar to :meth:`.Select.with_hint` except that
        it does not require an individual table, and instead applies to
        the statement as a whole.

        Hints here are specific to the backend database and may include
        directives such as isolation levels, file directives, fetch
        directives, etc.

        .. versionadded:: 1.0.0

        .. seealso::

            :meth:`.Select.with_hint`
        """
        return self.with_hint(None, text, dialect_name)

    @_generative
    def with_hint(self, selectable, text, dialect_name='*'):
        """Add an indexing or other executional context hint for the given
        selectable to this :class:`.Select`.

        The text of the hint is rendered in the appropriate
        location for the database backend in use, relative
        to the given :class:`.Table` or :class:`.Alias` passed as the
        ``selectable`` argument. The dialect implementation
        typically uses Python string substitution syntax
        with the token ``%(name)s`` to render the name of
        the table or alias. E.g. when using Oracle, the
        following::

            select([mytable]).\\
                with_hint(mytable, "index(%(name)s ix_mytable)")

        Would render SQL as::

            select /*+ index(mytable ix_mytable) */ ... from mytable

        The ``dialect_name`` option will limit the rendering of a particular
        hint to a particular backend. Such as, to add hints for both Oracle
        and Sybase simultaneously::

            select([mytable]).\\
                with_hint(mytable, "index(%(name)s ix_mytable)", 'oracle').\\
                with_hint(mytable, "WITH INDEX ix_mytable", 'sybase')

        .. seealso::

            :meth:`.Select.with_statement_hint`

        """
        # a None selectable means a statement-level hint (see
        # with_statement_hint); otherwise the hint is keyed per
        # (selectable, dialect)
        if selectable is None:
            self._statement_hints += ((dialect_name, text), )
        else:
            self._hints = self._hints.union(
                {(selectable, dialect_name): text})

    @property
    def type(self):
        raise exc.InvalidRequestError("Select objects don't have a type. "
                                      "Call as_scalar() on this Select "
                                      "object to return a 'scalar' version "
                                      "of this Select.")

    @_memoized_property.method
    def locate_all_froms(self):
        """return a Set of all FromClause elements referenced by this Select.

        This set is a superset of that returned by the ``froms`` property,
        which is specifically for those FromClause elements that would
        actually be rendered.

        """
        froms = self._froms
        return froms + list(_from_objects(*froms))

    @property
    def inner_columns(self):
        """an iterator of all ColumnElement expressions which would
        be rendered into the columns clause of the resulting
        SELECT statement.
""" return _select_iterables(self._raw_columns) @_memoized_property def _label_resolve_dict(self): with_cols = dict( (c._resolve_label or c._label or c.key, c) for c in _select_iterables(self._raw_columns) if c._allow_label_resolve) only_froms = dict( (c.key, c) for c in _select_iterables(self.froms) if c._allow_label_resolve) for key, value in only_froms.items(): with_cols.setdefault(key, value) return with_cols, only_froms def is_derived_from(self, fromclause): if self in fromclause._cloned_set: return True for f in self.locate_all_froms(): if f.is_derived_from(fromclause): return True return False def _copy_internals(self, clone=_clone, **kw): super(Select, self)._copy_internals(clone, **kw) # Select() object has been cloned and probably adapted by the # given clone function. Apply the cloning function to internal # objects # 1. keep a dictionary of the froms we've cloned, and what # they've become. This is consulted later when we derive # additional froms from "whereclause" and the columns clause, # which may still reference the uncloned parent table. # as of 0.7.4 we also put the current version of _froms, which # gets cleared on each generation. previously we were "baking" # _froms into self._from_obj. self._from_cloned = from_cloned = dict( (f, clone(f, **kw)) for f in self._from_obj.union(self._froms)) # 3. update persistent _from_obj with the cloned versions. self._from_obj = util.OrderedSet(from_cloned[f] for f in self._from_obj) # the _correlate collection is done separately, what can happen # here is the same item is _correlate as in _from_obj but the # _correlate version has an annotation on it - (specifically # RelationshipProperty.Comparator._criterion_exists() does # this). Also keep _correlate liberally open with its previous # contents, as this set is used for matching, not rendering. self._correlate = set(clone(f) for f in self._correlate).union(self._correlate) # 4. clone other things. 
The difficulty here is that Column # objects are not actually cloned, and refer to their original # .table, resulting in the wrong "from" parent after a clone # operation. Hence _from_cloned and _from_obj supersede what is # present here. self._raw_columns = [clone(c, **kw) for c in self._raw_columns] for attr in '_whereclause', '_having', '_order_by_clause', \ '_group_by_clause', '_for_update_arg': if getattr(self, attr) is not None: setattr(self, attr, clone(getattr(self, attr), **kw)) # erase exported column list, _froms collection, # etc. self._reset_exported() def get_children(self, column_collections=True, **kwargs): """return child elements as per the ClauseElement specification.""" return (column_collections and list(self.columns) or []) + \ self._raw_columns + list(self._froms) + \ [x for x in (self._whereclause, self._having, self._order_by_clause, self._group_by_clause) if x is not None] @_generative def column(self, column): """return a new select() construct with the given column expression added to its columns clause. """ self.append_column(column) @util.dependencies("sqlalchemy.sql.util") def reduce_columns(self, sqlutil, only_synonyms=True): """Return a new :func`.select` construct with redundantly named, equivalently-valued columns removed from the columns clause. "Redundant" here means two columns where one refers to the other either based on foreign key, or via a simple equality comparison in the WHERE clause of the statement. The primary purpose of this method is to automatically construct a select statement with all uniquely-named columns, without the need to use table-qualified labels as :meth:`.apply_labels` does. When columns are omitted based on foreign key, the referred-to column is the one that's kept. When columns are omitted based on WHERE eqivalence, the first column in the columns clause is the one that's kept. :param only_synonyms: when True, limit the removal of columns to those which have the same name as the equivalent. 
Otherwise, all columns that are equivalent to another are removed. .. versionadded:: 0.8 """ return self.with_only_columns( sqlutil.reduce_columns( self.inner_columns, only_synonyms=only_synonyms, *(self._whereclause, ) + tuple(self._from_obj) ) ) @_generative def with_only_columns(self, columns): """Return a new :func:`.select` construct with its columns clause replaced with the given columns. .. versionchanged:: 0.7.3 Due to a bug fix, this method has a slight behavioral change as of version 0.7.3. Prior to version 0.7.3, the FROM clause of a :func:`.select` was calculated upfront and as new columns were added; in 0.7.3 and later it's calculated at compile time, fixing an issue regarding late binding of columns to parent tables. This changes the behavior of :meth:`.Select.with_only_columns` in that FROM clauses no longer represented in the new list are dropped, but this behavior is more consistent in that the FROM clauses are consistently derived from the current columns clause. The original intent of this method is to allow trimming of the existing columns list to be fewer columns than originally present; the use case of replacing the columns list with an entirely different one hadn't been anticipated until 0.7.3 was released; the usage guidelines below illustrate how this should be done. This method is exactly equivalent to as if the original :func:`.select` had been called with the given columns clause. I.e. 
a statement:: s = select([table1.c.a, table1.c.b]) s = s.with_only_columns([table1.c.b]) should be exactly equivalent to:: s = select([table1.c.b]) This means that FROM clauses which are only derived from the column list will be discarded if the new column list no longer contains that FROM:: >>> table1 = table('t1', column('a'), column('b')) >>> table2 = table('t2', column('a'), column('b')) >>> s1 = select([table1.c.a, table2.c.b]) >>> print s1 SELECT t1.a, t2.b FROM t1, t2 >>> s2 = s1.with_only_columns([table2.c.b]) >>> print s2 SELECT t2.b FROM t1 The preferred way to maintain a specific FROM clause in the construct, assuming it won't be represented anywhere else (i.e. not in the WHERE clause, etc.) is to set it using :meth:`.Select.select_from`:: >>> s1 = select([table1.c.a, table2.c.b]).\\ ... select_from(table1.join(table2, ... table1.c.a==table2.c.a)) >>> s2 = s1.with_only_columns([table2.c.b]) >>> print s2 SELECT t2.b FROM t1 JOIN t2 ON t1.a=t2.a Care should also be taken to use the correct set of column objects passed to :meth:`.Select.with_only_columns`. Since the method is essentially equivalent to calling the :func:`.select` construct in the first place with the given columns, the columns passed to :meth:`.Select.with_only_columns` should usually be a subset of those which were passed to the :func:`.select` construct, not those which are available from the ``.c`` collection of that :func:`.select`. That is:: s = select([table1.c.a, table1.c.b]).select_from(table1) s = s.with_only_columns([table1.c.b]) and **not**:: # usually incorrect s = s.with_only_columns([s.c.b]) The latter would produce the SQL:: SELECT b FROM (SELECT t1.a AS a, t1.b AS b FROM t1), t1 Since the :func:`.select` construct is essentially being asked to select both from ``table1`` as well as itself. 
""" self._reset_exported() rc = [] for c in columns: c = _interpret_as_column_or_from(c) if isinstance(c, ScalarSelect): c = c.self_group(against=operators.comma_op) rc.append(c) self._raw_columns = rc @_generative def where(self, whereclause): """return a new select() construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any. """ self.append_whereclause(whereclause) @_generative def having(self, having): """return a new select() construct with the given expression added to its HAVING clause, joined to the existing clause via AND, if any. """ self.append_having(having) @_generative def distinct(self, *expr): """Return a new select() construct which will apply DISTINCT to its columns clause. :param \*expr: optional column expressions. When present, the Postgresql dialect will render a ``DISTINCT ON (<expressions>>)`` construct. """ if expr: expr = [_literal_as_label_reference(e) for e in expr] if isinstance(self._distinct, list): self._distinct = self._distinct + expr else: self._distinct = expr else: self._distinct = True @_generative def select_from(self, fromclause): """return a new :func:`.select` construct with the given FROM expression merged into its list of FROM objects. E.g.:: table1 = table('t1', column('a')) table2 = table('t2', column('b')) s = select([table1.c.a]).\\ select_from( table1.join(table2, table1.c.a==table2.c.b) ) The "from" list is a unique set on the identity of each element, so adding an already present :class:`.Table` or other selectable will have no effect. Passing a :class:`.Join` that refers to an already present :class:`.Table` or other selectable will have the effect of concealing the presence of that selectable as an individual element in the rendered FROM list, instead rendering it into a JOIN clause. 
While the typical purpose of :meth:`.Select.select_from` is to replace the default, derived FROM clause with a join, it can also be called with individual table elements, multiple times if desired, in the case that the FROM clause cannot be fully derived from the columns clause:: select([func.count('*')]).select_from(table1) """ self.append_from(fromclause) @_generative def correlate(self, *fromclauses): """return a new :class:`.Select` which will correlate the given FROM clauses to that of an enclosing :class:`.Select`. Calling this method turns off the :class:`.Select` object's default behavior of "auto-correlation". Normally, FROM elements which appear in a :class:`.Select` that encloses this one via its :term:`WHERE clause`, ORDER BY, HAVING or :term:`columns clause` will be omitted from this :class:`.Select` object's :term:`FROM clause`. Setting an explicit correlation collection using the :meth:`.Select.correlate` method provides a fixed list of FROM objects that can potentially take place in this process. When :meth:`.Select.correlate` is used to apply specific FROM clauses for correlation, the FROM elements become candidates for correlation regardless of how deeply nested this :class:`.Select` object is, relative to an enclosing :class:`.Select` which refers to the same FROM object. This is in contrast to the behavior of "auto-correlation" which only correlates to an immediate enclosing :class:`.Select`. Multi-level correlation ensures that the link between enclosed and enclosing :class:`.Select` is always via at least one WHERE/ORDER BY/HAVING/columns clause in order for correlation to take place. If ``None`` is passed, the :class:`.Select` object will correlate none of its FROM entries, and all will render unconditionally in the local FROM clause. :param \*fromclauses: a list of one or more :class:`.FromClause` constructs, or other compatible constructs (i.e. ORM-mapped classes) to become part of the correlate collection. .. 
versionchanged:: 0.8.0 ORM-mapped classes are accepted by :meth:`.Select.correlate`. .. versionchanged:: 0.8.0 The :meth:`.Select.correlate` method no longer unconditionally removes entries from the FROM clause; instead, the candidate FROM entries must also be matched by a FROM entry located in an enclosing :class:`.Select`, which ultimately encloses this one as present in the WHERE clause, ORDER BY clause, HAVING clause, or columns clause of an enclosing :meth:`.Select`. .. versionchanged:: 0.8.2 explicit correlation takes place via any level of nesting of :class:`.Select` objects; in previous 0.8 versions, correlation would only occur relative to the immediate enclosing :class:`.Select` construct. .. seealso:: :meth:`.Select.correlate_except` :ref:`correlated_subqueries` """ self._auto_correlate = False if fromclauses and fromclauses[0] is None: self._correlate = () else: self._correlate = set(self._correlate).union( _interpret_as_from(f) for f in fromclauses) @_generative def correlate_except(self, *fromclauses): """return a new :class:`.Select` which will omit the given FROM clauses from the auto-correlation process. Calling :meth:`.Select.correlate_except` turns off the :class:`.Select` object's default behavior of "auto-correlation" for the given FROM elements. An element specified here will unconditionally appear in the FROM list, while all other FROM elements remain subject to normal auto-correlation behaviors. .. versionchanged:: 0.8.2 The :meth:`.Select.correlate_except` method was improved to fully prevent FROM clauses specified here from being omitted from the immediate FROM clause of this :class:`.Select`. If ``None`` is passed, the :class:`.Select` object will correlate all of its FROM entries. .. versionchanged:: 0.8.2 calling ``correlate_except(None)`` will correctly auto-correlate all FROM clauses. :param \*fromclauses: a list of one or more :class:`.FromClause` constructs, or other compatible constructs (i.e. 
ORM-mapped classes) to become part of the correlate-exception collection. .. seealso:: :meth:`.Select.correlate` :ref:`correlated_subqueries` """ self._auto_correlate = False if fromclauses and fromclauses[0] is None: self._correlate_except = () else: self._correlate_except = set(self._correlate_except or ()).union( _interpret_as_from(f) for f in fromclauses) def append_correlation(self, fromclause): """append the given correlation expression to this select() construct. This is an **in-place** mutation method; the :meth:`~.Select.correlate` method is preferred, as it provides standard :term:`method chaining`. """ self._auto_correlate = False self._correlate = set(self._correlate).union( _interpret_as_from(f) for f in fromclause) def append_column(self, column): """append the given column expression to the columns clause of this select() construct. This is an **in-place** mutation method; the :meth:`~.Select.column` method is preferred, as it provides standard :term:`method chaining`. """ self._reset_exported() column = _interpret_as_column_or_from(column) if isinstance(column, ScalarSelect): column = column.self_group(against=operators.comma_op) self._raw_columns = self._raw_columns + [column] def append_prefix(self, clause): """append the given columns clause prefix expression to this select() construct. This is an **in-place** mutation method; the :meth:`~.Select.prefix_with` method is preferred, as it provides standard :term:`method chaining`. """ clause = _literal_as_text(clause) self._prefixes = self._prefixes + (clause,) def append_whereclause(self, whereclause): """append the given expression to this select() construct's WHERE criterion. The expression will be joined to existing WHERE criterion via AND. This is an **in-place** mutation method; the :meth:`~.Select.where` method is preferred, as it provides standard :term:`method chaining`. 
""" self._reset_exported() self._whereclause = and_( True_._ifnone(self._whereclause), whereclause) def append_having(self, having): """append the given expression to this select() construct's HAVING criterion. The expression will be joined to existing HAVING criterion via AND. This is an **in-place** mutation method; the :meth:`~.Select.having` method is preferred, as it provides standard :term:`method chaining`. """ self._reset_exported() self._having = and_(True_._ifnone(self._having), having) def append_from(self, fromclause): """append the given FromClause expression to this select() construct's FROM clause. This is an **in-place** mutation method; the :meth:`~.Select.select_from` method is preferred, as it provides standard :term:`method chaining`. """ self._reset_exported() fromclause = _interpret_as_from(fromclause) self._from_obj = self._from_obj.union([fromclause]) @_memoized_property def _columns_plus_names(self): if self.use_labels: names = set() def name_for_col(c): if c._label is None or not c._render_label_in_columns_clause: return (None, c) name = c._label if name in names: name = c.anon_label else: names.add(name) return name, c return [ name_for_col(c) for c in util.unique_list( _select_iterables(self._raw_columns)) ] else: return [ (None, c) for c in util.unique_list( _select_iterables(self._raw_columns)) ] def _populate_column_collection(self): for name, c in self._columns_plus_names: if not hasattr(c, '_make_proxy'): continue if name is None: key = None elif self.use_labels: key = c._key_label if key is not None and key in self.c: key = c.anon_label else: key = None c._make_proxy(self, key=key, name=name, name_is_truncatable=True) def _refresh_for_new_column(self, column): for fromclause in self._froms: col = fromclause._refresh_for_new_column(column) if col is not None: if col in self.inner_columns and self._cols_populated: our_label = col._key_label if self.use_labels else col.key if our_label not in self.c: return col._make_proxy( self, 
name=col._label if self.use_labels else None, key=col._key_label if self.use_labels else None, name_is_truncatable=True) return None return None def self_group(self, against=None): """return a 'grouping' construct as per the ClauseElement specification. This produces an element that can be embedded in an expression. Note that this method is called automatically as needed when constructing expressions and should not require explicit use. """ if isinstance(against, CompoundSelect): return self return FromGrouping(self) def union(self, other, **kwargs): """return a SQL UNION of this select() construct against the given selectable.""" return CompoundSelect._create_union(self, other, **kwargs) def union_all(self, other, **kwargs): """return a SQL UNION ALL of this select() construct against the given selectable. """ return CompoundSelect._create_union_all(self, other, **kwargs) def except_(self, other, **kwargs): """return a SQL EXCEPT of this select() construct against the given selectable.""" return CompoundSelect._create_except(self, other, **kwargs) def except_all(self, other, **kwargs): """return a SQL EXCEPT ALL of this select() construct against the given selectable. """ return CompoundSelect._create_except_all(self, other, **kwargs) def intersect(self, other, **kwargs): """return a SQL INTERSECT of this select() construct against the given selectable. """ return CompoundSelect._create_intersect(self, other, **kwargs) def intersect_all(self, other, **kwargs): """return a SQL INTERSECT ALL of this select() construct against the given selectable. 
""" return CompoundSelect._create_intersect_all(self, other, **kwargs) def bind(self): if self._bind: return self._bind froms = self._froms if not froms: for c in self._raw_columns: e = c.bind if e: self._bind = e return e else: e = list(froms)[0].bind if e: self._bind = e return e return None def _set_bind(self, bind): self._bind = bind bind = property(bind, _set_bind) class ScalarSelect(Generative, Grouping): _from_objects = [] _is_from_container = True def __init__(self, element): self.element = element self.type = element._scalar_type() @property def columns(self): raise exc.InvalidRequestError('Scalar Select expression has no ' 'columns; use this object directly ' 'within a column-level expression.') c = columns @_generative def where(self, crit): """Apply a WHERE clause to the SELECT statement referred to by this :class:`.ScalarSelect`. """ self.element = self.element.where(crit) def self_group(self, **kwargs): return self class Exists(UnaryExpression): """Represent an ``EXISTS`` clause. """ __visit_name__ = UnaryExpression.__visit_name__ _from_objects = [] def __init__(self, *args, **kwargs): """Construct a new :class:`.Exists` against an existing :class:`.Select` object. Calling styles are of the following forms:: # use on an existing select() s = select([table.c.col1]).where(table.c.col2==5) s = exists(s) # construct a select() at once exists(['*'], **select_arguments).where(criterion) # columns argument is optional, generates "EXISTS (SELECT *)" # by default. 
exists().where(table.c.col2==5) """ if args and isinstance(args[0], (SelectBase, ScalarSelect)): s = args[0] else: if not args: args = ([literal_column('*')],) s = Select(*args, **kwargs).as_scalar().self_group() UnaryExpression.__init__(self, s, operator=operators.exists, type_=type_api.BOOLEANTYPE, wraps_column_expression=True) def select(self, whereclause=None, **params): return Select([self], whereclause, **params) def correlate(self, *fromclause): e = self._clone() e.element = self.element.correlate(*fromclause).self_group() return e def correlate_except(self, *fromclause): e = self._clone() e.element = self.element.correlate_except(*fromclause).self_group() return e def select_from(self, clause): """return a new :class:`.Exists` construct, applying the given expression to the :meth:`.Select.select_from` method of the select statement contained. """ e = self._clone() e.element = self.element.select_from(clause).self_group() return e def where(self, clause): """return a new exists() construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any. """ e = self._clone() e.element = self.element.where(clause).self_group() return e class TextAsFrom(SelectBase): """Wrap a :class:`.TextClause` construct within a :class:`.SelectBase` interface. This allows the :class:`.TextClause` object to gain a ``.c`` collection and other FROM-like capabilities such as :meth:`.FromClause.alias`, :meth:`.SelectBase.cte`, etc. The :class:`.TextAsFrom` construct is produced via the :meth:`.TextClause.columns` method - see that method for details. .. versionadded:: 0.9.0 .. 
seealso:: :func:`.text` :meth:`.TextClause.columns` """ __visit_name__ = "text_as_from" _textual = True def __init__(self, text, columns): self.element = text self.column_args = columns @property def _bind(self): return self.element._bind @_generative def bindparams(self, *binds, **bind_as_values): self.element = self.element.bindparams(*binds, **bind_as_values) def _populate_column_collection(self): for c in self.column_args: c._make_proxy(self) def _copy_internals(self, clone=_clone, **kw): self._reset_exported() self.element = clone(self.element, **kw) def _scalar_type(self): return self.column_args[0].type class AnnotatedFromClause(Annotated): def __init__(self, element, values): # force FromClause to generate their internal # collections into __dict__ element.c Annotated.__init__(self, element, values)
gpl-2.0
insomnia-lab/calibre
src/calibre/devices/usbms/deviceconfig.py
5
5614
# -*- coding: utf-8 -*-

__license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

from calibre.utils.config_base import Config, ConfigProxy


class DeviceConfig(object):
    """Mixin giving a device driver persistent, per-driver settings
    (format map, save template, extra customization, ...) plus the
    plumbing to edit and persist them from a config widget.

    NOTE(review): ``_`` (translation) is assumed to be installed as a
    builtin by calibre before this module is imported -- confirm.
    """

    HELP_MESSAGE = _('Configure Device')

    #: Can be None, a string or a list of strings. When it is a string
    #: that string is used for the help text and the actual customization value
    #: can be read from ``dev.settings().extra_customization``.
    #: If it a list of strings, then dev.settings().extra_customization will
    #: also be a list. In this case, you *must* ensure that
    #: EXTRA_CUSTOMIZATION_DEFAULT is also a list. The list can contain either
    #: boolean values or strings, in which case a checkbox or line edit will be
    #: used for them in the config widget, automatically.
    #: If a string contains ::: then the text after it is interpreted as the
    #: tooltip
    EXTRA_CUSTOMIZATION_MESSAGE = None

    #: The default value for extra customization. If you set
    #: EXTRA_CUSTOMIZATION_MESSAGE you *must* set this as well.
    EXTRA_CUSTOMIZATION_DEFAULT = None

    SUPPORTS_SUB_DIRS = False
    # This setting is used when scanning for books when SUPPORTS_SUB_DIRS
    # is False
    SUPPORTS_SUB_DIRS_FOR_SCAN = False
    SUPPORTS_SUB_DIRS_DEFAULT = True

    MUST_READ_METADATA = False
    SUPPORTS_USE_AUTHOR_SORT = False

    #: If None the default is used
    SAVE_TEMPLATE = None

    #: If True the user can add new formats to the driver
    USER_CAN_ADD_NEW_FORMATS = True

    @classmethod
    def _default_save_template(cls):
        # Fall back to the library-wide send-to-device template when the
        # driver does not declare its own SAVE_TEMPLATE.
        from calibre.library.save_to_disk import config
        return cls.SAVE_TEMPLATE if cls.SAVE_TEMPLATE else \
                config().parse().send_template

    @classmethod
    def _config_base_name(cls):
        # Works whether invoked on the class or on an instance.
        klass = cls if isinstance(cls, type) else cls.__class__
        return klass.__name__

    @classmethod
    def _config(cls):
        # Build the Config object declaring this driver's options. The
        # storage name is derived from the class name, so every driver
        # subclass gets its own settings namespace.
        name = cls._config_base_name()
        c = Config('device_drivers_%s' % name, _('settings for device drivers'))
        c.add_opt('format_map', default=cls.FORMATS,
            help=_('Ordered list of formats the device will accept'))
        c.add_opt('use_subdirs', default=cls.SUPPORTS_SUB_DIRS_DEFAULT,
            help=_('Place files in sub directories if the device supports them'))
        c.add_opt('read_metadata', default=True,
            help=_('Read metadata from files on device'))
        c.add_opt('use_author_sort', default=False,
            help=_('Use author sort instead of author'))
        c.add_opt('save_template', default=cls._default_save_template(),
            help=_('Template to control how books are saved'))
        c.add_opt('extra_customization',
            default=cls.EXTRA_CUSTOMIZATION_DEFAULT,
            help=_('Extra customization'))
        return c

    @classmethod
    def _configProxy(cls):
        return ConfigProxy(cls._config())

    @classmethod
    def config_widget(cls):
        # Widget shown by the GUI's device-configuration dialog.
        from calibre.gui2.device_drivers.configwidget import ConfigWidget
        cw = ConfigWidget(cls.settings(), cls.FORMATS, cls.SUPPORTS_SUB_DIRS,
            cls.MUST_READ_METADATA, cls.SUPPORTS_USE_AUTHOR_SORT,
            cls.EXTRA_CUSTOMIZATION_MESSAGE, cls)
        return cw

    @classmethod
    def save_settings(cls, config_widget):
        # Persist the values currently shown in config_widget.
        proxy = cls._configProxy()
        proxy['format_map'] = config_widget.format_map()
        if cls.SUPPORTS_SUB_DIRS:
            proxy['use_subdirs'] = config_widget.use_subdirs()
        if not cls.MUST_READ_METADATA:
            proxy['read_metadata'] = config_widget.read_metadata()
        if cls.SUPPORTS_USE_AUTHOR_SORT:
            proxy['use_author_sort'] = config_widget.use_author_sort()
        if cls.EXTRA_CUSTOMIZATION_MESSAGE:
            if isinstance(cls.EXTRA_CUSTOMIZATION_MESSAGE, list):
                # One widget per message: checkboxes map to bools, line
                # edits to stripped strings, absent widgets to None.
                ec = []
                for i in range(0, len(cls.EXTRA_CUSTOMIZATION_MESSAGE)):
                    if config_widget.opt_extra_customization[i] is None:
                        ec.append(None)
                        continue
                    if hasattr(config_widget.opt_extra_customization[i], 'isChecked'):
                        ec.append(config_widget.opt_extra_customization[i].isChecked())
                    else:
                        ec.append(unicode(config_widget.opt_extra_customization[i].text()).strip())
            else:
                # Single line-edit case: an empty string is stored as None.
                ec = unicode(config_widget.opt_extra_customization.text()).strip()
                if not ec:
                    ec = None
            proxy['extra_customization'] = ec
        st = unicode(config_widget.opt_save_template.text())
        proxy['save_template'] = st

    @classmethod
    def settings(cls):
        # Parse stored settings; when the driver declares a list-valued
        # EXTRA_CUSTOMIZATION_DEFAULT, coerce and pad the stored value so
        # the returned list always matches the declared length.
        opts = cls._config().parse()
        if isinstance(cls.EXTRA_CUSTOMIZATION_DEFAULT, list):
            if opts.extra_customization is None:
                opts.extra_customization = []
            if not isinstance(opts.extra_customization, list):
                opts.extra_customization = [opts.extra_customization]
            for i, d in enumerate(cls.EXTRA_CUSTOMIZATION_DEFAULT):
                if i >= len(opts.extra_customization):
                    opts.extra_customization.append(d)
        return opts

    @classmethod
    def save_template(cls):
        # Stored template wins; otherwise the driver/library default.
        st = cls.settings().save_template
        if st:
            return st
        else:
            return cls._default_save_template()

    @classmethod
    def customization_help(cls, gui=False):
        return cls.HELP_MESSAGE
gpl-3.0
clinton-hall/nzbToMedia
libs/common/unidecode/x084.py
252
4646
data = ( 'Hu ', # 0x00 'Qi ', # 0x01 'He ', # 0x02 'Cui ', # 0x03 'Tao ', # 0x04 'Chun ', # 0x05 'Bei ', # 0x06 'Chang ', # 0x07 'Huan ', # 0x08 'Fei ', # 0x09 'Lai ', # 0x0a 'Qi ', # 0x0b 'Meng ', # 0x0c 'Ping ', # 0x0d 'Wei ', # 0x0e 'Dan ', # 0x0f 'Sha ', # 0x10 'Huan ', # 0x11 'Yan ', # 0x12 'Yi ', # 0x13 'Tiao ', # 0x14 'Qi ', # 0x15 'Wan ', # 0x16 'Ce ', # 0x17 'Nai ', # 0x18 'Kutabireru ', # 0x19 'Tuo ', # 0x1a 'Jiu ', # 0x1b 'Tie ', # 0x1c 'Luo ', # 0x1d '[?] ', # 0x1e '[?] ', # 0x1f 'Meng ', # 0x20 '[?] ', # 0x21 'Yaji ', # 0x22 '[?] ', # 0x23 'Ying ', # 0x24 'Ying ', # 0x25 'Ying ', # 0x26 'Xiao ', # 0x27 'Sa ', # 0x28 'Qiu ', # 0x29 'Ke ', # 0x2a 'Xiang ', # 0x2b 'Wan ', # 0x2c 'Yu ', # 0x2d 'Yu ', # 0x2e 'Fu ', # 0x2f 'Lian ', # 0x30 'Xuan ', # 0x31 'Yuan ', # 0x32 'Nan ', # 0x33 'Ze ', # 0x34 'Wo ', # 0x35 'Chun ', # 0x36 'Xiao ', # 0x37 'Yu ', # 0x38 'Pian ', # 0x39 'Mao ', # 0x3a 'An ', # 0x3b 'E ', # 0x3c 'Luo ', # 0x3d 'Ying ', # 0x3e 'Huo ', # 0x3f 'Gua ', # 0x40 'Jiang ', # 0x41 'Mian ', # 0x42 'Zuo ', # 0x43 'Zuo ', # 0x44 'Ju ', # 0x45 'Bao ', # 0x46 'Rou ', # 0x47 'Xi ', # 0x48 'Xie ', # 0x49 'An ', # 0x4a 'Qu ', # 0x4b 'Jian ', # 0x4c 'Fu ', # 0x4d 'Lu ', # 0x4e 'Jing ', # 0x4f 'Pen ', # 0x50 'Feng ', # 0x51 'Hong ', # 0x52 'Hong ', # 0x53 'Hou ', # 0x54 'Yan ', # 0x55 'Tu ', # 0x56 'Zhu ', # 0x57 'Zi ', # 0x58 'Xiang ', # 0x59 'Shen ', # 0x5a 'Ge ', # 0x5b 'Jie ', # 0x5c 'Jing ', # 0x5d 'Mi ', # 0x5e 'Huang ', # 0x5f 'Shen ', # 0x60 'Pu ', # 0x61 'Gai ', # 0x62 'Dong ', # 0x63 'Zhou ', # 0x64 'Qian ', # 0x65 'Wei ', # 0x66 'Bo ', # 0x67 'Wei ', # 0x68 'Pa ', # 0x69 'Ji ', # 0x6a 'Hu ', # 0x6b 'Zang ', # 0x6c 'Jia ', # 0x6d 'Duan ', # 0x6e 'Yao ', # 0x6f 'Jun ', # 0x70 'Cong ', # 0x71 'Quan ', # 0x72 'Wei ', # 0x73 'Xian ', # 0x74 'Kui ', # 0x75 'Ting ', # 0x76 'Hun ', # 0x77 'Xi ', # 0x78 'Shi ', # 0x79 'Qi ', # 0x7a 'Lan ', # 0x7b 'Zong ', # 0x7c 'Yao ', # 0x7d 'Yuan ', # 0x7e 'Mei ', # 0x7f 'Yun ', # 0x80 'Shu ', # 0x81 'Di ', # 0x82 
'Zhuan ', # 0x83 'Guan ', # 0x84 'Sukumo ', # 0x85 'Xue ', # 0x86 'Chan ', # 0x87 'Kai ', # 0x88 'Kui ', # 0x89 '[?] ', # 0x8a 'Jiang ', # 0x8b 'Lou ', # 0x8c 'Wei ', # 0x8d 'Pai ', # 0x8e '[?] ', # 0x8f 'Sou ', # 0x90 'Yin ', # 0x91 'Shi ', # 0x92 'Chun ', # 0x93 'Shi ', # 0x94 'Yun ', # 0x95 'Zhen ', # 0x96 'Lang ', # 0x97 'Nu ', # 0x98 'Meng ', # 0x99 'He ', # 0x9a 'Que ', # 0x9b 'Suan ', # 0x9c 'Yuan ', # 0x9d 'Li ', # 0x9e 'Ju ', # 0x9f 'Xi ', # 0xa0 'Pang ', # 0xa1 'Chu ', # 0xa2 'Xu ', # 0xa3 'Tu ', # 0xa4 'Liu ', # 0xa5 'Wo ', # 0xa6 'Zhen ', # 0xa7 'Qian ', # 0xa8 'Zu ', # 0xa9 'Po ', # 0xaa 'Cuo ', # 0xab 'Yuan ', # 0xac 'Chu ', # 0xad 'Yu ', # 0xae 'Kuai ', # 0xaf 'Pan ', # 0xb0 'Pu ', # 0xb1 'Pu ', # 0xb2 'Na ', # 0xb3 'Shuo ', # 0xb4 'Xi ', # 0xb5 'Fen ', # 0xb6 'Yun ', # 0xb7 'Zheng ', # 0xb8 'Jian ', # 0xb9 'Ji ', # 0xba 'Ruo ', # 0xbb 'Cang ', # 0xbc 'En ', # 0xbd 'Mi ', # 0xbe 'Hao ', # 0xbf 'Sun ', # 0xc0 'Zhen ', # 0xc1 'Ming ', # 0xc2 'Sou ', # 0xc3 'Xu ', # 0xc4 'Liu ', # 0xc5 'Xi ', # 0xc6 'Gu ', # 0xc7 'Lang ', # 0xc8 'Rong ', # 0xc9 'Weng ', # 0xca 'Gai ', # 0xcb 'Cuo ', # 0xcc 'Shi ', # 0xcd 'Tang ', # 0xce 'Luo ', # 0xcf 'Ru ', # 0xd0 'Suo ', # 0xd1 'Xian ', # 0xd2 'Bei ', # 0xd3 'Yao ', # 0xd4 'Gui ', # 0xd5 'Bi ', # 0xd6 'Zong ', # 0xd7 'Gun ', # 0xd8 'Za ', # 0xd9 'Xiu ', # 0xda 'Ce ', # 0xdb 'Hai ', # 0xdc 'Lan ', # 0xdd '[?] ', # 0xde 'Ji ', # 0xdf 'Li ', # 0xe0 'Can ', # 0xe1 'Lang ', # 0xe2 'Yu ', # 0xe3 '[?] ', # 0xe4 'Ying ', # 0xe5 'Mo ', # 0xe6 'Diao ', # 0xe7 'Tiao ', # 0xe8 'Mao ', # 0xe9 'Tong ', # 0xea 'Zhu ', # 0xeb 'Peng ', # 0xec 'An ', # 0xed 'Lian ', # 0xee 'Cong ', # 0xef 'Xi ', # 0xf0 'Ping ', # 0xf1 'Qiu ', # 0xf2 'Jin ', # 0xf3 'Chun ', # 0xf4 'Jie ', # 0xf5 'Wei ', # 0xf6 'Tui ', # 0xf7 'Cao ', # 0xf8 'Yu ', # 0xf9 'Yi ', # 0xfa 'Ji ', # 0xfb 'Liao ', # 0xfc 'Bi ', # 0xfd 'Lu ', # 0xfe 'Su ', # 0xff )
gpl-3.0
n0m4dz/odoo
addons/resource/faces/timescale.py
263
3899
############################################################################
#   Copyright (C) 2005 by Reithinger GmbH
#   mreithinger@web.de
#
#   This file is part of faces.
#
#   faces is free software; you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation; either version 2 of the License, or
#   (at your option) any later version.
#
#   faces is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program; if not, write to the
#   Free Software Foundation, Inc.,
#   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################

import faces.pcalendar as pcal
import openerp.tools as tools
import datetime
import sys


class TimeScale(object):
    # Maps dates of a real ("data") calendar onto a padded "chart"
    # calendar in which every weekday has the same total length, so a
    # chart can use a linear x axis. Python 2 code (sys.maxint,
    # list-returning map/filter, integer division) -- do not port blindly.

    def __init__(self, calendar):
        self.data_calendar = calendar
        self._create_chart_calendar()
        # Numeric chart position of "now" in the data calendar.
        self.now = self.to_num(self.data_calendar.now)

    def to_datetime(self, xval):
        return xval.to_datetime()

    def to_num(self, date):
        # Convert a data-calendar date to a chart-calendar working date
        # (the numeric value used for drawing).
        return self.chart_calendar.WorkingDate(date)

    def is_free_slot(self, value):
        # True when this chart position falls outside the working time of
        # the underlying data calendar (i.e. it exists only as padding).
        dt1 = self.chart_calendar.to_starttime(value)
        dt2 = self.data_calendar.to_starttime\
              (self.data_calendar.from_datetime(dt1))
        return dt1 != dt2

    def is_free_day(self, value):
        # Like is_free_slot, but at whole-day granularity.
        dt1 = self.chart_calendar.to_starttime(value)
        dt2 = self.data_calendar.to_starttime\
              (self.data_calendar.from_datetime(dt1))
        return dt1.date() != dt2.date()

    def _create_chart_calendar(self):
        # Build the padded chart calendar: each day is stretched to the
        # same total length by distributing extra time over the non-working
        # slots between the real working slots.
        dcal = self.data_calendar
        ccal = self.chart_calendar = pcal.Calendar()
        ccal.minimum_time_unit = 1

        #pad worktime slots of calendar (all days should be equally long)
        slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots))
        day_sum = lambda day: slot_sum(dcal.get_working_times(day))

        max_work_time = max(map(day_sum, range(7)))

        #working_time should have 2/3
        sum_time = 3 * max_work_time / 2

        #now create timeslots for ccal
        def create_time_slots(day):
            # Working slots for this weekday, in minutes from midnight.
            src_slots = dcal.get_working_times(day)
            slots = [0, src_slots, 24*60]
            slots = tuple(tools.flatten(slots))
            # Consecutive pairs: every working and non-working interval.
            slots = zip(slots[:-1], slots[1:])

            #balance non working slots
            work_time = slot_sum(src_slots)
            non_work_time = sum_time - work_time

            # Shortest gaps first, so the padding budget is spread evenly.
            non_slots = filter(lambda s: s not in src_slots, slots)
            non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
            non_slots.sort()

            slots = []
            i = 0
            for l, s in non_slots:
                # Share the remaining padding over the remaining gaps,
                # never exceeding a gap's actual length.
                delta = non_work_time / (len(non_slots) - i)
                delta = min(l, delta)
                non_work_time -= delta
                slots.append((s[0], s[0] + delta))
                i += 1

            slots.extend(src_slots)
            slots.sort()
            return slots

        min_delta = sys.maxint
        for i in range(7):
            slots = create_time_slots(i)
            ccal.working_times[i] = slots
            min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots)))

        ccal._recalc_working_time()

        # Granularities used by callers when choosing tick spacing.
        self.slot_delta = min_delta
        self.day_delta = sum_time
        self.week_delta = ccal.week_time


_default_scale = TimeScale(pcal._default_calendar)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
refreshoxford/django-cbv-inspector
cbv/management/commands/populate_cbv.py
1
13731
import importlib
import inspect
import sys

import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand
from django.utils.functional import Promise

from blessings import Terminal

from cbv.models import Project, ProjectVersion, Module, Klass, Inheritance, KlassAttribute, ModuleAttribute, Method

t = Terminal()


class LazyAttribute(object):
    """Display stand-in for a lazily evaluated attribute value.

    A ``django.utils.functional.Promise`` cannot be rendered directly, so
    this wrapper unpicks the promise via ``__reduce__`` and reproduces the
    ``*_lazy(...)`` call that created it.
    """

    # Maps the wrapped function's name to its lazy counterpart.
    functions = {
        'gettext': 'gettext_lazy',
        'reverse': 'reverse_lazy',
        'ugettext': 'ugettext_lazy',
    }

    def __init__(self, promise):
        # promise.__reduce__()[1] is (func, args, kwargs, ...).
        func, self.args, self.kwargs, _ = promise.__reduce__()[1]
        try:
            self.lazy_func = self.functions[func.__name__]
        except KeyError:
            msg = f"'{func.__name__}' not in known lazily called functions"
            raise ImproperlyConfigured(msg)

    def __repr__(self):
        arguments = []
        for arg in self.args:
            if isinstance(arg, str):
                arguments.append(f"'{arg}'")
            else:
                # BUGFIX: non-string args were appended un-stringified,
                # which made the ', '.join() below raise TypeError.
                arguments.append(str(arg))
        # BUGFIX: iterating a dict directly yields only keys, so the
        # ``key, value`` unpacking raised for any promise with kwargs;
        # iterate the items instead.
        for key, value in self.kwargs.items():
            if isinstance(key, str):
                key = f"'{key}'"
            if isinstance(value, str):
                value = f"'{value}'"
            arguments.append(f"{key}: {value}")
        func = self.lazy_func
        arguments = ', '.join(arguments)
        return f'{func}({arguments})'


class Command(BaseCommand):
    """Wipe and repopulate the CBV inspection models for the running Django
    version by introspecting the modules listed in ``settings.CBV_SOURCES``.
    """

    args = ''
    help = 'Wipes and populates the CBV inspection models.'

    # Dunder attributes that are never worth recording.
    banned_attr_names = (
        '__all__',
        '__builtins__',
        '__class__',
        '__dict__',
        '__doc__',
        '__file__',
        '__module__',
        '__name__',
        '__package__',
        '__path__',
        '__spec__',
        '__weakref__',
    )

    def handle(self, *args, **options):
        """Entry point: delete stale rows, then walk every source module."""
        # Delete ALL of the things for this Django version.
        ProjectVersion.objects.filter(
            project__name__iexact='Django',
            version_number=django.get_version(),
        ).delete()
        Inheritance.objects.filter(
            parent__module__project_version__project__name__iexact='Django',
            parent__module__project_version__version_number=django.get_version(),
        ).delete()

        # Setup Project
        self.project_version = ProjectVersion.objects.create(
            project=Project.objects.get_or_create(name='Django')[0],
            version_number=django.get_version(),
        )

        self.klasses = {}        # python class -> Klass model instance
        self.attributes = {}     # (name, value) -> [(Klass node, line), ...]
        self.klass_imports = {}  # python class -> shortest import path seen

        # Set sources appropriate to this version; silently skip modules
        # that do not exist in this Django release.
        self.sources = []
        for source in settings.CBV_SOURCES.keys():
            try:
                self.sources.append(importlib.import_module(source))
            except ImportError:
                pass

        print(t.red('Tree traversal'))
        for source in self.sources:
            self.process_member(source, source.__name__)
        self.create_inheritance()
        self.create_attributes()

    def ok_to_add_module(self, member, parent):
        """Only traverse packages that live under one of our sources."""
        if member.__package__ is None or not any((member.__name__.startswith(source.__name__) for source in self.sources)):
            return False
        return True

    def ok_to_add_klass(self, member, parent):
        """Accept a class only when it is defined in *parent*'s file; record
        shorter import paths for classes merely re-exported here."""
        if any((member.__name__.startswith(source.__name__) for source in self.sources)):
            # TODO: why?
            return False
        try:
            if inspect.getsourcefile(member) != inspect.getsourcefile(parent):
                if parent.__name__ in member.__module__:
                    self.add_new_import_path(member, parent)
                return False
        except TypeError:
            # No source file (e.g. C-level object).
            return False
        return True

    def ok_to_add_method(self, member, parent):
        """Accept a method only when its def lies inside *parent*'s source."""
        if inspect.getsourcefile(member) != inspect.getsourcefile(parent):
            return False
        if not inspect.isclass(parent):
            msg = 'def {}(...): IGNORED because {} is not a class.'.format(
                member.__name__, parent.__name__,
            )
            print(t.red(msg))
            return False
        # Use line inspection to work out whether the method is defined on
        # this klass. Possibly not the best way, but I can't think of
        # another atm.
        lines, start_line = inspect.getsourcelines(member)
        parent_lines, parent_start_line = inspect.getsourcelines(parent)
        if start_line < parent_start_line or start_line > parent_start_line + len(parent_lines):
            return False
        return True

    def ok_to_add_attribute(self, member, member_name, parent):
        """Reject values inherited straight from ``object`` and dunders."""
        if inspect.isclass(parent) and member in object.__dict__.values():
            return False
        if member_name in self.banned_attr_names:
            return False
        return True

    # Same policy applies to class and module attributes.
    ok_to_add_klass_attribute = ok_to_add_module_attribute = ok_to_add_attribute

    def get_code(self, member):
        """Return (dedented source, formatted argspec, start line)."""
        # Strip unneeded whitespace from beginning of code lines
        lines, start_line = inspect.getsourcelines(member)
        whitespace = len(lines[0]) - len(lines[0].lstrip())
        for i, line in enumerate(lines):
            lines[i] = line[whitespace:]
        # Join code lines into one string
        code = ''.join(lines)
        # Get the method arguments.
        # NOTE(review): inspect.formatargspec was removed in Python 3.11;
        # this assumes an older interpreter -- confirm target version.
        arguments = inspect.formatargspec(*inspect.getfullargspec(member))
        return code, arguments, start_line

    def get_docstring(self, member):
        return inspect.getdoc(member) or ''

    def get_value(self, member):
        # Strings are shown quoted, everything else via str().
        return f"'{member}'" if isinstance(member, str) else str(member)

    def get_filename(self, member):
        """Return *member*'s filename relative to its sys.path entry."""
        # Get full file name
        filename = inspect.getfile(member)
        # Find the (longest matching) system path it's in
        sys_folder = max([p for p in sys.path if p in filename], key=len)
        # Get the part of the file name after the folder on the system path.
        filename = filename[len(sys_folder):]
        # Replace `.pyc` file extensions with `.py`
        if filename[-4:] == '.pyc':
            filename = filename[:-1]
        return filename

    def get_line_number(self, member):
        try:
            return inspect.getsourcelines(member)[1]
        except TypeError:
            # Not a source-backed object.
            return -1

    def add_new_import_path(self, member, parent):
        """Record *parent* as an import path for *member*, keeping (and
        persisting) the shortest dotted path seen so far."""
        import_path = parent.__name__
        try:
            current_import_path = self.klass_imports[member]
        except KeyError:
            self.klass_imports[member] = parent.__name__
        else:
            self.update_shortest_import_path(member, current_import_path, import_path)
        try:
            existing_member = Klass.objects.get(
                module__project_version__project__name__iexact='Django',
                module__project_version__version_number=django.get_version(),
                name=member.__name__)
        except Klass.DoesNotExist:
            return
        if self.update_shortest_import_path(member, existing_member.import_path, import_path):
            existing_member.import_path = import_path
            existing_member.save()

    def update_shortest_import_path(self, member, current_import_path, new_import_path):
        """Keep whichever dotted path has fewer components; True if changed."""
        new_length = len(new_import_path.split('.'))
        current_length = len(current_import_path.split('.'))
        if new_length < current_length:
            self.klass_imports[member] = new_import_path
            return True
        return False

    def process_member(self, member, member_name, parent=None, parent_node=None):
        """Recursively record one member (module/class/method/attribute)."""
        # Defensive default so an unmatched member type cannot leave
        # go_deeper unbound (previously a potential NameError).
        go_deeper = False

        # BUILTIN
        if inspect.isbuiltin(member):
            return

        # MODULE
        if inspect.ismodule(member):
            # Only traverse under hierarchy
            if not self.ok_to_add_module(member, parent):
                return
            filename = self.get_filename(member)
            print(t.yellow('module ' + member.__name__), filename)
            # Create Module object
            this_node = Module.objects.create(
                project_version=self.project_version,
                name=member.__name__,
                docstring=self.get_docstring(member),
                filename=filename
            )
            go_deeper = True

        # CLASS
        elif inspect.isclass(member) and inspect.ismodule(parent):
            if not self.ok_to_add_klass(member, parent):
                return
            self.add_new_import_path(member, parent)
            import_path = self.klass_imports[member]
            start_line = self.get_line_number(member)
            print(t.green('class ' + member_name), start_line)
            this_node = Klass.objects.create(
                module=parent_node,
                name=member_name,
                docstring=self.get_docstring(member),
                line_number=start_line,
                import_path=import_path
            )
            self.klasses[member] = this_node
            go_deeper = True

        # METHOD
        elif inspect.ismethod(member) or inspect.isfunction(member):
            decorated = False
            # py2 decoration
            if hasattr(member, 'func'):
                member = member.func
                decorated = True
            if hasattr(member, 'im_func') and getattr(member.im_func, 'func_closure', None):
                member = member.im_func
                decorated = True
            while getattr(member, 'func_closure', None):
                member = member.func_closure[-1].cell_contents
                decorated = True
            # py3 decoration: unwrap functools.wraps chains
            while getattr(member, '__wrapped__', None):
                member = member.__wrapped__
                decorated = True
            # Checks
            if not self.ok_to_add_method(member, parent):
                return
            print(' def ' + member_name)
            code, arguments, start_line = self.get_code(member)
            # Make the Method
            this_node = Method.objects.create(
                klass=parent_node,
                name=member_name,
                docstring=self.get_docstring(member),
                code=code,
                kwargs=arguments[1:-1],
                line_number=start_line,
            )
            go_deeper = False

        # (Class) ATTRIBUTE
        elif inspect.isclass(parent):
            # Replace lazy function call with an object representing it
            if isinstance(member, Promise):
                member = LazyAttribute(member)
            if not self.ok_to_add_klass_attribute(member, member_name, parent):
                return
            value = self.get_value(member)
            attr = (member_name, value)
            start_line = self.get_line_number(member)
            # Collect occurrences; KlassAttributes are created later once
            # inheritance is known (see create_attributes).
            try:
                self.attributes[attr] += [(parent_node, start_line)]
            except KeyError:
                self.attributes[attr] = [(parent_node, start_line)]
            print(' {key} = {val}'.format(key=attr[0], val=attr[1]))
            go_deeper = False

        # (Module) ATTRIBUTE
        elif inspect.ismodule(parent):
            if not self.ok_to_add_module_attribute(member, member_name, parent):
                return
            start_line = self.get_line_number(member)
            this_node = ModuleAttribute.objects.create(
                module=parent_node,
                name=member_name,
                value=self.get_value(member),
                line_number=start_line,
            )
            print('{key} = {val}'.format(key=this_node.name, val=this_node.value))
            go_deeper = False

        # INSPECTION. We have to go deeper ;)
        if go_deeper:
            # Go through members
            for submember_name, submember_type in inspect.getmembers(member):
                self.process_member(
                    member=submember_type,
                    member_name=submember_name,
                    parent=member,
                    parent_node=this_node
                )

    def create_inheritance(self):
        """Create Inheritance rows linking recorded parent/child Klasses."""
        print('')
        print(t.red('Inheritance'))
        for klass, representation in self.klasses.items():
            print('')
            print(t.green(representation.__str__()), end=' ')
            # Immediate bases only, in MRO declaration order.
            direct_ancestors = inspect.getclasstree([klass])[-1][0][1]
            for i, ancestor in enumerate(direct_ancestors):
                if ancestor in self.klasses:
                    print('.', end=' ')
                    Inheritance.objects.create(
                        parent=self.klasses[ancestor],
                        child=representation,
                        order=i
                    )
        print('')

    def create_attributes(self):
        """Create KlassAttribute rows only where a value was defined, not
        merely inherited."""
        print('')
        print(t.red('Attributes'))
        # Go over each name/value pair to create KlassAttributes
        for name_and_value, klasses in self.attributes.items():
            # Find all the descendants of each Klass.
            descendants = set()
            for klass, start_line in klasses:
                for child in klass.get_all_children():
                    descendants.add(child)

            # By removing descendants from klasses, we leave behind the
            # klass(s) where the value was defined.
            remaining_klasses = [k_and_l for k_and_l in klasses if k_and_l[0] not in descendants]

            # Now we can create the KlassAttributes
            name, value = name_and_value
            for klass, line in remaining_klasses:
                KlassAttribute.objects.create(
                    klass=klass, line_number=line, name=name, value=value
                )
                print(f'{klass}: {name} = {value}')
bsd-2-clause
dmilith/SublimeText3-dmilith
Package Storage/lsp_utils/node-runtime/12.20.2/node/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/common_test.py
8
1966
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unit tests for the common.py file."""

import gyp.common
import unittest
import sys


class TestTopologicallySorted(unittest.TestCase):
  def test_Valid(self):
    """Test that sorting works on a valid graph with one possible order."""
    graph = {
        'a': ['b', 'c'],
        'b': [],
        'c': ['d'],
        'd': ['b'],
    }
    self.assertEqual(
        gyp.common.TopologicallySorted(graph.keys(),
                                       lambda node: tuple(graph[node])),
        ['a', 'c', 'd', 'b'])

  def test_Cycle(self):
    """Test that an exception is thrown on a cyclic graph."""
    graph = {
        'a': ['b'],
        'b': ['c'],
        'c': ['d'],
        'd': ['a'],
    }
    self.assertRaises(
        gyp.common.CycleError,
        gyp.common.TopologicallySorted,
        graph.keys(),
        lambda node: tuple(graph[node]))


class TestGetFlavor(unittest.TestCase):
  """Test that gyp.common.GetFlavor works as intended."""

  original_platform = ''

  def setUp(self):
    # Remember the real platform so each test can fake its own.
    self.original_platform = sys.platform

  def tearDown(self):
    sys.platform = self.original_platform

  def assertFlavor(self, expected, argument, param):
    sys.platform = argument
    self.assertEqual(expected, gyp.common.GetFlavor(param))

  def test_platform_default(self):
    """Without an explicit flavor, sys.platform decides."""
    cases = [
        ('freebsd', 'freebsd9'),
        ('freebsd', 'freebsd10'),
        ('openbsd', 'openbsd5'),
        ('solaris', 'sunos5'),
        ('solaris', 'sunos'),
        ('linux', 'linux2'),
        ('linux', 'linux3'),
    ]
    for expected, platform in cases:
      self.assertFlavor(expected, platform, {})

  def test_param(self):
    """An explicit 'flavor' parameter always wins over the platform."""
    self.assertFlavor('foobar', 'linux2', {'flavor': 'foobar'})


if __name__ == '__main__':
  unittest.main()
mit
MrLoick/python-for-android
python3-alpha/python3-src/Lib/encodings/cp500.py
266
13121
""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp500', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x9c' # 0x04 -> CONTROL '\t' # 0x05 -> HORIZONTAL TABULATION '\x86' # 0x06 -> CONTROL '\x7f' # 0x07 -> DELETE '\x97' # 0x08 -> CONTROL '\x8d' # 0x09 -> CONTROL '\x8e' # 0x0A -> CONTROL '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x9d' # 0x14 -> CONTROL '\x85' # 0x15 -> CONTROL '\x08' # 0x16 -> BACKSPACE '\x87' # 0x17 -> CONTROL '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x92' # 0x1A -> CONTROL '\x8f' # 0x1B -> CONTROL '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' # 0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR 
'\x1f' # 0x1F -> UNIT SEPARATOR '\x80' # 0x20 -> CONTROL '\x81' # 0x21 -> CONTROL '\x82' # 0x22 -> CONTROL '\x83' # 0x23 -> CONTROL '\x84' # 0x24 -> CONTROL '\n' # 0x25 -> LINE FEED '\x17' # 0x26 -> END OF TRANSMISSION BLOCK '\x1b' # 0x27 -> ESCAPE '\x88' # 0x28 -> CONTROL '\x89' # 0x29 -> CONTROL '\x8a' # 0x2A -> CONTROL '\x8b' # 0x2B -> CONTROL '\x8c' # 0x2C -> CONTROL '\x05' # 0x2D -> ENQUIRY '\x06' # 0x2E -> ACKNOWLEDGE '\x07' # 0x2F -> BELL '\x90' # 0x30 -> CONTROL '\x91' # 0x31 -> CONTROL '\x16' # 0x32 -> SYNCHRONOUS IDLE '\x93' # 0x33 -> CONTROL '\x94' # 0x34 -> CONTROL '\x95' # 0x35 -> CONTROL '\x96' # 0x36 -> CONTROL '\x04' # 0x37 -> END OF TRANSMISSION '\x98' # 0x38 -> CONTROL '\x99' # 0x39 -> CONTROL '\x9a' # 0x3A -> CONTROL '\x9b' # 0x3B -> CONTROL '\x14' # 0x3C -> DEVICE CONTROL FOUR '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE '\x9e' # 0x3E -> CONTROL '\x1a' # 0x3F -> SUBSTITUTE ' ' # 0x40 -> SPACE '\xa0' # 0x41 -> NO-BREAK SPACE '\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS '\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE '\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE '\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE '\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE '\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA '\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE '[' # 0x4A -> LEFT SQUARE BRACKET '.' # 0x4B -> FULL STOP '<' # 0x4C -> LESS-THAN SIGN '(' # 0x4D -> LEFT PARENTHESIS '+' # 0x4E -> PLUS SIGN '!' 
# 0x4F -> EXCLAMATION MARK '&' # 0x50 -> AMPERSAND '\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE '\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX '\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS '\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE '\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE '\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS '\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE '\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN) ']' # 0x5A -> RIGHT SQUARE BRACKET '$' # 0x5B -> DOLLAR SIGN '*' # 0x5C -> ASTERISK ')' # 0x5D -> RIGHT PARENTHESIS ';' # 0x5E -> SEMICOLON '^' # 0x5F -> CIRCUMFLEX ACCENT '-' # 0x60 -> HYPHEN-MINUS '/' # 0x61 -> SOLIDUS '\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE '\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE '\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE '\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA '\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE '\xa6' # 0x6A -> BROKEN BAR ',' # 0x6B -> COMMA '%' # 0x6C -> PERCENT SIGN '_' # 0x6D -> LOW LINE '>' # 0x6E -> GREATER-THAN SIGN '?' 
# 0x6F -> QUESTION MARK '\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE '\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE '\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX '\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS '\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE '\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS '\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE '`' # 0x79 -> GRAVE ACCENT ':' # 0x7A -> COLON '#' # 0x7B -> NUMBER SIGN '@' # 0x7C -> COMMERCIAL AT "'" # 0x7D -> APOSTROPHE '=' # 0x7E -> EQUALS SIGN '"' # 0x7F -> QUOTATION MARK '\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE 'a' # 0x81 -> LATIN SMALL LETTER A 'b' # 0x82 -> LATIN SMALL LETTER B 'c' # 0x83 -> LATIN SMALL LETTER C 'd' # 0x84 -> LATIN SMALL LETTER D 'e' # 0x85 -> LATIN SMALL LETTER E 'f' # 0x86 -> LATIN SMALL LETTER F 'g' # 0x87 -> LATIN SMALL LETTER G 'h' # 0x88 -> LATIN SMALL LETTER H 'i' # 0x89 -> LATIN SMALL LETTER I '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC) '\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE '\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC) '\xb1' # 0x8F -> PLUS-MINUS SIGN '\xb0' # 0x90 -> DEGREE SIGN 'j' # 0x91 -> LATIN SMALL LETTER J 'k' # 0x92 -> LATIN SMALL LETTER K 'l' # 0x93 -> LATIN SMALL LETTER L 'm' # 0x94 -> LATIN SMALL LETTER M 'n' # 0x95 -> LATIN SMALL LETTER N 'o' # 0x96 -> LATIN SMALL LETTER O 'p' # 0x97 -> LATIN SMALL LETTER P 'q' # 0x98 -> LATIN SMALL LETTER Q 'r' # 0x99 -> LATIN SMALL LETTER R '\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR '\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR '\xe6' # 0x9C -> LATIN SMALL LIGATURE AE '\xb8' # 0x9D -> CEDILLA '\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE '\xa4' # 0x9F -> CURRENCY SIGN '\xb5' # 0xA0 -> MICRO SIGN '~' # 0xA1 -> TILDE 's' # 0xA2 -> LATIN 
SMALL LETTER S 't' # 0xA3 -> LATIN SMALL LETTER T 'u' # 0xA4 -> LATIN SMALL LETTER U 'v' # 0xA5 -> LATIN SMALL LETTER V 'w' # 0xA6 -> LATIN SMALL LETTER W 'x' # 0xA7 -> LATIN SMALL LETTER X 'y' # 0xA8 -> LATIN SMALL LETTER Y 'z' # 0xA9 -> LATIN SMALL LETTER Z '\xa1' # 0xAA -> INVERTED EXCLAMATION MARK '\xbf' # 0xAB -> INVERTED QUESTION MARK '\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC) '\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE '\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC) '\xae' # 0xAF -> REGISTERED SIGN '\xa2' # 0xB0 -> CENT SIGN '\xa3' # 0xB1 -> POUND SIGN '\xa5' # 0xB2 -> YEN SIGN '\xb7' # 0xB3 -> MIDDLE DOT '\xa9' # 0xB4 -> COPYRIGHT SIGN '\xa7' # 0xB5 -> SECTION SIGN '\xb6' # 0xB6 -> PILCROW SIGN '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS '\xac' # 0xBA -> NOT SIGN '|' # 0xBB -> VERTICAL LINE '\xaf' # 0xBC -> MACRON '\xa8' # 0xBD -> DIAERESIS '\xb4' # 0xBE -> ACUTE ACCENT '\xd7' # 0xBF -> MULTIPLICATION SIGN '{' # 0xC0 -> LEFT CURLY BRACKET 'A' # 0xC1 -> LATIN CAPITAL LETTER A 'B' # 0xC2 -> LATIN CAPITAL LETTER B 'C' # 0xC3 -> LATIN CAPITAL LETTER C 'D' # 0xC4 -> LATIN CAPITAL LETTER D 'E' # 0xC5 -> LATIN CAPITAL LETTER E 'F' # 0xC6 -> LATIN CAPITAL LETTER F 'G' # 0xC7 -> LATIN CAPITAL LETTER G 'H' # 0xC8 -> LATIN CAPITAL LETTER H 'I' # 0xC9 -> LATIN CAPITAL LETTER I '\xad' # 0xCA -> SOFT HYPHEN '\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS '\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE '\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE '\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE '}' # 0xD0 -> RIGHT CURLY BRACKET 'J' # 0xD1 -> LATIN CAPITAL LETTER J 'K' # 0xD2 -> LATIN CAPITAL LETTER K 'L' # 0xD3 -> LATIN CAPITAL LETTER L 'M' # 0xD4 -> LATIN CAPITAL LETTER M 'N' # 0xD5 -> LATIN CAPITAL LETTER N 'O' # 0xD6 -> LATIN CAPITAL LETTER O 'P' # 0xD7 -> LATIN CAPITAL LETTER P 'Q' # 0xD8 
-> LATIN CAPITAL LETTER Q 'R' # 0xD9 -> LATIN CAPITAL LETTER R '\xb9' # 0xDA -> SUPERSCRIPT ONE '\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS '\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE '\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE '\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS '\\' # 0xE0 -> REVERSE SOLIDUS '\xf7' # 0xE1 -> DIVISION SIGN 'S' # 0xE2 -> LATIN CAPITAL LETTER S 'T' # 0xE3 -> LATIN CAPITAL LETTER T 'U' # 0xE4 -> LATIN CAPITAL LETTER U 'V' # 0xE5 -> LATIN CAPITAL LETTER V 'W' # 0xE6 -> LATIN CAPITAL LETTER W 'X' # 0xE7 -> LATIN CAPITAL LETTER X 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z '\xb2' # 0xEA -> SUPERSCRIPT TWO '\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE '\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE '0' # 0xF0 -> DIGIT ZERO '1' # 0xF1 -> DIGIT ONE '2' # 0xF2 -> DIGIT TWO '3' # 0xF3 -> DIGIT THREE '4' # 0xF4 -> DIGIT FOUR '5' # 0xF5 -> DIGIT FIVE '6' # 0xF6 -> DIGIT SIX '7' # 0xF7 -> DIGIT SEVEN '8' # 0xF8 -> DIGIT EIGHT '9' # 0xF9 -> DIGIT NINE '\xb3' # 0xFA -> SUPERSCRIPT THREE '\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS '\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE '\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE '\x9f' # 0xFF -> CONTROL ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
apache-2.0
shsingh/ansible
lib/ansible/modules/cloud/rackspace/rax_mon_notification.py
77
5180
#!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: rax_mon_notification short_description: Create or delete a Rackspace Cloud Monitoring notification. description: - Create or delete a Rackspace Cloud Monitoring notification that specifies a channel that can be used to communicate alarms, such as email, webhooks, or PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check -> *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm version_added: "2.0" options: state: description: - Ensure that the notification with this C(label) exists or does not exist. choices: ['present', 'absent'] label: description: - Defines a friendly name for this notification. String between 1 and 255 characters long. required: true notification_type: description: - A supported notification type. choices: ["webhook", "email", "pagerduty"] required: true details: description: - Dictionary of key-value pairs used to initialize the notification. Required keys and meanings vary with notification type. See http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/ service-notification-types-crud.html for details. required: true author: Ash Wilson (@smashwilson) extends_documentation_fragment: rackspace.openstack ''' EXAMPLES = ''' - name: Monitoring notification example gather_facts: False hosts: local connection: local tasks: - name: Email me when something goes wrong. 
rax_mon_entity: credentials: ~/.rax_pub label: omg type: email details: address: me@mailhost.com register: the_notification ''' try: import pyrax HAS_PYRAX = True except ImportError: HAS_PYRAX = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module def notification(module, state, label, notification_type, details): if len(label) < 1 or len(label) > 255: module.fail_json(msg='label must be between 1 and 255 characters long') changed = False notification = None cm = pyrax.cloud_monitoring if not cm: module.fail_json(msg='Failed to instantiate client. This typically ' 'indicates an invalid region or an incorrectly ' 'capitalized region name.') existing = [] for n in cm.list_notifications(): if n.label == label: existing.append(n) if existing: notification = existing[0] if state == 'present': should_update = False should_delete = False should_create = False if len(existing) > 1: module.fail_json(msg='%s existing notifications are labelled %s.' 
% (len(existing), label)) if notification: should_delete = (notification_type != notification.type) should_update = (details != notification.details) if should_update and not should_delete: notification.update(details=notification.details) changed = True if should_delete: notification.delete() else: should_create = True if should_create: notification = cm.create_notification(notification_type, label=label, details=details) changed = True else: for n in existing: n.delete() changed = True if notification: notification_dict = { "id": notification.id, "type": notification.type, "label": notification.label, "details": notification.details } module.exit_json(changed=changed, notification=notification_dict) else: module.exit_json(changed=changed) def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( state=dict(default='present', choices=['present', 'absent']), label=dict(required=True), notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']), details=dict(required=True, type='dict') ) ) module = AnsibleModule( argument_spec=argument_spec, required_together=rax_required_together() ) if not HAS_PYRAX: module.fail_json(msg='pyrax is required for this module') state = module.params.get('state') label = module.params.get('label') notification_type = module.params.get('notification_type') details = module.params.get('details') setup_rax_module(module, pyrax) notification(module, state, label, notification_type, details) if __name__ == '__main__': main()
gpl-3.0
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interface_load_balancers_operations.py
1
5727
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class NetworkInterfaceLoadBalancersOperations: """NetworkInterfaceLoadBalancersOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2018_07_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, resource_group_name: str, network_interface_name: str, **kwargs ) -> AsyncIterable["_models.NetworkInterfaceLoadBalancerListResult"]: """List all load balancers in a network interface. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_interface_name: The name of the network interface. :type network_interface_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either NetworkInterfaceLoadBalancerListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceLoadBalancerListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceLoadBalancerListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-07-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] 
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('NetworkInterfaceLoadBalancerListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers'} # type: ignore
mit
Fraunhofer-IIS/GNSS-DSP-tools
gnsstools/beidou/b1i.py
1
1840
# Beidou B1I code construction (serves also for B2I) # # Copyright 2014 Peter Monta import numpy as np chip_rate = 2046000 code_length = 2046 secondary_code = np.array([0,0,0,0,0,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0]) secondary_code = 1.0 - 2.0*secondary_code b1i_g2_taps = { 1: (1,3), 2: (1,4), 3: (1,5), 4: (1,6), 5: (1,8), 6: (1,9), 7: (1,10), 8: (1,11), 9: (2,7), 10: (3,4), 11: (3,5), 12: (3,6), 13: (3,8), 14: (3,9), 15: (3,10), 16: (3,11), 17: (4,5), 18: (4,6), 19: (4,8), 20: (4,9), 21: (4,10), 22: (4,11), 23: (5,6), 24: (5,8), 25: (5,9), 26: (5,10), 27: (5,11), 28: (6,8), 29: (6,9), 30: (6,10), 31: (6,11), 32: (8,9), 33: (8,10), 34: (8,11), 35: (9,10), 36: (9,11), 37: (10,11) } def b1i_g1_shift(x): return [x[0]^x[6]^x[7]^x[8]^x[9]^x[10]] + x[0:10] def b1i_g2_shift(x): return [x[0]^x[1]^x[2]^x[3]^x[4]^x[7]^x[8]^x[10]] + x[0:10] def b1i(prn): n = code_length (tap1,tap2) = b1i_g2_taps[prn] g1 = [0,1,0,1,0,1,0,1,0,1,0] g2 = [0,1,0,1,0,1,0,1,0,1,0] b1i = np.zeros(n) for i in range(n): b1i[i] = g1[10] ^ g2[tap1-1] ^ g2[tap2-1] g1 = b1i_g1_shift(g1) g2 = b1i_g2_shift(g2) return b1i codes = {} def b1i_code(prn): if not codes.has_key(prn): codes[prn] = b1i(prn) return codes[prn] def code(prn,chips,frac,incr,n): c = b1i_code(prn) idx = (chips%code_length) + frac + incr*np.arange(n) idx = np.floor(idx).astype('int') idx = np.mod(idx,code_length) x = c[idx] return 1.0 - 2.0*x from numba import jit @jit(nopython=True) def correlate(x,prn,chips,frac,incr,c): n = len(x) p = 0.0j cp = (chips+frac)%code_length for i in range(n): p += x[i]*(1.0-2.0*c[int(cp)]) cp = (cp+incr)%code_length return p # test if __name__=='__main__': print b1i_code(1)[0:20] print b1i_code(2)[0:20]
mit
fabianvf/osf.io
website/addons/figshare/tests/utils.py
53
4566
import mock from website.addons.figshare.api import Figshare article = {u'count': 1, u'items': [{u'status': u'Draft', u'files': [{u'thumb': None, u'download_url': u'http://files.figshare.com/1348803/0NUTZ', u'name': u'0NUTZ', u'id': 1348803, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': None, u'download_url': u'http://files.figshare.com/1348805/0MNXS', u'name': u'0MNXS', u'id': 1348805, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': None, u'download_url': u'http://files.figshare.com/1348806/0NUTZ', u'name': u'0NUTZ', u'id': 1348806, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': None, u'download_url': u'http://files.figshare.com/1348807/0OX1G', u'name': u'0OX1G', u'id': 1348807, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': u'http://previews.figshare.com/1350751/250_1350751.jpg', u'download_url': u'http://files.figshare.com/1350751/Selection_003.png', u'name': u'Selection_003.png', u'id': 1350751, u'mime_type': u'image/png', u'size': u'18 KB'}, {u'thumb': u'http://previews.figshare.com/1350754/250_1350754.jpg', u'download_url': u'http://files.figshare.com/1350754/Selection_003.png', u'name': u'Selection_003.png', u'id': 1350754, u'mime_type': u'image/png', u'size': u'18 KB'}], u'description': u'<p>This is made using python</p>', u'links': [], u'title': u'New fileset', u'total_size': u'34.59 KB', u'master_publisher_id': 0, u'authors': [{u'first_name': u'Samuel', u'last_name': u'Chrisinger', u'id': 506241, u'full_name': u'Samuel Chrisinger'}], u'defined_type': u'fileset', u'version': 15, u'categories': [{u'id': 77, u'name': u'Applied Computer Science'}], u'published_date': u'22:13, Jan 16, 2014', u'description_nohtml': u'This is made using python', u'article_id': 902210, u'tags': [{u'id': 3564, u'name': u'code'}]}]} def create_mock_figshare(project): figshare_mock = mock.create_autospec(Figshare) figshare_mock.projects.return_value = [{u'owner': 506241, u'description': u'', u'id': 436, u'title': u'OSF Test'}] 
figshare_mock.project.return_value = {'articles': [{u'status': u'Draft', u'files': [{u'thumb': None, u'download_url': u'http://files.figshare.com/1348803/0NUTZ', u'name': u'0NUTZ', u'id': 1348803, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': None, u'download_url': u'http://files.figshare.com/1348805/0MNXS', u'name': u'0MNXS', u'id': 1348805, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': None, u'download_url': u'http://files.figshare.com/1348806/0NUTZ', u'name': u'0NUTZ', u'id': 1348806, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': None, u'download_url': u'http://files.figshare.com/1348807/0OX1G', u'name': u'0OX1G', u'id': 1348807, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': u'http://previews.figshare.com/1350751/250_1350751.jpg', u'download_url': u'http://files.figshare.com/1350751/Selection_003.png', u'name': u'Selection_003.png', u'id': 1350751, u'mime_type': u'image/png', u'size': u'18 KB'}, {u'thumb': u'http://previews.figshare.com/1350754/250_1350754.jpg', u'download_url': u'http://files.figshare.com/1350754/Selection_003.png', u'name': u'Selection_003.png', u'id': 1350754, u'mime_type': u'image/png', u'size': u'18 KB'}], u'description': u'<p>This is made using python</p>', u'links': [], u'title': u'New fileset', u'total_size': u'34.59 KB', u'master_publisher_id': 0, u'authors': [{u'first_name': u'Samuel', u'last_name': u'Chrisinger', u'id': 506241, u'full_name': u'Samuel Chrisinger'}], u'defined_type': u'fileset', u'version': 15, u'categories': [{u'id': 77, u'name': u'Applied Computer Science'}], u'published_date': u'22:13, Jan 16, 2014', u'description_nohtml': u'This is made using python', u'article_id': 902210, u'tags': [{u'id': 3564, u'name': u'code'}]}, {u'status': u'Drafts', u'files': [{u'id': 1404749, u'name': u'HW6.pdf', u'thumb': u'http://figshare.com/read/private/1404749/250_1404749.png', u'mime_type': u'application/pdf', u'size': u'177 KB'}], u'description': u'', u'links': [], u'title': 
u'HW6.pdf', u'total_size': u'172.82 KB', u'master_publisher_id': 0, u'authors': [{u'first_name': u'Samuel', u'last_name': u'Chrisinger', u'id': 506241, u'full_name': u'Samuel Chrisinger'}], u'defined_type': u'paper', u'version': 1, u'categories': [], u'published_date': u'09:25, Feb 24, 2014', u'description_nohtml': u'', u'article_id': 949657, u'tags': []}], u'description': u'', u'created': u'06/03/2014', u'id': 862, u'title': u'OSF Test'} figshare_mock.articles.return_value = article figshare_mock.article.return_value = article return figshare_mock
apache-2.0
lukasfenix/namebench
nb_third_party/jinja2/nodes.py
207
27369
# -*- coding: utf-8 -*- """ jinja2.nodes ~~~~~~~~~~~~ This module implements additional nodes derived from the ast base node. It also provides some node tree helper functions like `in_lineno` and `get_nodes` used by the parser and translator in order to normalize python and jinja nodes. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ import operator from itertools import chain, izip from collections import deque from jinja2.utils import Markup _binop_to_func = { '*': operator.mul, '/': operator.truediv, '//': operator.floordiv, '**': operator.pow, '%': operator.mod, '+': operator.add, '-': operator.sub } _uaop_to_func = { 'not': operator.not_, '+': operator.pos, '-': operator.neg } _cmpop_to_func = { 'eq': operator.eq, 'ne': operator.ne, 'gt': operator.gt, 'gteq': operator.ge, 'lt': operator.lt, 'lteq': operator.le, 'in': lambda a, b: a in b, 'notin': lambda a, b: a not in b } class Impossible(Exception): """Raised if the node could not perform a requested action.""" class NodeType(type): """A metaclass for nodes that handles the field and attribute inheritance. fields and attributes from the parent class are automatically forwarded to the child.""" def __new__(cls, name, bases, d): for attr in 'fields', 'attributes': storage = [] storage.extend(getattr(bases[0], attr, ())) storage.extend(d.get(attr, ())) assert len(bases) == 1, 'multiple inheritance not allowed' assert len(storage) == len(set(storage)), 'layout conflict' d[attr] = tuple(storage) d.setdefault('abstract', False) return type.__new__(cls, name, bases, d) class EvalContext(object): """Holds evaluation time information. Custom attributes can be attached to it in extensions. 
""" def __init__(self, environment, template_name=None): if callable(environment.autoescape): self.autoescape = environment.autoescape(template_name) else: self.autoescape = environment.autoescape self.volatile = False def save(self): return self.__dict__.copy() def revert(self, old): self.__dict__.clear() self.__dict__.update(old) def get_eval_context(node, ctx): if ctx is None: if node.environment is None: raise RuntimeError('if no eval context is passed, the ' 'node must have an attached ' 'environment.') return EvalContext(node.environment) return ctx class Node(object): """Baseclass for all Jinja2 nodes. There are a number of nodes available of different types. There are three major types: - :class:`Stmt`: statements - :class:`Expr`: expressions - :class:`Helper`: helper nodes - :class:`Template`: the outermost wrapper node All nodes have fields and attributes. Fields may be other nodes, lists, or arbitrary values. Fields are passed to the constructor as regular positional arguments, attributes as keyword arguments. Each node has two attributes: `lineno` (the line number of the node) and `environment`. The `environment` attribute is set at the end of the parsing process for all nodes automatically. 
""" __metaclass__ = NodeType fields = () attributes = ('lineno', 'environment') abstract = True def __init__(self, *fields, **attributes): if self.abstract: raise TypeError('abstract nodes are not instanciable') if fields: if len(fields) != len(self.fields): if not self.fields: raise TypeError('%r takes 0 arguments' % self.__class__.__name__) raise TypeError('%r takes 0 or %d argument%s' % ( self.__class__.__name__, len(self.fields), len(self.fields) != 1 and 's' or '' )) for name, arg in izip(self.fields, fields): setattr(self, name, arg) for attr in self.attributes: setattr(self, attr, attributes.pop(attr, None)) if attributes: raise TypeError('unknown attribute %r' % iter(attributes).next()) def iter_fields(self, exclude=None, only=None): """This method iterates over all fields that are defined and yields ``(key, value)`` tuples. Per default all fields are returned, but it's possible to limit that to some fields by providing the `only` parameter or to exclude some using the `exclude` parameter. Both should be sets or tuples of field names. """ for name in self.fields: if (exclude is only is None) or \ (exclude is not None and name not in exclude) or \ (only is not None and name in only): try: yield name, getattr(self, name) except AttributeError: pass def iter_child_nodes(self, exclude=None, only=None): """Iterates over all direct child nodes of the node. This iterates over all fields and yields the values of they are nodes. If the value of a field is a list all the nodes in that list are returned. """ for field, item in self.iter_fields(exclude, only): if isinstance(item, list): for n in item: if isinstance(n, Node): yield n elif isinstance(item, Node): yield item def find(self, node_type): """Find the first node of a given type. If no such node exists the return value is `None`. """ for result in self.find_all(node_type): return result def find_all(self, node_type): """Find all the nodes of a given type. 
If the type is a tuple, the check is performed for any of the tuple items. """ for child in self.iter_child_nodes(): if isinstance(child, node_type): yield child for result in child.find_all(node_type): yield result def set_ctx(self, ctx): """Reset the context of a node and all child nodes. Per default the parser will all generate nodes that have a 'load' context as it's the most common one. This method is used in the parser to set assignment targets and other nodes to a store context. """ todo = deque([self]) while todo: node = todo.popleft() if 'ctx' in node.fields: node.ctx = ctx todo.extend(node.iter_child_nodes()) return self def set_lineno(self, lineno, override=False): """Set the line numbers of the node and children.""" todo = deque([self]) while todo: node = todo.popleft() if 'lineno' in node.attributes: if node.lineno is None or override: node.lineno = lineno todo.extend(node.iter_child_nodes()) return self def set_environment(self, environment): """Set the environment for all nodes.""" todo = deque([self]) while todo: node = todo.popleft() node.environment = environment todo.extend(node.iter_child_nodes()) return self def __eq__(self, other): return type(self) is type(other) and \ tuple(self.iter_fields()) == tuple(other.iter_fields()) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return '%s(%s)' % ( self.__class__.__name__, ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for arg in self.fields) ) class Stmt(Node): """Base node for all statements.""" abstract = True class Helper(Node): """Nodes that exist in a specific context only.""" abstract = True class Template(Node): """Node that represents a template. This must be the outermost node that is passed to the compiler. """ fields = ('body',) class Output(Stmt): """A node that holds multiple expressions which are then printed out. This is used both for the `print` statement and the regular template data. 
""" fields = ('nodes',) class Extends(Stmt): """Represents an extends statement.""" fields = ('template',) class For(Stmt): """The for loop. `target` is the target for the iteration (usually a :class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list of nodes that are used as loop-body, and `else_` a list of nodes for the `else` block. If no else node exists it has to be an empty list. For filtered nodes an expression can be stored as `test`, otherwise `None`. """ fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive') class If(Stmt): """If `test` is true, `body` is rendered, else `else_`.""" fields = ('test', 'body', 'else_') class Macro(Stmt): """A macro definition. `name` is the name of the macro, `args` a list of arguments and `defaults` a list of defaults if there are any. `body` is a list of nodes for the macro body. """ fields = ('name', 'args', 'defaults', 'body') class CallBlock(Stmt): """Like a macro without a name but a call instead. `call` is called with the unnamed macro as `caller` argument this node holds. """ fields = ('call', 'args', 'defaults', 'body') class FilterBlock(Stmt): """Node for filter sections.""" fields = ('body', 'filter') class Block(Stmt): """A node that represents a block.""" fields = ('name', 'body', 'scoped') class Include(Stmt): """A node that represents the include tag.""" fields = ('template', 'with_context', 'ignore_missing') class Import(Stmt): """A node that represents the import tag.""" fields = ('template', 'target', 'with_context') class FromImport(Stmt): """A node that represents the from import tag. It's important to not pass unsafe names to the name attribute. The compiler translates the attribute lookups directly into getattr calls and does *not* use the subscript callback of the interface. As exported variables may not start with double underscores (which the parser asserts) this is not a problem for regular Jinja code, but if this node is used in an extension extra care must be taken. 
The list of names may contain tuples if aliases are wanted. """ fields = ('template', 'names', 'with_context') class ExprStmt(Stmt): """A statement that evaluates an expression and discards the result.""" fields = ('node',) class Assign(Stmt): """Assigns an expression to a target.""" fields = ('target', 'node') class Expr(Node): """Baseclass for all expressions.""" abstract = True def as_const(self, eval_ctx=None): """Return the value of the expression as constant or raise :exc:`Impossible` if this was not possible. An :class:`EvalContext` can be provided, if none is given a default context is created which requires the nodes to have an attached environment. .. versionchanged:: 2.4 the `eval_ctx` parameter was added. """ raise Impossible() def can_assign(self): """Check if it's possible to assign something to this node.""" return False class BinExpr(Expr): """Baseclass for all binary expressions.""" fields = ('left', 'right') operator = None abstract = True def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) f = _binop_to_func[self.operator] try: return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx)) except: raise Impossible() class UnaryExpr(Expr): """Baseclass for all unary expressions.""" fields = ('node',) operator = None abstract = True def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) f = _uaop_to_func[self.operator] try: return f(self.node.as_const(eval_ctx)) except: raise Impossible() class Name(Expr): """Looks up a name or stores a value in a name. The `ctx` of the node can be one of the following values: - `store`: store a value in the name - `load`: load that name - `param`: like `store` but if the name was defined as function parameter. """ fields = ('name', 'ctx') def can_assign(self): return self.name not in ('true', 'false', 'none', 'True', 'False', 'None') class Literal(Expr): """Baseclass for literals.""" abstract = True class Const(Literal): """All constant values. 
The parser will return this node for simple constants such as ``42`` or ``"foo"`` but it can be used to store more complex values such as lists too. Only constants with a safe representation (objects where ``eval(repr(x)) == x`` is true). """ fields = ('value',) def as_const(self, eval_ctx=None): return self.value @classmethod def from_untrusted(cls, value, lineno=None, environment=None): """Return a const object if the value is representable as constant value in the generated code, otherwise it will raise an `Impossible` exception. """ from compiler import has_safe_repr if not has_safe_repr(value): raise Impossible() return cls(value, lineno=lineno, environment=environment) class TemplateData(Literal): """A constant template string.""" fields = ('data',) def as_const(self, eval_ctx=None): if get_eval_context(self, eval_ctx).autoescape: return Markup(self.data) return self.data class Tuple(Literal): """For loop unpacking and some other things like multiple arguments for subscripts. Like for :class:`Name` `ctx` specifies if the tuple is used for loading the names or storing. """ fields = ('items', 'ctx') def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return tuple(x.as_const(eval_ctx) for x in self.items) def can_assign(self): for item in self.items: if not item.can_assign(): return False return True class List(Literal): """Any list literal such as ``[1, 2, 3]``""" fields = ('items',) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return [x.as_const(eval_ctx) for x in self.items] class Dict(Literal): """Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of :class:`Pair` nodes. 
""" fields = ('items',) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return dict(x.as_const(eval_ctx) for x in self.items) class Pair(Helper): """A key, value pair for dicts.""" fields = ('key', 'value') def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx) class Keyword(Helper): """A key, value pair for keyword arguments where key is a string.""" fields = ('key', 'value') def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return self.key, self.value.as_const(eval_ctx) class CondExpr(Expr): """A conditional expression (inline if expression). (``{{ foo if bar else baz }}``) """ fields = ('test', 'expr1', 'expr2') def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) if self.test.as_const(eval_ctx): return self.expr1.as_const(eval_ctx) # if we evaluate to an undefined object, we better do that at runtime if self.expr2 is None: raise Impossible() return self.expr2.as_const(eval_ctx) class Filter(Expr): """This node applies a filter on an expression. `name` is the name of the filter, the rest of the fields are the same as for :class:`Call`. If the `node` of a filter is `None` the contents of the last buffer are filtered. Buffers are created by macros and filter blocks. """ fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs') def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) if eval_ctx.volatile or self.node is None: raise Impossible() # we have to be careful here because we call filter_ below. # if this variable would be called filter, 2to3 would wrap the # call in a list beause it is assuming we are talking about the # builtin filter function here which no longer returns a list in # python 3. because of that, do not rename filter_ to filter! 
filter_ = self.environment.filters.get(self.name) if filter_ is None or getattr(filter_, 'contextfilter', False): raise Impossible() obj = self.node.as_const(eval_ctx) args = [x.as_const(eval_ctx) for x in self.args] if getattr(filter_, 'evalcontextfilter', False): args.insert(0, eval_ctx) elif getattr(filter_, 'environmentfilter', False): args.insert(0, self.environment) kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs) if self.dyn_args is not None: try: args.extend(self.dyn_args.as_const(eval_ctx)) except: raise Impossible() if self.dyn_kwargs is not None: try: kwargs.update(self.dyn_kwargs.as_const(eval_ctx)) except: raise Impossible() try: return filter_(obj, *args, **kwargs) except: raise Impossible() class Test(Expr): """Applies a test on an expression. `name` is the name of the test, the rest of the fields are the same as for :class:`Call`. """ fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs') class Call(Expr): """Calls an expression. `args` is a list of arguments, `kwargs` a list of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args` and `dyn_kwargs` has to be either `None` or a node that is used as node for dynamic positional (``*args``) or keyword (``**kwargs``) arguments. 
""" fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs') def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) if eval_ctx.volatile: raise Impossible() obj = self.node.as_const(eval_ctx) # don't evaluate context functions args = [x.as_const(eval_ctx) for x in self.args] if getattr(obj, 'contextfunction', False): raise Impossible() elif getattr(obj, 'evalcontextfunction', False): args.insert(0, eval_ctx) elif getattr(obj, 'environmentfunction', False): args.insert(0, self.environment) kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs) if self.dyn_args is not None: try: args.extend(self.dyn_args.as_const(eval_ctx)) except: raise Impossible() if self.dyn_kwargs is not None: try: kwargs.update(self.dyn_kwargs.as_const(eval_ctx)) except: raise Impossible() try: return obj(*args, **kwargs) except: raise Impossible() class Getitem(Expr): """Get an attribute or item from an expression and prefer the item.""" fields = ('node', 'arg', 'ctx') def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) if self.ctx != 'load': raise Impossible() try: return self.environment.getitem(self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx)) except: raise Impossible() def can_assign(self): return False class Getattr(Expr): """Get an attribute or item from an expression that is a ascii-only bytestring and prefer the attribute. """ fields = ('node', 'attr', 'ctx') def as_const(self, eval_ctx=None): if self.ctx != 'load': raise Impossible() try: eval_ctx = get_eval_context(self, eval_ctx) return self.environment.getattr(self.node.as_const(eval_ctx), arg) except: raise Impossible() def can_assign(self): return False class Slice(Expr): """Represents a slice object. This must only be used as argument for :class:`Subscript`. 
""" fields = ('start', 'stop', 'step') def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) def const(obj): if obj is None: return None return obj.as_const(eval_ctx) return slice(const(self.start), const(self.stop), const(self.step)) class Concat(Expr): """Concatenates the list of expressions provided after converting them to unicode. """ fields = ('nodes',) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return ''.join(unicode(x.as_const(eval_ctx)) for x in self.nodes) class Compare(Expr): """Compares an expression with some other expressions. `ops` must be a list of :class:`Operand`\s. """ fields = ('expr', 'ops') def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) result = value = self.expr.as_const(eval_ctx) try: for op in self.ops: new_value = op.expr.as_const(eval_ctx) result = _cmpop_to_func[op.op](value, new_value) value = new_value except: raise Impossible() return result class Operand(Helper): """Holds an operator and an expression.""" fields = ('op', 'expr') if __debug__: Operand.__doc__ += '\nThe following operators are available: ' + \ ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) | set(_uaop_to_func) | set(_cmpop_to_func))) class Mul(BinExpr): """Multiplies the left with the right node.""" operator = '*' class Div(BinExpr): """Divides the left by the right node.""" operator = '/' class FloorDiv(BinExpr): """Divides the left by the right node and truncates conver the result into an integer by truncating. 
""" operator = '//' class Add(BinExpr): """Add the left to the right node.""" operator = '+' class Sub(BinExpr): """Substract the right from the left node.""" operator = '-' class Mod(BinExpr): """Left modulo right.""" operator = '%' class Pow(BinExpr): """Left to the power of right.""" operator = '**' class And(BinExpr): """Short circuited AND.""" operator = 'and' def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx) class Or(BinExpr): """Short circuited OR.""" operator = 'or' def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx) class Not(UnaryExpr): """Negate the expression.""" operator = 'not' class Neg(UnaryExpr): """Make the expression negative.""" operator = '-' class Pos(UnaryExpr): """Make the expression positive (noop for most expressions)""" operator = '+' # Helpers for extensions class EnvironmentAttribute(Expr): """Loads an attribute from the environment object. This is useful for extensions that want to call a callback stored on the environment. """ fields = ('name',) class ExtensionAttribute(Expr): """Returns the attribute of an extension bound to the environment. The identifier is the identifier of the :class:`Extension`. This node is usually constructed by calling the :meth:`~jinja2.ext.Extension.attr` method on an extension. """ fields = ('identifier', 'name') class ImportedName(Expr): """If created with an import name the import name is returned on node access. For example ``ImportedName('cgi.escape')`` returns the `escape` function from the cgi module on evaluation. Imports are optimized by the compiler so there is no need to assign them to local variables. """ fields = ('importname',) class InternalName(Expr): """An internal name in the compiler. 
You cannot create these nodes yourself but the parser provides a :meth:`~jinja2.parser.Parser.free_identifier` method that creates a new identifier for you. This identifier is not available from the template and is not threated specially by the compiler. """ fields = ('name',) def __init__(self): raise TypeError('Can\'t create internal names. Use the ' '`free_identifier` method on a parser.') class MarkSafe(Expr): """Mark the wrapped expression as safe (wrap it as `Markup`).""" fields = ('expr',) def as_const(self, eval_ctx=None): eval_ctx = get_eval_context(self, eval_ctx) return Markup(self.expr.as_const(eval_ctx)) class ContextReference(Expr): """Returns the current template context. It can be used like a :class:`Name` node, with a ``'load'`` ctx and will return the current :class:`~jinja2.runtime.Context` object. Here an example that assigns the current template name to a variable named `foo`:: Assign(Name('foo', ctx='store'), Getattr(ContextReference(), 'name')) """ class Continue(Stmt): """Continue a loop.""" class Break(Stmt): """Break a loop.""" class Scope(Stmt): """An artificial scope.""" fields = ('body',) class EvalContextModifier(Stmt): """Modifies the eval context. For each option that should be modified, a :class:`Keyword` has to be added to the :attr:`options` list. Example to change the `autoescape` setting:: EvalContextModifier(options=[Keyword('autoescape', Const(True))]) """ fields = ('options',) class ScopedEvalContextModifier(EvalContextModifier): """Modifies the eval context and reverts it later. Works exactly like :class:`EvalContextModifier` but will only modify the :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`. """ fields = ('body',) # make sure nobody creates custom nodes def _failing_new(*args, **kwargs): raise TypeError('can\'t create custom node types') NodeType.__new__ = staticmethod(_failing_new); del _failing_new
apache-2.0
randynobx/ansible
lib/ansible/modules/system/alternatives.py
66
5544
#!/usr/bin/python # -*- coding: utf-8 -*- """ Ansible module to manage symbolic link alternatives. (c) 2014, Gabe Mulley <gabe.mulley@gmail.com> (c) 2015, David Wittman <dwittman@gmail.com> This file is part of Ansible Ansible is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Ansible is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Ansible. If not, see <http://www.gnu.org/licenses/>. """ ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: alternatives short_description: Manages alternative programs for common commands description: - Manages symbolic links using the 'update-alternatives' tool - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). version_added: "1.6" author: - "David Wittman (@DavidWittman)" - "Gabe Mulley (@mulby)" options: name: description: - The generic name of the link. required: true path: description: - The path to the real executable that the link should point to. required: true link: description: - The path to the symbolic link that should point to the real executable. 
- This option is required on RHEL-based distributions required: false priority: description: - The priority of the alternative required: false default: 50 version_added: "2.2" requirements: [ update-alternatives ] ''' EXAMPLES = ''' - name: correct java version selected alternatives: name: java path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - name: alternatives link created alternatives: name: hadoop-conf link: /etc/hadoop/conf path: /etc/hadoop/conf.ansible - name: make java 32 bit an alternative with low priority alternatives: name: java path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java priority: -10 ''' import re from ansible.module_utils.basic import * from ansible.module_utils.pycompat24 import get_exception def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), path = dict(required=True, type='path'), link = dict(required=False, type='path'), priority = dict(required=False, type='int', default=50), ), supports_check_mode=True, ) params = module.params name = params['name'] path = params['path'] link = params['link'] priority = params['priority'] UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True) current_path = None all_alternatives = [] # Run `update-alternatives --display <name>` to find existing alternatives (rc, display_output, _) = module.run_command( ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name] ) if rc == 0: # Alternatives already exist for this link group # Parse the output to determine the current path of the symlink and # available alternatives current_path_regex = re.compile(r'^\s*link currently points to (.*)$', re.MULTILINE) alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE) current_path = current_path_regex.search(display_output).group(1) all_alternatives = alternative_regex.findall(display_output) if not link: # Read the current symlink target from `update-alternatives --query` # in case we need to install the new alternative before setting it. 
# # This is only compatible on Debian-based systems, as the other # alternatives don't have --query available rc, query_output, _ = module.run_command( ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name] ) if rc == 0: for line in query_output.splitlines(): if line.startswith('Link:'): link = line.split()[1] break if current_path != path: if module.check_mode: module.exit_json(changed=True, current_path=current_path) try: # install the requested path if necessary if path not in all_alternatives: if not link: module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link") module.run_command( [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)], check_rc=True ) # select the requested path module.run_command( [UPDATE_ALTERNATIVES, '--set', name, path], check_rc=True ) module.exit_json(changed=True) except subprocess.CalledProcessError: e = get_exception() module.fail_json(msg=str(dir(cpe))) else: module.exit_json(changed=False) if __name__ == '__main__': main()
gpl-3.0
DooMLoRD/android_kernel_sony_msm8960t
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
12980
5411
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.


try:
    import wx
except ImportError:
    raise ImportError, "You need to install the wxpython lib for this script"


class RootFrame(wx.Frame):
    """Top-level window that renders scheduler traces as a zoomable,
    scrollable band of rectangles, one row per rectangle slot reported
    by the sched_tracer."""

    Y_OFFSET = 100             # top margin above the first row, in pixels
    RECT_HEIGHT = 100          # height of one event rectangle
    RECT_SPACE = 50            # vertical gap between rows
    EVENT_MARKING_WIDTH = 5    # height of the thin "top color" marker strip

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        # Summary text widget, created lazily by update_summary().
        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        # val / 10**3 scaled by the current zoom factor.
        # NOTE(review): integer division under Python 2 -- confirm the
        # truncation is intended for small intervals.
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Inverse of us_to_px().
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Current scroll position converted from scroll units to pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        # Convert the time interval into pixel offsets relative to the
        # trace start, then draw the optional marker strip and the body.
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Delegate to the tracer, which calls back paint_rectangle_zone().
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        # Repaint only the currently visible time window.
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        # Map a y pixel back to a row number; -1 when the pixel falls in
        # the top margin or in the gap between two rows.
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        # Replace the old summary widget instead of updating it in place.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Recompute the virtual width and keep the time `x` anchored at
        # the left edge of the view after the zoom change.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # +/- zoom; arrow keys scroll one scroll unit at a time.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
gpl-2.0
bigswitch/neutron
neutron/tests/unit/agent/l3/test_legacy_router.py
1
3278
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from neutron_lib import constants as l3_constants
from oslo_utils import uuidutils

from neutron.agent.l3 import legacy_router
from neutron.agent.linux import ip_lib
from neutron.tests import base

_uuid = uuidutils.generate_uuid


class BasicRouterTestCaseFramework(base.BaseTestCase):
    def _create_router(self, router=None, **kwargs):
        # Build a LegacyRouter around mocked conf/driver so tests can
        # exercise individual methods in isolation.
        if not router:
            router = mock.MagicMock()
        self.agent_conf = mock.Mock()
        self.driver = mock.Mock()
        self.router_id = _uuid()
        return legacy_router.LegacyRouter(self.router_id,
                                          router,
                                          self.agent_conf,
                                          self.driver,
                                          **kwargs)


class TestBasicRouterOperations(BasicRouterTestCaseFramework):

    def test_remove_floating_ip(self):
        ri = self._create_router(mock.MagicMock())
        device = mock.Mock()
        cidr = '15.1.2.3/32'

        ri.remove_floating_ip(device, cidr)

        # Removing a floating IP must also clear its conntrack entries.
        device.delete_addr_and_conntrack_state.assert_called_once_with(cidr)

    def test_remove_external_gateway_ip(self):
        ri = self._create_router(mock.MagicMock())
        device = mock.Mock()
        cidr = '172.16.0.0/24'

        ri.remove_external_gateway_ip(device, cidr)

        device.delete_addr_and_conntrack_state.assert_called_once_with(cidr)


@mock.patch.object(ip_lib, 'send_ip_addr_adv_notif')
class TestAddFloatingIpWithMockGarp(BasicRouterTestCaseFramework):
    def test_add_floating_ip(self, send_ip_addr_adv_notif):
        ri = self._create_router()
        ri._add_fip_addr_to_device = mock.Mock(return_value=True)
        ip = '15.1.2.3'
        result = ri.add_floating_ip({'floating_ip_address': ip},
                                    mock.sentinel.interface_name,
                                    mock.sentinel.device)
        # A successfully plugged address triggers a gratuitous ARP and
        # the floating IP is reported ACTIVE.
        ip_lib.send_ip_addr_adv_notif.assert_called_once_with(
            ri.ns_name,
            mock.sentinel.interface_name,
            ip, self.agent_conf)
        self.assertEqual(l3_constants.FLOATINGIP_STATUS_ACTIVE, result)

    def test_add_floating_ip_error(self, send_ip_addr_adv_notif):
        ri = self._create_router()
        ri._add_fip_addr_to_device = mock.Mock(return_value=False)
        result = ri.add_floating_ip({'floating_ip_address': '15.1.2.3'},
                                    mock.sentinel.interface_name,
                                    mock.sentinel.device)
        # When the address cannot be plugged no GARP is sent and the
        # floating IP is reported as ERROR.
        self.assertFalse(ip_lib.send_ip_addr_adv_notif.called)
        self.assertEqual(l3_constants.FLOATINGIP_STATUS_ERROR, result)
apache-2.0
harshilasu/LinkurApp
y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/vpc/test_internetgateway.py
10
6075
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase

from boto.vpc import VPCConnection, InternetGateway


class TestDescribeInternetGateway(AWSMockServiceTestCase):
    """Verify request parameters and parsing for DescribeInternetGateways."""

    connection_class = VPCConnection

    def default_body(self):
        return """
            <DescribeInternetGatewaysResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
               <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
               <internetGatewaySet>
                  <item>
                     <internetGatewayId>igw-eaad4883EXAMPLE</internetGatewayId>
                     <attachmentSet>
                        <item>
                           <vpcId>vpc-11ad4878</vpcId>
                           <state>available</state>
                        </item>
                     </attachmentSet>
                     <tagSet/>
                  </item>
               </internetGatewaySet>
            </DescribeInternetGatewaysResponse>
        """

    def test_describe_internet_gateway(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.get_all_internet_gateways(
            'igw-eaad4883EXAMPLE',
            filters=[('attachment.state', ['available', 'pending'])])
        self.assert_request_parameters({
            'Action': 'DescribeInternetGateways',
            'InternetGatewayId.1': 'igw-eaad4883EXAMPLE',
            'Filter.1.Name': 'attachment.state',
            'Filter.1.Value.1': 'available',
            'Filter.1.Value.2': 'pending'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        # NOTE: assertEqual (not the deprecated assertEquals alias, removed
        # in Python 3.12) is used throughout for consistency.
        self.assertEqual(len(api_response), 1)
        self.assertIsInstance(api_response[0], InternetGateway)
        self.assertEqual(api_response[0].id, 'igw-eaad4883EXAMPLE')


class TestCreateInternetGateway(AWSMockServiceTestCase):
    """Verify request parameters and parsing for CreateInternetGateway."""

    connection_class = VPCConnection

    def default_body(self):
        return """
            <CreateInternetGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
               <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
               <internetGateway>
                  <internetGatewayId>igw-eaad4883</internetGatewayId>
                  <attachmentSet/>
                  <tagSet/>
               </internetGateway>
            </CreateInternetGatewayResponse>
        """

    def test_create_internet_gateway(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_internet_gateway()
        self.assert_request_parameters({
            'Action': 'CreateInternetGateway'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertIsInstance(api_response, InternetGateway)
        self.assertEqual(api_response.id, 'igw-eaad4883')


class TestDeleteInternetGateway(AWSMockServiceTestCase):
    """Verify request parameters and boolean result for DeleteInternetGateway."""

    connection_class = VPCConnection

    def default_body(self):
        return """
            <DeleteInternetGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
               <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
               <return>true</return>
            </DeleteInternetGatewayResponse>
        """

    def test_delete_internet_gateway(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.delete_internet_gateway('igw-eaad4883')
        self.assert_request_parameters({
            'Action': 'DeleteInternetGateway',
            'InternetGatewayId': 'igw-eaad4883'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertEqual(api_response, True)


class TestAttachInternetGateway(AWSMockServiceTestCase):
    """Verify request parameters and boolean result for AttachInternetGateway."""

    connection_class = VPCConnection

    def default_body(self):
        return """
            <AttachInternetGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
               <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
               <return>true</return>
            </AttachInternetGatewayResponse>
        """

    def test_attach_internet_gateway(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.attach_internet_gateway(
            'igw-eaad4883', 'vpc-11ad4878')
        self.assert_request_parameters({
            'Action': 'AttachInternetGateway',
            'InternetGatewayId': 'igw-eaad4883',
            'VpcId': 'vpc-11ad4878'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertEqual(api_response, True)


class TestDetachInternetGateway(AWSMockServiceTestCase):
    """Verify request parameters and boolean result for DetachInternetGateway."""

    connection_class = VPCConnection

    def default_body(self):
        return """
            <DetachInternetGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
               <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
               <return>true</return>
            </DetachInternetGatewayResponse>
        """

    def test_detach_internet_gateway(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.detach_internet_gateway(
            'igw-eaad4883', 'vpc-11ad4878')
        self.assert_request_parameters({
            'Action': 'DetachInternetGateway',
            'InternetGatewayId': 'igw-eaad4883',
            'VpcId': 'vpc-11ad4878'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertEqual(api_response, True)


if __name__ == '__main__':
    unittest.main()
gpl-3.0
shawger/s-kape
lib/requests/packages/chardet/sjisprober.py
1777
3764
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants


class SJISProber(MultiByteCharSetProber):
    """Charset prober for Shift_JIS encoded Japanese text.

    Combines a coding state machine (structural validity of byte
    sequences), a character-distribution analyzer, and a context
    analyzer; the reported confidence is the max of the latter two.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        # Base reset clears state, confidence and the last-char buffer;
        # only the context analyzer needs an extra reset here (the
        # distribution analyzer is reset by the base class).
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        # Delegated: the context analyzer distinguishes SHIFT_JIS vs
        # CP932-style variants from the data it has seen.
        return self._mContextAnalyzer.get_charset_name()

    def feed(self, aBuf):
        """Feed a chunk of bytes; return the prober state afterwards."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            # Drive the state machine one byte at a time.
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Byte sequence is structurally impossible in Shift_JIS.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character just ended at byte i.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # Character straddles the chunk boundary: its first
                    # byte(s) live in self._mLastChar from the previous
                    # feed() call.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    # NOTE(review): the context analyzer window is shifted
                    # (i + 1 - charLen .. i + 3 - charLen) relative to the
                    # distribution window (i - 1 .. i + 1); this matches
                    # upstream chardet, which feeds the context analyzer
                    # lead bytes only — confirm before changing.
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the final byte so a character split across chunks can
        # be completed on the next feed() (the i == 0 branch above).
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Shortcut: stop probing once we are confident enough.
            if (self._mContextAnalyzer.got_enough_data() and
               (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        # Best evidence from either analyzer wins.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
gpl-3.0
coderbone/SickRage
lib/unidecode/x0c2.py
253
4710
# Transliterations for code points U+C200..U+C2FF (precomposed Hangul
# syllables with an 's'/'ss' onset).  Hangul syllables are composed
# algorithmically: within each onset+vowel block the 28 finals (codas)
# always appear in the same Unicode order, so the 256 strings are built
# from stems and the shared coda list instead of being written out by
# hand.  The resulting tuple is element-for-element identical to the
# explicit table.

# The 28 syllable finals, in Unicode composition order.
_CODAS = (
    '', 'g', 'gg', 'gs', 'n', 'nj', 'nh', 'd',
    'l', 'lg', 'lm', 'lb', 'ls', 'lt', 'lp', 'lh',
    'm', 'b', 'bs', 's', 'ss', 'ng', 'j', 'c',
    'k', 't', 'p', 'h',
)

# This 256-entry page starts 4 codes into the 'syo' block (which begins
# on the previous page) and ends 8 codes into the 'ssa' block (which
# finishes on the next page).  Eight stems are covered in full.
_FULL_STEMS = ('su', 'sweo', 'swe', 'swi', 'syu', 'seu', 'syi', 'si')

data = tuple(
    ['syo' + coda for coda in _CODAS[4:]] +            # 0x00 - 0x17
    [stem + coda for stem in _FULL_STEMS
                 for coda in _CODAS] +                 # 0x18 - 0xf7
    ['ssa' + coda for coda in _CODAS[:8]]              # 0xf8 - 0xff
)
gpl-3.0
sweimer/ICFID8
docroot/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py
1284
100329
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Ninja generator for gyp: translates gyp targets into .ninja build files.
# (Python 2 only: note the cStringIO import below.)

import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO

from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax

generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_PREFIX': 'lib',

  # Gyp expects the following variables to be expandable by the build
  # system to the appropriate locations.  Ninja prefers paths to be
  # known at gyp time.  To resolve this, introduce special
  # variables starting with $! and $| (which begin with a $ so gyp knows it
  # should be treated specially, but is otherwise an invalid
  # ninja/shell variable) that are passed to gyp here but expanded
  # before writing out into the target .ninja files; see
  # ExpandSpecial.
  # $! is used for variables that represent a path and that can only appear at
  # the start of a string, while $| is used for variables that can appear
  # anywhere in a string.
  'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
  'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
  'PRODUCT_DIR': '$!PRODUCT_DIR',
  'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',

  # Special variables that may be used by gyp 'rule' targets.
  # We generate definitions for these variables on the fly when processing a
  # rule.
  'RULE_INPUT_ROOT': '${root}',
  'RULE_INPUT_DIRNAME': '${dirname}',
  'RULE_INPUT_PATH': '${source}',
  'RULE_INPUT_EXT': '${ext}',
  'RULE_INPUT_NAME': '${name}',
}

# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None

generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()


def StripPrefix(arg, prefix):
  """Return |arg| with a leading |prefix| removed, or |arg| unchanged."""
  if arg.startswith(prefix):
    return arg[len(prefix):]
  return arg


def QuoteShellArgument(arg, flavor):
  """Quote a string such that it will be interpreted as a single argument
  by the shell."""
  # Rather than attempting to enumerate the bad shell characters, just
  # whitelist common OK ones and quote anything else.
  if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
    return arg  # No quoting necessary.
  if flavor == 'win':
    return gyp.msvs_emulation.QuoteForRspFile(arg)
  return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"


def Define(d, flavor):
  """Takes a preprocessor define and returns a -D parameter that's ninja- and
  shell-escaped."""
  if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocesor definitions for
    # some reason. Octal-encode to work around that.
    d = d.replace('#', '\\%03o' % ord('#'))
  return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)


def AddArch(output, arch):
  """Adds an arch string to an output path."""
  output, extension = os.path.splitext(output)
  return '%s.%s%s' % (output, arch, extension)


class Target(object):
  """Target represents the paths used within a single gyp target.

  Conceptually, building a single target A is a series of steps:

  1) actions/rules/copies  generates source/resources/etc.
  2) compiles              generates .o files
  3) link                  generates a binary (library/executable)
  4) bundle                merges the above in a mac bundle

  (Any of these steps can be optional.)

  From a build ordering perspective, a dependent target B could just
  depend on the last output of this series of steps.  But some dependent
  commands sometimes need to reach inside the box.  For example, when
  linking B it needs to get the path to the static library generated by A.

  This object stores those paths.  To keep things simple, member
  variables only store concrete paths to single files, while methods
  compute derived values like "the last output of the target".
  """
  def __init__(self, type):
    # Gyp type ("static_library", etc.) of this target.
    self.type = type
    # File representing whether any input dependencies necessary for
    # dependent actions have completed.
    self.preaction_stamp = None
    # File representing whether any input dependencies necessary for
    # dependent compiles have completed.
    self.precompile_stamp = None
    # File representing the completion of actions/rules/copies, if any.
    self.actions_stamp = None
    # Path to the output of the link step, if any.
    self.binary = None
    # Path to the file representing the completion of building the bundle,
    # if any.
    self.bundle = None
    # On Windows, incremental linking requires linking against all the .objs
    # that compose a .lib (rather than the .lib itself). That list is stored
    # here. In this case, we also need to save the compile_deps for the target,
    # so that the target that directly depends on the .objs can also depend
    # on those.
    self.component_objs = None
    self.compile_deps = None
    # Windows only. The import .lib is the output of a build step, but
    # because dependents only link against the lib (not both the lib and the
    # dll) we keep track of the import library here.
    self.import_lib = None

  def Linkable(self):
    """Return true if this is a target that can be linked against."""
    return self.type in ('static_library', 'shared_library')

  def UsesToc(self, flavor):
    """Return true if the target should produce a restat rule based on a TOC
    file."""
    # For bundles, the .TOC should be produced for the binary, not for
    # FinalOutput(). But the naive approach would put the TOC file into the
    # bundle, so don't do this for bundles for now.
    if flavor == 'win' or self.bundle:
      return False
    return self.type in ('shared_library', 'loadable_module')

  def PreActionInput(self, flavor):
    """Return the path, if any, that should be used as a dependency of
    any dependent action step."""
    if self.UsesToc(flavor):
      return self.FinalOutput() + '.TOC'
    return self.FinalOutput() or self.preaction_stamp

  def PreCompileInput(self):
    """Return the path, if any, that should be used as a dependency of
    any dependent compile step."""
    return self.actions_stamp or self.precompile_stamp

  def FinalOutput(self):
    """Return the last output of the target, which depends on all prior
    steps."""
    return self.bundle or self.binary or self.actions_stamp


# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file.  Call these "gyp paths".  This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from.  We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory.  Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
#   into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
#   an output file; the result can be namespaced such that it is unique
#   to the input file name as well as the output target name.
class NinjaWriter(object): def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir, output_file, toplevel_build, output_file_name, flavor, toplevel_dir=None): """ base_dir: path from source root to directory containing this gyp file, by gyp semantics, all input paths are relative to this build_dir: path from source root to build output toplevel_dir: path to the toplevel directory """ self.hash_for_rules = hash_for_rules self.target_outputs = target_outputs self.base_dir = base_dir self.build_dir = build_dir self.ninja = ninja_syntax.Writer(output_file) self.toplevel_build = toplevel_build self.output_file_name = output_file_name self.flavor = flavor self.abs_build_dir = None if toplevel_dir is not None: self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir, build_dir)) self.obj_ext = '.obj' if flavor == 'win' else '.o' if flavor == 'win': # See docstring of msvs_emulation.GenerateEnvironmentFiles(). self.win_env = {} for arch in ('x86', 'x64'): self.win_env[arch] = 'environment.' + arch # Relative path from build output dir to base dir. build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir) self.build_to_base = os.path.join(build_to_top, base_dir) # Relative path from base dir to build dir. base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir) self.base_to_build = os.path.join(base_to_top, build_dir) def ExpandSpecial(self, path, product_dir=None): """Expand specials like $!PRODUCT_DIR in |path|. If |product_dir| is None, assumes the cwd is already the product dir. Otherwise, |product_dir| is the relative path to the product dir. 
""" PRODUCT_DIR = '$!PRODUCT_DIR' if PRODUCT_DIR in path: if product_dir: path = path.replace(PRODUCT_DIR, product_dir) else: path = path.replace(PRODUCT_DIR + '/', '') path = path.replace(PRODUCT_DIR + '\\', '') path = path.replace(PRODUCT_DIR, '.') INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR' if INTERMEDIATE_DIR in path: int_dir = self.GypPathToUniqueOutput('gen') # GypPathToUniqueOutput generates a path relative to the product dir, # so insert product_dir in front if it is provided. path = path.replace(INTERMEDIATE_DIR, os.path.join(product_dir or '', int_dir)) CONFIGURATION_NAME = '$|CONFIGURATION_NAME' path = path.replace(CONFIGURATION_NAME, self.config_name) return path def ExpandRuleVariables(self, path, root, dirname, source, ext, name): if self.flavor == 'win': path = self.msvs_settings.ConvertVSMacros( path, config=self.config_name) path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root) path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'], dirname) path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source) path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext) path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name) return path def GypPathToNinja(self, path, env=None): """Translate a gyp path to a ninja path, optionally expanding environment variable references in |path| with |env|. 
See the above discourse on path conversions.""" if env: if self.flavor == 'mac': path = gyp.xcode_emulation.ExpandEnvVars(path, env) elif self.flavor == 'win': path = gyp.msvs_emulation.ExpandMacros(path, env) if path.startswith('$!'): expanded = self.ExpandSpecial(path) if self.flavor == 'win': expanded = os.path.normpath(expanded) return expanded if '$|' in path: path = self.ExpandSpecial(path) assert '$' not in path, path return os.path.normpath(os.path.join(self.build_to_base, path)) def GypPathToUniqueOutput(self, path, qualified=True): """Translate a gyp path to a ninja path for writing output. If qualified is True, qualify the resulting filename with the name of the target. This is necessary when e.g. compiling the same path twice for two separate output targets. See the above discourse on path conversions.""" path = self.ExpandSpecial(path) assert not path.startswith('$'), path # Translate the path following this scheme: # Input: foo/bar.gyp, target targ, references baz/out.o # Output: obj/foo/baz/targ.out.o (if qualified) # obj/foo/baz/out.o (otherwise) # (and obj.host instead of obj for cross-compiles) # # Why this scheme and not some other one? # 1) for a given input, you can compute all derived outputs by matching # its path, even if the input is brought via a gyp file with '..'. # 2) simple files like libraries and stamps have a simple filename. obj = 'obj' if self.toolset != 'target': obj += '.' + self.toolset path_dir, path_basename = os.path.split(path) assert not os.path.isabs(path_dir), ( "'%s' can not be absolute path (see crbug.com/462153)." % path_dir) if qualified: path_basename = self.name + '.' + path_basename return os.path.normpath(os.path.join(obj, self.base_dir, path_dir, path_basename)) def WriteCollapsedDependencies(self, name, targets, order_only=None): """Given a list of targets, return a path for a single file representing the result of building all the targets or None. 
Uses a stamp file if necessary.""" assert targets == filter(None, targets), targets if len(targets) == 0: assert not order_only return None if len(targets) > 1 or order_only: stamp = self.GypPathToUniqueOutput(name + '.stamp') targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only) self.ninja.newline() return targets[0] def _SubninjaNameForArch(self, arch): output_file_base = os.path.splitext(self.output_file_name)[0] return '%s.%s.ninja' % (output_file_base, arch) def WriteSpec(self, spec, config_name, generator_flags): """The main entry point for NinjaWriter: write the build rules for a spec. Returns a Target object, which represents the output paths for this spec. Returns None if there are no outputs (e.g. a settings-only 'none' type target).""" self.config_name = config_name self.name = spec['target_name'] self.toolset = spec['toolset'] config = spec['configurations'][config_name] self.target = Target(spec['type']) self.is_standalone_static_library = bool( spec.get('standalone_static_library', 0)) # Track if this target contains any C++ files, to decide if gcc or g++ # should be used for linking. 
self.uses_cpp = False self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec) self.xcode_settings = self.msvs_settings = None if self.flavor == 'mac': self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec) if self.flavor == 'win': self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec, generator_flags) arch = self.msvs_settings.GetArch(config_name) self.ninja.variable('arch', self.win_env[arch]) self.ninja.variable('cc', '$cl_' + arch) self.ninja.variable('cxx', '$cl_' + arch) self.ninja.variable('cc_host', '$cl_' + arch) self.ninja.variable('cxx_host', '$cl_' + arch) self.ninja.variable('asm', '$ml_' + arch) if self.flavor == 'mac': self.archs = self.xcode_settings.GetActiveArchs(config_name) if len(self.archs) > 1: self.arch_subninjas = dict( (arch, ninja_syntax.Writer( OpenOutput(os.path.join(self.toplevel_build, self._SubninjaNameForArch(arch)), 'w'))) for arch in self.archs) # Compute predepends for all rules. # actions_depends is the dependencies this target depends on before running # any of its action/rule/copy steps. # compile_depends is the dependencies this target depends on before running # any of its compile steps. actions_depends = [] compile_depends = [] # TODO(evan): it is rather confusing which things are lists and which # are strings. Fix these. if 'dependencies' in spec: for dep in spec['dependencies']: if dep in self.target_outputs: target = self.target_outputs[dep] actions_depends.append(target.PreActionInput(self.flavor)) compile_depends.append(target.PreCompileInput()) actions_depends = filter(None, actions_depends) compile_depends = filter(None, compile_depends) actions_depends = self.WriteCollapsedDependencies('actions_depends', actions_depends) compile_depends = self.WriteCollapsedDependencies('compile_depends', compile_depends) self.target.preaction_stamp = actions_depends self.target.precompile_stamp = compile_depends # Write out actions, rules, and copies. 
These must happen before we # compile any sources, so compute a list of predependencies for sources # while we do it. extra_sources = [] mac_bundle_depends = [] self.target.actions_stamp = self.WriteActionsRulesCopies( spec, extra_sources, actions_depends, mac_bundle_depends) # If we have actions/rules/copies, we depend directly on those, but # otherwise we depend on dependent target's actions/rules/copies etc. # We never need to explicitly depend on previous target's link steps, # because no compile ever depends on them. compile_depends_stamp = (self.target.actions_stamp or compile_depends) # Write out the compilation steps, if any. link_deps = [] sources = extra_sources + spec.get('sources', []) if sources: if self.flavor == 'mac' and len(self.archs) > 1: # Write subninja file containing compile and link commands scoped to # a single arch if a fat binary is being built. for arch in self.archs: self.ninja.subninja(self._SubninjaNameForArch(arch)) pch = None if self.flavor == 'win': gyp.msvs_emulation.VerifyMissingSources( sources, self.abs_build_dir, generator_flags, self.GypPathToNinja) pch = gyp.msvs_emulation.PrecompiledHeader( self.msvs_settings, config_name, self.GypPathToNinja, self.GypPathToUniqueOutput, self.obj_ext) else: pch = gyp.xcode_emulation.MacPrefixHeader( self.xcode_settings, self.GypPathToNinja, lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang)) link_deps = self.WriteSources( self.ninja, config_name, config, sources, compile_depends_stamp, pch, spec) # Some actions/rules output 'sources' that are already object files. obj_outputs = [f for f in sources if f.endswith(self.obj_ext)] if obj_outputs: if self.flavor != 'mac' or len(self.archs) == 1: link_deps += [self.GypPathToNinja(o) for o in obj_outputs] else: print "Warning: Actions/rules writing object files don't work with " \ "multiarch targets, dropping. 
(target %s)" % spec['target_name'] elif self.flavor == 'mac' and len(self.archs) > 1: link_deps = collections.defaultdict(list) compile_deps = self.target.actions_stamp or actions_depends if self.flavor == 'win' and self.target.type == 'static_library': self.target.component_objs = link_deps self.target.compile_deps = compile_deps # Write out a link step, if needed. output = None is_empty_bundle = not link_deps and not mac_bundle_depends if link_deps or self.target.actions_stamp or actions_depends: output = self.WriteTarget(spec, config_name, config, link_deps, compile_deps) if self.is_mac_bundle: mac_bundle_depends.append(output) # Bundle all of the above together, if needed. if self.is_mac_bundle: output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle) if not output: return None assert self.target.FinalOutput(), output return self.target def _WinIdlRule(self, source, prebuild, outputs): """Handle the implicit VS .idl rule for one source file. Fills |outputs| with files that are generated.""" outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData( source, self.config_name) outdir = self.GypPathToNinja(outdir) def fix_path(path, rel=None): path = os.path.join(outdir, path) dirname, basename = os.path.split(source) root, ext = os.path.splitext(basename) path = self.ExpandRuleVariables( path, root, dirname, source, ext, basename) if rel: path = os.path.relpath(path, rel) return path vars = [(name, fix_path(value, outdir)) for name, value in vars] output = [fix_path(p) for p in output] vars.append(('outdir', outdir)) vars.append(('idlflags', flags)) input = self.GypPathToNinja(source) self.ninja.build(output, 'idl', input, variables=vars, order_only=prebuild) outputs.extend(output) def WriteWinIdlFiles(self, spec, prebuild): """Writes rules to match MSVS's implicit idl handling.""" assert self.flavor == 'win' if self.msvs_settings.HasExplicitIdlRulesOrActions(spec): return [] outputs = [] for source in filter(lambda x: x.endswith('.idl'), 
spec['sources']): self._WinIdlRule(source, prebuild, outputs) return outputs def WriteActionsRulesCopies(self, spec, extra_sources, prebuild, mac_bundle_depends): """Write out the Actions, Rules, and Copies steps. Return a path representing the outputs of these steps.""" outputs = [] if self.is_mac_bundle: mac_bundle_resources = spec.get('mac_bundle_resources', [])[:] else: mac_bundle_resources = [] extra_mac_bundle_resources = [] if 'actions' in spec: outputs += self.WriteActions(spec['actions'], extra_sources, prebuild, extra_mac_bundle_resources) if 'rules' in spec: outputs += self.WriteRules(spec['rules'], extra_sources, prebuild, mac_bundle_resources, extra_mac_bundle_resources) if 'copies' in spec: outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends) if 'sources' in spec and self.flavor == 'win': outputs += self.WriteWinIdlFiles(spec, prebuild) stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs) if self.is_mac_bundle: xcassets = self.WriteMacBundleResources( extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends) partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends) self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends) return stamp def GenerateDescription(self, verb, message, fallback): """Generate and return a description of a build step. |verb| is the short summary, e.g. ACTION or RULE. |message| is a hand-written description, or None if not available. |fallback| is the gyp-level name of the step, usable as a fallback. """ if self.toolset != 'target': verb += '(%s)' % self.toolset if message: return '%s %s' % (verb, self.ExpandSpecial(message)) else: return '%s %s: %s' % (verb, self.name, fallback) def WriteActions(self, actions, extra_sources, prebuild, extra_mac_bundle_resources): # Actions cd into the base directory. env = self.GetToolchainEnv() all_outputs = [] for action in actions: # First write out a rule for the action. 
name = '%s_%s' % (action['action_name'], self.hash_for_rules) description = self.GenerateDescription('ACTION', action.get('message', None), name) is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action) if self.flavor == 'win' else False) args = action['action'] depfile = action.get('depfile', None) if depfile: depfile = self.ExpandSpecial(depfile, self.base_to_build) pool = 'console' if int(action.get('ninja_use_console', 0)) else None rule_name, _ = self.WriteNewNinjaRule(name, args, description, is_cygwin, env, pool, depfile=depfile) inputs = [self.GypPathToNinja(i, env) for i in action['inputs']] if int(action.get('process_outputs_as_sources', False)): extra_sources += action['outputs'] if int(action.get('process_outputs_as_mac_bundle_resources', False)): extra_mac_bundle_resources += action['outputs'] outputs = [self.GypPathToNinja(o, env) for o in action['outputs']] # Then write out an edge using the rule. self.ninja.build(outputs, rule_name, inputs, order_only=prebuild) all_outputs += outputs self.ninja.newline() return all_outputs def WriteRules(self, rules, extra_sources, prebuild, mac_bundle_resources, extra_mac_bundle_resources): env = self.GetToolchainEnv() all_outputs = [] for rule in rules: # Skip a rule with no action and no inputs. if 'action' not in rule and not rule.get('rule_sources', []): continue # First write out a rule for the rule action. name = '%s_%s' % (rule['rule_name'], self.hash_for_rules) args = rule['action'] description = self.GenerateDescription( 'RULE', rule.get('message', None), ('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name) is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule) if self.flavor == 'win' else False) pool = 'console' if int(rule.get('ninja_use_console', 0)) else None rule_name, args = self.WriteNewNinjaRule( name, args, description, is_cygwin, env, pool) # TODO: if the command references the outputs directly, we should # simplify it to just use $out. 
# Rules can potentially make use of some special variables which # must vary per source file. # Compute the list of variables we'll need to provide. special_locals = ('source', 'root', 'dirname', 'ext', 'name') needed_variables = set(['source']) for argument in args: for var in special_locals: if '${%s}' % var in argument: needed_variables.add(var) def cygwin_munge(path): # pylint: disable=cell-var-from-loop if is_cygwin: return path.replace('\\', '/') return path inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])] # If there are n source files matching the rule, and m additional rule # inputs, then adding 'inputs' to each build edge written below will # write m * n inputs. Collapsing reduces this to m + n. sources = rule.get('rule_sources', []) num_inputs = len(inputs) if prebuild: num_inputs += 1 if num_inputs > 2 and len(sources) > 2: inputs = [self.WriteCollapsedDependencies( rule['rule_name'], inputs, order_only=prebuild)] prebuild = [] # For each source file, write an edge that generates all the outputs. for source in sources: source = os.path.normpath(source) dirname, basename = os.path.split(source) root, ext = os.path.splitext(basename) # Gather the list of inputs and outputs, expanding $vars if possible. outputs = [self.ExpandRuleVariables(o, root, dirname, source, ext, basename) for o in rule['outputs']] if int(rule.get('process_outputs_as_sources', False)): extra_sources += outputs was_mac_bundle_resource = source in mac_bundle_resources if was_mac_bundle_resource or \ int(rule.get('process_outputs_as_mac_bundle_resources', False)): extra_mac_bundle_resources += outputs # Note: This is n_resources * n_outputs_in_rule. Put to-be-removed # items in a set and remove them all in a single pass if this becomes # a performance issue. 
if was_mac_bundle_resource: mac_bundle_resources.remove(source) extra_bindings = [] for var in needed_variables: if var == 'root': extra_bindings.append(('root', cygwin_munge(root))) elif var == 'dirname': # '$dirname' is a parameter to the rule action, which means # it shouldn't be converted to a Ninja path. But we don't # want $!PRODUCT_DIR in there either. dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build) extra_bindings.append(('dirname', cygwin_munge(dirname_expanded))) elif var == 'source': # '$source' is a parameter to the rule action, which means # it shouldn't be converted to a Ninja path. But we don't # want $!PRODUCT_DIR in there either. source_expanded = self.ExpandSpecial(source, self.base_to_build) extra_bindings.append(('source', cygwin_munge(source_expanded))) elif var == 'ext': extra_bindings.append(('ext', ext)) elif var == 'name': extra_bindings.append(('name', cygwin_munge(basename))) else: assert var == None, repr(var) outputs = [self.GypPathToNinja(o, env) for o in outputs] if self.flavor == 'win': # WriteNewNinjaRule uses unique_name for creating an rsp file on win. extra_bindings.append(('unique_name', hashlib.md5(outputs[0]).hexdigest())) self.ninja.build(outputs, rule_name, self.GypPathToNinja(source), implicit=inputs, order_only=prebuild, variables=extra_bindings) all_outputs.extend(outputs) return all_outputs def WriteCopies(self, copies, prebuild, mac_bundle_depends): outputs = [] env = self.GetToolchainEnv() for copy in copies: for path in copy['files']: # Normalize the path so trailing slashes don't confuse us. 
path = os.path.normpath(path) basename = os.path.split(path)[1] src = self.GypPathToNinja(path, env) dst = self.GypPathToNinja(os.path.join(copy['destination'], basename), env) outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild) if self.is_mac_bundle: # gyp has mac_bundle_resources to copy things into a bundle's # Resources folder, but there's no built-in way to copy files to other # places in the bundle. Hence, some targets use copies for this. Check # if this file is copied into the current bundle, and if so add it to # the bundle depends so that dependent targets get rebuilt if the copy # input changes. if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()): mac_bundle_depends.append(dst) return outputs def WriteMacBundleResources(self, resources, bundle_depends): """Writes ninja edges for 'mac_bundle_resources'.""" xcassets = [] for output, res in gyp.xcode_emulation.GetMacBundleResources( generator_default_variables['PRODUCT_DIR'], self.xcode_settings, map(self.GypPathToNinja, resources)): output = self.ExpandSpecial(output) if os.path.splitext(output)[-1] != '.xcassets': isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name) self.ninja.build(output, 'mac_tool', res, variables=[('mactool_cmd', 'copy-bundle-resource'), \ ('binary', isBinary)]) bundle_depends.append(output) else: xcassets.append(res) return xcassets def WriteMacXCassets(self, xcassets, bundle_depends): """Writes ninja edges for 'mac_bundle_resources' .xcassets files. This add an invocation of 'actool' via the 'mac_tool.py' helper script. It assumes that the assets catalogs define at least one imageset and thus an Assets.car file will be generated in the application resources directory. 
If this is not the case, then the build will probably be done at each invocation of ninja.""" if not xcassets: return extra_arguments = {} settings_to_arg = { 'XCASSETS_APP_ICON': 'app-icon', 'XCASSETS_LAUNCH_IMAGE': 'launch-image', } settings = self.xcode_settings.xcode_settings[self.config_name] for settings_key, arg_name in settings_to_arg.iteritems(): value = settings.get(settings_key) if value: extra_arguments[arg_name] = value partial_info_plist = None if extra_arguments: partial_info_plist = self.GypPathToUniqueOutput( 'assetcatalog_generated_info.plist') extra_arguments['output-partial-info-plist'] = partial_info_plist outputs = [] outputs.append( os.path.join( self.xcode_settings.GetBundleResourceFolder(), 'Assets.car')) if partial_info_plist: outputs.append(partial_info_plist) keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor) extra_env = self.xcode_settings.GetPerTargetSettings() env = self.GetSortedXcodeEnv(additional_settings=extra_env) env = self.ComputeExportEnvString(env) bundle_depends.extend(self.ninja.build( outputs, 'compile_xcassets', xcassets, variables=[('env', env), ('keys', keys)])) return partial_info_plist def WriteMacInfoPlist(self, partial_info_plist, bundle_depends): """Write build rules for bundle Info.plist files.""" info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist( generator_default_variables['PRODUCT_DIR'], self.xcode_settings, self.GypPathToNinja) if not info_plist: return out = self.ExpandSpecial(out) if defines: # Create an intermediate file to store preprocessed results. 
intermediate_plist = self.GypPathToUniqueOutput( os.path.basename(info_plist)) defines = ' '.join([Define(d, self.flavor) for d in defines]) info_plist = self.ninja.build( intermediate_plist, 'preprocess_infoplist', info_plist, variables=[('defines',defines)]) env = self.GetSortedXcodeEnv(additional_settings=extra_env) env = self.ComputeExportEnvString(env) if partial_info_plist: intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist') info_plist = self.ninja.build( intermediate_plist, 'merge_infoplist', [partial_info_plist, info_plist]) keys = self.xcode_settings.GetExtraPlistItems(self.config_name) keys = QuoteShellArgument(json.dumps(keys), self.flavor) isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name) self.ninja.build(out, 'copy_infoplist', info_plist, variables=[('env', env), ('keys', keys), ('binary', isBinary)]) bundle_depends.append(out) def WriteSources(self, ninja_file, config_name, config, sources, predepends, precompiled_header, spec): """Write build rules to compile all of |sources|.""" if self.toolset == 'host': self.ninja.variable('ar', '$ar_host') self.ninja.variable('cc', '$cc_host') self.ninja.variable('cxx', '$cxx_host') self.ninja.variable('ld', '$ld_host') self.ninja.variable('ldxx', '$ldxx_host') self.ninja.variable('nm', '$nm_host') self.ninja.variable('readelf', '$readelf_host') if self.flavor != 'mac' or len(self.archs) == 1: return self.WriteSourcesForArch( self.ninja, config_name, config, sources, predepends, precompiled_header, spec) else: return dict((arch, self.WriteSourcesForArch( self.arch_subninjas[arch], config_name, config, sources, predepends, precompiled_header, spec, arch=arch)) for arch in self.archs) def WriteSourcesForArch(self, ninja_file, config_name, config, sources, predepends, precompiled_header, spec, arch=None): """Write build rules to compile all of |sources|.""" extra_defines = [] if self.flavor == 'mac': cflags = self.xcode_settings.GetCflags(config_name, arch=arch) cflags_c = 
self.xcode_settings.GetCflagsC(config_name) cflags_cc = self.xcode_settings.GetCflagsCC(config_name) cflags_objc = ['$cflags_c'] + \ self.xcode_settings.GetCflagsObjC(config_name) cflags_objcc = ['$cflags_cc'] + \ self.xcode_settings.GetCflagsObjCC(config_name) elif self.flavor == 'win': asmflags = self.msvs_settings.GetAsmflags(config_name) cflags = self.msvs_settings.GetCflags(config_name) cflags_c = self.msvs_settings.GetCflagsC(config_name) cflags_cc = self.msvs_settings.GetCflagsCC(config_name) extra_defines = self.msvs_settings.GetComputedDefines(config_name) # See comment at cc_command for why there's two .pdb files. pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName( config_name, self.ExpandSpecial) if not pdbpath_c: obj = 'obj' if self.toolset != 'target': obj += '.' + self.toolset pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name)) pdbpath_c = pdbpath + '.c.pdb' pdbpath_cc = pdbpath + '.cc.pdb' self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c]) self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc]) self.WriteVariableList(ninja_file, 'pchprefix', [self.name]) else: cflags = config.get('cflags', []) cflags_c = config.get('cflags_c', []) cflags_cc = config.get('cflags_cc', []) # Respect environment variables related to build, but target-specific # flags can still override them. 
if self.toolset == 'target': cflags_c = (os.environ.get('CPPFLAGS', '').split() + os.environ.get('CFLAGS', '').split() + cflags_c) cflags_cc = (os.environ.get('CPPFLAGS', '').split() + os.environ.get('CXXFLAGS', '').split() + cflags_cc) elif self.toolset == 'host': cflags_c = (os.environ.get('CPPFLAGS_host', '').split() + os.environ.get('CFLAGS_host', '').split() + cflags_c) cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() + os.environ.get('CXXFLAGS_host', '').split() + cflags_cc) defines = config.get('defines', []) + extra_defines self.WriteVariableList(ninja_file, 'defines', [Define(d, self.flavor) for d in defines]) if self.flavor == 'win': self.WriteVariableList(ninja_file, 'asmflags', map(self.ExpandSpecial, asmflags)) self.WriteVariableList(ninja_file, 'rcflags', [QuoteShellArgument(self.ExpandSpecial(f), self.flavor) for f in self.msvs_settings.GetRcflags(config_name, self.GypPathToNinja)]) include_dirs = config.get('include_dirs', []) env = self.GetToolchainEnv() if self.flavor == 'win': include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs, config_name) self.WriteVariableList(ninja_file, 'includes', [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor) for i in include_dirs]) if self.flavor == 'win': midl_include_dirs = config.get('midl_include_dirs', []) midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs( midl_include_dirs, config_name) self.WriteVariableList(ninja_file, 'midl_includes', [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor) for i in midl_include_dirs]) pch_commands = precompiled_header.GetPchBuildCommands(arch) if self.flavor == 'mac': # Most targets use no precompiled headers, so only write these if needed. 
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'), ('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]: include = precompiled_header.GetInclude(ext, arch) if include: ninja_file.variable(var, include) arflags = config.get('arflags', []) self.WriteVariableList(ninja_file, 'cflags', map(self.ExpandSpecial, cflags)) self.WriteVariableList(ninja_file, 'cflags_c', map(self.ExpandSpecial, cflags_c)) self.WriteVariableList(ninja_file, 'cflags_cc', map(self.ExpandSpecial, cflags_cc)) if self.flavor == 'mac': self.WriteVariableList(ninja_file, 'cflags_objc', map(self.ExpandSpecial, cflags_objc)) self.WriteVariableList(ninja_file, 'cflags_objcc', map(self.ExpandSpecial, cflags_objcc)) self.WriteVariableList(ninja_file, 'arflags', map(self.ExpandSpecial, arflags)) ninja_file.newline() outputs = [] has_rc_source = False for source in sources: filename, ext = os.path.splitext(source) ext = ext[1:] obj_ext = self.obj_ext if ext in ('cc', 'cpp', 'cxx'): command = 'cxx' self.uses_cpp = True elif ext == 'c' or (ext == 'S' and self.flavor != 'win'): command = 'cc' elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files. command = 'cc_s' elif (self.flavor == 'win' and ext == 'asm' and not self.msvs_settings.HasExplicitAsmRules(spec)): command = 'asm' # Add the _asm suffix as msvs is capable of handling .cc and # .asm files of the same name without collision. obj_ext = '_asm.obj' elif self.flavor == 'mac' and ext == 'm': command = 'objc' elif self.flavor == 'mac' and ext == 'mm': command = 'objcxx' self.uses_cpp = True elif self.flavor == 'win' and ext == 'rc': command = 'rc' obj_ext = '.res' has_rc_source = True else: # Ignore unhandled extensions. 
continue input = self.GypPathToNinja(source) output = self.GypPathToUniqueOutput(filename + obj_ext) if arch is not None: output = AddArch(output, arch) implicit = precompiled_header.GetObjDependencies([input], [output], arch) variables = [] if self.flavor == 'win': variables, output, implicit = precompiled_header.GetFlagsModifications( input, output, implicit, command, cflags_c, cflags_cc, self.ExpandSpecial) ninja_file.build(output, command, input, implicit=[gch for _, _, gch in implicit], order_only=predepends, variables=variables) outputs.append(output) if has_rc_source: resource_include_dirs = config.get('resource_include_dirs', include_dirs) self.WriteVariableList(ninja_file, 'resource_includes', [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor) for i in resource_include_dirs]) self.WritePchTargets(ninja_file, pch_commands) ninja_file.newline() return outputs def WritePchTargets(self, ninja_file, pch_commands): """Writes ninja rules to compile prefix headers.""" if not pch_commands: return for gch, lang_flag, lang, input in pch_commands: var_name = { 'c': 'cflags_pch_c', 'cc': 'cflags_pch_cc', 'm': 'cflags_pch_objc', 'mm': 'cflags_pch_objcc', }[lang] map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', } cmd = map.get(lang) ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)]) def WriteLink(self, spec, config_name, config, link_deps): """Write out a link step. Fills out target.binary. 
""" if self.flavor != 'mac' or len(self.archs) == 1: return self.WriteLinkForArch( self.ninja, spec, config_name, config, link_deps) else: output = self.ComputeOutput(spec) inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec, config_name, config, link_deps[arch], arch=arch) for arch in self.archs] extra_bindings = [] build_output = output if not self.is_mac_bundle: self.AppendPostbuildVariable(extra_bindings, spec, output, output) # TODO(yyanagisawa): more work needed to fix: # https://code.google.com/p/gyp/issues/detail?id=411 if (spec['type'] in ('shared_library', 'loadable_module') and not self.is_mac_bundle): extra_bindings.append(('lib', output)) self.ninja.build([output, output + '.TOC'], 'solipo', inputs, variables=extra_bindings) else: self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings) return output def WriteLinkForArch(self, ninja_file, spec, config_name, config, link_deps, arch=None): """Write out a link step. Fills out target.binary. """ command = { 'executable': 'link', 'loadable_module': 'solink_module', 'shared_library': 'solink', }[spec['type']] command_suffix = '' implicit_deps = set() solibs = set() order_deps = set() if 'dependencies' in spec: # Two kinds of dependencies: # - Linkable dependencies (like a .a or a .so): add them to the link line. 
# - Non-linkable dependencies (like a rule that generates a file # and writes a stamp file): add them to implicit_deps extra_link_deps = set() for dep in spec['dependencies']: target = self.target_outputs.get(dep) if not target: continue linkable = target.Linkable() if linkable: new_deps = [] if (self.flavor == 'win' and target.component_objs and self.msvs_settings.IsUseLibraryDependencyInputs(config_name)): new_deps = target.component_objs if target.compile_deps: order_deps.add(target.compile_deps) elif self.flavor == 'win' and target.import_lib: new_deps = [target.import_lib] elif target.UsesToc(self.flavor): solibs.add(target.binary) implicit_deps.add(target.binary + '.TOC') else: new_deps = [target.binary] for new_dep in new_deps: if new_dep not in extra_link_deps: extra_link_deps.add(new_dep) link_deps.append(new_dep) final_output = target.FinalOutput() if not linkable or final_output != target.binary: implicit_deps.add(final_output) extra_bindings = [] if self.uses_cpp and self.flavor != 'win': extra_bindings.append(('ld', '$ldxx')) output = self.ComputeOutput(spec, arch) if arch is None and not self.is_mac_bundle: self.AppendPostbuildVariable(extra_bindings, spec, output, output) is_executable = spec['type'] == 'executable' # The ldflags config key is not used on mac or win. On those platforms # linker flags are set via xcode_settings and msvs_settings, respectively. 
env_ldflags = os.environ.get('LDFLAGS', '').split() if self.flavor == 'mac': ldflags = self.xcode_settings.GetLdflags(config_name, self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']), self.GypPathToNinja, arch) ldflags = env_ldflags + ldflags elif self.flavor == 'win': manifest_base_name = self.GypPathToUniqueOutput( self.ComputeOutputFileName(spec)) ldflags, intermediate_manifest, manifest_files = \ self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja, self.ExpandSpecial, manifest_base_name, output, is_executable, self.toplevel_build) ldflags = env_ldflags + ldflags self.WriteVariableList(ninja_file, 'manifests', manifest_files) implicit_deps = implicit_deps.union(manifest_files) if intermediate_manifest: self.WriteVariableList( ninja_file, 'intermediatemanifest', [intermediate_manifest]) command_suffix = _GetWinLinkRuleNameSuffix( self.msvs_settings.IsEmbedManifest(config_name)) def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja) if def_file: implicit_deps.add(def_file) else: # Respect environment variables related to build, but target-specific # flags can still override them. 
ldflags = env_ldflags + config.get('ldflags', []) if is_executable and len(solibs): rpath = 'lib/' if self.toolset != 'target': rpath += self.toolset ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath) ldflags.append('-Wl,-rpath-link=%s' % rpath) self.WriteVariableList(ninja_file, 'ldflags', map(self.ExpandSpecial, ldflags)) library_dirs = config.get('library_dirs', []) if self.flavor == 'win': library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name) for l in library_dirs] library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l), self.flavor) for l in library_dirs] else: library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l), self.flavor) for l in library_dirs] libraries = gyp.common.uniquer(map(self.ExpandSpecial, spec.get('libraries', []))) if self.flavor == 'mac': libraries = self.xcode_settings.AdjustLibraries(libraries, config_name) elif self.flavor == 'win': libraries = self.msvs_settings.AdjustLibraries(libraries) self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries) linked_binary = output if command in ('solink', 'solink_module'): extra_bindings.append(('soname', os.path.split(output)[1])) extra_bindings.append(('lib', gyp.common.EncodePOSIXShellArgument(output))) if self.flavor != 'win': link_file_list = output if self.is_mac_bundle: # 'Dependency Framework.framework/Versions/A/Dependency Framework' -> # 'Dependency Framework.framework.rsp' link_file_list = self.xcode_settings.GetWrapperName() if arch: link_file_list += '.' 
+ arch link_file_list += '.rsp' # If an rspfile contains spaces, ninja surrounds the filename with # quotes around it and then passes it to open(), creating a file with # quotes in its name (and when looking for the rsp file, the name # makes it through bash which strips the quotes) :-/ link_file_list = link_file_list.replace(' ', '_') extra_bindings.append( ('link_file_list', gyp.common.EncodePOSIXShellArgument(link_file_list))) if self.flavor == 'win': extra_bindings.append(('binary', output)) if ('/NOENTRY' not in ldflags and not self.msvs_settings.GetNoImportLibrary(config_name)): self.target.import_lib = output + '.lib' extra_bindings.append(('implibflag', '/IMPLIB:%s' % self.target.import_lib)) pdbname = self.msvs_settings.GetPDBName( config_name, self.ExpandSpecial, output + '.pdb') output = [output, self.target.import_lib] if pdbname: output.append(pdbname) elif not self.is_mac_bundle: output = [output, output + '.TOC'] else: command = command + '_notoc' elif self.flavor == 'win': extra_bindings.append(('binary', output)) pdbname = self.msvs_settings.GetPDBName( config_name, self.ExpandSpecial, output + '.pdb') if pdbname: output = [output, pdbname] if len(solibs): extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs))) ninja_file.build(output, command + command_suffix, link_deps, implicit=list(implicit_deps), order_only=list(order_deps), variables=extra_bindings) return linked_binary def WriteTarget(self, spec, config_name, config, link_deps, compile_deps): extra_link_deps = any(self.target_outputs.get(dep).Linkable() for dep in spec.get('dependencies', []) if dep in self.target_outputs) if spec['type'] == 'none' or (not link_deps and not extra_link_deps): # TODO(evan): don't call this function for 'none' target types, as # it doesn't do anything, and we fake out a 'binary' with a stamp file. 
self.target.binary = compile_deps self.target.type = 'none' elif spec['type'] == 'static_library': self.target.binary = self.ComputeOutput(spec) if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not self.is_standalone_static_library): self.ninja.build(self.target.binary, 'alink_thin', link_deps, order_only=compile_deps) else: variables = [] if self.xcode_settings: libtool_flags = self.xcode_settings.GetLibtoolflags(config_name) if libtool_flags: variables.append(('libtool_flags', libtool_flags)) if self.msvs_settings: libflags = self.msvs_settings.GetLibFlags(config_name, self.GypPathToNinja) variables.append(('libflags', libflags)) if self.flavor != 'mac' or len(self.archs) == 1: self.AppendPostbuildVariable(variables, spec, self.target.binary, self.target.binary) self.ninja.build(self.target.binary, 'alink', link_deps, order_only=compile_deps, variables=variables) else: inputs = [] for arch in self.archs: output = self.ComputeOutput(spec, arch) self.arch_subninjas[arch].build(output, 'alink', link_deps[arch], order_only=compile_deps, variables=variables) inputs.append(output) # TODO: It's not clear if libtool_flags should be passed to the alink # call that combines single-arch .a files into a fat .a file. self.AppendPostbuildVariable(variables, spec, self.target.binary, self.target.binary) self.ninja.build(self.target.binary, 'alink', inputs, # FIXME: test proving order_only=compile_deps isn't # needed. 
                           variables=variables)
    else:
      self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
    return self.target.binary

  def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
    """Write the final packaging step for a mac bundle target.

    Returns the bundle output path (a '.stamp' path when the bundle would
    otherwise be empty)."""
    assert self.is_mac_bundle
    package_framework = spec['type'] in ('shared_library', 'loadable_module')
    output = self.ComputeMacBundleOutput()
    if is_empty:
      output += '.stamp'
    variables = []
    self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
                                 is_command_start=not package_framework)
    if package_framework and not is_empty:
      # Frameworks additionally need their Versions/Current symlink layout.
      variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
      self.ninja.build(output, 'package_framework', mac_bundle_depends,
                       variables=variables)
    else:
      self.ninja.build(output, 'stamp', mac_bundle_depends,
                       variables=variables)
    self.target.bundle = output
    return output

  def GetToolchainEnv(self, additional_settings=None):
    """Returns the variables toolchain would set for build steps."""
    env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
    if self.flavor == 'win':
      env = self.GetMsvsToolchainEnv(
          additional_settings=additional_settings)
    return env

  def GetMsvsToolchainEnv(self, additional_settings=None):
    """Returns the variables Visual Studio would set for build steps."""
    return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
                                            config=self.config_name)

  def GetSortedXcodeEnv(self, additional_settings=None):
    """Returns the variables Xcode would set for build steps."""
    assert self.abs_build_dir
    abs_build_dir = self.abs_build_dir
    return gyp.xcode_emulation.GetSortedXcodeEnv(
        self.xcode_settings, abs_build_dir,
        os.path.join(abs_build_dir, self.build_to_base), self.config_name,
        additional_settings)

  def GetSortedXcodePostbuildEnv(self):
    """Returns the variables Xcode would set for postbuild steps."""
    postbuild_settings = {}
    # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
    # TODO(thakis): It would be nice to have some general mechanism instead.
    strip_save_file = self.xcode_settings.GetPerTargetSetting(
        'CHROMIUM_STRIP_SAVE_FILE')
    if strip_save_file:
      postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
    return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)

  def AppendPostbuildVariable(self, variables, spec, output, binary,
                              is_command_start=False):
    """Adds a 'postbuild' variable if there is a postbuild for |output|."""
    postbuild = self.GetPostbuildCommand(spec, output, binary,
                                         is_command_start)
    if postbuild:
      variables.append(('postbuilds', postbuild))

  def GetPostbuildCommand(self, spec, output, output_binary,
                          is_command_start):
    """Returns a shell command that runs all the postbuilds, and removes
    |output| if any of them fails. If |is_command_start| is False, then the
    returned string will start with ' && '."""
    if not self.xcode_settings or spec['type'] == 'none' or not output:
      return ''
    output = QuoteShellArgument(output, self.flavor)
    postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
    if output_binary is not None:
      postbuilds = self.xcode_settings.AddImplicitPostbuilds(
          self.config_name,
          os.path.normpath(os.path.join(self.base_to_build, output)),
          QuoteShellArgument(
              os.path.normpath(os.path.join(self.base_to_build,
                                            output_binary)),
              self.flavor),
          postbuilds, quiet=True)
    if not postbuilds:
      return ''
    # Postbuilds expect to be run in the gyp file's directory, so insert an
    # implicit postbuild to cd to there.
    postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
        ['cd', self.build_to_base]))
    env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
    # G will be non-null if any postbuild fails. Run all postbuilds in a
    # subshell.
    commands = env + ' (' + \
        ' && '.join([ninja_syntax.escape(command) for command in postbuilds])
    command_string = (commands + '); G=$$?; '
                      # Remove the final output if any postbuild failed.
                      '((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
    if is_command_start:
      return '(' + command_string + ' && '
    else:
      return '$ && (' + command_string

  def ComputeExportEnvString(self, env):
    """Given an environment, returns a string looking like
    'export FOO=foo; export BAR="${FOO} bar;' that exports |env| to the
    shell."""
    export_str = []
    for k, v in env:
      export_str.append('export %s=%s;' %
          (k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
    return ' '.join(export_str)

  def ComputeMacBundleOutput(self):
    """Return the 'output' (full output path) to a bundle output directory."""
    assert self.is_mac_bundle
    path = generator_default_variables['PRODUCT_DIR']
    return self.ExpandSpecial(
        os.path.join(path, self.xcode_settings.GetWrapperName()))

  def ComputeOutputFileName(self, spec, type=None):
    """Compute the filename of the final output for the current target."""
    if not type:
      type = spec['type']

    default_variables = copy.copy(generator_default_variables)
    CalculateVariables(default_variables, {'flavor': self.flavor})

    # Compute filename prefix: the product prefix, or a default for
    # the product type.
    DEFAULT_PREFIX = {
        'loadable_module': default_variables['SHARED_LIB_PREFIX'],
        'shared_library': default_variables['SHARED_LIB_PREFIX'],
        'static_library': default_variables['STATIC_LIB_PREFIX'],
        'executable': default_variables['EXECUTABLE_PREFIX'],
        }
    prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))

    # Compute filename extension: the product extension, or a default
    # for the product type.
    DEFAULT_EXTENSION = {
        'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
        'shared_library': default_variables['SHARED_LIB_SUFFIX'],
        'static_library': default_variables['STATIC_LIB_SUFFIX'],
        'executable': default_variables['EXECUTABLE_SUFFIX'],
        }
    extension = spec.get('product_extension')
    if extension:
      extension = '.' + extension
    else:
      extension = DEFAULT_EXTENSION.get(type, '')

    if 'product_name' in spec:
      # If we were given an explicit name, use that.
      target = spec['product_name']
    else:
      # Otherwise, derive a name from the target name.
      target = spec['target_name']
      if prefix == 'lib':
        # Snip out an extra 'lib' from libs if appropriate.
        target = StripPrefix(target, 'lib')

    if type in ('static_library', 'loadable_module', 'shared_library',
                'executable'):
      return '%s%s%s' % (prefix, target, extension)
    elif type == 'none':
      return '%s.stamp' % target
    else:
      raise Exception('Unhandled output type %s' % type)

  def ComputeOutput(self, spec, arch=None):
    """Compute the path for the final output of the spec."""
    type = spec['type']

    if self.flavor == 'win':
      # MSVS settings may override the output name outright.
      override = self.msvs_settings.GetOutputName(self.config_name,
                                                  self.ExpandSpecial)
      if override:
        return override

    if arch is None and self.flavor == 'mac' and type in (
        'static_library', 'executable', 'shared_library', 'loadable_module'):
      filename = self.xcode_settings.GetExecutablePath()
    else:
      filename = self.ComputeOutputFileName(spec, type)

    if arch is None and 'product_dir' in spec:
      path = os.path.join(spec['product_dir'], filename)
      return self.ExpandSpecial(path)

    # Some products go into the output root, libraries go into shared library
    # dir, and everything else goes into the normal place.
    type_in_output_root = ['executable', 'loadable_module']
    if self.flavor == 'mac' and self.toolset == 'target':
      type_in_output_root += ['shared_library', 'static_library']
    elif self.flavor == 'win' and self.toolset == 'target':
      type_in_output_root += ['shared_library']

    if arch is not None:
      # Make sure partial executables don't end up in a bundle or the regular
      # output directory.
archdir = 'arch' if self.toolset != 'target': archdir = os.path.join('arch', '%s' % self.toolset) return os.path.join(archdir, AddArch(filename, arch)) elif type in type_in_output_root or self.is_standalone_static_library: return filename elif type == 'shared_library': libdir = 'lib' if self.toolset != 'target': libdir = os.path.join('lib', '%s' % self.toolset) return os.path.join(libdir, filename) else: return self.GypPathToUniqueOutput(filename, qualified=False) def WriteVariableList(self, ninja_file, var, values): assert not isinstance(values, str) if values is None: values = [] ninja_file.variable(var, ' '.join(values)) def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool, depfile=None): """Write out a new ninja "rule" statement for a given command. Returns the name of the new rule, and a copy of |args| with variables expanded.""" if self.flavor == 'win': args = [self.msvs_settings.ConvertVSMacros( arg, self.base_to_build, config=self.config_name) for arg in args] description = self.msvs_settings.ConvertVSMacros( description, config=self.config_name) elif self.flavor == 'mac': # |env| is an empty list on non-mac. args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args] description = gyp.xcode_emulation.ExpandEnvVars(description, env) # TODO: we shouldn't need to qualify names; we do it because # currently the ninja rule namespace is global, but it really # should be scoped to the subninja. rule_name = self.name if self.toolset == 'target': rule_name += '.' + self.toolset rule_name += '.' + name rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name) # Remove variable references, but not if they refer to the magic rule # variables. This is not quite right, as it also protects these for # actions, not just for rules where they are valid. Good enough. protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ] protect = '(?!' 
+ '|'.join(map(re.escape, protect)) + ')' description = re.sub(protect + r'\$', '_', description) # gyp dictates that commands are run from the base directory. # cd into the directory before running, and adjust paths in # the arguments to point to the proper locations. rspfile = None rspfile_content = None args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args] if self.flavor == 'win': rspfile = rule_name + '.$unique_name.rsp' # The cygwin case handles this inside the bash sub-shell. run_in = '' if is_cygwin else ' ' + self.build_to_base if is_cygwin: rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine( args, self.build_to_base) else: rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args) command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable + rspfile + run_in) else: env = self.ComputeExportEnvString(env) command = gyp.common.EncodePOSIXShellList(args) command = 'cd %s; ' % self.build_to_base + env + command # GYP rules/actions express being no-ops by not touching their outputs. # Avoid executing downstream dependencies in this case by specifying # restat=1 to ninja. self.ninja.rule(rule_name, command, description, depfile=depfile, restat=True, pool=pool, rspfile=rspfile, rspfile_content=rspfile_content) self.ninja.newline() return rule_name, args def CalculateVariables(default_variables, params): """Calculate additional variables for use in the build (called by gyp).""" global generator_additional_non_configuration_keys global generator_additional_path_sections flavor = gyp.common.GetFlavor(params) if flavor == 'mac': default_variables.setdefault('OS', 'mac') default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib') default_variables.setdefault('SHARED_LIB_DIR', generator_default_variables['PRODUCT_DIR']) default_variables.setdefault('LIB_DIR', generator_default_variables['PRODUCT_DIR']) # Copy additional generator configuration data from Xcode, which is shared # by the Mac Ninja generator. 
    import gyp.generator.xcode as xcode_generator
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
  elif flavor == 'win':
    exts = gyp.MSVSUtil.TARGET_TYPE_EXT
    default_variables.setdefault('OS', 'win')
    default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
    default_variables['STATIC_LIB_PREFIX'] = ''
    default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
    default_variables['SHARED_LIB_PREFIX'] = ''
    default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']

    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 os.path.join('$!PRODUCT_DIR', 'lib'))
    default_variables.setdefault('LIB_DIR',
                                 os.path.join('$!PRODUCT_DIR', 'obj'))


def ComputeOutputDir(params):
  """Returns the path from the toplevel_dir to the build output directory."""
  # generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to ninja easier, ninja doesn't put anything here.
  generator_dir = os.path.relpath(params['options'].generator_output or '.')

  # output_dir: relative path from generator_dir to the build directory.
  output_dir = params.get('generator_flags', {}).get('output_dir', 'out')

  # Relative path from source root to our output files.  e.g. "out"
  return os.path.normpath(os.path.join(generator_dir, output_dir))


def CalculateGeneratorInputInfo(params):
  """Called by __init__ to initialize generator values based on params."""
  # E.g. "out/gypfiles"
  toplevel = params['options'].toplevel_dir
  qualified_out_dir = os.path.normpath(os.path.join(
      toplevel, ComputeOutputDir(params), 'gypfiles'))

  global generator_filelist_paths
  generator_filelist_paths = {
      'toplevel': toplevel,
      'qualified_out_dir': qualified_out_dir,
  }


def OpenOutput(path, mode='w'):
  """Open |path| for writing, creating directories if necessary."""
  gyp.common.EnsureDirExists(path)
  return open(path, mode)


def CommandWithWrapper(cmd, wrappers, prog):
  """Prepend the wrapper registered for tool |cmd| (if any) to |prog|."""
  wrapper = wrappers.get(cmd, '')
  if wrapper:
    return wrapper + ' ' + prog
  return prog


def GetDefaultConcurrentLinks():
  """Returns a best-guess for a number of concurrent links."""
  # Explicit override via environment variable wins.
  pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
  if pool_size:
    return pool_size

  if sys.platform in ('win32', 'cygwin'):
    import ctypes

    # Mirrors the Win32 MEMORYSTATUSEX structure for GlobalMemoryStatusEx.
    class MEMORYSTATUSEX(ctypes.Structure):
      _fields_ = [
        ("dwLength", ctypes.c_ulong),
        ("dwMemoryLoad", ctypes.c_ulong),
        ("ullTotalPhys", ctypes.c_ulonglong),
        ("ullAvailPhys", ctypes.c_ulonglong),
        ("ullTotalPageFile", ctypes.c_ulonglong),
        ("ullAvailPageFile", ctypes.c_ulonglong),
        ("ullTotalVirtual", ctypes.c_ulonglong),
        ("ullAvailVirtual", ctypes.c_ulonglong),
        ("sullAvailExtendedVirtual", ctypes.c_ulonglong),
      ]

    stat = MEMORYSTATUSEX()
    stat.dwLength = ctypes.sizeof(stat)
    ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))

    # VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
    # on a 64 GB machine.
    mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30)))  # total / 5GB
    hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
    return min(mem_limit, hard_cap)
  elif sys.platform.startswith('linux'):
    if os.path.exists("/proc/meminfo"):
      with open("/proc/meminfo") as meminfo:
        memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
        for line in meminfo:
          match = memtotal_re.match(line)
          if not match:
            continue
          # Allow 8Gb per link on Linux because Gold is quite memory hungry
          return max(1, int(match.group(1)) / (8 * (2 ** 20)))
    return 1
  elif sys.platform == 'darwin':
    try:
      avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
      # A static library debug build of Chromium's unit_tests takes ~2.7GB, so
      # 4GB per ld process allows for some more bloat.
      return max(1, avail_bytes / (4 * (2 ** 30)))  # total / 4GB
    except:
      # NOTE(review): bare except deliberately falls back to a single link on
      # any sysctl failure; it also swallows KeyboardInterrupt — consider
      # narrowing if this file is ever modernized.
      return 1
  else:
    # TODO(scottmg): Implement this for other platforms.
    return 1


def _GetWinLinkRuleNameSuffix(embed_manifest):
  """Returns the suffix used to select an appropriate linking rule depending on
  whether the manifest embedding is enabled."""
  return '_embed' if embed_manifest else ''


def _AddWinLinkRules(master_ninja, embed_manifest):
  """Adds link rules for Windows platform to |master_ninja|."""

  def FullLinkCommand(ldcmd, out, binary_type):
    # Wrap |ldcmd| in the gyp-win-tool manifest-handling driver; resource id
    # 1 is used for exes and 2 for dlls.
    resource_name = {
      'exe': '1',
      'dll': '2',
    }[binary_type]
    return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
           '%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
           '$manifests' % {
               'python': sys.executable,
               'out': out,
               'ldcmd': ldcmd,
               'resname': resource_name,
               'embed': embed_manifest }

  rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
  use_separate_mspdbsrv = (
      int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
  dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
  dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
            '$ld /nologo $implibflag /DLL /OUT:$binary '
            '@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
  dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
  master_ninja.rule('solink' + rule_name_suffix,
                    description=dlldesc, command=dllcmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$libs $in_newline $ldflags',
                    restat=True,
                    pool='link_pool')
  master_ninja.rule('solink_module' + rule_name_suffix,
                    description=dlldesc, command=dllcmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$libs $in_newline $ldflags',
                    restat=True,
                    pool='link_pool')
  # Note that ldflags goes at the end so that it has the option of
  # overriding default settings earlier in the command line.
  exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
             '$ld /nologo /OUT:$binary @$binary.rsp' %
             (sys.executable, use_separate_mspdbsrv))
  exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
  master_ninja.rule('link' + rule_name_suffix,
                    description='LINK%s $binary' % rule_name_suffix.upper(),
                    command=exe_cmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$in_newline $libs $ldflags',
                    pool='link_pool')


def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  """Emit build.ninja (and per-target subninjas) for one configuration."""
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})

  # build_dir: relative path from source root to our output files.
  # e.g. "out/Debug"
  build_dir = os.path.normpath(
      os.path.join(ComputeOutputDir(params), config_name))

  toplevel_build = os.path.join(options.toplevel_dir, build_dir)

  master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
  master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)

  # Put build-time support tools in out/{config_name}.
  gyp.common.CopyTool(flavor, toplevel_build)

  # Grab make settings for CC/CXX.
  # The rules are
  # - The priority from low to high is gcc/g++, the 'make_global_settings' in
  #   gyp, the environment variable.
  # - If there is no 'make_global_settings' for CC.host/CXX.host or
  #   'CC_host'/'CXX_host' enviroment variable, cc_host/cxx_host should be set
  #   to cc/cxx.
  if flavor == 'win':
    ar = 'lib.exe'
    # cc and cxx must be set to the correct architecture by overriding with one
    # of cl_x86 or cl_x64 below.
    cc = 'UNSET'
    cxx = 'UNSET'
    ld = 'link.exe'
    ld_host = '$ld'
  else:
    ar = 'ar'
    cc = 'cc'
    cxx = 'c++'
    ld = '$cc'
    ldxx = '$cxx'
    ld_host = '$cc_host'
    ldxx_host = '$cxx_host'

  ar_host = 'ar'
  cc_host = None
  cxx_host = None
  cc_host_global_setting = None
  cxx_host_global_setting = None
  clang_cl = None
  nm = 'nm'
  nm_host = 'nm'
  readelf = 'readelf'
  readelf_host = 'readelf'

  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings = data[build_file].get('make_global_settings', [])
  build_to_root = gyp.common.InvertRelativePath(build_dir,
                                                options.toplevel_dir)
  wrappers = {}
  # Apply 'make_global_settings' overrides; later entries win because each
  # key is checked independently.
  for key, value in make_global_settings:
    if key == 'AR':
      ar = os.path.join(build_to_root, value)
    if key == 'AR.host':
      ar_host = os.path.join(build_to_root, value)
    if key == 'CC':
      cc = os.path.join(build_to_root, value)
      if cc.endswith('clang-cl'):
        clang_cl = cc
    if key == 'CXX':
      cxx = os.path.join(build_to_root, value)
    if key == 'CC.host':
      cc_host = os.path.join(build_to_root, value)
      cc_host_global_setting = value
    if key == 'CXX.host':
      cxx_host = os.path.join(build_to_root, value)
      cxx_host_global_setting = value
    if key == 'LD':
      ld = os.path.join(build_to_root, value)
    if key == 'LD.host':
      ld_host = os.path.join(build_to_root, value)
    if key == 'NM':
      nm = os.path.join(build_to_root, value)
    if key == 'NM.host':
      nm_host = os.path.join(build_to_root, value)
    if key == 'READELF':
      readelf = os.path.join(build_to_root, value)
    if key == 'READELF.host':
      readelf_host = os.path.join(build_to_root, value)
    if key.endswith('_wrapper'):
      wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)

  # Support wrappers from environment variables too.
  for key, value in os.environ.iteritems():  # Python 2 API (iteritems).
    if key.lower().endswith('_wrapper'):
      key_prefix = key[:-len('_wrapper')]
      key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
      wrappers[key_prefix] = os.path.join(build_to_root, value)

  if flavor == 'win':
    configs = [target_dicts[qualified_target]['configurations'][config_name]
               for qualified_target in target_list]
    shared_system_includes = None
    if not generator_flags.get('ninja_use_custom_environment_files', 0):
      shared_system_includes = \
          gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
              configs, generator_flags)
    cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
        toplevel_build, generator_flags, shared_system_includes, OpenOutput)
    for arch, path in cl_paths.iteritems():
      if clang_cl:
        # If we have selected clang-cl, use that instead.
        path = clang_cl
      command = CommandWithWrapper('CC', wrappers,
                                   QuoteShellArgument(path, 'win'))
      if clang_cl:
        # Use clang-cl to cross-compile for x86 or x86_64.
        command += (' -m32' if arch == 'x86' else ' -m64')
      master_ninja.variable('cl_' + arch, command)

  cc = GetEnvironFallback(['CC_target', 'CC'], cc)
  master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
  cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
  master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))

  if flavor == 'win':
    master_ninja.variable('ld', ld)
    master_ninja.variable('idl', 'midl.exe')
    master_ninja.variable('ar', ar)
    master_ninja.variable('rc', 'rc.exe')
    master_ninja.variable('ml_x86', 'ml.exe')
    master_ninja.variable('ml_x64', 'ml64.exe')
    master_ninja.variable('mt', 'mt.exe')
  else:
    master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
    master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
    master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
    if flavor != 'mac':
      # Mac does not use readelf/nm for .TOC generation, so avoiding polluting
      # the master ninja with extra unused variables.
      master_ninja.variable(
          'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
      master_ninja.variable(
          'readelf', GetEnvironFallback(['READELF_target', 'READELF'],
                                        readelf))

  if generator_supports_multiple_toolsets:
    if not cc_host:
      cc_host = cc
    if not cxx_host:
      cxx_host = cxx

    master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
    master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
    master_ninja.variable('readelf_host',
                          GetEnvironFallback(['READELF_host'], readelf_host))
    cc_host = GetEnvironFallback(['CC_host'], cc_host)
    cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)

    # The environment variable could be used in 'make_global_settings', like
    # ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
    if '$(CC)' in cc_host and cc_host_global_setting:
      cc_host = cc_host_global_setting.replace('$(CC)', cc)
    if '$(CXX)' in cxx_host and cxx_host_global_setting:
      cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
    master_ninja.variable('cc_host',
                          CommandWithWrapper('CC.host', wrappers, cc_host))
    master_ninja.variable('cxx_host',
                          CommandWithWrapper('CXX.host', wrappers, cxx_host))
    if flavor == 'win':
      master_ninja.variable('ld_host', ld_host)
    else:
      master_ninja.variable('ld_host', CommandWithWrapper(
          'LINK', wrappers, ld_host))
      master_ninja.variable('ldxx_host', CommandWithWrapper(
          'LINK', wrappers, ldxx_host))

  master_ninja.newline()

  # Cap the number of parallel link steps; linking is memory-bound.
  master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
  master_ninja.newline()

  deps = 'msvc' if flavor == 'win' else 'gcc'

  if flavor != 'win':
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
               '$cflags_pch_c -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'cc_s',
      description='CC $out',
      command=('$cc $defines $includes $cflags $cflags_c '
               '$cflags_pch_c -c $in -o $out'))
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
               '$cflags_pch_cc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
  else:
    # TODO(scottmg) Separate pdb names is a test to see if it works around
    # http://crbug.com/142362. It seems there's a race between the creation of
    # the .pdb by the precompiled header step for .cc and the compilation of
    # .c files. This should be handled by mspdbsrv, but rarely errors out with
    #   c1xx : fatal error C1033: cannot open program database
    # By making the rules target separate pdb files this might be avoided.
    cc_command = ('ninja -t msvc -e $arch ' +
                  '-- '
                  '$cc /nologo /showIncludes /FC '
                  '@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
    cxx_command = ('ninja -t msvc -e $arch ' +
                   '-- '
                   '$cxx /nologo /showIncludes /FC '
                   '@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=cc_command,
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_c',
      deps=deps)
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=cxx_command,
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_cc',
      deps=deps)
    master_ninja.rule(
      'idl',
      description='IDL $in',
      command=('%s gyp-win-tool midl-wrapper $arch $outdir '
               '$tlb $h $dlldata $iid $proxy $in '
               '$midl_includes $idlflags' % sys.executable))
    master_ninja.rule(
      'rc',
      description='RC $in',
      # Note: $in must be last otherwise rc.exe complains.
      command=('%s gyp-win-tool rc-wrapper '
               '$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
               sys.executable))
    master_ninja.rule(
      'asm',
      description='ASM $out',
      command=('%s gyp-win-tool asm-wrapper '
               '$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
               sys.executable))

  if flavor != 'mac' and flavor != 'win':
    master_ninja.rule(
      'alink',
      description='AR $out',
      command='rm -f $out && $ar rcs $arflags $out $in')
    master_ninja.rule(
      'alink_thin',
      description='AR $out',
      command='rm -f $out && $ar rcsT $arflags $out $in')

    # This allows targets that only need to depend on $lib's API to declare an
    # order-only dependency on $lib.TOC and avoid relinking such downstream
    # dependencies when $lib changes only in non-public ways.
    # The resulting string leaves an uninterpolated %{suffix} which
    # is used in the final substitution below.
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
        '%(solink)s && %(extract_toc)s > $lib.TOC; else '
        '%(solink)s && %(extract_toc)s > $lib.tmp && '
        'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
        'fi; fi'
        % { 'solink':
              '$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
            'extract_toc':
              ('{ $readelf -d $lib | grep SONAME ; '
               '$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})

    master_ninja.rule(
      'solink',
      description='SOLINK $lib',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
      rspfile='$link_file_list',
      rspfile_content=
          '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
      rspfile='$link_file_list',
      rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'link',
      description='LINK $out',
      command=('$ld $ldflags -o $out '
               '-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
      pool='link_pool')
  elif flavor == 'win':
    master_ninja.rule(
        'alink',
        description='LIB $out',
        command=('%s gyp-win-tool link-wrapper $arch False '
                 '$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
                 sys.executable),
        rspfile='$out.rsp',
        rspfile_content='$in_newline $libflags')
    _AddWinLinkRules(master_ninja, embed_manifest=True)
    _AddWinLinkRules(master_ninja, embed_manifest=False)
  else:
    # Mac-only rules below.
    master_ninja.rule(
      'objc',
      description='OBJC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
               '$cflags_pch_objc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'objcxx',
      description='OBJCXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
               '$cflags_pch_objcc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'alink',
      description='LIBTOOL-STATIC $out, POSTBUILDS',
      command='rm -f $out && '
              './gyp-mac-tool filter-libtool libtool $libtool_flags '
              '-static -o $out $in'
              '$postbuilds')
    master_ninja.rule(
      'lipo',
      description='LIPO $out, POSTBUILDS',
      command='rm -f $out && lipo -create $in -output $out$postbuilds')
    master_ninja.rule(
      'solipo',
      description='SOLIPO $out, POSTBUILDS',
      command=(
          'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
          '%(extract_toc)s > $lib.TOC' %
          { 'extract_toc':
              '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
              'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))

    # Record the public interface of $lib in $lib.TOC. See the corresponding
    # comment in the posix section above for details.
    solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e $lib.TOC ] || '
        # Always force dependent targets to relink if this library
        # reexports something. Handling this correctly would require
        # recursive TOC dumping but this is rare in practice, so punt.
        'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
        '%(solink)s && %(extract_toc)s > $lib.TOC; '
        'else '
        '%(solink)s && %(extract_toc)s > $lib.tmp && '
        'if ! cmp -s $lib.tmp $lib.TOC; then '
        'mv $lib.tmp $lib.TOC ; '
        'fi; '
        'fi'
        % { 'solink': solink_base,
            'extract_toc':
              '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
              'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})

    solink_suffix = '@$link_file_list$postbuilds'
    master_ninja.rule(
      'solink',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': solink_suffix,
                                              'type': '-shared'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_notoc',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': solink_suffix,
                                              'type': '-bundle'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module_notoc',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'link',
      description='LINK $out, POSTBUILDS',
      command=('$ld $ldflags -o $out '
               '$in $solibs $libs$postbuilds'),
      pool='link_pool')
    master_ninja.rule(
      'preprocess_infoplist',
      description='PREPROCESS INFOPLIST $out',
      command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
               'plutil -convert xml1 $out $out'))
    master_ninja.rule(
      'copy_infoplist',
      description='COPY INFOPLIST $in',
      command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
    master_ninja.rule(
      'merge_infoplist',
      description='MERGE INFOPLISTS $in',
      command='$env ./gyp-mac-tool merge-info-plist $out $in')
    master_ninja.rule(
      'compile_xcassets',
      description='COMPILE XCASSETS $in',
      command='$env ./gyp-mac-tool compile-xcassets $keys $in')
    master_ninja.rule(
      'mac_tool',
      description='MACTOOL $mactool_cmd $in',
      command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
    master_ninja.rule(
      'package_framework',
      description='PACKAGE FRAMEWORK $out, POSTBUILDS',
      command='./gyp-mac-tool package-framework $out $version$postbuilds '
              '&& touch $out')

  # Platform-neutral rules: stamp/copy differ only in implementation tool.
  if flavor == 'win':
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='%s gyp-win-tool stamp $out' % sys.executable)
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
  else:
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='${postbuilds}touch $out')
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='rm -rf $out && cp -af $in $out')
  master_ninja.newline()

  all_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list,
                                        target_dicts,
                                        os.path.normpath(build_file)):
      all_targets.add(target)
  all_outputs = set()

  # target_outputs is a map from qualified target name to a Target object.
  target_outputs = {}
  # target_short_names is a map from target short name to a list of Target
  # objects.
  target_short_names = {}

  # short name of targets that were skipped because they didn't contain anything
  # interesting.
  # NOTE: there may be overlap between this an non_empty_target_names.
  empty_target_names = set()

  # Set of non-empty short target names.
  # NOTE: there may be overlap between this an empty_target_names.
  non_empty_target_names = set()

  for qualified_target in target_list:
    # qualified_target is like: third_party/icu/icu.gyp:icui18n#target
    build_file, name, toolset = \
        gyp.common.ParseQualifiedTarget(qualified_target)

    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets. %s vs. %s" %
        (this_make_global_settings, make_global_settings))

    spec = target_dicts[qualified_target]
    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)

    # If build_file is a symlink, we must not follow it because there's a chance
    # it could point to a path above toplevel_dir, and we cannot correctly deal
    # with that case at the moment.
    build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
                                         False)

    qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
                                                           toolset)
    # md5 of the qualified name is used only to make generated rule names
    # unique, not for security.
    hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()

    base_path = os.path.dirname(build_file)
    obj = 'obj'
    if toolset != 'target':
      obj += '.' + toolset
    output_file = os.path.join(obj, base_path, name + '.ninja')

    ninja_output = StringIO()
    writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
                         ninja_output,
                         toplevel_build, output_file,
                         flavor, toplevel_dir=options.toplevel_dir)

    target = writer.WriteSpec(spec, config_name, generator_flags)

    if ninja_output.tell() > 0:
      # Only create files for ninja files that actually have contents.
      with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
        ninja_file.write(ninja_output.getvalue())
      ninja_output.close()
      master_ninja.subninja(output_file)

    if target:
      if name != target.FinalOutput() and spec['toolset'] == 'target':
        target_short_names.setdefault(name, []).append(target)
      target_outputs[qualified_target] = target
      if qualified_target in all_targets:
        all_outputs.add(target.FinalOutput())
      non_empty_target_names.add(name)
    else:
      empty_target_names.add(name)

  if target_short_names:
    # Write a short name to build this target.  This benefits both the
    # "build chrome" case as well as the gyp tests, which expect to be
    # able to run actions and build libraries by their short name.
    master_ninja.newline()
    master_ninja.comment('Short names for targets.')
    for short_name in target_short_names:
      master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
                                               target_short_names[short_name]])

  # Write phony targets for any empty targets that weren't written yet. As
  # short names are not necessarily unique only do this for short names that
  # haven't already been output for another target.
  empty_target_names = empty_target_names - non_empty_target_names
  if empty_target_names:
    master_ninja.newline()
    master_ninja.comment('Empty targets (output for completeness).')
    for name in sorted(empty_target_names):
      master_ninja.build(name, 'phony')

  if all_outputs:
    master_ninja.newline()
    master_ninja.build('all', 'phony', list(all_outputs))
    master_ninja.default(generator_flags.get('default_target', 'all'))

  master_ninja_file.close()


def PerformBuild(data, configurations, params):
  """Invoke ninja once per configuration (gyp --build support)."""
  options = params['options']
  for config in configurations:
    builddir = os.path.join(options.toplevel_dir, 'out', config)
    arguments = ['ninja', '-C', builddir]
    # Python 2 print statement; this whole generator targets Python 2.
    print 'Building [%s]: %s' % (config, arguments)
    subprocess.check_call(arguments)


def CallGenerateOutputForConfig(arglist):
  """Multiprocessing worker: unpack |arglist| and generate one config."""
  # Ignore the interrupt signal so that the parent process catches it and
  # kills all multiprocessing children.
  signal.signal(signal.SIGINT, signal.SIG_IGN)

  (target_list, target_dicts, data, params, config_name) = arglist
  GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)


def GenerateOutput(target_list, target_dicts, data, params):
  """Top-level generator entry point: emit output for every configuration,
  optionally in parallel."""
  # Update target_dicts for iOS device builds.
  target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
      target_dicts)

  user_config = params.get('generator_flags', {}).get('config', None)
  if gyp.common.GetFlavor(params) == 'win':
    target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
    target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
        target_list, target_dicts, generator_default_variables)

  if user_config:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            user_config)
  else:
    config_names = target_dicts[target_list[0]]['configurations'].keys()
    if params['parallel']:
      try:
        pool = multiprocessing.Pool(len(config_names))
        arglists = []
        for config_name in config_names:
          arglists.append(
              (target_list, target_dicts, data, params, config_name))
        pool.map(CallGenerateOutputForConfig, arglists)
      except KeyboardInterrupt, e:  # Python 2 except syntax.
        pool.terminate()
        raise e
    else:
      for config_name in config_names:
        GenerateOutputForConfig(target_list, target_dicts, data, params,
                                config_name)
mit
paolodedios/pybuilder
docs/conf.py
10
9282
# -*- coding: utf-8 -*-
#
# PyBuilder documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 23 20:15:57 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
import shlex

import alabaster

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.  (Intentionally empty: no extensions are enabled.)
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'PyBuilder'
copyright = u'2015, PyBuilder Team'
author = u'PyBuilder Team'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.10'
# The full version, including alpha/beta/rc tags.
release = '0.10'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [alabaster.get_path()]

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "static/logo.png"

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'PyBuilderdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', # Latex figure (float) alignment # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'PyBuilder.tex', u'PyBuilder Documentation', u'PyBuilder Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. 
# latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pybuilder', u'PyBuilder Documentation', [author], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'PyBuilder', u'PyBuilder Documentation', author, 'PyBuilder', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False
apache-2.0
fabiopetroni/Dato-Core
src/unity/python/graphlab/data_structures/sarray.py
13
91593
""" This module defines the SArray class which provides the ability to create, access and manipulate a remote scalable array object. SArray acts similarly to pandas.Series but without indexing. The data is immutable, homogeneous, and is stored on the GraphLab Server side. """ ''' Copyright (C) 2015 Dato, Inc. All rights reserved. This software may be modified and distributed under the terms of the BSD license. See the DATO-PYTHON-LICENSE file for details. ''' import graphlab.connect as _mt import graphlab.connect.main as glconnect from graphlab.cython.cy_type_utils import pytype_from_dtype, infer_type_of_list, is_numeric_type from graphlab.cython.cy_sarray import UnitySArrayProxy from graphlab.cython.context import debug_trace as cython_context from graphlab.util import _make_internal_url, _is_callable import graphlab as gl import inspect import math from graphlab.deps import numpy, HAS_NUMPY from graphlab.deps import pandas, HAS_PANDAS import time import array import datetime import graphlab.meta as meta import itertools import warnings __all__ = ['SArray'] def _create_sequential_sarray(size, start=0, reverse=False): if type(size) is not int: raise TypeError("size must be int") if type(start) is not int: raise TypeError("size must be int") if type(reverse) is not bool: raise TypeError("reverse must me bool") with cython_context(): return SArray(_proxy=glconnect.get_unity().create_sequential_sarray(size, start, reverse)) class SArray(object): """ An immutable, homogeneously typed array object backed by persistent storage. SArray is scaled to hold data that are much larger than the machine's main memory. It fully supports missing values and random access. The data backing an SArray is located on the same machine as the GraphLab Server process. Each column in an :py:class:`~graphlab.SFrame` is an SArray. Parameters ---------- data : list | numpy.ndarray | pandas.Series | string The input data. 
If this is a list, numpy.ndarray, or pandas.Series, the data in the list is converted and stored in an SArray. Alternatively if this is a string, it is interpreted as a path (or url) to a text file. Each line of the text file is loaded as a separate row. If ``data`` is a directory where an SArray was previously saved, this is loaded as an SArray read directly out of that directory. dtype : {None, int, float, str, list, array.array, dict, datetime.datetime, graphlab.Image}, optional The data type of the SArray. If not specified (None), we attempt to infer it from the input. If it is a numpy array or a Pandas series, the dtype of the array/series is used. If it is a list, the dtype is inferred from the inner list. If it is a URL or path to a text file, we default the dtype to str. ignore_cast_failure : bool, optional If True, ignores casting failures but warns when elements cannot be casted into the specified dtype. Notes ----- - If ``data`` is pandas.Series, the index will be ignored. - The datetime is based on the Boost datetime format (see http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html for details) - When working with the GraphLab EC2 instance (see :py:func:`graphlab.aws.launch_EC2()`), an SArray cannot be constructed using local file path, because it involves a potentially large amount of data transfer from client to server. However, it is still okay to use a remote file path. See the examples below. The same restriction applies to :py:class:`~graphlab.SGraph` and :py:class:`~graphlab.SFrame`. Examples -------- SArray can be constructed in various ways: Construct an SArray from list. >>> from graphlab import SArray >>> sa = SArray(data=[1,2,3,4,5], dtype=int) Construct an SArray from numpy.ndarray. >>> sa = SArray(data=numpy.asarray([1,2,3,4,5]), dtype=int) or: >>> sa = SArray(numpy.asarray([1,2,3,4,5]), int) Construct an SArray from pandas.Series. 
>>> sa = SArray(data=pd.Series([1,2,3,4,5]), dtype=int) or: >>> sa = SArray(pd.Series([1,2,3,4,5]), int) If the type is not specified, automatic inference is attempted: >>> SArray(data=[1,2,3,4,5]).dtype() int >>> SArray(data=[1,2,3,4,5.0]).dtype() float The SArray supports standard datatypes such as: integer, float and string. It also supports three higher level datatypes: float arrays, dict and list (array of arbitrary types). Create an SArray from a list of strings: >>> sa = SArray(data=['a','b']) Create an SArray from a list of float arrays; >>> sa = SArray([[1,2,3], [3,4,5]]) Create an SArray from a list of lists: >>> sa = SArray(data=[['a', 1, {'work': 3}], [2, 2.0]]) Create an SArray from a list of dictionaries: >>> sa = SArray(data=[{'a':1, 'b': 2}, {'b':2, 'c': 1}]) Create an SArray from a list of datetime objects: >>> sa = SArray(data=[datetime.datetime(2011, 10, 20, 9, 30, 10)]) Construct an SArray from local text file. (Only works for local server). >>> sa = SArray('/tmp/a_to_z.txt.gz') Construct an SArray from a text file downloaded from a URL. >>> sa = SArray('http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz') **Numeric Operators** SArrays support a large number of vectorized operations on numeric types. For instance: >>> sa = SArray([1,1,1,1,1]) >>> sb = SArray([2,2,2,2,2]) >>> sc = sa + sb >>> sc dtype: int Rows: 5 [3, 3, 3, 3, 3] >>> sc + 2 dtype: int Rows: 5 [5, 5, 5, 5, 5] Operators which are supported include all numeric operators (+,-,*,/), as well as comparison operators (>, >=, <, <=), and logical operators (&, |). 
For instance: >>> sa = SArray([1,2,3,4,5]) >>> (sa >= 2) & (sa <= 4) dtype: int Rows: 5 [0, 1, 1, 1, 0] The numeric operators (+,-,*,/) also work on array types: >>> sa = SArray(data=[[1.0,1.0], [2.0,2.0]]) >>> sa + 1 dtype: list Rows: 2 [array('f', [2.0, 2.0]), array('f', [3.0, 3.0])] >>> sa + sa dtype: list Rows: 2 [array('f', [2.0, 2.0]), array('f', [4.0, 4.0])] The addition operator (+) can also be used for string concatenation: >>> sa = SArray(data=['a','b']) >>> sa + "x" dtype: str Rows: 2 ['ax', 'bx'] This can be useful for performing type interpretation of lists or dictionaries stored as strings: >>> sa = SArray(data=['a,b','c,d']) >>> ("[" + sa + "]").astype(list) # adding brackets make it look like a list dtype: list Rows: 2 [['a', 'b'], ['c', 'd']] All comparison operations and boolean operators are supported and emit binary SArrays. >>> sa = SArray([1,2,3,4,5]) >>> sa >= 2 dtype: int Rows: 3 [0, 1, 1, 1, 1] >>> (sa >= 2) & (sa <= 4) dtype: int Rows: 3 [0, 1, 1, 1, 0] **Element Access and Slicing** SArrays can be accessed by integer keys just like a regular python list. Such operations may not be fast on large datasets so looping over an SArray should be avoided. 
>>> sa = SArray([1,2,3,4,5]) >>> sa[0] 1 >>> sa[2] 3 >>> sa[5] IndexError: SFrame index out of range Negative indices can be used to access elements from the tail of the array >>> sa[-1] # returns the last element 5 >>> sa[-2] # returns the second to last element 4 The SArray also supports the full range of python slicing operators: >>> sa[1000:] # Returns an SArray containing rows 1000 to the end >>> sa[:1000] # Returns an SArray containing rows 0 to row 999 inclusive >>> sa[0:1000:2] # Returns an SArray containing rows 0 to row 1000 in steps of 2 >>> sa[-100:] # Returns an SArray containing last 100 rows >>> sa[-100:len(sa):2] # Returns an SArray containing last 100 rows in steps of 2 **Logical Filter** An SArray can be filtered using >>> array[binary_filter] where array and binary_filter are SArrays of the same length. The result is a new SArray which contains only elements of 'array' where its matching row in the binary_filter is non zero. This permits the use of boolean operators that can be used to perform logical filtering operations. For instance: >>> sa = SArray([1,2,3,4,5]) >>> sa[(sa >= 2) & (sa <= 4)] dtype: int Rows: 3 [2, 3, 4] This can also be used more generally to provide filtering capability which is otherwise not expressible with simple boolean functions. For instance: >>> sa = SArray([1,2,3,4,5]) >>> sa[sa.apply(lambda x: math.log(x) <= 1)] dtype: int Rows: 3 [1, 2] This is equivalent to >>> sa.filter(lambda x: math.log(x) <= 1) dtype: int Rows: 3 [1, 2] **Iteration** The SArray is also iterable, but not efficiently since this involves a streaming transmission of data from the server to the client. This should not be used for large data. 
>>> sa = SArray([1,2,3,4,5]) >>> [i + 1 for i in sa] [2, 3, 4, 5, 6] This can be used to convert an SArray to a list: >>> sa = SArray([1,2,3,4,5]) >>> l = list(sa) >>> l [1, 2, 3, 4, 5] """ def __init__(self, data=[], dtype=None, ignore_cast_failure=False, _proxy=None): """ __init__(data=list(), dtype=None, ignore_cast_failure=False) Construct a new SArray. The source of data includes: list, numpy.ndarray, pandas.Series, and urls. """ _mt._get_metric_tracker().track('sarray.init') if dtype is not None and type(dtype) != type: raise TypeError('dtype must be a type, e.g. use int rather than \'int\'') if (_proxy): self.__proxy__ = _proxy elif type(data) == SArray: self.__proxy__ = data.__proxy__ else: self.__proxy__ = UnitySArrayProxy(glconnect.get_client()) # we need to perform type inference if dtype is None: if (isinstance(data, list)): # if it is a list, Get the first type and make sure # the remaining items are all of the same type dtype = infer_type_of_list(data) elif isinstance(data, array.array): dtype = infer_type_of_list(data) elif HAS_PANDAS and isinstance(data, pandas.Series): # if it is a pandas series get the dtype of the series dtype = pytype_from_dtype(data.dtype) if dtype == object: # we need to get a bit more fine grained than that dtype = infer_type_of_list(data) elif HAS_NUMPY and isinstance(data, numpy.ndarray): # if it is a numpy array, get the dtype of the array dtype = pytype_from_dtype(data.dtype) if dtype == object: # we need to get a bit more fine grained than that dtype = infer_type_of_list(data) if len(data.shape) == 2: # we need to make it an array or a list if dtype == float or dtype == int: dtype = array.array else: dtype = list elif len(data.shape) > 2: raise TypeError("Cannot convert Numpy arrays of greater than 2 dimensions") elif (isinstance(data, str) or isinstance(data, unicode)): # if it is a file, we default to string dtype = str if HAS_PANDAS and isinstance(data, pandas.Series): with cython_context(): 
self.__proxy__.load_from_iterable(data.values, dtype, ignore_cast_failure) elif (HAS_NUMPY and isinstance(data, numpy.ndarray)) or isinstance(data, list) or isinstance(data, array.array): with cython_context(): self.__proxy__.load_from_iterable(data, dtype, ignore_cast_failure) elif (isinstance(data, str) or isinstance(data, unicode)): internal_url = _make_internal_url(data) with cython_context(): self.__proxy__.load_autodetect(internal_url, dtype) else: raise TypeError("Unexpected data source. " \ "Possible data source types are: list, " \ "numpy.ndarray, pandas.Series, and string(url)") @classmethod def from_const(cls, value, size): """ Constructs an SArray of size with a const value. Parameters ---------- value : [int | float | str | array.array | list | dict | datetime] The value to fill the SArray size : int The size of the SArray Examples -------- Construct an SArray consisting of 10 zeroes: >>> graphlab.SArray.from_const(0, 10) """ assert type(size) is int and size >= 0, "size must be a positive int" if (type(value) not in [type(None), int, float, str, array.array, list, dict, datetime.datetime]): raise TypeError('Cannot create sarray of value type %s' % str(type(value))) proxy = UnitySArrayProxy(glconnect.get_client()) proxy.load_from_const(value, size) return cls(_proxy=proxy) @classmethod def from_sequence(cls, *args): """ from_sequence(start=0, stop) Create an SArray from sequence .. sourcecode:: python Construct an SArray of integer values from 0 to 999 >>> gl.SArray.from_sequence(1000) This is equivalent, but more efficient than: >>> gl.SArray(range(1000)) Construct an SArray of integer values from 10 to 999 >>> gl.SArray.from_sequence(10, 1000) This is equivalent, but more efficient than: >>> gl.SArray(range(10, 1000)) Parameters ---------- start : int, optional The start of the sequence. The sequence will contain this value. stop : int The end of the sequence. The sequence will not contain this value. """ start = None stop = None # fill with args. 
This checks for from_sequence(100), from_sequence(10,100) if len(args) == 1: stop = args[0] elif len(args) == 2: start = args[0] stop = args[1] if stop is None and start is None: raise TypeError("from_sequence expects at least 1 argument. got 0") elif start is None: return _create_sequential_sarray(stop) else: size = stop - start # this matches the behavior of range # i.e. range(100,10) just returns an empty array if (size < 0): size = 0 return _create_sequential_sarray(size, start) @classmethod def from_avro(cls, filename): """ Construct an SArray from an Avro file. The SArray type is determined by the schema of the Avro file. Parameters ---------- filename : str The Avro file to load into an SArray. Examples -------- Construct an SArray from a local Avro file named 'data.avro': >>> graphlab.SArray.from_avro('/data/data.avro') Notes ----- Currently only supports direct loading of files on the local filesystem. References ---------- - `Avro Specification <http://avro.apache.org/docs/1.7.7/spec.html>`_ """ _mt._get_metric_tracker().track('sarray.from_avro') proxy = UnitySArrayProxy(glconnect.get_client()) proxy.load_from_avro(filename) return cls(_proxy = proxy) def __get_content_identifier__(self): """ Returns the unique identifier of the content that backs the SArray Notes ----- Meant for internal use only. """ with cython_context(): return self.__proxy__.get_content_identifier() def save(self, filename, format=None): """ Saves the SArray to file. The saved SArray will be in a directory named with the `targetfile` parameter. Parameters ---------- filename : string A local path or a remote URL. If format is 'text', it will be saved as a text file. If format is 'binary', a directory will be created at the location which will contain the SArray. format : {'binary', 'text', 'csv'}, optional Format in which to save the SFrame. Binary saved SArrays can be loaded much faster and without any format conversion losses. 
'text' and 'csv' are synonymous: Each SArray row will be written as a single line in an output text file. If not given, will try to infer the format from filename given. If file name ends with 'csv', 'txt' or '.csv.gz', then save as 'csv' format, otherwise save as 'binary' format. """ if format == None: if filename.endswith(('.csv', '.csv.gz', 'txt')): format = 'text' else: format = 'binary' if format == 'binary': with cython_context(): self.__proxy__.save(_make_internal_url(filename)) elif format == 'text': sf = gl.SFrame({'X1':self}) with cython_context(): sf.__proxy__.save_as_csv(_make_internal_url(filename), {'header':False}) def _escape_space(self,s): return "".join([ch.encode('string_escape') if ch.isspace() else ch for ch in s]) def __repr__(self): """ Returns a string description of the SArray. """ ret = "dtype: " + str(self.dtype().__name__) + "\n" ret = ret + "Rows: " + str(self.size()) + "\n" ret = ret + self.__str__() return ret def __str__(self): """ Returns a string containing the first 100 elements of the array. """ # If sarray is image, take head of elements casted to string. if self.dtype() == gl.data_structures.image.Image: headln = str(list(self._head_str(100))) else: headln = self._escape_space(str(list(self.head(100)))) headln = unicode(headln.decode('string_escape'),'utf-8',errors='replace').encode('utf-8') if (self.size() > 100): # cut the last close bracket # and replace it with ... headln = headln[0:-1] + ", ... ]" return headln def __nonzero__(self): """ Returns true if the array is not empty. """ return self.size() != 0 def __len__(self): """ Returns the length of the array """ return self.size() def __iter__(self): """ Provides an iterator to the contents of the array. 
""" def generator(): elems_at_a_time = 262144 self.__proxy__.begin_iterator() ret = self.__proxy__.iterator_get_next(elems_at_a_time) while(True): for j in ret: yield j if len(ret) == elems_at_a_time: ret = self.__proxy__.iterator_get_next(elems_at_a_time) else: break return generator() def __add__(self, other): """ If other is a scalar value, adds it to the current array, returning the new result. If other is an SArray, performs an element-wise addition of the two arrays. """ with cython_context(): if type(other) is SArray: return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '+')) else: return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '+')) def __sub__(self, other): """ If other is a scalar value, subtracts it from the current array, returning the new result. If other is an SArray, performs an element-wise subtraction of the two arrays. """ with cython_context(): if type(other) is SArray: return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '-')) else: return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '-')) def __mul__(self, other): """ If other is a scalar value, multiplies it to the current array, returning the new result. If other is an SArray, performs an element-wise multiplication of the two arrays. """ with cython_context(): if type(other) is SArray: return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '*')) else: return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '*')) def __div__(self, other): """ If other is a scalar value, divides each element of the current array by the value, returning the result. If other is an SArray, performs an element-wise division of the two arrays. 
""" with cython_context(): if type(other) is SArray: return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '/')) else: return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '/')) def __lt__(self, other): """ If other is a scalar value, compares each element of the current array by the value, returning the result. If other is an SArray, performs an element-wise comparison of the two arrays. """ with cython_context(): if type(other) is SArray: return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '<')) else: return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '<')) def __gt__(self, other): """ If other is a scalar value, compares each element of the current array by the value, returning the result. If other is an SArray, performs an element-wise comparison of the two arrays. """ with cython_context(): if type(other) is SArray: return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '>')) else: return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '>')) def __le__(self, other): """ If other is a scalar value, compares each element of the current array by the value, returning the result. If other is an SArray, performs an element-wise comparison of the two arrays. """ with cython_context(): if type(other) is SArray: return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '<=')) else: return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '<=')) def __ge__(self, other): """ If other is a scalar value, compares each element of the current array by the value, returning the result. If other is an SArray, performs an element-wise comparison of the two arrays. """ with cython_context(): if type(other) is SArray: return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '>=')) else: return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '>=')) def __radd__(self, other): """ Adds a scalar value to the current array. 
Returned array has the same type as the array on the right hand side """ with cython_context(): return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '+')) def __rsub__(self, other): """ Subtracts a scalar value from the current array. Returned array has the same type as the array on the right hand side """ with cython_context(): return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '-')) def __rmul__(self, other): """ Multiplies a scalar value to the current array. Returned array has the same type as the array on the right hand side """ with cython_context(): return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '*')) def __rdiv__(self, other): """ Divides a scalar value by each element in the array Returned array has the same type as the array on the right hand side """ with cython_context(): return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '/')) def __eq__(self, other): """ If other is a scalar value, compares each element of the current array by the value, returning the new result. If other is an SArray, performs an element-wise comparison of the two arrays. """ with cython_context(): if type(other) is SArray: return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '==')) else: return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '==')) def __ne__(self, other): """ If other is a scalar value, compares each element of the current array by the value, returning the new result. If other is an SArray, performs an element-wise comparison of the two arrays. """ with cython_context(): if type(other) is SArray: return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '!=')) else: return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '!=')) def __and__(self, other): """ Perform a logical element-wise 'and' against another SArray. 
""" if type(other) is SArray: with cython_context(): return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '&')) else: raise TypeError("SArray can only perform logical and against another SArray") def __or__(self, other): """ Perform a logical element-wise 'or' against another SArray. """ if type(other) is SArray: with cython_context(): return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '|')) else: raise TypeError("SArray can only perform logical or against another SArray") def __getitem__(self, other): """ If the key is an SArray of identical length, this function performs a logical filter: i.e. it subselects all the elements in this array where the corresponding value in the other array evaluates to true. If the key is an integer this returns a single row of the SArray. If the key is a slice, this returns an SArray with the sliced rows. See the GraphLab Create User Guide for usage examples. """ sa_len = len(self) if type(other) is int: if other < 0: other += sa_len if other >= sa_len: raise IndexError("SFrame index out of range") try: lb, ub, value_list = self._getitem_cache if lb <= other < ub: return value_list[other - lb] except AttributeError: pass # Not in cache, need to grab it block_size = 1024 * (32 if self.dtype() in [int, long, float] else 4) block_num = int(other // block_size) lb = block_num * block_size ub = min(sa_len, lb + block_size) val_list = list(SArray(_proxy = self.__proxy__.copy_range(lb, 1, ub))) self._getitem_cache = (lb, ub, val_list) return val_list[other - lb] elif type(other) is SArray: if len(other) != sa_len: raise IndexError("Cannot perform logical indexing on arrays of different length.") with cython_context(): return SArray(_proxy = self.__proxy__.logical_filter(other.__proxy__)) elif type(other) is slice: start = other.start stop = other.stop step = other.step if start is None: start = 0 if stop is None: stop = sa_len if step is None: step = 1 # handle negative indices if start < 0: start = 
sa_len + start if stop < 0: stop = sa_len + stop return SArray(_proxy = self.__proxy__.copy_range(start, step, stop)) else: raise IndexError("Invalid type to use for indexing") def __materialize__(self): """ For a SArray that is lazily evaluated, force persist this sarray to disk, committing all lazy evaluated operations. """ with cython_context(): self.__proxy__.materialize() def __is_materialized__(self): """ Returns whether or not the sarray has been materialized. """ return self.__proxy__.is_materialized() def size(self): """ The size of the SArray. """ return self.__proxy__.size() def dtype(self): """ The data type of the SArray. Returns ------- out : type The type of the SArray. Examples -------- >>> sa = gl.SArray(["The quick brown fox jumps over the lazy dog."]) >>> sa.dtype() str >>> sa = gl.SArray(range(10)) >>> sa.dtype() int """ return self.__proxy__.dtype() def head(self, n=10): """ Returns an SArray which contains the first n rows of this SArray. Parameters ---------- n : int The number of rows to fetch. Returns ------- out : SArray A new SArray which contains the first n rows of the current SArray. Examples -------- >>> gl.SArray(range(10)).head(5) dtype: int Rows: 5 [0, 1, 2, 3, 4] """ return SArray(_proxy=self.__proxy__.head(n)) def vector_slice(self, start, end=None): """ If this SArray contains vectors or recursive types, this returns a new SArray containing each individual vector sliced, between start and end, exclusive. Parameters ---------- start : int The start position of the slice. end : int, optional. The end position of the slice. Note that the end position is NOT included in the slice. Thus a g.vector_slice(1,3) will extract entries in position 1 and 2. Returns ------- out : SArray Each individual vector sliced according to the arguments. 
Examples -------- If g is a vector of floats: >>> g = SArray([[1,2,3],[2,3,4]]) >>> g dtype: array Rows: 2 [array('d', [1.0, 2.0, 3.0]), array('d', [2.0, 3.0, 4.0])] >>> g.vector_slice(0) # extracts the first element of each vector dtype: float Rows: 2 [1.0, 2.0] >>> g.vector_slice(0, 2) # extracts the first two elements of each vector dtype: array.array Rows: 2 [array('d', [1.0, 2.0]), array('d', [2.0, 3.0])] If a vector cannot be sliced, the result will be None: >>> g = SArray([[1],[1,2],[1,2,3]]) >>> g dtype: array.array Rows: 3 [array('d', [1.0]), array('d', [1.0, 2.0]), array('d', [1.0, 2.0, 3.0])] >>> g.vector_slice(2) dtype: float Rows: 3 [None, None, 3.0] >>> g.vector_slice(0,2) dtype: list Rows: 3 [None, array('d', [1.0, 2.0]), array('d', [1.0, 2.0])] If g is a vector of mixed types (float, int, str, array, list, etc.): >>> g = SArray([['a',1,1.0],['b',2,2.0]]) >>> g dtype: list Rows: 2 [['a', 1, 1.0], ['b', 2, 2.0]] >>> g.vector_slice(0) # extracts the first element of each vector dtype: list Rows: 2 [['a'], ['b']] """ if (self.dtype() != array.array) and (self.dtype() != list): raise RuntimeError("Only Vector type can be sliced") if end == None: end = start + 1 with cython_context(): return SArray(_proxy=self.__proxy__.vector_slice(start, end)) def _count_words(self, to_lower=True): """ For documentation, see graphlab.text_analytics.count_ngrams(). """ if (self.dtype() != str): raise TypeError("Only SArray of string type is supported for counting bag of words") _mt._get_metric_tracker().track('sarray.count_words') # construct options, will extend over time options = dict() options["to_lower"] = to_lower == True with cython_context(): return SArray(_proxy=self.__proxy__.count_bag_of_words(options)) def _count_ngrams(self, n=2, method="word", to_lower=True, ignore_space=True): """ For documentation, see graphlab.text_analytics.count_ngrams(). 
""" if (self.dtype() != str): raise TypeError("Only SArray of string type is supported for counting n-grams") if (type(n) != int): raise TypeError("Input 'n' must be of type int") if (n < 1): raise ValueError("Input 'n' must be greater than 0") if (n > 5): warnings.warn("It is unusual for n-grams to be of size larger than 5.") _mt._get_metric_tracker().track('sarray.count_ngrams', properties={'n':n, 'method':method}) # construct options, will extend over time options = dict() options["to_lower"] = to_lower == True options["ignore_space"] = ignore_space == True if method == "word": with cython_context(): return SArray(_proxy=self.__proxy__.count_ngrams(n, options )) elif method == "character" : with cython_context(): return SArray(_proxy=self.__proxy__.count_character_ngrams(n, options )) else: raise ValueError("Invalid 'method' input value. Please input either 'word' or 'character' ") def dict_trim_by_keys(self, keys, exclude=True): """ Filter an SArray of dictionary type by the given keys. By default, all keys that are in the provided list in ``keys`` are *excluded* from the returned SArray. Parameters ---------- keys : list A collection of keys to trim down the elements in the SArray. exclude : bool, optional If True, all keys that are in the input key list are removed. If False, only keys that are in the input key list are retained. Returns ------- out : SArray A SArray of dictionary type, with each dictionary element trimmed according to the input criteria. 
See Also -------- dict_trim_by_values Examples -------- >>> sa = graphlab.SArray([{"this":1, "is":1, "dog":2}, {"this": 2, "are": 2, "cat": 1}]) >>> sa.dict_trim_by_keys(["this", "is", "and", "are"], exclude=True) dtype: dict Rows: 2 [{'dog': 2}, {'cat': 1}] """ if isinstance(keys, str) or (not hasattr(keys, "__iter__")): keys = [keys] _mt._get_metric_tracker().track('sarray.dict_trim_by_keys') with cython_context(): return SArray(_proxy=self.__proxy__.dict_trim_by_keys(keys, exclude)) def dict_trim_by_values(self, lower=None, upper=None): """ Filter dictionary values to a given range (inclusive). Trimming is only performed on values which can be compared to the bound values. Fails on SArrays whose data type is not ``dict``. Parameters ---------- lower : int or long or float, optional The lowest dictionary value that would be retained in the result. If not given, lower bound is not applied. upper : int or long or float, optional The highest dictionary value that would be retained in the result. If not given, upper bound is not applied. Returns ------- out : SArray An SArray of dictionary type, with each dict element trimmed according to the input criteria. 
See Also -------- dict_trim_by_keys Examples -------- >>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7}, {"this": 2, "are": 1, "cat": 5}]) >>> sa.dict_trim_by_values(2,5) dtype: dict Rows: 2 [{'is': 5}, {'this': 2, 'cat': 5}] >>> sa.dict_trim_by_values(upper=5) dtype: dict Rows: 2 [{'this': 1, 'is': 5}, {'this': 2, 'are': 1, 'cat': 5}] """ if None != lower and (not is_numeric_type(type(lower))): raise TypeError("lower bound has to be a numeric value") if None != upper and (not is_numeric_type(type(upper))): raise TypeError("upper bound has to be a numeric value") _mt._get_metric_tracker().track('sarray.dict_trim_by_values') with cython_context(): return SArray(_proxy=self.__proxy__.dict_trim_by_values(lower, upper)) def dict_keys(self): """ Create an SArray that contains all the keys from each dictionary element as a list. Fails on SArrays whose data type is not ``dict``. Returns ------- out : SArray A SArray of list type, where each element is a list of keys from the input SArray element. See Also -------- dict_values Examples --------- >>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7}, {"this": 2, "are": 1, "cat": 5}]) >>> sa.dict_keys() dtype: list Rows: 2 [['this', 'is', 'dog'], ['this', 'are', 'cat']] """ _mt._get_metric_tracker().track('sarray.dict_keys') with cython_context(): return SArray(_proxy=self.__proxy__.dict_keys()) def dict_values(self): """ Create an SArray that contains all the values from each dictionary element as a list. Fails on SArrays whose data type is not ``dict``. Returns ------- out : SArray A SArray of list type, where each element is a list of values from the input SArray element. 
        See Also
        --------
        dict_keys

        Examples
        --------
        >>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
                                  {"this": 2, "are": 1, "cat": 5}])
        >>> sa.dict_values()
        dtype: list
        Rows: 2
        [[1, 5, 7], [2, 1, 5]]
        """
        _mt._get_metric_tracker().track('sarray.dict_values')

        with cython_context():
            return SArray(_proxy=self.__proxy__.dict_values())

    def dict_has_any_keys(self, keys):
        """
        Create a boolean SArray by checking the keys of an SArray of
        dictionaries. An element of the output SArray is True if the
        corresponding input element's dictionary has any of the given keys.
        Fails on SArrays whose data type is not ``dict``.

        Parameters
        ----------
        keys : list
            A list of key values to check each dictionary against.

        Returns
        -------
        out : SArray
            A SArray of int type, where each element indicates whether the
            input SArray element contains any key in the input list.

        See Also
        --------
        dict_has_all_keys

        Examples
        --------
        >>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7}, {"animal":1},
                                  {"this": 2, "are": 1, "cat": 5}])
        >>> sa.dict_has_any_keys(["is", "this", "are"])
        dtype: int
        Rows: 3
        [1, 1, 0]
        """
        # Wrap a single key (or any non-iterable) into a list.
        if isinstance(keys, str) or (not hasattr(keys, "__iter__")):
            keys = [keys]

        _mt._get_metric_tracker().track('sarray.dict_has_any_keys')

        with cython_context():
            return SArray(_proxy=self.__proxy__.dict_has_any_keys(keys))

    def dict_has_all_keys(self, keys):
        """
        Create a boolean SArray by checking the keys of an SArray of
        dictionaries. An element of the output SArray is True if the
        corresponding input element's dictionary has all of the given keys.
        Fails on SArrays whose data type is not ``dict``.

        Parameters
        ----------
        keys : list
            A list of key values to check each dictionary against.

        Returns
        -------
        out : SArray
            A SArray of int type, where each element indicates whether the
            input SArray element contains all keys in the input list.

        See Also
        --------
        dict_has_any_keys

        Examples
        --------
        >>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
                                  {"this": 2, "are": 1, "cat": 5}])
        >>> sa.dict_has_all_keys(["is", "this"])
        dtype: int
        Rows: 2
        [1, 0]
        """
        # Wrap a single key (or any non-iterable) into a list.
        if isinstance(keys, str) or (not hasattr(keys, "__iter__")):
            keys = [keys]

        _mt._get_metric_tracker().track('sarray.dict_has_all_keys')

        with cython_context():
            return SArray(_proxy=self.__proxy__.dict_has_all_keys(keys))

    def apply(self, fn, dtype=None, skip_undefined=True, seed=None, _lua_translate=False):
        """
        apply(fn, dtype=None, skip_undefined=True, seed=None)
        Transform each element of the SArray by a given function. The result
        SArray is of type ``dtype``. ``fn`` should be a function that returns
        exactly one value which can be cast into the type specified by
        ``dtype``. If ``dtype`` is not specified, the first 100 elements of
        the SArray are used to make a guess about the data type.

        Parameters
        ----------
        fn : function
            The function to transform each element. Must return exactly one
            value which can be cast into the type specified by ``dtype``.
            This can also be a toolkit extension function which is compiled
            as a native shared library using SDK.

        dtype : {None, int, float, str, list, array.array, dict, graphlab.Image}, optional
            The data type of the new SArray. If ``None``, the first 100
            elements of the array are used to guess the target data type.

        skip_undefined : bool, optional
            If True, will not apply ``fn`` to any undefined values.

        seed : int, optional
            Used as the seed if a random number generator is included in
            ``fn``.

        Returns
        -------
        out : SArray
            The SArray transformed by ``fn``. Each element of the SArray is of
            type ``dtype``.

        See Also
        --------
        SFrame.apply

        Examples
        --------
        >>> sa = graphlab.SArray([1,2,3])
        >>> sa.apply(lambda x: x*2)
        dtype: int
        Rows: 3
        [2, 4, 6]

        Using native toolkit extension function:

        ..
 code-block:: c++

            #include <graphlab/sdk/toolkit_function_macros.hpp>
            #include <cmath>

            using namespace graphlab;
            double logx(const flexible_type& x, double base) {
              return log((double)(x)) / log(base);
            }

            BEGIN_FUNCTION_REGISTRATION
            REGISTER_FUNCTION(logx, "x", "base");
            END_FUNCTION_REGISTRATION

        compiled into example.so

        >>> import example

        >>> sa = graphlab.SArray([1,2,4])
        >>> sa.apply(lambda x: example.logx(x, 2))
        dtype: float
        Rows: 3
        [0.0, 1.0, 2.0]
        """
        # A string `fn` is treated as pre-translated Lua source and tagged
        # with the "LUA" prefix the backend expects; dtype is then mandatory
        # since there is no Python callable to dry-run for type inference.
        if (type(fn) == str):
            fn = "LUA" + fn
            if dtype == None:
                raise TypeError("dtype must be specified for a lua function")
        else:
            assert _is_callable(fn), "Input must be a function"
            # Dry-run fn on the first 100 non-missing rows to infer dtype.
            dryrun = [fn(i) for i in self.head(100) if i is not None]
            import traceback
            if dtype == None:
                dtype = infer_type_of_list(dryrun)
        # NOTE(review): a seed of 0 is treated as "unset" here — confirm intended.
        if not seed:
            seed = time.time()

        # log metric
        _mt._get_metric_tracker().track('sarray.apply')

        # First phase test if it is a toolkit function
        nativefn = None
        try:
            import graphlab.extensions as extensions
            nativefn = extensions._build_native_function_call(fn)
        except:
            # failure are fine. we just fall out into the next few phases
            pass

        if nativefn is not None:
            # this is a toolkit lambda. We can do something about it
            with cython_context():
                return SArray(_proxy=self.__proxy__.transform_native(nativefn, dtype, skip_undefined, seed))

        # Second phase. Try lua compilation if possible
        try:
            # try compilation
            if _lua_translate:
                # its a function
                print "Attempting Lua Translation"
                import graphlab.Lua_Translator
                import ast
                import StringIO

                def isalambda(v):
                    return isinstance(v, type(lambda: None)) and v.__name__ == '<lambda>'

                output = StringIO.StringIO()
                translator = gl.Lua_Translator.translator_NodeVisitor(output)
                ast_node = None
                try:
                    # Named functions can be translated straight from source.
                    if not isalambda(fn):
                        ast_node = ast.parse(inspect.getsource(fn))
                        translator.rename_function[fn.__name__] = "__lambda__transfer__"
                except:
                    pass
                try:
                    # Lambdas (or functions without source) are decompiled
                    # from byte code instead.
                    if ast_node == None:
                        print "Cannot translate. Trying again from byte code decompilation"
                        ast_node = meta.decompiler.decompile_func(fn)
                        translator.rename_function[""] = "__lambda__transfer__"
                except:
                    pass
                if ast_node == None:
                    raise ValueError("Unable to get source of function")
                ftype = gl.Lua_Translator.FunctionType()
                selftype = self.dtype()
                if selftype == list:
                    ftype.input_type = tuple([[]])
                elif selftype == dict:
                    ftype.input_type = tuple([{}])
                elif selftype == array.array:
                    ftype.input_type = tuple([[float]])
                else:
                    ftype.input_type = tuple([selftype])
                translator.function_known_types["__lambda__transfer__"] = ftype
                translator.translate_ast(ast_node)
                print "Lua Translation Success"
                print output.getvalue()
                fn = "LUA" + output.getvalue()
        except Exception as e:
            # Lua translation is best-effort; on failure we fall back to the
            # plain Python lambda transform below.
            print traceback.format_exc()
            print "Lua Translation Failed"
            print e
        except:
            print traceback.format_exc()
            print "Lua Translation Failed"

        with cython_context():
            return SArray(_proxy=self.__proxy__.transform(fn, dtype, skip_undefined, seed))

    def filter(self, fn, skip_undefined=True, seed=None):
        """
        Filter this SArray by a function.

        Returns a new SArray filtered by this SArray.  If `fn` evaluates an
        element to true, this element is copied to the new SArray. If not, it
        isn't. Throws an exception if the return type of `fn` is not castable
        to a boolean value.

        Parameters
        ----------
        fn : function
            Function that filters the SArray. Must evaluate to bool or int.

        skip_undefined : bool, optional
            If True, will not apply fn to any undefined values.

        seed : int, optional
            Used as the seed if a random number generator is included in fn.

        Returns
        -------
        out : SArray
            The SArray filtered by fn. Each element of the SArray is of
            type int.
Examples -------- >>> sa = graphlab.SArray([1,2,3]) >>> sa.filter(lambda x: x < 3) dtype: int Rows: 2 [1, 2] """ assert inspect.isfunction(fn), "Input must be a function" if not seed: seed = time.time() _mt._get_metric_tracker().track('sarray.filter') with cython_context(): return SArray(_proxy=self.__proxy__.filter(fn, skip_undefined, seed)) def sample(self, fraction, seed=None): """ Create an SArray which contains a subsample of the current SArray. Parameters ---------- fraction : float The fraction of the rows to fetch. Must be between 0 and 1. seed : int The random seed for the random number generator. Returns ------- out : SArray The new SArray which contains the subsampled rows. Examples -------- >>> sa = graphlab.SArray(range(10)) >>> sa.sample(.3) dtype: int Rows: 3 [2, 6, 9] """ if (fraction > 1 or fraction < 0): raise ValueError('Invalid sampling rate: ' + str(fraction)) if (self.size() == 0): return SArray() if not seed: seed = time.time() _mt._get_metric_tracker().track('sarray.sample') with cython_context(): return SArray(_proxy=self.__proxy__.sample(fraction, seed)) def _save_as_text(self, url): """ Save the SArray to disk as text file. """ raise NotImplementedError def all(self): """ Return True if every element of the SArray evaluates to False. For numeric SArrays zeros and missing values (``None``) evaluate to False, while all non-zero, non-missing values evaluate to True. For string, list, and dictionary SArrays, empty values (zero length strings, lists or dictionaries) or missing values (``None``) evaluate to False. All other values evaluate to True. Returns True on an empty SArray. 
Returns ------- out : bool See Also -------- any Examples -------- >>> graphlab.SArray([1, None]).all() False >>> graphlab.SArray([1, 0]).all() False >>> graphlab.SArray([1, 2]).all() True >>> graphlab.SArray(["hello", "world"]).all() True >>> graphlab.SArray(["hello", ""]).all() False >>> graphlab.SArray([]).all() True """ with cython_context(): return self.__proxy__.all() def any(self): """ Return True if any element of the SArray evaluates to True. For numeric SArrays any non-zero value evaluates to True. For string, list, and dictionary SArrays, any element of non-zero length evaluates to True. Returns False on an empty SArray. Returns ------- out : bool See Also -------- all Examples -------- >>> graphlab.SArray([1, None]).any() True >>> graphlab.SArray([1, 0]).any() True >>> graphlab.SArray([0, 0]).any() False >>> graphlab.SArray(["hello", "world"]).any() True >>> graphlab.SArray(["hello", ""]).any() True >>> graphlab.SArray(["", ""]).any() False >>> graphlab.SArray([]).any() False """ with cython_context(): return self.__proxy__.any() def max(self): """ Get maximum numeric value in SArray. Returns None on an empty SArray. Raises an exception if called on an SArray with non-numeric type. Returns ------- out : type of SArray Maximum value of SArray See Also -------- min Examples -------- >>> graphlab.SArray([14, 62, 83, 72, 77, 96, 5, 25, 69, 66]).max() 96 """ with cython_context(): return self.__proxy__.max() def min(self): """ Get minimum numeric value in SArray. Returns None on an empty SArray. Raises an exception if called on an SArray with non-numeric type. Returns ------- out : type of SArray Minimum value of SArray See Also -------- max Examples -------- >>> graphlab.SArray([14, 62, 83, 72, 77, 96, 5, 25, 69, 66]).min() """ with cython_context(): return self.__proxy__.min() def sum(self): """ Sum of all values in this SArray. Raises an exception if called on an SArray of strings, lists, or dictionaries. 
        If the SArray contains numeric arrays (array.array) and all the arrays
        are the same length, the sum over all the arrays will be returned.
        Returns None on an empty SArray. For large values, this may overflow
        without warning.

        Returns
        -------
        out : type of SArray
            Sum of all values in SArray
        """
        with cython_context():
            return self.__proxy__.sum()

    def mean(self):
        """
        Mean of all the values in the SArray, or mean image.

        Returns None on an empty SArray. Raises an exception if called on an
        SArray with non-numeric type or non-Image type.

        Returns
        -------
        out : float | graphlab.Image
            Mean of all values in SArray, or image holding per-pixel mean
            across the input SArray.
        """
        with cython_context():
            # Image SArrays compute a per-pixel mean via the extensions
            # module; everything else goes through the numeric proxy mean.
            if self.dtype() == gl.Image:
                import graphlab.extensions as extensions
                return extensions.generate_mean(self)
            else:
                return self.__proxy__.mean()

    def std(self, ddof=0):
        """
        Standard deviation of all the values in the SArray.

        Returns None on an empty SArray. Raises an exception if called on an
        SArray with non-numeric type or if `ddof` >= length of SArray.

        Parameters
        ----------
        ddof : int, optional
            "delta degrees of freedom" in the variance calculation.

        Returns
        -------
        out : float
            The standard deviation of all the values.
        """
        with cython_context():
            return self.__proxy__.std(ddof)

    def var(self, ddof=0):
        """
        Variance of all the values in the SArray.

        Returns None on an empty SArray. Raises an exception if called on an
        SArray with non-numeric type or if `ddof` >= length of SArray.

        Parameters
        ----------
        ddof : int, optional
            "delta degrees of freedom" in the variance calculation.

        Returns
        -------
        out : float
            Variance of all values in SArray.
        """
        with cython_context():
            return self.__proxy__.var(ddof)

    def num_missing(self):
        """
        Number of missing elements in the SArray.

        Returns
        -------
        out : int
            Number of missing values.
        """
        with cython_context():
            return self.__proxy__.num_missing()

    def nnz(self):
        """
        Number of non-zero elements in the SArray.

        Returns
        -------
        out : int
            Number of non-zero elements.
        """
        with cython_context():
            return self.__proxy__.nnz()

    def datetime_to_str(self,str_format="%Y-%m-%dT%H:%M:%S%ZP"):
        """
        Create a new SArray with all the values cast to str. The string format
        is specified by the 'str_format' parameter.

        Parameters
        ----------
        str_format : str
            The format to output the string. Default format is
            "%Y-%m-%dT%H:%M:%S%ZP".

        Returns
        -------
        out : SArray[str]
            The SArray converted to the type 'str'.

        Examples
        --------
        >>> dt = datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5))
        >>> sa = graphlab.SArray([dt])
        >>> sa.datetime_to_str("%e %b %Y %T %ZP")
        dtype: str
        Rows: 1
        [20 Oct 2011 09:30:10 GMT-05:00]

        See Also
        ----------
        str_to_datetime

        References
        ----------
        [1] Boost date time from string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html)
        """
        if(self.dtype() != datetime.datetime):
            raise TypeError("datetime_to_str expects SArray of datetime as input SArray")

        _mt._get_metric_tracker().track('sarray.datetime_to_str')

        with cython_context():
            return SArray(_proxy=self.__proxy__.datetime_to_str(str_format))

    def str_to_datetime(self,str_format="%Y-%m-%dT%H:%M:%S%ZP"):
        """
        Create a new SArray with all the values cast to datetime. The string
        format is specified by the 'str_format' parameter.

        Parameters
        ----------
        str_format : str
            The string format of the input SArray. Default format is
            "%Y-%m-%dT%H:%M:%S%ZP".

        Returns
        -------
        out : SArray[datetime.datetime]
            The SArray converted to the type 'datetime'.
Examples -------- >>> sa = graphlab.SArray(["20-Oct-2011 09:30:10 GMT-05:30"]) >>> sa.str_to_datetime("%d-%b-%Y %H:%M:%S %ZP") dtype: datetime Rows: 1 datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5.5)) See Also ---------- datetime_to_str References ---------- [1] boost date time to string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html) """ if(self.dtype() != str): raise TypeError("str_to_datetime expects SArray of str as input SArray") _mt._get_metric_tracker().track('sarray.str_to_datetime') with cython_context(): return SArray(_proxy=self.__proxy__.str_to_datetime(str_format)) def pixel_array_to_image(self, width, height, channels, undefined_on_failure=True, allow_rounding=False): """ Create a new SArray with all the values cast to :py:class:`graphlab.image.Image` of uniform size. Parameters ---------- width: int The width of the new images. height: int The height of the new images. channels: int. Number of channels of the new images. undefined_on_failure: bool , optional , default True If True, return None type instead of Image type in failure instances. If False, raises error upon failure. allow_rounding: bool, optional , default False If True, rounds non-integer values when converting to Image type. If False, raises error upon rounding. Returns ------- out : SArray[graphlab.Image] The SArray converted to the type 'graphlab.Image'. See Also -------- astype, str_to_datetime, datetime_to_str Examples -------- The MNIST data is scaled from 0 to 1, but our image type only loads integer pixel values from 0 to 255. If we just convert without scaling, all values below one would be cast to 0. 
>>> mnist_array = graphlab.SArray('http://s3.amazonaws.com/dato-datasets/mnist/mnist_vec_sarray') >>> scaled_mnist_array = mnist_array * 255 >>> mnist_img_sarray = gl.SArray.pixel_array_to_image(scaled_mnist_array, 28, 28, 1, allow_rounding = True) """ if(self.dtype() != array.array): raise TypeError("array_to_img expects SArray of arrays as input SArray") num_to_test = 10 num_test = min(self.size(), num_to_test) mod_values = [val % 1 for x in range(num_test) for val in self[x]] out_of_range_values = [(val > 255 or val < 0) for x in range(num_test) for val in self[x]] if sum(mod_values) != 0.0 and not allow_rounding: raise ValueError("There are non-integer values in the array data. Images only support integer data values between 0 and 255. To permit rounding, set the 'allow_rounding' paramter to 1.") if sum(out_of_range_values) != 0: raise ValueError("There are values outside the range of 0 to 255. Images only support integer data values between 0 and 255.") _mt._get_metric_tracker().track('sarray.pixel_array_to_img') import graphlab.extensions as extensions return extensions.vector_sarray_to_image_sarray(self, width, height, channels, undefined_on_failure) def _head_str(self, num_rows): """ Takes the head of SArray casted to string. """ import graphlab.extensions as extensions return extensions._head_str(self, num_rows) def astype(self, dtype, undefined_on_failure=False): """ Create a new SArray with all values cast to the given type. Throws an exception if the types are not castable to the given type. Parameters ---------- dtype : {int, float, str, list, array.array, dict, datetime.datetime} The type to cast the elements to in SArray undefined_on_failure: bool, optional If set to True, runtime cast failures will be emitted as missing values rather than failing. Returns ------- out : SArray [dtype] The SArray converted to the type ``dtype``. 
        Notes
        -----
        - The string parsing techniques used to handle conversion to
          dictionary and list types are quite generic and permit a variety of
          interesting formats to be interpreted. For instance, a JSON string
          can usually be interpreted as a list or a dictionary type. See the
          examples below.
        - For datetime-to-string and string-to-datetime conversions, use
          sa.datetime_to_str() and sa.str_to_datetime() functions.
        - For array.array to graphlab.Image conversions, use
          sa.pixel_array_to_image()

        Examples
        --------
        >>> sa = graphlab.SArray(['1','2','3','4'])
        >>> sa.astype(int)
        dtype: int
        Rows: 4
        [1, 2, 3, 4]

        Given an SArray of strings that look like dicts, convert to a
        dictionary type:

        >>> sa = graphlab.SArray(['{1:2 3:4}', '{a:b c:d}'])
        >>> sa.astype(dict)
        dtype: dict
        Rows: 2
        [{1: 2, 3: 4}, {'a': 'b', 'c': 'd'}]
        """
        _mt._get_metric_tracker().track('sarray.astype.%s' % str(dtype.__name__))

        # Image <- array conversions must go through pixel_array_to_image();
        # the generic cast path cannot handle them.
        if (dtype == gl.Image) and (self.dtype() == array.array):
            raise TypeError("Cannot cast from image type to array with sarray.astype(). Please use sarray.array_to_img() instead.")

        with cython_context():
            return SArray(_proxy=self.__proxy__.astype(dtype, undefined_on_failure))

    def clip(self, lower=float('nan'), upper=float('nan')):
        """
        Create a new SArray with each value clipped to be within the given
        bounds.

        In this case, "clipped" means that values below the lower bound will
        be set to the lower bound value. Values above the upper bound will be
        set to the upper bound value. This function can operate on SArrays of
        numeric type as well as array type, in which case each individual
        element in each array is clipped. By default ``lower`` and ``upper``
        are set to ``float('nan')`` which indicates the respective bound
        should be ignored. The method fails if invoked on an SArray of
        non-numeric type.

        Parameters
        ----------
        lower : int, optional
            The lower bound used to clip. Ignored if equal to
            ``float('nan')`` (the default).

        upper : int, optional
            The upper bound used to clip. Ignored if equal to
            ``float('nan')`` (the default).

        Returns
        -------
        out : SArray

        See Also
        --------
        clip_lower, clip_upper

        Examples
        --------
        >>> sa = graphlab.SArray([1,2,3])
        >>> sa.clip(2,2)
        dtype: int
        Rows: 3
        [2, 2, 2]
        """
        with cython_context():
            return SArray(_proxy=self.__proxy__.clip(lower, upper))

    def clip_lower(self, threshold):
        """
        Create new SArray with all values clipped to the given lower bound.
        This function can operate on numeric arrays, as well as vector arrays,
        in which case each individual element in each vector is clipped.
        Throws an exception if the SArray is empty or the types are
        non-numeric.

        Parameters
        ----------
        threshold : float
            The lower bound used to clip values.

        Returns
        -------
        out : SArray

        See Also
        --------
        clip, clip_upper

        Examples
        --------
        >>> sa = graphlab.SArray([1,2,3])
        >>> sa.clip_lower(2)
        dtype: int
        Rows: 3
        [2, 2, 3]
        """
        # NaN as the upper bound means "no upper bound" on the backend.
        with cython_context():
            return SArray(_proxy=self.__proxy__.clip(threshold, float('nan')))

    def clip_upper(self, threshold):
        """
        Create new SArray with all values clipped to the given upper bound.
        This function can operate on numeric arrays, as well as vector arrays,
        in which case each individual element in each vector is clipped.

        Parameters
        ----------
        threshold : float
            The upper bound used to clip values.

        Returns
        -------
        out : SArray

        See Also
        --------
        clip, clip_lower

        Examples
        --------
        >>> sa = graphlab.SArray([1,2,3])
        >>> sa.clip_upper(2)
        dtype: int
        Rows: 3
        [1, 2, 2]
        """
        # NaN as the lower bound means "no lower bound" on the backend.
        with cython_context():
            return SArray(_proxy=self.__proxy__.clip(float('nan'), threshold))

    def tail(self, n=10):
        """
        Get an SArray that contains the last n elements in the SArray.

        Parameters
        ----------
        n : int
            The number of elements to fetch

        Returns
        -------
        out : SArray
            A new SArray which contains the last n rows of the current SArray.
        """
        with cython_context():
            return SArray(_proxy=self.__proxy__.tail(n))

    def dropna(self):
        """
        Create new SArray containing only the non-missing values of the
        SArray.

        A missing value shows up in an SArray as 'None'.
This will also drop float('nan'). Returns ------- out : SArray The new SArray with missing values removed. """ _mt._get_metric_tracker().track('sarray.dropna') with cython_context(): return SArray(_proxy = self.__proxy__.drop_missing_values()) def fillna(self, value): """ Create new SArray with all missing values (None or NaN) filled in with the given value. The size of the new SArray will be the same as the original SArray. If the given value is not the same type as the values in the SArray, `fillna` will attempt to convert the value to the original SArray's type. If this fails, an error will be raised. Parameters ---------- value : type convertible to SArray's type The value used to replace all missing values Returns ------- out : SArray A new SArray with all missing values filled """ _mt._get_metric_tracker().track('sarray.fillna') with cython_context(): return SArray(_proxy = self.__proxy__.fill_missing_values(value)) def topk_index(self, topk=10, reverse=False): """ Create an SArray indicating which elements are in the top k. Entries are '1' if the corresponding element in the current SArray is a part of the top k elements, and '0' if that corresponding element is not. Order is descending by default. Parameters ---------- topk : int The number of elements to determine if 'top' reverse: bool If True, return the topk elements in ascending order Returns ------- out : SArray (of type int) Notes ----- This is used internally by SFrame's topk function. """ with cython_context(): return SArray(_proxy = self.__proxy__.topk_index(topk, reverse)) def sketch_summary(self, background=False, sub_sketch_keys=None): """ Summary statistics that can be calculated with one pass over the SArray. Returns a graphlab.Sketch object which can be further queried for many descriptive statistics over this SArray. Many of the statistics are approximate. See the :class:`~graphlab.Sketch` documentation for more detail. 
Parameters ---------- background : boolean, optional If True, the sketch construction will return immediately and the sketch will be constructed in the background. While this is going on, the sketch can be queried incrementally, but at a performance penalty. Defaults to False. sub_sketch_keys: int | str | list of int | list of str, optional For SArray of dict type, also constructs sketches for a given set of keys, For SArray of array type, also constructs sketches for the given indexes. The sub sketches may be queried using: :py:func:`~graphlab.Sketch.element_sub_sketch()` Defaults to None in which case no subsketches will be constructed. Returns ------- out : Sketch Sketch object that contains descriptive statistics for this SArray. Many of the statistics are approximate. """ from graphlab.data_structures.sketch import Sketch if (self.dtype() == gl.data_structures.image.Image): raise TypeError("sketch_summary() is not supported for arrays of image type") if (type(background) != bool): raise TypeError("'background' parameter has to be a boolean value") if (sub_sketch_keys != None): if (self.dtype() != dict and self.dtype() != array.array): raise TypeError("sub_sketch_keys is only supported for SArray of dictionary or array type") if not hasattr(sub_sketch_keys, "__iter__"): sub_sketch_keys = [sub_sketch_keys] value_types = set([type(i) for i in sub_sketch_keys]) if (len(value_types) != 1): raise ValueError("sub_sketch_keys member values need to have the same type.") value_type = value_types.pop(); if (self.dtype() == dict and value_type != str): raise TypeError("Only string value(s) can be passed to sub_sketch_keys for SArray of dictionary type. 
"+ "For dictionary types, sketch summary is computed by casting keys to string values.") if (self.dtype() == array.array and value_type != int): raise TypeError("Only int value(s) can be passed to sub_sketch_keys for SArray of array type") else: sub_sketch_keys = list() _mt._get_metric_tracker().track('sarray.sketch_summary') return Sketch(self, background, sub_sketch_keys = sub_sketch_keys) def append(self, other): """ Append an SArray to the current SArray. Creates a new SArray with the rows from both SArrays. Both SArrays must be of the same type. Parameters ---------- other : SArray Another SArray whose rows are appended to current SArray. Returns ------- out : SArray A new SArray that contains rows from both SArrays, with rows from the ``other`` SArray coming after all rows from the current SArray. See Also -------- SFrame.append Examples -------- >>> sa = graphlab.SArray([1, 2, 3]) >>> sa2 = graphlab.SArray([4, 5, 6]) >>> sa.append(sa2) dtype: int Rows: 6 [1, 2, 3, 4, 5, 6] """ _mt._get_metric_tracker().track('sarray.append') if type(other) is not SArray: raise RuntimeError("SArray append can only work with SArray") if self.dtype() != other.dtype(): raise RuntimeError("Data types in both SArrays have to be the same") with cython_context(): other.__materialize__() return SArray(_proxy = self.__proxy__.append(other.__proxy__)) def unique(self): """ Get all unique values in the current SArray. Raises a TypeError if the SArray is of dictionary type. Will not necessarily preserve the order of the given SArray in the new SArray. Returns ------- out : SArray A new SArray that contains the unique values of the current SArray. See Also -------- SFrame.unique """ _mt._get_metric_tracker().track('sarray.unique') tmp_sf = gl.SFrame() tmp_sf.add_column(self, 'X1') res = tmp_sf.groupby('X1',{}) return SArray(_proxy=res['X1'].__proxy__) @gl._check_canvas_enabled def show(self, view=None): """ show(view=None) Visualize the SArray with GraphLab Create :mod:`~graphlab.canvas`. 
This function starts Canvas if it is not already running. If the SArray has already been plotted, this function will update the plot. Parameters ---------- view : str, optional The name of the SFrame view to show. Can be one of: - None: Use the default (depends on the dtype of the SArray). - 'Categorical': Shows most frequent items in this SArray, sorted by frequency. Only valid for str, int, or float dtypes. - 'Numeric': Shows a histogram (distribution of values) for the SArray. Only valid for int or float dtypes. - 'Dictionary': Shows a cross filterable list of keys (categorical) and values (categorical or numeric). Only valid for dict dtype. - 'Array': Shows a Numeric view, filterable by sub-column (index). Only valid for array.array dtype. - 'List': Shows a Categorical view, aggregated across all sub- columns (indices). Only valid for list dtype. Returns ------- view : graphlab.canvas.view.View An object representing the GraphLab Canvas view See Also -------- canvas Examples -------- Suppose 'sa' is an SArray, we can view it in GraphLab Canvas using: >>> sa.show() If 'sa' is a numeric (int or float) SArray, we can view it as a categorical variable using: >>> sa.show(view='Categorical') """ import graphlab.canvas import graphlab.canvas.inspect import graphlab.canvas.views.sarray graphlab.canvas.inspect.find_vars(self) return graphlab.canvas.show(graphlab.canvas.views.sarray.SArrayView(self, params={ 'view': view })) def item_length(self): """ Length of each element in the current SArray. Only works on SArrays of dict, array, or list type. If a given element is a missing value, then the output elements is also a missing value. This function is equivalent to the following but more performant: sa_item_len = sa.apply(lambda x: len(x) if x is not None else None) Returns ------- out_sf : SArray A new SArray, each element in the SArray is the len of the corresponding items in original SArray. Examples -------- >>> sa = SArray([ ... 
{"is_restaurant": 1, "is_electronics": 0}, ... {"is_restaurant": 1, "is_retail": 1, "is_electronics": 0}, ... {"is_restaurant": 0, "is_retail": 1, "is_electronics": 0}, ... {"is_restaurant": 0}, ... {"is_restaurant": 1, "is_electronics": 1}, ... None]) >>> sa.item_length() dtype: int Rows: 6 [2, 3, 3, 1, 2, None] """ if (self.dtype() not in [list, dict, array.array]): raise TypeError("item_length() is only applicable for SArray of type list, dict and array.") _mt._get_metric_tracker().track('sarray.item_length') with cython_context(): return SArray(_proxy = self.__proxy__.item_length()) def split_datetime(self, column_name_prefix = "X", limit=None, tzone=False): """ Splits an SArray of datetime type to multiple columns, return a new SFrame that contains expanded columns. A SArray of datetime will be split by default into an SFrame of 6 columns, one for each year/month/day/hour/minute/second element. column naming: When splitting a SArray of datetime type, new columns are named: prefix.year, prefix.month, etc. The prefix is set by the parameter "column_name_prefix" and defaults to 'X'. If column_name_prefix is None or empty, then no prefix is used. Timezone column: If tzone parameter is True, then timezone information is represented as one additional column which is a float shows the offset from GMT(0.0) or from UTC. Parameters ---------- column_name_prefix: str, optional If provided, expanded column names would start with the given prefix. Defaults to "X". limit: list[str], optional Limits the set of datetime elements to expand. Elements are 'year','month','day','hour','minute', and 'second'. tzone: bool, optional A boolean parameter that determines whether to show timezone column or not. Defaults to False. 
Returns ------- out : SFrame A new SFrame that contains all expanded columns Examples -------- To expand only day and year elements of a datetime SArray >>> sa = SArray( [datetime(2011, 1, 21, 7, 7, 21, tzinfo=GMT(0)), datetime(2010, 2, 5, 7, 8, 21, tzinfo=GMT(4.5)]) >>> sa.split_datetime(column_name_prefix=None,limit=['day','year']) Columns: day int year int Rows: 2 Data: +-------+--------+ | day | year | +-------+--------+ | 21 | 2011 | | 5 | 2010 | +-------+--------+ [2 rows x 2 columns] To expand only year and tzone elements of a datetime SArray with tzone column represented as a string. Columns are named with prefix: 'Y.column_name'. >>> sa.split_datetime(column_name_prefix="Y",limit=['year'],tzone=True) Columns: Y.year int Y.tzone float Rows: 2 Data: +----------+---------+ | Y.year | Y.tzone | +----------+---------+ | 2011 | 0.0 | | 2010 | 4.5 | +----------+---------+ [2 rows x 2 columns] """ if self.dtype() != datetime.datetime: raise TypeError("Only column of datetime type is supported.") if column_name_prefix == None: column_name_prefix = "" if type(column_name_prefix) != str: raise TypeError("'column_name_prefix' must be a string") # convert limit to column_keys if limit != None: if (not hasattr(limit, '__iter__')): raise TypeError("'limit' must be a list"); name_types = set([type(i) for i in limit]) if (len(name_types) != 1): raise TypeError("'limit' contains values that are different types") if (name_types.pop() != str): raise TypeError("'limit' must contain string values.") if len(set(limit)) != len(limit): raise ValueError("'limit' contains duplicate values") column_types = [] if(limit != None): column_types = list() for i in limit: column_types.append(int); else: limit = ['year','month','day','hour','minute','second'] column_types = [int, int, int, int, int, int] if(tzone == True): limit += ['tzone'] column_types += [float] _mt._get_metric_tracker().track('sarray.split_datetime') with cython_context(): return 
gl.SFrame(_proxy=self.__proxy__.expand(column_name_prefix, limit, column_types)) def unpack(self, column_name_prefix = "X", column_types=None, na_value=None, limit=None): """ Convert an SArray of list, array, or dict type to an SFrame with multiple columns. `unpack` expands an SArray using the values of each list/array/dict as elements in a new SFrame of multiple columns. For example, an SArray of lists each of length 4 will be expanded into an SFrame of 4 columns, one for each list element. An SArray of lists/arrays of varying size will be expand to a number of columns equal to the longest list/array. An SArray of dictionaries will be expanded into as many columns as there are keys. When unpacking an SArray of list or array type, new columns are named: `column_name_prefix`.0, `column_name_prefix`.1, etc. If unpacking a column of dict type, unpacked columns are named `column_name_prefix`.key1, `column_name_prefix`.key2, etc. When unpacking an SArray of list or dictionary types, missing values in the original element remain as missing values in the resultant columns. If the `na_value` parameter is specified, all values equal to this given value are also replaced with missing values. In an SArray of array.array type, NaN is interpreted as a missing value. :py:func:`graphlab.SFrame.pack_columns()` is the reverse effect of unpack Parameters ---------- column_name_prefix: str, optional If provided, unpacked column names would start with the given prefix. column_types: list[type], optional Column types for the unpacked columns. If not provided, column types are automatically inferred from first 100 rows. Defaults to None. na_value: optional Convert all values that are equal to `na_value` to missing value if specified. limit: list, optional Limits the set of list/array/dict keys to unpack. For list/array SArrays, 'limit' must contain integer indices. For dict SArray, 'limit' must contain dictionary keys. 
Returns ------- out : SFrame A new SFrame that contains all unpacked columns Examples -------- To unpack a dict SArray >>> sa = SArray([{ 'word': 'a', 'count': 1}, ... { 'word': 'cat', 'count': 2}, ... { 'word': 'is', 'count': 3}, ... { 'word': 'coming','count': 4}]) Normal case of unpacking SArray of type dict: >>> sa.unpack(column_name_prefix=None) Columns: count int word str <BLANKLINE> Rows: 4 <BLANKLINE> Data: +-------+--------+ | count | word | +-------+--------+ | 1 | a | | 2 | cat | | 3 | is | | 4 | coming | +-------+--------+ [4 rows x 2 columns] <BLANKLINE> Unpack only keys with 'word': >>> sa.unpack(limit=['word']) Columns: X.word str <BLANKLINE> Rows: 4 <BLANKLINE> Data: +--------+ | X.word | +--------+ | a | | cat | | is | | coming | +--------+ [4 rows x 1 columns] <BLANKLINE> >>> sa2 = SArray([ ... [1, 0, 1], ... [1, 1, 1], ... [0, 1]]) Convert all zeros to missing values: >>> sa2.unpack(column_types=[int, int, int], na_value=0) Columns: X.0 int X.1 int X.2 int <BLANKLINE> Rows: 3 <BLANKLINE> Data: +------+------+------+ | X.0 | X.1 | X.2 | +------+------+------+ | 1 | None | 1 | | 1 | 1 | 1 | | None | 1 | None | +------+------+------+ [3 rows x 3 columns] <BLANKLINE> """ if self.dtype() not in [dict, array.array, list]: raise TypeError("Only SArray of dict/list/array type supports unpack") if column_name_prefix == None: column_name_prefix = "" if type(column_name_prefix) != str: raise TypeError("'column_name_prefix' must be a string") # validdate 'limit' if limit != None: if (not hasattr(limit, '__iter__')): raise TypeError("'limit' must be a list"); name_types = set([type(i) for i in limit]) if (len(name_types) != 1): raise TypeError("'limit' contains values that are different types") # limit value should be numeric if unpacking sarray.array value if (self.dtype() != dict) and (name_types.pop() != int): raise TypeError("'limit' must contain integer values.") if len(set(limit)) != len(limit): raise ValueError("'limit' contains duplicate values") if 
(column_types != None): if not hasattr(column_types, '__iter__'): raise TypeError("column_types must be a list"); for column_type in column_types: if (column_type not in (int, float, str, list, dict, array.array)): raise TypeError("column_types contains unsupported types. Supported types are ['float', 'int', 'list', 'dict', 'str', 'array.array']") if limit != None: if len(limit) != len(column_types): raise ValueError("limit and column_types do not have the same length") elif self.dtype() == dict: raise ValueError("if 'column_types' is given, 'limit' has to be provided to unpack dict type.") else: limit = range(len(column_types)) else: head_rows = self.head(100).dropna() lengths = [len(i) for i in head_rows] if len(lengths) == 0 or max(lengths) == 0: raise RuntimeError("Cannot infer number of items from the SArray, SArray may be empty. please explicitly provide column types") # infer column types for dict type at server side, for list and array, infer from client side if self.dtype() != dict: length = max(lengths) if limit == None: limit = range(length) else: # adjust the length length = len(limit) if self.dtype() == array.array: column_types = [float for i in range(length)] else: column_types = list() for i in limit: t = [(x[i] if ((x is not None) and len(x) > i) else None) for x in head_rows] column_types.append(infer_type_of_list(t)) _mt._get_metric_tracker().track('sarray.unpack') with cython_context(): if (self.dtype() == dict and column_types == None): limit = limit if limit != None else [] return gl.SFrame(_proxy=self.__proxy__.unpack_dict(column_name_prefix, limit, na_value)) else: return gl.SFrame(_proxy=self.__proxy__.unpack(column_name_prefix, limit, column_types, na_value)) def sort(self, ascending=True): """ Sort all values in this SArray. Sort only works for sarray of type str, int and float, otherwise TypeError will be raised. Creates a new, sorted SArray. 
Parameters ---------- ascending: boolean, optional If true, the sarray values are sorted in ascending order, otherwise, descending order. Returns ------- out: SArray Examples -------- >>> sa = SArray([3,2,1]) >>> sa.sort() dtype: int Rows: 3 [1, 2, 3] """ if self.dtype() not in (int, float, str, datetime.datetime): raise TypeError("Only sarray with type (int, float, str, datetime.datetime) can be sorted") sf = gl.SFrame() sf['a'] = self return sf.sort('a', ascending)['a']
agpl-3.0
koniiiik/django
tests/sitemaps_tests/test_http.py
16
11105
from __future__ import unicode_literals import os from datetime import date from unittest import skipUnless from django.apps import apps from django.conf import settings from django.contrib.sitemaps import GenericSitemap, Sitemap from django.contrib.sites.models import Site from django.core.exceptions import ImproperlyConfigured from django.test import modify_settings, override_settings from django.utils._os import upath from django.utils.formats import localize from django.utils.translation import activate, deactivate from .base import SitemapTestsBase from .models import TestModel class HTTPSitemapTests(SitemapTestsBase): def test_simple_sitemap_index(self): "A simple sitemap index can be rendered" response = self.client.get('/simple/index.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap> </sitemapindex> """ % self.base_url self.assertXMLEqual(response.content.decode('utf-8'), expected_content) @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'templates')], }]) def test_simple_sitemap_custom_index(self): "A simple sitemap index can be rendered with a custom template" response = self.client.get('/simple/custom-index.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <!-- This is a customised template --> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap> </sitemapindex> """ % self.base_url self.assertXMLEqual(response.content.decode('utf-8'), expected_content) def test_simple_sitemap_section(self): "A simple sitemap section can be rendered" response = self.client.get('/simple/sitemap-simple.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> 
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """ % (self.base_url, date.today()) self.assertXMLEqual(response.content.decode('utf-8'), expected_content) def test_simple_sitemap(self): "A simple sitemap can be rendered" response = self.client.get('/simple/sitemap.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """ % (self.base_url, date.today()) self.assertXMLEqual(response.content.decode('utf-8'), expected_content) @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'templates')], }]) def test_simple_custom_sitemap(self): "A simple sitemap can be rendered with a custom template" response = self.client.get('/simple/custom-sitemap.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <!-- This is a customised template --> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """ % (self.base_url, date.today()) self.assertXMLEqual(response.content.decode('utf-8'), expected_content) def test_sitemap_last_modified(self): "Tests that Last-Modified header is set correctly" response = self.client.get('/lastmod/sitemap.xml') self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 10:00:00 GMT') def test_sitemap_last_modified_date(self): """ The Last-Modified header should be support dates (without time). """ response = self.client.get('/lastmod/date-sitemap.xml') self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 00:00:00 GMT') def test_sitemap_last_modified_tz(self): """ The Last-Modified header should be converted from timezone aware dates to GMT. 
""" response = self.client.get('/lastmod/tz-sitemap.xml') self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 15:00:00 GMT') def test_sitemap_last_modified_missing(self): "Tests that Last-Modified header is missing when sitemap has no lastmod" response = self.client.get('/generic/sitemap.xml') self.assertFalse(response.has_header('Last-Modified')) def test_sitemap_last_modified_mixed(self): "Tests that Last-Modified header is omitted when lastmod not on all items" response = self.client.get('/lastmod-mixed/sitemap.xml') self.assertFalse(response.has_header('Last-Modified')) def test_sitemaps_lastmod_mixed_ascending_last_modified_missing(self): """ The Last-Modified header is omitted when lastmod isn't found in all sitemaps. Test sitemaps are sorted by lastmod in ascending order. """ response = self.client.get('/lastmod-sitemaps/mixed-ascending.xml') self.assertFalse(response.has_header('Last-Modified')) def test_sitemaps_lastmod_mixed_descending_last_modified_missing(self): """ The Last-Modified header is omitted when lastmod isn't found in all sitemaps. Test sitemaps are sorted by lastmod in descending order. """ response = self.client.get('/lastmod-sitemaps/mixed-descending.xml') self.assertFalse(response.has_header('Last-Modified')) def test_sitemaps_lastmod_ascending(self): """ The Last-Modified header is set to the most recent sitemap lastmod. Test sitemaps are sorted by lastmod in ascending order. """ response = self.client.get('/lastmod-sitemaps/ascending.xml') self.assertEqual(response['Last-Modified'], 'Sat, 20 Apr 2013 05:00:00 GMT') def test_sitemaps_lastmod_descending(self): """ The Last-Modified header is set to the most recent sitemap lastmod. Test sitemaps are sorted by lastmod in descending order. 
""" response = self.client.get('/lastmod-sitemaps/descending.xml') self.assertEqual(response['Last-Modified'], 'Sat, 20 Apr 2013 05:00:00 GMT') @skipUnless(settings.USE_I18N, "Internationalization is not enabled") @override_settings(USE_L10N=True) def test_localized_priority(self): "The priority value should not be localized (Refs #14164)" activate('fr') self.assertEqual('0,3', localize(0.3)) # Retrieve the sitemap. Check that priorities # haven't been rendered in localized format response = self.client.get('/simple/sitemap.xml') self.assertContains(response, '<priority>0.5</priority>') self.assertContains(response, '<lastmod>%s</lastmod>' % date.today()) deactivate() @modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}) def test_requestsite_sitemap(self): # Make sure hitting the flatpages sitemap without the sites framework # installed doesn't raise an exception. response = self.client.get('/simple/sitemap.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """ % date.today() self.assertXMLEqual(response.content.decode('utf-8'), expected_content) @skipUnless(apps.is_installed('django.contrib.sites'), "django.contrib.sites app not installed.") def test_sitemap_get_urls_no_site_1(self): """ Check we get ImproperlyConfigured if we don't pass a site object to Sitemap.get_urls and no Site objects exist """ Site.objects.all().delete() with self.assertRaises(ImproperlyConfigured): Sitemap().get_urls() @modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}) def test_sitemap_get_urls_no_site_2(self): """ Check we get ImproperlyConfigured when we don't pass a site object to Sitemap.get_urls if Site objects exists, but the sites framework is not actually installed. 
""" with self.assertRaises(ImproperlyConfigured): Sitemap().get_urls() def test_sitemap_item(self): """ Check to make sure that the raw item is included with each Sitemap.get_url() url result. """ test_sitemap = GenericSitemap({'queryset': TestModel.objects.order_by('pk').all()}) def is_testmodel(url): return isinstance(url['item'], TestModel) item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls())) self.assertTrue(item_in_url_info) def test_cached_sitemap_index(self): """ Check that a cached sitemap index can be rendered (#2713). """ response = self.client.get('/cached/index.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap> </sitemapindex> """ % self.base_url self.assertXMLEqual(response.content.decode('utf-8'), expected_content) def test_x_robots_sitemap(self): response = self.client.get('/simple/index.xml') self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive') response = self.client.get('/simple/sitemap.xml') self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive') def test_empty_sitemap(self): response = self.client.get('/empty/sitemap.xml') self.assertEqual(response.status_code, 200) @override_settings(LANGUAGES=(('en', 'English'), ('pt', 'Portuguese'))) def test_simple_i18nsitemap_index(self): "A simple i18n sitemap index can be rendered" response = self.client.get('/simple/i18n.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <url><loc>{0}/en/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url><url><loc>{0}/pt/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """.format(self.base_url, self.i18n_model.pk) self.assertXMLEqual(response.content.decode('utf-8'), expected_content) def test_sitemap_without_entries(self): 
response = self.client.get('/sitemap-without-entries/sitemap.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> </urlset>""" self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
bsd-3-clause