repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
SmarTS-Lab/OpenIPSL
CI/CITests.py
4
2135
import os from OMPython import OMCSessionZMQ class CITests(): ''' Python class used to run CI tests ''' def __init__(self, rootPath): ''' Constructor starts omc and loads MSL ''' self.rootPath = rootPath self.omc = OMCSessionZMQ() os.chdir(self.rootPath) self.omc.sendExpression("loadModel(Modelica)") def loadLib(self, libName, libPath): # Attempt to load the library if self.omc.sendExpression('loadFile("%s")' % (self.rootPath + libPath)): print("Load success: %s" % libName) else: errmsg = libName + " was not loaded! Check the library path:\n" + libPath raise Exception(errmsg) def runSyntaxCheck(self, libName, libPath): # Load library self.loadLib(libName,libPath) ''' Checks all of the models in the library and returns number of faild checks ''' # Get the list of all classes in OpenIPSL test_list = self.omc.sendExpression('getClassNames(%s,recursive=true)' % libName) nFailed = 0 nPassed = 0 # Run the check for all classes that are model and print result msgs for test in test_list: if self.omc.sendExpression("isModel(%s)" % (test)): # Check if a class is a model passMsg = self.omc.sendExpression("checkModel(%s)" % (test)) if "completed successfully." in passMsg: nPassed += 1 else: failMsg = self.omc.sendExpression("getErrorString()") print(failMsg) nFailed += 1 # Print a check summary if nFailed == 0: str1 = "== %s ----------------------" % libName print("%s OK! == Models checked: %s" % (str1[:22], nPassed)) else: print("==== Check Summary for %s ====" % libName) print("Number of models that passed the check is: %s" % nPassed) print("Number of models that failed the check is: %s" % nFailed) # Return test result return (nFailed == 0)
mpl-2.0
ProjectQ-Framework/ProjectQ
projectq/ops/_metagates_test.py
1
9268
# -*- coding: utf-8 -*- # Copyright 2017 ProjectQ-Framework (www.projectq.ch) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for projectq.ops._gates.""" import cmath import math import numpy as np import pytest from projectq.types import Qubit from projectq import MainEngine from projectq.cengines import DummyEngine from projectq.ops import ( T, Y, NotInvertible, Entangle, Rx, FastForwardingGate, Command, C, ClassicalInstructionGate, All, ) from projectq.types import WeakQubitRef from projectq.ops import _metagates def test_tensored_gate_invalid(): qb0 = WeakQubitRef(None, idx=0) qb1 = WeakQubitRef(None, idx=1) with pytest.raises(ValueError): _metagates.Tensor(Y) | (qb0, qb1) with pytest.raises(ValueError): _metagates.Tensor(Y) | qb0 def test_tensored_controlled_gate(): saving_backend = DummyEngine(save_commands=True) main_engine = MainEngine(backend=saving_backend, engine_list=[DummyEngine()]) gate = Rx(0.6) qubit0 = Qubit(main_engine, 0) qubit1 = Qubit(main_engine, 1) qubit2 = Qubit(main_engine, 2) target_qubits = [qubit1, qubit2] C(All(gate)) | (qubit0, target_qubits) assert saving_backend.received_commands[-1].gate == gate assert len(saving_backend.received_commands[-1].control_qubits) == 1 def test_daggered_gate_init(): # Choose gate which does not have an inverse gate: not_invertible_gate = T with pytest.raises(NotInvertible): not_invertible_gate.get_inverse() # Choose gate which does have an inverse defined: invertible_gate = Y assert invertible_gate.get_inverse() 
== Y # Test init and matrix dagger_inv = _metagates.DaggeredGate(not_invertible_gate) assert dagger_inv._gate == not_invertible_gate assert np.array_equal(dagger_inv.matrix, np.matrix([[1, 0], [0, cmath.exp(-1j * cmath.pi / 4)]])) inv = _metagates.DaggeredGate(invertible_gate) assert inv._gate == invertible_gate assert np.array_equal(inv.matrix, np.matrix([[0, -1j], [1j, 0]])) # Test matrix no_matrix_gate = Entangle with pytest.raises(AttributeError): no_matrix_gate.matrix inv_no_matrix_gate = _metagates.DaggeredGate(no_matrix_gate) with pytest.raises(AttributeError): inv_no_matrix_gate.matrix def test_daggered_gate_str(): daggered_gate = _metagates.DaggeredGate(Y) assert str(daggered_gate) == str(Y) + r"^\dagger" def test_daggered_gate_hashable(): daggered_gate1 = _metagates.DaggeredGate(Y) daggered_gate2 = _metagates.DaggeredGate(T) d = {daggered_gate1: 1, daggered_gate2: 3} assert len(d) == 2 # for efficiency reasons the following should be true: assert hash(daggered_gate1) != hash(daggered_gate2) def test_daggered_gate_tex_str(): daggered_gate = _metagates.DaggeredGate(Y) str_Y = Y.tex_str() if hasattr(Y, 'tex_str') else str(Y) assert daggered_gate.tex_str() == str_Y + r"${}^\dagger$" # test for a gate with tex_str method rx = Rx(0.5) daggered_rx = _metagates.DaggeredGate(rx) str_rx = rx.tex_str() if hasattr(rx, 'tex_str') else str(rx) assert daggered_rx.tex_str() == str_rx + r"${}^\dagger$" def test_daggered_gate_get_inverse(): daggered_gate = _metagates.DaggeredGate(Y) assert daggered_gate.get_inverse() == Y def test_daggered_gate_comparison(): daggered_gate = _metagates.DaggeredGate(Y) daggered_gate2 = _metagates.DaggeredGate(Y) assert daggered_gate == daggered_gate2 def test_get_inverse(): # Choose gate which does not have an inverse gate: not_invertible_gate = T with pytest.raises(NotInvertible): not_invertible_gate.get_inverse() # Choose gate which does have an inverse defined: invertible_gate = Y assert invertible_gate.get_inverse() == Y # Check 
get_inverse(gate) inv = _metagates.get_inverse(not_invertible_gate) assert isinstance(inv, _metagates.DaggeredGate) and inv._gate == not_invertible_gate inv2 = _metagates.get_inverse(invertible_gate) assert inv2 == Y def test_is_identity(): # Choose gate which is not an identity gate: non_identity_gate = Rx(0.5) assert not non_identity_gate.is_identity() assert not _metagates.is_identity(non_identity_gate) # Choose gate which is an identity gate: identity_gate = Rx(0.0) assert identity_gate.is_identity() assert _metagates.is_identity(identity_gate) def test_controlled_gate_init(): one_control = _metagates.ControlledGate(Y, 1) two_control = _metagates.ControlledGate(Y, 2) three_control = _metagates.ControlledGate(one_control, 2) assert one_control._gate == Y assert one_control._n == 1 assert two_control._gate == Y assert two_control._n == 2 assert three_control._gate == Y assert three_control._n == 3 def test_controlled_gate_str(): one_control = _metagates.ControlledGate(Y, 2) assert str(one_control) == "CC" + str(Y) def test_controlled_gate_get_inverse(): one_control = _metagates.ControlledGate(Rx(0.5), 1) expected = _metagates.ControlledGate(Rx(-0.5 + 4 * math.pi), 1) assert one_control.get_inverse() == expected def test_controlled_gate_empty_controls(): rec = DummyEngine(save_commands=True) eng = MainEngine(backend=rec, engine_list=[]) a = eng.allocate_qureg(1) _metagates.ControlledGate(Y, 0) | ((), a) assert rec.received_commands[-1] == Command(eng, Y, [a]) def test_controlled_gate_or(): saving_backend = DummyEngine(save_commands=True) main_engine = MainEngine(backend=saving_backend, engine_list=[DummyEngine()]) gate = Rx(0.6) qubit0 = Qubit(main_engine, 0) qubit1 = Qubit(main_engine, 1) qubit2 = Qubit(main_engine, 2) qubit3 = Qubit(main_engine, 3) expected_cmd = Command(main_engine, gate, ([qubit3],), controls=[qubit0, qubit1, qubit2]) received_commands = [] # Option 1: _metagates.ControlledGate(gate, 3) | ([qubit1], [qubit0], [qubit2], [qubit3]) # Option 2: 
_metagates.ControlledGate(gate, 3) | (qubit1, qubit0, qubit2, qubit3) # Option 3: _metagates.ControlledGate(gate, 3) | ([qubit1, qubit0], qubit2, qubit3) # Option 4: _metagates.ControlledGate(gate, 3) | (qubit1, [qubit0, qubit2], qubit3) # Wrong option 5: with pytest.raises(_metagates.ControlQubitError): _metagates.ControlledGate(gate, 3) | (qubit1, [qubit0, qubit2, qubit3]) # Remove Allocate and Deallocate gates for cmd in saving_backend.received_commands: if not (isinstance(cmd.gate, FastForwardingGate) or isinstance(cmd.gate, ClassicalInstructionGate)): received_commands.append(cmd) assert len(received_commands) == 4 for cmd in received_commands: assert cmd == expected_cmd def test_controlled_gate_comparison(): gate1 = _metagates.ControlledGate(Y, 1) gate2 = _metagates.ControlledGate(Y, 1) gate3 = _metagates.ControlledGate(T, 1) gate4 = _metagates.ControlledGate(Y, 2) assert gate1 == gate2 assert not gate1 == gate3 assert gate1 != gate4 def test_c(): expected = _metagates.ControlledGate(Y, 2) assert _metagates.C(Y, 2) == expected def test_tensor_init(): gate = _metagates.Tensor(Y) assert gate._gate == Y def test_tensor_str(): gate = _metagates.Tensor(Y) assert str(gate) == "Tensor(" + str(Y) + ")" def test_tensor_get_inverse(): gate = _metagates.Tensor(Rx(0.6)) inverse = gate.get_inverse() assert isinstance(inverse, _metagates.Tensor) assert inverse._gate == Rx(-0.6 + 4 * math.pi) def test_tensor_comparison(): gate1 = _metagates.Tensor(Rx(0.6)) gate2 = _metagates.Tensor(Rx(0.6 + 4 * math.pi)) assert gate1 == gate2 assert gate1 != Rx(0.6) def test_tensor_or(): saving_backend = DummyEngine(save_commands=True) main_engine = MainEngine(backend=saving_backend, engine_list=[DummyEngine()]) gate = Rx(0.6) qubit0 = Qubit(main_engine, 0) qubit1 = Qubit(main_engine, 1) qubit2 = Qubit(main_engine, 2) # Option 1: _metagates.Tensor(gate) | ([qubit0, qubit1, qubit2],) # Option 2: _metagates.Tensor(gate) | [qubit0, qubit1, qubit2] received_commands = [] # Remove Allocate and 
Deallocate gates for cmd in saving_backend.received_commands: if not (isinstance(cmd.gate, FastForwardingGate) or isinstance(cmd.gate, ClassicalInstructionGate)): received_commands.append(cmd) # Check results assert len(received_commands) == 6 qubit_ids = [] for cmd in received_commands: assert len(cmd.qubits) == 1 assert cmd.gate == gate qubit_ids.append(cmd.qubits[0][0].id) assert sorted(qubit_ids) == [0, 0, 1, 1, 2, 2]
apache-2.0
pelya/commandergenius
project/jni/python/src/Tools/scripts/svneol.py
94
2931
#! /usr/bin/env python """ SVN helper script. Try to set the svn:eol-style property to "native" on every .py, .txt, .c and .h file in the directory tree rooted at the current directory. Files with the svn:eol-style property already set (to anything) are skipped. svn will itself refuse to set this property on a file that's not under SVN control, or that has a binary mime-type property set. This script inherits that behavior, and passes on whatever warning message the failing "svn propset" command produces. In the Python project, it's safe to invoke this script from the root of a checkout. No output is produced for files that are ignored. For a file that gets svn:eol-style set, output looks like: property 'svn:eol-style' set on 'Lib\ctypes\__init__.py' For a file not under version control: svn: warning: 'patch-finalizer.txt' is not under version control and for a file with a binary mime-type property: svn: File 'Lib\test\test_pep263.py' has binary mime type property """ import re import os def propfiles(root, fn): default = os.path.join(root, ".svn", "props", fn+".svn-work") try: format = int(open(os.path.join(root, ".svn", "format")).read().strip()) except IOError: return [] if format in (8, 9): # In version 8 and 9, committed props are stored in prop-base, local # modifications in props return [os.path.join(root, ".svn", "prop-base", fn+".svn-base"), os.path.join(root, ".svn", "props", fn+".svn-work")] raise ValueError, "Unknown repository format" def proplist(root, fn): "Return a list of property names for file fn in directory root" result = [] for path in propfiles(root, fn): try: f = open(path) except IOError: # no properties file: not under version control, # or no properties set continue while 1: # key-value pairs, of the form # K <length> # <keyname>NL # V length # <value>NL # END line = f.readline() if line.startswith("END"): break assert line.startswith("K ") L = int(line.split()[1]) key = f.read(L) result.append(key) f.readline() line = f.readline() assert 
line.startswith("V ") L = int(line.split()[1]) value = f.read(L) f.readline() f.close() return result possible_text_file = re.compile(r"\.([hc]|py|txt|sln|vcproj)$").search for root, dirs, files in os.walk('.'): if '.svn' in dirs: dirs.remove('.svn') for fn in files: if possible_text_file(fn): if 'svn:eol-style' not in proplist(root, fn): path = os.path.join(root, fn) os.system('svn propset svn:eol-style native "%s"' % path)
lgpl-2.1
alkaitz/starloot
src/gameengine/webSocketServer/lib/tornado-3.0.1/demos/s3server/s3server.py
10
9692
#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of an S3-like storage server based on local files. Useful to test features that will eventually run on S3, or if you want to run something locally that was once running on S3. We don't support all the features of S3, but it does work with the standard S3 client for the most basic semantics. To use the standard S3 client with this module: c = S3.AWSAuthConnection("", "", server="localhost", port=8888, is_secure=False) c.create_bucket("mybucket") c.put("mybucket", "mykey", "a value") print c.get("mybucket", "mykey").body """ import bisect import datetime import hashlib import os import os.path import urllib from tornado import escape from tornado import httpserver from tornado import ioloop from tornado import web from tornado.util import bytes_type def start(port, root_directory="/tmp/s3", bucket_depth=0): """Starts the mock S3 server on the given port at the given path.""" application = S3Application(root_directory, bucket_depth) http_server = httpserver.HTTPServer(application) http_server.listen(port) ioloop.IOLoop.instance().start() class S3Application(web.Application): """Implementation of an S3-like storage server based on local files. If bucket depth is given, we break files up into multiple directories to prevent hitting file system limits for number of files in each directories. 1 means one level of directories, 2 means 2, etc. 
""" def __init__(self, root_directory, bucket_depth=0): web.Application.__init__(self, [ (r"/", RootHandler), (r"/([^/]+)/(.+)", ObjectHandler), (r"/([^/]+)/", BucketHandler), ]) self.directory = os.path.abspath(root_directory) if not os.path.exists(self.directory): os.makedirs(self.directory) self.bucket_depth = bucket_depth class BaseRequestHandler(web.RequestHandler): SUPPORTED_METHODS = ("PUT", "GET", "DELETE") def render_xml(self, value): assert isinstance(value, dict) and len(value) == 1 self.set_header("Content-Type", "application/xml; charset=UTF-8") name = value.keys()[0] parts = [] parts.append('<' + escape.utf8(name) + ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">') self._render_parts(value.values()[0], parts) parts.append('</' + escape.utf8(name) + '>') self.finish('<?xml version="1.0" encoding="UTF-8"?>\n' + ''.join(parts)) def _render_parts(self, value, parts=[]): if isinstance(value, (unicode, bytes_type)): parts.append(escape.xhtml_escape(value)) elif isinstance(value, int) or isinstance(value, long): parts.append(str(value)) elif isinstance(value, datetime.datetime): parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z")) elif isinstance(value, dict): for name, subvalue in value.iteritems(): if not isinstance(subvalue, list): subvalue = [subvalue] for subsubvalue in subvalue: parts.append('<' + escape.utf8(name) + '>') self._render_parts(subsubvalue, parts) parts.append('</' + escape.utf8(name) + '>') else: raise Exception("Unknown S3 value type %r", value) def _object_path(self, bucket, object_name): if self.application.bucket_depth < 1: return os.path.abspath(os.path.join( self.application.directory, bucket, object_name)) hash = hashlib.md5(object_name).hexdigest() path = os.path.abspath(os.path.join( self.application.directory, bucket)) for i in range(self.application.bucket_depth): path = os.path.join(path, hash[:2 * (i + 1)]) return os.path.join(path, object_name) class RootHandler(BaseRequestHandler): def get(self): names = 
os.listdir(self.application.directory) buckets = [] for name in names: path = os.path.join(self.application.directory, name) info = os.stat(path) buckets.append({ "Name": name, "CreationDate": datetime.datetime.utcfromtimestamp( info.st_ctime), }) self.render_xml({"ListAllMyBucketsResult": { "Buckets": {"Bucket": buckets}, }}) class BucketHandler(BaseRequestHandler): def get(self, bucket_name): prefix = self.get_argument("prefix", u"") marker = self.get_argument("marker", u"") max_keys = int(self.get_argument("max-keys", 50000)) path = os.path.abspath(os.path.join(self.application.directory, bucket_name)) terse = int(self.get_argument("terse", 0)) if not path.startswith(self.application.directory) or \ not os.path.isdir(path): raise web.HTTPError(404) object_names = [] for root, dirs, files in os.walk(path): for file_name in files: object_names.append(os.path.join(root, file_name)) skip = len(path) + 1 for i in range(self.application.bucket_depth): skip += 2 * (i + 1) + 1 object_names = [n[skip:] for n in object_names] object_names.sort() contents = [] start_pos = 0 if marker: start_pos = bisect.bisect_right(object_names, marker, start_pos) if prefix: start_pos = bisect.bisect_left(object_names, prefix, start_pos) truncated = False for object_name in object_names[start_pos:]: if not object_name.startswith(prefix): break if len(contents) >= max_keys: truncated = True break object_path = self._object_path(bucket_name, object_name) c = {"Key": object_name} if not terse: info = os.stat(object_path) c.update({ "LastModified": datetime.datetime.utcfromtimestamp( info.st_mtime), "Size": info.st_size, }) contents.append(c) marker = object_name self.render_xml({"ListBucketResult": { "Name": bucket_name, "Prefix": prefix, "Marker": marker, "MaxKeys": max_keys, "IsTruncated": truncated, "Contents": contents, }}) def put(self, bucket_name): path = os.path.abspath(os.path.join( self.application.directory, bucket_name)) if not path.startswith(self.application.directory) or \ 
os.path.exists(path): raise web.HTTPError(403) os.makedirs(path) self.finish() def delete(self, bucket_name): path = os.path.abspath(os.path.join( self.application.directory, bucket_name)) if not path.startswith(self.application.directory) or \ not os.path.isdir(path): raise web.HTTPError(404) if len(os.listdir(path)) > 0: raise web.HTTPError(403) os.rmdir(path) self.set_status(204) self.finish() class ObjectHandler(BaseRequestHandler): def get(self, bucket, object_name): object_name = urllib.unquote(object_name) path = self._object_path(bucket, object_name) if not path.startswith(self.application.directory) or \ not os.path.isfile(path): raise web.HTTPError(404) info = os.stat(path) self.set_header("Content-Type", "application/unknown") self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp( info.st_mtime)) object_file = open(path, "rb") try: self.finish(object_file.read()) finally: object_file.close() def put(self, bucket, object_name): object_name = urllib.unquote(object_name) bucket_dir = os.path.abspath(os.path.join( self.application.directory, bucket)) if not bucket_dir.startswith(self.application.directory) or \ not os.path.isdir(bucket_dir): raise web.HTTPError(404) path = self._object_path(bucket, object_name) if not path.startswith(bucket_dir) or os.path.isdir(path): raise web.HTTPError(403) directory = os.path.dirname(path) if not os.path.exists(directory): os.makedirs(directory) object_file = open(path, "w") object_file.write(self.request.body) object_file.close() self.finish() def delete(self, bucket, object_name): object_name = urllib.unquote(object_name) path = self._object_path(bucket, object_name) if not path.startswith(self.application.directory) or \ not os.path.isfile(path): raise web.HTTPError(404) os.unlink(path) self.set_status(204) self.finish()
apache-2.0
adminneyk/codificacionproyectando
application/views/Generacion/Generacion/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/hotshot/__init__.py
215
2670
"""High-perfomance logging profiler, mostly written in C.""" import _hotshot from _hotshot import ProfilerError from warnings import warnpy3k as _warnpy3k _warnpy3k("The 'hotshot' module is not supported in 3.x, " "use the 'profile' module instead.", stacklevel=2) class Profile: def __init__(self, logfn, lineevents=0, linetimings=1): self.lineevents = lineevents and 1 or 0 self.linetimings = (linetimings and lineevents) and 1 or 0 self._prof = p = _hotshot.profiler( logfn, self.lineevents, self.linetimings) # Attempt to avoid confusing results caused by the presence of # Python wrappers around these functions, but only if we can # be sure the methods have not been overridden or extended. if self.__class__ is Profile: self.close = p.close self.start = p.start self.stop = p.stop self.addinfo = p.addinfo def close(self): """Close the logfile and terminate the profiler.""" self._prof.close() def fileno(self): """Return the file descriptor of the profiler's log file.""" return self._prof.fileno() def start(self): """Start the profiler.""" self._prof.start() def stop(self): """Stop the profiler.""" self._prof.stop() def addinfo(self, key, value): """Add an arbitrary labelled value to the profile log.""" self._prof.addinfo(key, value) # These methods offer the same interface as the profile.Profile class, # but delegate most of the work to the C implementation underneath. def run(self, cmd): """Profile an exec-compatible string in the script environment. The globals from the __main__ module are used as both the globals and locals for the script. """ import __main__ dict = __main__.__dict__ return self.runctx(cmd, dict, dict) def runctx(self, cmd, globals, locals): """Evaluate an exec-compatible string in a specific environment. The string is compiled before profiling begins. """ code = compile(cmd, "<string>", "exec") self._prof.runcode(code, globals, locals) return self def runcall(self, func, *args, **kw): """Profile a single call of a callable. 
Additional positional and keyword arguments may be passed along; the result of the call is returned, and exceptions are allowed to propogate cleanly, while ensuring that profiling is disabled on the way out. """ return self._prof.runcall(func, args, kw)
mit
Changaco/oh-mainline
vendor/packages/twisted/twisted/test/test_text.py
18
5440
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. from twisted.trial import unittest from twisted.python import text import string from cStringIO import StringIO sampleText = \ """Every attempt to employ mathematical methods in the study of chemical questions must be considered profoundly irrational and contrary to the spirit of chemistry ... If mathematical analysis should ever hold a prominent place in chemistry - an aberration which is happily almost impossible - it would occasion a rapid and widespread degeneration of that science. -- Auguste Comte, Philosophie Positive, Paris, 1838 """ lineWidth = 72 def set_lineWidth(n): global lineWidth lineWidth = n class WrapTest(unittest.TestCase): def setUp(self): self.sampleSplitText = string.split(sampleText) self.output = text.wordWrap(sampleText, lineWidth) def test_wordCount(self): """Compare the number of words.""" words = [] for line in self.output: words.extend(string.split(line)) wordCount = len(words) sampleTextWordCount = len(self.sampleSplitText) self.failUnlessEqual(wordCount, sampleTextWordCount) def test_wordMatch(self): """Compare the lists of words.""" words = [] for line in self.output: words.extend(string.split(line)) # Using failUnlessEqual here prints out some # rather too long lists. self.failUnless(self.sampleSplitText == words) def test_lineLength(self): """Check the length of the lines.""" failures = [] for line in self.output: if not len(line) <= lineWidth: failures.append(len(line)) if failures: self.fail("%d of %d lines were too long.\n" "%d < %s" % (len(failures), len(self.output), lineWidth, failures)) class SplitTest(unittest.TestCase): """Tests for text.splitQuoted()""" def test_oneWord(self): """Splitting strings with one-word phrases.""" s = 'This code "works."' r = text.splitQuoted(s) self.failUnlessEqual(['This', 'code', 'works.'], r) def test_multiWord(self): s = 'The "hairy monkey" likes pie.' 
r = text.splitQuoted(s) self.failUnlessEqual(['The', 'hairy monkey', 'likes', 'pie.'], r) # Some of the many tests that would fail: #def test_preserveWhitespace(self): # phrase = '"MANY SPACES"' # s = 'With %s between.' % (phrase,) # r = text.splitQuoted(s) # self.failUnlessEqual(['With', phrase, 'between.'], r) #def test_escapedSpace(self): # s = r"One\ Phrase" # r = text.splitQuoted(s) # self.failUnlessEqual(["One Phrase"], r) class StrFileTest(unittest.TestCase): def setUp(self): self.io = StringIO("this is a test string") def tearDown(self): pass def test_1_f(self): self.assertEquals(False, text.strFile("x", self.io)) def test_1_1(self): self.assertEquals(True, text.strFile("t", self.io)) def test_1_2(self): self.assertEquals(True, text.strFile("h", self.io)) def test_1_3(self): self.assertEquals(True, text.strFile("i", self.io)) def test_1_4(self): self.assertEquals(True, text.strFile("s", self.io)) def test_1_5(self): self.assertEquals(True, text.strFile("n", self.io)) def test_1_6(self): self.assertEquals(True, text.strFile("g", self.io)) def test_3_1(self): self.assertEquals(True, text.strFile("thi", self.io)) def test_3_2(self): self.assertEquals(True, text.strFile("his", self.io)) def test_3_3(self): self.assertEquals(True, text.strFile("is ", self.io)) def test_3_4(self): self.assertEquals(True, text.strFile("ing", self.io)) def test_3_f(self): self.assertEquals(False, text.strFile("bla", self.io)) def test_large_1(self): self.assertEquals(True, text.strFile("this is a test", self.io)) def test_large_2(self): self.assertEquals(True, text.strFile("is a test string", self.io)) def test_large_f(self): self.assertEquals(False, text.strFile("ds jhfsa k fdas", self.io)) def test_overlarge_f(self): self.assertEquals(False, text.strFile("djhsakj dhsa fkhsa s,mdbnfsauiw bndasdf hreew", self.io)) def test_self(self): self.assertEquals(True, text.strFile("this is a test string", self.io)) def test_insensitive(self): self.assertEquals(True, text.strFile("ThIs is A 
test STRING", self.io, False)) class DeprecationTest(unittest.TestCase): """ Tests for deprecations in L{twisted.python.text} """ def test_docstringLStrip(self): """ L{docstringLStrip} is deprecated as of 10.2.0 """ text.docstringLStrip("") warningsShown = self.flushWarnings([self.test_docstringLStrip]) self.assertEquals(1, len(warningsShown)) self.assertIdentical(warningsShown[0]['category'], DeprecationWarning) self.assertEquals(warningsShown[0]['message'], "twisted.python.text.docstringLStrip was " "deprecated in Twisted 10.2.0: Please use " "inspect.getdoc instead.") testCases = [WrapTest, SplitTest, StrFileTest]
agpl-3.0
Ant-OS/android_packages_apps_OTAUpdates
jni/boost_1_57_0/tools/build/src/tools/types/lib.py
71
2703
# Status: ported # Base revision: 64456. # Copyright David Abrahams 2004. # Copyright Vladimir Prus 2010. # Distributed under the Boost # Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) import b2.build.type as type # The following naming scheme is used for libraries. # # On *nix: # libxxx.a static library # libxxx.so shared library # # On windows (msvc) # libxxx.lib static library # xxx.dll DLL # xxx.lib import library # # On windows (mingw): # libxxx.a static library # libxxx.dll DLL # libxxx.dll.a import library # # On cygwin i.e. <target-os>cygwin # libxxx.a static library # cygxxx.dll DLL # libxxx.dll.a import library # type.register('LIB') # FIXME: should not register both extensions on both platforms. type.register('STATIC_LIB', ['a', 'lib'], 'LIB') # The 'lib' prefix is used everywhere type.set_generated_target_prefix('STATIC_LIB', [], 'lib') # Use '.lib' suffix for windows type.set_generated_target_suffix('STATIC_LIB', ['<target-os>windows'], 'lib') # Except with gcc. type.set_generated_target_suffix('STATIC_LIB', ['<toolset>gcc', '<target-os>windows'], 'a') # Use xxx.lib for import libs type.register('IMPORT_LIB', [], 'STATIC_LIB') type.set_generated_target_prefix('IMPORT_LIB', [], '') type.set_generated_target_suffix('IMPORT_LIB', [], 'lib') # Except with gcc (mingw or cygwin), where use libxxx.dll.a type.set_generated_target_prefix('IMPORT_LIB', ['<toolset>gcc'], 'lib') type.set_generated_target_suffix('IMPORT_LIB', ['<toolset>gcc'], 'dll.a') type.register('SHARED_LIB', ['so', 'dll', 'dylib'], 'LIB') # Both mingw and cygwin use libxxx.dll naming scheme. 
# On Linux, use "lib" prefix type.set_generated_target_prefix('SHARED_LIB', [], 'lib') # But don't use it on windows type.set_generated_target_prefix('SHARED_LIB', ['<target-os>windows'], '') # But use it again on mingw type.set_generated_target_prefix('SHARED_LIB', ['<toolset>gcc', '<target-os>windows'], 'lib') # And use 'cyg' on cygwin type.set_generated_target_prefix('SHARED_LIB', ['<target-os>cygwin'], 'cyg') type.set_generated_target_suffix('SHARED_LIB', ['<target-os>windows'], 'dll') type.set_generated_target_suffix('SHARED_LIB', ['<target-os>cygwin'], 'dll') type.set_generated_target_suffix('SHARED_LIB', ['<target-os>darwin'], 'dylib') type.register('SEARCHED_LIB', [], 'LIB') # This is needed so that when we create a target of SEARCHED_LIB # type, there's no prefix or suffix automatically added. type.set_generated_target_prefix('SEARCHED_LIB', [], '') type.set_generated_target_suffix('SEARCHED_LIB', [], '')
apache-2.0
rossant/galry
experimental/fireworks.py
1
4794
from galry import * import pylab as plt import numpy as np import numpy.random as rdn import time import timeit import os from OpenGL import GL as gl class ParticleVisual(Visual): def get_position_update_code(self): return """ // update position position.x += velocities.x * tloc; position.y += velocities.y * tloc - 0.5 * g * tloc * tloc; """ def get_color_update_code(self): return """ // pass the color and point size to the fragment shader varying_color = color; varying_color.w = alpha; """ def base_fountain(self, initial_positions=None, velocities=None, color=None, alpha=None, delays=None): self.size = initial_positions.shape[0] self.primitive_type = 'POINTS' # load texture path = os.path.dirname(os.path.realpath(__file__)) particle = plt.imread(os.path.join(path, "../examples/images/particle.png")) size = float(max(particle.shape)) # create the dataset self.add_uniform("point_size", vartype="float", ndim=1, data=size) self.add_uniform("t", vartype="float", ndim=1, data=0.) self.add_uniform("color", vartype="float", ndim=4, data=color) # add the different data buffers self.add_attribute("initial_positions", vartype="float", ndim=2, data=initial_positions) self.add_attribute("velocities", vartype="float", ndim=2, data=velocities) self.add_attribute("delays", vartype="float", ndim=1, data=delays) self.add_attribute("alpha", vartype="float", ndim=1, data=alpha) self.add_varying("varying_color", vartype="float", ndim=4) # add particle texture self.add_texture("tex", size=particle.shape[:2], ncomponents=particle.shape[2], ndim=2, data=particle) vs = """ // compute local time const float tmax = 5.; const float tlocmax = 2.; const float g = %G_CONSTANT%; // Local time. 
float tloc = mod(t - delays, tmax); vec2 position = initial_positions; if ((tloc >= 0) && (tloc <= tlocmax)) { // position update %POSITION_UPDATE% %COLOR_UPDATE% } else { varying_color = vec4(0., 0., 0., 0.); } gl_PointSize = point_size; """ vs = vs.replace('%POSITION_UPDATE%', self.get_position_update_code()) vs = vs.replace('%COLOR_UPDATE%', self.get_color_update_code()) vs = vs.replace('%G_CONSTANT%', '3.') # self.add_uniform('cycle', vartype='int', data=0) self.add_vertex_main(vs) self.add_fragment_main( """ vec4 col = texture2D(tex, gl_PointCoord) * varying_color; out_color = col; """) def initialize(self, **kwargs): self.base_fountain(**kwargs) def update(figure, parameter): t = parameter[0] # if not hasattr(figure, 'cycle'): # figure.cycle = 0 # figure.cycle = np.mod(figure.cycle + 1, 2) # figure.set_data(t=t, cycle=figure.cycle, visual='fountain') figure.set_data(t=t, visual='fountain') if __name__ == '__main__': figure() # number of particles n = 50000 # initial positions positions = .02 * rdn.randn(n, 2) # initial velocities velocities = np.zeros((n, 2)) v = 1.5 + .5 * rdn.rand(n) angles = .1 * rdn.randn(n) + np.pi / 2 velocities[:,0] = v * np.cos(angles) velocities[:,1] = v * np.sin(angles) # transparency alpha = .2 * rdn.rand(n) # color color = (0.70,0.75,.98,1.) 
# random delays delays = 10 * rdn.rand(n) figure() # framebuffer(display=False) # framebuffer(name='sc', framebuffer=1) # imshow(RefVar('sc', 'fbotex0'), #points=(-.5,-.5,.5,.5), # # imshow(np.random.rand(4,4,4), points=(-.5,-.5,.5,.5), # is_static=True)#, beforeclear=True) # # create the visual visual(ParticleVisual, initial_positions=positions, velocities=velocities, alpha=alpha, color=color, delays=delays, is_static=True, name='fountain', ) # TODO: beginning: copy sc => prev.1 framebuffer(ntextures=2, coeffs=[1., .9], framebuffer=1) framebuffer(name='sc') # imshow(RefVar('framebuffer', 'fbotex0'), points=(-.5,.5,.5,-.5), # # imshow(np.random.rand(4,4,4), points=(-.5,-.5,.5,.5), # is_static=True, framebuffer='screen')#, beforeclear=True) animate(update, dt=.02) show()
bsd-3-clause
liyi193328/seq2seq
seq2seq/data/postproc.py
7
1619
# -*- coding: utf-8 -*- # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A collection of commonly used post-processing functions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals def strip_bpe(text): """Deodes text that was processed using BPE from https://github.com/rsennrich/subword-nmt""" return text.replace("@@ ", "").strip() def decode_sentencepiece(text): """Decodes text that uses https://github.com/google/sentencepiece encoding. Assumes that pieces are separated by a space""" return "".join(text.split(" ")).replace("▁", " ").strip() def slice_text(text, eos_token="SEQUENCE_END", sos_token="SEQUENCE_START"): """Slices text from SEQUENCE_START to SEQUENCE_END, not including these special tokens. """ eos_index = text.find(eos_token) text = text[:eos_index] if eos_index > -1 else text sos_index = text.find(sos_token) text = text[sos_index+len(sos_token):] if sos_index > -1 else text return text.strip()
apache-2.0
ccrook/Quantum-GIS
python/plugins/processing/algs/grass7/ext/i_gensig.py
5
1761
# -*- coding: utf-8 -*- """ *************************************************************************** i_gensig.py ----------- Date : March 2016 Copyright : (C) 2016 by Médéric Ribreux Email : medspx at medspx dot fr *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Médéric Ribreux' __date__ = 'March 2016' __copyright__ = '(C) 2016, Médéric Ribreux' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from .i import regroupRasters, exportSigFile def processCommand(alg, parameters, context): # We need to extract the basename of the signature file signatureFile = alg.parameterAsString(parameters, 'signaturefile', context) shortSigFile = os.path.basename(signatureFile) parameters['signaturefile'] = shortSigFile # Regroup rasters group, subgroup = regroupRasters(alg, parameters, context, 'input', 'group', 'subgroup') alg.processCommand(parameters, context) # Re-add signature files parameters['signaturefile'] = signatureFile # Export signature file exportSigFile(alg, group, subgroup, signatureFile)
gpl-2.0
JavML/django
tests/postgres_tests/fields.py
302
1087
""" Indirection layer for PostgreSQL-specific fields, so the tests don't fail when run with a backend other than PostgreSQL. """ from django.db import models try: from django.contrib.postgres.fields import ( ArrayField, BigIntegerRangeField, DateRangeField, DateTimeRangeField, FloatRangeField, HStoreField, IntegerRangeField, JSONField, ) except ImportError: class DummyArrayField(models.Field): def __init__(self, base_field, size=None, **kwargs): super(DummyArrayField, self).__init__(**kwargs) def deconstruct(self): name, path, args, kwargs = super(DummyArrayField, self).deconstruct() kwargs.update({ 'base_field': '', 'size': 1, }) return name, path, args, kwargs ArrayField = DummyArrayField BigIntegerRangeField = models.Field DateRangeField = models.Field DateTimeRangeField = models.Field FloatRangeField = models.Field HStoreField = models.Field IntegerRangeField = models.Field JSONField = models.Field
bsd-3-clause
simmetria/sentry
src/sentry/web/frontend/projects.py
1
12321
""" sentry.web.frontend.projects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from crispy_forms.helper import FormHelper from django.core.context_processors import csrf from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from django.views.decorators.csrf import csrf_protect from sentry.constants import MEMBER_OWNER, MEMBER_USER from sentry.models import TeamMember, ProjectKey, Team, FilterKey from sentry.permissions import can_create_projects, can_remove_project, can_create_teams from sentry.plugins import plugins from sentry.plugins.helpers import set_option, get_option from sentry.web.decorators import login_required, has_access from sentry.web.forms.projects import NewProjectForm, NewProjectAdminForm,\ ProjectTagsForm, EditProjectForm, RemoveProjectForm, EditProjectAdminForm from sentry.web.forms.teams import NewTeamForm, SelectTeamForm from sentry.web.helpers import render_to_response, get_project_list, \ plugin_config @login_required def project_list(request): project_list = get_project_list(request.user, hidden=True).values() team_list = dict((t.id, t) for t in Team.objects.filter(pk__in=[p.team_id for p in project_list])) if request.user.is_authenticated(): memberships = dict((tm.team_id, tm) for tm in TeamMember.objects.filter(user=request.user, team__in=team_list)) keys = dict((p.project_id, p) for p in ProjectKey.objects.filter(user=request.user, project__in=project_list)) else: memberships = {} keys = {} for project in project_list: key = keys.get(project.id) if key: project.member_dsn = key.get_dsn() member = memberships.get(project.team_id) if member: project.member_type = member.get_type_display() return render_to_response('sentry/projects/list.html', { 'PROJECT_LIST': project_list, }, request) @login_required def new_project(request): from django.contrib.auth.models import User if not 
can_create_projects(request.user): return HttpResponseRedirect(reverse('sentry')) allow_create_teams = can_create_teams(request.user) team_list = Team.objects.get_for_user(request.user) if request.user.has_perm('sentry.can_add_project') and User.objects.all()[0:2] == 2: project_form_cls = NewProjectAdminForm project_initial = { 'owner': request.user.username, } else: project_form_cls = NewProjectForm project_initial = {} if len(team_list) > 0: select_team_form = SelectTeamForm(team_list, request.POST or None, prefix='st') elif not allow_create_teams: return render_to_response('sentry/projects/cannot_create_teams.html', {}, request) else: select_team_form = None if allow_create_teams: new_team_form = NewTeamForm(request.POST or None, prefix='nt') else: new_team_form = None project_form = project_form_cls(request.POST or None, initial=project_initial, prefix='prj') is_new_team = new_team_form and new_team_form.is_valid() if is_new_team or not select_team_form: team_form = new_team_form else: team_form = select_team_form if project_form.is_valid() and team_form.is_valid(): project = project_form.save(commit=False) if not project.owner: project.owner = request.user if is_new_team: team = new_team_form.save(commit=False) team.owner = project.owner team.save() else: team = select_team_form.cleaned_data['team'] project.team = team project.save() return HttpResponseRedirect(reverse('sentry-project-client-help', args=[project.slug])) return render_to_response('sentry/projects/new.html', { 'project_form': project_form, 'select_team_form': select_team_form, 'new_team_form': new_team_form, }, request) @has_access(MEMBER_OWNER) @csrf_protect def remove_project(request, project): if not can_remove_project(request.user, project): return HttpResponseRedirect(reverse('sentry')) project_list = filter(lambda x: x != project, get_project_list(request.user).itervalues()) form = RemoveProjectForm(request.user, project_list, request.POST or None) if form.is_valid(): removal_type = 
form.cleaned_data['removal_type'] if removal_type == '1': project.delete() elif removal_type == '2': new_project = form.cleaned_data['project'] project.merge_to(new_project) elif removal_type == '3': project.update(status=1) else: raise ValueError(removal_type) return HttpResponseRedirect(reverse('sentry-project-list')) context = csrf(request) context.update({ 'form': form, 'project': project, }) return render_to_response('sentry/projects/remove.html', context, request) @has_access(MEMBER_OWNER) @csrf_protect def manage_project(request, project): result = plugins.first('has_perm', request.user, 'edit_project', project) if result is False and not request.user.has_perm('sentry.can_change_project'): return HttpResponseRedirect(reverse('sentry')) # XXX: We probably shouldnt allow changing the team unless they're the project owner team_list = Team.objects.get_for_user(project.owner or request.user, MEMBER_OWNER) if request.user.has_perm('sentry.can_change_project'): form_cls = EditProjectAdminForm else: form_cls = EditProjectForm form = form_cls(request, team_list, request.POST or None, instance=project, initial={ 'origins': '\n'.join(get_option('sentry:origins', project) or []), 'owner': project.owner, }) if form.is_valid(): project = form.save() set_option('sentry:origins', form.cleaned_data.get('origins') or [], project) return HttpResponseRedirect(request.path + '?success=1') if not project.team: member_list = [] else: member_list = [(tm, tm.user) for tm in project.team.member_set.select_related('user')] context = csrf(request) context.update({ 'can_remove_project': can_remove_project(request.user, project), 'page': 'details', 'form': form, 'project': project, 'member_list': member_list, 'TEAM_LIST': team_list.values(), }) return render_to_response('sentry/projects/manage.html', context, request) @has_access(MEMBER_USER) def client_help(request, project): try: key = ProjectKey.objects.get(user=request.user, project=project) except ProjectKey.DoesNotExist: key = None 
# superuser context = { 'can_remove_project': can_remove_project(request.user, project), 'page': 'client_help', 'project': project, 'key': key, } if key: context.update({ 'dsn': key.get_dsn(), 'dsn_public': key.get_dsn(public=True), }) return render_to_response('sentry/projects/client_help.html', context, request) @has_access(MEMBER_OWNER) def manage_project_tags(request, project): tag_list = FilterKey.objects.all_keys(project) if tag_list: form = ProjectTagsForm(project, tag_list, request.POST or None) else: form = None helper = FormHelper() helper.form_tag = False if form and form.is_valid(): form.save() return HttpResponseRedirect(reverse('sentry-manage-project-tags', args=[project.slug]) + '?success=1') context = { 'tag_list': tag_list, 'page': 'tags', 'project': project, 'form': form, 'helper': helper, } return render_to_response('sentry/projects/manage_tags.html', context, request) @has_access(MEMBER_OWNER) @csrf_protect def manage_plugins(request, project): result = plugins.first('has_perm', request.user, 'configure_project_plugin', project) if result is False and not request.user.has_perm('sentry.can_change_project'): return HttpResponseRedirect(reverse('sentry')) if request.POST: enabled = set(request.POST.getlist('plugin')) for plugin in plugins.all(): if plugin.can_enable_for_projects(): plugin.set_option('enabled', plugin.slug in enabled, project) return HttpResponseRedirect(request.path + '?success=1') context = csrf(request) context.update({ 'page': 'plugins', 'project': project, }) return render_to_response('sentry/projects/plugins/list.html', context, request) @has_access(MEMBER_OWNER) @csrf_protect def configure_project_plugin(request, project, slug): try: plugin = plugins.get(slug) except KeyError: return HttpResponseRedirect(reverse('sentry-manage-project', args=[project.slug])) if not plugin.is_enabled(project): return HttpResponseRedirect(reverse('sentry-manage-project', args=[project.slug])) result = plugins.first('has_perm', request.user, 
'configure_project_plugin', project, plugin) if result is False and not request.user.is_superuser: return HttpResponseRedirect(reverse('sentry')) form = plugin.project_conf_form if form is None: return HttpResponseRedirect(reverse('sentry-manage-project', args=[project.slug])) action, view = plugin_config(plugin, project, request) if action == 'redirect': return HttpResponseRedirect(request.path + '?success=1') context = csrf(request) context.update({ 'page': 'plugin', 'title': plugin.get_title(), 'view': view, 'project': project, 'plugin': plugin, }) return render_to_response('sentry/projects/plugins/configure.html', context, request) @has_access(MEMBER_OWNER) @csrf_protect def reset_project_plugin(request, project, slug): try: plugin = plugins.get(slug) except KeyError: return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.slug, slug])) if not plugin.is_enabled(project): return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.slug, slug])) result = plugins.first('has_perm', request.user, 'configure_project_plugin', project, plugin) if result is False and not request.user.is_superuser: return HttpResponseRedirect(reverse('sentry')) plugin.reset_options(project=project) return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.slug, slug])) @has_access(MEMBER_OWNER) @csrf_protect def enable_project_plugin(request, project, slug): try: plugin = plugins.get(slug) except KeyError: return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.slug, slug])) if plugin.is_enabled(project) or not plugin.can_enable_for_projects(): return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.slug, slug])) result = plugins.first('has_perm', request.user, 'configure_project_plugin', project, plugin) if result is False and not request.user.is_superuser: return HttpResponseRedirect(reverse('sentry')) plugin.set_option('enabled', True, project) 
return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.slug, slug])) @has_access(MEMBER_OWNER) @csrf_protect def disable_project_plugin(request, project, slug): try: plugin = plugins.get(slug) except KeyError: return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.slug, slug])) if not plugin.is_enabled(project) or not plugin.can_enable_for_projects(): return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.slug, slug])) result = plugins.first('has_perm', request.user, 'configure_project_plugin', project, plugin) if result is False and not request.user.is_superuser: return HttpResponseRedirect(reverse('sentry')) plugin.set_option('enabled', False, project) return HttpResponseRedirect(reverse('sentry-manage-project', args=[project.slug]))
bsd-3-clause
garrettcap/Bulletproof-Backup
wx/lib/pdfviewer/bezier.py
3
2012
# Name: bezier.py # Package: wx.lib.pdfviewer # # Purpose: Compute Bezier curves for PDF rendered using wx.DC # Adapted from the original source code, see below. # # Author: David Hughes dfh@forestfield.co.uk # Copyright: Forestfield Software Ltd # Licence: Public domain # History: Created 17 Jun 2009 # #---------------------------------------------------------------------------- import wx from vec2d import * def calculate_bezier(p, steps = 30): """ Calculate a bezier curve from 4 control points and return a list of the resulting points. Depends on the 2d vector class from http://www.pygame.org/wiki/2DVectorClass 2007 Victor Blomqvist Released to the Public Domain The function uses the forward differencing algorithm described at http://www.niksula.cs.hut.fi/~hkankaan/Homepages/bezierfast.html """ t = 1.0 / steps temp = t*t f = p[0] fd = 3 * (p[1] - p[0]) * t fdd_per_2 = 3 * (p[0] - 2 * p[1] + p[2]) * temp fddd_per_2 = 3 * (3 * (p[1] - p[2]) + p[3] - p[0]) * temp * t fddd = fddd_per_2 + fddd_per_2 fdd = fdd_per_2 + fdd_per_2 fddd_per_6 = fddd_per_2 * (1.0 / 3) points = [] for x in range(steps): points.append(f) f = f + fd + fdd_per_2 + fddd_per_6 fd = fd + fdd + fddd_per_2 fdd = fdd + fddd fdd_per_2 = fdd_per_2 + fddd_per_2 points.append(f) return points def compute_points(controlpoints, nsteps=30): """ Input 4 control points as wxRealPoints and convert to vec2d instances. compute the nsteps points on the resulting curve and return them as a list of wxPoints """ controlvectors = [] for p in controlpoints: controlvectors.append(vec2d(p.x, p.y)) pointvectors = calculate_bezier(controlvectors, nsteps) curvepoints = [] for v in pointvectors: curvepoints.append(wx.Point(v[0], v[1])) return curvepoints
gpl-2.0
kawamon/hue
desktop/core/ext-py/django-extensions-1.8.0/django_extensions/management/commands/runjobs.py
7
3445
# -*- coding: utf-8 -*- from django.apps import apps from django.core.management.base import BaseCommand from django_extensions.management.jobs import get_jobs, print_jobs from django_extensions.management.utils import signalcommand class Command(BaseCommand): help = "Runs scheduled maintenance jobs." when_options = ['minutely', 'quarter_hourly', 'hourly', 'daily', 'weekly', 'monthly', 'yearly'] def add_arguments(self, parser): super(Command, self).add_arguments(parser) parser.add_argument( 'when', nargs='?', help="options: %s" % ', '.join(self.when_options)) parser.add_argument( '--list', '-l', action="store_true", dest="list_jobs", help="List all jobs with their description") def usage_msg(self): print("%s Please specify: %s" % (self.help, ', '.join(self.when_options))) def runjobs(self, when, options): verbosity = int(options.get('verbosity', 1)) jobs = get_jobs(when, only_scheduled=True) for app_name, job_name in sorted(jobs.keys()): job = jobs[(app_name, job_name)] if verbosity > 1: print("Executing %s job: %s (app: %s)" % (when, job_name, app_name)) try: job().execute() except Exception: import traceback print("ERROR OCCURED IN %s JOB: %s (APP: %s)" % (when.upper(), job_name, app_name)) print("START TRACEBACK:") traceback.print_exc() print("END TRACEBACK\n") def runjobs_by_signals(self, when, options): """ Run jobs from the signals """ # Thanks for Ian Holsman for the idea and code from django_extensions.management import signals from django.conf import settings verbosity = int(options.get('verbosity', 1)) for app_name in settings.INSTALLED_APPS: try: __import__(app_name + '.management', '', '', ['']) except ImportError: pass for app in (app.models_module for app in apps.get_app_configs() if app.models_module): if verbosity > 1: app_name = '.'.join(app.__name__.rsplit('.')[:-1]) print("Sending %s job signal for: %s" % (when, app_name)) if when == 'minutely': signals.run_minutely_jobs.send(sender=app, app=app) elif when == 'quarter_hourly': 
signals.run_quarter_hourly_jobs.send(sender=app, app=app) elif when == 'hourly': signals.run_hourly_jobs.send(sender=app, app=app) elif when == 'daily': signals.run_daily_jobs.send(sender=app, app=app) elif when == 'weekly': signals.run_weekly_jobs.send(sender=app, app=app) elif when == 'monthly': signals.run_monthly_jobs.send(sender=app, app=app) elif when == 'yearly': signals.run_yearly_jobs.send(sender=app, app=app) @signalcommand def handle(self, *args, **options): when = options.get('when') if options.get('list_jobs'): print_jobs(when, only_scheduled=True, show_when=True, show_appname=True) elif when in self.when_options: self.runjobs(when, options) self.runjobs_by_signals(when, options) else: self.usage_msg()
apache-2.0
Aloomaio/googleads-python-lib
examples/ad_manager/v201805/report_service/run_report_and_create_match_table.py
1
3150
#!/usr/bin/env python # # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Runs a report for LineItems with additional data from a PQL table. Fetches a basic report over a network's LineItems and then adds some extra columns which might be useful for future analysis, such as LineItemType, from the PQL Line_Item table, creating a match table. """ from datetime import date from datetime import timedelta import tempfile # Import appropriate modules from the client library. from googleads import ad_manager from googleads import errors try: import pandas except ImportError: raise ImportError('This example requires the pandas library to be installed.') def main(client): # Set the start and end dates of the report to run (past 8 days). end_date = date.today() start_date = end_date - timedelta(days=8) # Create report job. report_job = { 'reportQuery': { 'dimensions': ['LINE_ITEM_ID', 'LINE_ITEM_NAME'], 'columns': ['AD_SERVER_IMPRESSIONS', 'AD_SERVER_CLICKS', 'AD_SERVER_CTR', 'AD_SERVER_CPM_AND_CPC_REVENUE', 'AD_SERVER_WITHOUT_CPD_AVERAGE_ECPM'], 'dateRangeType': 'CUSTOM_DATE', 'startDate': start_date, 'endDate': end_date } } # Initialize a DataDownloader. report_downloader = client.GetDataDownloader(version='v201805') try: # Run the report and wait for it to finish. report_job_id = report_downloader.WaitForReport(report_job) except errors.AdManagerReportError, e: print 'Failed to generate report. 
Error was: %s' % e with tempfile.NamedTemporaryFile( suffix='.csv.gz', mode='wb', delete=False) as report_file: # Download report data. report_downloader.DownloadReportToFile( report_job_id, 'CSV_DUMP', report_file) # Create a PQL query to fetch the line item data line_items_pql_query = ('SELECT Id, LineItemType, Status FROM LineItem') # Download the response from PQL select statement line_items = report_downloader.DownloadPqlResultToList(line_items_pql_query) # Use pandas to join the two csv files into a match table report = pandas.read_csv(report_file.name) line_items = pandas.DataFrame(data=line_items[1:], columns=line_items[0]) merged_result = pandas.merge(report, line_items, left_on='Dimension.LINE_ITEM_ID', right_on='id') merged_result.to_csv('~/complete_line_items_report.csv', index=False) if __name__ == '__main__': # Initialize client object. ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage() main(ad_manager_client)
apache-2.0
askeing/fxos-certsuite
mcts/webapi_tests/semiauto/runner.py
6
3105
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. import collections import sys import unittest from moztest.adapters.unit import StructuredTestRunner from mcts.webapi_tests.semiauto import environment, server def run(suite, logger, spawn_browser=True, verbosity=1, quiet=False, failfast=False, catch_break=False, buffer=True, **kwargs): """A simple test runner. This test runner is essentially equivalent to ``unittest.main`` from the standard library, but adds support for loading test classes with extra keyword arguments. The easiest way to run a test is via the command line:: python -m semiauto test_sms See the standard library unittest module for ways in which tests can be specified. For example it is possible to automatically discover tests:: python -m semiauto discover .""" if catch_break: import unittest.signals unittest.signals.installHandler() env = environment.get(environment.InProcessTestEnvironment, addr=None if spawn_browser else ("127.0.0.1", 6666), verbose=(verbosity > 1)) url = "http://%s:%d/" % (env.server.addr[0], env.server.addr[1]) if spawn_browser: import webbrowser webbrowser.open(url) else: print >> sys.stderr, "Please connect your browser to %s" % url # Wait for browser to connect and get socket connection to client try: so = server.wait_for_client() except server.ConnectError as e: logger.error("%s: error: %s" % (sys.argv[0], e)) sys.exit(1) tests = serialize_suite(suite) test_runner = StructuredTestRunner(logger=logger, test_list=tests) # This is a hack to make the test suite metadata and the handler # available to the tests. 
so.suite = suite environment.env.handler = so logger.suite_start(tests=tests) try: results = test_runner.run(suite) except (SystemExit, KeyboardInterrupt) as e: sys.exit(1) logger.suite_end() return results def serialize_suite(tests, ov=[]): """Serialize a ``unittest.TestSuite`` instance for transportation across the wire. Tests are represented by their hash as we have no desire to replicate the full Test instance object on the client side. :param tests: Instance of ``unittest.suite.TestSuite`` to be serialized. :returns: List of test dicts represented by `id` and `description`. """ rv = ov if isinstance(tests, collections.Iterable): # [TestCase, ...] or [<TestSuite ...>, <TestSuite ...>] for test in tests: if isinstance(test, unittest.suite.TestSuite): rv = serialize_suite(test, rv) else: rv.append(test.id()) elif hasattr(tests, "_tests"): # <unittest.suite.TestSuite _tests=[...]> rv = serialize_suite(tests._tests, rv) return rv
mpl-2.0
vietch2612/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/scm/detection.py
164
3834
# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved. # Copyright (c) 2009 Apple Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import logging

from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.executive import Executive
from .svn import SVN
from .git import Git

_log = logging.getLogger(__name__)


class SCMDetector(object):
    """Figures out which SCM (SVN or Git) backs a given directory."""

    def __init__(self, filesystem, executive):
        self._filesystem = filesystem
        self._executive = executive

    def default_scm(self, patch_directories=None):
        """Return the default SCM object as determined by the CWD and running code.

        Returns the default SCM object for the current working directory; if
        the CWD is not in a checkout, then we attempt to figure out if the
        SCM module itself is part of a checkout, and return that one. If
        neither is part of a checkout, an exception is raised.
        """
        cwd = self._filesystem.getcwd()
        checkout = self.detect_scm_system(cwd, patch_directories)
        if checkout:
            return checkout
        # Fall back to the directory containing this module, which may itself
        # live inside a WebKit checkout even when the CWD does not.
        script_directory = self._filesystem.dirname(self._filesystem.path_to_module(self.__module__))
        checkout = self.detect_scm_system(script_directory, patch_directories)
        if not checkout:
            raise Exception("FATAL: Failed to determine the SCM system for either %s or %s" % (cwd, script_directory))
        _log.info("The current directory (%s) is not a WebKit checkout, using %s" % (cwd, checkout.checkout_root))
        return checkout

    def detect_scm_system(self, path, patch_directories=None):
        """Return an SVN or Git object rooted at *path*, or None if neither applies."""
        abs_path = self._filesystem.abspath(path)
        # An empty list means "no restriction", same as None.
        if patch_directories == []:
            patch_directories = None
        if SVN.in_working_directory(abs_path, executive=self._executive):
            return SVN(cwd=abs_path, patch_directories=patch_directories,
                       filesystem=self._filesystem, executive=self._executive)
        if Git.in_working_directory(abs_path, executive=self._executive):
            return Git(cwd=abs_path, filesystem=self._filesystem,
                       executive=self._executive)
        return None


# FIXME: This free function is deprecated; callers should construct an
# SCMDetector with explicit dependencies instead.
def detect_scm_system(path, patch_directories=None):
    return SCMDetector(FileSystem(), Executive()).detect_scm_system(path, patch_directories)
bsd-3-clause
taedori81/shoop
shoop/addons/admin_module/__init__.py
1
1677
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.utils.translation import ugettext_lazy as _

from shoop.admin.base import AdminModule, MenuEntry
from shoop.admin.utils.urls import admin_url


class AddonModule(AdminModule):
    """Admin module exposing the addon list/upload/reload views."""

    name = _("Addons")
    category = name
    breadcrumbs_menu_entry = MenuEntry(text=name, url="shoop_admin:addon.list")

    # (URL pattern, view class name, URL name) for every view this module owns.
    _VIEW_SPECS = [
        ("^addons/$", "AddonListView", "addon.list"),
        ("^addons/add/$", "AddonUploadView", "addon.upload"),
        ("^addons/add/confirm/$", "AddonUploadConfirmView", "addon.upload_confirm"),
        ("^addons/reload/$", "ReloadView", "addon.reload"),
    ]

    def get_urls(self):
        """Build the admin URL entries from the declarative spec table."""
        return [
            admin_url(
                pattern,
                "shoop.addons.admin_module.views.%s" % view_name,
                name=url_name
            )
            for (pattern, view_name, url_name) in self._VIEW_SPECS
        ]

    def get_menu_category_icons(self):
        """Map this module's category to its FontAwesome icon."""
        return {self.category: "fa fa-puzzle-piece"}

    def get_menu_entries(self, request):
        """Return the single menu entry linking to the addon list."""
        entry = MenuEntry(
            text=_("Addons"),
            icon="fa fa-puzzle-piece",
            url="shoop_admin:addon.list",
            category=self.category
        )
        return [entry]
agpl-3.0
stamparm/tsusen
core/httpd.py
1
12666
#!/usr/bin/env python """ Copyright (c) 2015-2016 Miroslav Stampar (@stamparm) See the file 'LICENSE' for copying permission """ import BaseHTTPServer import csv import cStringIO import datetime import httplib import json import mimetypes import glob import gzip import os import re import socket import SocketServer import threading import traceback import urlparse from common import addr_to_int from common import make_mask from sensor import _log_write from settings import config from settings import DEFAULT_LOG_PERMISSIONS from settings import DISABLED_CONTENT_EXTENSIONS from settings import DEBUG from settings import HTML_DIR from settings import LOG_DIRECTORY from settings import MAX_IP_FILTER_RANGE from settings import MAX_PUT_SIZE from settings import MISC_PORTS from settings import SERVER_HEADER from settings import TIME_FORMAT from settings import VERSION try: from geoip import geolite2 except ImportError: exit("[!] please install python-geoip and python-geoip-geolite2 (e.g. 'pip install python-geoip python-geoip-geolite2')") class ThreadingServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer): def finish_request(self, *args, **kwargs): try: BaseHTTPServer.HTTPServer.finish_request(self, *args, **kwargs) except Exception: if DEBUG: traceback.print_exc() class ReqHandler(BaseHTTPServer.BaseHTTPRequestHandler): def do_GET(self): path, query = self.path.split('?', 1) if '?' in self.path else (self.path, "") params = {} content = None if hasattr(self, "data"): params.update(urlparse.parse_qs(self.data)) if query: params.update(urlparse.parse_qs(query)) for key in params: if params[key]: params[key] = params[key][-1] self.url, self.params = path, params if path == '/': path = "index.html" if path == "index.html": _log_write(True) path = path.strip('/') path = path.replace('/', os.path.sep) path = os.path.abspath(os.path.join(HTML_DIR, path)).strip() if not os.path.isfile(path) and os.path.isfile("%s.html" % path): path = "%s.html" % path if ".." 
not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and not path.endswith(DISABLED_CONTENT_EXTENSIONS): content = open(path, "rb").read() self.send_response(httplib.OK) self.send_header("Connection", "close") self.send_header("Content-Type", mimetypes.guess_type(path)[0] or "application/octet-stream") else: path = os.path.abspath(os.path.join(HTML_DIR, "404.html")).strip() content = open(path, "rb").read() self.send_response(httplib.NOT_FOUND) self.send_header("Connection", "close") if content is not None: for match in re.finditer(r"<\!(\w+)\!>", content): name = match.group(1) _ = getattr(self, "_%s" % name.lower(), None) if _: content = self._format(content, **{ name: _() }) if "gzip" in self.headers.getheader("Accept-Encoding", ""): self.send_header("Content-Encoding", "gzip") _ = cStringIO.StringIO() compress = gzip.GzipFile("", "w+b", 9, _) compress._stream = _ compress.write(content) compress.flush() compress.close() content = compress._stream.getvalue() self.send_header("Content-Length", str(len(content))) self.end_headers() if content: self.wfile.write(content) self.wfile.flush() self.wfile.close() def do_PUT(self): """ e.g.: curl -T 2015-10-28.csv http://<server>:8339 """ path, query = self.path.split('?', 1) if '?' 
in self.path else (self.path, "") path = path.strip('/') match = re.search(r"\A([\d-]+)\.csv\Z", path) if match: date = match.group(1) else: return filename = os.path.join(LOG_DIRECTORY, path) length = int(self.headers.getheader("Content-length")) if length <= MAX_PUT_SIZE: content = self.rfile.read(length) if not os.path.exists(filename): with open(filename, "w+b") as f: f.write(content) os.chmod(filename, DEFAULT_LOG_PERMISSIONS) else: first = None result = set() with open(filename, "r") as f: for line in f.xreadlines(): line = line.strip() if not line: continue if not first: first = line continue result.add(line) for line in content.split("\n")[1:]: line = line.strip() if not line: continue result.add(line) with open(filename, "w+b") as f: f.write("%s\n" % first) for line in result: f.write("%s\n" % line) self.send_response(httplib.OK) else: self.send_response(httplib.BAD_REQUEST) self.send_header("Connection", "close") def _get_filters(self): filters = set() for item in (self.path, self.headers.get("referer", "")): if "ip[]" in item: for _ in re.findall(r"\bip\[\]=([\d./\-]+)(?:&|\Z)", item): if "/" in _: prefix, mask = _.split("/", 1) mask = int(mask) start_int = addr_to_int(prefix) & make_mask(mask) end_int = start_int | ((1 << 32 - mask) - 1) if (end_int - start_int) > MAX_IP_FILTER_RANGE: raise for address in xrange(start_int, end_int + 1): filters.add(address) elif "-" in _: start_address, end_address = _.split("-", 1) start_int = addr_to_int(start_address) end_int = addr_to_int(end_address) if (end_int - start_int) > MAX_IP_FILTER_RANGE: raise for address in xrange(start_int, end_int + 1): filters.add(address) else: filters.add(addr_to_int(_)) return filters def _geoip(self): match = geolite2.lookup(self.params["ip"]) retval = json.dumps({"country": match.country if match else ""}) if "callback" in self.params: retval = "%s(%s)" % (self.params["callback"], retval) return retval def _url(self): return self.url def _version(self): return VERSION def 
_dataset(self): result = "\n" dates = set() rows = [] indexes = {} filters = self._get_filters() for filename in sorted(glob.glob(os.path.join(LOG_DIRECTORY, "*.csv")))[-config.TRENDLINE_PERIOD:]: with open(filename, "rb") as f: match = re.search(r"([\d-]+)\.csv", filename) if match: date = match.group(1) else: continue reader = csv.DictReader(f, delimiter=' ') for row in reader: key = (row["proto"], row["dst_port"], row["dst_ip"], row["src_ip"]) if filters and not (addr_to_int(row["src_ip"]) in filters or addr_to_int(row["dst_ip"]) in filters): continue if key not in indexes: indexes[key] = len(rows) rows.append(row) else: index = indexes[key] rows[index]["first_seen"] = min(int(rows[index]["first_seen"]), int(row["first_seen"])) rows[index]["last_seen"] = max(int(rows[index]["last_seen"]), int(row["last_seen"])) rows[index]["count"] = int(rows[index]["count"]) + int(row["count"]) for row in rows: try: port = int(row['dst_port']) port_name = MISC_PORTS.get(port) or socket.getservbyport(port, row['proto'].lower()) except Exception: port_name = None finally: result += "[" for column in ("proto", "dst_port", "dst_ip", "src_ip", "first_seen", "last_seen", "count"): if "_seen" in column: result += '"%s",' % datetime.datetime.utcfromtimestamp(int(row[column])).strftime(TIME_FORMAT) elif "_port" in column and port_name: result += '"%s (%s)",' % (row[column], port_name) else: result += '"%s",' % row[column] result += "],\n" return result def _trendline_data(self): result = "\n" series = {} dates = set() filters = self._get_filters() for filename in sorted(glob.glob(os.path.join(LOG_DIRECTORY, "*.csv")))[-config.TRENDLINE_PERIOD:]: with open(filename, "rb") as f: match = re.search(r"([\d-]+)\.csv", filename) if match: date = match.group(1) else: continue reader = csv.DictReader(f, delimiter=' ') for row in reader: if filters and not (addr_to_int(row["src_ip"]) in filters or addr_to_int(row["dst_ip"]) in filters): continue try: port = int(row['dst_port']) port_name = 
MISC_PORTS.get(port) or socket.getservbyport(port, row['proto'].lower()) except Exception: port_name = None finally: serie = "%s%s%s" % (row['proto'].upper(), " %s" % row['dst_port'] if row['dst_port'].isdigit() else "", " (%s)" % port_name if port_name else "") if serie not in series: series[serie] = {} if date not in series[serie]: series[serie][date] = 0 series[serie][date] += 1 dates.add(date) keys = series.keys() if keys: last_date = max(dates) totals = {} for key in list(keys): if not filters: if any(series[key].get(date, 0) < config.TRENDLINE_DAILY_THRESHOLD for date in dates if date != last_date): if all(series[key].get(date, 0) < config.TRENDLINE_DAILY_BURST for date in dates): del keys[keys.index(key)] totals[key] = series[key].get(last_date, 0) keys = sorted(keys, key=lambda key: totals[key], reverse=True) result += "['Date',%s],\n" % ','.join("'%s'" % key for key in keys) for date in sorted(dates): year, month, day = date.split('-') result += "[new Date(%s,%d,%s)," % (year, int(month) - 1, day) for serie in keys: result += "%s," % series[serie].get(date, 0) result += "],\n" result = result[:-1] return result def _format(self, content, **params): if content: for key, value in params.items(): content = content.replace("<!%s!>" % key, value) return content def version_string(self): return SERVER_HEADER def log_message(self, format, *args): return def finish(self): try: BaseHTTPServer.BaseHTTPRequestHandler.finish(self) except Exception: if DEBUG: traceback.print_exc() def start_httpd(): server = ThreadingServer((config.HTTP_ADDRESS, config.HTTP_PORT), ReqHandler) thread = threading.Thread(target=server.serve_forever) thread.daemon = True thread.start() print "[i] running HTTP server at '%s:%d'" % (config.HTTP_ADDRESS, config.HTTP_PORT)
mit
egabancho/invenio
invenio/modules/upgrader/upgrades/invenio_2013_11_12_new_param_websubmit_function.py
3
1227
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

from invenio.legacy.dbquery import run_sql

# This recipe must run after the 1.1.0 release baseline.
depends_on = ['invenio_release_1_1_0']


def info():
    """Return the human-readable summary of this upgrade recipe."""
    return "New 'deferRelatedFormatsCreation' parameter for Create_Upload_Files_Interface WebSubmit function"


def do_upgrade():
    """Register the new WebSubmit function parameter in sbmFUNDESC."""
    run_sql("""INSERT INTO sbmFUNDESC VALUES ('Create_Upload_Files_Interface','deferRelatedFormatsCreation')""")


def estimate():
    """ Estimate running time of upgrade in seconds (optional). """
    return 1
gpl-2.0
napalm-automation/napalm-yang
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/mpls/config/__init__.py
1
12287
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ class config(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/mpls/config. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: Configuration parameters relating to MPLS extensions for OSPFv2 """ __slots__ = ("_path_helper", "_extmethods", "__traffic_engineering_enabled") _yang_name = "config" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__traffic_engineering_enabled = YANGDynClass( base=YANGBool, is_leaf=True, yang_name="traffic-engineering-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=True, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "ospfv2", "areas", "area", "mpls", "config", ] def _get_traffic_engineering_enabled(self): """ Getter method for traffic_engineering_enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/mpls/config/traffic_engineering_enabled (boolean) YANG Description: Specifies whether traffic engineering extensions should be advertised within the area """ return self.__traffic_engineering_enabled def _set_traffic_engineering_enabled(self, v, load=False): """ Setter method for traffic_engineering_enabled, mapped from YANG variable 
/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/mpls/config/traffic_engineering_enabled (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_traffic_engineering_enabled is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_traffic_engineering_enabled() directly. YANG Description: Specifies whether traffic engineering extensions should be advertised within the area """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, is_leaf=True, yang_name="traffic-engineering-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """traffic_engineering_enabled must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="traffic-engineering-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""", } ) self.__traffic_engineering_enabled = t if hasattr(self, "_set"): self._set() def _unset_traffic_engineering_enabled(self): self.__traffic_engineering_enabled = YANGDynClass( base=YANGBool, is_leaf=True, yang_name="traffic-engineering-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=True, ) traffic_engineering_enabled = __builtin__.property( _get_traffic_engineering_enabled, _set_traffic_engineering_enabled ) 
_pyangbind_elements = OrderedDict( [("traffic_engineering_enabled", traffic_engineering_enabled)] ) class config(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/mpls/config. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Configuration parameters relating to MPLS extensions for OSPFv2 """ __slots__ = ("_path_helper", "_extmethods", "__traffic_engineering_enabled") _yang_name = "config" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__traffic_engineering_enabled = YANGDynClass( base=YANGBool, is_leaf=True, yang_name="traffic-engineering-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=True, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "ospfv2", "areas", "area", "mpls", "config", ] def _get_traffic_engineering_enabled(self): """ Getter method for traffic_engineering_enabled, mapped from 
YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/mpls/config/traffic_engineering_enabled (boolean) YANG Description: Specifies whether traffic engineering extensions should be advertised within the area """ return self.__traffic_engineering_enabled def _set_traffic_engineering_enabled(self, v, load=False): """ Setter method for traffic_engineering_enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/mpls/config/traffic_engineering_enabled (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_traffic_engineering_enabled is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_traffic_engineering_enabled() directly. YANG Description: Specifies whether traffic engineering extensions should be advertised within the area """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, is_leaf=True, yang_name="traffic-engineering-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """traffic_engineering_enabled must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="traffic-engineering-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""", } ) self.__traffic_engineering_enabled = t if hasattr(self, "_set"): self._set() def _unset_traffic_engineering_enabled(self): self.__traffic_engineering_enabled = YANGDynClass( 
base=YANGBool, is_leaf=True, yang_name="traffic-engineering-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=True, ) traffic_engineering_enabled = __builtin__.property( _get_traffic_engineering_enabled, _set_traffic_engineering_enabled ) _pyangbind_elements = OrderedDict( [("traffic_engineering_enabled", traffic_engineering_enabled)] )
apache-2.0
theguardian/LazyLibrarian_Old
cherrypy/lib/jsontools.py
5
3684
import sys
import cherrypy
from cherrypy._cpcompat import basestring, ntou, json, json_encode, json_decode


def json_processor(entity):
    """Read application/json data into request.json."""
    # A Content-Length header is mandatory for the deserializer.
    if not entity.headers.get(ntou("Content-Length"), ntou("")):
        raise cherrypy.HTTPError(411)

    body = entity.fp.read()
    try:
        cherrypy.serving.request.json = json_decode(body.decode('utf-8'))
    except ValueError:
        raise cherrypy.HTTPError(400, 'Invalid JSON document')


def json_in(content_type=[ntou('application/json'), ntou('text/javascript')],
            force=True, debug=False, processor=json_processor):
    """Add a processor to parse JSON request entities:
    The default processor places the parsed data into request.json.

    Incoming request entities which match the given content_type(s) will
    be deserialized from JSON to the Python equivalent, and the result
    stored at cherrypy.request.json.

    The 'content_type' argument may be a Content-Type string or a list of
    allowable Content-Type strings.

    If the 'force' argument is True (the default), then entities of other
    content types will not be allowed; "415 Unsupported Media Type" is
    raised instead.

    Supply your own processor to use a custom decoder, or to handle the parsed
    data differently.  The processor can be configured via
    tools.json_in.processor or via the decorator method.

    Note that the deserializer requires the client send a Content-Length
    request header, or it will raise "411 Length Required". If for any
    other reason the request entity cannot be deserialized from JSON,
    it will raise "400 Bad Request: Invalid JSON document".

    You must be using Python 2.6 or greater, or have the 'simplejson'
    package importable; otherwise, ValueError is raised during processing.
    """
    request = cherrypy.serving.request
    if isinstance(content_type, basestring):
        content_type = [content_type]

    if force:
        if debug:
            cherrypy.log('Removing body processors %s' %
                         repr(request.body.processors.keys()), 'TOOLS.JSON_IN')
        request.body.processors.clear()
        # Any content type not explicitly registered below is rejected.
        request.body.default_proc = cherrypy.HTTPError(
            415, 'Expected an entity of content type %s' %
            ', '.join(content_type))

    for ct in content_type:
        if debug:
            cherrypy.log('Adding body processor for %s' % ct, 'TOOLS.JSON_IN')
        request.body.processors[ct] = processor


def json_handler(*args, **kwargs):
    # Run the wrapped handler, then serialize its return value to JSON.
    value = cherrypy.serving.request._json_inner_handler(*args, **kwargs)
    return json_encode(value)


def json_out(content_type='application/json', debug=False, handler=json_handler):
    """Wrap request.handler to serialize its output to JSON. Sets Content-Type.

    If the given content_type is None, the Content-Type response header
    is not set.

    Provide your own handler to use a custom encoder.  For example
    cherrypy.config['tools.json_out.handler'] = <function>, or
    @json_out(handler=function).

    You must be using Python 2.6 or greater, or have the 'simplejson'
    package importable; otherwise, ValueError is raised during processing.
    """
    request = cherrypy.serving.request
    if debug:
        cherrypy.log('Replacing %s with JSON handler' % request.handler,
                     'TOOLS.JSON_OUT')
    request._json_inner_handler = request.handler
    request.handler = handler
    if content_type is not None:
        if debug:
            # BUG FIX: the original logged '%s' % ct, but 'ct' is undefined in
            # this function (it only exists inside json_in's loop), raising
            # NameError whenever debug=True here. Log content_type instead.
            cherrypy.log('Setting Content-Type to %s' % content_type,
                         'TOOLS.JSON_OUT')
        cherrypy.serving.response.headers['Content-Type'] = content_type
gpl-3.0
sgraham/nope
tools/grit/grit/format/policy_templates/writers/plist_strings_writer_unittest.py
44
12697
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. '''Unit tests for grit.format.policy_templates.writers.plist_strings_writer''' import os import sys if __name__ == '__main__': sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..')) import unittest from grit.format.policy_templates.writers import writer_unittest_common class PListStringsWriterUnittest(writer_unittest_common.WriterUnittestCommon): '''Unit tests for PListStringsWriter.''' def testEmpty(self): # Test PListStringsWriter in case of empty polices. grd = self.PrepareTest(''' { 'policy_definitions': [], 'placeholders': [], 'messages': { 'mac_chrome_preferences': { 'text': '$1 preferen"ces', 'desc': 'blah' } } }''') output = self.GetOutput( grd, 'fr', {'_chromium': '1', 'mac_bundle_id': 'com.example.Test'}, 'plist_strings', 'en') expected_output = ( 'Chromium.pfm_title = "Chromium";\n' 'Chromium.pfm_description = "Chromium preferen\\"ces";') self.assertEquals(output.strip(), expected_output.strip()) def testEmptyVersion(self): # Test PListStringsWriter in case of empty polices. grd = self.PrepareTest(''' { 'policy_definitions': [], 'placeholders': [], 'messages': { 'mac_chrome_preferences': { 'text': '$1 preferen"ces', 'desc': 'blah' } } }''') output = self.GetOutput( grd, 'fr', {'_chromium': '1', 'mac_bundle_id': 'com.example.Test', 'version': '39.0.0.0'}, 'plist_strings', 'en') expected_output = ( '/* chromium version: 39.0.0.0 */\n' 'Chromium.pfm_title = "Chromium";\n' 'Chromium.pfm_description = "Chromium preferen\\"ces";') self.assertEquals(output.strip(), expected_output.strip()) def testMainPolicy(self): # Tests a policy group with a single policy of type 'main'. 
grd = self.PrepareTest(''' { 'policy_definitions': [ { 'name': 'MainGroup', 'type': 'group', 'caption': 'Caption of main.', 'desc': 'Description of main.', 'policies': [{ 'name': 'MainPolicy', 'type': 'main', 'supported_on': ['chrome.mac:8-'], 'caption': 'Caption of main policy.', 'desc': 'Description of main policy.', }], }, ], 'placeholders': [], 'messages': { 'mac_chrome_preferences': { 'text': 'Preferences of $1', 'desc': 'blah' } } }''') output = self.GetOutput( grd, 'fr', {'_google_chrome' : '1', 'mac_bundle_id': 'com.example.Test'}, 'plist_strings', 'en') expected_output = ( 'Google_Chrome.pfm_title = "Google Chrome";\n' 'Google_Chrome.pfm_description = "Preferences of Google Chrome";\n' 'MainPolicy.pfm_title = "Caption of main policy.";\n' 'MainPolicy.pfm_description = "Description of main policy.";') self.assertEquals(output.strip(), expected_output.strip()) def testStringPolicy(self): # Tests a policy group with a single policy of type 'string'. Also test # inheriting group description to policy description. grd = self.PrepareTest(''' { 'policy_definitions': [ { 'name': 'StringGroup', 'type': 'group', 'caption': 'Caption of group.', 'desc': """Description of group. With a newline.""", 'policies': [{ 'name': 'StringPolicy', 'type': 'string', 'caption': 'Caption of policy.', 'desc': """Description of policy. 
With a newline.""", 'supported_on': ['chrome.mac:8-'], }], }, ], 'placeholders': [], 'messages': { 'mac_chrome_preferences': { 'text': 'Preferences of $1', 'desc': 'blah' } } }''') output = self.GetOutput( grd, 'fr', {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'}, 'plist_strings', 'en') expected_output = ( 'Chromium.pfm_title = "Chromium";\n' 'Chromium.pfm_description = "Preferences of Chromium";\n' 'StringPolicy.pfm_title = "Caption of policy.";\n' 'StringPolicy.pfm_description = ' '"Description of policy.\\nWith a newline.";') self.assertEquals(output.strip(), expected_output.strip()) def testStringListPolicy(self): # Tests a policy group with a single policy of type 'list'. grd = self.PrepareTest(''' { 'policy_definitions': [ { 'name': 'ListGroup', 'type': 'group', 'caption': '', 'desc': '', 'policies': [{ 'name': 'ListPolicy', 'type': 'list', 'caption': 'Caption of policy.', 'desc': """Description of policy. With a newline.""", 'schema': { 'type': 'array', 'items': { 'type': 'string' }, }, 'supported_on': ['chrome.mac:8-'], }], }, ], 'placeholders': [], 'messages': { 'mac_chrome_preferences': { 'text': 'Preferences of $1', 'desc': 'blah' } } }''') output = self.GetOutput( grd, 'fr', {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'}, 'plist_strings', 'en') expected_output = ( 'Chromium.pfm_title = "Chromium";\n' 'Chromium.pfm_description = "Preferences of Chromium";\n' 'ListPolicy.pfm_title = "Caption of policy.";\n' 'ListPolicy.pfm_description = ' '"Description of policy.\\nWith a newline.";') self.assertEquals(output.strip(), expected_output.strip()) def testStringEnumListPolicy(self): # Tests a policy group with a single policy of type 'string-enum-list'. grd = self.PrepareTest(''' { 'policy_definitions': [ { 'name': 'EnumGroup', 'type': 'group', 'caption': '', 'desc': '', 'policies': [{ 'name': 'EnumPolicy', 'type': 'string-enum-list', 'caption': 'Caption of policy.', 'desc': """Description of policy. 
With a newline.""", 'schema': { 'type': 'array', 'items': { 'type': 'string' }, }, 'items': [ { 'name': 'ProxyServerDisabled', 'value': 'one', 'caption': 'Option1' }, { 'name': 'ProxyServerAutoDetect', 'value': 'two', 'caption': 'Option2' }, ], 'supported_on': ['chrome.mac:8-'], }], }, ], 'placeholders': [], 'messages': { 'mac_chrome_preferences': { 'text': 'Preferences of $1', 'desc': 'blah' } } }''') output = self.GetOutput( grd, 'fr', {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'}, 'plist_strings', 'en') expected_output = ( 'Chromium.pfm_title = "Chromium";\n' 'Chromium.pfm_description = "Preferences of Chromium";\n' 'EnumPolicy.pfm_title = "Caption of policy.";\n' 'EnumPolicy.pfm_description = ' '"one - Option1\\ntwo - Option2\\n' 'Description of policy.\\nWith a newline.";') self.assertEquals(output.strip(), expected_output.strip()) def testIntEnumPolicy(self): # Tests a policy group with a single policy of type 'int-enum'. grd = self.PrepareTest(''' { 'policy_definitions': [ { 'name': 'EnumGroup', 'type': 'group', 'desc': '', 'caption': '', 'policies': [{ 'name': 'EnumPolicy', 'type': 'int-enum', 'desc': 'Description of policy.', 'caption': 'Caption of policy.', 'items': [ { 'name': 'ProxyServerDisabled', 'value': 0, 'caption': 'Option1' }, { 'name': 'ProxyServerAutoDetect', 'value': 1, 'caption': 'Option2' }, ], 'supported_on': ['chrome.mac:8-'], }], }, ], 'placeholders': [], 'messages': { 'mac_chrome_preferences': { 'text': '$1 preferences', 'desc': 'blah' } } }''') output = self.GetOutput( grd, 'fr', {'_google_chrome': '1', 'mac_bundle_id': 'com.example.Test2'}, 'plist_strings', 'en') expected_output = ( 'Google_Chrome.pfm_title = "Google Chrome";\n' 'Google_Chrome.pfm_description = "Google Chrome preferences";\n' 'EnumPolicy.pfm_title = "Caption of policy.";\n' 'EnumPolicy.pfm_description = ' '"0 - Option1\\n1 - Option2\\nDescription of policy.";\n') self.assertEquals(output.strip(), expected_output.strip()) def testStringEnumPolicy(self): # 
Tests a policy group with a single policy of type 'string-enum'. grd = self.PrepareTest(''' { 'policy_definitions': [ { 'name': 'EnumGroup', 'type': 'group', 'desc': '', 'caption': '', 'policies': [{ 'name': 'EnumPolicy', 'type': 'string-enum', 'desc': 'Description of policy.', 'caption': 'Caption of policy.', 'items': [ { 'name': 'ProxyServerDisabled', 'value': 'one', 'caption': 'Option1' }, { 'name': 'ProxyServerAutoDetect', 'value': 'two', 'caption': 'Option2' }, ], 'supported_on': ['chrome.mac:8-'], }], }, ], 'placeholders': [], 'messages': { 'mac_chrome_preferences': { 'text': '$1 preferences', 'desc': 'blah' } } }''') output = self.GetOutput( grd, 'fr', {'_google_chrome': '1', 'mac_bundle_id': 'com.example.Test2'}, 'plist_strings', 'en') expected_output = ( 'Google_Chrome.pfm_title = "Google Chrome";\n' 'Google_Chrome.pfm_description = "Google Chrome preferences";\n' 'EnumPolicy.pfm_title = "Caption of policy.";\n' 'EnumPolicy.pfm_description = ' '"one - Option1\\ntwo - Option2\\nDescription of policy.";\n') self.assertEquals(output.strip(), expected_output.strip()) def testNonSupportedPolicy(self): # Tests a policy that is not supported on Mac, so its strings shouldn't # be included in the plist string table. grd = self.PrepareTest(''' { 'policy_definitions': [ { 'name': 'NonMacGroup', 'type': 'group', 'caption': '', 'desc': '', 'policies': [{ 'name': 'NonMacPolicy', 'type': 'string', 'caption': '', 'desc': '', 'supported_on': ['chrome_os:8-'], }], }, ], 'placeholders': [], 'messages': { 'mac_chrome_preferences': { 'text': '$1 preferences', 'desc': 'blah' } } }''') output = self.GetOutput( grd, 'fr', {'_google_chrome': '1', 'mac_bundle_id': 'com.example.Test2'}, 'plist_strings', 'en') expected_output = ( 'Google_Chrome.pfm_title = "Google Chrome";\n' 'Google_Chrome.pfm_description = "Google Chrome preferences";') self.assertEquals(output.strip(), expected_output.strip()) if __name__ == '__main__': unittest.main()
bsd-3-clause
pradyu1993/scikit-learn
sklearn/covariance/outlier_detection.py
1
6766
""" Class for outlier detection. This class provides a framework for outlier detection. It consists in several methods that can be added to a covariance estimator in order to assess the outlying-ness of the observations of a data set. Such a "outlier detector" object is proposed constructed from a robust covariance estimator (the Minimum Covariance Determinant). """ # Author: Virgile Fritsch <virgile.fritsch@inria.fr> # # License: BSD Style. import numpy as np import scipy as sp from . import MinCovDet from ..utils import deprecated from ..base import ClassifierMixin class OutlierDetectionMixin(object): """Set of methods for outliers detection with covariance estimators. Parameters ---------- contamination: float, 0. < contamination < 0.5 The amount of contamination of the data set, i.e. the proportion of outliers in the data set. Notes ----- Outlier detection from covariance estimation may break or not perform well in high-dimensional settings. In particular, one will always take care to work with ``n_samples > n_features ** 2``. """ def __init__(self, contamination=0.1): self.contamination = contamination self.threshold = None def decision_function(self, X, raw_values=False): """Compute the decision function of the given observations. Parameters ---------- X: array-like, shape (n_samples, n_features) raw_values: bool Whether or not to consider raw Mahalanobis distances as the decision function. Must be False (default) for compatibility with the others outlier detection tools. Returns ------- decision: array-like, shape (n_samples, ) The values of the decision function for each observations. It is equal to the Mahalanobis distances if `raw_values` is True. By default (``raw_values=True``), it is equal to the cubic root of the shifted Mahalanobis distances. In that case, the threshold for being an outlier is 0, which ensures a compatibility with other outlier detection tools such as the One-Class SVM. 
""" mahal_dist = self.mahalanobis(X) if raw_values: decision = mahal_dist else: if self.threshold is None: raise Exception("Please fit data before predicting") transformed_mahal_dist = mahal_dist ** 0.33 decision = self.threshold ** 0.33 - transformed_mahal_dist return decision def predict(self, X): """Outlyingness of observations in X according to the fitted model. Parameters ---------- X: array-like, shape = (n_samples, n_features) Returns ------- is_outliers: array, shape = (n_samples, ), dtype = bool For each observations, tells whether or not it should be considered as an outlier according to the fitted model. threshold: float, The values of the less outlying point's decision function. """ if self.threshold is None: raise Exception("Please fit data before predicting") is_inlier = -np.ones(X.shape[0], dtype=int) if self.contamination is not None: values = self.decision_function(X, raw_values=True) is_inlier[values <= self.threshold] = 1 else: raise NotImplemented("You must provide a contamination rate.") return is_inlier class EllipticEnvelope(ClassifierMixin, OutlierDetectionMixin, MinCovDet): """An object for detecting outliers in a Gaussian distributed dataset. Attributes ---------- `contamination`: float, 0. < contamination < 0.5 The amount of contamination of the data set, i.e. the proportion of \ outliers in the data set. `location_`: array-like, shape (n_features,) Estimated robust location `covariance_`: array-like, shape (n_features, n_features) Estimated robust covariance matrix `precision_`: array-like, shape (n_features, n_features) Estimated pseudo inverse matrix. (stored only if store_precision is True) `support_`: array-like, shape (n_samples,) A mask of the observations that have been used to compute the robust estimates of location and shape. 
Parameters ---------- store_precision: bool Specify if the estimated precision is stored assume_centered: Boolean If True, the support of robust location and covariance estimates is computed, and a covariance estimate is recomputed from it, without centering the data. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, the robust location and covariance are directly computed with the FastMCD algorithm without additional treatment. support_fraction: float, 0 < support_fraction < 1 The proportion of points to be included in the support of the raw MCD estimate. Default is ``None``, which implies that the minimum value of support_fraction will be used within the algorithm: [n_sample + n_features + 1] / 2 contamination: float, 0. < contamination < 0.5 The amount of contamination of the data set, i.e. the proportion of outliers in the data set. See Also -------- EmpiricalCovariance, MinCovDet Notes ----- Outlier detection from covariance estimation may break or not perform well in high-dimensional settings. In particular, one will always take care to work with ``n_samples > n_features ** 2``. References ---------- .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the minimum covariance determinant estimator" Technometrics 41(3), 212 (1999) """ def __init__(self, store_precision=True, assume_centered=False, support_fraction=None, contamination=0.1, random_state=None): MinCovDet.__init__(self, store_precision=store_precision, assume_centered=assume_centered, support_fraction=support_fraction, random_state=random_state) OutlierDetectionMixin.__init__(self, contamination=contamination) def fit(self, X): """ """ MinCovDet.fit(self, X) self.threshold = sp.stats.scoreatpercentile( self.dist_, 100. * (1. - self.contamination)) return self # Deprecated classes @deprecated("Use EllipticEnvelope instead. To be removed in 0.13.") class EllipticEnvelop(EllipticEnvelope): pass
bsd-3-clause
Matt-Deacalion/django
tests/utils_tests/test_http.py
78
7444
from __future__ import unicode_literals import sys import unittest from datetime import datetime from django.utils import http, six from django.utils.datastructures import MultiValueDict class TestUtilsHttp(unittest.TestCase): def test_urlencode(self): # 2-tuples (the norm) result = http.urlencode((('a', 1), ('b', 2), ('c', 3))) self.assertEqual(result, 'a=1&b=2&c=3') # A dictionary result = http.urlencode({'a': 1, 'b': 2, 'c': 3}) acceptable_results = [ # Need to allow all of these as dictionaries have to be treated as # unordered 'a=1&b=2&c=3', 'a=1&c=3&b=2', 'b=2&a=1&c=3', 'b=2&c=3&a=1', 'c=3&a=1&b=2', 'c=3&b=2&a=1' ] self.assertIn(result, acceptable_results) result = http.urlencode({'a': [1, 2]}, doseq=False) self.assertEqual(result, 'a=%5B%271%27%2C+%272%27%5D') result = http.urlencode({'a': [1, 2]}, doseq=True) self.assertEqual(result, 'a=1&a=2') result = http.urlencode({'a': []}, doseq=True) self.assertEqual(result, '') # A MultiValueDict result = http.urlencode(MultiValueDict({ 'name': ['Adrian', 'Simon'], 'position': ['Developer'] }), doseq=True) acceptable_results = [ # MultiValueDicts are similarly unordered 'name=Adrian&name=Simon&position=Developer', 'position=Developer&name=Adrian&name=Simon' ] self.assertIn(result, acceptable_results) def test_base36(self): # reciprocity works for n in [0, 1, 1000, 1000000]: self.assertEqual(n, http.base36_to_int(http.int_to_base36(n))) if six.PY2: self.assertEqual(sys.maxint, http.base36_to_int(http.int_to_base36(sys.maxint))) # bad input self.assertRaises(ValueError, http.int_to_base36, -1) if six.PY2: self.assertRaises(ValueError, http.int_to_base36, sys.maxint + 1) for n in ['1', 'foo', {1: 2}, (1, 2, 3), 3.141]: self.assertRaises(TypeError, http.int_to_base36, n) for n in ['#', ' ']: self.assertRaises(ValueError, http.base36_to_int, n) for n in [123, {1: 2}, (1, 2, 3), 3.141]: self.assertRaises(TypeError, http.base36_to_int, n) # more explicit output testing for n, b36 in [(0, '0'), (1, '1'), (42, '16'), 
(818469960, 'django')]: self.assertEqual(http.int_to_base36(n), b36) self.assertEqual(http.base36_to_int(b36), n) def test_is_safe_url(self): for bad_url in ('http://example.com', 'http:///example.com', 'https://example.com', 'ftp://exampel.com', r'\\example.com', r'\\\example.com', r'/\\/example.com', r'\\\example.com', r'\\example.com', r'\\//example.com', r'/\/example.com', r'\/example.com', r'/\example.com', 'http:///example.com', 'http:/\//example.com', 'http:\/example.com', 'http:/\example.com', 'javascript:alert("XSS")', '\njavascript:alert(x)', '\x08//example.com', '\n'): self.assertFalse(http.is_safe_url(bad_url, host='testserver'), "%s should be blocked" % bad_url) for good_url in ('/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://exampel.com', 'view/?param=//example.com', 'https://testserver/', 'HTTPS://testserver/', '//testserver/', '/url%20with%20spaces/'): self.assertTrue(http.is_safe_url(good_url, host='testserver'), "%s should be allowed" % good_url) def test_urlsafe_base64_roundtrip(self): bytestring = b'foo' encoded = http.urlsafe_base64_encode(bytestring) decoded = http.urlsafe_base64_decode(encoded) self.assertEqual(bytestring, decoded) def test_urlquote(self): self.assertEqual(http.urlquote('Paris & Orl\xe9ans'), 'Paris%20%26%20Orl%C3%A9ans') self.assertEqual(http.urlquote('Paris & Orl\xe9ans', safe="&"), 'Paris%20&%20Orl%C3%A9ans') self.assertEqual( http.urlunquote('Paris%20%26%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans') self.assertEqual( http.urlunquote('Paris%20&%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans') self.assertEqual(http.urlquote_plus('Paris & Orl\xe9ans'), 'Paris+%26+Orl%C3%A9ans') self.assertEqual(http.urlquote_plus('Paris & Orl\xe9ans', safe="&"), 'Paris+&+Orl%C3%A9ans') self.assertEqual( http.urlunquote_plus('Paris+%26+Orl%C3%A9ans'), 'Paris & Orl\xe9ans') self.assertEqual( http.urlunquote_plus('Paris+&+Orl%C3%A9ans'), 'Paris & Orl\xe9ans') def test_is_same_domain_good(self): for pair in ( 
('example.com', 'example.com'), ('example.com', '.example.com'), ('foo.example.com', '.example.com'), ('example.com:8888', 'example.com:8888'), ('example.com:8888', '.example.com:8888'), ('foo.example.com:8888', '.example.com:8888'), ): self.assertTrue(http.is_same_domain(*pair)) def test_is_same_domain_bad(self): for pair in ( ('example2.com', 'example.com'), ('foo.example.com', 'example.com'), ('example.com:9999', 'example.com:8888'), ): self.assertFalse(http.is_same_domain(*pair)) class ETagProcessingTests(unittest.TestCase): def test_parsing(self): etags = http.parse_etags(r'"", "etag", "e\"t\"ag", "e\\tag", W/"weak"') self.assertEqual(etags, ['', 'etag', 'e"t"ag', r'e\tag', 'weak']) def test_quoting(self): quoted_etag = http.quote_etag(r'e\t"ag') self.assertEqual(quoted_etag, r'"e\\t\"ag"') class HttpDateProcessingTests(unittest.TestCase): def test_http_date(self): t = 1167616461.0 self.assertEqual(http.http_date(t), 'Mon, 01 Jan 2007 01:54:21 GMT') def test_cookie_date(self): t = 1167616461.0 self.assertEqual(http.cookie_date(t), 'Mon, 01-Jan-2007 01:54:21 GMT') def test_parsing_rfc1123(self): parsed = http.parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT') self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37)) def test_parsing_rfc850(self): parsed = http.parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT') self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37)) def test_parsing_asctime(self): parsed = http.parse_http_date('Sun Nov 6 08:49:37 1994') self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
bsd-3-clause
peterzdeb/torrent-gateway
src/python/bin/process_all.py
1
1438
import logging import os import sys PROD_ROOT = os.environ.get('PROD_ROOT', os.getcwd()) sys.path.append(os.path.join(PROD_ROOT, 'src', 'python')) from core.utils import remove_xdc from processors.file_analyzer import FileAnalyzer if __name__ == '__main__': logging.basicConfig(filename='/var/log/torrent_gw.log', format='%(asctime)-15s %(name)s %(levelname)s %(message)s', level=logging.DEBUG) logger = logging.getLogger('gateway') logger.setLevel(logging.DEBUG) env = os.environ torrent_file = '' try: if len(sys.argv) > 1: logger.debug('parsing args: %s', str(sys.argv)) torrents_dir = os.path.join(*sys.argv[1:]) else: torrents_dir = '' if not torrents_dir: logger.error('Torrent error - no torrents dir specified') sys.exit(1) for torrent_file in os.listdir(torrents_dir): logger.info('Start processing file: %s' % torrent_file) analyzer = FileAnalyzer(env.get('TR_TORRENT_DIR', '/media/Disk-D/Torrents')) analyzer.process_file(torrent_file) except Exception as err: try: logger.exception('Error occurred when downloading file "%s": %s' % (torrent_file, err)) except Exception as err: logger.exception('Critical internal server error: %s' % err) sys.exit(1) sys.exit(0)
bsd-3-clause
bybyby/shadowsocks
shadowsocks/lru_cache.py
983
4290
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, \ with_statement import collections import logging import time # this LRUCache is optimized for concurrency, not QPS # n: concurrency, keys stored in the cache # m: visits not timed out, proportional to QPS * timeout # get & set is O(1), not O(n). thus we can support very large n # TODO: if timeout or QPS is too large, then this cache is not very efficient, # as sweep() causes long pause class LRUCache(collections.MutableMapping): """This class is not thread safe""" def __init__(self, timeout=60, close_callback=None, *args, **kwargs): self.timeout = timeout self.close_callback = close_callback self._store = {} self._time_to_keys = collections.defaultdict(list) self._keys_to_last_time = {} self._last_visits = collections.deque() self._closed_values = set() self.update(dict(*args, **kwargs)) # use the free update to set keys def __getitem__(self, key): # O(1) t = time.time() self._keys_to_last_time[key] = t self._time_to_keys[t].append(key) self._last_visits.append(t) return self._store[key] def __setitem__(self, key, value): # O(1) t = time.time() self._keys_to_last_time[key] = t self._store[key] = value self._time_to_keys[t].append(key) self._last_visits.append(t) def __delitem__(self, key): # O(1) del self._store[key] del self._keys_to_last_time[key] def __iter__(self): return iter(self._store) def 
__len__(self): return len(self._store) def sweep(self): # O(m) now = time.time() c = 0 while len(self._last_visits) > 0: least = self._last_visits[0] if now - least <= self.timeout: break if self.close_callback is not None: for key in self._time_to_keys[least]: if key in self._store: if now - self._keys_to_last_time[key] > self.timeout: value = self._store[key] if value not in self._closed_values: self.close_callback(value) self._closed_values.add(value) for key in self._time_to_keys[least]: self._last_visits.popleft() if key in self._store: if now - self._keys_to_last_time[key] > self.timeout: del self._store[key] del self._keys_to_last_time[key] c += 1 del self._time_to_keys[least] if c: self._closed_values.clear() logging.debug('%d keys swept' % c) def test(): c = LRUCache(timeout=0.3) c['a'] = 1 assert c['a'] == 1 time.sleep(0.5) c.sweep() assert 'a' not in c c['a'] = 2 c['b'] = 3 time.sleep(0.2) c.sweep() assert c['a'] == 2 assert c['b'] == 3 time.sleep(0.2) c.sweep() c['b'] time.sleep(0.2) c.sweep() assert 'a' not in c assert c['b'] == 3 time.sleep(0.5) c.sweep() assert 'a' not in c assert 'b' not in c global close_cb_called close_cb_called = False def close_cb(t): global close_cb_called assert not close_cb_called close_cb_called = True c = LRUCache(timeout=0.1, close_callback=close_cb) c['s'] = 1 c['s'] time.sleep(0.1) c['s'] time.sleep(0.3) c.sweep() if __name__ == '__main__': test()
apache-2.0
starcraftman/python-client
test/test_vim.py
2
4492
# -*- coding: utf-8 -*- import os, tempfile from nose.tools import with_setup, eq_ as eq, ok_ as ok from common import vim, cleanup def source(code): fd, fname = tempfile.mkstemp() with os.fdopen(fd,'w') as f: f.write(code) vim.command('source '+fname) os.unlink(fname) @with_setup(setup=cleanup) def test_command(): fname = tempfile.mkstemp()[1] vim.command('new') vim.command('edit %s' % fname) # skip the "press return" state, which does not handle deferred calls vim.input('\r') vim.command('normal itesting\npython\napi') vim.command('w') ok(os.path.isfile(fname)) eq(open(fname).read(), 'testing\npython\napi\n') os.unlink(fname) @with_setup def test_command_output(): eq(vim.command_output('echo test'), 'test') @with_setup(setup=cleanup) def test_eval(): vim.command('let g:v1 = "a"') vim.command('let g:v2 = [1, 2, {"v3": 3}]') eq(vim.eval('g:'), {'v1': 'a', 'v2': [1, 2, {'v3': 3}]}) @with_setup(setup=cleanup) def test_call(): eq(vim.funcs.join(['first', 'last'], ', '), 'first, last') source(""" function! Testfun(a,b) return string(a:a).":".a:b endfunction """) eq(vim.funcs.Testfun(3, 'alpha'), '3:alpha') @with_setup(setup=cleanup) def test_strwidth(): eq(vim.strwidth('abc'), 3) # 6 + (neovim) # 19 * 2 (each japanese character occupies two cells) eq(vim.strwidth('neovimのデザインかなりまともなのになってる。'), 44) @with_setup(setup=cleanup) def test_list_runtime_paths(): # Is this the default runtime path list? 
homedir = os.path.join(os.environ['HOME'], '.nvim') vimdir = vim.eval('$VIM') dflt_rtp = [ homedir, os.path.join(vimdir, 'vimfiles'), vimdir, os.path.join(vimdir, 'vimfiles', 'after') ] # If the runtime is installed the default path # is nvim/runtime dflt_rtp2 = list(dflt_rtp) dflt_rtp2[2] = os.path.join(dflt_rtp2[2], 'runtime') rtp = vim.list_runtime_paths() ok(rtp == dflt_rtp or rtp == dflt_rtp2) @with_setup(setup=cleanup) def test_chdir(): pwd = vim.eval('getcwd()') vim.chdir('/') eq(vim.eval('getcwd()'), '/') vim.chdir(pwd) eq(vim.eval('getcwd()'), pwd) @with_setup(setup=cleanup) def test_current_line(): eq(vim.current.line, '') vim.current.line = 'abc' eq(vim.current.line, 'abc') @with_setup(setup=cleanup) def test_vars(): vim.vars['python'] = [1, 2, {'3': 1}] eq(vim.vars['python'], [1, 2, {'3': 1}]) eq(vim.eval('g:python'), [1, 2, {'3': 1}]) @with_setup(setup=cleanup) def test_options(): eq(vim.options['listchars'], 'eol:$') vim.options['listchars'] = 'tab:xy' eq(vim.options['listchars'], 'tab:xy') @with_setup(setup=cleanup) def test_buffers(): eq(len(vim.buffers), 1) eq(vim.buffers[0], vim.current.buffer) vim.command('new') eq(len(vim.buffers), 2) eq(vim.buffers[1], vim.current.buffer) vim.current.buffer = vim.buffers[0] eq(vim.buffers[0], vim.current.buffer) @with_setup(setup=cleanup) def test_windows(): eq(len(vim.windows), 1) eq(vim.windows[0], vim.current.window) vim.command('vsplit') vim.command('split') eq(len(vim.windows), 3) eq(vim.windows[0], vim.current.window) vim.current.window = vim.windows[1] eq(vim.windows[1], vim.current.window) @with_setup(setup=cleanup) def test_tabpages(): eq(len(vim.tabpages), 1) eq(vim.tabpages[0], vim.current.tabpage) vim.command('tabnew') eq(len(vim.tabpages), 2) eq(len(vim.windows), 2) eq(vim.windows[1], vim.current.window) eq(vim.tabpages[1], vim.current.tabpage) vim.current.window = vim.windows[0] # Switching window also switches tabpages if necessary(this probably # isn't the current behavior, but compatibility 
will be handled in the # python client with an optional parameter) eq(vim.tabpages[0], vim.current.tabpage) eq(vim.windows[0], vim.current.window) vim.current.tabpage = vim.tabpages[1] eq(vim.tabpages[1], vim.current.tabpage) eq(vim.windows[1], vim.current.window) @with_setup(setup=cleanup) def test_hash(): d = {} d[vim.current.buffer] = "alpha" eq(d[vim.current.buffer], "alpha") vim.command('new') d[vim.current.buffer] = "beta" eq(d[vim.current.buffer], "beta") vim.command('winc w') eq(d[vim.current.buffer], "alpha") vim.command('winc w') eq(d[vim.current.buffer], "beta")
apache-2.0
cortedeltimo/SickRage
lib/rtorrent/lib/xmlrpc/scgi.py
14
7949
#!/usr/bin/python # rtorrent_xmlrpc # (c) 2011 Roger Que <alerante@bellsouth.net> # # Modified portions: # (c) 2013 Dean Gardiner <gardiner91@gmail.com> # # Python module for interacting with rtorrent's XML-RPC interface # directly over SCGI, instead of through an HTTP server intermediary. # Inspired by Glenn Washburn's xmlrpc2scgi.py [1], but subclasses the # built-in xmlrpclib classes so that it is compatible with features # such as MultiCall objects. # # [1] <http://libtorrent.rakshasa.no/wiki/UtilsXmlrpc2scgi> # # Usage: server = SCGIServerProxy('scgi://localhost:7000/') # server = SCGIServerProxy('scgi:///path/to/scgi.sock') # print server.system.listMethods() # mc = xmlrpclib.MultiCall(server) # mc.get_up_rate() # mc.get_down_rate() # print mc() # # # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA # # In addition, as a special exception, the copyright holders give # permission to link the code of portions of this program with the # OpenSSL library under certain conditions as described in each # individual source file, and distribute linked combinations # including the two. # # You must obey the GNU General Public License in all respects for # all of the code used other than OpenSSL. 
If you modify file(s) # with this exception, you may extend this exception to your version # of the file(s), but you are not obligated to do so. If you do not # wish to do so, delete this exception statement from your version. # If you delete this exception statement from all source files in the # program, then also delete it here. # # # # Portions based on Python's xmlrpclib: # # Copyright (c) 1999-2002 by Secret Labs AB # Copyright (c) 1999-2002 by Fredrik Lundh # # By obtaining, using, and/or copying this software and/or its # associated documentation, you agree that you have read, understood, # and will comply with the following terms and conditions: # # Permission to use, copy, modify, and distribute this software and # its associated documentation for any purpose and without fee is # hereby granted, provided that the above copyright notice appears in # all copies, and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Secret Labs AB or the author not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. # # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- # ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. 
import httplib import re import socket import urllib import xmlrpclib import errno class SCGITransport(xmlrpclib.Transport): # Added request() from Python 2.7 xmlrpclib here to backport to Python 2.6 def request(self, host, handler, request_body, verbose=0): #retry request once if cached connection has gone cold for i in (0, 1): try: return self.single_request(host, handler, request_body, verbose) except socket.error, e: if i or e.errno not in (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE): raise except httplib.BadStatusLine: #close after we sent request if i: raise def single_request(self, host, handler, request_body, verbose=0): # Add SCGI headers to the request. headers = {'CONTENT_LENGTH': str(len(request_body)), 'SCGI': '1'} header = '\x00'.join(('%s\x00%s' % item for item in headers.iteritems())) + '\x00' header = '%d:%s' % (len(header), header) request_body = '%s,%s' % (header, request_body) sock = None try: if host: host, port = urllib.splitport(host) addrinfo = socket.getaddrinfo(host, int(port), socket.AF_INET, socket.SOCK_STREAM) sock = socket.socket(*addrinfo[0][:3]) sock.connect(addrinfo[0][4]) else: sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(handler) self.verbose = verbose sock.send(request_body) return self.parse_response(sock.makefile()) finally: if sock: sock.close() def parse_response(self, response): p, u = self.getparser() response_body = '' while True: data = response.read(1024) if not data: break response_body += data # Remove SCGI headers from the response. 
response_header, response_body = re.split(r'\n\s*?\n', response_body, maxsplit=1) if self.verbose: print 'body:', repr(response_body) p.feed(response_body) p.close() return u.close() class SCGIServerProxy(xmlrpclib.ServerProxy): def __init__(self, uri, transport=None, encoding=None, verbose=False, allow_none=False, use_datetime=False): type, uri = urllib.splittype(uri) if type not in ('scgi'): raise IOError('unsupported XML-RPC protocol') self.__host, self.__handler = urllib.splithost(uri) if not self.__handler: self.__handler = '/' if transport is None: transport = SCGITransport(use_datetime=use_datetime) self.__transport = transport self.__encoding = encoding self.__verbose = verbose self.__allow_none = allow_none def __close(self): self.__transport.close() def __request(self, methodname, params): # call a method on the remote server request = xmlrpclib.dumps(params, methodname, encoding=self.__encoding, allow_none=self.__allow_none) response = self.__transport.request( self.__host, self.__handler, request, verbose=self.__verbose ) if len(response) == 1: response = response[0] return response def __repr__(self): return ( "<SCGIServerProxy for %s%s>" % (self.__host, self.__handler) ) __str__ = __repr__ def __getattr__(self, name): # magic method dispatcher return xmlrpclib._Method(self.__request, name) # note: to call a remote object with an non-standard name, use # result getattr(server, "strange-python-name")(args) def __call__(self, attr): """A workaround to get special attributes on the ServerProxy without interfering with the magic __getattr__ """ if attr == "close": return self.__close elif attr == "transport": return self.__transport raise AttributeError("Attribute %r not found" % (attr,))
gpl-3.0
hyperized/ansible
lib/ansible/module_utils/network/frr/providers/providers.py
114
3943
# # (c) 2019, Ansible by Red Hat, inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # import json from threading import RLock from ansible.module_utils.six import itervalues from ansible.module_utils.network.common.utils import to_list from ansible.module_utils.network.common.config import NetworkConfig _registered_providers = {} _provider_lock = RLock() def register_provider(network_os, module_name): def wrapper(cls): _provider_lock.acquire() try: if network_os not in _registered_providers: _registered_providers[network_os] = {} for ct in cls.supported_connections: if ct not in _registered_providers[network_os]: _registered_providers[network_os][ct] = {} for item in to_list(module_name): for entry in itervalues(_registered_providers[network_os]): entry[item] = cls finally: _provider_lock.release() return cls return wrapper def get(network_os, module_name, connection_type): network_os_providers = _registered_providers.get(network_os) if network_os_providers is None: raise ValueError('unable to find a suitable provider for this module') if connection_type not in network_os_providers: raise ValueError('provider does not support this connection type') elif module_name not in network_os_providers[connection_type]: raise ValueError('could not find a suitable provider for this module') return network_os_providers[connection_type][module_name] class ProviderBase(object): supported_connections = () def __init__(self, params, connection=None, check_mode=False): self.params = params self.connection = connection self.check_mode = check_mode @property def capabilities(self): if not hasattr(self, '_capabilities'): resp = self.from_json(self.connection.get_capabilities()) setattr(self, '_capabilities', resp) return getattr(self, '_capabilities') def get_value(self, path): params = self.params.copy() for key in path.split('.'): params = params[key] return params def get_facts(self, subset=None): raise 
NotImplementedError(self.__class__.__name__) def edit_config(self): raise NotImplementedError(self.__class__.__name__) class CliProvider(ProviderBase): supported_connections = ('network_cli',) @property def capabilities(self): if not hasattr(self, '_capabilities'): resp = self.from_json(self.connection.get_capabilities()) setattr(self, '_capabilities', resp) return getattr(self, '_capabilities') def get_config_context(self, config, path, indent=1): if config is not None: netcfg = NetworkConfig(indent=indent, contents=config) try: config = netcfg.get_block_config(to_list(path)) except ValueError: config = None return config def render(self, config=None): raise NotImplementedError(self.__class__.__name__) def cli(self, command): try: if not hasattr(self, '_command_output'): setattr(self, '_command_output', {}) return self._command_output[command] except KeyError: out = self.connection.get(command) try: out = json.loads(out) except ValueError: pass self._command_output[command] = out return out def get_facts(self, subset=None): return self.populate() def edit_config(self, config=None): commands = self.render(config) if commands and self.check_mode is False: self.connection.edit_config(commands) return commands
gpl-3.0
kawamon/hue
desktop/core/ext-py/Django-1.11.29/django/conf/locale/it/formats.py
504
2079
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'd F Y' # 25 Ottobre 2006 TIME_FORMAT = 'H:i' # 14:30 DATETIME_FORMAT = 'l d F Y H:i' # Mercoledì 25 Ottobre 2006 14:30 YEAR_MONTH_FORMAT = 'F Y' # Ottobre 2006 MONTH_DAY_FORMAT = 'j/F' # 10/2006 SHORT_DATE_FORMAT = 'd/m/Y' # 25/12/2009 SHORT_DATETIME_FORMAT = 'd/m/Y H:i' # 25/10/2009 14:30 FIRST_DAY_OF_WEEK = 1 # Lunedì # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = [ '%d/%m/%Y', '%Y/%m/%d', # '25/10/2006', '2008/10/25' '%d-%m-%Y', '%Y-%m-%d', # '25-10-2006', '2008-10-25' '%d-%m-%y', '%d/%m/%y', # '25-10-06', '25/10/06' ] DATETIME_INPUT_FORMATS = [ '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59' '%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200' '%d/%m/%Y %H:%M', # '25/10/2006 14:30' '%d/%m/%Y', # '25/10/2006' '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59' '%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200' '%d/%m/%y %H:%M', # '25/10/06 14:30' '%d/%m/%y', # '25/10/06' '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59' '%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200' '%d-%m-%Y %H:%M', # '25-10-2006 14:30' '%d-%m-%Y', # '25-10-2006' '%d-%m-%y %H:%M:%S', # '25-10-06 14:30:59' '%d-%m-%y %H:%M:%S.%f', # '25-10-06 14:30:59.000200' '%d-%m-%y %H:%M', # '25-10-06 14:30' '%d-%m-%y', # '25-10-06' ] DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
apache-2.0
mdehollander/bioconda-recipes
recipes/phylip/phylip.py
48
8900
#!/usr/bin/env python # # Wrapper script for phylip program when installed from # bioconda. Adapted from shell scripts provided in the biolbuilds # conda recipe by Cheng H. Lee. import sys import os import subprocess def main(): print("running main") print(sys.argv) bindir = get_script_path(sys.argv[0]) sharedir= get_script_path(bindir+"/dnapars") print(sharedir) if len(sys.argv) == 1: print("Usage: {prog} <program>".format(prog=sys.argv[0])) print("Existing programs are: {progs}".format(progs=os.listdir(sharedir))) sys.exit(1) progname = sys.argv[1] program = bindir+"/"+progname if progname == "test": # hidden test of conda phylip installation test(bindir) elif(os.path.isfile(program)): subprocess.check_call(program) else: print("{prog} does not exist in Phylip".format(prog=progname)) usage() sys.exit(1) def usage(): print("Usage: {prog} <program>".format(prog=sys.argv[0])) print("Existing programs are: {progs}".format(progs=os.listdir(bindir))) def get_script_path(script): return os.path.dirname(os.path.realpath(script)) # Main function for testing the conda installation of phylip # This simply tests that phylip can process infiles without without error code def test(bindir): params = "0\ny\n" out = open("infile", "wt") out.write(infiles["testdna"]) out.close() for prog in ["dnapars","dnaml","dnadist","dnapenny","dnacomp","dnamlk"]: #,"dnainvar" testprog(prog, bindir,params) out = open("infile", "wt") out.write(infiles["testprot"]) out.close() for prog in ["protpars","protdist","proml","promlk"]: testprog(prog, bindir, params) out = open("infile", "wt") out.write(infiles["testdisc"]) out.close() for prog in ["pars","penny","dollop","dolpenny","clique","mix"]: testprog(prog, bindir,params) out = open("infile", "wt") out.write(infiles["testrest"]) out.close() for prog in ["restml","restdist"]: testprog(prog, bindir, params) out = open("infile", "wt") out.write(infiles["testdist"]) out.close() for prog in ["fitch","kitsch","neighbor"]: testprog(prog, bindir,params) 
out = open("intree", "wt") out.write(infiles["testtree"]) out.close() for prog in ["drawtree", "drawgram"]: params = "0\nl\nm\ny\n" testprog(prog, bindir,params) # testing the java gui versions require user interaction # Not good for automatic istallations -- comment out for now, # but keep for debug? ''' for prog in ["drawtree_gui", "drawgram_gui"]: print("testing " + prog) program = bindir+"/"+prog outfile = open(prog+".out",'wt') try: subprocess.run(program, universal_newlines=True,input=params,stdout=outfile, stderr=subprocess.PIPE, check=True) except subprocess.CalledProcessError as e: print(e) subprocess.call(["cat", prog+".out"], shell=True) raise print("passed; cleaning up") subprocess.call(["rm", "-f", "infile","plotfile.ps"])''' # Help function for testing the conda installation of phylip def testprog(prog, bindir, params): print("testing " + prog + "...",) program = bindir+"/"+prog outfile = open(prog+".out",'wt') try: process = subprocess.Popen(program, stdin=subprocess.PIPE, stdout=outfile, stderr=subprocess.STDOUT, universal_newlines=True) process.communicate(input=params) except subprocess.CalledProcessError as e: print(e) subprocess.call(["cat", prog+".out"]) raise print("passed; cleaning up") subprocess.call(["rm", "-f", "outtree", "outfile", "plotfile"]) # Content of test files for testing the conda installation of phylip infiles = { "testdna" : """ 7 232 Bovine CCAAACCTGT CCCCACCATC TAACACCAAC CCACATATAC AAGCTAAACC AAAAATACCA Mouse CCAAAAAAAC ATCCAAACAC CAACCCCAGC CCTTACGCAA TAGCCATACA AAGAATATTA Gibbon CTATACCCAC CCAACTCGAC CTACACCAAT CCCCACATAG CACACAGACC AACAACCTCC Orang CCCCACCCGT CTACACCAGC CAACACCAAC CCCCACCTAC TATACCAACC AATAACCTCT Gorilla CCCCATTTAT CCATAAAAAC CAACACCAAC CCCCATCTAA CACACAAACT AATGACCCCC Chimp CCCCATCCAC CCATACAAAC CAACATTACC CTCCATCCAA TATACAAACT AACAACCTCC Human CCCCACTCAC CCATACAAAC CAACACCACT CTCCACCTAA TATACAAATT AATAACCTCC TACTACTAAA AACTCAAATT AACTCTTTAA TCTTTATACA ACATTCCACC AACCTATCCA TACAACCATA AATAAGACTA 
ATCTATTAAA ATAACCCATT ACGATACAAA ATCCCTTTCG CACCTTCCAT ACCAAGCCCC GACTTTACCG CCAACGCACC TCATCAAAAC ATACCTACAA CAACCCCTAA ACCAAACACT ATCCCCAAAA CCAACACACT CTACCAAAAT ACACCCCCAA CACCCTCAAA GCCAAACACC AACCCTATAA TCAATACGCC TTATCAAAAC ACACCCCCAA CACTCTTCAG ACCGAACACC AATCTCACAA CCAACACGCC CCGTCAAAAC ACCCCTTCAG CACCTTCAGA ACTGAACGCC AATCTCATAA CCAACACACC CCATCAAAGC ACCCCTCCAA CACAAAAAAA CTCATATTTA TCTAAATACG AACTTCACAC AACCTTAACA CATAAACATA TCTAGATACA AACCACAACA CACAATTAAT ACACACCACA ATTACAATAC TAAACTCCCA CACAAACAAA TGCCCCCCCA CCCTCCTTCT TCAAGCCCAC TAGACCATCC TACCTTCCTA TTCACATCCG CACACCCCCA CCCCCCCTGC CCACGTCCAT CCCATCACCC TCTCCTCCCA CATAAACCCA CGCACCCCCA CCCCTTCCGC CCATGCTCAC CACATCATCT CTCCCCTTCA CACAAATTCA TACACCCCTA CCTTTCCTAC CCACGTTCAC CACATCATCC CCCCCTCTCA CACAAACCCG CACACCTCCA CCCCCCTCGT CTACGCTTAC CACGTCATCC CTCCCTCTCA CCCCAGCCCA ACACCCTTCC ACAAATCCTT AATATACGCA CCATAAATAA CA TCCCACCAAA TCACCCTCCA TCAAATCCAC AAATTACACA ACCATTAACC CA GCACGCCAAG CTCTCTACCA TCAAACGCAC AACTTACACA TACAGAACCA CA ACACCCTAAG CCACCTTCCT CAAAATCCAA AACCCACACA ACCGAAACAA CA ACACCTCAAT CCACCTCCCC CCAAATACAC AATTCACACA AACAATACCA CA ACATCTTGAC TCGCCTCTCT CCAAACACAC AATTCACGCA AACAACGCCA CA ACACCTTAAC TCACCTTCTC CCAAACGCAC AATTCGCACA CACAACGCCA CA """, "testprot" : """ 3 474 CAM ---TTETIQS NANLAPLPPH VPEHLVFDFD MYNPSN--LS AGVQEAWAVL TERP ----MDARAT IPEHIARTVI LPQGYADDEV IYPAFK--WL RDEQPLAMAH BM3 TIKEMPQPKT FGELKNLPLL NTDKPVQALM KIADELGEIF KFEAPGRVTR QESNVPDLVW TRCNGG---H WIATRGQLIR EAY-EDYRHF SSECPFIPRE IEGYDPMWIA TKHADV---M QIGKQPGLFS NAEGSEILYD QNNEAFMRSI YLS-SQRLIK EACDESRFDK NLSQALKFVR DFAGDGLFTS WTHEKNWKKA AGEAYDFIP- -TSMDPPEQR QFRALANQVV GMPVVDKLEN RIQELACSLI SGGCPHVIDS LTSMDPPTHT AYRGLTLNWF QPASIRKLEE NIRRIAQASV HNILLPSFS- -QQAMKGYHA MMVDIAVQLV QKWERLNADE HIEVPEDMTR ESLR-PQGQC NFTEDYAEPF PIRIFMLLAG LPEEDIPHLK YLTDQMT--- QRLLDFDGEC DFMTDCALYY PLHVVMTALG VPEDDEPLML KLTQDFFGVH LTLD-TIGLC GFNYRFNSFY RDQPHPFITS MVRALDEAMN KLQRANP--D RPD------- ------GSMT FAEAKEALYD YLIPIIEQRR QKP--GTDAI 
EPDEQAVAAP RQSADEAARR FHETIATFYD YFNGFTVDRR SCP--KDDVM DPAYD----- -----ENKRQ FQEDIKVMND LVDKIIADRK ASGEQSDDLL SIVANGQVN- -GRPITSDEA KRMCGLLLVG GLDTVVNFLS FSMEFLAKSP SLLANSKLD- -GNYIDDKYI NAYYVAIATA GHDTTSSSSG GAIIGLSRNP THMLNGKDPE TGEPLDDENI RYQIITFLIA GHETTSGLLS FALYFLVKNP EHRQELIERP E--------- --------RI PAACEELLRR FS-LVADGRI EQLALAKSDP A--------- --------LI PRLVDEAVRW TAPVKSFMRT HVLQKAAEEA ARVLVDPVPS YKQVKQLKYV GMVLNEALRL WPTAPAFSLY LTSDYEFHGV Q-LKKGDQIL LPQMLSGLDE REN-ACPMHV DFSRQK---- ALADTEVRGQ N-IKRGDRIM LSYPSANRDE EVF-SNPDEF DITRFP---- AKEDTVLGGE YPLEKGDELM VLIPQLHRDK TIWGDDVEEF RPERFENPSA ---VSHTTFG HGSHLCLGQH LARREIIVTL KEWLTRIPDF SIAPGAQIQH ---NRHLGFG WGAHMCLGQH LAKLEMKIFF EELLPKLKSV ELS-GPPRLV IPQHAFKPFG NGQRACIGQQ FALHEATLVL GMMLKHFDFE DHT-NYELDI KSGIVSGVQA LPLVWDPATT KAV- ATNFVGGPKN VPIRFTKA-- ---- KETLTLKPEG FVVKAKSKKI PLGG """, "testdisc" : """ 3 10 CAM 0000000000 TERP 0000011111 BM3 0001111111 """, "testrest" : """ 5 13 2 Alpha ++-+-++--+++- Beta ++++--+--+++- Gamma -+--+-++-+-++ Delta ++-+----++--- Epsilon ++++----++--- """, "testdist" : """ 7 Bovine 0.0000 1.2385 1.3472 1.2070 1.0857 1.2832 1.2402 Mouse 1.2385 0.0000 1.1231 1.0966 1.1470 1.2157 1.1530 Gibbon 1.3472 1.1231 0.0000 0.5924 0.5077 0.5466 0.5001 Orang 1.2070 1.0966 0.5924 0.0000 0.3857 0.4405 0.4092 Gorilla 1.0857 1.1470 0.5077 0.3857 0.0000 0.3170 0.2817 Chimp 1.2832 1.2157 0.5466 0.4405 0.3170 0.0000 0.2570 Human 1.2402 1.1530 0.5001 0.4092 0.2817 0.2570 0.0000 """, "testtree" : "((BM3,TERP),CAM);" } if __name__ == "__main__": print("Starting main") main() else: print("fuck")
mit
jmighion/ansible
test/legacy/cleanup_ec2.py
149
6962
''' Find and delete AWS resources matching the provided --match string. Unless --yes|-y is provided, the prompt for confirmation prior to deleting resources. Please use caution, you can easily delete you're *ENTIRE* EC2 infrastructure. ''' import boto import boto.ec2.elb import optparse import os import os.path import re import sys import time import yaml from ansible.module_utils.six.moves import input def delete_aws_resources(get_func, attr, opts): for item in get_func(): val = getattr(item, attr) if re.search(opts.match_re, val): prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes) def delete_autoscaling_group(get_func, attr, opts): assumeyes = opts.assumeyes group_name = None for item in get_func(): group_name = getattr(item, attr) if re.search(opts.match_re, group_name): if not opts.assumeyes: assumeyes = input("Delete matching %s? [y/n]: " % (item).lower()) == 'y' break if assumeyes and group_name: groups = asg.get_all_groups(names=[group_name]) if groups: group = groups[0] group.max_size = 0 group.min_size = 0 group.desired_capacity = 0 group.update() instances = True while instances: tmp_groups = asg.get_all_groups(names=[group_name]) if tmp_groups: tmp_group = tmp_groups[0] if not tmp_group.instances: instances = False time.sleep(10) group.delete() while len(asg.get_all_groups(names=[group_name])): time.sleep(5) print("Terminated ASG: %s" % group_name) def delete_aws_eips(get_func, attr, opts): # the file might not be there if the integration test wasn't run try: eip_log = open(opts.eip_log, 'r').read().splitlines() except IOError: print('%s not found.' % opts.eip_log) return for item in get_func(): val = getattr(item, attr) if val in eip_log: prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes) def delete_aws_instances(reservation, opts): for list in reservation: for item in list.instances: prompt_and_delete(item, "Delete matching %s? 
[y/n]: " % (item,), opts.assumeyes) def prompt_and_delete(item, prompt, assumeyes): if not assumeyes: assumeyes = input(prompt).lower() == 'y' assert hasattr(item, 'delete') or hasattr(item, 'terminate'), "Class <%s> has no delete or terminate attribute" % item.__class__ if assumeyes: if hasattr(item, 'delete'): item.delete() print("Deleted %s" % item) if hasattr(item, 'terminate'): item.terminate() print("Terminated %s" % item) def parse_args(): # Load details from credentials.yml default_aws_access_key = os.environ.get('AWS_ACCESS_KEY', None) default_aws_secret_key = os.environ.get('AWS_SECRET_KEY', None) if os.path.isfile('credentials.yml'): credentials = yaml.load(open('credentials.yml', 'r')) if default_aws_access_key is None: default_aws_access_key = credentials['ec2_access_key'] if default_aws_secret_key is None: default_aws_secret_key = credentials['ec2_secret_key'] parser = optparse.OptionParser( usage="%s [options]" % (sys.argv[0], ), description=__doc__ ) parser.add_option( "--access", action="store", dest="ec2_access_key", default=default_aws_access_key, help="Amazon ec2 access id. Can use EC2_ACCESS_KEY environment variable, or a values from credentials.yml." ) parser.add_option( "--secret", action="store", dest="ec2_secret_key", default=default_aws_secret_key, help="Amazon ec2 secret key. Can use EC2_SECRET_KEY environment variable, or a values from credentials.yml." ) parser.add_option( "--eip-log", action="store", dest="eip_log", default=None, help="Path to log of EIPs created during test." 
) parser.add_option( "--integration-config", action="store", dest="int_config", default="integration_config.yml", help="path to integration config" ) parser.add_option( "--credentials", "-c", action="store", dest="credential_file", default="credentials.yml", help="YAML file to read cloud credentials (default: %default)" ) parser.add_option( "--yes", "-y", action="store_true", dest="assumeyes", default=False, help="Don't prompt for confirmation" ) parser.add_option( "--match", action="store", dest="match_re", default="^ansible-testing-", help="Regular expression used to find AWS resources (default: %default)" ) (opts, args) = parser.parse_args() for required in ['ec2_access_key', 'ec2_secret_key']: if getattr(opts, required) is None: parser.error("Missing required parameter: --%s" % required) return (opts, args) if __name__ == '__main__': (opts, args) = parse_args() int_config = yaml.load(open(opts.int_config).read()) if not opts.eip_log: output_dir = os.path.expanduser(int_config["output_dir"]) opts.eip_log = output_dir + '/' + opts.match_re.replace('^', '') + '-eip_integration_tests.log' # Connect to AWS aws = boto.connect_ec2(aws_access_key_id=opts.ec2_access_key, aws_secret_access_key=opts.ec2_secret_key) elb = boto.connect_elb(aws_access_key_id=opts.ec2_access_key, aws_secret_access_key=opts.ec2_secret_key) asg = boto.connect_autoscale(aws_access_key_id=opts.ec2_access_key, aws_secret_access_key=opts.ec2_secret_key) try: # Delete matching keys delete_aws_resources(aws.get_all_key_pairs, 'name', opts) # Delete matching security groups delete_aws_resources(aws.get_all_security_groups, 'name', opts) # Delete matching ASGs delete_autoscaling_group(asg.get_all_groups, 'name', opts) # Delete matching launch configs delete_aws_resources(asg.get_all_launch_configurations, 'name', opts) # Delete ELBs delete_aws_resources(elb.get_all_load_balancers, 'name', opts) # Delete recorded EIPs delete_aws_eips(aws.get_all_addresses, 'public_ip', opts) # Delete temporary instances 
filters = {"tag:Name": opts.match_re.replace('^', ''), "instance-state-name": ['running', 'pending', 'stopped']} delete_aws_instances(aws.get_all_instances(filters=filters), opts) except KeyboardInterrupt as e: print("\nExiting on user command.")
gpl-3.0
jamesob/bitcoin
test/functional/p2p_eviction.py
15
5741
#!/usr/bin/env python3 # Copyright (c) 2019-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Test node eviction logic When the number of peers has reached the limit of maximum connections, the next connecting inbound peer will trigger the eviction mechanism. We cannot currently test the parts of the eviction logic that are based on address/netgroup since in the current framework, all peers are connecting from the same local address. See Issue #14210 for more info. Therefore, this test is limited to the remaining protection criteria. """ import time from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import CTransaction, FromHex, msg_pong, msg_tx from test_framework.p2p import P2PDataStore, P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class SlowP2PDataStore(P2PDataStore): def on_ping(self, message): time.sleep(0.1) self.send_message(msg_pong(message.nonce)) class SlowP2PInterface(P2PInterface): def on_ping(self, message): time.sleep(0.1) self.send_message(msg_pong(message.nonce)) class P2PEvict(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 # The choice of maxconnections=32 results in a maximum of 21 inbound connections # (32 - 10 outbound - 1 feeler). 
20 inbound peers are protected from eviction: # 4 by netgroup, 4 that sent us blocks, 4 that sent us transactions and 8 via lowest ping time self.extra_args = [['-maxconnections=32']] def run_test(self): protected_peers = set() # peers that we expect to be protected from eviction current_peer = -1 node = self.nodes[0] node.generatetoaddress(101, node.get_deterministic_priv_key().address) self.log.info("Create 4 peers and protect them from eviction by sending us a block") for _ in range(4): block_peer = node.add_p2p_connection(SlowP2PDataStore()) current_peer += 1 block_peer.sync_with_ping() best_block = node.getbestblockhash() tip = int(best_block, 16) best_block_time = node.getblock(best_block)['time'] block = create_block(tip, create_coinbase(node.getblockcount() + 1), best_block_time + 1) block.solve() block_peer.send_blocks_and_test([block], node, success=True) protected_peers.add(current_peer) self.log.info("Create 5 slow-pinging peers, making them eviction candidates") for _ in range(5): node.add_p2p_connection(SlowP2PInterface()) current_peer += 1 self.log.info("Create 4 peers and protect them from eviction by sending us a tx") for i in range(4): txpeer = node.add_p2p_connection(SlowP2PInterface()) current_peer += 1 txpeer.sync_with_ping() prevtx = node.getblock(node.getblockhash(i + 1), 2)['tx'][0] rawtx = node.createrawtransaction( inputs=[{'txid': prevtx['txid'], 'vout': 0}], outputs=[{node.get_deterministic_priv_key().address: 50 - 0.00125}], ) sigtx = node.signrawtransactionwithkey( hexstring=rawtx, privkeys=[node.get_deterministic_priv_key().key], prevtxs=[{ 'txid': prevtx['txid'], 'vout': 0, 'scriptPubKey': prevtx['vout'][0]['scriptPubKey']['hex'], }], )['hex'] txpeer.send_message(msg_tx(FromHex(CTransaction(), sigtx))) protected_peers.add(current_peer) self.log.info("Create 8 peers and protect them from eviction by having faster pings") for _ in range(8): fastpeer = node.add_p2p_connection(P2PInterface()) current_peer += 1 self.wait_until(lambda: 
"ping" in fastpeer.last_message, timeout=10) # Make sure by asking the node what the actual min pings are peerinfo = node.getpeerinfo() pings = {} for i in range(len(peerinfo)): pings[i] = peerinfo[i]['minping'] if 'minping' in peerinfo[i] else 1000000 sorted_pings = sorted(pings.items(), key=lambda x: x[1]) # Usually the 8 fast peers are protected. In rare case of unreliable pings, # one of the slower peers might have a faster min ping though. for i in range(8): protected_peers.add(sorted_pings[i][0]) self.log.info("Create peer that triggers the eviction mechanism") node.add_p2p_connection(SlowP2PInterface()) # One of the non-protected peers must be evicted. We can't be sure which one because # 4 peers are protected via netgroup, which is identical for all peers, # and the eviction mechanism doesn't preserve the order of identical elements. evicted_peers = [] for i in range(len(node.p2ps)): if not node.p2ps[i].is_connected: evicted_peers.append(i) self.log.info("Test that one peer was evicted") self.log.debug("{} evicted peer: {}".format(len(evicted_peers), set(evicted_peers))) assert_equal(len(evicted_peers), 1) self.log.info("Test that no peer expected to be protected was evicted") self.log.debug("{} protected peers: {}".format(len(protected_peers), protected_peers)) assert evicted_peers[0] not in protected_peers if __name__ == '__main__': P2PEvict().main()
mit
malayaleecoder/servo
tests/wpt/harness/wptrunner/browsers/chrome.py
99
2845
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. from .base import Browser, ExecutorBrowser, require_arg from ..webdriver_server import ChromeDriverServer from ..executors import executor_kwargs as base_executor_kwargs from ..executors.executorselenium import (SeleniumTestharnessExecutor, SeleniumRefTestExecutor) __wptrunner__ = {"product": "chrome", "check_args": "check_args", "browser": "ChromeBrowser", "executor": {"testharness": "SeleniumTestharnessExecutor", "reftest": "SeleniumRefTestExecutor"}, "browser_kwargs": "browser_kwargs", "executor_kwargs": "executor_kwargs", "env_options": "env_options"} def check_args(**kwargs): require_arg(kwargs, "webdriver_binary") def browser_kwargs(**kwargs): return {"binary": kwargs["binary"], "webdriver_binary": kwargs["webdriver_binary"]} def executor_kwargs(test_type, server_config, cache_manager, run_info_data, **kwargs): from selenium.webdriver import DesiredCapabilities executor_kwargs = base_executor_kwargs(test_type, server_config, cache_manager, **kwargs) executor_kwargs["close_after_done"] = True executor_kwargs["capabilities"] = dict(DesiredCapabilities.CHROME.items()) if kwargs["binary"] is not None: executor_kwargs["capabilities"]["chromeOptions"] = {"binary": kwargs["binary"]} return executor_kwargs def env_options(): return {"host": "web-platform.test", "bind_hostname": "true"} class ChromeBrowser(Browser): """Chrome is backed by chromedriver, which is supplied through ``wptrunner.webdriver.ChromeDriverServer``. """ def __init__(self, logger, binary, webdriver_binary="chromedriver"): """Creates a new representation of Chrome. 
The `binary` argument gives the browser binary to use for testing.""" Browser.__init__(self, logger) self.binary = binary self.server = ChromeDriverServer(self.logger, binary=webdriver_binary) def start(self): self.server.start(block=False) def stop(self): self.server.stop() def pid(self): return self.server.pid def is_alive(self): # TODO(ato): This only indicates the driver is alive, # and doesn't say anything about whether a browser session # is active. return self.server.is_alive() def cleanup(self): self.stop() def executor_browser(self): return ExecutorBrowser, {"webdriver_url": self.server.url}
mpl-2.0
newswangerd/ansible
lib/ansible/__init__.py
23
1299
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type # make vendored top-level modules accessible EARLY import ansible._vendor # Note: Do not add any code to this file. The ansible module may be # a namespace package when using Ansible-2.1+ Anything in this file may not be # available if one of the other packages in the namespace is loaded first. # # This is for backwards compat. Code should be ported to get these from # ansible.release instead of from here. from ansible.release import __version__, __author__
gpl-3.0
denisshockwave/image_processing_ocr_server
venv/lib/python2.7/site-packages/jinja2/environment.py
332
48120
# -*- coding: utf-8 -*- """ jinja2.environment ~~~~~~~~~~~~~~~~~~ Provides a class that holds runtime and parsing time options. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ import os import sys from jinja2 import nodes from jinja2.defaults import BLOCK_START_STRING, \ BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \ COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \ LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \ DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \ KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS from jinja2.lexer import get_lexer, TokenStream from jinja2.parser import Parser from jinja2.nodes import EvalContext from jinja2.optimizer import optimize from jinja2.compiler import generate, CodeGenerator from jinja2.runtime import Undefined, new_context, Context from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \ TemplatesNotFound, TemplateRuntimeError from jinja2.utils import import_string, LRUCache, Markup, missing, \ concat, consume, internalcode from jinja2._compat import imap, ifilter, string_types, iteritems, \ text_type, reraise, implements_iterator, implements_to_string, \ get_next, encode_filename, PY2, PYPY from functools import reduce # for direct template usage we have up to ten living environments _spontaneous_environments = LRUCache(10) # the function to create jinja traceback objects. This is dynamically # imported on the first exception in the exception handler. _make_traceback = None def get_spontaneous_environment(*args): """Return a new spontaneous environment. A spontaneous environment is an unnamed and unaccessible (in theory) environment that is used for templates generated from a string and not from the file system. 
""" try: env = _spontaneous_environments.get(args) except TypeError: return Environment(*args) if env is not None: return env _spontaneous_environments[args] = env = Environment(*args) env.shared = True return env def create_cache(size): """Return the cache class for the given size.""" if size == 0: return None if size < 0: return {} return LRUCache(size) def copy_cache(cache): """Create an empty copy of the given cache.""" if cache is None: return None elif type(cache) is dict: return {} return LRUCache(cache.capacity) def load_extensions(environment, extensions): """Load the extensions from the list and bind it to the environment. Returns a dict of instantiated environments. """ result = {} for extension in extensions: if isinstance(extension, string_types): extension = import_string(extension) result[extension.identifier] = extension(environment) return result def _environment_sanity_check(environment): """Perform a sanity check on the environment.""" assert issubclass(environment.undefined, Undefined), 'undefined must ' \ 'be a subclass of undefined because filters depend on it.' assert environment.block_start_string != \ environment.variable_start_string != \ environment.comment_start_string, 'block, variable and comment ' \ 'start strings must be different' assert environment.newline_sequence in ('\r', '\r\n', '\n'), \ 'newline_sequence set to unknown line ending string.' return environment class Environment(object): r"""The core component of Jinja is the `Environment`. It contains important shared variables like configuration, filters, tests, globals and others. Instances of this class may be modified if they are not shared and if no template was loaded so far. Modifications on environments after the first template was loaded will lead to surprising effects and undefined behavior. Here are the possible initialization parameters: `block_start_string` The string marking the beginning of a block. Defaults to ``'{%'``. 
`block_end_string` The string marking the end of a block. Defaults to ``'%}'``. `variable_start_string` The string marking the beginning of a print statement. Defaults to ``'{{'``. `variable_end_string` The string marking the end of a print statement. Defaults to ``'}}'``. `comment_start_string` The string marking the beginning of a comment. Defaults to ``'{#'``. `comment_end_string` The string marking the end of a comment. Defaults to ``'#}'``. `line_statement_prefix` If given and a string, this will be used as prefix for line based statements. See also :ref:`line-statements`. `line_comment_prefix` If given and a string, this will be used as prefix for line based comments. See also :ref:`line-statements`. .. versionadded:: 2.2 `trim_blocks` If this is set to ``True`` the first newline after a block is removed (block, not variable tag!). Defaults to `False`. `lstrip_blocks` If this is set to ``True`` leading spaces and tabs are stripped from the start of a line to a block. Defaults to `False`. `newline_sequence` The sequence that starts a newline. Must be one of ``'\r'``, ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a useful default for Linux and OS X systems as well as web applications. `keep_trailing_newline` Preserve the trailing newline when rendering templates. The default is ``False``, which causes a single newline, if present, to be stripped from the end of the template. .. versionadded:: 2.7 `extensions` List of Jinja extensions to use. This can either be import paths as strings or extension classes. For more information have a look at :ref:`the extensions documentation <jinja-extensions>`. `optimized` should the optimizer be enabled? Default is `True`. `undefined` :class:`Undefined` or a subclass of it that is used to represent undefined values in the template. `finalize` A callable that can be used to process the result of a variable expression before it is output. For example one can convert `None` implicitly into an empty string here. 
`autoescape` If set to true the XML/HTML autoescaping feature is enabled by default. For more details about autoescaping see :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also be a callable that is passed the template name and has to return `True` or `False` depending on autoescape should be enabled by default. .. versionchanged:: 2.4 `autoescape` can now be a function `loader` The template loader for this environment. `cache_size` The size of the cache. Per default this is ``400`` which means that if more than 400 templates are loaded the loader will clean out the least recently used template. If the cache size is set to ``0`` templates are recompiled all the time, if the cache size is ``-1`` the cache will not be cleaned. .. versionchanged:: 2.8 The cache size was increased to 400 from a low 50. `auto_reload` Some loaders load templates from locations where the template sources may change (ie: file system or database). If `auto_reload` is set to `True` (default) every time a template is requested the loader checks if the source changed and if yes, it will reload the template. For higher performance it's possible to disable that. `bytecode_cache` If set to a bytecode cache object, this object will provide a cache for the internal Jinja bytecode so that templates don't have to be parsed if they were not changed. See :ref:`bytecode-cache` for more information. """ #: if this environment is sandboxed. Modifying this variable won't make #: the environment sandboxed though. For a real sandboxed environment #: have a look at jinja2.sandbox. This flag alone controls the code #: generation by the compiler. sandboxed = False #: True if the environment is just an overlay overlayed = False #: the environment this environment is linked to if it is an overlay linked_to = None #: shared environments have this set to `True`. A shared environment #: must not be modified shared = False #: these are currently EXPERIMENTAL undocumented features. 
exception_handler = None exception_formatter = None #: the class that is used for code generation. See #: :class:`~jinja2.compiler.CodeGenerator` for more information. code_generator_class = CodeGenerator #: the context class thatis used for templates. See #: :class:`~jinja2.runtime.Context` for more information. context_class = Context def __init__(self, block_start_string=BLOCK_START_STRING, block_end_string=BLOCK_END_STRING, variable_start_string=VARIABLE_START_STRING, variable_end_string=VARIABLE_END_STRING, comment_start_string=COMMENT_START_STRING, comment_end_string=COMMENT_END_STRING, line_statement_prefix=LINE_STATEMENT_PREFIX, line_comment_prefix=LINE_COMMENT_PREFIX, trim_blocks=TRIM_BLOCKS, lstrip_blocks=LSTRIP_BLOCKS, newline_sequence=NEWLINE_SEQUENCE, keep_trailing_newline=KEEP_TRAILING_NEWLINE, extensions=(), optimized=True, undefined=Undefined, finalize=None, autoescape=False, loader=None, cache_size=400, auto_reload=True, bytecode_cache=None): # !!Important notice!! # The constructor accepts quite a few arguments that should be # passed by keyword rather than position. However it's important to # not change the order of arguments because it's used at least # internally in those cases: # - spontaneous environments (i18n extension and Template) # - unittests # If parameter changes are required only add parameters at the end # and don't change the arguments (or the defaults!) of the arguments # existing already. 
# lexer / parser information self.block_start_string = block_start_string self.block_end_string = block_end_string self.variable_start_string = variable_start_string self.variable_end_string = variable_end_string self.comment_start_string = comment_start_string self.comment_end_string = comment_end_string self.line_statement_prefix = line_statement_prefix self.line_comment_prefix = line_comment_prefix self.trim_blocks = trim_blocks self.lstrip_blocks = lstrip_blocks self.newline_sequence = newline_sequence self.keep_trailing_newline = keep_trailing_newline # runtime information self.undefined = undefined self.optimized = optimized self.finalize = finalize self.autoescape = autoescape # defaults self.filters = DEFAULT_FILTERS.copy() self.tests = DEFAULT_TESTS.copy() self.globals = DEFAULT_NAMESPACE.copy() # set the loader provided self.loader = loader self.cache = create_cache(cache_size) self.bytecode_cache = bytecode_cache self.auto_reload = auto_reload # load extensions self.extensions = load_extensions(self, extensions) _environment_sanity_check(self) def add_extension(self, extension): """Adds an extension after the environment was created. .. versionadded:: 2.5 """ self.extensions.update(load_extensions(self, [extension])) def extend(self, **attributes): """Add the items to the instance of the environment if they do not exist yet. This is used by :ref:`extensions <writing-extensions>` to register callbacks and configuration values without breaking inheritance. 
""" for key, value in iteritems(attributes): if not hasattr(self, key): setattr(self, key, value) def overlay(self, block_start_string=missing, block_end_string=missing, variable_start_string=missing, variable_end_string=missing, comment_start_string=missing, comment_end_string=missing, line_statement_prefix=missing, line_comment_prefix=missing, trim_blocks=missing, lstrip_blocks=missing, extensions=missing, optimized=missing, undefined=missing, finalize=missing, autoescape=missing, loader=missing, cache_size=missing, auto_reload=missing, bytecode_cache=missing): """Create a new overlay environment that shares all the data with the current environment except for cache and the overridden attributes. Extensions cannot be removed for an overlayed environment. An overlayed environment automatically gets all the extensions of the environment it is linked to plus optional extra extensions. Creating overlays should happen after the initial environment was set up completely. Not all attributes are truly linked, some are just copied over so modifications on the original environment may not shine through. 
""" args = dict(locals()) del args['self'], args['cache_size'], args['extensions'] rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.overlayed = True rv.linked_to = self for key, value in iteritems(args): if value is not missing: setattr(rv, key, value) if cache_size is not missing: rv.cache = create_cache(cache_size) else: rv.cache = copy_cache(self.cache) rv.extensions = {} for key, value in iteritems(self.extensions): rv.extensions[key] = value.bind(rv) if extensions is not missing: rv.extensions.update(load_extensions(rv, extensions)) return _environment_sanity_check(rv) lexer = property(get_lexer, doc="The lexer for this environment.") def iter_extensions(self): """Iterates over the extensions by priority.""" return iter(sorted(self.extensions.values(), key=lambda x: x.priority)) def getitem(self, obj, argument): """Get an item or attribute of an object but prefer the item.""" try: return obj[argument] except (TypeError, LookupError): if isinstance(argument, string_types): try: attr = str(argument) except Exception: pass else: try: return getattr(obj, attr) except AttributeError: pass return self.undefined(obj=obj, name=argument) def getattr(self, obj, attribute): """Get an item or attribute of an object but prefer the attribute. Unlike :meth:`getitem` the attribute *must* be a bytestring. """ try: return getattr(obj, attribute) except AttributeError: pass try: return obj[attribute] except (TypeError, LookupError, AttributeError): return self.undefined(obj=obj, name=attribute) def call_filter(self, name, value, args=None, kwargs=None, context=None, eval_ctx=None): """Invokes a filter on a value the same way the compiler does it. .. 
versionadded:: 2.7 """ func = self.filters.get(name) if func is None: raise TemplateRuntimeError('no filter named %r' % name) args = [value] + list(args or ()) if getattr(func, 'contextfilter', False): if context is None: raise TemplateRuntimeError('Attempted to invoke context ' 'filter without context') args.insert(0, context) elif getattr(func, 'evalcontextfilter', False): if eval_ctx is None: if context is not None: eval_ctx = context.eval_ctx else: eval_ctx = EvalContext(self) args.insert(0, eval_ctx) elif getattr(func, 'environmentfilter', False): args.insert(0, self) return func(*args, **(kwargs or {})) def call_test(self, name, value, args=None, kwargs=None): """Invokes a test on a value the same way the compiler does it. .. versionadded:: 2.7 """ func = self.tests.get(name) if func is None: raise TemplateRuntimeError('no test named %r' % name) return func(value, *(args or ()), **(kwargs or {})) @internalcode def parse(self, source, name=None, filename=None): """Parse the sourcecode and return the abstract syntax tree. This tree of nodes is used by the compiler to convert the template into executable source- or bytecode. This is useful for debugging or to extract information from templates. If you are :ref:`developing Jinja2 extensions <writing-extensions>` this gives you a good overview of the node tree generated. """ try: return self._parse(source, name, filename) except TemplateSyntaxError: exc_info = sys.exc_info() self.handle_exception(exc_info, source_hint=source) def _parse(self, source, name, filename): """Internal parsing function used by `parse` and `compile`.""" return Parser(self, source, name, encode_filename(filename)).parse() def lex(self, source, name=None, filename=None): """Lex the given sourcecode and return a generator that yields tokens as tuples in the form ``(lineno, token_type, value)``. This can be useful for :ref:`extension development <writing-extensions>` and debugging templates. This does not perform preprocessing. 
If you want the preprocessing of the extensions to be applied you have to filter source through the :meth:`preprocess` method. """ source = text_type(source) try: return self.lexer.tokeniter(source, name, filename) except TemplateSyntaxError: exc_info = sys.exc_info() self.handle_exception(exc_info, source_hint=source) def preprocess(self, source, name=None, filename=None): """Preprocesses the source with all extensions. This is automatically called for all parsing and compiling methods but *not* for :meth:`lex` because there you usually only want the actual source tokenized. """ return reduce(lambda s, e: e.preprocess(s, name, filename), self.iter_extensions(), text_type(source)) def _tokenize(self, source, name, filename=None, state=None): """Called by the parser to do the preprocessing and filtering for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`. """ source = self.preprocess(source, name, filename) stream = self.lexer.tokenize(source, name, filename, state) for ext in self.iter_extensions(): stream = ext.filter_stream(stream) if not isinstance(stream, TokenStream): stream = TokenStream(stream, name, filename) return stream def _generate(self, source, name, filename, defer_init=False): """Internal hook that can be overridden to hook a different generate method in. .. versionadded:: 2.5 """ return generate(source, self, name, filename, defer_init=defer_init) def _compile(self, source, filename): """Internal hook that can be overridden to hook a different compile method in. .. versionadded:: 2.5 """ return compile(source, filename, 'exec') @internalcode def compile(self, source, name=None, filename=None, raw=False, defer_init=False): """Compile a node or template source code. The `name` parameter is the load name of the template after it was joined using :meth:`join_path` if necessary, not the filename on the file system. the `filename` parameter is the estimated filename of the template on the file system. 
If the template came from a database or memory this can be omitted. The return value of this method is a python code object. If the `raw` parameter is `True` the return value will be a string with python code equivalent to the bytecode returned otherwise. This method is mainly used internally. `defer_init` is use internally to aid the module code generator. This causes the generated code to be able to import without the global environment variable to be set. .. versionadded:: 2.4 `defer_init` parameter added. """ source_hint = None try: if isinstance(source, string_types): source_hint = source source = self._parse(source, name, filename) if self.optimized: source = optimize(source, self) source = self._generate(source, name, filename, defer_init=defer_init) if raw: return source if filename is None: filename = '<template>' else: filename = encode_filename(filename) return self._compile(source, filename) except TemplateSyntaxError: exc_info = sys.exc_info() self.handle_exception(exc_info, source_hint=source_hint) def compile_expression(self, source, undefined_to_none=True): """A handy helper method that returns a callable that accepts keyword arguments that appear as variables in the expression. If called it returns the result of the expression. This is useful if applications want to use the same rules as Jinja in template "configuration files" or similar situations. Example usage: >>> env = Environment() >>> expr = env.compile_expression('foo == 42') >>> expr(foo=23) False >>> expr(foo=42) True Per default the return value is converted to `None` if the expression returns an undefined value. This can be changed by setting `undefined_to_none` to `False`. >>> env.compile_expression('var')() is None True >>> env.compile_expression('var', undefined_to_none=False)() Undefined .. 
versionadded:: 2.1 """ parser = Parser(self, source, state='variable') exc_info = None try: expr = parser.parse_expression() if not parser.stream.eos: raise TemplateSyntaxError('chunk after expression', parser.stream.current.lineno, None, None) expr.set_environment(self) except TemplateSyntaxError: exc_info = sys.exc_info() if exc_info is not None: self.handle_exception(exc_info, source_hint=source) body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)] template = self.from_string(nodes.Template(body, lineno=1)) return TemplateExpression(template, undefined_to_none) def compile_templates(self, target, extensions=None, filter_func=None, zip='deflated', log_function=None, ignore_errors=True, py_compile=False): """Finds all the templates the loader can find, compiles them and stores them in `target`. If `zip` is `None`, instead of in a zipfile, the templates will be stored in a directory. By default a deflate zip algorithm is used. To switch to the stored algorithm, `zip` can be set to ``'stored'``. `extensions` and `filter_func` are passed to :meth:`list_templates`. Each template returned will be compiled to the target folder or zipfile. By default template compilation errors are ignored. In case a log function is provided, errors are logged. If you want template syntax errors to abort the compilation you can set `ignore_errors` to `False` and you will get an exception on syntax errors. If `py_compile` is set to `True` .pyc files will be written to the target instead of standard .py files. This flag does not do anything on pypy and Python 3 where pyc files are not picked up by itself and don't give much benefit. .. 
versionadded:: 2.4 """ from jinja2.loaders import ModuleLoader if log_function is None: log_function = lambda x: None if py_compile: if not PY2 or PYPY: from warnings import warn warn(Warning('py_compile has no effect on pypy or Python 3')) py_compile = False else: import imp import marshal py_header = imp.get_magic() + \ u'\xff\xff\xff\xff'.encode('iso-8859-15') # Python 3.3 added a source filesize to the header if sys.version_info >= (3, 3): py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15') def write_file(filename, data, mode): if zip: info = ZipInfo(filename) info.external_attr = 0o755 << 16 zip_file.writestr(info, data) else: f = open(os.path.join(target, filename), mode) try: f.write(data) finally: f.close() if zip is not None: from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip]) log_function('Compiling into Zip archive "%s"' % target) else: if not os.path.isdir(target): os.makedirs(target) log_function('Compiling into folder "%s"' % target) try: for name in self.list_templates(extensions, filter_func): source, filename, _ = self.loader.get_source(self, name) try: code = self.compile(source, name, filename, True, True) except TemplateSyntaxError as e: if not ignore_errors: raise log_function('Could not compile "%s": %s' % (name, e)) continue filename = ModuleLoader.get_module_filename(name) if py_compile: c = self._compile(code, encode_filename(filename)) write_file(filename + 'c', py_header + marshal.dumps(c), 'wb') log_function('Byte-compiled "%s" as %s' % (name, filename + 'c')) else: write_file(filename, code, 'w') log_function('Compiled "%s" as %s' % (name, filename)) finally: if zip: zip_file.close() log_function('Finished compiling templates') def list_templates(self, extensions=None, filter_func=None): """Returns a list of templates for this environment. This requires that the loader supports the loader's :meth:`~BaseLoader.list_templates` method. 
If there are other files in the template folder besides the actual templates, the returned list can be filtered. There are two ways: either `extensions` is set to a list of file extensions for templates, or a `filter_func` can be provided which is a callable that is passed a template name and should return `True` if it should end up in the result list. If the loader does not support that, a :exc:`TypeError` is raised. .. versionadded:: 2.4 """ x = self.loader.list_templates() if extensions is not None: if filter_func is not None: raise TypeError('either extensions or filter_func ' 'can be passed, but not both') filter_func = lambda x: '.' in x and \ x.rsplit('.', 1)[1] in extensions if filter_func is not None: x = list(ifilter(filter_func, x)) return x def handle_exception(self, exc_info=None, rendered=False, source_hint=None): """Exception handling helper. This is used internally to either raise rewritten exceptions or return a rendered traceback for the template. """ global _make_traceback if exc_info is None: exc_info = sys.exc_info() # the debugging module is imported when it's used for the first time. # we're doing a lot of stuff there and for applications that do not # get any exceptions in template rendering there is no need to load # all of that. if _make_traceback is None: from jinja2.debug import make_traceback as _make_traceback traceback = _make_traceback(exc_info, source_hint) if rendered and self.exception_formatter is not None: return self.exception_formatter(traceback) if self.exception_handler is not None: self.exception_handler(traceback) exc_type, exc_value, tb = traceback.standard_exc_info reraise(exc_type, exc_value, tb) def join_path(self, template, parent): """Join a template with the parent. By default all the lookups are relative to the loader root so this method returns the `template` parameter unchanged, but if the paths should be relative to the parent template, this function can be used to calculate the real template name. 
Subclasses may override this method and implement template path joining here. """ return template @internalcode def _load_template(self, name, globals): if self.loader is None: raise TypeError('no loader for this environment specified') try: # use abs path for cache key cache_key = self.loader.get_source(self, name)[1] except RuntimeError: # if loader does not implement get_source() cache_key = None # if template is not file, use name for cache key if cache_key is None: cache_key = name if self.cache is not None: template = self.cache.get(cache_key) if template is not None and (not self.auto_reload or template.is_up_to_date): return template template = self.loader.load(self, name, globals) if self.cache is not None: self.cache[cache_key] = template return template @internalcode def get_template(self, name, parent=None, globals=None): """Load a template from the loader. If a loader is configured this method ask the loader for the template and returns a :class:`Template`. If the `parent` parameter is not `None`, :meth:`join_path` is called to get the real template name before loading. The `globals` parameter can be used to provide template wide globals. These variables are available in the context at render time. If the template does not exist a :exc:`TemplateNotFound` exception is raised. .. versionchanged:: 2.4 If `name` is a :class:`Template` object it is returned from the function unchanged. """ if isinstance(name, Template): return name if parent is not None: name = self.join_path(name, parent) return self._load_template(name, self.make_globals(globals)) @internalcode def select_template(self, names, parent=None, globals=None): """Works like :meth:`get_template` but tries a number of templates before it fails. If it cannot find any of the templates, it will raise a :exc:`TemplatesNotFound` exception. .. versionadded:: 2.3 .. versionchanged:: 2.4 If `names` contains a :class:`Template` object it is returned from the function unchanged. 
""" if not names: raise TemplatesNotFound(message=u'Tried to select from an empty list ' u'of templates.') globals = self.make_globals(globals) for name in names: if isinstance(name, Template): return name if parent is not None: name = self.join_path(name, parent) try: return self._load_template(name, globals) except TemplateNotFound: pass raise TemplatesNotFound(names) @internalcode def get_or_select_template(self, template_name_or_list, parent=None, globals=None): """Does a typecheck and dispatches to :meth:`select_template` if an iterable of template names is given, otherwise to :meth:`get_template`. .. versionadded:: 2.3 """ if isinstance(template_name_or_list, string_types): return self.get_template(template_name_or_list, parent, globals) elif isinstance(template_name_or_list, Template): return template_name_or_list return self.select_template(template_name_or_list, parent, globals) def from_string(self, source, globals=None, template_class=None): """Load a template from a string. This parses the source given and returns a :class:`Template` object. """ globals = self.make_globals(globals) cls = template_class or self.template_class return cls.from_code(self, self.compile(source), globals, None) def make_globals(self, d): """Return a dict for the globals.""" if not d: return self.globals return dict(self.globals, **d) class Template(object): """The central template object. This class represents a compiled template and is used to evaluate it. Normally the template object is generated from an :class:`Environment` but it also has a constructor that makes it possible to create a template instance directly using the constructor. It takes the same arguments as the environment constructor but it's not possible to specify a loader. Every template object has a few methods and members that are guaranteed to exist. However it's important that a template object should be considered immutable. Modifications on the object are not supported. 
Template objects created from the constructor rather than an environment do have an `environment` attribute that points to a temporary environment that is probably shared with other templates created with the constructor and compatible settings. >>> template = Template('Hello {{ name }}!') >>> template.render(name='John Doe') == u'Hello John Doe!' True >>> stream = template.stream(name='John Doe') >>> next(stream) == u'Hello John Doe!' True >>> next(stream) Traceback (most recent call last): ... StopIteration """ def __new__(cls, source, block_start_string=BLOCK_START_STRING, block_end_string=BLOCK_END_STRING, variable_start_string=VARIABLE_START_STRING, variable_end_string=VARIABLE_END_STRING, comment_start_string=COMMENT_START_STRING, comment_end_string=COMMENT_END_STRING, line_statement_prefix=LINE_STATEMENT_PREFIX, line_comment_prefix=LINE_COMMENT_PREFIX, trim_blocks=TRIM_BLOCKS, lstrip_blocks=LSTRIP_BLOCKS, newline_sequence=NEWLINE_SEQUENCE, keep_trailing_newline=KEEP_TRAILING_NEWLINE, extensions=(), optimized=True, undefined=Undefined, finalize=None, autoescape=False): env = get_spontaneous_environment( block_start_string, block_end_string, variable_start_string, variable_end_string, comment_start_string, comment_end_string, line_statement_prefix, line_comment_prefix, trim_blocks, lstrip_blocks, newline_sequence, keep_trailing_newline, frozenset(extensions), optimized, undefined, finalize, autoescape, None, 0, False, None) return env.from_string(source, template_class=cls) @classmethod def from_code(cls, environment, code, globals, uptodate=None): """Creates a template object from compiled code and the globals. This is used by the loaders and environment to create a template object. 
""" namespace = { 'environment': environment, '__file__': code.co_filename } exec(code, namespace) rv = cls._from_namespace(environment, namespace, globals) rv._uptodate = uptodate return rv @classmethod def from_module_dict(cls, environment, module_dict, globals): """Creates a template object from a module. This is used by the module loader to create a template object. .. versionadded:: 2.4 """ return cls._from_namespace(environment, module_dict, globals) @classmethod def _from_namespace(cls, environment, namespace, globals): t = object.__new__(cls) t.environment = environment t.globals = globals t.name = namespace['name'] t.filename = namespace['__file__'] t.blocks = namespace['blocks'] # render function and module t.root_render_func = namespace['root'] t._module = None # debug and loader helpers t._debug_info = namespace['debug_info'] t._uptodate = None # store the reference namespace['environment'] = environment namespace['__jinja_template__'] = t return t def render(self, *args, **kwargs): """This method accepts the same arguments as the `dict` constructor: A dict, a dict subclass or some keyword arguments. If no arguments are given the context will be empty. These two calls do the same:: template.render(knights='that say nih') template.render({'knights': 'that say nih'}) This will return the rendered template as unicode string. """ vars = dict(*args, **kwargs) try: return concat(self.root_render_func(self.new_context(vars))) except Exception: exc_info = sys.exc_info() return self.environment.handle_exception(exc_info, True) def stream(self, *args, **kwargs): """Works exactly like :meth:`generate` but returns a :class:`TemplateStream`. """ return TemplateStream(self.generate(*args, **kwargs)) def generate(self, *args, **kwargs): """For very large templates it can be useful to not render the whole template at once but evaluate each statement after another and yield piece for piece. 
This method basically does exactly that and returns a generator that yields one item after another as unicode strings. It accepts the same arguments as :meth:`render`. """ vars = dict(*args, **kwargs) try: for event in self.root_render_func(self.new_context(vars)): yield event except Exception: exc_info = sys.exc_info() else: return yield self.environment.handle_exception(exc_info, True) def new_context(self, vars=None, shared=False, locals=None): """Create a new :class:`Context` for this template. The vars provided will be passed to the template. Per default the globals are added to the context. If shared is set to `True` the data is passed as it to the context without adding the globals. `locals` can be a dict of local variables for internal usage. """ return new_context(self.environment, self.name, self.blocks, vars, shared, self.globals, locals) def make_module(self, vars=None, shared=False, locals=None): """This method works like the :attr:`module` attribute when called without arguments but it will evaluate the template on every call rather than caching it. It's also possible to provide a dict which is then used as context. The arguments are the same as for the :meth:`new_context` method. """ return TemplateModule(self, self.new_context(vars, shared, locals)) @property def module(self): """The template as module. This is used for imports in the template runtime but is also useful if one wants to access exported template variables from the Python layer: >>> t = Template('{% macro foo() %}42{% endmacro %}23') >>> str(t.module) '23' >>> t.module.foo() == u'42' True """ if self._module is not None: return self._module self._module = rv = self.make_module() return rv def get_corresponding_lineno(self, lineno): """Return the source line number of a line number in the generated bytecode as they are not in sync. 
""" for template_line, code_line in reversed(self.debug_info): if code_line <= lineno: return template_line return 1 @property def is_up_to_date(self): """If this variable is `False` there is a newer version available.""" if self._uptodate is None: return True return self._uptodate() @property def debug_info(self): """The debug info mapping.""" return [tuple(imap(int, x.split('='))) for x in self._debug_info.split('&')] def __repr__(self): if self.name is None: name = 'memory:%x' % id(self) else: name = repr(self.name) return '<%s %s>' % (self.__class__.__name__, name) @implements_to_string class TemplateModule(object): """Represents an imported template. All the exported names of the template are available as attributes on this object. Additionally converting it into an unicode- or bytestrings renders the contents. """ def __init__(self, template, context): self._body_stream = list(template.root_render_func(context)) self.__dict__.update(context.get_exported()) self.__name__ = template.name def __html__(self): return Markup(concat(self._body_stream)) def __str__(self): return concat(self._body_stream) def __repr__(self): if self.__name__ is None: name = 'memory:%x' % id(self) else: name = repr(self.__name__) return '<%s %s>' % (self.__class__.__name__, name) class TemplateExpression(object): """The :meth:`jinja2.Environment.compile_expression` method returns an instance of this object. It encapsulates the expression-like access to the template with an expression it wraps. 
""" def __init__(self, template, undefined_to_none): self._template = template self._undefined_to_none = undefined_to_none def __call__(self, *args, **kwargs): context = self._template.new_context(dict(*args, **kwargs)) consume(self._template.root_render_func(context)) rv = context.vars['result'] if self._undefined_to_none and isinstance(rv, Undefined): rv = None return rv @implements_iterator class TemplateStream(object): """A template stream works pretty much like an ordinary python generator but it can buffer multiple items to reduce the number of total iterations. Per default the output is unbuffered which means that for every unbuffered instruction in the template one unicode string is yielded. If buffering is enabled with a buffer size of 5, five items are combined into a new unicode string. This is mainly useful if you are streaming big templates to a client via WSGI which flushes after each iteration. """ def __init__(self, gen): self._gen = gen self.disable_buffering() def dump(self, fp, encoding=None, errors='strict'): """Dump the complete stream into a file or file-like object. Per default unicode strings are written, if you want to encode before writing specify an `encoding`. Example usage:: Template('Hello {{ name }}!').stream(name='foo').dump('hello.html') """ close = False if isinstance(fp, string_types): if encoding is None: encoding = 'utf-8' fp = open(fp, 'wb') close = True try: if encoding is not None: iterable = (x.encode(encoding, errors) for x in self) else: iterable = self if hasattr(fp, 'writelines'): fp.writelines(iterable) else: for item in iterable: fp.write(item) finally: if close: fp.close() def disable_buffering(self): """Disable the output buffering.""" self._next = get_next(self._gen) self.buffered = False def enable_buffering(self, size=5): """Enable buffering. 
Buffer `size` items before yielding them.""" if size <= 1: raise ValueError('buffer size too small') def generator(next): buf = [] c_size = 0 push = buf.append while 1: try: while c_size < size: c = next() push(c) if c: c_size += 1 except StopIteration: if not c_size: return yield concat(buf) del buf[:] c_size = 0 self.buffered = True self._next = get_next(generator(get_next(self._gen))) def __iter__(self): return self def __next__(self): return self._next() # hook in default template class. if anyone reads this comment: ignore that # it's possible to use custom templates ;-) Environment.template_class = Template
gpl-3.0
HarllanAndrye/nilmtk
nilmtk/elecmeter.py
5
30305
from __future__ import print_function, division from warnings import warn from collections import namedtuple from copy import deepcopy from itertools import izip import numpy as np import pandas as pd import matplotlib.pyplot as plt import random from .preprocessing import Clip from .stats import TotalEnergy, GoodSections, DropoutRate from .stats.totalenergyresults import TotalEnergyResults from .hashable import Hashable from .appliance import Appliance from .datastore import Key from .measurement import (select_best_ac_type, AC_TYPES, PHYSICAL_QUANTITIES, PHYSICAL_QUANTITIES_WITH_AC_TYPES, check_ac_type, check_physical_quantity) from .node import Node from .electric import Electric from .timeframe import TimeFrame, list_of_timeframe_dicts from nilmtk.exceptions import MeasurementError from .utils import flatten_2d_list, capitalise_first_letter from nilmtk.timeframegroup import TimeFrameGroup import nilmtk ElecMeterID = namedtuple('ElecMeterID', ['instance', 'building', 'dataset']) class ElecMeter(Hashable, Electric): """Represents a physical electricity meter. Attributes ---------- appliances : list of Appliance objects connected immediately downstream of this meter. Will be [] if no appliances are connected directly to this meter. store : nilmtk.DataStore key : string key into nilmtk.DataStore to access data. metadata : dict. 
See http://nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#elecmeter STATIC ATTRIBUTES ----------------- meter_devices : dict, static class attribute See http://nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#meterdevice """ meter_devices = {} def __init__(self, store=None, metadata=None, meter_id=None): # Store and check parameters self.appliances = [] self.metadata = {} if metadata is None else metadata assert isinstance(self.metadata, dict) self.store = store self.identifier = meter_id # Insert self into nilmtk.global_meter_group if self.identifier is not None: assert isinstance(self.identifier, ElecMeterID) if self not in nilmtk.global_meter_group.meters: nilmtk.global_meter_group.meters.append(self) @property def key(self): return self.metadata['data_location'] def instance(self): return self._identifier_attr('instance') def building(self): return self._identifier_attr('building') def dataset(self): return self._identifier_attr('dataset') @property def name(self): return self.metadata.get('name') @name.setter def name(self, value): self.metadata['name'] = value def _identifier_attr(self, attr): if self.identifier is None: return else: return getattr(self.identifier, attr) def get_timeframe(self): self._check_store() return self.store.get_timeframe(key=self.key) def _check_store(self): if self.store is None: raise RuntimeError("ElecMeter needs `store` attribute set to an" " instance of a `nilmtk.DataStore` subclass") def upstream_meter(self, raise_warning=True): """ Returns ------- ElecMeterID of upstream meter or None if is site meter. 
""" if self.is_site_meter(): if raise_warning: warn("There is no meter upstream of this meter '{}' because" " it is a site meter.".format(self.identifier)) return submeter_of = self.metadata.get('submeter_of') # Sanity checks if submeter_of is None: raise ValueError( "This meter has no 'submeter_of' metadata attribute.") if submeter_of < 0: raise ValueError("'submeter_of' must be >= 0.") upstream_meter_in_building = self.metadata.get( 'upstream_meter_in_building') if (upstream_meter_in_building is not None and upstream_meter_in_building != self.identifier.building): raise NotImplementedError( "'upstream_meter_in_building' not implemented yet.") id_of_upstream = ElecMeterID(instance=submeter_of, building=self.identifier.building, dataset=self.identifier.dataset) upstream_meter = nilmtk.global_meter_group[id_of_upstream] if upstream_meter is None: warn("No upstream meter found for '{}'.".format(self.identifier)) return upstream_meter @classmethod def load_meter_devices(cls, store): dataset_metadata = store.load_metadata('/') ElecMeter.meter_devices.update( dataset_metadata.get('meter_devices', {})) def save(self, destination, key): """ Convert all relevant attributes to a dict to be saved as metadata in destination at location specified by key """ # destination.write_metadata(key, self.metadata) # then save data raise NotImplementedError @property def device(self): """ Returns ------- dict describing the MeterDevice for this meter (sample period etc). """ device_model = self.metadata.get('device_model') if device_model: return deepcopy(ElecMeter.meter_devices[device_model]) else: return {} def sample_period(self): device = self.device if device: return device['sample_period'] def is_site_meter(self): return self.metadata.get('site_meter', False) def dominant_appliance(self): """Tries to find the most dominant appliance on this meter, and then returns that appliance object. Will return None if there are no appliances on this meter. 
""" n_appliances = len(self.appliances) if n_appliances == 0: return elif n_appliances == 1: return self.appliances[0] else: for app in self.appliances: if app.metadata.get('dominant_appliance'): return app warn('Multiple appliances are associated with meter {}' ' but none are marked as the dominant appliance. Hence' ' returning the first appliance in the list.', RuntimeWarning) return self.appliances[0] def label(self, pretty=True): """Returns a string describing this meter. Parameters ---------- pretty : boolean If True then just return the type name of the dominant appliance (without the instance number) or metadata['name'], with the first letter capitalised. Returns ------- string : A label listing all the appliance types. """ if pretty: return self._pretty_label() meter_names = [] if self.is_site_meter(): meter_names.append('SITE METER') elif "name" in self.metadata: meter_names.append(self.metadata["name"]) else: for appliance in self.appliances: appliance_name = appliance.label() if appliance.metadata.get('dominant_appliance'): appliance_name = appliance_name.upper() meter_names.append(appliance_name) label = ", ".join(meter_names) return label def _pretty_label(self): name = self.metadata.get("name") if name: label = name elif self.is_site_meter(): label = 'Site meter' elif self.dominant_appliance() is not None: label = self.dominant_appliance().identifier.type else: meter_names = [] for appliance in self.appliances: appliance_name = appliance.label() if appliance.metadata.get('dominant_appliance'): appliance_name = appliance_name.upper() meter_names.append(appliance_name) label = ", ".join(meter_names) return label label = capitalise_first_letter(label) return label def available_ac_types(self, physical_quantity): """Finds available alternating current types for a specific physical quantity. Parameters ---------- physical_quantity : str or list of strings Returns ------- list of strings e.g. 
['apparent', 'active'] """ if isinstance(physical_quantity, list): ac_types = [self.available_ac_types(pq) for pq in physical_quantity] return list(set(flatten_2d_list(ac_types))) if physical_quantity not in PHYSICAL_QUANTITIES: raise ValueError("`physical_quantity` must by one of '{}', not '{}'" .format(PHYSICAL_QUANTITIES, physical_quantity)) measurements = self.device['measurements'] return [m['type'] for m in measurements if m['physical_quantity'] == physical_quantity and 'type' in m] def available_physical_quantities(self): """ Returns ------- list of strings e.g. ['power', 'energy'] """ measurements = self.device['measurements'] return list(set([m['physical_quantity'] for m in measurements])) def available_columns(self): """ Returns ------- list of 2-tuples of strings e.g. [('power', 'active')] """ measurements = self.device['measurements'] return list(set([(m['physical_quantity'], m.get('type', '')) for m in measurements])) def __repr__(self): string = super(ElecMeter, self).__repr__() # Now add list of appliances... 
string = string[:-1] # remove last bracket # Site meter if self.metadata.get('site_meter'): string += ', site_meter' # Appliances string += ', appliances={}'.format(self.appliances) # METER ROOM room = self.metadata.get('room') if room: string += ', room={}'.format(room) string += ')' return string def matches(self, key): """ Parameters ---------- key : dict Returns ------- Bool """ if not key: return True if not isinstance(key, dict): raise TypeError() match = True for k, v in key.iteritems(): if hasattr(self.identifier, k): if getattr(self.identifier, k) != v: match = False elif k in self.metadata: if self.metadata[k] != v: match = False elif k in self.device: metadata_value = self.device[k] if (isinstance(metadata_value, list) and not isinstance(v, list)): if v not in metadata_value: match = False elif metadata_value != v: match = False else: raise KeyError("'{}' not a valid key.".format(k)) return match def load(self, **kwargs): """Returns a generator of DataFrames loaded from the DataStore. By default, `load` will load all available columns from the DataStore. Specific columns can be selected in one or two mutually exclusive ways: 1. specify a list of column names using the `cols` parameter. 2. specify a `physical_quantity` and/or an `ac_type` parameter to ask `load` to automatically select columns. If 'resample' is set to 'True' then the default behaviour is for gaps shorter than max_sample_period will be forward filled. Parameters --------------- physical_quantity : string or list of strings e.g. 'power' or 'voltage' or 'energy' or ['power', 'energy']. If a single string then load columns only for that physical quantity. If a list of strings then load columns for all those physical quantities. ac_type : string or list of strings, defaults to None Where 'ac_type' is short for 'alternating current type'. e.g. 'reactive' or 'active' or 'apparent'. If set to None then will load all AC types per physical quantity. 
If set to 'best' then load the single best AC type per physical quantity. If set to a single AC type then load just that single AC type per physical quantity, else raise an Exception. If set to a list of AC type strings then will load all those AC types and will raise an Exception if any cannot be found. cols : list of tuples, using NILMTK's vocabulary for measurements. e.g. [('power', 'active'), ('voltage', ''), ('energy', 'reactive')] `cols` can't be used if `ac_type` and/or `physical_quantity` are set. sample_period : int, defaults to None Number of seconds to use as the new sample period for resampling. If None then will use self.sample_period() resample : boolean, defaults to False If True then will resample data using `sample_period`. Defaults to True if `sample_period` is not None. resample_kwargs : dict of key word arguments (other than 'rule') to `pass to pd.DataFrame.resample()`. Defaults to set 'limit' to `sample_period / max_sample_period` and sets 'fill_method' to ffill. preprocessing : list of Node subclass instances e.g. [Clip()]. **kwargs : any other key word arguments to pass to `self.store.load()` Returns ------- Always return a generator of DataFrames (even if it only has a single column). Raises ------ nilmtk.exceptions.MeasurementError if a measurement is specified which is not available. """ verbose = kwargs.get('verbose') if verbose: print() print("ElecMeter.load") print(self) if 'sample_period' in kwargs: kwargs['resample'] = True if kwargs.get('resample'): # Set default key word arguments for resampling. 
resample_kwargs = kwargs.setdefault('resample_kwargs', {}) resample_kwargs.setdefault('fill_method', 'ffill') if 'limit' not in resample_kwargs: sample_period = kwargs.get('sample_period', self.sample_period()) max_number_of_rows_to_ffill = int( np.ceil(self.device['max_sample_period'] / sample_period)) resample_kwargs.update({'limit': max_number_of_rows_to_ffill}) if verbose: print("kwargs after setting resample setting:") print(kwargs) kwargs = self._prep_kwargs_for_sample_period_and_resample(**kwargs) if verbose: print("kwargs after processing") print(kwargs) # Get source node preprocessing = kwargs.pop('preprocessing', []) last_node = self.get_source_node(**kwargs) generator = last_node.generator # Connect together all preprocessing nodes for node in preprocessing: node.upstream = last_node last_node = node generator = last_node.process() return generator def _ac_type_to_columns(self, ac_type): if ac_type is None: return [] if isinstance(ac_type, list): cols2d = [self._ac_type_to_columns(a_t) for a_t in ac_type] return list(set(flatten_2d_list(cols2d))) check_ac_type(ac_type) cols_matching = [col for col in self.available_columns() if col[1] == ac_type] return cols_matching def _physical_quantity_to_columns(self, physical_quantity): if physical_quantity is None: return [] if isinstance(physical_quantity, list): cols2d = [self._physical_quantity_to_columns(p_q) for p_q in physical_quantity] return list(set(flatten_2d_list(cols2d))) check_physical_quantity(physical_quantity) cols_matching = [col for col in self.available_columns() if col[0] == physical_quantity] return cols_matching def _get_columns_with_best_ac_type(self, physical_quantity=None): if physical_quantity is None: physical_quantity = self.available_physical_quantities() if isinstance(physical_quantity, list): columns = set() for pq in physical_quantity: best = self._get_columns_with_best_ac_type(pq) if best: columns.update(best) return list(columns) check_physical_quantity(physical_quantity) 
available_pqs = self.available_physical_quantities() if physical_quantity not in available_pqs: return [] ac_types = self.available_ac_types(physical_quantity) try: best_ac_type = select_best_ac_type(ac_types) except KeyError: return [] else: return [(physical_quantity, best_ac_type)] def _convert_physical_quantity_and_ac_type_to_cols( self, physical_quantity=None, ac_type=None, cols=None, **kwargs): """Returns kwargs dict with physical_quantity and ac_type removed and cols populated appropriately.""" if cols: if (ac_type or physical_quantity): raise ValueError("Cannot use `ac_type` and/or `physical_quantity`" " with `cols` parameter.") else: if set(cols).issubset(self.available_columns()): kwargs['cols'] = cols return kwargs else: msg = ("'{}' is not a subset of the available columns: '{}'" .format(cols, self.available_columns())) raise MeasurementError(msg) msg = "" if not (ac_type or physical_quantity): cols = self.available_columns() elif ac_type == 'best': cols = self._get_columns_with_best_ac_type(physical_quantity) if not cols: msg += "No AC types for physical quantity {}".format(physical_quantity) else: if ac_type: cols = self._ac_type_to_columns(ac_type) if not cols: msg += "AC type '{}' not available. ".format(ac_type) if physical_quantity: cols_matching_pq = self._physical_quantity_to_columns(physical_quantity) if not cols_matching_pq: msg += ("Physical quantity '{}' not available. " .format(physical_quantity)) if cols: cols = list(set(cols).intersection(cols_matching_pq)) if not cols: msg += ("No measurement matching ({}, {}). " .format(physical_quantity, ac_type)) else: cols = cols_matching_pq if msg: msg += "Available columns = {}. 
".format(self.available_columns()) raise MeasurementError(msg) kwargs['cols'] = cols return kwargs def dry_run_metadata(self): return self.metadata def get_metadata(self): return self.metadata def get_source_node(self, **loader_kwargs): if self.store is None: raise RuntimeError( "Cannot get source node if meter.store is None!") loader_kwargs = self._convert_physical_quantity_and_ac_type_to_cols(**loader_kwargs) generator = self.store.load(key=self.key, **loader_kwargs) self.metadata['device'] = self.device return Node(self, generator=generator) def total_energy(self, **loader_kwargs): """ Parameters ---------- full_results : bool, default=False **loader_kwargs : key word arguments for DataStore.load() Returns ------- if `full_results` is True then return TotalEnergyResults object else returns a pd.Series with a row for each AC type. """ nodes = [Clip, TotalEnergy] return self._get_stat_from_cache_or_compute( nodes, TotalEnergy.results_class(), loader_kwargs) def dropout_rate(self, ignore_gaps=True, **loader_kwargs): """ Parameters ---------- ignore_gaps : bool, default=True If True then will only calculate dropout rate for good sections. full_results : bool, default=False **loader_kwargs : key word arguments for DataStore.load() Returns ------- DropoutRateResults object if `full_results` is True, else float """ nodes = [DropoutRate] if ignore_gaps: loader_kwargs['sections'] = self.good_sections(**loader_kwargs) return self._get_stat_from_cache_or_compute( nodes, DropoutRate.results_class(), loader_kwargs) def good_sections(self, **loader_kwargs): """ Parameters ---------- full_results : bool, default=False **loader_kwargs : key word arguments for DataStore.load() Returns ------- if `full_results` is True then return nilmtk.stats.GoodSectionsResults object otherwise return list of TimeFrame objects. 
""" loader_kwargs.setdefault('n_look_ahead_rows', 10) nodes = [GoodSections] results_obj = GoodSections.results_class(self.device['max_sample_period']) return self._get_stat_from_cache_or_compute( nodes, results_obj, loader_kwargs) def _get_stat_from_cache_or_compute(self, nodes, results_obj, loader_kwargs): """General function for computing statistics and/or loading them from cache. Cached statistics lives in the DataStore at 'building<I>/elec/cache/meter<K>/<statistic_name>' e.g. 'building1/elec/cache/meter1/total_energy'. We store the 'full' statistic... i.e we store a representation of the `Results._data` DataFrame. Some times we need to do some conversion to store `Results._data` on disk. The logic for doing this conversion lives in the `Results` class or subclass. The cache can be cleared by calling `ElecMeter.clear_cache()`. Parameters ---------- nodes : list of nilmtk.Node classes results_obj : instance of nilmtk.Results subclass loader_kwargs : dict Returns ------- if `full_results` is True then return nilmtk.Results subclass instance otherwise return nilmtk.Results.simple(). 
See Also -------- clear_cache _compute_stat key_for_cached_stat get_cached_stat """ full_results = loader_kwargs.pop('full_results', False) verbose = loader_kwargs.get('verbose') if 'ac_type' in loader_kwargs or 'physical_quantity' in loader_kwargs: loader_kwargs = self._convert_physical_quantity_and_ac_type_to_cols(**loader_kwargs) cols = loader_kwargs.get('cols', []) ac_types = set([m[1] for m in cols if m[1]]) results_obj_copy = deepcopy(results_obj) # Prepare `sections` list sections = loader_kwargs.get('sections') if sections is None: tf = self.get_timeframe() tf.include_end = True sections = [tf] sections = TimeFrameGroup(sections) sections = [s for s in sections if not s.empty] # Retrieve usable stats from cache key_for_cached_stat = self.key_for_cached_stat(results_obj.name) if loader_kwargs.get('preprocessing') is None: cached_stat = self.get_cached_stat(key_for_cached_stat) results_obj.import_from_cache(cached_stat, sections) def find_sections_to_compute(): # Get sections_to_compute results_obj_timeframes = results_obj.timeframes() sections_to_compute = set(sections) - set(results_obj_timeframes) sections_to_compute = list(sections_to_compute) sections_to_compute.sort() return sections_to_compute try: ac_type_keys = results_obj.simple().keys() except: sections_to_compute = find_sections_to_compute() else: if ac_types.issubset(ac_type_keys): sections_to_compute = find_sections_to_compute() else: sections_to_compute = sections results_obj = results_obj_copy else: sections_to_compute = sections if verbose and not results_obj._data.empty: print("Using cached result.") # If we get to here then we have to compute some stats if sections_to_compute: loader_kwargs['sections'] = sections_to_compute computed_result = self._compute_stat(nodes, loader_kwargs) # Merge cached results with newly computed results_obj.update(computed_result.results) # Save to disk newly computed stats stat_for_store = computed_result.results.export_to_cache() try: 
self.store.append(key_for_cached_stat, stat_for_store) except ValueError: # the old table probably had different columns self.store.remove(key_for_cached_stat) self.store.put(key_for_cached_stat, results_obj.export_to_cache()) if full_results: return results_obj else: res = results_obj.simple() if ac_types: try: ac_type_keys = res.keys() except: return res else: return pd.Series(res[ac_types], index=ac_types) else: return res def _compute_stat(self, nodes, loader_kwargs): """ Parameters ---------- nodes : list of nilmtk.Node subclass objects loader_kwargs : dict Returns ------- Node subclass object See Also -------- clear_cache _get_stat_from_cache_or_compute key_for_cached_stat get_cached_stat """ results = self.get_source_node(**loader_kwargs) for node in nodes: results = node(results) results.run() return results def key_for_cached_stat(self, stat_name): """ Parameters ---------- stat_name : str Returns ------- key : str See Also -------- clear_cache _compute_stat _get_stat_from_cache_or_compute get_cached_stat """ if isinstance(self.instance(), tuple): meter_str = "_".join([str(i) for i in (self.instance())]) else: meter_str = "{:d}".format(self.instance()) return ("building{:d}/elec/cache/meter{}/{:s}" .format(self.building(), meter_str, stat_name)) def clear_cache(self, verbose=False): """ See Also -------- _compute_stat _get_stat_from_cache_or_compute key_for_cached_stat get_cached_stat """ if self.store is not None: key_for_cache = self.key_for_cached_stat('') try: self.store.remove(key_for_cache) except KeyError: if verbose: print("No existing cache for", key_for_cache) else: print("Removed", key_for_cache) def get_cached_stat(self, key_for_stat): """ Parameters ---------- key_for_stat : str Returns ------- pd.DataFrame See Also -------- _compute_stat _get_stat_from_cache_or_compute key_for_cached_stat clear_cache """ if self.store is None: return pd.DataFrame() try: stat_from_cache = self.store[key_for_stat] except KeyError: return pd.DataFrame() else: 
return pd.DataFrame() if stat_from_cache is None else stat_from_cache # def total_on_duration(self): # """Return timedelta""" # raise NotImplementedError # def on_durations(self): # raise NotImplementedError # def activity_distribution(self, bin_size, timespan): # raise NotImplementedError # def on_off_events(self): # use self.metadata.minimum_[off|on]_duration # raise NotImplementedError # def discrete_appliance_activations(self): # """ # Return a Mask defining the start and end times of each appliance # activation. # """ # raise NotImplementedError # def contiguous_sections(self): # """retuns Mask object""" # raise NotImplementedError # def clean_and_export(self, destination_datastore): # """Apply all cleaning configured in meter.cleaning and then export. Also identifies # and records the locations of gaps. Also records metadata about exactly which # cleaning steps have been executed and some summary results (e.g. the number of # implausible values removed)""" # raise NotImplementedError
apache-2.0
Akylas/zxing
cpp/scons/scons-local-2.0.0.final.0/SCons/Scanner/LaTeX.py
34
15021
"""SCons.Scanner.LaTeX This module implements the dependency scanner for LaTeX code. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Scanner/LaTeX.py 5023 2010/06/14 22:05:46 scons" import os.path import re import SCons.Scanner import SCons.Util # list of graphics file extensions for TeX and LaTeX TexGraphics = ['.eps', '.ps'] LatexGraphics = ['.pdf', '.png', '.jpg', '.gif', '.tif'] # Used as a return value of modify_env_var if the variable is not set. class _Null(object): pass _null = _Null # The user specifies the paths in env[variable], similar to other builders. # They may be relative and must be converted to absolute, as expected # by LaTeX and Co. The environment may already have some paths in # env['ENV'][var]. These paths are honored, but the env[var] paths have # higher precedence. All changes are un-done on exit. 
def modify_env_var(env, var, abspath):
    """Prepend *abspath* (and any user-specified env[var] paths, made
    absolute) onto the external environment variable env['ENV'][var].

    Returns the previous value of env['ENV'][var] so the caller can
    restore it on exit, or the ``_null`` sentinel class if the variable
    was not previously set.
    """
    try:
        save = env['ENV'][var]
    except KeyError:
        save = _null
    env.PrependENVPath(var, abspath)
    try:
        if SCons.Util.is_List(env[var]):
            env.PrependENVPath(var, [os.path.abspath(str(p)) for p in env[var]])
        else:
            # Split at os.pathsep to convert into absolute path
            env.PrependENVPath(var, [os.path.abspath(p) for p in str(env[var]).split(os.pathsep)])
    except KeyError:
        # env[var] not set by the user; only abspath was prepended above.
        pass

    # Convert into a string explicitly to append ":" (without which it won't
    # search system paths as well). The problem is that env.AppendENVPath(var, ":")
    # does not work, refuses to append ":" (os.pathsep).

    if SCons.Util.is_List(env['ENV'][var]):
        env['ENV'][var] = os.pathsep.join(env['ENV'][var])
    # Append the trailing os.pathsep character here to catch the case with no env[var]
    env['ENV'][var] = env['ENV'][var] + os.pathsep

    return save


class FindENVPathDirs(object):
    """A class to bind a specific *PATH variable name to a function that
    will return all of the *path directories."""
    def __init__(self, variable):
        # Name of the external environment variable (e.g. 'TEXINPUTS').
        self.variable = variable
    def __call__(self, env, dir=None, target=None, source=None, argument=None):
        # Deferred import: SCons.PathList is only needed when a scan runs.
        import SCons.PathList
        try:
            path = env['ENV'][self.variable]
        except KeyError:
            return ()

        dir = dir or env.fs._cwd
        path = SCons.PathList.PathList(path).subst_path(env, target, source)
        return tuple(dir.Rfindalldirs(path))


def LaTeXScanner():
    """Return a prototype Scanner instance for scanning LaTeX source
    files when built with latex.
    """
    ds = LaTeX(name = "LaTeXScanner",
               suffixes =  '$LATEXSUFFIXES',
               # in the search order, see below in LaTeX class docstring
               graphics_extensions = TexGraphics,
               recursive = 0)
    return ds


def PDFLaTeXScanner():
    """Return a prototype Scanner instance for scanning LaTeX source
    files when built with pdflatex.
    """
    ds = LaTeX(name = "PDFLaTeXScanner",
               suffixes =  '$LATEXSUFFIXES',
               # in the search order, see below in LaTeX class docstring
               graphics_extensions = LatexGraphics,
               recursive = 0)
    return ds


class LaTeX(SCons.Scanner.Base):
    """Class for scanning LaTeX files for included files.

    Unlike most scanners, which use regular expressions that just
    return the included file name, this returns a tuple consisting
    of the keyword for the inclusion ("include", "includegraphics",
    "input", or "bibliography"), and then the file name itself.
    Based on a quick look at LaTeX documentation, it seems that we
    should append .tex suffix for the "include" keywords, append .tex if
    there is no extension for the "input" keyword, and need to add .bib
    for the "bibliography" keyword that does not accept extensions by itself.

    Finally, if there is no extension for an "includegraphics" keyword
    latex will append .ps or .eps to find the file, while pdftex may use .pdf,
    .jpg, .tif, .mps, or .png.

    The actual subset and search order may be altered by
    DeclareGraphicsExtensions command. This complication is ignored.
    The default order corresponds to experimentation with teTeX
        $ latex --version
        pdfeTeX 3.141592-1.21a-2.2 (Web2C 7.5.4)
        kpathsea version 3.5.4
    The order is:
        ['.eps', '.ps'] for latex
        ['.png', '.pdf', '.jpg', '.tif'].

    Another difference is that the search path is determined by the type
    of the file being searched:
    env['TEXINPUTS'] for "input" and "include" keywords
    env['TEXINPUTS'] for "includegraphics" keyword
    env['TEXINPUTS'] for "lstinputlisting" keyword
    env['BIBINPUTS'] for "bibliography" keyword
    env['BSTINPUTS'] for "bibliographystyle" keyword

    FIXME: also look for the class or style in document[class|style]{}
    FIXME: also look for the argument of bibliographystyle{}
    """
    # Maps each recognized LaTeX inclusion keyword to the construction
    # variable whose paths are searched for the included file.
    keyword_paths = {'include': 'TEXINPUTS',
                     'input': 'TEXINPUTS',
                     'includegraphics': 'TEXINPUTS',
                     'bibliography': 'BIBINPUTS',
                     'bibliographystyle': 'BSTINPUTS',
                     'usepackage': 'TEXINPUTS',
                     'lstinputlisting': 'TEXINPUTS'}
    # De-duplicated list of the path variables named above.
    env_variables = SCons.Util.unique(list(keyword_paths.values()))

    def __init__(self, name, suffixes, graphics_extensions, *args, **kw):

        # We have to include \n with the % we exclude from the first part
        # part of the regex because the expression is compiled with re.M.
        # Without the \n, the ^ could match the beginning of a *previous*
        # line followed by one or more newline characters (i.e. blank
        # lines), interfering with a match on the next line.
        regex = r'^[^%\n]*\\(include|includegraphics(?:\[[^\]]+\])?|lstinputlisting(?:\[[^\]]+\])?|input|bibliography|usepackage){([^}]*)}'
        self.cre = re.compile(regex, re.M)
        self.graphics_extensions = graphics_extensions

        def _scan(node, env, path=(), self=self):
            # Scanner entry point installed as kw['function'] below.
            # self is bound via the default argument so the closure keeps
            # a reference to this scanner instance.
            node = node.rfile()
            if not node.exists():
                return []
            return self.scan_recurse(node, path)

        class FindMultiPathDirs(object):
            """The stock FindPathDirs function has the wrong granularity:
            it is called once per target, while we need the path that depends
            on what kind of included files is being searched. This wrapper
            hides multiple instances of FindPathDirs, one per the LaTeX path
            variable in the environment. When invoked, the function calculates
            and returns all the required paths as a dictionary (converted into
            a tuple to become hashable). Then the scan function converts it
            back and uses a dictionary of tuples rather than a single tuple
            of paths.
            """
            def __init__(self, dictionary):
                # For each keyword, pair a FindPathDirs (construction
                # variable paths) with a FindENVPathDirs (external
                # environment paths).
                self.dictionary = {}
                for k,n in dictionary.items():
                    self.dictionary[k] = ( SCons.Scanner.FindPathDirs(n),
                                           FindENVPathDirs(n) )

            def __call__(self, env, dir=None, target=None, source=None,
                         argument=None):
                di = {}
                for k,(c,cENV) in self.dictionary.items():
                    di[k] = ( c(env, dir=None, target=None, source=None,
                                argument=None) ,
                              cENV(env, dir=None, target=None, source=None,
                                   argument=None) )
                # To prevent "dict is not hashable error"
                return tuple(di.items())

        class LaTeXScanCheck(object):
            """Skip all but LaTeX source files, i.e., do not scan *.eps,
            *.pdf, *.jpg, etc.
            """
            def __init__(self, suffixes):
                self.suffixes = suffixes
            def __call__(self, node, env):
                current = not node.has_builder() or node.is_up_to_date()
                scannable = node.get_suffix() in env.subst_list(self.suffixes)[0]
                # Returning false means that the file is not scanned.
                return scannable and current

        kw['function'] = _scan
        kw['path_function'] = FindMultiPathDirs(LaTeX.keyword_paths)
        kw['recursive'] = 0
        kw['skeys'] = suffixes
        kw['scan_check'] = LaTeXScanCheck(suffixes)
        kw['name'] = name

        SCons.Scanner.Base.__init__(self, *args, **kw)

    def _latex_names(self, include):
        # Given one (keyword, filename) tuple, return the list of candidate
        # file names LaTeX would try, adding the keyword's default extension
        # when the filename has none.
        filename = include[1]
        if include[0] == 'input':
            base, ext = os.path.splitext( filename )
            if ext == "":
                return [filename + '.tex']
        if (include[0] == 'include'):
            return [filename + '.tex']
        if include[0] == 'bibliography':
            base, ext = os.path.splitext( filename )
            if ext == "":
                return [filename + '.bib']
        if include[0] == 'usepackage':
            base, ext = os.path.splitext( filename )
            if ext == "":
                return [filename + '.sty']
        if include[0] == 'includegraphics':
            base, ext = os.path.splitext( filename )
            if ext == "":
                #return [filename+e for e in self.graphics_extensions + TexGraphics]
                # use the line above to find dependencies for the PDF builder
                # when only an .eps figure is present.  Since it will be found
                # if the user tells scons how to make the pdf figure, leave
                # it out for now.
                return [filename+e for e in self.graphics_extensions]
        return [filename]

    def sort_key(self, include):
        # Normalized-case name used as the DSU sort key in scan_recurse().
        return SCons.Node.FS._my_normcase(str(include))

    def find_include(self, include, source_dir, path):
        """Locate the file for one (keyword, filename) tuple.

        *path* is the per-keyword dict built by FindMultiPathDirs: each
        value is a (construction-variable paths, external-env paths) pair.
        Returns (node_or_None, include).
        """
        try:
            sub_path = path[include[0]]
        except (IndexError, KeyError):
            sub_path = ()
        try_names = self._latex_names(include)
        for n in try_names:
            # see if we find it using the path in env[var]
            i = SCons.Node.FS.find_file(n, (source_dir,) + sub_path[0])
            if i:
                return i, include
            # see if we find it using the path in env['ENV'][var]
            i = SCons.Node.FS.find_file(n, (source_dir,) + sub_path[1])
            if i:
                return i, include
        return i, include

    def scan(self, node):
        # Modify the default scan function to allow for the regular
        # expression to return a comma separated list of file names
        # as can be the case with the bibliography keyword.

        # Cache the includes list in node so we only scan it once:
        # path_dict = dict(list(path))
        # NOTE(review): non-raw '\[' relies on "unknown escape passes
        # through" behavior; a raw string r'\[.*$' would be the safer
        # spelling -- confirm before changing.
        noopt_cre = re.compile('\[.*$')
        if node.includes != None:
            includes = node.includes
        else:
            includes = self.cre.findall(node.get_text_contents())
            # 1. Split comma-separated lines, e.g.
            #      ('bibliography', 'phys,comp')
            #    should become two entries
            #      ('bibliography', 'phys')
            #      ('bibliography', 'comp')
            # 2. Remove the options, e.g., such as
            #      ('includegraphics[clip,width=0.7\\linewidth]', 'picture.eps')
            #    should become
            #      ('includegraphics', 'picture.eps')
            split_includes = []
            for include in includes:
                inc_type = noopt_cre.sub('', include[0])
                inc_list = include[1].split(',')
                for j in range(len(inc_list)):
                    split_includes.append( (inc_type, inc_list[j]) )
            #
            includes = split_includes
            node.includes = includes

        return includes

    def scan_recurse(self, node, path=()):
        """ do a recursive scan of the top level target file
        This lets us search for included files based on the
        directory of the main file just as latex does"""

        path_dict = dict(list(path))

        queue = []
        queue.extend( self.scan(node) )
        seen = {}

        # This is a hand-coded DSU (decorate-sort-undecorate, or
        # Schwartzian transform) pattern.  The sort key is the raw name
        # of the file as specifed on the \include, \input, etc. line.
        # TODO: what about the comment in the original Classic scanner:
        # """which lets
        # us keep the sort order constant regardless of whether the file
        # is actually found in a Repository or locally."""
        nodes = []
        source_dir = node.get_dir()
        #for include in includes:
        while queue:

            include = queue.pop()
            try:
                # seen[] keys on the raw filename only, so the same file
                # referenced via two different keywords is scanned once.
                if seen[include[1]] == 1:
                    continue
            except KeyError:
                seen[include[1]] = 1

            #
            # Handle multiple filenames in include[1]
            #
            n, i = self.find_include(include, source_dir, path_dict)

            if n is None:
                # Do not bother with 'usepackage' warnings, as they most
                # likely refer to system-level files
                # NOTE(review): SCons.Warnings / SCons.Node.FS are not
                # imported by this module directly; they appear to be made
                # available as attributes of the SCons package by earlier
                # imports -- confirm before refactoring imports.
                if include[0] != 'usepackage':
                    SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
                                        "No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
            else:
                sortkey = self.sort_key(n)
                nodes.append((sortkey, n))
                # recurse down
                queue.extend( self.scan(n) )

        return [pair[1] for pair in sorted(nodes)]

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
apache-2.0
hongliang5623/sentry
src/sentry/migrations/0138_migrate_team_members.py
30
30381
# -*- coding: utf-8 -*- import datetime from collections import defaultdict from south.db import db from south.v2 import DataMigration from django.db import IntegrityError, models, transaction class Migration(DataMigration): def forwards(self, orm): from sentry.utils.query import ( RangeQuerySetWrapper, RangeQuerySetWrapperWithProgressBar, WithProgressBar ) Organization = orm['sentry.Organization'] OrganizationMember = orm['sentry.OrganizationMember'] PendingTeamMember = orm['sentry.PendingTeamMember'] TeamMember = orm['sentry.TeamMember'] Team = orm['sentry.Team'] teams_by_org = defaultdict(list) for org in RangeQuerySetWrapper(Organization.objects.all()): for team in Team.objects.filter(organization=org): teams_by_org[org].append(team) for org, team_list in WithProgressBar(teams_by_org.items(), caption='Organizations'): team_member_qs = TeamMember.objects.filter( team__organization=org ).select_related('team') members_by_user = defaultdict(list) for member in team_member_qs.iterator(): if member.user_id == member.team.owner_id: continue # team owners are already present members_by_user[member.user_id].append(member) total_teams = len(team_list) for user_id, member_list in members_by_user.iteritems(): # if they were a member of all teams, give them global access has_global_access = len(member_list) == total_teams # give them the highest level access they had access = min(m.type for m in member_list) sid = transaction.savepoint() try: om = OrganizationMember.objects.create( organization=org, user_id=user_id, type=access, has_global_access=has_global_access, ) except IntegrityError: transaction.savepoint_rollback(sid) continue else: transaction.savepoint_commit(sid) if not has_global_access: for member in member_list: om.teams.add(member.team) for pm in PendingTeamMember.objects.filter(team=team): om, _ = OrganizationMember.objects.get_or_create( organization=org, email=pm.email, has_global_access=False, defaults={'type': pm.type}, ) om.teams.add(team) 
transaction.commit() def backwards(self, orm): pass models = { 'sentry.accessgroup': { 'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'}, 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}), 'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}), 'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}) }, 'sentry.activity': { 'Meta': {'object_name': 'Activity'}, 'data': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'type': ('django.db.models.fields.PositiveIntegerField', [], {}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}) }, 'sentry.alert': { 'Meta': {'object_name': 'Alert'}, 'data': ('django.db.models.fields.TextField', [], {'null': 
'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'sentry.alertrelatedgroup': { 'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'}, 'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}) }, 'sentry.broadcast': { 'Meta': {'object_name': 'Broadcast'}, 'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'sentry.event': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 
'index_together': "(('group', 'datetime'),)"}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) }, 'sentry.eventmapping': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.group': { 'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"}, 'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'checksum': 
('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}) }, 'sentry.groupassignee': { 'Meta': {'object_name': 'GroupAssignee', 'db_table': 
"'sentry_groupasignee'"}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"}) }, 'sentry.groupbookmark': { 'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'}, 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"}) }, 'sentry.grouphash': { 'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'}, 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}) }, 'sentry.groupmeta': { 'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'}, 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': 
('django.db.models.fields.CharField', [], {'max_length': '64'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.grouprulestatus': { 'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'rule': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}), 'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}) }, 'sentry.groupseen': { 'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'}, 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'}) }, 'sentry.grouptagkey': { 'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'}, 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'sentry.grouptagvalue': { 'Meta': {'unique_together': 
"(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"}, 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.lostpasswordhash': { 'Meta': {'object_name': 'LostPasswordHash'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'}) }, 'sentry.option': { 'Meta': {'object_name': 'Option'}, 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.organization': { 'Meta': {'object_name': 'Organization'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': 
('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'sentry.organizationmember': { 'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}), 'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}), 'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False'}), 'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '50'}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.pendingteammember': { 'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': 
"'pending_member_set'", 'to': "orm['sentry.Team']"}), 'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}) }, 'sentry.project': { 'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.projectkey': { 'Meta': {'object_name': 'ProjectKey'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}), 'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}), 'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}), 'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 
'sentry.projectoption': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"}, 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) }, 'sentry.release': { 'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.rule': { 'Meta': {'object_name': 'Rule'}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.tagkey': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"}, 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 
'sentry.tagvalue': { 'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"}, 'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.team': { 'Meta': {'object_name': 'Team'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'sentry.teammember': { 'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], 
{'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}), 'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}), 'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"}) }, 'sentry.user': { 'Meta': {'object_name': 'User', 'db_table': "'auth_user'"}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}) }, 'sentry.useroption': { 'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'}, 'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'user': 
('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}) } } complete_apps = ['sentry'] symmetrical = True
bsd-3-clause
ECP-CANDLE/Benchmarks
common/darts/modules/mixed_layer.py
1
1592
import torch
import torch.nn as nn
import torch.nn.functional as F

from darts.api import Model


class MixedLayer(Model):
    """A weighted mixture over a set of candidate operations (DARTS cell edge).

    During architecture search the outputs of all candidate ops are combined
    with the supplied architecture weights; at inference time a softmax over
    those weights selects the strongest edges.
    """

    def __init__(self, c, stride, primitives, ops):
        super(MixedLayer, self).__init__()
        self.reset(c, stride, primitives, ops)

    def reset(self, c, stride, primitives, ops):
        """Build one candidate layer per primitive name in *primitives*.

        Pooling primitives are followed by a non-affine BatchNorm1d, matching
        the original DARTS construction.
        """
        self.layers = nn.ModuleList()
        for name in primitives:
            candidate = ops[name](c, stride, False)
            if 'pool' in name:
                # Pooling ops get a (non-learnable) batch norm on top.
                candidate = nn.Sequential(candidate, nn.BatchNorm1d(c, affine=False))
            self.layers.append(candidate)

    def pad(self, tensors):
        """Zero-pad tensors whose shape compares less than their predecessor's.

        NOTE(review): ``current.shape < previous.shape`` compares torch.Size
        tuples lexicographically, and the fixed pad of one element per side
        assumes a specific shape mismatch — presumably from strided ops;
        confirm against the op set used.
        """
        previous = tensors[0]
        adjusted = []
        for current in tensors:
            adjusted.append(
                F.pad(input=current, pad=(1, 1, 1, 1), mode='constant', value=0)
                if current.shape < previous.shape
                else current
            )
            previous = current
        return adjusted

    def forward(self, x, weights):
        """Return sum_i weights[i] * layers[i](x), padding mismatched outputs.

        Parameters
        ----------
        x : torch.Tensor
            Input data fed to every candidate layer.
        weights : torch.Tensor
            Architecture weights (alpha), one scalar per candidate op.
        """
        weighted = [alpha * op(x) for alpha, op in zip(weights, self.layers)]
        return sum(self.pad(weighted))
mit
j00bar/ansible
lib/ansible/plugins/callback/foreman.py
47
7209
# -*- coding: utf-8 -*-
# (C) 2015, 2016 Daniel Lobato <elobatocs@gmail.com>
# 2016 Guido Günther <agx@sigxcpu.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
from datetime import datetime
from collections import defaultdict
import json
import time

# `requests` is optional; the plugin disables itself when it is missing.
try:
    import requests
    HAS_REQUESTS = True
except ImportError:
    HAS_REQUESTS = False

from ansible.plugins.callback import CallbackBase


class CallbackModule(CallbackBase):
    """
    This callback will report facts and reports to Foreman https://theforeman.org/

    It makes use of the following environment variables:
    FOREMAN_URL: URL to the Foreman server
    FOREMAN_SSL_CERT: X509 certificate to authenticate to Foreman if
    https is used
    FOREMAN_SSL_KEY: the corresponding private key
    FOREMAN_SSL_VERIFY: whether to verify the Foreman certificate
    It can be set to '1' to verify SSL certificates using the
    installed CAs or to a path pointing to a CA bundle. Set to '0'
    to disable certificate checking.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'foreman'
    CALLBACK_NEEDS_WHITELIST = True

    # Connection settings, all overridable through the environment.
    FOREMAN_URL = os.getenv('FOREMAN_URL', "http://localhost:3000")
    FOREMAN_SSL_CERT = (os.getenv('FOREMAN_SSL_CERT',
                                  "/etc/foreman/client_cert.pem"),
                        os.getenv('FOREMAN_SSL_KEY',
                                  "/etc/foreman/client_key.pem"))
    FOREMAN_SSL_VERIFY = os.getenv('FOREMAN_SSL_VERIFY', "1")
    FOREMAN_HEADERS = {
        "Content-Type": "application/json",
        "Accept": "application/json"
    }
    # Timestamp format Foreman expects in facts and reports.
    TIME_FORMAT = "%Y-%m-%d %H:%M:%S %f"

    def __init__(self):
        super(CallbackModule, self).__init__()
        # Per-host list of (task name, result dict) tuples, flushed on stats.
        self.items = defaultdict(list)
        self.start_time = int(time.time())

        if HAS_REQUESTS:
            # cert/verify keyword handling below needs requests >= 2.
            requests_major = int(requests.__version__.split('.')[0])
            if requests_major >= 2:
                self.ssl_verify = self._ssl_verify()
            else:
                self._disable_plugin('The `requests` python module is too old.')
        else:
            self._disable_plugin('The `requests` python module is not installed.')

    def _disable_plugin(self, msg):
        """Disable this callback and warn the user with *msg*."""
        self.disabled = True
        self._display.warning(msg + ' Disabling the Foreman callback plugin.')

    def _ssl_verify(self):
        """Translate FOREMAN_SSL_VERIFY into a requests `verify=` value."""
        if self.FOREMAN_SSL_VERIFY.lower() in ["1", "true", "on"]:
            verify = True
        elif self.FOREMAN_SSL_VERIFY.lower() in ["0", "false", "off"]:
            requests.packages.urllib3.disable_warnings()
            self._display.warning("SSL verification of %s disabled" %
                                  self.FOREMAN_URL)
            verify = False
        else:  # Set to a CA bundle path:
            verify = self.FOREMAN_SSL_VERIFY
        return verify

    def send_facts(self, host, data):
        """
        Sends facts to Foreman, to be parsed by foreman_ansible fact
        parser. The default fact importer should import these facts
        properly.
        """
        data["_type"] = "ansible"
        data["_timestamp"] = datetime.now().strftime(self.TIME_FORMAT)
        facts = {"name": host,
                 "facts": data,
                 }
        requests.post(url=self.FOREMAN_URL + '/api/v2/hosts/facts',
                      data=json.dumps(facts),
                      headers=self.FOREMAN_HEADERS,
                      cert=self.FOREMAN_SSL_CERT,
                      verify=self.ssl_verify)

    def _build_log(self, data):
        """Convert (source, result) tuples into Foreman report log entries."""
        logs = []
        for entry in data:
            source, msg = entry
            if 'failed' in msg:
                level = 'err'
            else:
                level = 'notice' if 'changed' in msg and msg['changed'] else 'info'
            logs.append({"log": {
                'sources': {'source': source},
                'messages': {'message': json.dumps(msg)},
                'level': level
            }})
        return logs

    def send_reports(self, stats):
        """
        Send reports to Foreman to be parsed by its config report
        importer. The data is in a format that Foreman can handle
        without writing another report importer.
        """
        # NOTE(review): `status` and `metrics` are created once and reused
        # across the per-host loop, so values carry over between hosts —
        # presumably acceptable since they are overwritten each iteration,
        # but verify this is intended.
        status = defaultdict(lambda: 0)
        metrics = {}

        for host in stats.processed.keys():
            # NOTE(review): `sum` shadows the builtin within this loop.
            sum = stats.summarize(host)
            status["applied"] = sum['changed']
            status["failed"] = sum['failures'] + sum['unreachable']
            status["skipped"] = sum['skipped']
            log = self._build_log(self.items[host])
            metrics["time"] = {"total": int(time.time()) - self.start_time}
            now = datetime.now().strftime(self.TIME_FORMAT)

            report = {
                "report": {
                    "host": host,
                    "reported_at": now,
                    "metrics": metrics,
                    "status": status,
                    "logs": log,
                }
            }
            # To be changed to /api/v2/config_reports in 1.11. Maybe we
            # could make a GET request to get the Foreman version & do
            # this automatically.
            requests.post(url=self.FOREMAN_URL + '/api/v2/reports',
                          data=json.dumps(report),
                          headers=self.FOREMAN_HEADERS,
                          cert=self.FOREMAN_SSL_CERT,
                          verify=self.ssl_verify)
            # Reset the buffered items once reported.
            self.items[host] = []

    def append_result(self, result):
        """Buffer one task result for the host it ran on."""
        name = result._task.get_name()
        host = result._host.get_name()
        self.items[host].append((name, result._result))

    # Ansible callback API

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.append_result(result)

    def v2_runner_on_unreachable(self, result):
        self.append_result(result)

    def v2_runner_on_async_ok(self, result, jid):
        self.append_result(result)

    def v2_runner_on_async_failed(self, result, jid):
        self.append_result(result)

    def v2_playbook_on_stats(self, stats):
        # End of the play: flush everything buffered so far to Foreman.
        self.send_reports(stats)

    def v2_runner_on_ok(self, result):
        res = result._result
        try:
            module = res['invocation']['module_name']
        except KeyError:
            module = None

        if module == 'setup':
            # Fact-gathering results go to the fact endpoint, not the report.
            host = result._host.get_name()
            self.send_facts(host, res)
        else:
            self.append_result(result)
gpl-3.0
jtpedersen/Maze-o-tron
gmock-1.7.0/test/gmock_output_test.py
986
5999
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Tests the text output of Google C++ Mocking Framework.

SYNOPSIS
       gmock_output_test.py --build_dir=BUILD/DIR --gengolden
         # where BUILD/DIR contains the built gmock_output_test_ file.
       gmock_output_test.py --gengolden
       gmock_output_test.py
"""

__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import re
import sys

import gmock_test_utils


# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'

# Command line used to run the C++ test binary whose output is compared
# against the golden file; stack traces and timing are suppressed so the
# output is deterministic.
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)


def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""

  return s.replace('\r\n', '\n').replace('\r', '\n')


def RemoveReportHeaderAndFooter(output):
  """Removes Google Test result report's header and footer from the output."""

  output = re.sub(r'.*gtest_main.*\n', '', output)
  output = re.sub(r'\[.*\d+ tests.*\n', '', output)
  output = re.sub(r'\[.* test environment .*\n', '', output)
  output = re.sub(r'\[=+\] \d+ tests .* ran.*', '', output)
  output = re.sub(r'.* FAILED TESTS\n', '', output)
  return output


def RemoveLocations(output):
  """Removes all file location info from a Google Test program's output.

  Args:
       output:  the output of a Google Test program.

  Returns:
       output with all file location info (in the form of
       'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
       'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
       'FILE:#: '.
  """

  return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\:', 'FILE:#:', output)


def NormalizeErrorMarker(output):
  """Normalizes the error marker, which is different on Windows vs on Linux."""

  return re.sub(r' error: ', ' Failure\n', output)


def RemoveMemoryAddresses(output):
  """Removes memory addresses from the test output."""

  return re.sub(r'@\w+', '@0x#', output)


def RemoveTestNamesOfLeakedMocks(output):
  """Removes the test names of leaked mock objects from the test output."""

  return re.sub(r'\(used in test .+\) ', '', output)


def GetLeakyTests(output):
  """Returns a list of test names that leak mock objects."""

  # findall() returns a list of all matches of the regex in output.
  # For example, if '(used in test FooTest.Bar)' is in output, the
  # list will contain 'FooTest.Bar'.
  return re.findall(r'\(used in test (.+)\)', output)


def GetNormalizedOutputAndLeakyTests(output):
  """Normalizes the output of gmock_output_test_.

  Args:
    output: The test output.

  Returns:
    A tuple (the normalized test output, the list of test names that have
    leaked mocks).
  """

  output = ToUnixLineEnding(output)
  output = RemoveReportHeaderAndFooter(output)
  output = NormalizeErrorMarker(output)
  output = RemoveLocations(output)
  output = RemoveMemoryAddresses(output)
  return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))


def GetShellCommandOutput(cmd):
  """Runs a command in a sub-process, and returns its STDOUT in a string."""

  return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output


def GetNormalizedCommandOutputAndLeakyTests(cmd):
  """Runs a command and returns its normalized output and a list of leaky
  tests.

  Args:
    cmd:  the shell command.
  """

  # Disables exception pop-ups on Windows.
  os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
  return GetNormalizedOutputAndLeakyTests(GetShellCommandOutput(cmd))


class GMockOutputTest(gmock_test_utils.TestCase):
  """Compares normalized gmock_output_test_ output against the golden file."""

  def testOutput(self):
    (output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    golden_file = open(GOLDEN_PATH, 'rb')
    golden = golden_file.read()
    golden_file.close()

    # The normalized output should match the golden file.
    self.assertEquals(golden, output)

    # The raw output should contain 2 leaked mock object errors for
    # test GMockOutputTest.CatchesLeakedMocks.
    self.assertEquals(['GMockOutputTest.CatchesLeakedMocks',
                       'GMockOutputTest.CatchesLeakedMocks'],
                      leaky_tests)


if __name__ == '__main__':
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    # Regenerate the golden file instead of comparing against it.
    (output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    golden_file = open(GOLDEN_PATH, 'wb')
    golden_file.write(output)
    golden_file.close()
  else:
    gmock_test_utils.Main()
gpl-3.0
Tokutek/tokudb-engine
mysql-test/suite/tokudb/t/change_column_int_not_supported.py
56
1592
#!/usr/bin/env python import sys def supported(from_int, from_modifier, to_int, to_modifer): if from_modifier != to_modifer: return False if from_int > to_int: return False return True def gen_tests_for_int(from_int, from_modifier, int_types, modifiers): for to_int in range(len(int_types)): for to_modifer in range(len(modifiers)): print print "CREATE TABLE t (a %s %s);" % (int_types[from_int], modifiers[from_modifier]) if not supported(from_int, from_modifier, to_int, to_modifer): print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/" print "--error ER_UNSUPPORTED_EXTENSION" print "ALTER TABLE t CHANGE COLUMN a a %s %s;" % (int_types[to_int], modifiers[to_modifer]) print "DROP TABLE t;" def gen_tests(int_types, modifiers): for from_int in range(len(int_types)): for from_modifier in range(len(modifiers)): gen_tests_for_int(from_int, from_modifier, int_types, modifiers) def main(): print "# this test is generated by change_int_not_supported.py" print "# ensure that int types are only expanded and are not cnverted to some other type" print "--disable_warnings" print "DROP TABLE IF EXISTS t;" print "--enable_warnings" print "SET SESSION DEFAULT_STORAGE_ENGINE=\"TokuDB\";" print "SET SESSION TOKUDB_DISABLE_SLOW_ALTER=1;" gen_tests( [ "TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT" ], [ "", "NOT NULL", "UNSIGNED", "UNSIGNED NOT NULL" ] ) return 0 sys.exit(main())
gpl-2.0
kaeff/pixelated-user-agent
service/pixelated/adapter/mailstore/body_parser.py
1
2298
#
# Copyright (c) 2015 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
# NOTE: Python 2 module — relies on the `unicode` builtin below.

from email.parser import Parser
import re


def _parse_charset_header(content_type_and_charset_header, default_charset='us-ascii'):
    # Extract the charset token from a Content-Type header value, e.g.
    # 'text/plain; charset="utf-8"' -> 'utf-8'. Falls back to
    # *default_charset* when no charset parameter is present.
    # NOTE(review): the bare except deliberately treats any parse failure
    # (match() returning None raises AttributeError) as "no charset".
    try:
        return re.compile('.*charset="?([a-zA-Z0-9-]+)"?', re.MULTILINE | re.DOTALL).match(content_type_and_charset_header).group(1)
    except:
        return default_charset


class BodyParser(object):
    """Decodes a MIME body part given its content, type and transfer encoding.

    The part is re-serialized into a minimal RFC 822 message so that the
    stdlib email parser can undo the Content-Transfer-Encoding.
    """

    def __init__(self, content, content_type='text/plain; charset="us-ascii"', content_transfer_encoding=None):
        self._content = content
        self._content_type = content_type
        self._content_transfer_encoding = content_transfer_encoding

    def parsed_content(self):
        """Return the decoded body as a unicode string in the part's charset."""
        charset = _parse_charset_header(self._content_type)
        text = self._serialize_for_parser(charset)
        decoded_body = self._parse_and_decode(text)
        return unicode(decoded_body, encoding=charset)

    def _parse_and_decode(self, text):
        # Parse the synthetic message and strip the transfer encoding.
        parsed_body = Parser().parsestr(text)
        decoded_body = self._unwrap_content_transfer_encoding(parsed_body)
        return decoded_body

    def _unwrap_content_transfer_encoding(self, parsed_body):
        # decode=True undoes base64/quoted-printable transfer encodings.
        return parsed_body.get_payload(decode=True)

    def _serialize_for_parser(self, charset):
        # Build a minimal header block + body for the email parser.
        text = ''
        text += 'Content-Type: %s\n' % self._content_type
        if self._content_transfer_encoding is not None:
            text += 'Content-Transfer-Encoding: %s\n' % self._content_transfer_encoding
        text += '\n'
        if isinstance(self._content, unicode):
            # Unicode content must be byte-encoded in the declared charset
            # before the (byte-oriented) parser sees it.
            text += self._content.encode(charset)
        else:
            text += self._content
        return text
agpl-3.0
lsst/sims_catalogs_generation
python/lsst/sims/catalogs/generation/deprecated/jobAllocator/myJobTracker.py
2
2951
# Tracks the lifecycle state (qsubbed/running/finished/failed) of a single
# per-sensor image-simulation job in the jobDB state store, and maintains a
# running count of active jobs per parent job id.
# NOTE: Python 2 script (print statements).
import sys, random, time
from lsst.sims.catalogs.generation.db import jobDB

def howManyJobs(eM, jobid):
    # Read the current active-job counter for *jobid*; missing counter -> 0.
    t0 = eM.queryState(jobid + '_NumJobs')
    if t0 == None:
        t0 = 0
    else:
        t0 = int(t0)
    return t0

def qsubJob(eM, stateKey, jobid):
    # Increment the active-job counter and mark this job as QSUBBED,
    # recording the queue timestamp under <stateKey>_QT.
    t0 = howManyJobs(eM, jobid)
    print 'AddedJob: Current number of active jobs: ', t0
    t1 = int(t0) + 1
    eM.updateState(jobid + '_NumJobs', str(t1))
    print 'AddedJob: New number of active jobs: ', t1
    eM.updateState(stateKey, 'QSUBBED')
    eM.updateState(stateKey + '_QT', time.ctime())
    print 'Updated the state of job %s as QSUBBED.' %(stateKey)

def jobRunning(eM, stateKey, jobid):
    # Mark RUNNING and record the run timestamp; the counter is unchanged.
    eM.updateState(stateKey, 'RUNNING')
    eM.updateState(stateKey + '_RT', time.ctime())
    print 'Updated the state of job %s as RUNNING.' %(stateKey)

def jobFinished(eM, stateKey, jobid):
    # Decrement the active-job counter and mark FINISHED (+_FT timestamp).
    t0 = howManyJobs(eM, jobid)
    print 'AddedJob: Current number of active jobs: ', t0
    t1 = int(t0) - 1
    eM.updateState(jobid + '_NumJobs', str(t1))
    print 'AddedJob: New number of active jobs: ', t1
    eM.updateState(stateKey, 'FINISHED')
    eM.updateState(stateKey + '_FT', time.ctime())
    print 'Updated the state of job %s as FINISHED.' %(stateKey)

def jobError(eM, stateKey, jobid):
    # Decrement the active-job counter and mark FAILED (+_ET timestamp).
    t0 = howManyJobs(eM, jobid)
    print 'AddedJob: Current number of active jobs: ', t0
    t1 = int(t0) - 1
    eM.updateState(jobid + '_NumJobs', str(t1))
    print 'AddedJob: New number of active jobs: ', t1
    eM.updateState(stateKey, 'FAILED')
    eM.updateState(stateKey + '_ET', time.ctime())
    print 'Updated the state of job %s as FAILED.' %(stateKey)

if not len(sys.argv) == 5:
    print "usage: %python myJobTracker.py obshistid state sensorId username"
    quit()

obshistid = sys.argv[1]
state = sys.argv[2]
# Must be in the form: sensorId = rx+ry+'_'+sx+sy+'_'+ex
sensorId = sys.argv[3]
username = sys.argv[4]
rxry, sxsy, ex = sensorId.split('_')
# Map compact raft/sensor ids ("01") onto comma-separated coordinates ("0,1").
# Corner rafts (00, 04, 40, 44) do not exist on the focal plane, hence the gaps.
raftmap = {"01":"0,1", "02":"0,2", "03":"0,3", \
           "10":"1,0", "11":"1,1", "12":"1,2", "13":"1,3", "14":"1,4", \
           "20":"2,0", "21":"2,1", "22":"2,2", "23":"2,3", "24":"2,4", \
           "30":"3,0", "31":"3,1", "32":"3,2", "33":"3,3", "34":"3,4", \
           "41":"4,1", "42":"4,2", "43":"4,3"}
sensormap = {"00":"0,0", "01":"0,1", "02":"0,2", \
             "10":"1,0", "11":"1,1", "12":"1,2", \
             "20":"2,0", "21":"2,1", "22":"2,2"}
# constructed to have the form "R:rx,ry S:sx,sy:snap"
# which is how the fpaFig.map keys are constructed
sensorid = "R:"+raftmap[rxry]+" "+"S:"+sensormap[sxsy]+":"+ex
jobid = jobDB.JobId(id=obshistid, owner=username)
jobStr = str(jobid)
eM = jobDB.JobState(jobid=jobid)
# NOTE(review): stateKey is computed but the calls below pass `sensorid`
# as the state key instead — looks like stateKey is unused/dead; confirm
# which key the rest of the pipeline reads.
stateKey = jobStr + '_%s' %(sensorid) + '_JS'
if state == 'qsubbed':
    qsubJob(eM, sensorid, jobStr)
if state == 'running':
    jobRunning(eM, sensorid, jobStr)
if state == 'finished':
    jobFinished(eM, sensorid, jobStr)
if state == 'error':
    jobError(eM, sensorid, jobStr)
gpl-3.0
papouso/odoo
openerp/addons/base/res/res_request.py
342
1677
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv, fields def referencable_models(self, cr, uid, context=None): obj = self.pool.get('res.request.link') ids = obj.search(cr, uid, [], context=context) res = obj.read(cr, uid, ids, ['object', 'name'], context) return [(r['object'], r['name']) for r in res] class res_request_link(osv.osv): _name = 'res.request.link' _columns = { 'name': fields.char('Name', required=True, translate=True), 'object': fields.char('Object', required=True), 'priority': fields.integer('Priority'), } _defaults = { 'priority': 5, } _order = 'priority' # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
openstack/cloudbase-init
cloudbaseinit/tests/plugins/common/userdataplugins/test_shellscript.py
4
3275
# Copyright 2013 Cloudbase Solutions Srl
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import os
import unittest

# Python 3 ships mock in the stdlib; fall back to the standalone package on 2.
try:
    import unittest.mock as mock
except ImportError:
    import mock

from cloudbaseinit.plugins.common.userdataplugins import shellscript
from cloudbaseinit.tests import testutils


class ShellScriptPluginTests(unittest.TestCase):
    """Tests for ShellScriptPlugin.process with every filesystem/exec
    interaction mocked out."""

    def setUp(self):
        self._shellscript = shellscript.ShellScriptPlugin()

    # NOTE: decorators are applied bottom-up, so the parameter order below
    # is the reverse of the decorator order — keep them in sync.
    @mock.patch('os.path.exists')
    @mock.patch('os.remove')
    @mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
    @mock.patch('tempfile.gettempdir')
    @mock.patch('cloudbaseinit.plugins.common.fileexecutils.exec_file')
    @mock.patch('cloudbaseinit.utils.encoding.write_file')
    def _test_process(self, mock_write_file, mock_exec_file, mock_gettempdir,
                      mock_get_os_utils, mock_os_remove, mock_path_exists,
                      exception=False):
        """Drive process() once; *exception* simulates a failing user script."""
        mock_path_exists.return_value = True
        fake_dir_path = os.path.join("fake", "dir")
        mock_osutils = mock.MagicMock()
        mock_part = mock.MagicMock()
        mock_part.get_filename.return_value = "fake_filename"
        mock_gettempdir.return_value = fake_dir_path
        mock_get_os_utils.return_value = mock_osutils
        # The plugin is expected to write the payload to <tempdir>/<filename>.
        fake_target = os.path.join(fake_dir_path, "fake_filename")
        mock_exec_file.return_value = 'fake response'
        if exception:
            mock_exec_file.side_effect = [Exception]
        with mock.patch("cloudbaseinit.plugins.common.userdataplugins."
                        "shellscript.open", mock.mock_open(), create=True):
            with testutils.LogSnatcher('cloudbaseinit.plugins.common.'
                                       'userdataplugins.'
                                       'shellscript') as snatcher:
                response = self._shellscript.process(mock_part)

        # The script must be written, executed, and cleaned up exactly once.
        mock_part.get_filename.assert_called_once_with()
        mock_write_file.assert_called_once_with(
            fake_target, mock_part.get_payload.return_value)
        mock_exec_file.assert_called_once_with(fake_target)
        mock_part.get_payload.assert_called_once_with(decode=True)
        mock_gettempdir.assert_called_once_with()
        if not exception:
            self.assertEqual('fake response', response)
        else:
            # Failures are logged, not raised.
            expected_logging = 'An error occurred during user_data execution'
            self.assertTrue(snatcher.output[0].startswith(expected_logging))
        mock_os_remove.assert_called_once_with(fake_target)
        mock_path_exists.assert_called_once_with(fake_target)

    def test_process(self):
        self._test_process(exception=False)

    def test_process_exception(self):
        self._test_process(exception=True)
apache-2.0
miles0411/pm
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/connection.py
679
3293
import socket try: from select import poll, POLLIN except ImportError: # `poll` doesn't exist on OSX and other platforms poll = False try: from select import select except ImportError: # `select` doesn't exist on AppEngine. select = False def is_connection_dropped(conn): # Platform-specific """ Returns True if the connection is dropped and should be closed. :param conn: :class:`httplib.HTTPConnection` object. Note: For platforms like AppEngine, this will always return ``False`` to let the platform handle connection recycling transparently for us. """ sock = getattr(conn, 'sock', False) if sock is False: # Platform-specific: AppEngine return False if sock is None: # Connection already closed (such as by httplib). return True if not poll: if not select: # Platform-specific: AppEngine return False try: return select([sock], [], [], 0.0)[0] except socket.error: return True # This version is better on platforms that support it. p = poll() p.register(sock, POLLIN) for (fno, ev) in p.poll(0.0): if fno == sock.fileno(): # Either data is buffered (bad), or the connection is dropped. return True # This function is copied from socket.py in the Python 2.7 standard # library test suite. Added to its signature is only `socket_options`. def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, socket_options=None): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. An host of '' or port 0 tells the OS to use the default. 
""" host, port = address err = None for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket.socket(af, socktype, proto) # If provided, set socket level options before connecting. # This is the only addition urllib3 makes to this function. _set_socket_options(sock, socket_options) if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) if source_address: sock.bind(source_address) sock.connect(sa) return sock except socket.error as _: err = _ if sock is not None: sock.close() sock = None if err is not None: raise err else: raise socket.error("getaddrinfo returns an empty list") def _set_socket_options(sock, options): if options is None: return for opt in options: sock.setsockopt(*opt)
apache-2.0
nwjs/chromium.src
third_party/protobuf/python/google/protobuf/internal/testing_refleaks.py
25
4659
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""A subclass of unittest.TestCase which checks for reference leaks.

To use:
- Use testing_refleak.BaseTestCase instead of unittest.TestCase
- Configure and compile Python with --with-pydebug

If sys.gettotalrefcount() is not available (because Python was built without
the Py_DEBUG option), then this module is a no-op and tests will run normally.
"""

import gc
import sys

try:
    import copy_reg as copyreg  # PY26
except ImportError:
    import copyreg

try:
    import unittest2 as unittest  # PY26
except ImportError:
    import unittest


class LocalTestResult(unittest.TestResult):
    """A TestResult which forwards events to a parent object, except for Skips."""

    def __init__(self, parent_result):
        unittest.TestResult.__init__(self)
        self.parent_result = parent_result

    def addError(self, test, error):
        self.parent_result.addError(test, error)

    def addFailure(self, test, error):
        self.parent_result.addFailure(test, error)

    def addSkip(self, test, reason):
        # Skips are deliberately not propagated to the parent result.
        pass


class ReferenceLeakCheckerMixin(object):
    """A mixin class for TestCase, which checks reference counts."""

    NB_RUNS = 3

    def run(self, result=None):
        # python_message.py registers all Message classes in the pickle
        # dispatch registry, which makes them immortal; snapshot the
        # registry here so _getRefcounts() can restore it before counting.
        self._saved_pickle_registry = copyreg.dispatch_table.copy()

        # Two warm-up executions let instance attributes and caches settle
        # before any refcount deltas are measured.
        super(ReferenceLeakCheckerMixin, self).run(result=result)
        super(ReferenceLeakCheckerMixin, self).run(result=result)

        local_result = LocalTestResult(result)
        deltas = []
        for _ in range(self.NB_RUNS):
            before = self._getRefcounts()
            super(ReferenceLeakCheckerMixin, self).run(result=local_result)
            deltas.append(self._getRefcounts() - before)
        print(deltas, self)
        try:
            self.assertEqual(deltas, [0] * self.NB_RUNS)
        except Exception:  # pylint: disable=broad-except
            result.addError(self, sys.exc_info())

    def _getRefcounts(self):
        copyreg.dispatch_table.clear()
        copyreg.dispatch_table.update(self._saved_pickle_registry)
        # It is sometimes necessary to gc.collect() multiple times, to ensure
        # that all objects can be collected.
        gc.collect()
        gc.collect()
        gc.collect()
        return sys.gettotalrefcount()


if hasattr(sys, 'gettotalrefcount'):

    def TestCase(test_class):
        # Rebuild the class with the leak-checking mixin prepended to its
        # bases; name and body are preserved.
        new_bases = (ReferenceLeakCheckerMixin,) + test_class.__bases__
        return type(test_class)(
            test_class.__name__, new_bases, dict(test_class.__dict__))

    SkipReferenceLeakChecker = unittest.skip

else:
    # When PyDEBUG is not enabled, run the tests normally.

    def TestCase(test_class):
        return test_class

    def SkipReferenceLeakChecker(reason):
        del reason  # Don't skip, so don't need a reason.

        def Same(func):
            return func
        return Same
bsd-3-clause
indictranstech/focal-erpnext
support/doctype/newsletter/newsletter.py
31
4539
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe
import frappe.utils
from frappe.utils import cstr
from frappe import throw, _
from frappe.model.document import Document

import erpnext.tasks


class Newsletter(Document):
	"""Newsletter: composes and bulk-sends an email to Leads, Contacts,
	Employees or an ad-hoc list of addresses."""

	def onload(self):
		"""Attach per-status Bulk Email delivery counts once the
		newsletter has been sent (for display in the form)."""
		if self.email_sent:
			self.get("__onload").status_count = dict(frappe.db.sql("""select status, count(name)
				from `tabBulk Email` where ref_doctype=%s and ref_docname=%s
				group by status""", (self.doctype, self.name))) or None

	def test_send(self, doctype="Lead"):
		"""Send only to the comma-separated addresses in test_email_id,
		treating them as documents of `doctype`."""
		self.recipients = self.test_email_id.split(",")
		# Fix: honour the doctype argument; it was previously ignored and
		# hard-coded to "Lead".
		self.send_to_doctype = doctype
		self.send_bulk()
		frappe.msgprint(_("Scheduled to send to {0}").format(self.test_email_id))

	def send_emails(self):
		"""send emails to leads and customers"""
		if self.email_sent:
			throw(_("Newsletter has already been sent"))

		self.recipients = self.get_recipients()

		if getattr(frappe.local, "is_ajax", False):
			# to avoid request timed out!
			# Validate eagerly so the user sees errors before the job is
			# queued; event="bulk_long" queues it in the longjob queue.
			self.validate_send()
			erpnext.tasks.send_newsletter.delay(frappe.local.site, self.name, event="bulk_long")
		else:
			self.send_bulk()

		frappe.msgprint(_("Scheduled to send to {0} recipients").format(len(self.recipients)))

		frappe.db.set(self, "email_sent", 1)

	def get_recipients(self):
		"""Return recipient email addresses for the configured
		send_to_type; also sets self.send_to_doctype (and, for
		employees, self.email_field)."""
		self.email_field = None

		if self.send_to_type == "Contact":
			self.send_to_doctype = "Contact"
			if self.contact_type == "Customer":
				return frappe.db.sql_list("""select email_id from tabContact
					where ifnull(email_id, '') != '' and ifnull(customer, '') != ''""")
			elif self.contact_type == "Supplier":
				return frappe.db.sql_list("""select email_id from tabContact
					where ifnull(email_id, '') != '' and ifnull(supplier, '') != ''""")

		elif self.send_to_type == "Lead":
			self.send_to_doctype = "Lead"
			# Security fix: the old code interpolated the user-selected
			# filter values into the SQL string with a no-op escape
			# (.replace("'", "\'") leaves the value unchanged), which was
			# an SQL-injection risk. Use parameterized placeholders.
			conditions = []
			values = []
			if self.lead_source and self.lead_source != "All":
				conditions.append(" and source=%s")
				values.append(self.lead_source)
			if self.lead_status and self.lead_status != "All":
				conditions.append(" and status=%s")
				values.append(self.lead_status)

			return frappe.db.sql_list("""select email_id from tabLead
				where ifnull(email_id, '') != '' %s""" % "".join(conditions), values)

		elif self.send_to_type == "Employee":
			self.send_to_doctype = "Employee"
			self.email_field = "company_email"

			# Fall back to the personal address when no company email is set.
			return frappe.db.sql_list("""select if(ifnull(company_email, '')!='',
				company_email, personal_email) as email_id from `tabEmployee`
				where status='Active'""")

		elif self.email_list:
			email_list = [cstr(email).strip() for email in self.email_list.split(",")]
			for email in email_list:
				create_lead(email)

			self.send_to_doctype = "Lead"
			return email_list

	def send_bulk(self):
		"""Queue the newsletter to self.recipients via the Bulk Email system."""
		if not self.get("recipients"):
			# in case it is called via worker
			self.recipients = self.get_recipients()

		self.validate_send()

		sender = self.send_from or frappe.utils.get_formatted_email(self.owner)

		from frappe.utils.email_lib.bulk import send

		if not frappe.flags.in_test:
			frappe.db.auto_commit_on_many_writes = True

		send(recipients=self.recipients, sender=sender,
			subject=self.subject, message=self.message,
			doctype=self.send_to_doctype, email_field=self.get("email_field") or "email_id",
			ref_doctype=self.doctype, ref_docname=self.name)

		if not frappe.flags.in_test:
			frappe.db.auto_commit_on_many_writes = False

	def validate_send(self):
		"""Disallow sending an unsaved newsletter."""
		if self.get("__islocal"):
			throw(_("Please save the Newsletter before sending"))


@frappe.whitelist()
def get_lead_options():
	"""Return distinct Lead sources and statuses for the filter UI."""
	# Fix: wrap filter() in list() so this also works on Python 3, where
	# filter() returns an iterator that cannot be added to a list.
	return {
		"sources": ["All"] + list(filter(None,
			frappe.db.sql_list("""select distinct source from tabLead"""))),
		"statuses": ["All"] + list(filter(None,
			frappe.db.sql_list("""select distinct status from tabLead""")))
	}


def create_lead(email_id):
	"""create a lead if it does not exist"""
	from email.utils import parseaddr
	from frappe.model.naming import get_default_naming_series
	real_name, email_id = parseaddr(email_id)

	if frappe.db.get_value("Lead", {"email_id": email_id}):
		return

	lead = frappe.get_doc({
		"doctype": "Lead",
		"email_id": email_id,
		"lead_name": real_name or email_id,
		"status": "Lead",
		"naming_series": get_default_naming_series("Lead"),
		"company": frappe.db.get_default("company"),
		"source": "Email"
	})
	lead.insert()
h3biomed/ansible
lib/ansible/modules/cloud/amazon/s3_website.py
39
10634
#!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: s3_website short_description: Configure an s3 bucket as a website description: - Configure an s3 bucket as a website version_added: "2.2" requirements: [ boto3 ] author: Rob White (@wimnat) options: name: description: - "Name of the s3 bucket" required: true error_key: description: - "The object key name to use when a 4XX class error occurs. To remove an error key, set to None." redirect_all_requests: description: - "Describes the redirect behavior for every request to this s3 bucket website endpoint" region: description: - > AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. state: description: - "Add or remove s3 website configuration" default: present choices: [ 'present', 'absent' ] suffix: description: - > Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash character. default: index.html extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
# Configure an s3 bucket to redirect all requests to example.com - s3_website: name: mybucket.com redirect_all_requests: example.com state: present # Remove website configuration from an s3 bucket - s3_website: name: mybucket.com state: absent # Configure an s3 bucket as a website with index and error pages - s3_website: name: mybucket.com suffix: home.htm error_key: errors/404.htm state: present ''' RETURN = ''' index_document: description: index document type: complex returned: always contains: suffix: description: suffix that is appended to a request that is for a directory on the website endpoint returned: success type: str sample: index.html error_document: description: error document type: complex returned: always contains: key: description: object key name to use when a 4XX class error occurs returned: when error_document parameter set type: str sample: error.html redirect_all_requests_to: description: where to redirect requests type: complex returned: always contains: host_name: description: name of the host where requests will be redirected. returned: when redirect all requests parameter set type: str sample: ansible.com routing_rules: description: routing rules type: complex returned: always contains: routing_rule: host_name: description: name of the host where requests will be redirected. returned: when host name set as part of redirect rule type: str sample: ansible.com condition: key_prefix_equals: description: object key name prefix when the redirect is applied. 
For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html returned: when routing rule present type: str sample: docs/ redirect: replace_key_prefix_with: description: object key prefix to use in the redirect request returned: when routing rule present type: str sample: documents/ ''' import time try: import boto3 from botocore.exceptions import ClientError, ParamValidationError HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info) def _create_redirect_dict(url): redirect_dict = {} url_split = url.split(':') # Did we split anything? if len(url_split) == 2: redirect_dict[u'Protocol'] = url_split[0] redirect_dict[u'HostName'] = url_split[1].replace('//', '') elif len(url_split) == 1: redirect_dict[u'HostName'] = url_split[0] else: raise ValueError('Redirect URL appears invalid') return redirect_dict def _create_website_configuration(suffix, error_key, redirect_all_requests): website_configuration = {} if error_key is not None: website_configuration['ErrorDocument'] = {'Key': error_key} if suffix is not None: website_configuration['IndexDocument'] = {'Suffix': suffix} if redirect_all_requests is not None: website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests) return website_configuration def enable_or_update_bucket_as_website(client_connection, resource_connection, module): bucket_name = module.params.get("name") redirect_all_requests = module.params.get("redirect_all_requests") # If redirect_all_requests is set then don't use the default suffix that has been set if redirect_all_requests is not None: suffix = None else: suffix = module.params.get("suffix") error_key = module.params.get("error_key") changed = False try: bucket_website = resource_connection.BucketWebsite(bucket_name) except ClientError as e: 
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) try: website_config = client_connection.get_bucket_website(Bucket=bucket_name) except ClientError as e: if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration': website_config = None else: module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) if website_config is None: try: bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) changed = True except (ClientError, ParamValidationError) as e: module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) except ValueError as e: module.fail_json(msg=str(e)) else: try: if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \ (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \ (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)): try: bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) changed = True except (ClientError, ParamValidationError) as e: module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) except KeyError as e: try: bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) changed = True except (ClientError, ParamValidationError) as e: module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) except ValueError as e: module.fail_json(msg=str(e)) # Wait 5 secs before getting the website_config again to give it time to update time.sleep(5) website_config = client_connection.get_bucket_website(Bucket=bucket_name) module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config)) def disable_bucket_as_website(client_connection, module): changed = False bucket_name = module.params.get("name") try: client_connection.get_bucket_website(Bucket=bucket_name) except 
ClientError as e: if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration': module.exit_json(changed=changed) else: module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) try: client_connection.delete_bucket_website(Bucket=bucket_name) changed = True except ClientError as e: module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) module.exit_json(changed=changed) def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( name=dict(type='str', required=True), state=dict(type='str', required=True, choices=['present', 'absent']), suffix=dict(type='str', required=False, default='index.html'), error_key=dict(type='str', required=False), redirect_all_requests=dict(type='str', required=False) ) ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ ['redirect_all_requests', 'suffix'], ['redirect_all_requests', 'error_key'] ]) if not HAS_BOTO3: module.fail_json(msg='boto3 required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) if region: client_connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params) resource_connection = boto3_conn(module, conn_type='resource', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params) else: module.fail_json(msg="region must be specified") state = module.params.get("state") if state == 'present': enable_or_update_bucket_as_website(client_connection, resource_connection, module) elif state == 'absent': disable_bucket_as_website(client_connection, module) if __name__ == '__main__': main()
gpl-3.0
bank-netforce/netforce
netforce_support/setup.py
4
1260
#!/usr/bin/env python3 # Copyright (c) 2012-2015 Netforce Co. Ltd. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. from setuptools import setup setup( name="netforce_support", version="3.1.0", description="Customer support module", )
mit
buqing2009/MissionPlanner
Lib/imaplib.py
55
49183
"""IMAP4 client. Based on RFC 2060. Public class: IMAP4 Public variable: Debug Public functions: Internaldate2tuple Int2AP ParseFlags Time2Internaldate """ # Author: Piers Lauder <piers@cs.su.oz.au> December 1997. # # Authentication code contributed by Donn Cave <donn@u.washington.edu> June 1998. # String method conversion by ESR, February 2001. # GET/SETACL contributed by Anthony Baxter <anthony@interlink.com.au> April 2001. # IMAP4_SSL contributed by Tino Lange <Tino.Lange@isg.de> March 2002. # GET/SETQUOTA contributed by Andreas Zeidler <az@kreativkombinat.de> June 2002. # PROXYAUTH contributed by Rick Holbert <holbert.13@osu.edu> November 2002. # GET/SETANNOTATION contributed by Tomas Lindroos <skitta@abo.fi> June 2005. __version__ = "2.58" import binascii, errno, random, re, socket, subprocess, sys, time __all__ = ["IMAP4", "IMAP4_stream", "Internaldate2tuple", "Int2AP", "ParseFlags", "Time2Internaldate"] # Globals CRLF = '\r\n' Debug = 0 IMAP4_PORT = 143 IMAP4_SSL_PORT = 993 AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first # Commands Commands = { # name valid states 'APPEND': ('AUTH', 'SELECTED'), 'AUTHENTICATE': ('NONAUTH',), 'CAPABILITY': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'), 'CHECK': ('SELECTED',), 'CLOSE': ('SELECTED',), 'COPY': ('SELECTED',), 'CREATE': ('AUTH', 'SELECTED'), 'DELETE': ('AUTH', 'SELECTED'), 'DELETEACL': ('AUTH', 'SELECTED'), 'EXAMINE': ('AUTH', 'SELECTED'), 'EXPUNGE': ('SELECTED',), 'FETCH': ('SELECTED',), 'GETACL': ('AUTH', 'SELECTED'), 'GETANNOTATION':('AUTH', 'SELECTED'), 'GETQUOTA': ('AUTH', 'SELECTED'), 'GETQUOTAROOT': ('AUTH', 'SELECTED'), 'MYRIGHTS': ('AUTH', 'SELECTED'), 'LIST': ('AUTH', 'SELECTED'), 'LOGIN': ('NONAUTH',), 'LOGOUT': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'), 'LSUB': ('AUTH', 'SELECTED'), 'NAMESPACE': ('AUTH', 'SELECTED'), 'NOOP': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'), 'PARTIAL': ('SELECTED',), # NB: obsolete 'PROXYAUTH': ('AUTH',), 'RENAME': ('AUTH', 'SELECTED'), 'SEARCH': ('SELECTED',), 
'SELECT': ('AUTH', 'SELECTED'), 'SETACL': ('AUTH', 'SELECTED'), 'SETANNOTATION':('AUTH', 'SELECTED'), 'SETQUOTA': ('AUTH', 'SELECTED'), 'SORT': ('SELECTED',), 'STATUS': ('AUTH', 'SELECTED'), 'STORE': ('SELECTED',), 'SUBSCRIBE': ('AUTH', 'SELECTED'), 'THREAD': ('SELECTED',), 'UID': ('SELECTED',), 'UNSUBSCRIBE': ('AUTH', 'SELECTED'), } # Patterns to match server responses Continuation = re.compile(r'\+( (?P<data>.*))?') Flags = re.compile(r'.*FLAGS \((?P<flags>[^\)]*)\)') InternalDate = re.compile(r'.*INTERNALDATE "' r'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])' r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])' r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])' r'"') Literal = re.compile(r'.*{(?P<size>\d+)}$') MapCRLF = re.compile(r'\r\n|\r|\n') Response_code = re.compile(r'\[(?P<type>[A-Z-]+)( (?P<data>[^\]]*))?\]') Untagged_response = re.compile(r'\* (?P<type>[A-Z-]+)( (?P<data>.*))?') Untagged_status = re.compile(r'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?') class IMAP4: """IMAP4 client class. Instantiate with: IMAP4([host[, port]]) host - host's name (default: localhost); port - port number (default: standard IMAP4 port). All IMAP4rev1 commands are supported by methods of the same name (in lower-case). All arguments to commands are converted to strings, except for AUTHENTICATE, and the last argument to APPEND which is passed as an IMAP4 literal. If necessary (the string contains any non-printing characters or white-space and isn't enclosed with either parentheses or double quotes) each string is quoted. However, the 'password' argument to the LOGIN command is always quoted. If you want to avoid having an argument string quoted (eg: the 'flags' argument to STORE) then enclose the string in parentheses (eg: "(\Deleted)"). 
Each command returns a tuple: (type, [data, ...]) where 'type' is usually 'OK' or 'NO', and 'data' is either the text from the tagged response, or untagged results from command. Each 'data' is either a string, or a tuple. If a tuple, then the first part is the header of the response, and the second part contains the data (ie: 'literal' value). Errors raise the exception class <instance>.error("<reason>"). IMAP4 server errors raise <instance>.abort("<reason>"), which is a sub-class of 'error'. Mailbox status changes from READ-WRITE to READ-ONLY raise the exception class <instance>.readonly("<reason>"), which is a sub-class of 'abort'. "error" exceptions imply a program error. "abort" exceptions imply the connection should be reset, and the command re-tried. "readonly" exceptions imply the command should be re-tried. Note: to use this module, you must read the RFCs pertaining to the IMAP4 protocol, as the semantics of the arguments to each IMAP4 command are left to the invoker, not to mention the results. Also, most IMAP servers implement a sub-set of the commands available here. """ class error(Exception): pass # Logical errors - debug required class abort(error): pass # Service errors - close and retry class readonly(abort): pass # Mailbox status changed to READ-ONLY mustquote = re.compile(r"[^\w!#$%&'*+,.:;<=>?^`|~-]") def __init__(self, host = '', port = IMAP4_PORT): self.debug = Debug self.state = 'LOGOUT' self.literal = None # A literal argument to a command self.tagged_commands = {} # Tagged commands awaiting response self.untagged_responses = {} # {typ: [data, ...], ...} self.continuation_response = '' # Last continuation response self.is_readonly = False # READ-ONLY desired state self.tagnum = 0 # Open socket to server. self.open(host, port) # Create unique tag for this session, # and compile tagged response matcher. 
self.tagpre = Int2AP(random.randint(4096, 65535)) self.tagre = re.compile(r'(?P<tag>' + self.tagpre + r'\d+) (?P<type>[A-Z]+) (?P<data>.*)') # Get server welcome message, # request and store CAPABILITY response. if __debug__: self._cmd_log_len = 10 self._cmd_log_idx = 0 self._cmd_log = {} # Last `_cmd_log_len' interactions if self.debug >= 1: self._mesg('imaplib version %s' % __version__) self._mesg('new IMAP4 connection, tag=%s' % self.tagpre) self.welcome = self._get_response() if 'PREAUTH' in self.untagged_responses: self.state = 'AUTH' elif 'OK' in self.untagged_responses: self.state = 'NONAUTH' else: raise self.error(self.welcome) typ, dat = self.capability() if dat == [None]: raise self.error('no CAPABILITY response from server') self.capabilities = tuple(dat[-1].upper().split()) if __debug__: if self.debug >= 3: self._mesg('CAPABILITIES: %r' % (self.capabilities,)) for version in AllowedVersions: if not version in self.capabilities: continue self.PROTOCOL_VERSION = version return raise self.error('server not IMAP4 compliant') def __getattr__(self, attr): # Allow UPPERCASE variants of IMAP4 command methods. if attr in Commands: return getattr(self, attr.lower()) raise AttributeError("Unknown IMAP4 command: '%s'" % attr) # Overridable methods def open(self, host = '', port = IMAP4_PORT): """Setup connection to remote server on "host:port" (default: localhost:standard IMAP4 port). This connection will be used by the routines: read, readline, send, shutdown. 
""" self.host = host self.port = port self.sock = socket.create_connection((host, port)) self.file = self.sock.makefile('rb') def read(self, size): """Read 'size' bytes from remote.""" return self.file.read(size) def readline(self): """Read line from remote.""" return self.file.readline() def send(self, data): """Send data to remote.""" self.sock.sendall(data) def shutdown(self): """Close I/O established in "open".""" self.file.close() try: self.sock.shutdown(socket.SHUT_RDWR) except socket.error as e: # The server might already have closed the connection if e.errno != errno.ENOTCONN: raise finally: self.sock.close() def socket(self): """Return socket instance used to connect to IMAP4 server. socket = <instance>.socket() """ return self.sock # Utility methods def recent(self): """Return most recent 'RECENT' responses if any exist, else prompt server for an update using the 'NOOP' command. (typ, [data]) = <instance>.recent() 'data' is None if no new messages, else list of RECENT responses, most recent last. """ name = 'RECENT' typ, dat = self._untagged_response('OK', [None], name) if dat[-1]: return typ, dat typ, dat = self.noop() # Prod server for response return self._untagged_response(typ, dat, name) def response(self, code): """Return data for response 'code' if received, or None. Old value for response 'code' is cleared. (code, [data]) = <instance>.response(code) """ return self._untagged_response(code, [None], code.upper()) # IMAP4 commands def append(self, mailbox, flags, date_time, message): """Append message to named mailbox. (typ, [data]) = <instance>.append(mailbox, flags, date_time, message) All args except `message' can be None. 
""" name = 'APPEND' if not mailbox: mailbox = 'INBOX' if flags: if (flags[0],flags[-1]) != ('(',')'): flags = '(%s)' % flags else: flags = None if date_time: date_time = Time2Internaldate(date_time) else: date_time = None self.literal = MapCRLF.sub(CRLF, message) return self._simple_command(name, mailbox, flags, date_time) def authenticate(self, mechanism, authobject): """Authenticate command - requires response processing. 'mechanism' specifies which authentication mechanism is to be used - it must appear in <instance>.capabilities in the form AUTH=<mechanism>. 'authobject' must be a callable object: data = authobject(response) It will be called to process server continuation responses. It should return data that will be encoded and sent to server. It should return None if the client abort response '*' should be sent instead. """ mech = mechanism.upper() # XXX: shouldn't this code be removed, not commented out? #cap = 'AUTH=%s' % mech #if not cap in self.capabilities: # Let the server decide! # raise self.error("Server doesn't allow %s authentication." % mech) self.literal = _Authenticator(authobject).process typ, dat = self._simple_command('AUTHENTICATE', mech) if typ != 'OK': raise self.error(dat[-1]) self.state = 'AUTH' return typ, dat def capability(self): """(typ, [data]) = <instance>.capability() Fetch capabilities list from server.""" name = 'CAPABILITY' typ, dat = self._simple_command(name) return self._untagged_response(typ, dat, name) def check(self): """Checkpoint mailbox on server. (typ, [data]) = <instance>.check() """ return self._simple_command('CHECK') def close(self): """Close currently selected mailbox. Deleted messages are removed from writable mailbox. This is the recommended command before 'LOGOUT'. (typ, [data]) = <instance>.close() """ try: typ, dat = self._simple_command('CLOSE') finally: self.state = 'AUTH' return typ, dat def copy(self, message_set, new_mailbox): """Copy 'message_set' messages onto end of 'new_mailbox'. 
(typ, [data]) = <instance>.copy(message_set, new_mailbox) """ return self._simple_command('COPY', message_set, new_mailbox) def create(self, mailbox): """Create new mailbox. (typ, [data]) = <instance>.create(mailbox) """ return self._simple_command('CREATE', mailbox) def delete(self, mailbox): """Delete old mailbox. (typ, [data]) = <instance>.delete(mailbox) """ return self._simple_command('DELETE', mailbox) def deleteacl(self, mailbox, who): """Delete the ACLs (remove any rights) set for who on mailbox. (typ, [data]) = <instance>.deleteacl(mailbox, who) """ return self._simple_command('DELETEACL', mailbox, who) def expunge(self): """Permanently remove deleted items from selected mailbox. Generates 'EXPUNGE' response for each deleted message. (typ, [data]) = <instance>.expunge() 'data' is list of 'EXPUNGE'd message numbers in order received. """ name = 'EXPUNGE' typ, dat = self._simple_command(name) return self._untagged_response(typ, dat, name) def fetch(self, message_set, message_parts): """Fetch (parts of) messages. (typ, [data, ...]) = <instance>.fetch(message_set, message_parts) 'message_parts' should be a string of selected parts enclosed in parentheses, eg: "(UID BODY[TEXT])". 'data' are tuples of message part envelope and data. """ name = 'FETCH' typ, dat = self._simple_command(name, message_set, message_parts) return self._untagged_response(typ, dat, name) def getacl(self, mailbox): """Get the ACLs for a mailbox. (typ, [data]) = <instance>.getacl(mailbox) """ typ, dat = self._simple_command('GETACL', mailbox) return self._untagged_response(typ, dat, 'ACL') def getannotation(self, mailbox, entry, attribute): """(typ, [data]) = <instance>.getannotation(mailbox, entry, attribute) Retrieve ANNOTATIONs.""" typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute) return self._untagged_response(typ, dat, 'ANNOTATION') def getquota(self, root): """Get the quota root's resource usage and limits. 
Part of the IMAP4 QUOTA extension defined in rfc2087. (typ, [data]) = <instance>.getquota(root) """ typ, dat = self._simple_command('GETQUOTA', root) return self._untagged_response(typ, dat, 'QUOTA') def getquotaroot(self, mailbox): """Get the list of quota roots for the named mailbox. (typ, [[QUOTAROOT responses...], [QUOTA responses]]) = <instance>.getquotaroot(mailbox) """ typ, dat = self._simple_command('GETQUOTAROOT', mailbox) typ, quota = self._untagged_response(typ, dat, 'QUOTA') typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT') return typ, [quotaroot, quota] def list(self, directory='""', pattern='*'): """List mailbox names in directory matching pattern. (typ, [data]) = <instance>.list(directory='""', pattern='*') 'data' is list of LIST responses. """ name = 'LIST' typ, dat = self._simple_command(name, directory, pattern) return self._untagged_response(typ, dat, name) def login(self, user, password): """Identify client using plaintext password. (typ, [data]) = <instance>.login(user, password) NB: 'password' will be quoted. """ typ, dat = self._simple_command('LOGIN', user, self._quote(password)) if typ != 'OK': raise self.error(dat[-1]) self.state = 'AUTH' return typ, dat def login_cram_md5(self, user, password): """ Force use of CRAM-MD5 authentication. (typ, [data]) = <instance>.login_cram_md5(user, password) """ self.user, self.password = user, password return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH) def _CRAM_MD5_AUTH(self, challenge): """ Authobject to use with CRAM-MD5 authentication. """ import hmac return self.user + " " + hmac.HMAC(self.password, challenge).hexdigest() def logout(self): """Shutdown connection to server. (typ, [data]) = <instance>.logout() Returns server 'BYE' response. 
""" self.state = 'LOGOUT' try: typ, dat = self._simple_command('LOGOUT') except: typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]] self.shutdown() if 'BYE' in self.untagged_responses: return 'BYE', self.untagged_responses['BYE'] return typ, dat def lsub(self, directory='""', pattern='*'): """List 'subscribed' mailbox names in directory matching pattern. (typ, [data, ...]) = <instance>.lsub(directory='""', pattern='*') 'data' are tuples of message part envelope and data. """ name = 'LSUB' typ, dat = self._simple_command(name, directory, pattern) return self._untagged_response(typ, dat, name) def myrights(self, mailbox): """Show my ACLs for a mailbox (i.e. the rights that I have on mailbox). (typ, [data]) = <instance>.myrights(mailbox) """ typ,dat = self._simple_command('MYRIGHTS', mailbox) return self._untagged_response(typ, dat, 'MYRIGHTS') def namespace(self): """ Returns IMAP namespaces ala rfc2342 (typ, [data, ...]) = <instance>.namespace() """ name = 'NAMESPACE' typ, dat = self._simple_command(name) return self._untagged_response(typ, dat, name) def noop(self): """Send NOOP command. (typ, [data]) = <instance>.noop() """ if __debug__: if self.debug >= 3: self._dump_ur(self.untagged_responses) return self._simple_command('NOOP') def partial(self, message_num, message_part, start, length): """Fetch truncated part of a message. (typ, [data, ...]) = <instance>.partial(message_num, message_part, start, length) 'data' is tuple of message part envelope and data. """ name = 'PARTIAL' typ, dat = self._simple_command(name, message_num, message_part, start, length) return self._untagged_response(typ, dat, 'FETCH') def proxyauth(self, user): """Assume authentication as "user". Allows an authorised administrator to proxy into any user's mailbox. (typ, [data]) = <instance>.proxyauth(user) """ name = 'PROXYAUTH' return self._simple_command('PROXYAUTH', user) def rename(self, oldmailbox, newmailbox): """Rename old mailbox name to new. 
        (typ, [data]) = <instance>.rename(oldmailbox, newmailbox)
        """
        return self._simple_command('RENAME', oldmailbox, newmailbox)


    def search(self, charset, *criteria):
        """Search mailbox for matching messages.

        (typ, [data]) = <instance>.search(charset, criterion, ...)

        'data' is space separated list of matching message numbers.
        """
        name = 'SEARCH'
        # Only send the CHARSET specification when the caller supplied one.
        if charset:
            typ, dat = self._simple_command(name, 'CHARSET', charset,
                                            *criteria)
        else:
            typ, dat = self._simple_command(name, *criteria)
        return self._untagged_response(typ, dat, name)


    def select(self, mailbox='INBOX', readonly=False):
        """Select a mailbox. Flush all untagged responses.

        (typ, [data]) = <instance>.select(mailbox='INBOX', readonly=False)

        'data' is count of messages in mailbox ('EXISTS' response).

        Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
        other responses should be obtained via <instance>.response('FLAGS') etc.
        """
        self.untagged_responses = {}    # Flush old responses.
        self.is_readonly = readonly
        # EXAMINE is the read-only variant of SELECT.
        if readonly:
            name = 'EXAMINE'
        else:
            name = 'SELECT'
        typ, dat = self._simple_command(name, mailbox)
        if typ != 'OK':
            self.state = 'AUTH'     # Might have been 'SELECTED'
            return typ, dat
        self.state = 'SELECTED'
        # A writable SELECT that comes back READ-ONLY is surfaced to the
        # caller as a 'readonly' exception rather than silently accepted.
        if 'READ-ONLY' in self.untagged_responses \
                and not readonly:
            if __debug__:
                if self.debug >= 1:
                    self._dump_ur(self.untagged_responses)
            raise self.readonly('%s is not writable' % mailbox)
        return typ, self.untagged_responses.get('EXISTS', [None])


    def setacl(self, mailbox, who, what):
        """Set a mailbox acl.

        (typ, [data]) = <instance>.setacl(mailbox, who, what)
        """
        return self._simple_command('SETACL', mailbox, who, what)


    def setannotation(self, *args):
        """(typ, [data]) = <instance>.setannotation(mailbox[, entry, attribute]+)
        Set ANNOTATIONs."""
        typ, dat = self._simple_command('SETANNOTATION', *args)
        return self._untagged_response(typ, dat, 'ANNOTATION')


    def setquota(self, root, limits):
        """Set the quota root's resource limits.
(typ, [data]) = <instance>.setquota(root, limits) """ typ, dat = self._simple_command('SETQUOTA', root, limits) return self._untagged_response(typ, dat, 'QUOTA') def sort(self, sort_criteria, charset, *search_criteria): """IMAP4rev1 extension SORT command. (typ, [data]) = <instance>.sort(sort_criteria, charset, search_criteria, ...) """ name = 'SORT' #if not name in self.capabilities: # Let the server decide! # raise self.error('unimplemented extension command: %s' % name) if (sort_criteria[0],sort_criteria[-1]) != ('(',')'): sort_criteria = '(%s)' % sort_criteria typ, dat = self._simple_command(name, sort_criteria, charset, *search_criteria) return self._untagged_response(typ, dat, name) def status(self, mailbox, names): """Request named status conditions for mailbox. (typ, [data]) = <instance>.status(mailbox, names) """ name = 'STATUS' #if self.PROTOCOL_VERSION == 'IMAP4': # Let the server decide! # raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name) typ, dat = self._simple_command(name, mailbox, names) return self._untagged_response(typ, dat, name) def store(self, message_set, command, flags): """Alters flag dispositions for messages in mailbox. (typ, [data]) = <instance>.store(message_set, command, flags) """ if (flags[0],flags[-1]) != ('(',')'): flags = '(%s)' % flags # Avoid quoting the flags typ, dat = self._simple_command('STORE', message_set, command, flags) return self._untagged_response(typ, dat, 'FETCH') def subscribe(self, mailbox): """Subscribe to new mailbox. (typ, [data]) = <instance>.subscribe(mailbox) """ return self._simple_command('SUBSCRIBE', mailbox) def thread(self, threading_algorithm, charset, *search_criteria): """IMAPrev1 extension THREAD command. (type, [data]) = <instance>.thread(threading_algorithm, charset, search_criteria, ...) 
""" name = 'THREAD' typ, dat = self._simple_command(name, threading_algorithm, charset, *search_criteria) return self._untagged_response(typ, dat, name) def uid(self, command, *args): """Execute "command arg ..." with messages identified by UID, rather than message number. (typ, [data]) = <instance>.uid(command, arg1, arg2, ...) Returns response appropriate to 'command'. """ command = command.upper() if not command in Commands: raise self.error("Unknown IMAP4 UID command: %s" % command) if self.state not in Commands[command]: raise self.error("command %s illegal in state %s, " "only allowed in states %s" % (command, self.state, ', '.join(Commands[command]))) name = 'UID' typ, dat = self._simple_command(name, command, *args) if command in ('SEARCH', 'SORT', 'THREAD'): name = command else: name = 'FETCH' return self._untagged_response(typ, dat, name) def unsubscribe(self, mailbox): """Unsubscribe from old mailbox. (typ, [data]) = <instance>.unsubscribe(mailbox) """ return self._simple_command('UNSUBSCRIBE', mailbox) def xatom(self, name, *args): """Allow simple extension commands notified by server in CAPABILITY response. Assumes command is legal in current state. (typ, [data]) = <instance>.xatom(name, arg, ...) Returns response appropriate to extension command `name'. """ name = name.upper() #if not name in self.capabilities: # Let the server decide! 
# raise self.error('unknown extension command: %s' % name) if not name in Commands: Commands[name] = (self.state,) return self._simple_command(name, *args) # Private methods def _append_untagged(self, typ, dat): if dat is None: dat = '' ur = self.untagged_responses if __debug__: if self.debug >= 5: self._mesg('untagged_responses[%s] %s += ["%s"]' % (typ, len(ur.get(typ,'')), dat)) if typ in ur: ur[typ].append(dat) else: ur[typ] = [dat] def _check_bye(self): bye = self.untagged_responses.get('BYE') if bye: raise self.abort(bye[-1]) def _command(self, name, *args): if self.state not in Commands[name]: self.literal = None raise self.error("command %s illegal in state %s, " "only allowed in states %s" % (name, self.state, ', '.join(Commands[name]))) for typ in ('OK', 'NO', 'BAD'): if typ in self.untagged_responses: del self.untagged_responses[typ] if 'READ-ONLY' in self.untagged_responses \ and not self.is_readonly: raise self.readonly('mailbox status changed to READ-ONLY') tag = self._new_tag() data = '%s %s' % (tag, name) for arg in args: if arg is None: continue data = '%s %s' % (data, self._checkquote(arg)) literal = self.literal if literal is not None: self.literal = None if type(literal) is type(self._command): literator = literal else: literator = None data = '%s {%s}' % (data, len(literal)) if __debug__: if self.debug >= 4: self._mesg('> %s' % data) else: self._log('> %s' % data) try: self.send('%s%s' % (data, CRLF)) except (socket.error, OSError), val: raise self.abort('socket error: %s' % val) if literal is None: return tag while 1: # Wait for continuation response while self._get_response(): if self.tagged_commands[tag]: # BAD/NO? 
return tag # Send literal if literator: literal = literator(self.continuation_response) if __debug__: if self.debug >= 4: self._mesg('write literal size %s' % len(literal)) try: self.send(literal) self.send(CRLF) except (socket.error, OSError), val: raise self.abort('socket error: %s' % val) if not literator: break return tag def _command_complete(self, name, tag): # BYE is expected after LOGOUT if name != 'LOGOUT': self._check_bye() try: typ, data = self._get_tagged_response(tag) except self.abort, val: raise self.abort('command: %s => %s' % (name, val)) except self.error, val: raise self.error('command: %s => %s' % (name, val)) if name != 'LOGOUT': self._check_bye() if typ == 'BAD': raise self.error('%s command error: %s %s' % (name, typ, data)) return typ, data def _get_response(self): # Read response and store. # # Returns None for continuation responses, # otherwise first response line received. resp = self._get_line() # Command completion response? if self._match(self.tagre, resp): tag = self.mo.group('tag') if not tag in self.tagged_commands: raise self.abort('unexpected tagged response: %s' % resp) typ = self.mo.group('type') dat = self.mo.group('data') self.tagged_commands[tag] = (typ, [dat]) else: dat2 = None # '*' (untagged) responses? if not self._match(Untagged_response, resp): if self._match(Untagged_status, resp): dat2 = self.mo.group('data2') if self.mo is None: # Only other possibility is '+' (continuation) response... if self._match(Continuation, resp): self.continuation_response = self.mo.group('data') return None # NB: indicates continuation raise self.abort("unexpected response: '%s'" % resp) typ = self.mo.group('type') dat = self.mo.group('data') if dat is None: dat = '' # Null untagged response if dat2: dat = dat + ' ' + dat2 # Is there a literal to come? while self._match(Literal, dat): # Read literal direct from connection. 
size = int(self.mo.group('size')) if __debug__: if self.debug >= 4: self._mesg('read literal size %s' % size) data = self.read(size) # Store response with literal as tuple self._append_untagged(typ, (dat, data)) # Read trailer - possibly containing another literal dat = self._get_line() self._append_untagged(typ, dat) # Bracketed response information? if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat): self._append_untagged(self.mo.group('type'), self.mo.group('data')) if __debug__: if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'): self._mesg('%s response: %s' % (typ, dat)) return resp def _get_tagged_response(self, tag): while 1: result = self.tagged_commands[tag] if result is not None: del self.tagged_commands[tag] return result # Some have reported "unexpected response" exceptions. # Note that ignoring them here causes loops. # Instead, send me details of the unexpected response and # I'll update the code in `_get_response()'. try: self._get_response() except self.abort, val: if __debug__: if self.debug >= 1: self.print_log() raise def _get_line(self): line = self.readline() if not line: raise self.abort('socket error: EOF') # Protocol mandates all lines terminated by CRLF if not line.endswith('\r\n'): raise self.abort('socket error: unterminated line') line = line[:-2] if __debug__: if self.debug >= 4: self._mesg('< %s' % line) else: self._log('< %s' % line) return line def _match(self, cre, s): # Run compiled regular expression match method on 's'. # Save result, return success. self.mo = cre.match(s) if __debug__: if self.mo is not None and self.debug >= 5: self._mesg("\tmatched r'%s' => %r" % (cre.pattern, self.mo.groups())) return self.mo is not None def _new_tag(self): tag = '%s%s' % (self.tagpre, self.tagnum) self.tagnum = self.tagnum + 1 self.tagged_commands[tag] = None return tag def _checkquote(self, arg): # Must quote command args if non-alphanumeric chars present, # and not already quoted. 
if type(arg) is not type(''): return arg if len(arg) >= 2 and (arg[0],arg[-1]) in (('(',')'),('"','"')): return arg if arg and self.mustquote.search(arg) is None: return arg return self._quote(arg) def _quote(self, arg): arg = arg.replace('\\', '\\\\') arg = arg.replace('"', '\\"') return '"%s"' % arg def _simple_command(self, name, *args): return self._command_complete(name, self._command(name, *args)) def _untagged_response(self, typ, dat, name): if typ == 'NO': return typ, dat if not name in self.untagged_responses: return typ, [None] data = self.untagged_responses.pop(name) if __debug__: if self.debug >= 5: self._mesg('untagged_responses[%s] => %s' % (name, data)) return typ, data if __debug__: def _mesg(self, s, secs=None): if secs is None: secs = time.time() tm = time.strftime('%M:%S', time.localtime(secs)) sys.stderr.write(' %s.%02d %s\n' % (tm, (secs*100)%100, s)) sys.stderr.flush() def _dump_ur(self, dict): # Dump untagged responses (in `dict'). l = dict.items() if not l: return t = '\n\t\t' l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l) self._mesg('untagged responses dump:%s%s' % (t, t.join(l))) def _log(self, line): # Keep log of last `_cmd_log_len' interactions for debugging. self._cmd_log[self._cmd_log_idx] = (line, time.time()) self._cmd_log_idx += 1 if self._cmd_log_idx >= self._cmd_log_len: self._cmd_log_idx = 0 def print_log(self): self._mesg('last %d IMAP4 interactions:' % len(self._cmd_log)) i, n = self._cmd_log_idx, self._cmd_log_len while n: try: self._mesg(*self._cmd_log[i]) except: pass i += 1 if i >= self._cmd_log_len: i = 0 n -= 1 try: import ssl except ImportError: pass else: class IMAP4_SSL(IMAP4): """IMAP4 client class over SSL connection Instantiate with: IMAP4_SSL([host[, port[, keyfile[, certfile]]]]) host - host's name (default: localhost); port - port number (default: standard IMAP4 SSL port). 
keyfile - PEM formatted file that contains your private key (default: None); certfile - PEM formatted certificate chain file (default: None); for more documentation see the docstring of the parent class IMAP4. """ def __init__(self, host = '', port = IMAP4_SSL_PORT, keyfile = None, certfile = None): self.keyfile = keyfile self.certfile = certfile IMAP4.__init__(self, host, port) def open(self, host = '', port = IMAP4_SSL_PORT): """Setup connection to remote server on "host:port". (default: localhost:standard IMAP4 SSL port). This connection will be used by the routines: read, readline, send, shutdown. """ self.host = host self.port = port self.sock = socket.create_connection((host, port)) self.sslobj = ssl.wrap_socket(self.sock, self.keyfile, self.certfile) self.file = self.sslobj.makefile('rb') def read(self, size): """Read 'size' bytes from remote.""" return self.file.read(size) def readline(self): """Read line from remote.""" return self.file.readline() def send(self, data): """Send data to remote.""" bytes = len(data) while bytes > 0: sent = self.sslobj.write(data) if sent == bytes: break # avoid copy data = data[sent:] bytes = bytes - sent def shutdown(self): """Close I/O established in "open".""" self.file.close() self.sock.close() def socket(self): """Return socket instance used to connect to IMAP4 server. socket = <instance>.socket() """ return self.sock def ssl(self): """Return SSLObject instance used to communicate with the IMAP4 server. ssl = ssl.wrap_socket(<instance>.socket) """ return self.sslobj __all__.append("IMAP4_SSL") class IMAP4_stream(IMAP4): """IMAP4 client class over a stream Instantiate with: IMAP4_stream(command) where "command" is a string that can be passed to subprocess.Popen() for more documentation see the docstring of the parent class IMAP4. """ def __init__(self, command): self.command = command IMAP4.__init__(self) def open(self, host = None, port = None): """Setup a stream connection. 
This connection will be used by the routines: read, readline, send, shutdown. """ self.host = None # For compatibility with parent class self.port = None self.sock = None self.file = None self.process = subprocess.Popen(self.command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True, close_fds=True) self.writefile = self.process.stdin self.readfile = self.process.stdout def read(self, size): """Read 'size' bytes from remote.""" return self.readfile.read(size) def readline(self): """Read line from remote.""" return self.readfile.readline() def send(self, data): """Send data to remote.""" self.writefile.write(data) self.writefile.flush() def shutdown(self): """Close I/O established in "open".""" self.readfile.close() self.writefile.close() self.process.wait() class _Authenticator: """Private class to provide en/decoding for base64-based authentication conversation. """ def __init__(self, mechinst): self.mech = mechinst # Callable object to provide/process data def process(self, data): ret = self.mech(self.decode(data)) if ret is None: return '*' # Abort conversation return self.encode(ret) def encode(self, inp): # # Invoke binascii.b2a_base64 iteratively with # short even length buffers, strip the trailing # line feed from the result and append. "Even" # means a number that factors to both 6 and 8, # so when it gets to the end of the 8-bit input # there's no partial 6-bit output. # oup = '' while inp: if len(inp) > 48: t = inp[:48] inp = inp[48:] else: t = inp inp = '' e = binascii.b2a_base64(t) if e: oup = oup + e[:-1] return oup def decode(self, inp): if not inp: return '' return binascii.a2b_base64(inp) Mon2num = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6, 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12} def Internaldate2tuple(resp): """Parse an IMAP4 INTERNALDATE string. Return corresponding local time. The return value is a time.struct_time instance or None if the string has wrong format. 
""" mo = InternalDate.match(resp) if not mo: return None mon = Mon2num[mo.group('mon')] zonen = mo.group('zonen') day = int(mo.group('day')) year = int(mo.group('year')) hour = int(mo.group('hour')) min = int(mo.group('min')) sec = int(mo.group('sec')) zoneh = int(mo.group('zoneh')) zonem = int(mo.group('zonem')) # INTERNALDATE timezone must be subtracted to get UT zone = (zoneh*60 + zonem)*60 if zonen == '-': zone = -zone tt = (year, mon, day, hour, min, sec, -1, -1, -1) utc = time.mktime(tt) # Following is necessary because the time module has no 'mkgmtime'. # 'mktime' assumes arg in local timezone, so adds timezone/altzone. lt = time.localtime(utc) if time.daylight and lt[-1]: zone = zone + time.altzone else: zone = zone + time.timezone return time.localtime(utc - zone) def Int2AP(num): """Convert integer to A-P string representation.""" val = ''; AP = 'ABCDEFGHIJKLMNOP' num = int(abs(num)) while num: num, mod = divmod(num, 16) val = AP[mod] + val return val def ParseFlags(resp): """Convert IMAP4 flags response to python tuple.""" mo = Flags.match(resp) if not mo: return () return tuple(mo.group('flags').split()) def Time2Internaldate(date_time): """Convert date_time to IMAP4 INTERNALDATE representation. Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'. The date_time argument can be a number (int or float) representing seconds since epoch (as returned by time.time()), a 9-tuple representing local time (as returned by time.localtime()), or a double-quoted string. In the last case, it is assumed to already be in the correct format. 
""" if isinstance(date_time, (int, float)): tt = time.localtime(date_time) elif isinstance(date_time, (tuple, time.struct_time)): tt = date_time elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'): return date_time # Assume in correct format else: raise ValueError("date_time not of a known type") dt = time.strftime("%d-%b-%Y %H:%M:%S", tt) if dt[0] == '0': dt = ' ' + dt[1:] if time.daylight and tt[-1]: zone = -time.altzone else: zone = -time.timezone return '"' + dt + " %+03d%02d" % divmod(zone//60, 60) + '"' if __name__ == '__main__': # To test: invoke either as 'python imaplib.py [IMAP4_server_hostname]' # or 'python imaplib.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"' # to test the IMAP4_stream class import getopt, getpass try: optlist, args = getopt.getopt(sys.argv[1:], 'd:s:') except getopt.error, val: optlist, args = (), () stream_command = None for opt,val in optlist: if opt == '-d': Debug = int(val) elif opt == '-s': stream_command = val if not args: args = (stream_command,) if not args: args = ('',) host = args[0] USER = getpass.getuser() PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost")) test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':'\n'} test_seq1 = ( ('login', (USER, PASSWD)), ('create', ('/tmp/xxx 1',)), ('rename', ('/tmp/xxx 1', '/tmp/yyy')), ('CREATE', ('/tmp/yyz 2',)), ('append', ('/tmp/yyz 2', None, None, test_mesg)), ('list', ('/tmp', 'yy*')), ('select', ('/tmp/yyz 2',)), ('search', (None, 'SUBJECT', 'test')), ('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')), ('store', ('1', 'FLAGS', '(\Deleted)')), ('namespace', ()), ('expunge', ()), ('recent', ()), ('close', ()), ) test_seq2 = ( ('select', ()), ('response',('UIDVALIDITY',)), ('uid', ('SEARCH', 'ALL')), ('response', ('EXISTS',)), ('append', (None, None, None, test_mesg)), ('recent', ()), ('logout', ()), ) def run(cmd, args): M._mesg('%s %s' % (cmd, args)) typ, dat = 
getattr(M, cmd)(*args) M._mesg('%s => %s %s' % (cmd, typ, dat)) if typ == 'NO': raise dat[0] return dat try: if stream_command: M = IMAP4_stream(stream_command) else: M = IMAP4(host) if M.state == 'AUTH': test_seq1 = test_seq1[1:] # Login not needed M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION) M._mesg('CAPABILITIES = %r' % (M.capabilities,)) for cmd,args in test_seq1: run(cmd, args) for ml in run('list', ('/tmp/', 'yy%')): mo = re.match(r'.*"([^"]+)"$', ml) if mo: path = mo.group(1) else: path = ml.split()[-1] run('delete', (path,)) for cmd,args in test_seq2: dat = run(cmd, args) if (cmd,args) != ('uid', ('SEARCH', 'ALL')): continue uid = dat[-1].split() if not uid: continue run('uid', ('FETCH', '%s' % uid[-1], '(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)')) print '\nAll tests OK.' except: print '\nTests failed.' if not Debug: print ''' If you would like to see debugging output, try: %s -d5 ''' % sys.argv[0] raise
# --- Dataset-extraction metadata (artifact; not part of the source) ---
# license:   gpl-3.0
# repo_name: Elandril/Sick-Beard
# path:      cherrypy/lib/caching.py
# copies:    35
# size:      15405
import datetime import threading import time import cherrypy from cherrypy.lib import cptools, httputil class Cache(object): def get(self): raise NotImplemented def put(self, obj, size): raise NotImplemented def delete(self): raise NotImplemented def clear(self): raise NotImplemented # ------------------------------- Memory Cache ------------------------------- # class AntiStampedeCache(dict): def wait(self, key, timeout=5, debug=False): """Return the cached value for the given key, or None. If timeout is not None (the default), and the value is already being calculated by another thread, wait until the given timeout has elapsed. If the value is available before the timeout expires, it is returned. If not, None is returned, and a sentinel placed in the cache to signal other threads to wait. If timeout is None, no waiting is performed nor sentinels used. """ value = self.get(key) if isinstance(value, threading._Event): if timeout is None: # Ignore the other thread and recalc it ourselves. if debug: cherrypy.log('No timeout', 'TOOLS.CACHING') return None # Wait until it's done or times out. if debug: cherrypy.log('Waiting up to %s seconds' % timeout, 'TOOLS.CACHING') value.wait(timeout) if value.result is not None: # The other thread finished its calculation. Use it. if debug: cherrypy.log('Result!', 'TOOLS.CACHING') return value.result # Timed out. Stick an Event in the slot so other threads wait # on this one to finish calculating the value. if debug: cherrypy.log('Timed out', 'TOOLS.CACHING') e = threading.Event() e.result = None dict.__setitem__(self, key, e) return None elif value is None: # Stick an Event in the slot so other threads wait # on this one to finish calculating the value. 
if debug: cherrypy.log('Timed out', 'TOOLS.CACHING') e = threading.Event() e.result = None dict.__setitem__(self, key, e) return value def __setitem__(self, key, value): """Set the cached value for the given key.""" existing = self.get(key) dict.__setitem__(self, key, value) if isinstance(existing, threading._Event): # Set Event.result so other threads waiting on it have # immediate access without needing to poll the cache again. existing.result = value existing.set() class MemoryCache(Cache): """An in-memory cache for varying response content. Each key in self.store is a URI, and each value is an AntiStampedeCache. The response for any given URI may vary based on the values of "selecting request headers"; that is, those named in the Vary response header. We assume the list of header names to be constant for each URI throughout the lifetime of the application, and store that list in self.store[uri].selecting_headers. The items contained in self.store[uri] have keys which are tuples of request header values (in the same order as the names in its selecting_headers), and values which are the actual responses. """ maxobjects = 1000 maxobj_size = 100000 maxsize = 10000000 delay = 600 antistampede_timeout = 5 expire_freq = 0.1 debug = False def __init__(self): self.clear() # Run self.expire_cache in a separate daemon thread. t = threading.Thread(target=self.expire_cache, name='expire_cache') self.expiration_thread = t if hasattr(threading.Thread, "daemon"): # Python 2.6+ t.daemon = True else: t.setDaemon(True) t.start() def clear(self): """Reset the cache to its initial, empty state.""" self.store = {} self.expirations = {} self.tot_puts = 0 self.tot_gets = 0 self.tot_hist = 0 self.tot_expires = 0 self.tot_non_modified = 0 self.cursize = 0 def expire_cache(self): # expire_cache runs in a separate thread which the servers are # not aware of. It's possible that "time" will be set to None # arbitrarily, so we check "while time" to avoid exceptions. 
# See tickets #99 and #180 for more information. while time: now = time.time() # Must make a copy of expirations so it doesn't change size # during iteration for expiration_time, objects in self.expirations.items(): if expiration_time <= now: for obj_size, uri, sel_header_values in objects: try: del self.store[uri][sel_header_values] self.tot_expires += 1 self.cursize -= obj_size except KeyError: # the key may have been deleted elsewhere pass del self.expirations[expiration_time] time.sleep(self.expire_freq) def get(self): """Return the current variant if in the cache, else None.""" request = cherrypy.serving.request self.tot_gets += 1 uri = cherrypy.url(qs=request.query_string) uricache = self.store.get(uri) if uricache is None: return None header_values = [request.headers.get(h, '') for h in uricache.selecting_headers] header_values.sort() variant = uricache.wait(key=tuple(header_values), timeout=self.antistampede_timeout, debug=self.debug) if variant is not None: self.tot_hist += 1 return variant def put(self, variant, size): """Store the current variant in the cache.""" request = cherrypy.serving.request response = cherrypy.serving.response uri = cherrypy.url(qs=request.query_string) uricache = self.store.get(uri) if uricache is None: uricache = AntiStampedeCache() uricache.selecting_headers = [ e.value for e in response.headers.elements('Vary')] self.store[uri] = uricache if len(self.store) < self.maxobjects: total_size = self.cursize + size # checks if there's space for the object if (size < self.maxobj_size and total_size < self.maxsize): # add to the expirations list expiration_time = response.time + self.delay bucket = self.expirations.setdefault(expiration_time, []) bucket.append((size, uri, uricache.selecting_headers)) # add to the cache header_values = [request.headers.get(h, '') for h in uricache.selecting_headers] header_values.sort() uricache[tuple(header_values)] = variant self.tot_puts += 1 self.cursize = total_size def delete(self): """Remove ALL 
cached variants of the current resource.""" uri = cherrypy.url(qs=cherrypy.serving.request.query_string) self.store.pop(uri, None) def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs): """Try to obtain cached output. If fresh enough, raise HTTPError(304). If POST, PUT, or DELETE: * invalidates (deletes) any cached response for this resource * sets request.cached = False * sets request.cacheable = False else if a cached copy exists: * sets request.cached = True * sets request.cacheable = False * sets response.headers to the cached values * checks the cached Last-Modified response header against the current If-(Un)Modified-Since request headers; raises 304 if necessary. * sets response.status and response.body to the cached values * returns True otherwise: * sets request.cached = False * sets request.cacheable = True * returns False """ request = cherrypy.serving.request response = cherrypy.serving.response if not hasattr(cherrypy, "_cache"): # Make a process-wide Cache object. cherrypy._cache = kwargs.pop("cache_class", MemoryCache)() # Take all remaining kwargs and set them on the Cache object. for k, v in kwargs.items(): setattr(cherrypy._cache, k, v) cherrypy._cache.debug = debug # POST, PUT, DELETE should invalidate (delete) the cached copy. # See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10. if request.method in invalid_methods: if debug: cherrypy.log('request.method %r in invalid_methods %r' % (request.method, invalid_methods), 'TOOLS.CACHING') cherrypy._cache.delete() request.cached = False request.cacheable = False return False if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]: request.cached = False request.cacheable = True return False cache_data = cherrypy._cache.get() request.cached = bool(cache_data) request.cacheable = not request.cached if request.cached: # Serve the cached copy. 
max_age = cherrypy._cache.delay for v in [e.value for e in request.headers.elements('Cache-Control')]: atoms = v.split('=', 1) directive = atoms.pop(0) if directive == 'max-age': if len(atoms) != 1 or not atoms[0].isdigit(): raise cherrypy.HTTPError(400, "Invalid Cache-Control header") max_age = int(atoms[0]) break elif directive == 'no-cache': if debug: cherrypy.log('Ignoring cache due to Cache-Control: no-cache', 'TOOLS.CACHING') request.cached = False request.cacheable = True return False if debug: cherrypy.log('Reading response from cache', 'TOOLS.CACHING') s, h, b, create_time = cache_data age = int(response.time - create_time) if (age > max_age): if debug: cherrypy.log('Ignoring cache due to age > %d' % max_age, 'TOOLS.CACHING') request.cached = False request.cacheable = True return False # Copy the response headers. See http://www.cherrypy.org/ticket/721. response.headers = rh = httputil.HeaderMap() for k in h: dict.__setitem__(rh, k, dict.__getitem__(h, k)) # Add the required Age header response.headers["Age"] = str(age) try: # Note that validate_since depends on a Last-Modified header; # this was put into the cached copy, and should have been # resurrected just above (response.headers = cache_data[1]). 
cptools.validate_since() except cherrypy.HTTPRedirect, x: if x.status == 304: cherrypy._cache.tot_non_modified += 1 raise # serve it & get out from the request response.status = s response.body = b else: if debug: cherrypy.log('request is not cached', 'TOOLS.CACHING') return request.cached def tee_output(): request = cherrypy.serving.request if 'no-store' in request.headers.values('Cache-Control'): return def tee(body): """Tee response.body into a list.""" if ('no-cache' in response.headers.values('Pragma') or 'no-store' in response.headers.values('Cache-Control')): for chunk in body: yield chunk return output = [] for chunk in body: output.append(chunk) yield chunk # save the cache data body = ''.join(output) cherrypy._cache.put((response.status, response.headers or {}, body, response.time), len(body)) response = cherrypy.serving.response response.body = tee(response.body) def expires(secs=0, force=False, debug=False): """Tool for influencing cache mechanisms using the 'Expires' header. 'secs' must be either an int or a datetime.timedelta, and indicates the number of seconds between response.time and when the response should expire. The 'Expires' header will be set to (response.time + secs). If 'secs' is zero, the 'Expires' header is set one year in the past, and the following "cache prevention" headers are also set: 'Pragma': 'no-cache' 'Cache-Control': 'no-cache, must-revalidate' If 'force' is False (the default), the following headers are checked: 'Etag', 'Last-Modified', 'Age', 'Expires'. If any are already present, none of the above response headers are set. 
""" response = cherrypy.serving.response headers = response.headers cacheable = False if not force: # some header names that indicate that the response can be cached for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'): if indicator in headers: cacheable = True break if not cacheable and not force: if debug: cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES') else: if debug: cherrypy.log('request is cacheable', 'TOOLS.EXPIRES') if isinstance(secs, datetime.timedelta): secs = (86400 * secs.days) + secs.seconds if secs == 0: if force or ("Pragma" not in headers): headers["Pragma"] = "no-cache" if cherrypy.serving.request.protocol >= (1, 1): if force or "Cache-Control" not in headers: headers["Cache-Control"] = "no-cache, must-revalidate" # Set an explicit Expires date in the past. expiry = httputil.HTTPDate(1169942400.0) else: expiry = httputil.HTTPDate(response.time + secs) if force or "Expires" not in headers: headers["Expires"] = expiry
gpl-3.0
gwsu2008/automation
python/apa-branch-diff.py
1
2959
#!/usr/bin/env python3
"""Compare the Android patient app 'dev' and 'master' branches in Bitbucket.

Uses the Bitbucket Server REST API (compare/commits) to count commits that
exist on refs/heads/dev but not on refs/heads/master.  When the branches are
identical, any stale build.properties in the Jenkins workspace is removed;
otherwise a dev2master promotion is announced.  Fatal conditions terminate
the process via error() with exit code 1.
"""
import urllib3
import sys
import os
import json
import re
from datetime import datetime
import urllib.parse
import requests
import time

# The Bitbucket instance presents a certificate that is not in the trust
# store (all requests use verify=False); silence the InsecureRequestWarning
# that would otherwise be emitted on every call.
urllib3.disable_warnings()

projects_url = 'https://bitbucket.test.com/rest/api/1.0/projects'
project_key = 'AMA'
batch_size = 100
workspace = os.getenv('WORKSPACE', os.getcwd())
bitbucket_jenkins_token = os.getenv('BITBUCKET_JENKINS_TOKEN', None)


def json_serial(obj):
    """``json.dumps`` ``default=`` hook: serialize datetimes as ISO-8601."""
    if isinstance(obj, datetime):
        return obj.isoformat()
    raise TypeError("Type not serializable")


def json_print(json_obj):
    """Return *json_obj* pretty-printed (indented, keys sorted)."""
    return json.dumps(json_obj, indent=4, sort_keys=True, default=json_serial)


def request_get(url):
    """GET *url* authenticated with the Jenkins bearer token.

    Returns the ``requests.Response``; a 60 s timeout bounds hung calls.
    """
    authorization = "Bearer " + bitbucket_jenkins_token
    headers = {'Content-type': 'application/json',
               "Authorization": authorization}
    return requests.get(url, headers=headers, verify=False, timeout=60)


def info(msg):
    """Print *msg* with a blue [Info] prefix."""
    print('\033[34m[Info]\033[0m {}'.format(msg))


def warn(msg):
    """Print *msg* with a yellow [Warn] prefix."""
    print('\033[33m[Warn]\033[0m {}'.format(msg))


def error(msg):
    """Print *msg* with a red [Error] prefix and exit the process (code 1)."""
    print('\033[31m[Error]\033[0m {}'.format(msg))
    sys.exit(1)


def get_repo_id(url):
    """Return the numeric repository id for the repo REST *url*.

    Returns None when the request fails or the payload has no 'id', so the
    caller's ``if app_repo_id is None`` guard actually works (previously a
    bad response raised JSONDecodeError/KeyError instead).
    """
    response = request_get(url)
    if response.status_code != 200:
        warn('HTTP {} while resolving repository id'.format(response.status_code))
        return None
    jdata = json.loads(response.text)
    return jdata.get('id')


def main():
    start_time = time.time()
    info('Start comparing Android mobile app dev and master branch')
    app_change_size = 0
    app_project = '{}/{}/repos/android-patient-app'.format(projects_url, project_key)
    app_repo_id = get_repo_id(app_project)
    if app_repo_id is None:
        error('Check log for error message')  # exits with code 1
    app_from = urllib.parse.quote_plus('refs/heads/dev')
    app_to = urllib.parse.quote_plus('refs/heads/master')
    # limit=1 is enough: only the presence of a difference matters, and the
    # payload's 'size' still reports whether any commits differ.
    app_compare_branch = 'compare/commits?from={}&to={}&fromRepo={}&limit=1'.format(
        app_from, app_to, app_repo_id)
    app_url = '{}/{}'.format(app_project, app_compare_branch)
    response = request_get(app_url)
    if int(response.status_code) == 200:
        # 'size' = number of commits on dev that master lacks (capped by limit).
        app_change_size = response.json()['size']
        if app_change_size == 0:
            info('APA App: No change between branches refs/heads/dev and refs/heads/master')
        else:
            info('APA App: There are changes between branches refs/heads/dev and refs/heads/master')
    else:
        # error() never returns; the old unreachable 'return 1' was removed.
        error('HTTP error {}'.format(response.status_code))
    if app_change_size == 0:
        info('APA: Remove build.properties')
        if os.path.exists(workspace + '/build.properties'):
            os.remove(workspace + '/build.properties')
    else:
        info('APA: Triggering dev2master and run INT promotion')
    info('Finished - execution time %.2f seconds' % (time.time() - start_time))


if __name__ == "__main__":
    main()
gpl-2.0
mengxn/tensorflow
tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py
80
8075
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.math_ops.matrix_solve.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.ops import linalg_ops from tensorflow.python.platform import test def BatchMatMul(a, b): # A numpy implementation of tf.matmul(). if a.ndim < 3: return np.dot(a, b) # Get the number of matrices. n = np.prod(a.shape[:-2]) assert n == np.prod(b.shape[:-2]) a_flat = np.reshape(a, tuple([n]) + a.shape[-2:]) b_flat = np.reshape(b, tuple([n]) + b.shape[-2:]) c_flat_shape = [n, a.shape[-2], b.shape[-1]] c_flat = np.empty(c_flat_shape) for i in range(n): c_flat[i, :, :] = np.dot(a_flat[i, :, :], b_flat[i, :, :]) return np.reshape(c_flat, a.shape[:-1] + b_flat.shape[-1:]) def BatchRegularizedLeastSquares(matrices, rhss, l2_regularization=0.0): # A numpy implementation of regularized least squares solver using # the normal equations. 
matrix_dims = matrices.shape matrices_transposed = np.swapaxes(matrices, -2, -1) rows = matrix_dims[-2] cols = matrix_dims[-1] if rows >= cols: preconditioner = l2_regularization * np.identity(cols) gramian = BatchMatMul(matrices_transposed, matrices) + preconditioner inverse = np.linalg.inv(gramian) left_pseudo_inverse = BatchMatMul(inverse, matrices_transposed) return BatchMatMul(left_pseudo_inverse, rhss) else: preconditioner = l2_regularization * np.identity(rows) gramian = BatchMatMul(matrices, matrices_transposed) + preconditioner inverse = np.linalg.inv(gramian) right_pseudo_inverse = BatchMatMul(matrices_transposed, inverse) return BatchMatMul(right_pseudo_inverse, rhss) class MatrixSolveLsOpTest(test.TestCase): def _verifySolve(self, x, y): for np_type in [np.float32, np.float64]: a = x.astype(np_type) b = y.astype(np_type) np_ans, _, _, _ = np.linalg.lstsq(a, b) for fast in [True, False]: with self.test_session(): tf_ans = linalg_ops.matrix_solve_ls(a, b, fast=fast) ans = tf_ans.eval() self.assertEqual(np_ans.shape, tf_ans.get_shape()) self.assertEqual(np_ans.shape, ans.shape) # Check residual norm. tf_r = b - BatchMatMul(a, ans) tf_r_norm = np.sum(tf_r * tf_r) np_r = b - BatchMatMul(a, np_ans) np_r_norm = np.sum(np_r * np_r) self.assertAllClose(np_r_norm, tf_r_norm) # Check solution. self.assertAllClose(np_ans, ans, atol=1e-5, rtol=1e-5) def _verifySolveBatch(self, x, y): # Since numpy.linalg.lsqr does not support batch solves, as opposed # to numpy.linalg.solve, we just perform this test for a fixed batch size # of 2x3. 
for np_type in [np.float32, np.float64]: a = np.tile(x.astype(np_type), [2, 3, 1, 1]) b = np.tile(y.astype(np_type), [2, 3, 1, 1]) np_ans = np.empty([2, 3, a.shape[-1], b.shape[-1]]) for dim1 in range(2): for dim2 in range(3): np_ans[dim1, dim2, :, :], _, _, _ = np.linalg.lstsq( a[dim1, dim2, :, :], b[dim1, dim2, :, :]) for fast in [True, False]: with self.test_session(): tf_ans = linalg_ops.matrix_solve_ls(a, b, fast=fast).eval() self.assertEqual(np_ans.shape, tf_ans.shape) # Check residual norm. tf_r = b - BatchMatMul(a, tf_ans) tf_r_norm = np.sum(tf_r * tf_r) np_r = b - BatchMatMul(a, np_ans) np_r_norm = np.sum(np_r * np_r) self.assertAllClose(np_r_norm, tf_r_norm) # Check solution. if fast or a.shape[-2] >= a.shape[-1]: # We skip this test for the underdetermined case when using the # slow path, because Eigen does not return a minimum norm solution. # TODO(rmlarsen): Enable this check for all paths if/when we fix # Eigen's solver. self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5) def _verifyRegularized(self, x, y, l2_regularizer): for np_type in [np.float32, np.float64]: # Test with a single matrix. a = x.astype(np_type) b = y.astype(np_type) np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer) with self.test_session(): # Test matrix_solve_ls on regular matrices tf_ans = linalg_ops.matrix_solve_ls( a, b, l2_regularizer=l2_regularizer, fast=True).eval() self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5) # Test with a 2x3 batch of matrices. a = np.tile(x.astype(np_type), [2, 3, 1, 1]) b = np.tile(y.astype(np_type), [2, 3, 1, 1]) np_ans = BatchRegularizedLeastSquares(a, b, l2_regularizer) with self.test_session(): tf_ans = linalg_ops.matrix_solve_ls( a, b, l2_regularizer=l2_regularizer, fast=True).eval() self.assertAllClose(np_ans, tf_ans, atol=1e-5, rtol=1e-5) def testSquare(self): # 2x2 matrices, 2x3 right-hand sides. 
matrix = np.array([[1., 2.], [3., 4.]]) rhs = np.array([[1., 0., 1.], [0., 1., 1.]]) self._verifySolve(matrix, rhs) self._verifySolveBatch(matrix, rhs) self._verifyRegularized(matrix, rhs, l2_regularizer=0.1) def testOverdetermined(self): # 2x2 matrices, 2x3 right-hand sides. matrix = np.array([[1., 2.], [3., 4.], [5., 6.]]) rhs = np.array([[1., 0., 1.], [0., 1., 1.], [1., 1., 0.]]) self._verifySolve(matrix, rhs) self._verifySolveBatch(matrix, rhs) self._verifyRegularized(matrix, rhs, l2_regularizer=0.1) def testUnderdetermined(self): # 2x2 matrices, 2x3 right-hand sides. matrix = np.array([[1., 2., 3], [4., 5., 6.]]) rhs = np.array([[1., 0., 1.], [0., 1., 1.]]) self._verifySolve(matrix, rhs) self._verifySolveBatch(matrix, rhs) self._verifyRegularized(matrix, rhs, l2_regularizer=0.1) def testWrongDimensions(self): # The matrix and right-hand sides should have the same number of rows. with self.test_session(): matrix = constant_op.constant([[1., 0.], [0., 1.]]) rhs = constant_op.constant([[1., 0.]]) with self.assertRaises(ValueError): linalg_ops.matrix_solve_ls(matrix, rhs) def testEmpty(self): full = np.array([[1., 2.], [3., 4.], [5., 6.]]) empty0 = np.empty([3, 0]) empty1 = np.empty([0, 2]) for fast in [True, False]: with self.test_session(): tf_ans = linalg_ops.matrix_solve_ls(empty0, empty0, fast=fast).eval() self.assertEqual(tf_ans.shape, (0, 0)) tf_ans = linalg_ops.matrix_solve_ls(empty0, full, fast=fast).eval() self.assertEqual(tf_ans.shape, (0, 2)) tf_ans = linalg_ops.matrix_solve_ls(full, empty0, fast=fast).eval() self.assertEqual(tf_ans.shape, (2, 0)) tf_ans = linalg_ops.matrix_solve_ls(empty1, empty1, fast=fast).eval() self.assertEqual(tf_ans.shape, (2, 2)) def testBatchResultSize(self): # 3x3x3 matrices, 3x3x1 right-hand sides. matrix = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.] * 3).reshape(3, 3, 3) rhs = np.array([1., 2., 3.] 
* 3).reshape(3, 3, 1) answer = linalg_ops.matrix_solve(matrix, rhs) ls_answer = linalg_ops.matrix_solve_ls(matrix, rhs) self.assertEqual(ls_answer.get_shape(), [3, 3, 1]) self.assertEqual(answer.get_shape(), [3, 3, 1]) if __name__ == "__main__": test.main()
apache-2.0
slisson/intellij-community
python/testData/MockSdk2.7/Lib/__future__.py
257
4380
"""Catalogue of incompatible language changes introduced via ``__future__``.

Every feature is described by a line of the form::

    FeatureName = _Feature(OptionalRelease, MandatoryRelease, CompilerFlag)

where OptionalRelease and MandatoryRelease are 5-tuples shaped exactly like
``sys.version_info``::

    (PY_MAJOR_VERSION,   # the 2 in 2.1.0a3; an int
     PY_MINOR_VERSION,   # the 1; an int
     PY_MICRO_VERSION,   # the 0; an int
     PY_RELEASE_LEVEL,   # "alpha", "beta", "candidate" or "final"; string
     PY_RELEASE_SERIAL)  # the 3; an int

OptionalRelease is the first release that accepted
``from __future__ import FeatureName``.

MandatoryRelease is the release in which the feature became (or is planned
to become) part of the language proper, after which the import is accepted
but no longer required.  A MandatoryRelease of None means the planned
feature was dropped.

The corresponding accessors on ``_Feature`` are ``getOptionalRelease()``
and ``getMandatoryRelease()``.

CompilerFlag is the bitfield flag to pass as the fourth argument of the
builtin ``compile()`` to enable the feature in dynamically compiled code;
it is exposed as the ``compiler_flag`` attribute and must match the CO_xxx
#defines in Include/compile.h.

Feature lines are never deleted from this module.
"""

all_feature_names = [
    "nested_scopes",
    "generators",
    "division",
    "absolute_import",
    "with_statement",
    "print_function",
    "unicode_literals",
]

__all__ = ["all_feature_names"] + all_feature_names

# Mirrors of the CO_xxx compiler flags from Include/compile.h, under the
# same names so editor searches find them here.  Deliberately NOT in
# __all__: they belong to the compiler, not to this module's public API.
CO_NESTED = 0x0010                    # nested_scopes
CO_GENERATOR_ALLOWED = 0              # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000           # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000    # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000     # with statement
CO_FUTURE_PRINT_FUNCTION = 0x10000    # print function
CO_FUTURE_UNICODE_LITERALS = 0x20000  # unicode string literals


class _Feature:
    """One phased-in language change (see the module docstring)."""

    def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
        self.optional = optionalRelease
        self.mandatory = mandatoryRelease
        self.compiler_flag = compiler_flag

    def getOptionalRelease(self):
        """First release recognizing the feature (sys.version_info 5-tuple)."""
        return self.optional

    def getMandatoryRelease(self):
        """Release making the feature mandatory (5-tuple), or None if dropped."""
        return self.mandatory

    def __repr__(self):
        # Render as _Feature((opt), (mand), flag), matching the historical
        # "_Feature" + repr(tuple) format exactly.
        return "_Feature%r" % ((self.optional,
                                self.mandatory,
                                self.compiler_flag),)


nested_scopes = _Feature((2, 1, 0, "beta", 1), (2, 2, 0, "alpha", 0), CO_NESTED)
generators = _Feature((2, 2, 0, "alpha", 1), (2, 3, 0, "final", 0), CO_GENERATOR_ALLOWED)
division = _Feature((2, 2, 0, "alpha", 2), (3, 0, 0, "alpha", 0), CO_FUTURE_DIVISION)
absolute_import = _Feature((2, 5, 0, "alpha", 1), (2, 7, 0, "alpha", 0), CO_FUTURE_ABSOLUTE_IMPORT)
with_statement = _Feature((2, 5, 0, "alpha", 1), (2, 6, 0, "alpha", 0), CO_FUTURE_WITH_STATEMENT)
print_function = _Feature((2, 6, 0, "alpha", 2), (3, 0, 0, "alpha", 0), CO_FUTURE_PRINT_FUNCTION)
unicode_literals = _Feature((2, 6, 0, "alpha", 2), (3, 0, 0, "alpha", 0), CO_FUTURE_UNICODE_LITERALS)
apache-2.0
CoCoMol/CoCoPy
modules/experiment/drivers/srsDG645.py
20
1453
#!/usr/bin/python # -*- coding: utf-8 -*- ################################################################################ # # CoCoPy - A python toolkit for rotational spectroscopy # # Copyright (c) 2013 by David Schmitz (david.schmitz@chasquiwan.de). # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the “Software”), to deal in the # Software without restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A # PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH # THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # MIT Licence (http://mit-license.org/) # ################################################################################
mit
vicente-gonzalez-ruiz/QSVC
trunk/src/old_py/synthesize_COPIA_SIN_LIST.py
1
4032
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
#
# synthesize.py
#
# Undoes the temporal transform (motion-compensated temporal filtering
# synthesis): runs one "mctf synthesize_step" per temporal resolution
# level, from the coarsest level back up to the original frame rate.

import os
import sys
from GOP import GOP
from subprocess import check_call
from subprocess import CalledProcessError
from MCTF_parser import MCTF_parser

# Defaults; all of them can be overridden on the command line.
SEARCH_RANGE_MAX = 128  # hard upper bound for the motion search range
block_overlaping = 0
block_size = 16
border_size = 0
block_size_min = 16
GOPs = 1
pixels_in_x = 352
pixels_in_y = 288
search_range = 4
subpixel_accuracy = 0
TRLs = 5                # number of temporal resolution levels
update_factor = 0       # 1.0/4
search_factor = 2       # search-range growth per coarser temporal level

parser = MCTF_parser(description="Performs the temporal synthesis of a picture sequence.")
parser.block_overlaping(block_overlaping)
parser.block_size(block_size)
parser.block_size_min(block_size_min)
parser.border_size(border_size)
parser.GOPs(GOPs)
parser.pixels_in_x(pixels_in_x)
parser.pixels_in_y(pixels_in_y)
parser.search_range(search_range)
parser.subpixel_accuracy(subpixel_accuracy)
parser.TRLs(TRLs)
parser.update_factor(update_factor)

args = parser.parse_known_args()[0]
if args.block_overlaping:
    block_overlaping = int(args.block_overlaping)
if args.block_size:
    block_size = int(args.block_size)
if args.block_size_min:
    block_size_min = int(args.block_size_min)
if args.border_size:
    border_size = int(args.border_size)
if args.GOPs:
    GOPs = int(args.GOPs)
if args.pixels_in_x:
    pixels_in_x = int(args.pixels_in_x)
if args.pixels_in_y:
    pixels_in_y = int(args.pixels_in_y)
if args.search_range:
    search_range = int(args.search_range)
if args.subpixel_accuracy:
    subpixel_accuracy = int(args.subpixel_accuracy)
if args.TRLs:
    TRLs = int(args.TRLs)
if args.update_factor:
    update_factor = float(args.update_factor)

gop = GOP()
GOP_size = gop.get_size(TRLs)
pictures = GOPs * GOP_size + 1

if block_size < block_size_min:
    block_size_min = block_size

# Remember the finest-level values; they are restored before re-deriving
# the parameters of each synthesized level.
_search_range = search_range
_pictures = pictures
block_size_max = block_size

if TRLs > 1:
    temporal_subband = 1
    # Walk down to the coarsest temporal subband, deriving its search
    # range, block size and picture count.  NOTE: '//' (floor division)
    # replaces the original int '/' — identical under Python 2, but the
    # old form silently produced floats under Python 3.
    while temporal_subband < (TRLs - 1):
        search_range = min(search_range * search_factor, SEARCH_RANGE_MAX)
        block_size = max(block_size // 2, block_size_min)
        pictures = (pictures + 1) // 2
        temporal_subband += 1
    # Synthesize from the coarsest subband back to level 1.
    while temporal_subband > 0:
        try:
            check_call("mctf synthesize_step"
                       + " --block_overlaping=" + str(block_overlaping)
                       + " --block_size=" + str(block_size)
                       + " --pictures=" + str(pictures)
                       + " --pixels_in_x=" + str(pixels_in_x)
                       + " --pixels_in_y=" + str(pixels_in_y)
                       + " --search_range=" + str(search_range)
                       + " --subpixel_accuracy=" + str(subpixel_accuracy)
                       + " --temporal_subband=" + str(temporal_subband)
                       + " --update_factor=" + str(update_factor),
                       shell=True)
        except CalledProcessError:
            sys.exit(-1)
        temporal_subband -= 1
        # Re-derive the parameters of the next (finer) temporal subband
        # from the finest-level values.
        pictures = _pictures
        j = 1
        block_size = block_size_max
        search_range = _search_range
        while j < temporal_subband:
            search_range = min(search_range * search_factor, SEARCH_RANGE_MAX)
            block_size = max(block_size // 2, block_size_min)
            pictures = (pictures + 1) // 2
            j += 1
gpl-2.0
def-/commandergenius
project/jni/python/src/Lib/ctypes/macholib/dyld.py
253
5341
######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291.  #
######################################################################
"""
dyld emulation

Pure-Python emulation of the macOS dynamic linker's (dyld) search rules,
used to locate dylibs and frameworks without calling dyld itself.  The
dyld_*_search functions are generators that yield candidate paths in the
same order dyld would probe them; dyld_find() returns the first candidate
that exists on disk.
"""

import os
from framework import framework_info
from dylib import dylib_info
from itertools import *

__all__ = [
    'dyld_find', 'framework_find',
    'framework_info', 'dylib_info',
]

# Fallback search directories as documented in man dyld(1); used only
# when the corresponding DYLD_FALLBACK_* environment variable is unset.
DEFAULT_FRAMEWORK_FALLBACK = [
    os.path.expanduser("~/Library/Frameworks"),
    "/Library/Frameworks",
    "/Network/Library/Frameworks",
    "/System/Library/Frameworks",
]

DEFAULT_LIBRARY_FALLBACK = [
    os.path.expanduser("~/lib"),
    "/usr/local/lib",
    "/lib",
    "/usr/lib",
]

def ensure_utf8(s):
    """Not all of PyObjC and Python understand unicode paths very well yet"""
    # Python 2 only: 'unicode' is the py2 text type.  None passes through.
    if isinstance(s, unicode):
        return s.encode('utf8')
    return s

def dyld_env(env, var):
    # Return env[var] split on ':' as a list, or [] when unset.
    # env=None means "use os.environ".
    if env is None:
        env = os.environ
    rval = env.get(var)
    if rval is None:
        return []
    return rval.split(':')

def dyld_image_suffix(env=None):
    # DYLD_IMAGE_SUFFIX is a single value (e.g. "_debug"), not a path list.
    if env is None:
        env = os.environ
    return env.get('DYLD_IMAGE_SUFFIX')

def dyld_framework_path(env=None):
    return dyld_env(env, 'DYLD_FRAMEWORK_PATH')

def dyld_library_path(env=None):
    return dyld_env(env, 'DYLD_LIBRARY_PATH')

def dyld_fallback_framework_path(env=None):
    return dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH')

def dyld_fallback_library_path(env=None):
    return dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH')

def dyld_image_suffix_search(iterator, env=None):
    """For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics"""
    suffix = dyld_image_suffix(env)
    if suffix is None:
        return iterator
    def _inject(iterator=iterator, suffix=suffix):
        # For every candidate, first try the suffixed variant
        # (libFoo_debug.dylib — inserted before the .dylib extension),
        # then the original path.
        for path in iterator:
            if path.endswith('.dylib'):
                yield path[:-len('.dylib')] + suffix + '.dylib'
            else:
                yield path + suffix
            yield path
    return _inject()

def dyld_override_search(name, env=None):
    # If DYLD_FRAMEWORK_PATH is set and this dylib_name is a
    # framework name, use the first file that exists in the framework
    # path if any.  If there is none go on to search the DYLD_LIBRARY_PATH
    # if any.
    framework = framework_info(name)

    if framework is not None:
        for path in dyld_framework_path(env):
            yield os.path.join(path, framework['name'])

    # If DYLD_LIBRARY_PATH is set then use the first file that exists
    # in the path.  If none use the original name.
    for path in dyld_library_path(env):
        yield os.path.join(path, os.path.basename(name))

def dyld_executable_path_search(name, executable_path=None):
    # If we haven't done any searching and found a library and the
    # dylib_name starts with "@executable_path/" then construct the
    # library name.
    if name.startswith('@executable_path/') and executable_path is not None:
        yield os.path.join(executable_path, name[len('@executable_path/'):])

def dyld_default_search(name, env=None):
    # Default search order: the literal name first, then the
    # DYLD_FALLBACK_* paths, then the built-in fallback directories
    # (the latter only when the corresponding env var is unset).
    yield name

    framework = framework_info(name)

    if framework is not None:
        fallback_framework_path = dyld_fallback_framework_path(env)
        for path in fallback_framework_path:
            yield os.path.join(path, framework['name'])

    fallback_library_path = dyld_fallback_library_path(env)
    for path in fallback_library_path:
        yield os.path.join(path, os.path.basename(name))

    # Built-in defaults apply only when no explicit fallback was given.
    if framework is not None and not fallback_framework_path:
        for path in DEFAULT_FRAMEWORK_FALLBACK:
            yield os.path.join(path, framework['name'])

    if not fallback_library_path:
        for path in DEFAULT_LIBRARY_FALLBACK:
            yield os.path.join(path, os.path.basename(name))

def dyld_find(name, executable_path=None, env=None):
    """
    Find a library or framework using dyld semantics

    Probes the override, @executable_path and default search sequences
    (each wrapped with DYLD_IMAGE_SUFFIX handling) and returns the first
    candidate that is an existing file.  Raises ValueError when nothing
    is found.
    """
    name = ensure_utf8(name)
    executable_path = ensure_utf8(executable_path)
    for path in dyld_image_suffix_search(chain(
                dyld_override_search(name, env),
                dyld_executable_path_search(name, executable_path),
                dyld_default_search(name, env),
            ), env):
        if os.path.isfile(path):
            return path
    raise ValueError("dylib %s could not be found" % (name,))

def framework_find(fn, executable_path=None, env=None):
    """
    Find a framework using dyld semantics in a very loose manner.

    Will take input such as:
        Python
        Python.framework
        Python.framework/Versions/Current
    """
    try:
        return dyld_find(fn, executable_path=executable_path, env=env)
    except ValueError, e:
        pass
    # Retry with the name expanded to Foo.framework/Foo.  If that also
    # fails, re-raise the ORIGINAL error (note the py2-only
    # "except X, e" syntax keeps 'e' alive across the second try).
    fmwk_index = fn.rfind('.framework')
    if fmwk_index == -1:
        fmwk_index = len(fn)
        fn += '.framework'
    fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
    try:
        return dyld_find(fn, executable_path=executable_path, env=env)
    except ValueError:
        raise e

def test_dyld_find():
    # Smoke test; only meaningful on an actual macOS system.
    env = {}
    assert dyld_find('libSystem.dylib') == '/usr/lib/libSystem.dylib'
    assert dyld_find('System.framework/System') == '/System/Library/Frameworks/System.framework/System'

if __name__ == '__main__':
    test_dyld_find()
lgpl-2.1
cevaris/pants
src/python/pants/goal/aggregated_timings.py
13
1740
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os from collections import defaultdict from pants.util.dirutil import safe_mkdir_for class AggregatedTimings(object): """Aggregates timings over multiple invocations of 'similar' work. If filepath is not none, stores the timings in that file. Useful for finding bottlenecks. """ def __init__(self, path=None): # Map path -> timing in seconds (a float) self._timings_by_path = defaultdict(float) self._tool_labels = set() self._path = path safe_mkdir_for(self._path) def add_timing(self, label, secs, is_tool=False): """Aggregate timings by label. secs - a double, so fractional seconds are allowed. is_tool - whether this label represents a tool invocation. """ self._timings_by_path[label] += secs if is_tool: self._tool_labels.add(label) # Check existence in case we're a clean-all. We don't want to write anything in that case. if self._path and os.path.exists(os.path.dirname(self._path)): with open(self._path, 'w') as f: for x in self.get_all(): f.write('{label}: {timing}\n'.format(**x)) def get_all(self): """Returns all the timings, sorted in decreasing order. Each value is a dict: { path: <path>, timing: <timing in seconds> } """ return [{'label': x[0], 'timing': x[1], 'is_tool': x[0] in self._tool_labels} for x in sorted(self._timings_by_path.items(), key=lambda x: x[1], reverse=True)]
apache-2.0
edermartioli/ExoplanetLight
src/test/exoplanetInfo.py
1
2002
#!/opt/anaconda/bin/python # -*- coding: iso-8859-1 -*- """ --> !#/usr/bin/python Created on Nov 22 2016 Description: A module to test the ExoplanetLight libraries @author: Eder Martioli <emartioli@lna.br> Laboratorio Nacional de Astrofisica, Brazil. Simple usage example: ./exoplanetInfo.py --planet="51 Peg b" """ __version__ = "1.0" __copyright__ = """ Copyright (c) ... All rights reserved. """ from optparse import OptionParser import sys sys.path.append("../") from exoplanets import planet parser = OptionParser() parser.add_option("-p", "--planet", dest="planet", help='input planet',type='string',default="") parser.add_option("-v", action="store_true", dest="verbose", help="verbose",default=False) try: options,args = parser.parse_args(sys.argv[1:]) except: print "Error: check usage with test.py -h ";sys.exit(1); if options.verbose: print 'Input planet: ', options.planet exoplanet = planet(options.planet) print "\n== Planet parameters ==" print "mass=", exoplanet.mass, "Mjup" print "msini=", exoplanet.msini, "Mjup" print "radius=", exoplanet.radius, "Rjup" print "semimajoraxis=", exoplanet.semimajoraxis, "AU" print "eccentricity=", exoplanet.eccentricity print "inclination=", exoplanet.inclination, "deg" print "argperiapse=", exoplanet.argperiapse, "deg" print "timeofperipass=", exoplanet.timeofperipass, "BJD" print "orbitalperiod=", exoplanet.orbitalperiod, "days" print "velocitysemiamp=", exoplanet.velocitysemiamp, "m/s" print "---------------------" print "\n== Star parameters ==" print "starmass=", exoplanet.starmass, "Msun" print "starradius=", exoplanet.starradius, "Rsun" print "distance=", exoplanet.distance, "pc" print "starteff=", exoplanet.starteff, "K" print "metallicity=", exoplanet.metallicity, "[Fe/H]" print "starvsini=", exoplanet.starvsini, "km/s" print "starlogg=", exoplanet.starlogg, "cm s-2" print "starvmag=", exoplanet.starvmag, "mag" print "---------------------"
mit
akarki15/mozillians
vendor-local/lib/python/south/orm.py
119
15142
""" South's fake ORM; lets you not have to write SQL inside migrations. Roughly emulates the real Django ORM, to a point. """ from __future__ import print_function import inspect from django.db import models from django.db.models.loading import cache from django.core.exceptions import ImproperlyConfigured from south.db import db from south.utils import ask_for_it_by_name, datetime_utils from south.hacks import hacks from south.exceptions import UnfreezeMeLater, ORMBaseNotIncluded, ImpossibleORMUnfreeze from south.utils.py3 import string_types class ModelsLocals(object): """ Custom dictionary-like class to be locals(); falls back to lowercase search for items that don't exist (because we store model names as lowercase). """ def __init__(self, data): self.data = data def __getitem__(self, key): try: return self.data[key] except KeyError: return self.data[key.lower()] # Stores already-created ORMs. _orm_cache = {} def FakeORM(*args): """ Creates a Fake Django ORM. This is actually a memoised constructor; the real class is _FakeORM. """ if not args in _orm_cache: _orm_cache[args] = _FakeORM(*args) return _orm_cache[args] class LazyFakeORM(object): """ In addition to memoising the ORM call, this function lazily generates them for a Migration class. Assign the result of this to (for example) .orm, and as soon as .orm is accessed the ORM will be created. """ def __init__(self, *args): self._args = args self.orm = None def __get__(self, obj, type=None): if not self.orm: self.orm = FakeORM(*self._args) return self.orm class _FakeORM(object): """ Simulates the Django ORM at some point in time, using a frozen definition on the Migration class. """ def __init__(self, cls, app): self.default_app = app self.cls = cls # Try loading the models off the migration class; default to no models. 
self.models = {} try: self.models_source = cls.models except AttributeError: return # Start a 'new' AppCache hacks.clear_app_cache() # Now, make each model's data into a FakeModel # We first make entries for each model that are just its name # This allows us to have circular model dependency loops model_names = [] for name, data in self.models_source.items(): # Make sure there's some kind of Meta if "Meta" not in data: data['Meta'] = {} try: app_label, model_name = name.split(".", 1) except ValueError: app_label = self.default_app model_name = name # If there's an object_name in the Meta, use it and remove it if "object_name" in data['Meta']: model_name = data['Meta']['object_name'] del data['Meta']['object_name'] name = "%s.%s" % (app_label, model_name) self.models[name.lower()] = name model_names.append((name.lower(), app_label, model_name, data)) # Loop until model_names is entry, or hasn't shrunk in size since # last iteration. # The make_model method can ask to postpone a model; it's then pushed # to the back of the queue. Because this is currently only used for # inheritance, it should thus theoretically always decrease by one. last_size = None while model_names: # First, make sure we've shrunk. if len(model_names) == last_size: raise ImpossibleORMUnfreeze() last_size = len(model_names) # Make one run through postponed_model_names = [] for name, app_label, model_name, data in model_names: try: self.models[name] = self.make_model(app_label, model_name, data) except UnfreezeMeLater: postponed_model_names.append((name, app_label, model_name, data)) # Reset model_names = postponed_model_names # And perform the second run to iron out any circular/backwards depends. 
self.retry_failed_fields() # Force evaluation of relations on the models now for model in self.models.values(): model._meta.get_all_field_names() # Reset AppCache hacks.unclear_app_cache() def __iter__(self): return iter(self.models.values()) def __getattr__(self, key): fullname = (self.default_app+"."+key).lower() try: return self.models[fullname] except KeyError: raise AttributeError("The model '%s' from the app '%s' is not available in this migration. (Did you use orm.ModelName, not orm['app.ModelName']?)" % (key, self.default_app)) def __getitem__(self, key): # Detect if they asked for a field on a model or not. if ":" in key: key, fname = key.split(":") else: fname = None # Now, try getting the model key = key.lower() try: model = self.models[key] except KeyError: try: app, model = key.split(".", 1) except ValueError: raise KeyError("The model '%s' is not in appname.modelname format." % key) else: raise KeyError("The model '%s' from the app '%s' is not available in this migration." % (model, app)) # If they asked for a field, get it. if fname: return model._meta.get_field_by_name(fname)[0] else: return model def eval_in_context(self, code, app, extra_imports={}): "Evaluates the given code in the context of the migration file." # Drag in the migration module's locals (hopefully including models.py) # excluding all models from that (i.e. from modern models.py), to stop pollution fake_locals = dict( (key, value) for key, value in inspect.getmodule(self.cls).__dict__.items() if not ( isinstance(value, type) and issubclass(value, models.Model) and hasattr(value, "_meta") ) ) # We add our models into the locals for the eval fake_locals.update(dict([ (name.split(".")[-1], model) for name, model in self.models.items() ])) # Make sure the ones for this app override. 
fake_locals.update(dict([ (name.split(".")[-1], model) for name, model in self.models.items() if name.split(".")[0] == app ])) # Ourselves as orm, to allow non-fail cross-app referencing fake_locals['orm'] = self # And a fake _ function fake_locals['_'] = lambda x: x # Datetime; there should be no datetime direct accesses fake_locals['datetime'] = datetime_utils # Now, go through the requested imports and import them. for name, value in extra_imports.items(): # First, try getting it out of locals. parts = value.split(".") try: obj = fake_locals[parts[0]] for part in parts[1:]: obj = getattr(obj, part) except (KeyError, AttributeError): pass else: fake_locals[name] = obj continue # OK, try to import it directly try: fake_locals[name] = ask_for_it_by_name(value) except ImportError: if name == "SouthFieldClass": raise ValueError("Cannot import the required field '%s'" % value) else: print("WARNING: Cannot import '%s'" % value) # Use ModelsLocals to make lookups work right for CapitalisedModels fake_locals = ModelsLocals(fake_locals) return eval(code, globals(), fake_locals) def make_meta(self, app, model, data, stub=False): "Makes a Meta class out of a dict of eval-able arguments." results = {'app_label': app} for key, code in data.items(): # Some things we never want to use. if key in ["_bases", "_ormbases"]: continue # Some things we don't want with stubs. if stub and key in ["order_with_respect_to"]: continue # OK, add it. try: results[key] = self.eval_in_context(code, app) except (NameError, AttributeError) as e: raise ValueError("Cannot successfully create meta field '%s' for model '%s.%s': %s." % ( key, app, model, e )) return type("Meta", tuple(), results) def make_model(self, app, name, data): "Makes a Model class out of the given app name, model name and pickled data." # Extract any bases out of Meta if "_ormbases" in data['Meta']: # Make sure everything we depend on is done already; otherwise, wait. 
for key in data['Meta']['_ormbases']: key = key.lower() if key not in self.models: raise ORMBaseNotIncluded("Cannot find ORM base %s" % key) elif isinstance(self.models[key], string_types): # Then the other model hasn't been unfrozen yet. # We postpone ourselves; the situation will eventually resolve. raise UnfreezeMeLater() bases = [self.models[key.lower()] for key in data['Meta']['_ormbases']] # Perhaps the old style? elif "_bases" in data['Meta']: bases = map(ask_for_it_by_name, data['Meta']['_bases']) # Ah, bog standard, then. else: bases = [models.Model] # Turn the Meta dict into a basic class meta = self.make_meta(app, name, data['Meta'], data.get("_stub", False)) failed_fields = {} fields = {} stub = False # Now, make some fields! for fname, params in data.items(): # If it's the stub marker, ignore it. if fname == "_stub": stub = bool(params) continue elif fname == "Meta": continue elif not params: raise ValueError("Field '%s' on model '%s.%s' has no definition." % (fname, app, name)) elif isinstance(params, string_types): # It's a premade definition string! Let's hope it works... code = params extra_imports = {} else: # If there's only one parameter (backwards compat), make it 3. if len(params) == 1: params = (params[0], [], {}) # There should be 3 parameters. Code is a tuple of (code, what-to-import) if len(params) == 3: code = "SouthFieldClass(%s)" % ", ".join( params[1] + ["%s=%s" % (n, v) for n, v in params[2].items()] ) extra_imports = {"SouthFieldClass": params[0]} else: raise ValueError("Field '%s' on model '%s.%s' has a weird definition length (should be 1 or 3 items)." % (fname, app, name)) try: # Execute it in a probably-correct context. field = self.eval_in_context(code, app, extra_imports) except (NameError, AttributeError, AssertionError, KeyError): # It might rely on other models being around. Add it to the # model for the second pass. 
failed_fields[fname] = (code, extra_imports) else: fields[fname] = field # Find the app in the Django core, and get its module more_kwds = {} try: app_module = models.get_app(app) more_kwds['__module__'] = app_module.__name__ except ImproperlyConfigured: # The app this belonged to has vanished, but thankfully we can still # make a mock model, so ignore the error. more_kwds['__module__'] = '_south_mock' more_kwds['Meta'] = meta # Make our model fields.update(more_kwds) model = type( str(name), tuple(bases), fields, ) # If this is a stub model, change Objects to a whiny class if stub: model.objects = WhinyManager() # Also, make sure they can't instantiate it model.__init__ = whiny_method else: model.objects = NoDryRunManager(model.objects) if failed_fields: model._failed_fields = failed_fields return model def retry_failed_fields(self): "Tries to re-evaluate the _failed_fields for each model." for modelkey, model in self.models.items(): app, modelname = modelkey.split(".", 1) if hasattr(model, "_failed_fields"): for fname, (code, extra_imports) in model._failed_fields.items(): try: field = self.eval_in_context(code, app, extra_imports) except (NameError, AttributeError, AssertionError, KeyError) as e: # It's failed again. Complain. raise ValueError("Cannot successfully create field '%s' for model '%s': %s." % ( fname, modelname, e )) else: # Startup that field. model.add_to_class(fname, field) class WhinyManager(object): "A fake manager that whines whenever you try to touch it. For stub models." def __getattr__(self, key): raise AttributeError("You cannot use items from a stub model.") class NoDryRunManager(object): """ A manager that always proxies through to the real manager, unless a dry run is in progress. 
""" def __init__(self, real): self.real = real def __getattr__(self, name): if db.dry_run: raise AttributeError("You are in a dry run, and cannot access the ORM.\nWrap ORM sections in 'if not db.dry_run:', or if the whole migration is only a data migration, set no_dry_run = True on the Migration class.") return getattr(self.real, name) def whiny_method(*a, **kw): raise ValueError("You cannot instantiate a stub model.")
bsd-3-clause
dtysky/Gal2Renpy
Gal2Renpy/Main.py
1
7524
#coding:utf-8 ################################# #Copyright(c) 2014 dtysky ################################# import sys import os import pickle from G2R import * from G2R.ReadBlock import * FileAll=[] Files=[] FS=MyFS() FO=MyFS() US=UserSource('../') UT=UserTag(US) TxtC=TextCreat() SpC=SpCreat() Tmp=TmpC() sys.path.append(US.Args['pathmode']['Gal2RenpyPath']+'Gal2Renpy') """ Files/Dicts prepare begin """ def CheckSpFile(fp): if not os.path.exists(fp): FH=open(fp,'w') pickle.dump({},FH) FH.close() #In test mode, all files will be processed. if os.path.exists('FileHash'): os.remove('FileHash') #Creat all definitions and refresh DictHash #Only a dict had been changed will re-creat it CheckSpFile('DictHash') FH=open('DictHash','r') DictHash=pickle.load(FH) FH.close() DictHash=DefineCreat(US,FO,DictHash) FH=open('DictHash','w') pickle.dump(DictHash,FH) FH.close() #FileList: A dict for storing scenes in all files CheckSpFile('FileHash') CheckSpFile('FileList') FH=open('FileHash','r') FileHash=pickle.load(FH) FH.close() FH=open('FileList','r') FileList=pickle.load(FH) FH.close() #Ensure FileHash and FileList are synchronous if len(FileHash)>len(FileList): for f in FileHash: if f not in FileList: FileList[f]=[] if len(FileHash)<len(FileList): for f in FileList: if f not in FileHash: FileList[f]=0 #Add all '.gal' files for root,dirs,files in os.walk(US.Args['pathmode']['TextPath']): for f in files: if os.path.splitext(f)[1]=='.gal': FileAll.append(root+'/'+f) #Only a file had been changed will process it for f in FileAll: FS.Open(f,'rb') if not FileHash.get(f): Files.append(f) elif FS.hash()!=FileHash[f]: Files.append(f) FS.Close() #Delete all invailed files in FileHash and FileList for f in FileHash: if f not in FileAll: del FileHash[f] del FileList[f] """ Files/Dicts prepare end """ #Format line for using def ChangeSp(Line): rn={'flag':'','attrs1':{},'attrs2':Line['attrs2']} f='' i=0 for flag in Line['flag']: f+='_'+flag rn['attrs1'][f[1:]]=Line['attrs1'][i] i+=1 
rn['flag']=f[1:] return rn ScriptPath=US.Args['pathmode']['ScriptPath'] if US.Args['pathmode']['TestMode']: #In test mode, remove start.rpy first if os.path.exists(ScriptPath+'start.rpy'): os.remove(ScriptPath+'start.rpy') if os.path.exists(ScriptPath+'start.rpyc'): os.remove(ScriptPath+'start.rpyc') #Creat the only script in this mode FO.Open(ScriptPath+'test.rpy','w') FO.Write('label start:\n') if US.Args['pathmode']['KeySystem']: FO.Write(" $ InitMyKey()\n") if US.Args['pathmode']['HPCSystem']: FO.Write(' $ HPCMessInit()\n') FO.Write(" $ store.chapter='Chapter.test'\n") #Begin j=0 for fp in Files: FS.Open(fp,'r') FileList[fp]=[] FrameEnd=False TestBegin=False while not FrameEnd: block=ReadBlock(FS) for line in block: #Check whether test had begined if TestBegin: if line['head']=='words': if not Tmp.Args.get('mode'): FS.Error("You must define a mode with 'mode' tag first !") if line['flag']=='text': TxtC.Refresh(line['flag'],Tmp.Args['mode'],line['attrs2']) elif line['flag']=='say': TxtC.Refresh(line['flag'],Tmp.Args['mode'],line['attrs2'],line['attrs1']) elif line['flag']=='think': if not Tmp.Args['view']: FS.Error("You must define a view with 'view' tag first !") TxtC.Refresh(line['flag'],Tmp.Args['mode'],line['attrs2'],Tmp.Args['view']) FO.Write(TxtC.Show(US,FS)) elif line['head']=='sp': for flag in line['flag']: if flag not in US.Keywords: FS.Error("This flag '"+flag+"' does not be supported !") line=ChangeSp(line) SpCNow=SpC[line['flag']] SpCNow.Refresh(line['attrs1'],line['attrs2']) if SpCNow.GetFlag()=='test': TestBegin=False if line['flag']=='sc' and SpCNow.Get()['k']=='Main': SpCNow.Check('sc',SpCNow.Get(),UT,FS) sc=(int(SpCNow.Get()['cp'].replace('Cp','')),int(SpCNow.Get()['sc'].replace('Sc',''))) FileList[fp].append(sc) FO.Write(SpCNow.Show(SpCNow.GetFlag(),SpCNow.Get(),US,UT,Tmp,FS)) elif line['head']=='skip': pass elif line['head']=='end': if Tmp.Args['test']=='Begin': FS.Error('Test mode does not end until this file end !') FrameEnd=True else: if 
line['head']=='end': FrameEnd=True if line['head']=='sp' and line['flag']==['test']: line=ChangeSp(line) SpC['test'].Refresh(line['attrs1'],line['attrs2']) SpC['test'].Show(SpC['test'].GetFlag(),SpC['test'].Get(),US,UT,Tmp,FS) if 'test' in Tmp.Args and Tmp.Args['test']=='Begin': TestBegin=True FO.Close() else: #In normal mode, remove test.rpy first if os.path.exists(ScriptPath+'test.rpy'): os.remove(ScriptPath+'test.rpy') if os.path.exists(ScriptPath+'test.rpyc'): os.remove(ScriptPath+'test.rpyc') FO.Open(ScriptPath+'start.rpy','w') FO.Write('label start:\n') if US.Args['pathmode']['KeySystem']: FO.Write(" $ InitMyKey()\n") if US.Args['pathmode']['HPCSystem']: FO.Write(' $ HPCMessInit()\n') #Begin for fp in Files: FS.Open(fp,'r') FileList[fp]=[] CanWrite=False FrameEnd=False while not FrameEnd: block=ReadBlock(FS) for line in block: #Check whether a scene had been defined if not CanWrite: sc=ChangeSp(line) SpCNow=SpC['sc'] SpCNow.Refresh(sc['attrs1'],sc['attrs2']) if sc['flag']=='sc': FO.Open(ScriptPath+'test/'+SpCNow.Get()['sc']+SpCNow.Get()['cp']+'.rpy') CanWrite=True else: FS.Error("You must define a scene with 'sc' tag first !") FO.Write(SpCNow.Show(SpCNow.GetFlag(),SpCNow.Get(),US,UT,Tmp,FS)) if line['head']=='words': if not Tmp.Args['mode']: FS.Error("You must define a mode with 'mode' tag first !") if line['flag']=='text': TxtC.Refresh(line['flag'],Tmp.Args['mode'],line['attrs2']) elif line['flag']=='say': TxtC.Refresh(line['flag'],Tmp.Args['mode'],line['attrs2'],line['attrs1']) elif line['flag']=='think': if not Tmp.Args['view']: FS.Error("You must define a view with 'view' tag first !") TxtC.Refresh(line['flag'],Tmp.Args['mode'],line['attrs2'],line['attrs1']) FO.Write(TxtC.Show(US,FS)) elif line['head']=='sp': line=ChangeSp(line) if line['flag'] not in US.Keywords: FS.Error("This flag '"+flag+"' does not be supported !") SpCNow=SpC[line['flag']] SpCNow.Refresh(line['attrs1'],line['attrs2']) if line['flag']=='sc' and SpCNow.Get()['k']=='Main': 
SpCNow.Check('sc',SpCNow.Get(),UT,FS) sc=(int(SpCNow.Get()['cp'].replace('Cp','')),int(SpCNow.Get()['sc'].replace('Sc',''))) FileList[fp].append(sc) FO.Write(SpCNow.Show(SpCNow.GetFlag(),SpCNow.Get(),US,UT,Tmp,FS)) elif line['head']=='skip': pass elif line['head']=='end': FrameEnd=True FO.Close() FileListAll=[] for l in FileList: FileListAll.extend(l) FileListDict={} for i in range(len(FileListAll)): if FileListAll[i][0] not in FileListDict: FileListDict[FileListAll[i][0]]=[] FileListDict[FileListAll[i][0]].append(FileListAll[i][1]) FO.Open(ScriptPath+'start.rpy') for sc in sorted(FileListDict): for cp in sorted(FileListDict[sc]): sccp='Sc'+str(sc)+'Cp'+str(cp) Fo.write(' call '+sccp+'\n') FO.Close() FH=open('FileList','w') pickle.dump(FileList,FH) FH.close() for f in Files: FS.Open(f,'rb') FileHash[f]=FS.hash() FH=open('FileHash','w') pickle.dump(FileHash,FH) FH.close()
mit
museomix/2013_Quebec_thermoscope
raspberry/pygame-1.9.1release/examples/scrap_clipboard.py
8
2693
#!/usr/bin/env python """ Demonstrates the clipboard capabilities of pygame. """ import os import pygame from pygame.locals import * import pygame.scrap as scrap import StringIO def usage (): print ("Press the 'g' key to get all of the current clipboard data") print ("Press the 'p' key to put a string into the clipboard") print ("Press the 'a' key to get a list of the currently available types") print ("Press the 'i' key to put an image into the clipboard") main_dir = os.path.split(os.path.abspath(__file__))[0] pygame.init () screen = pygame.display.set_mode ((200, 200)) c = pygame.time.Clock () going = True # Initialize the scrap module and use the clipboard mode. scrap.init () scrap.set_mode (SCRAP_CLIPBOARD) usage () while going: for e in pygame.event.get (): if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE): going = False elif e.type == KEYDOWN and e.key == K_g: # This means to look for data. print ("Getting the different clipboard data..") for t in scrap.get_types (): r = scrap.get (t) if r and len (r) > 500: print ("Type %s : (large buffer)" % t) else: print ("Type %s : %s" % (t, r)) if "image" in t: namehint = t.split("/")[1] if namehint in ['bmp', 'png', 'jpg']: f = StringIO.StringIO(r) loaded_surf = pygame.image.load(f, "." + namehint) screen.blit(loaded_surf, (0,0)) elif e.type == KEYDOWN and e.key == K_p: # Place some text into the selection. print ("Placing clipboard text.") scrap.put (SCRAP_TEXT, "Hello. This is a message from scrap.") elif e.type == KEYDOWN and e.key == K_a: # Get all available types. 
print ("Getting the available types from the clipboard.") types = scrap.get_types () print (types) if len (types) > 0: print ("Contains %s: %s" % (types[0], scrap.contains (types[0]))) print ("Contains _INVALID_: ", scrap.contains ("_INVALID_")) elif e.type == KEYDOWN and e.key == K_i: print ("Putting image into the clipboard.") scrap.set_mode (SCRAP_CLIPBOARD) fp = open (os.path.join(main_dir, 'data', 'liquid.bmp'), 'rb') buf = fp.read () scrap.put ("image/bmp", buf) fp.close () elif e.type in (KEYDOWN, MOUSEBUTTONDOWN): usage () pygame.display.flip() c.tick(40)
mit
kaushik94/boto
boto/datapipeline/layer1.py
10
29008
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import boto from boto.compat import json from boto.connection import AWSQueryConnection from boto.regioninfo import RegionInfo from boto.exception import JSONResponseError from boto.datapipeline import exceptions class DataPipelineConnection(AWSQueryConnection): """ This is the AWS Data Pipeline API Reference . This guide provides descriptions and samples of the AWS Data Pipeline API. AWS Data Pipeline is a web service that configures and manages a data-driven workflow called a pipeline. AWS Data Pipeline handles the details of scheduling and ensuring that data dependencies are met so your application can focus on processing the data. The AWS Data Pipeline API implements two main sets of functionality. The first set of actions configure the pipeline in the web service. 
You call these actions to create a pipeline and define data sources, schedules, dependencies, and the transforms to be performed on the data. The second set of actions are used by a task runner application that calls the AWS Data Pipeline API to receive the next task ready for processing. The logic for performing the task, such as querying the data, running data analysis, or converting the data from one format to another, is contained within the task runner. The task runner performs the task assigned to it by the web service, reporting progress to the web service as it does so. When the task is done, the task runner reports the final success or failure of the task to the web service. AWS Data Pipeline provides an open-source implementation of a task runner called AWS Data Pipeline Task Runner. AWS Data Pipeline Task Runner provides logic for common data management scenarios, such as performing database queries and running data analysis using Amazon Elastic MapReduce (Amazon EMR). You can use AWS Data Pipeline Task Runner as your task runner, or you can write your own task runner to provide custom data management. The AWS Data Pipeline API uses the Signature Version 4 protocol for signing requests. For more information about how to sign a request with this protocol, see `Signature Version 4 Signing Process`_. In the code examples in this reference, the Signature Version 4 Request parameters are represented as AuthParams. 
""" APIVersion = "2012-10-29" DefaultRegionName = "us-east-1" DefaultRegionEndpoint = "datapipeline.us-east-1.amazonaws.com" ServiceName = "DataPipeline" TargetPrefix = "DataPipeline" ResponseError = JSONResponseError _faults = { "PipelineDeletedException": exceptions.PipelineDeletedException, "InvalidRequestException": exceptions.InvalidRequestException, "TaskNotFoundException": exceptions.TaskNotFoundException, "PipelineNotFoundException": exceptions.PipelineNotFoundException, "InternalServiceError": exceptions.InternalServiceError, } def __init__(self, **kwargs): region = kwargs.pop('region', None) if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) kwargs['host'] = region.endpoint super(DataPipelineConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): return ['hmac-v4'] def activate_pipeline(self, pipeline_id): """ Validates a pipeline and initiates processing. If the pipeline does not pass validation, activation fails. Call this action to start processing pipeline tasks of a pipeline you've created using the CreatePipeline and PutPipelineDefinition actions. A pipeline cannot be modified after it has been successfully activated. :type pipeline_id: string :param pipeline_id: The identifier of the pipeline to activate. """ params = {'pipelineId': pipeline_id, } return self.make_request(action='ActivatePipeline', body=json.dumps(params)) def create_pipeline(self, name, unique_id, description=None): """ Creates a new empty pipeline. When this action succeeds, you can then use the PutPipelineDefinition action to populate the pipeline. :type name: string :param name: The name of the new pipeline. You can use the same name for multiple pipelines associated with your AWS account, because AWS Data Pipeline assigns each new pipeline a unique pipeline identifier. :type unique_id: string :param unique_id: A unique identifier that you specify. 
This identifier is not the same as the pipeline identifier assigned by AWS Data Pipeline. You are responsible for defining the format and ensuring the uniqueness of this identifier. You use this parameter to ensure idempotency during repeated calls to CreatePipeline. For example, if the first call to CreatePipeline does not return a clear success, you can pass in the same unique identifier and pipeline name combination on a subsequent call to CreatePipeline. CreatePipeline ensures that if a pipeline already exists with the same name and unique identifier, a new pipeline will not be created. Instead, you'll receive the pipeline identifier from the previous attempt. The uniqueness of the name and unique identifier combination is scoped to the AWS account or IAM user credentials. :type description: string :param description: The description of the new pipeline. """ params = {'name': name, 'uniqueId': unique_id, } if description is not None: params['description'] = description return self.make_request(action='CreatePipeline', body=json.dumps(params)) def delete_pipeline(self, pipeline_id): """ Permanently deletes a pipeline, its pipeline definition and its run history. You cannot query or restore a deleted pipeline. AWS Data Pipeline will attempt to cancel instances associated with the pipeline that are currently being processed by task runners. Deleting a pipeline cannot be undone. To temporarily pause a pipeline instead of deleting it, call SetStatus with the status set to Pause on individual components. Components that are paused by SetStatus can be resumed. :type pipeline_id: string :param pipeline_id: The identifier of the pipeline to be deleted. """ params = {'pipelineId': pipeline_id, } return self.make_request(action='DeletePipeline', body=json.dumps(params)) def describe_objects(self, object_ids, pipeline_id, marker=None, evaluate_expressions=None): """ Returns the object definitions for a set of objects associated with the pipeline. 
Object definitions are composed of a set of fields that define the properties of the object. :type pipeline_id: string :param pipeline_id: Identifier of the pipeline that contains the object definitions. :type object_ids: list :param object_ids: Identifiers of the pipeline objects that contain the definitions to be described. You can pass as many as 25 identifiers in a single call to DescribeObjects. :type evaluate_expressions: boolean :param evaluate_expressions: Indicates whether any expressions in the object should be evaluated when the object descriptions are returned. :type marker: string :param marker: The starting point for the results to be returned. The first time you call DescribeObjects, this value should be empty. As long as the action returns `HasMoreResults` as `True`, you can call DescribeObjects again and pass the marker value from the response to retrieve the next set of results. """ params = { 'pipelineId': pipeline_id, 'objectIds': object_ids, } if evaluate_expressions is not None: params['evaluateExpressions'] = evaluate_expressions if marker is not None: params['marker'] = marker return self.make_request(action='DescribeObjects', body=json.dumps(params)) def describe_pipelines(self, pipeline_ids): """ Retrieve metadata about one or more pipelines. The information retrieved includes the name of the pipeline, the pipeline identifier, its current state, and the user account that owns the pipeline. Using account credentials, you can retrieve metadata about pipelines that you or your IAM users have created. If you are using an IAM user account, you can retrieve metadata about only those pipelines you have read permission for. To retrieve the full pipeline definition instead of metadata about the pipeline, call the GetPipelineDefinition action. :type pipeline_ids: list :param pipeline_ids: Identifiers of the pipelines to describe. You can pass as many as 25 identifiers in a single call to DescribePipelines. 
You can obtain pipeline identifiers by calling ListPipelines. """ params = {'pipelineIds': pipeline_ids, } return self.make_request(action='DescribePipelines', body=json.dumps(params)) def evaluate_expression(self, pipeline_id, expression, object_id): """ Evaluates a string in the context of a specified object. A task runner can use this action to evaluate SQL queries stored in Amazon S3. :type pipeline_id: string :param pipeline_id: The identifier of the pipeline. :type object_id: string :param object_id: The identifier of the object. :type expression: string :param expression: The expression to evaluate. """ params = { 'pipelineId': pipeline_id, 'objectId': object_id, 'expression': expression, } return self.make_request(action='EvaluateExpression', body=json.dumps(params)) def get_pipeline_definition(self, pipeline_id, version=None): """ Returns the definition of the specified pipeline. You can call GetPipelineDefinition to retrieve the pipeline definition you provided using PutPipelineDefinition. :type pipeline_id: string :param pipeline_id: The identifier of the pipeline. :type version: string :param version: The version of the pipeline definition to retrieve. This parameter accepts the values `latest` (default) and `active`. Where `latest` indicates the last definition saved to the pipeline and `active` indicates the last definition of the pipeline that was activated. """ params = {'pipelineId': pipeline_id, } if version is not None: params['version'] = version return self.make_request(action='GetPipelineDefinition', body=json.dumps(params)) def list_pipelines(self, marker=None): """ Returns a list of pipeline identifiers for all active pipelines. Identifiers are returned only for pipelines you have permission to access. :type marker: string :param marker: The starting point for the results to be returned. The first time you call ListPipelines, this value should be empty. 
As long as the action returns `HasMoreResults` as `True`, you can call ListPipelines again and pass the marker value from the response to retrieve the next set of results. """ params = {} if marker is not None: params['marker'] = marker return self.make_request(action='ListPipelines', body=json.dumps(params)) def poll_for_task(self, worker_group, hostname=None, instance_identity=None): """ Task runners call this action to receive a task to perform from AWS Data Pipeline. The task runner specifies which tasks it can perform by setting a value for the workerGroup parameter of the PollForTask call. The task returned by PollForTask may come from any of the pipelines that match the workerGroup value passed in by the task runner and that was launched using the IAM user credentials specified by the task runner. If tasks are ready in the work queue, PollForTask returns a response immediately. If no tasks are available in the queue, PollForTask uses long-polling and holds on to a poll connection for up to a 90 seconds during which time the first newly scheduled task is handed to the task runner. To accomodate this, set the socket timeout in your task runner to 90 seconds. The task runner should not call PollForTask again on the same `workerGroup` until it receives a response, and this may take up to 90 seconds. :type worker_group: string :param worker_group: Indicates the type of task the task runner is configured to accept and process. The worker group is set as a field on objects in the pipeline when they are created. You can only specify a single value for `workerGroup` in the call to PollForTask. There are no wildcard values permitted in `workerGroup`, the string must be an exact, case-sensitive, match. :type hostname: string :param hostname: The public DNS name of the calling task runner. :type instance_identity: dict :param instance_identity: Identity information for the Amazon EC2 instance that is hosting the task runner. 
You can get this value by calling the URI, `http://169.254.169.254/latest/meta-data/instance- id`, from the EC2 instance. For more information, go to `Instance Metadata`_ in the Amazon Elastic Compute Cloud User Guide. Passing in this value proves that your task runner is running on an EC2 instance, and ensures the proper AWS Data Pipeline service charges are applied to your pipeline. """ params = {'workerGroup': worker_group, } if hostname is not None: params['hostname'] = hostname if instance_identity is not None: params['instanceIdentity'] = instance_identity return self.make_request(action='PollForTask', body=json.dumps(params)) def put_pipeline_definition(self, pipeline_objects, pipeline_id): """ Adds tasks, schedules, and preconditions that control the behavior of the pipeline. You can use PutPipelineDefinition to populate a new pipeline or to update an existing pipeline that has not yet been activated. PutPipelineDefinition also validates the configuration as it adds it to the pipeline. Changes to the pipeline are saved unless one of the following three validation errors exists in the pipeline. #. An object is missing a name or identifier field. #. A string or reference field is empty. #. The number of objects in the pipeline exceeds the maximum allowed objects. Pipeline object definitions are passed to the PutPipelineDefinition action and returned by the GetPipelineDefinition action. :type pipeline_id: string :param pipeline_id: The identifier of the pipeline to be configured. :type pipeline_objects: list :param pipeline_objects: The objects that define the pipeline. These will overwrite the existing pipeline definition. """ params = { 'pipelineId': pipeline_id, 'pipelineObjects': pipeline_objects, } return self.make_request(action='PutPipelineDefinition', body=json.dumps(params)) def query_objects(self, pipeline_id, sphere, marker=None, query=None, limit=None): """ Queries a pipeline for the names of objects that match a specified set of conditions. 
The objects returned by QueryObjects are paginated and then filtered by the value you set for query. This means the action may return an empty result set with a value set for marker. If `HasMoreResults` is set to `True`, you should continue to call QueryObjects, passing in the returned value for marker, until `HasMoreResults` returns `False`. :type pipeline_id: string :param pipeline_id: Identifier of the pipeline to be queried for object names. :type query: dict :param query: Query that defines the objects to be returned. The Query object can contain a maximum of ten selectors. The conditions in the query are limited to top-level String fields in the object. These filters can be applied to components, instances, and attempts. :type sphere: string :param sphere: Specifies whether the query applies to components or instances. Allowable values: `COMPONENT`, `INSTANCE`, `ATTEMPT`. :type marker: string :param marker: The starting point for the results to be returned. The first time you call QueryObjects, this value should be empty. As long as the action returns `HasMoreResults` as `True`, you can call QueryObjects again and pass the marker value from the response to retrieve the next set of results. :type limit: integer :param limit: Specifies the maximum number of object names that QueryObjects will return in a single call. The default value is 100. """ params = {'pipelineId': pipeline_id, 'sphere': sphere, } if query is not None: params['query'] = query if marker is not None: params['marker'] = marker if limit is not None: params['limit'] = limit return self.make_request(action='QueryObjects', body=json.dumps(params)) def report_task_progress(self, task_id): """ Updates the AWS Data Pipeline service on the progress of the calling task runner. When the task runner is assigned a task, it should call ReportTaskProgress to acknowledge that it has the task within 2 minutes. 
If the web service does not recieve this acknowledgement within the 2 minute window, it will assign the task in a subsequent PollForTask call. After this initial acknowledgement, the task runner only needs to report progress every 15 minutes to maintain its ownership of the task. You can change this reporting time from 15 minutes by specifying a `reportProgressTimeout` field in your pipeline. If a task runner does not report its status after 5 minutes, AWS Data Pipeline will assume that the task runner is unable to process the task and will reassign the task in a subsequent response to PollForTask. task runners should call ReportTaskProgress every 60 seconds. :type task_id: string :param task_id: Identifier of the task assigned to the task runner. This value is provided in the TaskObject that the service returns with the response for the PollForTask action. """ params = {'taskId': task_id, } return self.make_request(action='ReportTaskProgress', body=json.dumps(params)) def report_task_runner_heartbeat(self, taskrunner_id, worker_group=None, hostname=None): """ Task runners call ReportTaskRunnerHeartbeat every 15 minutes to indicate that they are operational. In the case of AWS Data Pipeline Task Runner launched on a resource managed by AWS Data Pipeline, the web service can use this call to detect when the task runner application has failed and restart a new instance. :type taskrunner_id: string :param taskrunner_id: The identifier of the task runner. This value should be unique across your AWS account. In the case of AWS Data Pipeline Task Runner launched on a resource managed by AWS Data Pipeline, the web service provides a unique identifier when it launches the application. If you have written a custom task runner, you should assign a unique identifier for the task runner. :type worker_group: string :param worker_group: Indicates the type of task the task runner is configured to accept and process. 
The worker group is set as a field on objects in the pipeline when they are created. You can only specify a single value for `workerGroup` in the call to ReportTaskRunnerHeartbeat. There are no wildcard values permitted in `workerGroup`, the string must be an exact, case-sensitive, match. :type hostname: string :param hostname: The public DNS name of the calling task runner. """ params = {'taskrunnerId': taskrunner_id, } if worker_group is not None: params['workerGroup'] = worker_group if hostname is not None: params['hostname'] = hostname return self.make_request(action='ReportTaskRunnerHeartbeat', body=json.dumps(params)) def set_status(self, object_ids, status, pipeline_id): """ Requests that the status of an array of physical or logical pipeline objects be updated in the pipeline. This update may not occur immediately, but is eventually consistent. The status that can be set depends on the type of object. :type pipeline_id: string :param pipeline_id: Identifies the pipeline that contains the objects. :type object_ids: list :param object_ids: Identifies an array of objects. The corresponding objects can be either physical or components, but not a mix of both types. :type status: string :param status: Specifies the status to be set on all the objects in `objectIds`. For components, this can be either `PAUSE` or `RESUME`. For instances, this can be either `CANCEL`, `RERUN`, or `MARK_FINISHED`. """ params = { 'pipelineId': pipeline_id, 'objectIds': object_ids, 'status': status, } return self.make_request(action='SetStatus', body=json.dumps(params)) def set_task_status(self, task_id, task_status, error_id=None, error_message=None, error_stack_trace=None): """ Notifies AWS Data Pipeline that a task is completed and provides information about the final status. The task runner calls this action regardless of whether the task was sucessful. 
The task runner does not need to call SetTaskStatus for tasks that are canceled by the web service during a call to ReportTaskProgress. :type task_id: string :param task_id: Identifies the task assigned to the task runner. This value is set in the TaskObject that is returned by the PollForTask action. :type task_status: string :param task_status: If `FINISHED`, the task successfully completed. If `FAILED` the task ended unsuccessfully. The `FALSE` value is used by preconditions. :type error_id: string :param error_id: If an error occurred during the task, this value specifies an id value that represents the error. This value is set on the physical attempt object. It is used to display error information to the user. It should not start with string "Service_" which is reserved by the system. :type error_message: string :param error_message: If an error occurred during the task, this value specifies a text description of the error. This value is set on the physical attempt object. It is used to display error information to the user. The web service does not parse this value. :type error_stack_trace: string :param error_stack_trace: If an error occurred during the task, this value specifies the stack trace associated with the error. This value is set on the physical attempt object. It is used to display error information to the user. The web service does not parse this value. """ params = {'taskId': task_id, 'taskStatus': task_status, } if error_id is not None: params['errorId'] = error_id if error_message is not None: params['errorMessage'] = error_message if error_stack_trace is not None: params['errorStackTrace'] = error_stack_trace return self.make_request(action='SetTaskStatus', body=json.dumps(params)) def validate_pipeline_definition(self, pipeline_objects, pipeline_id): """ Tests the pipeline definition with a set of validation checks to ensure that it is well formed and can run without error. 
:type pipeline_id: string :param pipeline_id: Identifies the pipeline whose definition is to be validated. :type pipeline_objects: list :param pipeline_objects: A list of objects that define the pipeline changes to validate against the pipeline. """ params = { 'pipelineId': pipeline_id, 'pipelineObjects': pipeline_objects, } return self.make_request(action='ValidatePipelineDefinition', body=json.dumps(params)) def make_request(self, action, body): headers = { 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), 'Host': self.region.endpoint, 'Content-Type': 'application/x-amz-json-1.1', 'Content-Length': str(len(body)), } http_request = self.build_base_http_request( method='POST', path='/', auth_path='/', params={}, headers=headers, data=body) response = self._mexe(http_request, sender=None, override_num_retries=10) response_body = response.read() boto.log.debug(response_body) if response.status == 200: if response_body: return json.loads(response_body) else: json_body = json.loads(response_body) fault_name = json_body.get('__type', None) exception_class = self._faults.get(fault_name, self.ResponseError) raise exception_class(response.status, response.reason, body=json_body)
mit
grap/OpenUpgrade
addons/account_budget/wizard/__init__.py
444
1196
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import account_budget_crossovered_report import account_budget_analytic import account_budget_crossovered_summary_report import account_budget_report # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
IljaGrebel/OpenWrt-SDK-imx6_HummingBoard
staging_dir/host/lib/scons-2.3.5/SCons/Platform/win32.py
1
15007
"""SCons.Platform.win32 Platform-specific initialization for Win32 systems. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Platform.Platform() selection method. """ # # Copyright (c) 2001 - 2015 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Platform/win32.py rel_2.3.5:3329:275e75118ad4 2015/06/20 11:18:26 bdbaddog" import os import os.path import sys import tempfile from SCons.Platform.posix import exitvalmap from SCons.Platform import TempFileMunge import SCons.Util try: import msvcrt import win32api import win32con msvcrt.get_osfhandle win32api.SetHandleInformation win32con.HANDLE_FLAG_INHERIT except ImportError: parallel_msg = \ "you do not seem to have the pywin32 extensions installed;\n" + \ "\tparallel (-j) builds may not work reliably with open Python files." 
except AttributeError: parallel_msg = \ "your pywin32 extensions do not support file handle operations;\n" + \ "\tparallel (-j) builds may not work reliably with open Python files." else: parallel_msg = None import builtins _builtin_file = builtins.file _builtin_open = builtins.open class _scons_file(_builtin_file): def __init__(self, *args, **kw): _builtin_file.__init__(self, *args, **kw) win32api.SetHandleInformation(msvcrt.get_osfhandle(self.fileno()), win32con.HANDLE_FLAG_INHERIT, 0) def _scons_open(*args, **kw): fp = _builtin_open(*args, **kw) win32api.SetHandleInformation(msvcrt.get_osfhandle(fp.fileno()), win32con.HANDLE_FLAG_INHERIT, 0) return fp builtins.file = _scons_file builtins.open = _scons_open try: import threading spawn_lock = threading.Lock() # This locked version of spawnve works around a Windows # MSVCRT bug, because its spawnve is not thread-safe. # Without this, python can randomly crash while using -jN. # See the python bug at http://bugs.python.org/issue6476 # and SCons issue at # http://scons.tigris.org/issues/show_bug.cgi?id=2449 def spawnve(mode, file, args, env): spawn_lock.acquire() try: if mode == os.P_WAIT: ret = os.spawnve(os.P_NOWAIT, file, args, env) else: ret = os.spawnve(mode, file, args, env) finally: spawn_lock.release() if mode == os.P_WAIT: pid, status = os.waitpid(ret, 0) ret = status >> 8 return ret except ImportError: # Use the unsafe method of spawnve. # Please, don't try to optimize this try-except block # away by assuming that the threading module is always present. # In the test test/option-j.py we intentionally call SCons with # a fake threading.py that raises an import exception right away, # simulating a non-existent package. def spawnve(mode, file, args, env): return os.spawnve(mode, file, args, env) # The upshot of all this is that, if you are using Python 1.5.2, # you had better have cmd or command.com in your PATH when you run # scons. 
def piped_spawn(sh, escape, cmd, args, env, stdout, stderr):
    """Spawn 'cmd' through the shell 'sh', capturing stdout/stderr.

    There is no direct way to do that in python.  What we do here should
    work for most cases: in case stdout (stderr) is not redirected to a
    file, we redirect it into a temporary file tmpFileStdout
    (tmpFileStderr) and copy the contents of this file to the stdout
    (stderr) stream given in the argument.

    Returns the child exit status (or 127 when the shell is missing or
    the spawn fails with an unmappable OS error).
    """
    if not sh:
        sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
        return 127
    else:
        # one temporary file for stdout and stderr
        # NOTE(review): tempfile.mktemp() is race-prone (another process may
        # claim the name before the shell creates the file).  The name is
        # handed to the shell via redirection, so switching to mkstemp()
        # would change the flow; flagging rather than changing behavior.
        tmpFileStdout = os.path.normpath(tempfile.mktemp())
        tmpFileStderr = os.path.normpath(tempfile.mktemp())

        # check if output is redirected
        stdoutRedirected = 0
        stderrRedirected = 0
        for arg in args:
            # are there more possibilities to redirect stdout ?
            if (arg.find(">", 0, 1) != -1 or
                arg.find("1>", 0, 2) != -1):
                stdoutRedirected = 1
            # are there more possibilities to redirect stderr ?
            if arg.find("2>", 0, 2) != -1:
                stderrRedirected = 1

        # redirect output of non-redirected streams to our tempfiles
        if stdoutRedirected == 0:
            args.append(">" + str(tmpFileStdout))
        if stderrRedirected == 0:
            args.append("2>" + str(tmpFileStderr))

        # actually do the spawn
        try:
            args = [sh, '/C', escape(' '.join(args))]
            ret = spawnve(os.P_WAIT, sh, args, env)
        except OSError as e:
            # catch any error
            try:
                ret = exitvalmap[e.args[0]]
            except KeyError:
                # BUG FIX: 'ret' was left unbound here, so an unknown
                # OSError code crashed with NameError at 'return ret'
                # below.  Fall back to 127, as exec_spawn() already does.
                ret = 127
                sys.stderr.write("scons: unknown OSError exception code %d - %s: %s\n"
                                 % (e.args[0], cmd, e.args[1]))
            if stderr is not None:
                stderr.write("scons: %s: %s\n" % (cmd, e.args[1]))

        # copy child output from tempfiles to our streams
        # and do clean up stuff
        if stdout is not None and stdoutRedirected == 0:
            try:
                stdout.write(open(tmpFileStdout, "r").read())
                os.remove(tmpFileStdout)
            except (IOError, OSError):
                pass

        if stderr is not None and stderrRedirected == 0:
            try:
                stderr.write(open(tmpFileStderr, "r").read())
                os.remove(tmpFileStderr)
            except (IOError, OSError):
                pass

        return ret


def exec_spawn(l, env):
    """Run command list 'l' via spawnve, mapping OS errors to exit codes."""
    try:
        result = spawnve(os.P_WAIT, l[0], l, env)
    except OSError as e:
        try:
            result = exitvalmap[e.args[0]]
            sys.stderr.write("scons: %s: %s\n" % (l[0], e.args[1]))
        except KeyError:
            result = 127
            # Build a short command description for the error message,
            # truncating overlong argument strings.
            if len(l) > 2:
                if len(l[2]) < 1000:
                    command = ' '.join(l[0:3])
                else:
                    command = l[0]
            else:
                command = l[0]
            sys.stderr.write("scons: unknown OSError exception code %d - '%s': %s\n"
                             % (e.args[0], command, e.args[1]))
    return result


def spawn(sh, escape, cmd, args, env):
    """Spawn 'args' through the command interpreter 'sh'."""
    if not sh:
        sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
        return 127
    return exec_spawn([sh, '/C', escape(' '.join(args))], env)


# Windows does not allow special characters in file names anyway, so no
# need for a complex escape function, we will just quote the arg, except
# that "cmd /c" requires that if an argument ends with a backslash it
# needs to be escaped so as not to interfere with closing double quote
# that we add.
def escape(x):
    if x[-1] == '\\':
        x = x + '\\'
    return '"' + x + '"'


# Get the windows system directory name.  Cached in _system_root after
# the first successful lookup.
_system_root = None


def get_system_root():
    global _system_root
    if _system_root is not None:
        return _system_root

    # A reasonable default if we can't read the registry
    val = os.environ.get('SystemRoot', "C:\\WINDOWS")

    if SCons.Util.can_read_reg:
        try:
            # Look for Windows NT system root
            k = SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                        'Software\\Microsoft\\Windows NT\\CurrentVersion')
            val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
        except SCons.Util.RegError:
            try:
                # Okay, try the Windows 9x system root
                k = SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                            'Software\\Microsoft\\Windows\\CurrentVersion')
                val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
            except KeyboardInterrupt:
                raise
            except:
                # Deliberate best-effort: fall through to the default above.
                pass

    _system_root = val
    return val


def get_program_files_dir():
    """Get the location of the program files directory."""
    # Now see if we can look in the registry...
    val = ''
    if SCons.Util.can_read_reg:
        try:
            # Look for Windows Program Files directory
            k = SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                        'Software\\Microsoft\\Windows\\CurrentVersion')
            val, tok = SCons.Util.RegQueryValueEx(k, 'ProgramFilesDir')
        except SCons.Util.RegError:
            val = ''

    if val == '':
        # A reasonable default if we can't read the registry
        # (Actually, it's pretty reasonable even if we can :-)
        val = os.path.join(os.path.dirname(get_system_root()), "Program Files")

    return val


# Determine which windows CPU were running on.
class ArchDefinition(object):
    """
    A class for defining architecture-specific settings and logic.
    """
    def __init__(self, arch, synonyms=None):
        # BUG FIX: 'synonyms=[]' was a shared mutable default argument;
        # use a None sentinel instead (behavior for all existing callers,
        # which pass explicit lists, is unchanged).
        self.arch = arch
        self.synonyms = synonyms if synonyms is not None else []


SupportedArchitectureList = [
    ArchDefinition(
        'x86',
        ['i386', 'i486', 'i586', 'i686'],
    ),

    ArchDefinition(
        'x86_64',
        ['AMD64', 'amd64', 'em64t', 'EM64T', 'x86_64'],
    ),

    ArchDefinition(
        'ia64',
        ['IA64'],
    ),
]

# Map every canonical name and synonym to its ArchDefinition.
SupportedArchitectureMap = {}
for a in SupportedArchitectureList:
    SupportedArchitectureMap[a.arch] = a
    for s in a.synonyms:
        SupportedArchitectureMap[s] = a


def get_architecture(arch=None):
    """Returns the definition for the specified architecture string.

    If no string is specified, the system default is returned (as defined
    by the PROCESSOR_ARCHITEW6432 or PROCESSOR_ARCHITECTURE environment
    variables).  Unknown strings yield an empty ArchDefinition.
    """
    if arch is None:
        arch = os.environ.get('PROCESSOR_ARCHITEW6432')
        if not arch:
            arch = os.environ.get('PROCESSOR_ARCHITECTURE')
    return SupportedArchitectureMap.get(arch, ArchDefinition('', ['']))


def generate(env):
    # Attempt to find cmd.exe (for WinNT/2k/XP) or
    # command.com for Win9x
    cmd_interp = ''
    # First see if we can look in the registry...
    if SCons.Util.can_read_reg:
        try:
            # Look for Windows NT system root
            k = SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                        'Software\\Microsoft\\Windows NT\\CurrentVersion')
            val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
            cmd_interp = os.path.join(val, 'System32\\cmd.exe')
        except SCons.Util.RegError:
            try:
                # Okay, try the Windows 9x system root
                k = SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                            'Software\\Microsoft\\Windows\\CurrentVersion')
                val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
                cmd_interp = os.path.join(val, 'command.com')
            except KeyboardInterrupt:
                raise
            except:
                pass

    # For the special case of not having access to the registry, we
    # use a temporary path and pathext to attempt to find the command
    # interpreter.  If we fail, we try to find the interpreter through
    # the env's PATH.  The problem with that is that it might not
    # contain an ENV and a PATH.
    if not cmd_interp:
        systemroot = get_system_root()
        tmp_path = systemroot + os.pathsep + \
                   os.path.join(systemroot, 'System32')
        tmp_pathext = '.com;.exe;.bat;.cmd'
        if 'PATHEXT' in os.environ:
            tmp_pathext = os.environ['PATHEXT']
        cmd_interp = SCons.Util.WhereIs('cmd', tmp_path, tmp_pathext)
        if not cmd_interp:
            cmd_interp = SCons.Util.WhereIs('command', tmp_path, tmp_pathext)

    if not cmd_interp:
        cmd_interp = env.Detect('cmd')
        if not cmd_interp:
            cmd_interp = env.Detect('command')

    if 'ENV' not in env:
        env['ENV'] = {}

    # Import things from the external environment to the construction
    # environment's ENV.  This is a potential slippery slope, because we
    # *don't* want to make builds dependent on the user's environment by
    # default.  We're doing this for SystemRoot, though, because it's
    # needed for anything that uses sockets, and seldom changes, and
    # for SystemDrive because it's related.
    #
    # Weigh the impact carefully before adding other variables to this list.
    import_env = ['SystemDrive', 'SystemRoot', 'TEMP', 'TMP']
    for var in import_env:
        v = os.environ.get(var)
        if v:
            env['ENV'][var] = v

    if 'COMSPEC' not in env['ENV']:
        v = os.environ.get("COMSPEC")
        if v:
            env['ENV']['COMSPEC'] = v

    # BUG FIX: this was written '\System32'; '\S' is not a recognized escape
    # sequence so it happened to produce a literal backslash, but make it
    # explicit to avoid the invalid-escape warning and any ambiguity.
    env.AppendENVPath('PATH', get_system_root() + '\\System32')

    env['ENV']['PATHEXT'] = '.COM;.EXE;.BAT;.CMD'
    env['OBJPREFIX'] = ''
    env['OBJSUFFIX'] = '.obj'
    env['SHOBJPREFIX'] = '$OBJPREFIX'
    env['SHOBJSUFFIX'] = '$OBJSUFFIX'
    env['PROGPREFIX'] = ''
    env['PROGSUFFIX'] = '.exe'
    env['LIBPREFIX'] = ''
    env['LIBSUFFIX'] = '.lib'
    env['SHLIBPREFIX'] = ''
    env['SHLIBSUFFIX'] = '.dll'
    env['LIBPREFIXES'] = ['$LIBPREFIX']
    env['LIBSUFFIXES'] = ['$LIBSUFFIX']
    env['PSPAWN'] = piped_spawn
    env['SPAWN'] = spawn
    env['SHELL'] = cmd_interp
    env['TEMPFILE'] = TempFileMunge
    env['TEMPFILEPREFIX'] = '@'
    env['MAXLINELENGTH'] = 2048
    env['ESCAPE'] = escape

    env['HOST_OS'] = 'win32'
    env['HOST_ARCH'] = get_architecture().arch

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
gpl-2.0
deadRaccoons/MameAirlines
tabo/cherrypy/cherrypy/_cperror.py
57
22680
"""Exception classes for CherryPy. CherryPy provides (and uses) exceptions for declaring that the HTTP response should be a status other than the default "200 OK". You can ``raise`` them like normal Python exceptions. You can also call them and they will raise themselves; this means you can set an :class:`HTTPError<cherrypy._cperror.HTTPError>` or :class:`HTTPRedirect<cherrypy._cperror.HTTPRedirect>` as the :attr:`request.handler<cherrypy._cprequest.Request.handler>`. .. _redirectingpost: Redirecting POST ================ When you GET a resource and are redirected by the server to another Location, there's generally no problem since GET is both a "safe method" (there should be no side-effects) and an "idempotent method" (multiple calls are no different than a single call). POST, however, is neither safe nor idempotent--if you charge a credit card, you don't want to be charged twice by a redirect! For this reason, *none* of the 3xx responses permit a user-agent (browser) to resubmit a POST on redirection without first confirming the action with the user: ===== ================================= =========== 300 Multiple Choices Confirm with the user 301 Moved Permanently Confirm with the user 302 Found (Object moved temporarily) Confirm with the user 303 See Other GET the new URI--no confirmation 304 Not modified (for conditional GET only--POST should not raise this error) 305 Use Proxy Confirm with the user 307 Temporary Redirect Confirm with the user ===== ================================= =========== However, browsers have historically implemented these restrictions poorly; in particular, many browsers do not force the user to confirm 301, 302 or 307 when redirecting POST. For this reason, CherryPy defaults to 303, which most user-agents appear to have implemented correctly. Therefore, if you raise HTTPRedirect for a POST request, the user-agent will most likely attempt to GET the new URI (without asking for confirmation from the user). 
We realize this is confusing for developers, but it's the safest thing we could do. You are of course free to raise ``HTTPRedirect(uri, status=302)`` or any other 3xx status if you know what you're doing, but given the environment, we couldn't let any of those be the default. Custom Error Handling ===================== .. image:: /refman/cperrors.gif Anticipated HTTP responses -------------------------- The 'error_page' config namespace can be used to provide custom HTML output for expected responses (like 404 Not Found). Supply a filename from which the output will be read. The contents will be interpolated with the values %(status)s, %(message)s, %(traceback)s, and %(version)s using plain old Python `string formatting <http://docs.python.org/2/library/stdtypes.html#string-formatting-operations>`_. :: _cp_config = { 'error_page.404': os.path.join(localDir, "static/index.html") } Beginning in version 3.1, you may also provide a function or other callable as an error_page entry. It will be passed the same status, message, traceback and version arguments that are interpolated into templates:: def error_page_402(status, message, traceback, version): return "Error %s - Well, I'm very sorry but you haven't paid!" % status cherrypy.config.update({'error_page.402': error_page_402}) Also in 3.1, in addition to the numbered error codes, you may also supply "error_page.default" to handle all codes which do not have their own error_page entry. Unanticipated errors -------------------- CherryPy also has a generic error handling mechanism: whenever an unanticipated error occurs in your code, it will call :func:`Request.error_response<cherrypy._cprequest.Request.error_response>` to set the response status, headers, and body. By default, this is the same output as :class:`HTTPError(500) <cherrypy._cperror.HTTPError>`. If you want to provide some other behavior, you generally replace "request.error_response". 
Here is some sample code that shows how to display a custom error message and send an e-mail containing the error:: from cherrypy import _cperror def handle_error(): cherrypy.response.status = 500 cherrypy.response.body = [ "<html><body>Sorry, an error occured</body></html>" ] sendMail('error@domain.com', 'Error in your web app', _cperror.format_exc()) class Root: _cp_config = {'request.error_response': handle_error} Note that you have to explicitly set :attr:`response.body <cherrypy._cprequest.Response.body>` and not simply return an error message as a result. """ from cgi import escape as _escape from sys import exc_info as _exc_info from traceback import format_exception as _format_exception from cherrypy._cpcompat import basestring, bytestr, iteritems, ntob from cherrypy._cpcompat import tonative, urljoin as _urljoin from cherrypy.lib import httputil as _httputil class CherryPyException(Exception): """A base class for CherryPy exceptions.""" pass class TimeoutError(CherryPyException): """Exception raised when Response.timed_out is detected.""" pass class InternalRedirect(CherryPyException): """Exception raised to switch to the handler for a different URL. This exception will redirect processing to another path within the site (without informing the client). Provide the new path as an argument when raising the exception. Provide any params in the querystring for the new URL. """ def __init__(self, path, query_string=""): import cherrypy self.request = cherrypy.serving.request self.query_string = query_string if "?" in path: # Separate any params included in the path path, self.query_string = path.split("?", 1) # Note that urljoin will "do the right thing" whether url is: # 1. a URL relative to root (e.g. "/dummy") # 2. a URL relative to the current path # Note that any query string will be discarded. path = _urljoin(self.request.path_info, path) # Set a 'path' member attribute so that code which traps this # error can have access to it. 
self.path = path CherryPyException.__init__(self, path, self.query_string) class HTTPRedirect(CherryPyException): """Exception raised when the request should be redirected. This exception will force a HTTP redirect to the URL or URL's you give it. The new URL must be passed as the first argument to the Exception, e.g., HTTPRedirect(newUrl). Multiple URLs are allowed in a list. If a URL is absolute, it will be used as-is. If it is relative, it is assumed to be relative to the current cherrypy.request.path_info. If one of the provided URL is a unicode object, it will be encoded using the default encoding or the one passed in parameter. There are multiple types of redirect, from which you can select via the ``status`` argument. If you do not provide a ``status`` arg, it defaults to 303 (or 302 if responding with HTTP/1.0). Examples:: raise cherrypy.HTTPRedirect("") raise cherrypy.HTTPRedirect("/abs/path", 307) raise cherrypy.HTTPRedirect(["path1", "path2?a=1&b=2"], 301) See :ref:`redirectingpost` for additional caveats. """ status = None """The integer HTTP status code to emit.""" urls = None """The list of URL's to emit.""" encoding = 'utf-8' """The encoding when passed urls are not native strings""" def __init__(self, urls, status=None, encoding=None): import cherrypy request = cherrypy.serving.request if isinstance(urls, basestring): urls = [urls] abs_urls = [] for url in urls: url = tonative(url, encoding or self.encoding) # Note that urljoin will "do the right thing" whether url is: # 1. a complete URL with host (e.g. "http://www.example.com/test") # 2. a URL relative to root (e.g. "/dummy") # 3. a URL relative to the current path # Note that any query string in cherrypy.request is discarded. url = _urljoin(cherrypy.url(), url) abs_urls.append(url) self.urls = abs_urls # RFC 2616 indicates a 301 response code fits our goal; however, # browser support for 301 is quite messy. Do 302/303 instead. 
See # http://www.alanflavell.org.uk/www/post-redirect.html if status is None: if request.protocol >= (1, 1): status = 303 else: status = 302 else: status = int(status) if status < 300 or status > 399: raise ValueError("status must be between 300 and 399.") self.status = status CherryPyException.__init__(self, abs_urls, status) def set_response(self): """Modify cherrypy.response status, headers, and body to represent self. CherryPy uses this internally, but you can also use it to create an HTTPRedirect object and set its output without *raising* the exception. """ import cherrypy response = cherrypy.serving.response response.status = status = self.status if status in (300, 301, 302, 303, 307): response.headers['Content-Type'] = "text/html;charset=utf-8" # "The ... URI SHOULD be given by the Location field # in the response." response.headers['Location'] = self.urls[0] # "Unless the request method was HEAD, the entity of the response # SHOULD contain a short hypertext note with a hyperlink to the # new URI(s)." msg = { 300: "This resource can be found at ", 301: "This resource has permanently moved to ", 302: "This resource resides temporarily at ", 303: "This resource can be found at ", 307: "This resource has moved temporarily to ", }[status] msg += '<a href=%s>%s</a>.' from xml.sax import saxutils msgs = [msg % (saxutils.quoteattr(u), u) for u in self.urls] response.body = ntob("<br />\n".join(msgs), 'utf-8') # Previous code may have set C-L, so we have to reset it # (allow finalize to set it). response.headers.pop('Content-Length', None) elif status == 304: # Not Modified. # "The response MUST include the following header fields: # Date, unless its omission is required by section 14.18.1" # The "Date" header should have been set in Response.__init__ # "...the response SHOULD NOT include other entity-headers." 
for key in ('Allow', 'Content-Encoding', 'Content-Language', 'Content-Length', 'Content-Location', 'Content-MD5', 'Content-Range', 'Content-Type', 'Expires', 'Last-Modified'): if key in response.headers: del response.headers[key] # "The 304 response MUST NOT contain a message-body." response.body = None # Previous code may have set C-L, so we have to reset it. response.headers.pop('Content-Length', None) elif status == 305: # Use Proxy. # self.urls[0] should be the URI of the proxy. response.headers['Location'] = self.urls[0] response.body = None # Previous code may have set C-L, so we have to reset it. response.headers.pop('Content-Length', None) else: raise ValueError("The %s status code is unknown." % status) def __call__(self): """Use this exception as a request.handler (raise self).""" raise self def clean_headers(status): """Remove any headers which should not apply to an error response.""" import cherrypy response = cherrypy.serving.response # Remove headers which applied to the original content, # but do not apply to the error page. respheaders = response.headers for key in ["Accept-Ranges", "Age", "ETag", "Location", "Retry-After", "Vary", "Content-Encoding", "Content-Length", "Expires", "Content-Location", "Content-MD5", "Last-Modified"]: if key in respheaders: del respheaders[key] if status != 416: # A server sending a response with status code 416 (Requested # range not satisfiable) SHOULD include a Content-Range field # with a byte-range-resp-spec of "*". The instance-length # specifies the current length of the selected resource. # A response with status code 206 (Partial Content) MUST NOT # include a Content-Range field with a byte-range- resp-spec of "*". if "Content-Range" in respheaders: del respheaders["Content-Range"] class HTTPError(CherryPyException): """Exception used to return an HTTP error code (4xx-5xx) to the client. This exception can be used to automatically send a response using a http status code, with an appropriate error page. 
It takes an optional ``status`` argument (which must be between 400 and 599); it defaults to 500 ("Internal Server Error"). It also takes an optional ``message`` argument, which will be returned in the response body. See `RFC2616 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4>`_ for a complete list of available error codes and when to use them. Examples:: raise cherrypy.HTTPError(403) raise cherrypy.HTTPError( "403 Forbidden", "You are not allowed to access this resource.") """ status = None """The HTTP status code. May be of type int or str (with a Reason-Phrase). """ code = None """The integer HTTP status code.""" reason = None """The HTTP Reason-Phrase string.""" def __init__(self, status=500, message=None): self.status = status try: self.code, self.reason, defaultmsg = _httputil.valid_status(status) except ValueError: raise self.__class__(500, _exc_info()[1].args[0]) if self.code < 400 or self.code > 599: raise ValueError("status must be between 400 and 599.") # See http://www.python.org/dev/peps/pep-0352/ # self.message = message self._message = message or defaultmsg CherryPyException.__init__(self, status, message) def set_response(self): """Modify cherrypy.response status, headers, and body to represent self. CherryPy uses this internally, but you can also use it to create an HTTPError object and set its output without *raising* the exception. """ import cherrypy response = cherrypy.serving.response clean_headers(self.code) # In all cases, finalize will be called after this method, # so don't bother cleaning up response values here. 
response.status = self.status tb = None if cherrypy.serving.request.show_tracebacks: tb = format_exc() response.headers.pop('Content-Length', None) content = self.get_error_page(self.status, traceback=tb, message=self._message) response.body = content _be_ie_unfriendly(self.code) def get_error_page(self, *args, **kwargs): return get_error_page(*args, **kwargs) def __call__(self): """Use this exception as a request.handler (raise self).""" raise self class NotFound(HTTPError): """Exception raised when a URL could not be mapped to any handler (404). This is equivalent to raising :class:`HTTPError("404 Not Found") <cherrypy._cperror.HTTPError>`. """ def __init__(self, path=None): if path is None: import cherrypy request = cherrypy.serving.request path = request.script_name + request.path_info self.args = (path,) HTTPError.__init__(self, 404, "The path '%s' was not found." % path) _HTTPErrorTemplate = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"></meta> <title>%(status)s</title> <style type="text/css"> #powered_by { margin-top: 20px; border-top: 2px solid black; font-style: italic; } #traceback { color: red; } </style> </head> <body> <h2>%(status)s</h2> <p>%(message)s</p> <pre id="traceback">%(traceback)s</pre> <div id="powered_by"> <span> Powered by <a href="http://www.cherrypy.org">CherryPy %(version)s</a> </span> </div> </body> </html> ''' def get_error_page(status, **kwargs): """Return an HTML page, containing a pretty error response. status should be an int or a str. kwargs will be interpolated into the page template. """ import cherrypy try: code, reason, message = _httputil.valid_status(status) except ValueError: raise cherrypy.HTTPError(500, _exc_info()[1].args[0]) # We can't use setdefault here, because some # callers send None for kwarg values. 
if kwargs.get('status') is None: kwargs['status'] = "%s %s" % (code, reason) if kwargs.get('message') is None: kwargs['message'] = message if kwargs.get('traceback') is None: kwargs['traceback'] = '' if kwargs.get('version') is None: kwargs['version'] = cherrypy.__version__ for k, v in iteritems(kwargs): if v is None: kwargs[k] = "" else: kwargs[k] = _escape(kwargs[k]) # Use a custom template or callable for the error page? pages = cherrypy.serving.request.error_page error_page = pages.get(code) or pages.get('default') # Default template, can be overridden below. template = _HTTPErrorTemplate if error_page: try: if hasattr(error_page, '__call__'): # The caller function may be setting headers manually, # so we delegate to it completely. We may be returning # an iterator as well as a string here. # # We *must* make sure any content is not unicode. result = error_page(**kwargs) if cherrypy.lib.is_iterator(result): from cherrypy.lib.encoding import UTF8StreamEncoder return UTF8StreamEncoder(result) elif isinstance(result, cherrypy._cpcompat.unicodestr): return result.encode('utf-8') else: if not isinstance(result, cherrypy._cpcompat.bytestr): raise ValueError('error page function did not ' 'return a bytestring, unicodestring or an ' 'iterator - returned object of type %s.' % (type(result).__name__)) return result else: # Load the template from this path. 
template = tonative(open(error_page, 'rb').read()) except: e = _format_exception(*_exc_info())[-1] m = kwargs['message'] if m: m += "<br />" m += "In addition, the custom error page failed:\n<br />%s" % e kwargs['message'] = m response = cherrypy.serving.response response.headers['Content-Type'] = "text/html;charset=utf-8" result = template % kwargs return result.encode('utf-8') _ie_friendly_error_sizes = { 400: 512, 403: 256, 404: 512, 405: 256, 406: 512, 408: 512, 409: 512, 410: 256, 500: 512, 501: 512, 505: 512, } def _be_ie_unfriendly(status): import cherrypy response = cherrypy.serving.response # For some statuses, Internet Explorer 5+ shows "friendly error # messages" instead of our response.body if the body is smaller # than a given size. Fix this by returning a body over that size # (by adding whitespace). # See http://support.microsoft.com/kb/q218155/ s = _ie_friendly_error_sizes.get(status, 0) if s: s += 1 # Since we are issuing an HTTP error status, we assume that # the entity is short, and we should just collapse it. content = response.collapse_body() l = len(content) if l and l < s: # IN ADDITION: the response must be written to IE # in one chunk or it will still get replaced! Bah. content = content + (ntob(" ") * (s - l)) response.body = content response.headers['Content-Length'] = str(len(content)) def format_exc(exc=None): """Return exc (or sys.exc_info if None), formatted.""" try: if exc is None: exc = _exc_info() if exc == (None, None, None): return "" import traceback return "".join(traceback.format_exception(*exc)) finally: del exc def bare_error(extrabody=None): """Produce status, headers, body for a critical error. Returns a triple without calling any other questionable functions, so it should be as error-free as possible. Call it from an HTTP server if you get errors outside of the request. If extrabody is None, a friendly but rather unhelpful error message is set in the body. If extrabody is a string, it will be appended as-is to the body. 
""" # The whole point of this function is to be a last line-of-defense # in handling errors. That is, it must not raise any errors itself; # it cannot be allowed to fail. Therefore, don't add to it! # In particular, don't call any other CP functions. body = ntob("Unrecoverable error in the server.") if extrabody is not None: if not isinstance(extrabody, bytestr): extrabody = extrabody.encode('utf-8') body += ntob("\n") + extrabody return (ntob("500 Internal Server Error"), [(ntob('Content-Type'), ntob('text/plain')), (ntob('Content-Length'), ntob(str(len(body)), 'ISO-8859-1'))], [body])
gpl-2.0
pombredanne/pyjnius
docs/source/conf.py
13
7792
# -*- coding: utf-8 -*- # # Pyjnius documentation build configuration file, created by # sphinx-quickstart on Thu Aug 16 11:38:14 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Pyjnius' copyright = u'2012, Mathieu Virbel, Gabriel Pettier' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. release = '1.0a1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Pyjniusdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). 
#'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Pyjnius.tex', u'Pyjnius Documentation', u'Mathieu Virbel, Gabriel Pettier', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'pyjnius', u'Pyjnius Documentation', [u'Mathieu Virbel, Gabriel Pettier'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Pyjnius', u'Pyjnius Documentation', u'Mathieu Virbel, Gabriel Pettier', 'Pyjnius', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
mit
mattseymour/django
tests/swappable_models/tests.py
2
1786
from io import StringIO from django.contrib.auth.models import Permission from django.contrib.contenttypes.models import ContentType from django.core import management from django.test import TestCase, override_settings from .models import Article class SwappableModelTests(TestCase): @override_settings(TEST_ARTICLE_MODEL='swappable_models.AlternateArticle') def test_generated_data(self): "Permissions and content types are not created for a swapped model" # Delete all permissions and content_types Permission.objects.filter(content_type__app_label='swappable_models').delete() ContentType.objects.filter(app_label='swappable_models').delete() # Re-run migrate. This will re-build the permissions and content types. new_io = StringIO() management.call_command('migrate', interactive=False, stdout=new_io) # Content types and permissions exist for the swapped model, # but not for the swappable model. apps_models = [(p.content_type.app_label, p.content_type.model) for p in Permission.objects.all()] self.assertIn(('swappable_models', 'alternatearticle'), apps_models) self.assertNotIn(('swappable_models', 'article'), apps_models) apps_models = [(ct.app_label, ct.model) for ct in ContentType.objects.all()] self.assertIn(('swappable_models', 'alternatearticle'), apps_models) self.assertNotIn(('swappable_models', 'article'), apps_models) @override_settings(TEST_ARTICLE_MODEL='swappable_models.article') def test_case_insensitive(self): "Model names are case insensitive. Model swapping honors this." Article.objects.all() self.assertIsNone(Article._meta.swapped)
bsd-3-clause
Communities-Communications/cc-odoo
addons/base_import_module/models/ir_module.py
238
4795
import logging import os import sys import zipfile from os.path import join as opj import openerp from openerp.osv import osv from openerp.tools import convert_file from openerp.tools.translate import _ _logger = logging.getLogger(__name__) MAX_FILE_SIZE = 100 * 1024 * 1024 # in megabytes class view(osv.osv): _inherit = "ir.module.module" def import_module(self, cr, uid, module, path, force=False, context=None): known_mods = self.browse(cr, uid, self.search(cr, uid, [])) known_mods_names = dict([(m.name, m) for m in known_mods]) installed_mods = [m.name for m in known_mods if m.state == 'installed'] terp = openerp.modules.load_information_from_description_file(module, mod_path=path) values = self.get_values_from_terp(terp) unmet_dependencies = set(terp['depends']).difference(installed_mods) if unmet_dependencies: msg = _("Unmet module dependencies: %s") raise osv.except_osv(_('Error !'), msg % ', '.join(unmet_dependencies)) mod = known_mods_names.get(module) if mod: self.write(cr, uid, mod.id, dict(state='installed', **values)) mode = 'update' if not force else 'init' else: assert terp.get('installable', True), "Module not installable" self.create(cr, uid, dict(name=module, state='installed', **values)) mode = 'init' for kind in ['data', 'init_xml', 'update_xml']: for filename in terp[kind]: _logger.info("module %s: loading %s", module, filename) noupdate = False if filename.endswith('.csv') and kind in ('init', 'init_xml'): noupdate = True pathname = opj(path, filename) idref = {} convert_file(cr, module, filename, idref, mode=mode, noupdate=noupdate, kind=kind, pathname=pathname) path_static = opj(path, 'static') ir_attach = self.pool['ir.attachment'] if os.path.isdir(path_static): for root, dirs, files in os.walk(path_static): for static_file in files: full_path = opj(root, static_file) with open(full_path, 'r') as fp: data = fp.read().encode('base64') url_path = '/%s%s' % (module, full_path.split(path)[1].replace(os.path.sep, '/')) url_path = 
url_path.decode(sys.getfilesystemencoding()) filename = os.path.split(url_path)[1] values = dict( name=filename, datas_fname=filename, url=url_path, res_model='ir.ui.view', type='binary', datas=data, ) att_id = ir_attach.search(cr, uid, [('url', '=', url_path), ('type', '=', 'binary'), ('res_model', '=', 'ir.ui.view')], context=context) if att_id: ir_attach.write(cr, uid, att_id, values, context=context) else: ir_attach.create(cr, uid, values, context=context) return True def import_zipfile(self, cr, uid, module_file, force=False, context=None): if not module_file: raise Exception("No file sent.") if not zipfile.is_zipfile(module_file): raise osv.except_osv(_('Error !'), _('File is not a zip file!')) success = [] errors = dict() module_names = [] with zipfile.ZipFile(module_file, "r") as z: for zf in z.filelist: if zf.file_size > MAX_FILE_SIZE: msg = _("File '%s' exceed maximum allowed file size") raise osv.except_osv(_('Error !'), msg % zf.filename) with openerp.tools.osutil.tempdir() as module_dir: z.extractall(module_dir) dirs = [d for d in os.listdir(module_dir) if os.path.isdir(opj(module_dir, d))] for mod_name in dirs: module_names.append(mod_name) try: # assert mod_name.startswith('theme_') path = opj(module_dir, mod_name) self.import_module(cr, uid, mod_name, path, force=force, context=context) success.append(mod_name) except Exception, e: errors[mod_name] = str(e) r = ["Successfully imported module '%s'" % mod for mod in success] for mod, error in errors.items(): r.append("Error while importing module '%s': %r" % (mod, error)) return '\n'.join(r), module_names
agpl-3.0
Funtimezzhou/TradeBuildTools
Document/szse/Quantitative Trading/sat-ebook-and-full-source-20150618/algo-ebook-full-source-code-20150618/chapter14/performance.py
5
1431
#!/usr/bin/python # -*- coding: utf-8 -*- # performance.py from __future__ import print_function import numpy as np import pandas as pd def create_sharpe_ratio(returns, periods=252): """ Create the Sharpe ratio for the strategy, based on a benchmark of zero (i.e. no risk-free rate information). Parameters: returns - A pandas Series representing period percentage returns. periods - Daily (252), Hourly (252*6.5), Minutely(252*6.5*60) etc. """ return np.sqrt(periods) * (np.mean(returns)) / np.std(returns) def create_drawdowns(pnl): """ Calculate the largest peak-to-trough drawdown of the PnL curve as well as the duration of the drawdown. Requires that the pnl_returns is a pandas Series. Parameters: pnl - A pandas Series representing period percentage returns. Returns: drawdown, duration - Highest peak-to-trough drawdown and duration. """ # Calculate the cumulative returns curve # and set up the High Water Mark hwm = [0] # Create the drawdown and duration series idx = pnl.index drawdown = pd.Series(index = idx) duration = pd.Series(index = idx) # Loop over the index range for t in range(1, len(idx)): hwm.append(max(hwm[t-1], pnl[t])) drawdown[t]= (hwm[t]-pnl[t]) duration[t]= (0 if drawdown[t] == 0 else duration[t-1]+1) return drawdown, drawdown.max(), duration.max()
gpl-3.0
mhallin/knitty-gritty
knittygritty/util.py
1
2068
def nibble_bits(ns): '''Convert a stream of 4 bit numbers to a stream of bits >>> list(nibble_bits([1, 2])) [0, 0, 0, 1, 0, 0, 1, 0] ''' for n in ns: yield (n & 0x8) >> 3 yield (n & 0x4) >> 2 yield (n & 0x2) >> 1 yield (n & 0x1) >> 0 def to_nibbles(bs): '''Convert a string of bytes to a stream of nibbles >>> list(to_nibbles('\x3d')) [3, 13] ''' for c in bs: b = ord(c) yield (b & 0xf0) >> 4 yield b & 0x0f def from_nibbles(ns): '''Convert a stream of nibbles to a string of bytes >>> from_nibbles([3, 13]) '\x3d' ''' s = '' for n1, n2 in zip(ns[::2], ns[1::2]): s += chr((n1 << 4) | n2) return s def from_bcd(ns): '''Convert a stream of nibbles representing a BCD (binary coded digit) to an integer. >>> from_bcd([1, 2, 3]) 123 ''' s = 0 m = 1 for n in reversed(ns): s += n * m m *= 10 return s def to_bcd(n, width=0): '''Convert an integer to a list of nibbles representing the number in BCD, optionally padded with initial zeroes to a specific width. >>> to_bcd(123) [1, 2, 3] >>> to_bcd(12, width=5) [0, 0, 0, 1, 2] ''' l = [] while n: l.append(n % 10) n /= 10 if len(l) < width: l += [0] * (width - len(l)) return list(reversed(l)) def bits_to_bytes(bits): '''Convert a sequence of bits to a string of bytes. The bit sequence must have a length divisible by 8 >>> bits_to_bytes([0, 0, 1, 0, 0, 1, 0, 1]) '\x25' ''' assert len(bits) % 8 == 0 acc = '' for i in range(len(bits) / 8): s = 0 c = 128 for b in range(8): s += bits[i * 8 + b] * c c /= 2 acc += chr(s) return acc def padding(n, alignment): '''Return the required padding for aligning `n` at `alignment` >>> padding(3, 4) 1 >>> padding(4, 4) 0 ''' return (alignment - (n % alignment)) % alignment
bsd-3-clause
Kubuxu/cjdns
node_build/dependencies/libuv/build/gyp/test/mac/gyptest-identical-name.py
94
1547
#!/usr/bin/env python # Copyright (c) 2014 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies libraries (in identical-names) are properly handeled by xcode. The names for all libraries participating in this build are: libtestlib.a - identical-name/testlib libtestlib.a - identical-name/proxy/testlib libproxy.a - identical-name/proxy The first two libs produce a hash collision in Xcode when Gyp is executed, because they have the same name and would be copied to the same directory with Xcode default settings. For this scenario to work one needs to change the Xcode variables SYMROOT and CONFIGURATION_BUILD_DIR. Setting these to per-lib-unique directories, avoids copying the libs into the same directory. The test consists of two steps. The first one verifies that by setting both vars, there is no hash collision anymore during Gyp execution and that the libs can actually be be built. The second one verifies that there is still a hash collision if the vars are not set and thus the current behavior is preserved. """ import TestGyp import sys def IgnoreOutput(string, expected_string): return True if sys.platform == 'darwin': test = TestGyp.TestGyp(formats=['xcode']) test.run_gyp('test.gyp', chdir='identical-name') test.build('test.gyp', test.ALL, chdir='identical-name') test.run_gyp('test-should-fail.gyp', chdir='identical-name') test.built_file_must_not_exist('test-should-fail.xcodeproj') test.pass_test()
gpl-3.0
pombredanne/btnet
beautifulsoup4-4.3.2/bs4/__init__.py
417
15401
"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup uses a pluggable XML or HTML parser to parse a (possibly invalid) document into a tree representation. Beautiful Soup provides provides methods and Pythonic idioms that make it easy to navigate, search, and modify the parse tree. Beautiful Soup works with Python 2.6 and up. It works better if lxml and/or html5lib is installed. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/ """ __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "4.3.2" __copyright__ = "Copyright (c) 2004-2013 Leonard Richardson" __license__ = "MIT" __all__ = ['BeautifulSoup'] import os import re import warnings from .builder import builder_registry, ParserRejectedMarkup from .dammit import UnicodeDammit from .element import ( CData, Comment, DEFAULT_OUTPUT_ENCODING, Declaration, Doctype, NavigableString, PageElement, ProcessingInstruction, ResultSet, SoupStrainer, Tag, ) # The very first thing we do is give a useful error if someone is # running this code under Python 3 without converting it. syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).' class BeautifulSoup(Tag): """ This class defines the basic interface called by the tree builders. 
These methods will be called by the parser: reset() feed(markup) The tree builder may call these methods from its feed() implementation: handle_starttag(name, attrs) # See note about return value handle_endtag(name) handle_data(data) # Appends to the current data node endData(containerClass=NavigableString) # Ends the current data node No matter how complicated the underlying parser is, you should be able to build a tree using 'start tag' events, 'end tag' events, 'data' events, and "done with data" events. If you encounter an empty-element tag (aka a self-closing tag, like HTML's <br> tag), call handle_starttag and then handle_endtag. """ ROOT_TAG_NAME = u'[document]' # If the end-user gives no indication which tree builder they # want, look for one with these features. DEFAULT_BUILDER_FEATURES = ['html', 'fast'] ASCII_SPACES = '\x20\x0a\x09\x0c\x0d' def __init__(self, markup="", features=None, builder=None, parse_only=None, from_encoding=None, **kwargs): """The Soup object is initialized as the 'root tag', and the provided markup (which can be a string or a file-like object) is fed into the underlying parser.""" if 'convertEntities' in kwargs: warnings.warn( "BS4 does not respect the convertEntities argument to the " "BeautifulSoup constructor. Entities are always converted " "to Unicode characters.") if 'markupMassage' in kwargs: del kwargs['markupMassage'] warnings.warn( "BS4 does not respect the markupMassage argument to the " "BeautifulSoup constructor. The tree builder is responsible " "for any necessary markup massage.") if 'smartQuotesTo' in kwargs: del kwargs['smartQuotesTo'] warnings.warn( "BS4 does not respect the smartQuotesTo argument to the " "BeautifulSoup constructor. Smart quotes are always converted " "to Unicode characters.") if 'selfClosingTags' in kwargs: del kwargs['selfClosingTags'] warnings.warn( "BS4 does not respect the selfClosingTags argument to the " "BeautifulSoup constructor. 
The tree builder is responsible " "for understanding self-closing tags.") if 'isHTML' in kwargs: del kwargs['isHTML'] warnings.warn( "BS4 does not respect the isHTML argument to the " "BeautifulSoup constructor. You can pass in features='html' " "or features='xml' to get a builder capable of handling " "one or the other.") def deprecated_argument(old_name, new_name): if old_name in kwargs: warnings.warn( 'The "%s" argument to the BeautifulSoup constructor ' 'has been renamed to "%s."' % (old_name, new_name)) value = kwargs[old_name] del kwargs[old_name] return value return None parse_only = parse_only or deprecated_argument( "parseOnlyThese", "parse_only") from_encoding = from_encoding or deprecated_argument( "fromEncoding", "from_encoding") if len(kwargs) > 0: arg = kwargs.keys().pop() raise TypeError( "__init__() got an unexpected keyword argument '%s'" % arg) if builder is None: if isinstance(features, basestring): features = [features] if features is None or len(features) == 0: features = self.DEFAULT_BUILDER_FEATURES builder_class = builder_registry.lookup(*features) if builder_class is None: raise FeatureNotFound( "Couldn't find a tree builder with the features you " "requested: %s. Do you need to install a parser library?" % ",".join(features)) builder = builder_class() self.builder = builder self.is_xml = builder.is_xml self.builder.soup = self self.parse_only = parse_only if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() elif len(markup) <= 256: # Print out warnings for a couple beginner problems # involving passing non-markup to Beautiful Soup. # Beautiful Soup will still parse the input as markup, # just in case that's what the user really wants. 
if (isinstance(markup, unicode) and not os.path.supports_unicode_filenames): possible_filename = markup.encode("utf8") else: possible_filename = markup is_file = False try: is_file = os.path.exists(possible_filename) except Exception, e: # This is almost certainly a problem involving # characters not valid in filenames on this # system. Just let it go. pass if is_file: warnings.warn( '"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup) if markup[:5] == "http:" or markup[:6] == "https:": # TODO: This is ugly but I couldn't get it to work in # Python 3 otherwise. if ((isinstance(markup, bytes) and not b' ' in markup) or (isinstance(markup, unicode) and not u' ' in markup)): warnings.warn( '"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup) for (self.markup, self.original_encoding, self.declared_html_encoding, self.contains_replacement_characters) in ( self.builder.prepare_markup(markup, from_encoding)): self.reset() try: self._feed() break except ParserRejectedMarkup: pass # Clear out the markup and remove the builder's circular # reference to this object. self.markup = None self.builder.soup = None def _feed(self): # Convert the document to Unicode. self.builder.reset() self.builder.feed(self.markup) # Close out any unfinished strings and close all the open tags. 
self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def reset(self): Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) self.hidden = 1 self.builder.reset() self.current_data = [] self.currentTag = None self.tagStack = [] self.preserve_whitespace_tag_stack = [] self.pushTag(self) def new_tag(self, name, namespace=None, nsprefix=None, **attrs): """Create a new tag associated with this soup.""" return Tag(None, self.builder, name, namespace, nsprefix, attrs) def new_string(self, s, subclass=NavigableString): """Create a new NavigableString associated with this soup.""" navigable = subclass(s) navigable.setup() return navigable def insert_before(self, successor): raise NotImplementedError("BeautifulSoup objects don't support insert_before().") def insert_after(self, successor): raise NotImplementedError("BeautifulSoup objects don't support insert_after().") def popTag(self): tag = self.tagStack.pop() if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]: self.preserve_whitespace_tag_stack.pop() #print "Pop", tag.name if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): #print "Push", tag.name if self.currentTag: self.currentTag.contents.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] if tag.name in self.builder.preserve_whitespace_tags: self.preserve_whitespace_tag_stack.append(tag) def endData(self, containerClass=NavigableString): if self.current_data: current_data = u''.join(self.current_data) # If whitespace is not preserved, and this string contains # nothing but ASCII spaces, replace it with a single space # or newline. if not self.preserve_whitespace_tag_stack: strippable = True for i in current_data: if i not in self.ASCII_SPACES: strippable = False break if strippable: if '\n' in current_data: current_data = '\n' else: current_data = ' ' # Reset the data collector. 
self.current_data = [] # Should we add this string to the tree at all? if self.parse_only and len(self.tagStack) <= 1 and \ (not self.parse_only.text or \ not self.parse_only.search(current_data)): return o = containerClass(current_data) self.object_was_parsed(o) def object_was_parsed(self, o, parent=None, most_recent_element=None): """Add an object to the parse tree.""" parent = parent or self.currentTag most_recent_element = most_recent_element or self._most_recent_element o.setup(parent, most_recent_element) if most_recent_element is not None: most_recent_element.next_element = o self._most_recent_element = o parent.contents.append(o) def _popToTag(self, name, nsprefix=None, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. If inclusivePop is false, pops the tag stack up to but *not* including the most recent instqance of the given tag.""" #print "Popping to %s" % name if name == self.ROOT_TAG_NAME: # The BeautifulSoup object itself can never be popped. return most_recently_popped = None stack_size = len(self.tagStack) for i in range(stack_size - 1, 0, -1): t = self.tagStack[i] if (name == t.name and nsprefix == t.prefix): if inclusivePop: most_recently_popped = self.popTag() break most_recently_popped = self.popTag() return most_recently_popped def handle_starttag(self, name, namespace, nsprefix, attrs): """Push a start tag on to the stack. If this method returns None, the tag was rejected by the SoupStrainer. You should proceed as if the tag had not occured in the document. For instance, if this was a self-closing tag, don't call handle_endtag. 
""" # print "Start tag %s: %s" % (name, attrs) self.endData() if (self.parse_only and len(self.tagStack) <= 1 and (self.parse_only.text or not self.parse_only.search_tag(name, attrs))): return None tag = Tag(self, self.builder, name, namespace, nsprefix, attrs, self.currentTag, self._most_recent_element) if tag is None: return tag if self._most_recent_element: self._most_recent_element.next_element = tag self._most_recent_element = tag self.pushTag(tag) return tag def handle_endtag(self, name, nsprefix=None): #print "End tag: " + name self.endData() self._popToTag(name, nsprefix) def handle_data(self, data): self.current_data.append(data) def decode(self, pretty_print=False, eventual_encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal"): """Returns a string or Unicode representation of this document. To get Unicode, pass None for encoding.""" if self.is_xml: # Print the XML declaration encoding_part = '' if eventual_encoding != None: encoding_part = ' encoding="%s"' % eventual_encoding prefix = u'<?xml version="1.0"%s?>\n' % encoding_part else: prefix = u'' if not pretty_print: indent_level = None else: indent_level = 0 return prefix + super(BeautifulSoup, self).decode( indent_level, eventual_encoding, formatter) # Alias to make it easier to type import: 'from bs4 import _soup' _s = BeautifulSoup _soup = BeautifulSoup class BeautifulStoneSoup(BeautifulSoup): """Deprecated interface to an XML parser.""" def __init__(self, *args, **kwargs): kwargs['features'] = 'xml' warnings.warn( 'The BeautifulStoneSoup class is deprecated. Instead of using ' 'it, pass features="xml" into the BeautifulSoup constructor.') super(BeautifulStoneSoup, self).__init__(*args, **kwargs) class StopParsing(Exception): pass class FeatureNotFound(ValueError): pass #By default, act as an HTML pretty-printer. if __name__ == '__main__': import sys soup = BeautifulSoup(sys.stdin) print soup.prettify()
gpl-2.0
angstwad/ansible
lib/ansible/plugins/callback/syslog_json.py
54
2725
# Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import logging import logging.handlers import socket from ansible.plugins.callback import CallbackBase class CallbackModule(CallbackBase): """ logs ansible-playbook and ansible runs to a syslog server in json format make sure you have in ansible.cfg: callback_plugins = <path_to_callback_plugins_folder> and put the plugin in <path_to_callback_plugins_folder> This plugin makes use of the following environment variables: SYSLOG_SERVER (optional): defaults to localhost SYSLOG_PORT (optional): defaults to 514 """ CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'aggregate' CALLBACK_NAME = 'syslog_json' CALLBACK_NEEDS_WHITELIST = True def __init__(self): super(CallbackModule, self).__init__() self.logger = logging.getLogger('ansible logger') self.logger.setLevel(logging.DEBUG) self.handler = logging.handlers.SysLogHandler( address = (os.getenv('SYSLOG_SERVER','localhost'), os.getenv('SYSLOG_PORT',514)), facility=logging.handlers.SysLogHandler.LOG_USER ) self.logger.addHandler(self.handler) self.hostname = socket.gethostname() def runner_on_failed(self, host, res, ignore_errors=False): self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res))) def runner_on_ok(self, host, res): self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname,host,self._dump_results(res))) def runner_on_skipped(self, host, item=None): self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname,host, 'skipped')) def runner_on_unreachable(self, host, res): self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname,host,self._dump_results(res))) def runner_on_async_failed(self, host, res): self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: 
%s' % (self.hostname,host,self._dump_results(res))) def playbook_on_import_for_host(self, host, imported_file): self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s' % (self.hostname,host,imported_file)) def playbook_on_not_import_for_host(self, host, missing_file): self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s' % (self.hostname,host,missing_file))
gpl-3.0
balanced/wtforms
wtforms/ext/sqlalchemy/validators.py
4
1073
from wtforms import ValidationError from sqlalchemy.orm.exc import NoResultFound class Unique(object): """Checks field value unicity against specified table field. :param get_session: A function that return a SQAlchemy Session. :param model: The model to check unicity against. :param column: The unique column. :param message: The error message. """ field_flags = ('unique', ) def __init__(self, get_session, model, column, message=None): self.get_session = get_session self.model = model self.column = column self.message = message def __call__(self, form, field): try: obj = self.get_session().query(self.model)\ .filter(self.column == field.data).one() if not hasattr(form, '_obj') or not form._obj == obj: if self.message is None: self.message = field.gettext(u'Already exists.') raise ValidationError(self.message) except NoResultFound: pass
bsd-3-clause
evensonbryan/yocto-autobuilder
lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/schedulers/base.py
4
19387
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members

# NOTE(review): Python 2 code (basestring, iteritems, old-style zope
# `implements`); keep in mind if this module is ever ported to Python 3.
from zope.interface import implements
from twisted.python import failure, log
from twisted.application import service
from twisted.internet import defer
from buildbot.process.properties import Properties
from buildbot.util import ComparableMixin
from buildbot import config, interfaces
from buildbot.util.state import StateMixin

class BaseScheduler(service.MultiService, ComparableMixin, StateMixin):
    """
    Base class for all schedulers; this provides the equipment to manage
    reconfigurations and to handle basic scheduler state.  It also provides
    utility methods to begin various sorts of builds.

    Subclasses should add any configuration-derived attributes to
    C{base.Scheduler.compare_attrs}.
    """

    implements(interfaces.IScheduler)

    # NOTE(review): class-level mutable default shared by every scheduler that
    # does not pass `codebases`; safe only as long as nobody mutates it.
    DefaultCodebases = {'':{}}

    compare_attrs = ('name', 'builderNames', 'properties', 'codebases')

    def __init__(self, name, builderNames, properties,
                 codebases = DefaultCodebases):
        """
        Initialize a Scheduler.

        @param name: name of this scheduler (used as a key for state)
        @type name: unicode

        @param builderNames: list of builders this scheduler may start
        @type builderNames: list of unicode

        @param properties: properties to add to builds triggered by this
        scheduler
        @type properties: dictionary

        @param codebases: codebases that are necessary to process the changes
        @type codebases: dict with following struct:
            key: '<codebase>'
            value: {'repository':'<repo>', 'branch':'<br>', 'revision:'<rev>'}

        @param consumeChanges: true if this scheduler wishes to be informed
        about the addition of new changes.  Defaults to False.  This should
        be passed explicitly from subclasses to indicate their interest in
        consuming changes.
        @type consumeChanges: boolean
        """
        service.MultiService.__init__(self)
        self.name = name
        "name of this scheduler; used to identify replacements on reconfig"

        # validate builderNames before accepting it; config.error raises
        ok = True
        if not isinstance(builderNames, (list, tuple)):
            ok = False
        else:
            for b in builderNames:
                if not isinstance(b, basestring):
                    ok = False
        if not ok:
            config.error(
                "The builderNames argument to a scheduler must be a list "
                "of Builder names.")

        self.builderNames = builderNames
        "list of builder names to start in each buildset"

        self.properties = Properties()
        "properties that are contributed to each buildset"
        self.properties.update(properties, "Scheduler")
        self.properties.setProperty("scheduler", name, "Scheduler")

        # filled in by the parent service / db layer once attached
        self.objectid = None
        self.master = None

        # Set the codebases that are necessary to process the changes
        # These codebases will always result in a sourcestamp with or without changes
        if codebases is not None:
            if not isinstance(codebases, dict):
                config.error("Codebases must be a dict of dicts")
            for codebase, codebase_attrs in codebases.iteritems():
                if not isinstance(codebase_attrs, dict):
                    config.error("Codebases must be a dict of dicts")
                if (codebases != BaseScheduler.DefaultCodebases and
                        'repository' not in codebase_attrs):
                    config.error("The key 'repository' is mandatory in codebases")
        else:
            config.error("Codebases cannot be None")

        self.codebases = codebases

        # internal variables
        self._change_subscription = None
        self._change_consumption_lock = defer.DeferredLock()

    ## service handling

    def startService(self):
        service.MultiService.startService(self)

    def findNewSchedulerInstance(self, new_config):
        # locate the replacement scheduler of the same name in the new config
        return new_config.schedulers[self.name] # should exist!

    def stopService(self):
        # finish consuming changes before the underlying service stops
        d = defer.maybeDeferred(self._stopConsumingChanges)
        d.addCallback(lambda _ : service.MultiService.stopService(self))
        return d

    ## status queries

    # TODO: these aren't compatible with distributed schedulers

    def listBuilderNames(self):
        "Returns the list of builder names"
        return self.builderNames

    def getPendingBuildTimes(self):
        "Returns a list of the next times that builds are scheduled, if known."
        return []

    ## change handling

    def startConsumingChanges(self, fileIsImportant=None, change_filter=None,
                              onlyImportant=False):
        """
        Subclasses should call this method from startService to register to
        receive changes.  The BaseScheduler class will take care of filtering
        the changes (using change_filter) and (if fileIsImportant is not None)
        classifying them.  See L{gotChange}.  Returns a Deferred.

        @param fileIsImportant: a callable provided by the user to distinguish
        important and unimportant changes
        @type fileIsImportant: callable

        @param change_filter: a filter to determine which changes are even
        considered by this scheduler, or C{None} to consider all changes
        @type change_filter: L{buildbot.changes.filter.ChangeFilter} instance

        @param onlyImportant: If True, only important changes, as specified by
        fileIsImportant, will be added to the buildset.
        @type onlyImportant: boolean
        """
        assert fileIsImportant is None or callable(fileIsImportant)

        # register for changes with master
        assert not self._change_subscription
        def changeCallback(change):
            # ignore changes delivered while we're not running
            if not self._change_subscription:
                return

            if change_filter and not change_filter.filter_change(change):
                return
            if change.codebase not in self.codebases:
                # NOTE(review): adjacent literals concatenate without a space
                # ("...that isnot processed..."), and the format refers to
                # %(scheduler)s while the kwarg passed is `name` -- this log
                # line will render incorrectly; confirm before relying on it.
                log.msg(format='change contains codebase %(codebase)s that is'
                        'not processed by scheduler %(scheduler)s',
                        codebase=change.codebase, name=self.name)
                return
            if fileIsImportant:
                try:
                    important = fileIsImportant(change)
                    if not important and onlyImportant:
                        return
                except:
                    # deliberately broad: a user-supplied classifier must not
                    # break change consumption; log and drop the change
                    log.err(failure.Failure(),
                            'in fileIsImportant check for %s' % change)
                    return
            else:
                important = True

            # use change_consumption_lock to ensure the service does not stop
            # while this change is being processed
            d = self._change_consumption_lock.run(self.gotChange, change,
                                                  important)
            d.addErrback(log.err, 'while processing change')
        self._change_subscription = self.master.subscribeToChanges(changeCallback)

        return defer.succeed(None)

    def _stopConsumingChanges(self):
        # (note: called automatically in stopService)

        # acquire the lock change consumption lock to ensure that any change
        # consumption is complete before we are done stopping consumption
        def stop():
            if self._change_subscription:
                self._change_subscription.unsubscribe()
                self._change_subscription = None
        return self._change_consumption_lock.run(stop)

    def gotChange(self, change, important):
        """
        Called when a change is received; returns a Deferred.  If the
        C{fileIsImportant} parameter to C{startConsumingChanges} was C{None},
        then all changes are considered important.
        The C{codebase} of the change has always an entry in the C{codebases}
        dictionary of the scheduler.

        @param change: the new change object
        @type change: L{buildbot.changes.changes.Change} instance
        @param important: true if this is an important change, according to
        C{fileIsImportant}.
        @type important: boolean
        @returns: Deferred
        """
        raise NotImplementedError

    ## starting builds

    @defer.inlineCallbacks
    def addBuildsetForLatest(self, reason='', external_idstring=None,
                             branch=None, repository='', project='',
                             builderNames=None, properties=None):
        """
        Add a buildset for the 'latest' source in the given branch,
        repository, and project.  This will create a relative sourcestamp
        for the buildset.

        This method will add any properties provided to the scheduler
        constructor to the buildset, and will call the master's addBuildset
        method with the appropriate parameters.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param branch: branch to build (note that None often has a special meaning)
        @param repository: repository name for sourcestamp
        @param project: project name for sourcestamp
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        # Define setid for this set of changed repositories
        setid = yield self.master.db.sourcestampsets.addSourceStampSet()

        # add a sourcestamp for each codebase; per-codebase config overrides
        # the repository/branch arguments passed to this method
        for codebase, cb_info in self.codebases.iteritems():
            ss_repository = cb_info.get('repository', repository)
            ss_branch = cb_info.get('branch', branch)
            ss_revision = cb_info.get('revision', None)
            yield self.master.db.sourcestamps.addSourceStamp(
                        codebase=codebase,
                        repository=ss_repository,
                        branch=ss_branch,
                        revision=ss_revision,
                        project=project,
                        changeids=set(),
                        sourcestampsetid=setid)

        bsid,brids = yield self.addBuildsetForSourceStamp(
                                setid=setid, reason=reason,
                                external_idstring=external_idstring,
                                builderNames=builderNames,
                                properties=properties)
        defer.returnValue((bsid,brids))

    @defer.inlineCallbacks
    def addBuildsetForSourceStampDetails(self, reason='', external_idstring=None,
                        branch=None, repository='', project='', revision=None,
                        builderNames=None, properties=None):
        """
        Given details about the source code to build, create a source stamp
        and then add a buildset for it.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param branch: branch to build (note that None often has a special meaning)
        @param repository: repository name for sourcestamp
        @param project: project name for sourcestamp
        @param revision: revision to build - default is latest
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        # Define setid for this set of changed repositories
        setid = yield self.master.db.sourcestampsets.addSourceStampSet()

        yield self.master.db.sourcestamps.addSourceStamp(
                branch=branch, revision=revision, repository=repository,
                project=project, sourcestampsetid=setid)

        rv = yield self.addBuildsetForSourceStamp(
                                setid=setid, reason=reason,
                                external_idstring=external_idstring,
                                builderNames=builderNames,
                                properties=properties)
        defer.returnValue(rv)

    @defer.inlineCallbacks
    def addBuildsetForSourceStampSetDetails(self, reason, sourcestamps,
                                        properties, builderNames=None):
        # Build one sourcestamp per configured codebase, overlaying the
        # passed-in `sourcestamps` dict (keyed by codebase) onto the
        # configured defaults, then add one buildset for the whole set.
        if sourcestamps is None:
            sourcestamps = {}

        # Define new setid for this set of sourcestamps
        new_setid = yield self.master.db.sourcestampsets.addSourceStampSet()

        # Merge codebases with the passed list of sourcestamps
        # This results in a new sourcestamp for each codebase
        for codebase in self.codebases:
            ss = self.codebases[codebase].copy()
            # apply info from passed sourcestamps onto the configured default
            # sourcestamp attributes for this codebase.
            ss.update(sourcestamps.get(codebase,{}))

            # add sourcestamp to the new setid
            yield self.master.db.sourcestamps.addSourceStamp(
                        codebase=codebase,
                        repository=ss.get('repository', ''),
                        branch=ss.get('branch', None),
                        revision=ss.get('revision', None),
                        project=ss.get('project', ''),
                        changeids=[c['number'] for c in ss.get('changes', [])],
                        patch_body=ss.get('patch_body', None),
                        patch_level=ss.get('patch_level', None),
                        patch_author=ss.get('patch_author', None),
                        patch_comment=ss.get('patch_comment', None),
                        sourcestampsetid=new_setid)

        rv = yield self.addBuildsetForSourceStamp(
                                setid=new_setid, reason=reason,
                                properties=properties,
                                builderNames=builderNames)
        defer.returnValue(rv)

    # NOTE(review): `changeids=[]` is a mutable default argument; harmless
    # here because it is only iterated, never mutated, but fragile.
    @defer.inlineCallbacks
    def addBuildsetForChanges(self, reason='', external_idstring=None,
            changeids=[], builderNames=None, properties=None):
        # Group the given changes by codebase and add one sourcestamp per
        # configured codebase (with or without changes), then one buildset.
        changesByCodebase = {}

        def get_last_change_for_codebase(codebase):
            return max(changesByCodebase[codebase],key = lambda change: change["changeid"])

        # Define setid for this set of changed repositories
        setid = yield self.master.db.sourcestampsets.addSourceStampSet()

        # Changes are retrieved from database and grouped by their codebase
        for changeid in changeids:
            chdict = yield self.master.db.changes.getChange(changeid)
            # group change by codebase
            changesByCodebase.setdefault(chdict["codebase"], []).append(chdict)

        for codebase in self.codebases:
            args = {'codebase': codebase, 'sourcestampsetid': setid }
            if codebase not in changesByCodebase:
                # codebase has no changes
                # create a sourcestamp that has no changes
                args['repository'] = self.codebases[codebase]['repository']
                args['branch'] = self.codebases[codebase].get('branch', None)
                args['revision'] = self.codebases[codebase].get('revision', None)
                args['changeids'] = set()
                args['project'] = ''
            else:
                #codebase has changes
                args['changeids'] = [c["changeid"] for c in changesByCodebase[codebase]]
                # the newest change supplies repository/branch/revision/project
                lastChange = get_last_change_for_codebase(codebase)
                for key in ['repository', 'branch', 'revision', 'project']:
                    args[key] = lastChange[key]

            yield self.master.db.sourcestamps.addSourceStamp(**args)

        # add one buildset, this buildset is connected to the sourcestamps by the setid
        bsid,brids = yield self.addBuildsetForSourceStamp( setid=setid,
                            reason=reason, external_idstring=external_idstring,
                            builderNames=builderNames, properties=properties)

        defer.returnValue((bsid,brids))

    @defer.inlineCallbacks
    def addBuildsetForSourceStamp(self, ssid=None, setid=None, reason='',
            external_idstring=None, properties=None, builderNames=None):
        """
        Add a buildset for the given, already-existing sourcestamp.

        This method will add any properties provided to the scheduler
        constructor to the buildset, and will call the master's
        L{BuildMaster.addBuildset} method with the appropriate parameters, and
        return the same result.

        @param reason: reason for this buildset
        @type reason: unicode string
        @param external_idstring: external identifier for this buildset, or None
        @param properties: a properties object containing initial properties for
            the buildset
        @type properties: L{buildbot.process.properties.Properties}
        @param builderNames: builders to name in the buildset (defaults to
            C{self.builderNames})
        @param setid: identification of a set of sourcestamps
        @returns: (buildset ID, buildrequest IDs) via Deferred
        """
        assert (ssid is None and setid is not None) \
            or (ssid is not None and setid is None), "pass a single sourcestamp OR set not both"

        # combine properties
        if properties:
            properties.updateFromProperties(self.properties)
        else:
            properties = self.properties

        # apply the default builderNames
        if not builderNames:
            builderNames = self.builderNames

        # translate properties object into a dict as required by the
        # addBuildset method
        properties_dict = properties.asDict()

        if setid == None:
            if ssid is not None:
                # resolve the sourcestamp's set id from the sourcestamp itself
                ssdict = yield self.master.db.sourcestamps.getSourceStamp(ssid)
                setid = ssdict['sourcestampsetid']
            else:
                # no sourcestamp and no sets
                yield None

        rv = yield self.master.addBuildset(sourcestampsetid=setid, reason=reason,
                            properties=properties_dict,
                            builderNames=builderNames,
                            external_idstring=external_idstring)
        defer.returnValue(rv)
gpl-2.0
mancoast/CPythonPyc_test
cpython/279_test_bigmem.py
68
38156
from test import test_support from test.test_support import bigmemtest, _1G, _2G, _4G, precisionbigmemtest import unittest import operator import string import sys # Bigmem testing houserules: # # - Try not to allocate too many large objects. It's okay to rely on # refcounting semantics, but don't forget that 's = create_largestring()' # doesn't release the old 's' (if it exists) until well after its new # value has been created. Use 'del s' before the create_largestring call. # # - Do *not* compare large objects using assertEqual or similar. It's a # lengty operation and the errormessage will be utterly useless due to # its size. To make sure whether a result has the right contents, better # to use the strip or count methods, or compare meaningful slices. # # - Don't forget to test for large indices, offsets and results and such, # in addition to large sizes. # # - When repeating an object (say, a substring, or a small list) to create # a large object, make the subobject of a length that is not a power of # 2. That way, int-wrapping problems are more easily detected. # # - While the bigmemtest decorator speaks of 'minsize', all tests will # actually be called with a much smaller number too, in the normal # test run (5Kb currently.) This is so the tests themselves get frequent # testing. Consequently, always make all large allocations based on the # passed-in 'size', and don't rely on the size being very large. Also, # memuse-per-size should remain sane (less than a few thousand); if your # test uses more, adjust 'size' upward, instead. 
class StrTest(unittest.TestCase): @bigmemtest(minsize=_2G, memuse=2) def test_capitalize(self, size): SUBSTR = ' abc def ghi' s = '-' * size + SUBSTR caps = s.capitalize() self.assertEqual(caps[-len(SUBSTR):], SUBSTR.capitalize()) self.assertEqual(caps.lstrip('-'), SUBSTR) @bigmemtest(minsize=_2G + 10, memuse=1) def test_center(self, size): SUBSTR = ' abc def ghi' s = SUBSTR.center(size) self.assertEqual(len(s), size) lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2 if len(s) % 2: lpadsize += 1 self.assertEqual(s[lpadsize:-rpadsize], SUBSTR) self.assertEqual(s.strip(), SUBSTR.strip()) @precisionbigmemtest(size=_2G - 1, memuse=1) def test_center_unicode(self, size): SUBSTR = u' abc def ghi' try: s = SUBSTR.center(size) except OverflowError: pass # acceptable on 32-bit else: self.assertEqual(len(s), size) lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2 if len(s) % 2: lpadsize += 1 self.assertEqual(s[lpadsize:-rpadsize], SUBSTR) self.assertEqual(s.strip(), SUBSTR.strip()) del s @bigmemtest(minsize=_2G, memuse=2) def test_count(self, size): SUBSTR = ' abc def ghi' s = '.' * size + SUBSTR self.assertEqual(s.count('.'), size) s += '.' self.assertEqual(s.count('.'), size + 1) self.assertEqual(s.count(' '), 3) self.assertEqual(s.count('i'), 1) self.assertEqual(s.count('j'), 0) @bigmemtest(minsize=_2G + 2, memuse=3) def test_decode(self, size): s = '.' 
* size self.assertEqual(len(s.decode('utf-8')), size) def basic_encode_test(self, size, enc, c=u'.', expectedsize=None): if expectedsize is None: expectedsize = size s = c * size self.assertEqual(len(s.encode(enc)), expectedsize) @bigmemtest(minsize=_2G + 2, memuse=3) def test_encode(self, size): return self.basic_encode_test(size, 'utf-8') @precisionbigmemtest(size=_4G // 6 + 2, memuse=2) def test_encode_raw_unicode_escape(self, size): try: return self.basic_encode_test(size, 'raw_unicode_escape') except MemoryError: pass # acceptable on 32-bit @precisionbigmemtest(size=_4G // 5 + 70, memuse=3) def test_encode_utf7(self, size): try: return self.basic_encode_test(size, 'utf7') except MemoryError: pass # acceptable on 32-bit @precisionbigmemtest(size=_4G // 4 + 5, memuse=6) def test_encode_utf32(self, size): try: return self.basic_encode_test(size, 'utf32', expectedsize=4*size+4) except MemoryError: pass # acceptable on 32-bit @precisionbigmemtest(size=_2G-1, memuse=4) def test_decodeascii(self, size): return self.basic_encode_test(size, 'ascii', c='A') @precisionbigmemtest(size=_4G // 5, memuse=6+2) def test_unicode_repr_oflw(self, size): self.skipTest("test crashes - see issue #14904") try: s = u"\uAAAA"*size r = repr(s) except MemoryError: pass # acceptable on 32-bit else: self.assertTrue(s == eval(r)) @bigmemtest(minsize=_2G, memuse=2) def test_endswith(self, size): SUBSTR = ' abc def ghi' s = '-' * size + SUBSTR self.assertTrue(s.endswith(SUBSTR)) self.assertTrue(s.endswith(s)) s2 = '...' 
+ s self.assertTrue(s2.endswith(s)) self.assertFalse(s.endswith('a' + SUBSTR)) self.assertFalse(SUBSTR.endswith(s)) @bigmemtest(minsize=_2G + 10, memuse=2) def test_expandtabs(self, size): s = '-' * size tabsize = 8 self.assertEqual(s.expandtabs(), s) del s slen, remainder = divmod(size, tabsize) s = ' \t' * slen s = s.expandtabs(tabsize) self.assertEqual(len(s), size - remainder) self.assertEqual(len(s.strip(' ')), 0) @bigmemtest(minsize=_2G, memuse=2) def test_find(self, size): SUBSTR = ' abc def ghi' sublen = len(SUBSTR) s = ''.join([SUBSTR, '-' * size, SUBSTR]) self.assertEqual(s.find(' '), 0) self.assertEqual(s.find(SUBSTR), 0) self.assertEqual(s.find(' ', sublen), sublen + size) self.assertEqual(s.find(SUBSTR, len(SUBSTR)), sublen + size) self.assertEqual(s.find('i'), SUBSTR.find('i')) self.assertEqual(s.find('i', sublen), sublen + size + SUBSTR.find('i')) self.assertEqual(s.find('i', size), sublen + size + SUBSTR.find('i')) self.assertEqual(s.find('j'), -1) @bigmemtest(minsize=_2G, memuse=2) def test_index(self, size): SUBSTR = ' abc def ghi' sublen = len(SUBSTR) s = ''.join([SUBSTR, '-' * size, SUBSTR]) self.assertEqual(s.index(' '), 0) self.assertEqual(s.index(SUBSTR), 0) self.assertEqual(s.index(' ', sublen), sublen + size) self.assertEqual(s.index(SUBSTR, sublen), sublen + size) self.assertEqual(s.index('i'), SUBSTR.index('i')) self.assertEqual(s.index('i', sublen), sublen + size + SUBSTR.index('i')) self.assertEqual(s.index('i', size), sublen + size + SUBSTR.index('i')) self.assertRaises(ValueError, s.index, 'j') @bigmemtest(minsize=_2G, memuse=2) def test_isalnum(self, size): SUBSTR = '123456' s = 'a' * size + SUBSTR self.assertTrue(s.isalnum()) s += '.' self.assertFalse(s.isalnum()) @bigmemtest(minsize=_2G, memuse=2) def test_isalpha(self, size): SUBSTR = 'zzzzzzz' s = 'a' * size + SUBSTR self.assertTrue(s.isalpha()) s += '.' 
self.assertFalse(s.isalpha()) @bigmemtest(minsize=_2G, memuse=2) def test_isdigit(self, size): SUBSTR = '123456' s = '9' * size + SUBSTR self.assertTrue(s.isdigit()) s += 'z' self.assertFalse(s.isdigit()) @bigmemtest(minsize=_2G, memuse=2) def test_islower(self, size): chars = ''.join([ chr(c) for c in range(255) if not chr(c).isupper() ]) repeats = size // len(chars) + 2 s = chars * repeats self.assertTrue(s.islower()) s += 'A' self.assertFalse(s.islower()) @bigmemtest(minsize=_2G, memuse=2) def test_isspace(self, size): whitespace = ' \f\n\r\t\v' repeats = size // len(whitespace) + 2 s = whitespace * repeats self.assertTrue(s.isspace()) s += 'j' self.assertFalse(s.isspace()) @bigmemtest(minsize=_2G, memuse=2) def test_istitle(self, size): SUBSTR = '123456' s = ''.join(['A', 'a' * size, SUBSTR]) self.assertTrue(s.istitle()) s += 'A' self.assertTrue(s.istitle()) s += 'aA' self.assertFalse(s.istitle()) @bigmemtest(minsize=_2G, memuse=2) def test_isupper(self, size): chars = ''.join([ chr(c) for c in range(255) if not chr(c).islower() ]) repeats = size // len(chars) + 2 s = chars * repeats self.assertTrue(s.isupper()) s += 'a' self.assertFalse(s.isupper()) @bigmemtest(minsize=_2G, memuse=2) def test_join(self, size): s = 'A' * size x = s.join(['aaaaa', 'bbbbb']) self.assertEqual(x.count('a'), 5) self.assertEqual(x.count('b'), 5) self.assertTrue(x.startswith('aaaaaA')) self.assertTrue(x.endswith('Abbbbb')) @bigmemtest(minsize=_2G + 10, memuse=1) def test_ljust(self, size): SUBSTR = ' abc def ghi' s = SUBSTR.ljust(size) self.assertTrue(s.startswith(SUBSTR + ' ')) self.assertEqual(len(s), size) self.assertEqual(s.strip(), SUBSTR.strip()) @bigmemtest(minsize=_2G + 10, memuse=2) def test_lower(self, size): s = 'A' * size s = s.lower() self.assertEqual(len(s), size) self.assertEqual(s.count('a'), size) @bigmemtest(minsize=_2G + 10, memuse=1) def test_lstrip(self, size): SUBSTR = 'abc def ghi' s = SUBSTR.rjust(size) self.assertEqual(len(s), size) 
self.assertEqual(s.lstrip(), SUBSTR.lstrip()) del s s = SUBSTR.ljust(size) self.assertEqual(len(s), size) stripped = s.lstrip() self.assertTrue(stripped is s) @bigmemtest(minsize=_2G + 10, memuse=2) def test_replace(self, size): replacement = 'a' s = ' ' * size s = s.replace(' ', replacement) self.assertEqual(len(s), size) self.assertEqual(s.count(replacement), size) s = s.replace(replacement, ' ', size - 4) self.assertEqual(len(s), size) self.assertEqual(s.count(replacement), 4) self.assertEqual(s[-10:], ' aaaa') @bigmemtest(minsize=_2G, memuse=2) def test_rfind(self, size): SUBSTR = ' abc def ghi' sublen = len(SUBSTR) s = ''.join([SUBSTR, '-' * size, SUBSTR]) self.assertEqual(s.rfind(' '), sublen + size + SUBSTR.rfind(' ')) self.assertEqual(s.rfind(SUBSTR), sublen + size) self.assertEqual(s.rfind(' ', 0, size), SUBSTR.rfind(' ')) self.assertEqual(s.rfind(SUBSTR, 0, sublen + size), 0) self.assertEqual(s.rfind('i'), sublen + size + SUBSTR.rfind('i')) self.assertEqual(s.rfind('i', 0, sublen), SUBSTR.rfind('i')) self.assertEqual(s.rfind('i', 0, sublen + size), SUBSTR.rfind('i')) self.assertEqual(s.rfind('j'), -1) @bigmemtest(minsize=_2G, memuse=2) def test_rindex(self, size): SUBSTR = ' abc def ghi' sublen = len(SUBSTR) s = ''.join([SUBSTR, '-' * size, SUBSTR]) self.assertEqual(s.rindex(' '), sublen + size + SUBSTR.rindex(' ')) self.assertEqual(s.rindex(SUBSTR), sublen + size) self.assertEqual(s.rindex(' ', 0, sublen + size - 1), SUBSTR.rindex(' ')) self.assertEqual(s.rindex(SUBSTR, 0, sublen + size), 0) self.assertEqual(s.rindex('i'), sublen + size + SUBSTR.rindex('i')) self.assertEqual(s.rindex('i', 0, sublen), SUBSTR.rindex('i')) self.assertEqual(s.rindex('i', 0, sublen + size), SUBSTR.rindex('i')) self.assertRaises(ValueError, s.rindex, 'j') @bigmemtest(minsize=_2G + 10, memuse=1) def test_rjust(self, size): SUBSTR = ' abc def ghi' s = SUBSTR.ljust(size) self.assertTrue(s.startswith(SUBSTR + ' ')) self.assertEqual(len(s), size) self.assertEqual(s.strip(), 
SUBSTR.strip()) @bigmemtest(minsize=_2G + 10, memuse=1) def test_rstrip(self, size): SUBSTR = ' abc def ghi' s = SUBSTR.ljust(size) self.assertEqual(len(s), size) self.assertEqual(s.rstrip(), SUBSTR.rstrip()) del s s = SUBSTR.rjust(size) self.assertEqual(len(s), size) stripped = s.rstrip() self.assertTrue(stripped is s) # The test takes about size bytes to build a string, and then about # sqrt(size) substrings of sqrt(size) in size and a list to # hold sqrt(size) items. It's close but just over 2x size. @bigmemtest(minsize=_2G, memuse=2.1) def test_split_small(self, size): # Crudely calculate an estimate so that the result of s.split won't # take up an inordinate amount of memory chunksize = int(size ** 0.5 + 2) SUBSTR = 'a' + ' ' * chunksize s = SUBSTR * chunksize l = s.split() self.assertEqual(len(l), chunksize) self.assertEqual(set(l), set(['a'])) del l l = s.split('a') self.assertEqual(len(l), chunksize + 1) self.assertEqual(set(l), set(['', ' ' * chunksize])) # Allocates a string of twice size (and briefly two) and a list of # size. Because of internal affairs, the s.split() call produces a # list of size times the same one-character string, so we only # suffer for the list size. (Otherwise, it'd cost another 48 times # size in bytes!) Nevertheless, a list of size takes # 8*size bytes. 
@bigmemtest(minsize=_2G + 5, memuse=10) def test_split_large(self, size): s = ' a' * size + ' ' l = s.split() self.assertEqual(len(l), size) self.assertEqual(set(l), set(['a'])) del l l = s.split('a') self.assertEqual(len(l), size + 1) self.assertEqual(set(l), set([' '])) @bigmemtest(minsize=_2G, memuse=2.1) def test_splitlines(self, size): # Crudely calculate an estimate so that the result of s.split won't # take up an inordinate amount of memory chunksize = int(size ** 0.5 + 2) // 2 SUBSTR = ' ' * chunksize + '\n' + ' ' * chunksize + '\r\n' s = SUBSTR * chunksize l = s.splitlines() self.assertEqual(len(l), chunksize * 2) self.assertEqual(set(l), set([' ' * chunksize])) @bigmemtest(minsize=_2G, memuse=2) def test_startswith(self, size): SUBSTR = ' abc def ghi' s = '-' * size + SUBSTR self.assertTrue(s.startswith(s)) self.assertTrue(s.startswith('-' * size)) self.assertFalse(s.startswith(SUBSTR)) @bigmemtest(minsize=_2G, memuse=1) def test_strip(self, size): SUBSTR = ' abc def ghi ' s = SUBSTR.rjust(size) self.assertEqual(len(s), size) self.assertEqual(s.strip(), SUBSTR.strip()) del s s = SUBSTR.ljust(size) self.assertEqual(len(s), size) self.assertEqual(s.strip(), SUBSTR.strip()) @bigmemtest(minsize=_2G, memuse=2) def test_swapcase(self, size): SUBSTR = "aBcDeFG12.'\xa9\x00" sublen = len(SUBSTR) repeats = size // sublen + 2 s = SUBSTR * repeats s = s.swapcase() self.assertEqual(len(s), sublen * repeats) self.assertEqual(s[:sublen * 3], SUBSTR.swapcase() * 3) self.assertEqual(s[-sublen * 3:], SUBSTR.swapcase() * 3) @bigmemtest(minsize=_2G, memuse=2) def test_title(self, size): SUBSTR = 'SpaaHAaaAaham' s = SUBSTR * (size // len(SUBSTR) + 2) s = s.title() self.assertTrue(s.startswith((SUBSTR * 3).title())) self.assertTrue(s.endswith(SUBSTR.lower() * 3)) @bigmemtest(minsize=_2G, memuse=2) def test_translate(self, size): trans = string.maketrans('.aZ', '-!$') SUBSTR = 'aZz.z.Aaz.' 
sublen = len(SUBSTR) repeats = size // sublen + 2 s = SUBSTR * repeats s = s.translate(trans) self.assertEqual(len(s), repeats * sublen) self.assertEqual(s[:sublen], SUBSTR.translate(trans)) self.assertEqual(s[-sublen:], SUBSTR.translate(trans)) self.assertEqual(s.count('.'), 0) self.assertEqual(s.count('!'), repeats * 2) self.assertEqual(s.count('z'), repeats * 3) @bigmemtest(minsize=_2G + 5, memuse=2) def test_upper(self, size): s = 'a' * size s = s.upper() self.assertEqual(len(s), size) self.assertEqual(s.count('A'), size) @bigmemtest(minsize=_2G + 20, memuse=1) def test_zfill(self, size): SUBSTR = '-568324723598234' s = SUBSTR.zfill(size) self.assertTrue(s.endswith('0' + SUBSTR[1:])) self.assertTrue(s.startswith('-0')) self.assertEqual(len(s), size) self.assertEqual(s.count('0'), size - len(SUBSTR)) @bigmemtest(minsize=_2G + 10, memuse=2) def test_format(self, size): s = '-' * size sf = '%s' % (s,) self.assertTrue(s == sf) del sf sf = '..%s..' % (s,) self.assertEqual(len(sf), len(s) + 4) self.assertTrue(sf.startswith('..-')) self.assertTrue(sf.endswith('-..')) del s, sf size //= 2 edge = '-' * size s = ''.join([edge, '%s', edge]) del edge s = s % '...' self.assertEqual(len(s), size * 2 + 3) self.assertEqual(s.count('.'), 3) self.assertEqual(s.count('-'), size * 2) @bigmemtest(minsize=_2G + 10, memuse=5) def test_repr_small(self, size): s = '-' * size s = repr(s) self.assertEqual(len(s), size + 2) self.assertEqual(s[0], "'") self.assertEqual(s[-1], "'") self.assertEqual(s.count('-'), size) del s # repr() will create a string four times as large as this 'binary # string', but we don't want to allocate much more than twice # size in total. 
(We do extra testing in test_repr_large()) s = '\x00' * size s = repr(s) self.assertEqual(len(s), size * 4 + 2) self.assertEqual(s[0], "'") self.assertEqual(s[-1], "'") self.assertEqual(s.count('\\'), size) self.assertEqual(s.count('0'), size * 2) @bigmemtest(minsize=_2G + 10, memuse=5) def test_repr_large(self, size): s = '\x00' * size s = repr(s) self.assertEqual(len(s), size * 4 + 2) self.assertEqual(s[0], "'") self.assertEqual(s[-1], "'") self.assertEqual(s.count('\\'), size) self.assertEqual(s.count('0'), size * 2) @bigmemtest(minsize=2**32 // 5, memuse=6+2) def test_unicode_repr(self, size): s = u"\uAAAA" * size self.assertTrue(len(repr(s)) > size) # This test is meaningful even with size < 2G, as long as the # doubled string is > 2G (but it tests more if both are > 2G :) @bigmemtest(minsize=_1G + 2, memuse=3) def test_concat(self, size): s = '.' * size self.assertEqual(len(s), size) s = s + s self.assertEqual(len(s), size * 2) self.assertEqual(s.count('.'), size * 2) # This test is meaningful even with size < 2G, as long as the # repeated string is > 2G (but it tests more if both are > 2G :) @bigmemtest(minsize=_1G + 2, memuse=3) def test_repeat(self, size): s = '.' * size self.assertEqual(len(s), size) s = s * 2 self.assertEqual(len(s), size * 2) self.assertEqual(s.count('.'), size * 2) @bigmemtest(minsize=_2G + 20, memuse=2) def test_slice_and_getitem(self, size): SUBSTR = '0123456789' sublen = len(SUBSTR) s = SUBSTR * (size // sublen) stepsize = len(s) // 100 stepsize = stepsize - (stepsize % sublen) for i in range(0, len(s) - stepsize, stepsize): self.assertEqual(s[i], SUBSTR[0]) self.assertEqual(s[i:i + sublen], SUBSTR) self.assertEqual(s[i:i + sublen:2], SUBSTR[::2]) if i > 0: self.assertEqual(s[i + sublen - 1:i - 1:-3], SUBSTR[sublen::-3]) # Make sure we do some slicing and indexing near the end of the # string, too. 
self.assertEqual(s[len(s) - 1], SUBSTR[-1]) self.assertEqual(s[-1], SUBSTR[-1]) self.assertEqual(s[len(s) - 10], SUBSTR[0]) self.assertEqual(s[-sublen], SUBSTR[0]) self.assertEqual(s[len(s):], '') self.assertEqual(s[len(s) - 1:], SUBSTR[-1]) self.assertEqual(s[-1:], SUBSTR[-1]) self.assertEqual(s[len(s) - sublen:], SUBSTR) self.assertEqual(s[-sublen:], SUBSTR) self.assertEqual(len(s[:]), len(s)) self.assertEqual(len(s[:len(s) - 5]), len(s) - 5) self.assertEqual(len(s[5:-5]), len(s) - 10) self.assertRaises(IndexError, operator.getitem, s, len(s)) self.assertRaises(IndexError, operator.getitem, s, len(s) + 1) self.assertRaises(IndexError, operator.getitem, s, len(s) + 1<<31) @bigmemtest(minsize=_2G, memuse=2) def test_contains(self, size): SUBSTR = '0123456789' edge = '-' * (size // 2) s = ''.join([edge, SUBSTR, edge]) del edge self.assertIn(SUBSTR, s) self.assertNotIn(SUBSTR * 2, s) self.assertIn('-', s) self.assertNotIn('a', s) s += 'a' self.assertIn('a', s) @bigmemtest(minsize=_2G + 10, memuse=2) def test_compare(self, size): s1 = '-' * size s2 = '-' * size self.assertTrue(s1 == s2) del s2 s2 = s1 + 'a' self.assertFalse(s1 == s2) del s2 s2 = '.' * size self.assertFalse(s1 == s2) @bigmemtest(minsize=_2G + 10, memuse=1) def test_hash(self, size): # Not sure if we can do any meaningful tests here... Even if we # start relying on the exact algorithm used, the result will be # different depending on the size of the C 'long int'. Even this # test is dodgy (there's no *guarantee* that the two things should # have a different hash, even if they, in the current # implementation, almost always do.) s = '\x00' * size h1 = hash(s) del s s = '\x00' * (size + 1) self.assertFalse(h1 == hash(s)) class TupleTest(unittest.TestCase): # Tuples have a small, fixed-sized head and an array of pointers to # data. Since we're testing 64-bit addressing, we can assume that the # pointers are 8 bytes, and that thus that the tuples take up 8 bytes # per size. 
# As a side-effect of testing long tuples, these tests happen to test # having more than 2<<31 references to any given object. Hence the # use of different types of objects as contents in different tests. @bigmemtest(minsize=_2G + 2, memuse=16) def test_compare(self, size): t1 = (u'',) * size t2 = (u'',) * size self.assertTrue(t1 == t2) del t2 t2 = (u'',) * (size + 1) self.assertFalse(t1 == t2) del t2 t2 = (1,) * size self.assertFalse(t1 == t2) # Test concatenating into a single tuple of more than 2G in length, # and concatenating a tuple of more than 2G in length separately, so # the smaller test still gets run even if there isn't memory for the # larger test (but we still let the tester know the larger test is # skipped, in verbose mode.) def basic_concat_test(self, size): t = ((),) * size self.assertEqual(len(t), size) t = t + t self.assertEqual(len(t), size * 2) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_concat_small(self, size): return self.basic_concat_test(size) @bigmemtest(minsize=_2G + 2, memuse=24) def test_concat_large(self, size): return self.basic_concat_test(size) @bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5) def test_contains(self, size): t = (1, 2, 3, 4, 5) * size self.assertEqual(len(t), size * 5) self.assertIn(5, t) self.assertNotIn((1, 2, 3, 4, 5), t) self.assertNotIn(0, t) @bigmemtest(minsize=_2G + 10, memuse=8) def test_hash(self, size): t1 = (0,) * size h1 = hash(t1) del t1 t2 = (0,) * (size + 1) self.assertFalse(h1 == hash(t2)) @bigmemtest(minsize=_2G + 10, memuse=8) def test_index_and_slice(self, size): t = (None,) * size self.assertEqual(len(t), size) self.assertEqual(t[-1], None) self.assertEqual(t[5], None) self.assertEqual(t[size - 1], None) self.assertRaises(IndexError, operator.getitem, t, size) self.assertEqual(t[:5], (None,) * 5) self.assertEqual(t[-5:], (None,) * 5) self.assertEqual(t[20:25], (None,) * 5) self.assertEqual(t[-25:-20], (None,) * 5) self.assertEqual(t[size - 5:], (None,) * 5) self.assertEqual(t[size - 
5:size], (None,) * 5) self.assertEqual(t[size - 6:size - 2], (None,) * 4) self.assertEqual(t[size:size], ()) self.assertEqual(t[size:size+5], ()) # Like test_concat, split in two. def basic_test_repeat(self, size): t = ('',) * size self.assertEqual(len(t), size) t = t * 2 self.assertEqual(len(t), size * 2) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_repeat_small(self, size): return self.basic_test_repeat(size) @bigmemtest(minsize=_2G + 2, memuse=24) def test_repeat_large(self, size): return self.basic_test_repeat(size) @bigmemtest(minsize=_1G - 1, memuse=12) def test_repeat_large_2(self, size): return self.basic_test_repeat(size) @precisionbigmemtest(size=_1G - 1, memuse=9) def test_from_2G_generator(self, size): try: t = tuple(xrange(size)) except MemoryError: pass # acceptable on 32-bit else: count = 0 for item in t: self.assertEqual(item, count) count += 1 self.assertEqual(count, size) @precisionbigmemtest(size=_1G - 25, memuse=9) def test_from_almost_2G_generator(self, size): try: t = tuple(xrange(size)) count = 0 for item in t: self.assertEqual(item, count) count += 1 self.assertEqual(count, size) except MemoryError: pass # acceptable, expected on 32-bit # Like test_concat, split in two. def basic_test_repr(self, size): t = (0,) * size s = repr(t) # The repr of a tuple of 0's is exactly three times the tuple length. self.assertEqual(len(s), size * 3) self.assertEqual(s[:5], '(0, 0') self.assertEqual(s[-5:], '0, 0)') self.assertEqual(s.count('0'), size) @bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3) def test_repr_small(self, size): return self.basic_test_repr(size) @bigmemtest(minsize=_2G + 2, memuse=8 + 3) def test_repr_large(self, size): return self.basic_test_repr(size) class ListTest(unittest.TestCase): # Like tuples, lists have a small, fixed-sized head and an array of # pointers to data, so 8 bytes per size. Also like tuples, we make the # lists hold references to various objects to test their refcount # limits. 
@bigmemtest(minsize=_2G + 2, memuse=16) def test_compare(self, size): l1 = [u''] * size l2 = [u''] * size self.assertTrue(l1 == l2) del l2 l2 = [u''] * (size + 1) self.assertFalse(l1 == l2) del l2 l2 = [2] * size self.assertFalse(l1 == l2) # Test concatenating into a single list of more than 2G in length, # and concatenating a list of more than 2G in length separately, so # the smaller test still gets run even if there isn't memory for the # larger test (but we still let the tester know the larger test is # skipped, in verbose mode.) def basic_test_concat(self, size): l = [[]] * size self.assertEqual(len(l), size) l = l + l self.assertEqual(len(l), size * 2) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_concat_small(self, size): return self.basic_test_concat(size) @bigmemtest(minsize=_2G + 2, memuse=24) def test_concat_large(self, size): return self.basic_test_concat(size) def basic_test_inplace_concat(self, size): l = [sys.stdout] * size l += l self.assertEqual(len(l), size * 2) self.assertTrue(l[0] is l[-1]) self.assertTrue(l[size - 1] is l[size + 1]) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_inplace_concat_small(self, size): return self.basic_test_inplace_concat(size) @bigmemtest(minsize=_2G + 2, memuse=24) def test_inplace_concat_large(self, size): return self.basic_test_inplace_concat(size) @bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5) def test_contains(self, size): l = [1, 2, 3, 4, 5] * size self.assertEqual(len(l), size * 5) self.assertIn(5, l) self.assertNotIn([1, 2, 3, 4, 5], l) self.assertNotIn(0, l) @bigmemtest(minsize=_2G + 10, memuse=8) def test_hash(self, size): l = [0] * size self.assertRaises(TypeError, hash, l) @bigmemtest(minsize=_2G + 10, memuse=8) def test_index_and_slice(self, size): l = [None] * size self.assertEqual(len(l), size) self.assertEqual(l[-1], None) self.assertEqual(l[5], None) self.assertEqual(l[size - 1], None) self.assertRaises(IndexError, operator.getitem, l, size) self.assertEqual(l[:5], [None] * 5) 
self.assertEqual(l[-5:], [None] * 5) self.assertEqual(l[20:25], [None] * 5) self.assertEqual(l[-25:-20], [None] * 5) self.assertEqual(l[size - 5:], [None] * 5) self.assertEqual(l[size - 5:size], [None] * 5) self.assertEqual(l[size - 6:size - 2], [None] * 4) self.assertEqual(l[size:size], []) self.assertEqual(l[size:size+5], []) l[size - 2] = 5 self.assertEqual(len(l), size) self.assertEqual(l[-3:], [None, 5, None]) self.assertEqual(l.count(5), 1) self.assertRaises(IndexError, operator.setitem, l, size, 6) self.assertEqual(len(l), size) l[size - 7:] = [1, 2, 3, 4, 5] size -= 2 self.assertEqual(len(l), size) self.assertEqual(l[-7:], [None, None, 1, 2, 3, 4, 5]) l[:7] = [1, 2, 3, 4, 5] size -= 2 self.assertEqual(len(l), size) self.assertEqual(l[:7], [1, 2, 3, 4, 5, None, None]) del l[size - 1] size -= 1 self.assertEqual(len(l), size) self.assertEqual(l[-1], 4) del l[-2:] size -= 2 self.assertEqual(len(l), size) self.assertEqual(l[-1], 2) del l[0] size -= 1 self.assertEqual(len(l), size) self.assertEqual(l[0], 2) del l[:2] size -= 2 self.assertEqual(len(l), size) self.assertEqual(l[0], 4) # Like test_concat, split in two. 
def basic_test_repeat(self, size): l = [] * size self.assertFalse(l) l = [''] * size self.assertEqual(len(l), size) l = l * 2 self.assertEqual(len(l), size * 2) @bigmemtest(minsize=_2G // 2 + 2, memuse=24) def test_repeat_small(self, size): return self.basic_test_repeat(size) @bigmemtest(minsize=_2G + 2, memuse=24) def test_repeat_large(self, size): return self.basic_test_repeat(size) def basic_test_inplace_repeat(self, size): l = [''] l *= size self.assertEqual(len(l), size) self.assertTrue(l[0] is l[-1]) del l l = [''] * size l *= 2 self.assertEqual(len(l), size * 2) self.assertTrue(l[size - 1] is l[-1]) @bigmemtest(minsize=_2G // 2 + 2, memuse=16) def test_inplace_repeat_small(self, size): return self.basic_test_inplace_repeat(size) @bigmemtest(minsize=_2G + 2, memuse=16) def test_inplace_repeat_large(self, size): return self.basic_test_inplace_repeat(size) def basic_test_repr(self, size): l = [0] * size s = repr(l) # The repr of a list of 0's is exactly three times the list length. self.assertEqual(len(s), size * 3) self.assertEqual(s[:5], '[0, 0') self.assertEqual(s[-5:], '0, 0]') self.assertEqual(s.count('0'), size) @bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3) def test_repr_small(self, size): return self.basic_test_repr(size) @bigmemtest(minsize=_2G + 2, memuse=8 + 3) def test_repr_large(self, size): return self.basic_test_repr(size) # list overallocates ~1/8th of the total size (on first expansion) so # the single list.append call puts memuse at 9 bytes per size. 
@bigmemtest(minsize=_2G, memuse=9) def test_append(self, size): l = [object()] * size l.append(object()) self.assertEqual(len(l), size+1) self.assertTrue(l[-3] is l[-2]) self.assertFalse(l[-2] is l[-1]) @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5) def test_count(self, size): l = [1, 2, 3, 4, 5] * size self.assertEqual(l.count(1), size) self.assertEqual(l.count("1"), 0) def basic_test_extend(self, size): l = [file] * size l.extend(l) self.assertEqual(len(l), size * 2) self.assertTrue(l[0] is l[-1]) self.assertTrue(l[size - 1] is l[size + 1]) @bigmemtest(minsize=_2G // 2 + 2, memuse=16) def test_extend_small(self, size): return self.basic_test_extend(size) @bigmemtest(minsize=_2G + 2, memuse=16) def test_extend_large(self, size): return self.basic_test_extend(size) @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5) def test_index(self, size): l = [1L, 2L, 3L, 4L, 5L] * size size *= 5 self.assertEqual(l.index(1), 0) self.assertEqual(l.index(5, size - 5), size - 1) self.assertEqual(l.index(5, size - 5, size), size - 1) self.assertRaises(ValueError, l.index, 1, size - 4, size) self.assertRaises(ValueError, l.index, 6L) # This tests suffers from overallocation, just like test_append. 
@bigmemtest(minsize=_2G + 10, memuse=9) def test_insert(self, size): l = [1.0] * size l.insert(size - 1, "A") size += 1 self.assertEqual(len(l), size) self.assertEqual(l[-3:], [1.0, "A", 1.0]) l.insert(size + 1, "B") size += 1 self.assertEqual(len(l), size) self.assertEqual(l[-3:], ["A", 1.0, "B"]) l.insert(1, "C") size += 1 self.assertEqual(len(l), size) self.assertEqual(l[:3], [1.0, "C", 1.0]) self.assertEqual(l[size - 3:], ["A", 1.0, "B"]) @bigmemtest(minsize=_2G // 5 + 4, memuse=8 * 5) def test_pop(self, size): l = [u"a", u"b", u"c", u"d", u"e"] * size size *= 5 self.assertEqual(len(l), size) item = l.pop() size -= 1 self.assertEqual(len(l), size) self.assertEqual(item, u"e") self.assertEqual(l[-2:], [u"c", u"d"]) item = l.pop(0) size -= 1 self.assertEqual(len(l), size) self.assertEqual(item, u"a") self.assertEqual(l[:2], [u"b", u"c"]) item = l.pop(size - 2) size -= 1 self.assertEqual(len(l), size) self.assertEqual(item, u"c") self.assertEqual(l[-2:], [u"b", u"d"]) @bigmemtest(minsize=_2G + 10, memuse=8) def test_remove(self, size): l = [10] * size self.assertEqual(len(l), size) l.remove(10) size -= 1 self.assertEqual(len(l), size) # Because of the earlier l.remove(), this append doesn't trigger # a resize. 
l.append(5) size += 1 self.assertEqual(len(l), size) self.assertEqual(l[-2:], [10, 5]) l.remove(5) size -= 1 self.assertEqual(len(l), size) self.assertEqual(l[-2:], [10, 10]) @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5) def test_reverse(self, size): l = [1, 2, 3, 4, 5] * size l.reverse() self.assertEqual(len(l), size * 5) self.assertEqual(l[-5:], [5, 4, 3, 2, 1]) self.assertEqual(l[:5], [5, 4, 3, 2, 1]) @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5) def test_sort(self, size): l = [1, 2, 3, 4, 5] * size l.sort() self.assertEqual(len(l), size * 5) self.assertEqual(l.count(1), size) self.assertEqual(l[:10], [1] * 10) self.assertEqual(l[-10:], [5] * 10) class BufferTest(unittest.TestCase): @precisionbigmemtest(size=_1G, memuse=4) def test_repeat(self, size): try: with test_support.check_py3k_warnings(): b = buffer("AAAA")*size except MemoryError: pass # acceptable on 32-bit else: count = 0 for c in b: self.assertEqual(c, 'A') count += 1 self.assertEqual(count, size*4) def test_main(): test_support.run_unittest(StrTest, TupleTest, ListTest, BufferTest) if __name__ == '__main__': if len(sys.argv) > 1: test_support.set_memlimit(sys.argv[1]) test_main()
gpl-3.0
ryfeus/lambda-packs
Tensorflow/source/tensorflow/python/ops/distributions/uniform.py
73
6818
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Uniform distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops.distributions import distribution class Uniform(distribution.Distribution): """Uniform distribution with `low` and `high` parameters. #### Mathematical Details The probability density function (pdf) is, ```none pdf(x; a, b) = I[a <= x < b] / Z Z = b - a ``` where: * `low = a`, * `high = b`, * `Z` is the normalizing constant, and, * `I[predicate]` is the [indicator function]( https://en.wikipedia.org/wiki/Indicator_function) for `predicate`. The parameters `low` and `high` must be shaped in a way that supports broadcasting (e.g., `high - low` is a valid operation). 
#### Examples ```python # Without broadcasting: u1 = Uniform(low=3.0, high=4.0) # a single uniform distribution [3, 4] u2 = Uniform(low=[1.0, 2.0], high=[3.0, 4.0]) # 2 distributions [1, 3], [2, 4] u3 = Uniform(low=[[1.0, 2.0], [3.0, 4.0]], high=[[1.5, 2.5], [3.5, 4.5]]) # 4 distributions ``` ```python # With broadcasting: u1 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) # 3 distributions ``` """ def __init__(self, low=0., high=1., validate_args=False, allow_nan_stats=True, name="Uniform"): """Initialize a batch of Uniform distributions. Args: low: Floating point tensor, lower boundary of the output interval. Must have `low < high`. high: Floating point tensor, upper boundary of the output interval. Must have `low < high`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: InvalidArgumentError: if `low >= high` and `validate_args=False`. 
""" parameters = locals() with ops.name_scope(name, values=[low, high]): with ops.control_dependencies([ check_ops.assert_less( low, high, message="uniform not defined when low >= high.") ] if validate_args else []): self._low = array_ops.identity(low, name="low") self._high = array_ops.identity(high, name="high") check_ops.assert_same_float_dtype([self._low, self._high]) super(Uniform, self).__init__( dtype=self._low.dtype, reparameterization_type=distribution.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._low, self._high], name=name) @staticmethod def _param_shapes(sample_shape): return dict( zip(("low", "high"), ([ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2))) @property def low(self): """Lower boundary of the output interval.""" return self._low @property def high(self): """Upper boundary of the output interval.""" return self._high def range(self, name="range"): """`high - low`.""" with self._name_scope(name): return self.high - self.low def _batch_shape_tensor(self): return array_ops.broadcast_dynamic_shape( array_ops.shape(self.low), array_ops.shape(self.high)) def _batch_shape(self): return array_ops.broadcast_static_shape( self.low.get_shape(), self.high.get_shape()) def _event_shape_tensor(self): return constant_op.constant([], dtype=dtypes.int32) def _event_shape(self): return tensor_shape.scalar() def _sample_n(self, n, seed=None): shape = array_ops.concat([[n], self.batch_shape_tensor()], 0) samples = random_ops.random_uniform(shape=shape, dtype=self.dtype, seed=seed) return self.low + self.range() * samples def _log_prob(self, x): return math_ops.log(self._prob(x)) def _prob(self, x): broadcasted_x = x * array_ops.ones(self.batch_shape_tensor()) return array_ops.where( math_ops.is_nan(broadcasted_x), broadcasted_x, array_ops.where( math_ops.logical_or(broadcasted_x < self.low, broadcasted_x >= self.high), array_ops.zeros_like(broadcasted_x), 
array_ops.ones_like(broadcasted_x) / self.range())) def _log_cdf(self, x): return math_ops.log(self.cdf(x)) def _cdf(self, x): broadcast_shape = array_ops.broadcast_dynamic_shape( array_ops.shape(x), self.batch_shape_tensor()) zeros = array_ops.zeros(broadcast_shape, dtype=self.dtype) ones = array_ops.ones(broadcast_shape, dtype=self.dtype) broadcasted_x = x * ones result_if_not_big = array_ops.where( x < self.low, zeros, (broadcasted_x - self.low) / self.range()) return array_ops.where(x >= self.high, ones, result_if_not_big) def _entropy(self): return math_ops.log(self.range()) def _mean(self): return (self.low + self.high) / 2. def _variance(self): return math_ops.square(self.range()) / 12. def _stddev(self): return self.range() / math.sqrt(12.)
mit
changsimon/trove
trove/tests/unittests/common/test_context.py
5
2104
# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import trove.common.context as context import testtools from testtools.matchers import Equals, Is class TestTroveContext(testtools.TestCase): def test_create_with_extended_args(self): expected_service_catalog = {'key': 'value'} ctx = context.TroveContext(user="test_user_id", request_id="test_req_id", limit="500", marker="x", service_catalog=expected_service_catalog) self.assertThat(ctx.limit, Equals("500")) self.assertThat(ctx.marker, Equals("x")) self.assertThat(ctx.service_catalog, Equals(expected_service_catalog)) def test_create(self): ctx = context.TroveContext(user='test_user_id', request_id='test_req_id') self.assertThat(ctx.user, Equals('test_user_id')) self.assertThat(ctx.request_id, Equals('test_req_id')) self.assertThat(ctx.limit, Is(None)) self.assertThat(ctx.marker, Is(None)) self.assertThat(ctx.service_catalog, Is(None)) def test_to_dict(self): ctx = context.TroveContext(user='test_user_id', request_id='test_req_id') ctx_dict = ctx.to_dict() self.assertThat(ctx_dict.get('user'), Equals('test_user_id')) self.assertThat(ctx_dict.get('request_id'), Equals('test_req_id'))
apache-2.0
Russell-IO/ansible
test/units/parsing/yaml/test_dumper.py
91
2322
# coding: utf-8 # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import io from ansible.compat.tests import unittest from ansible.parsing import vault from ansible.parsing.yaml import dumper, objects from ansible.parsing.yaml.loader import AnsibleLoader from units.mock.yaml_helper import YamlTestUtils from units.mock.vault_helper import TextVaultSecret class TestAnsibleDumper(unittest.TestCase, YamlTestUtils): def setUp(self): self.vault_password = "hunter42" vault_secret = TextVaultSecret(self.vault_password) self.vault_secrets = [('vault_secret', vault_secret)] self.good_vault = vault.VaultLib(self.vault_secrets) self.vault = self.good_vault self.stream = self._build_stream() self.dumper = dumper.AnsibleDumper def _build_stream(self, yaml_text=None): text = yaml_text or u'' stream = io.StringIO(text) return stream def _loader(self, stream): return AnsibleLoader(stream, vault_secrets=self.vault.secrets) def test(self): plaintext = 'This is a string we are going to encrypt.' 
avu = objects.AnsibleVaultEncryptedUnicode.from_plaintext(plaintext, vault=self.vault, secret=vault.match_secrets(self.vault_secrets, ['vault_secret'])[0][1]) yaml_out = self._dump_string(avu, dumper=self.dumper) stream = self._build_stream(yaml_out) loader = self._loader(stream) data_from_yaml = loader.get_single_data() self.assertEqual(plaintext, data_from_yaml.data)
gpl-3.0
privateip/ansible
lib/ansible/modules/system/lvol.py
23
17012
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- author: - "Jeroen Hoekx (@jhoekx)" - "Alexander Bulimov (@abulimov)" module: lvol short_description: Configure LVM logical volumes description: - This module creates, removes or resizes logical volumes. version_added: "1.1" options: vg: description: - The volume group this logical volume is part of. required: true lv: description: - The name of the logical volume. required: true size: description: - The size of the logical volume, according to lvcreate(8) --size, by default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE]; Float values must begin with a digit. Resizing using percentage values was not supported prior to 2.1. state: choices: [ "present", "absent" ] default: present description: - Control if the logical volume exists. If C(present) and the volume does not already exist then the C(size) option is required. required: false active: version_added: "2.2" choices: [ "yes", "no" ] default: "yes" description: - Whether the volume is activate and visible to the host. 
required: false force: version_added: "1.5" choices: [ "yes", "no" ] default: "no" description: - Shrink or remove operations of volumes requires this switch. Ensures that that filesystems get never corrupted/destroyed by mistake. required: false opts: version_added: "2.0" description: - Free-form options to be passed to the lvcreate command snapshot: version_added: "2.1" description: - The name of the snapshot volume required: false pvs: version_added: "2.2" description: - Comma separated list of physical volumes e.g. /dev/sda,/dev/sdb required: false shrink: version_added: "2.2" description: - shrink if current size is higher than size requested required: false default: yes notes: - Filesystems on top of the volume are not resized. ''' EXAMPLES = ''' # Create a logical volume of 512m. - lvol: vg: firefly lv: test size: 512 # Create a logical volume of 512m with disks /dev/sda and /dev/sdb - lvol: vg: firefly lv: test size: 512 pvs: /dev/sda,/dev/sdb # Create cache pool logical volume - lvol: vg: firefly lv: lvcache size: 512m opts: --type cache-pool # Create a logical volume of 512g. - lvol: vg: firefly lv: test size: 512g # Create a logical volume the size of all remaining space in the volume group - lvol: vg: firefly lv: test size: 100%FREE # Create a logical volume with special options - lvol: vg: firefly lv: test size: 512g opts: -r 16 # Extend the logical volume to 1024m. 
- lvol: vg: firefly lv: test size: 1024 # Extend the logical volume to consume all remaining space in the volume group - lvol: vg: firefly lv: test size: +100%FREE # Extend the logical volume to take all remaining space of the PVs - lvol: vg: firefly lv: test size: 100%PVS # Resize the logical volume to % of VG - lvol: vg: firefly lv: test size: 80%VG force: yes # Reduce the logical volume to 512m - lvol: vg: firefly lv: test size: 512 force: yes # Set the logical volume to 512m and do not try to shrink if size is lower than current one - lvol: vg: firefly lv: test size: 512 shrink: no # Remove the logical volume. - lvol: vg: firefly lv: test state: absent force: yes # Create a snapshot volume of the test logical volume. - lvol: vg: firefly lv: test snapshot: snap1 size: 100m # Deactivate a logical volume - lvol: vg: firefly lv: test active: false # Create a deactivated logical volume - lvol: vg: firefly lv: test size: 512g active: false ''' import re decimal_point = re.compile(r"(\d+)") def mkversion(major, minor, patch): return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch) def parse_lvs(data): lvs = [] for line in data.splitlines(): parts = line.strip().split(';') lvs.append({ 'name': parts[0].replace('[','').replace(']',''), 'size': int(decimal_point.match(parts[1]).group(1)), 'active': (parts[2][4] == 'a') }) return lvs def parse_vgs(data): vgs = [] for line in data.splitlines(): parts = line.strip().split(';') vgs.append({ 'name': parts[0], 'size': int(decimal_point.match(parts[1]).group(1)), 'free': int(decimal_point.match(parts[2]).group(1)), 'ext_size': int(decimal_point.match(parts[3]).group(1)) }) return vgs def get_lvm_version(module): ver_cmd = module.get_bin_path("lvm", required=True) rc, out, err = module.run_command("%s version" % (ver_cmd)) if rc != 0: return None m = re.search("LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out) if not m: return None return mkversion(m.group(1), m.group(2), m.group(3)) def main(): module 
= AnsibleModule( argument_spec=dict( vg=dict(required=True), lv=dict(required=True), size=dict(type='str'), opts=dict(type='str'), state=dict(choices=["absent", "present"], default='present'), force=dict(type='bool', default='no'), shrink=dict(type='bool', default='yes'), active=dict(type='bool', default='yes'), snapshot=dict(type='str', default=None), pvs=dict(type='str') ), supports_check_mode=True, ) # Determine if the "--yes" option should be used version_found = get_lvm_version(module) if version_found == None: module.fail_json(msg="Failed to get LVM version number") version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option if version_found >= version_yesopt: yesopt = "--yes" else: yesopt = "" vg = module.params['vg'] lv = module.params['lv'] size = module.params['size'] opts = module.params['opts'] state = module.params['state'] force = module.boolean(module.params['force']) shrink = module.boolean(module.params['shrink']) active = module.boolean(module.params['active']) size_opt = 'L' size_unit = 'm' snapshot = module.params['snapshot'] pvs = module.params['pvs'] if pvs is None: pvs = "" else: pvs = pvs.replace(",", " ") if opts is None: opts = "" # Add --test option when running in check-mode if module.check_mode: test_opt = ' --test' else: test_opt = '' if size: # LVCREATE(8) -l --extents option with percentage if '%' in size: size_parts = size.split('%', 1) size_percent = int(size_parts[0]) if size_percent > 100: module.fail_json(msg="Size percentage cannot be larger than 100%") size_whole = size_parts[1] if size_whole == 'ORIGIN': module.fail_json(msg="Snapshot Volumes are not supported") elif size_whole not in ['VG', 'PVS', 'FREE']: module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE") size_opt = 'l' size_unit = '' if not '%' in size: # LVCREATE(8) -L --size option unit if size[-1].lower() in 'bskmgtpe': size_unit = size[-1].lower() size = size[0:-1] try: float(size) if not size[0].isdigit(): raise ValueError() except 
ValueError: module.fail_json(msg="Bad size specification of '%s'" % size) # when no unit, megabytes by default if size_opt == 'l': unit = 'm' else: unit = size_unit # Get information on volume group requested vgs_cmd = module.get_bin_path("vgs", required=True) rc, current_vgs, err = module.run_command( "%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg)) if rc != 0: if state == 'absent': module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg) else: module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err) vgs = parse_vgs(current_vgs) this_vg = vgs[0] # Get information on logical volume requested lvs_cmd = module.get_bin_path("lvs", required=True) rc, current_lvs, err = module.run_command( "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg)) if rc != 0: if state == 'absent': module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg) else: module.fail_json(msg="Volume group %s does not exist." 
% vg, rc=rc, err=err) changed = False lvs = parse_lvs(current_lvs) if snapshot is None: check_lv = lv else: check_lv = snapshot for test_lv in lvs: if test_lv['name'] == check_lv: this_lv = test_lv break else: this_lv = None if state == 'present' and not size: if this_lv is None: module.fail_json(msg="No size given.") msg = '' if this_lv is None: if state == 'present': ### create LV lvcreate_cmd = module.get_bin_path("lvcreate", required=True) if snapshot is not None: cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv) else: cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs) rc, _, err = module.run_command(cmd) if rc == 0: changed = True else: module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err) else: if state == 'absent': ### remove LV if not force: module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name'])) lvremove_cmd = module.get_bin_path("lvremove", required=True) rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name'])) if rc == 0: module.exit_json(changed=True) else: module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err) elif not size: pass elif size_opt == 'l': ### Resize LV based on % value tool = None size_free = this_vg['free'] if size_whole == 'VG' or size_whole == 'PVS': size_requested = size_percent * this_vg['size'] / 100 else: # size_whole == 'FREE': size_requested = size_percent * this_vg['free'] / 100 if '+' in size: size_requested += this_lv['size'] if this_lv['size'] < size_requested: if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))): tool = module.get_bin_path("lvextend", required=True) else: module.fail_json(msg="Logical Volume %s could not be extended. 
Not enough free space left (%s%s required / %s%s available)" % (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)) elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large if size_requested == 0: module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) elif not force: module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name'])) else: tool = module.get_bin_path("lvreduce", required=True) tool = '%s %s' % (tool, '--force') if tool: cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) rc, out, err = module.run_command(cmd) if "Reached maximum COW size" in out: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) elif rc == 0: changed = True msg="Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit) elif "matches existing size" in err: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) elif "not larger than existing size" in err: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) else: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) else: ### resize LV based on absolute values tool = None if int(size) > this_lv['size']: tool = module.get_bin_path("lvextend", required=True) elif shrink and int(size) < this_lv['size']: if int(size) == 0: module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) if not force: module.fail_json(msg="Sorry, no shrinking of %s without force=yes." 
% (this_lv['name'])) else: tool = module.get_bin_path("lvreduce", required=True) tool = '%s %s' % (tool, '--force') if tool: cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) rc, out, err = module.run_command(cmd) if "Reached maximum COW size" in out: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) elif rc == 0: changed = True elif "matches existing size" in err: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) elif "not larger than existing size" in err: module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) else: module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) if this_lv is not None: if active: lvchange_cmd = module.get_bin_path("lvchange", required=True) rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name'])) if rc == 0: module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) else: module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err) else: lvchange_cmd = module.get_bin_path("lvchange", required=True) rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name'])) if rc == 0: module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) else: module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err) module.exit_json(changed=changed, msg=msg) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
gbenson/binutils-gdb
gdb/contrib/cleanup_check.py
46
13267
# Copyright 2013-2015 Free Software Foundation, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see
# <http://www.gnu.org/licenses/>.

"""GCC Python-plugin pass that audits gdb's cleanup discipline.

Runs as a Gimple pass after CFG construction and, for every function,
tracks a stack of "cleanups" (values returned by make_cleanup-style
constructors) through the basic blocks.  It emits permerrors when a
cleanup is leaked at a return, popped with an unknown argument, or
reassigned while still live.
"""

import gcc
import gccutils
import sys

# When True, also report functions that could be converted to RAII.
want_raii_info = False

# Diagnostic switches for debugging this checker itself.
logging = False
show_cfg = False

def log(msg, indent=0):
    # Write MSG to stderr at the given indent, only when logging is on.
    global logging
    if logging:
        sys.stderr.write('%s%s\n' % (' ' * indent, msg))
        sys.stderr.flush()

def is_cleanup_type(return_type):
    # A cleanup constructor returns a 'struct cleanup *'.
    if not isinstance(return_type, gcc.PointerType):
        return False
    if not isinstance(return_type.dereference, gcc.RecordType):
        return False
    if str(return_type.dereference.name) == 'cleanup':
        return True
    return False

def is_constructor(decl):
    "Return True if the function DECL is a cleanup constructor; False otherwise"
    return is_cleanup_type(decl.type.type) and (not decl.name or str(decl.name) != 'make_final_cleanup')

# Functions that destroy (run or drop) cleanups back to a given point.
destructor_names = set(['do_cleanups', 'discard_cleanups'])

def is_destructor(decl):
    return decl.name in destructor_names

# This list is just much too long...  we should probably have an
# attribute instead.
special_names = set(['do_final_cleanups', 'discard_final_cleanups',
                     'save_cleanups', 'save_final_cleanups',
                     'restore_cleanups', 'restore_final_cleanups',
                     'exceptions_state_mc_init',
                     'make_my_cleanup2', 'make_final_cleanup', 'all_cleanups',
                     'save_my_cleanups', 'quit_target'])

def needs_special_treatment(decl):
    return decl.name in special_names

# Sometimes we need a new placeholder object that isn't the same as
# anything else.
class Dummy(object):
    def __init__(self, location):
        self.location = location

# A wrapper for a cleanup which has been assigned to a variable.
# This holds the variable and the location.
class Cleanup(object):
    def __init__(self, var, location):
        self.var = var
        self.location = location

# A class representing a master cleanup.  This holds a stack of
# cleanup objects and supports a merging operation.
class MasterCleanup(object):
    # Create a new MasterCleanup object.  OTHER, if given, is a
    # MasterCleanup object to copy.
    def __init__(self, other = None):
        # 'cleanups' is a list of cleanups.  Each element is either a
        # Dummy, for an anonymous cleanup, or a Cleanup, for a cleanup
        # which was assigned to a variable.
        if other is None:
            self.cleanups = []
            self.aliases = {}
        else:
            # Copy-construct: shallow-copy the stack and the alias map.
            self.cleanups = other.cleanups[:]
            self.aliases = dict(other.aliases)

    # Return True if DEFINITION and ARGUMENT name the same cleanup,
    # following one level of recorded aliasing on each side.
    def compare_vars(self, definition, argument):
        if definition == argument:
            return True
        if argument in self.aliases:
            argument = self.aliases[argument]
        if definition in self.aliases:
            definition = self.aliases[definition]
        return definition == argument

    # Record that LHS is a copy of RHS so the two names are treated
    # as the same cleanup variable.
    def note_assignment(self, lhs, rhs):
        log('noting assignment %s = %s' % (lhs, rhs), 4)
        self.aliases[lhs] = rhs

    # Merge with another MasterCleanup.
    # Returns True if this resulted in a change to our state.
    def merge(self, other):
        # We do explicit iteration like this so we can easily
        # update the list after the loop.
        counter = -1
        found_named = False
        for counter in range(len(self.cleanups) - 1, -1, -1):
            var = self.cleanups[counter]
            log('merge checking %s' % var, 4)
            # Only interested in named cleanups.
            if isinstance(var, Dummy):
                log('=> merge dummy', 5)
                continue
            # Now see if VAR is found in OTHER.
            if other._find_var(var.var) >= 0:
                log ('=> merge found', 5)
                break
            log('=>merge not found', 5)
            found_named = True
        if found_named and counter < len(self.cleanups) - 1:
            log ('merging to %d' % counter, 4)
            if counter < 0:
                self.cleanups = []
            else:
                # Truncate to the common prefix shared with OTHER.
                self.cleanups = self.cleanups[0:counter]
            return True
        # If SELF is empty but OTHER has some cleanups, then consider
        # that a change as well.
        if len(self.cleanups) == 0 and len(other.cleanups) > 0:
            log('merging non-empty other', 4)
            self.cleanups = other.cleanups[:]
            return True
        return False

    # Push a new constructor onto our stack.  LHS is the
    # left-hand-side of the GimpleCall statement.  It may be None,
    # meaning that this constructor's value wasn't used.
    def push(self, location, lhs):
        if lhs is None:
            obj = Dummy(location)
        else:
            obj = Cleanup(lhs, location)
        log('pushing %s' % lhs, 4)
        idx = self._find_var(lhs)
        if idx >= 0:
            # The variable already holds a live cleanup; overwriting
            # it would lose the old one.
            gcc.permerror(location, 'reassigning to known cleanup')
            gcc.inform(self.cleanups[idx].location,
                       'previous assignment is here')
        self.cleanups.append(obj)

    # A helper for merge and pop that finds BACK_TO in self.cleanups,
    # and returns the index, or -1 if not found.
    def _find_var(self, back_to):
        for i in range(len(self.cleanups) - 1, -1, -1):
            if isinstance(self.cleanups[i], Dummy):
                continue
            if self.compare_vars(self.cleanups[i].var, back_to):
                return i
        return -1

    # Pop constructors until we find one matching BACK_TO.
    # This is invoked when we see a do_cleanups call.
    def pop(self, location, back_to):
        log('pop:', 4)
        i = self._find_var(back_to)
        if i >= 0:
            self.cleanups = self.cleanups[0:i]
        else:
            gcc.permerror(location, 'destructor call with unknown argument')

    # Check whether ARG is the current master cleanup.  Return True if
    # all is well.
    def verify(self, location, arg):
        log('verify %s' % arg, 4)
        return (len(self.cleanups) > 0
                and not isinstance(self.cleanups[0], Dummy)
                and self.compare_vars(self.cleanups[0].var, arg))

    # Check whether SELF is empty.
    def isempty(self):
        log('isempty: len = %d' % len(self.cleanups), 4)
        return len(self.cleanups) == 0

    # Emit informational warnings about the cleanup stack.
    def inform(self):
        for item in reversed(self.cleanups):
            gcc.inform(item.location, 'leaked cleanup')

# Per-function driver: walks the CFG propagating MasterCleanup state
# and reporting violations.
class CleanupChecker:
    def __init__(self, fun):
        self.fun = fun
        self.seen_edges = set()
        self.bad_returns = set()

        # This maps BB indices to a list of master cleanups for the
        # BB.
        self.master_cleanups = {}

    # Pick a reasonable location for the basic block BB.
    def guess_bb_location(self, bb):
        if isinstance(bb.gimple, list):
            for stmt in bb.gimple:
                if stmt.loc:
                    return stmt.loc
        return self.fun.end

    # Compute the master cleanup list for BB.
    # Modifies MASTER_CLEANUP in place.
    def compute_master(self, bb, bb_from, master_cleanup):
        if not isinstance(bb.gimple, list):
            return
        curloc = self.fun.end
        for stmt in bb.gimple:
            if stmt.loc:
                curloc = stmt.loc
            if isinstance(stmt, gcc.GimpleCall) and stmt.fndecl:
                if is_constructor(stmt.fndecl):
                    log('saw constructor %s in bb=%d' % (str(stmt.fndecl), bb.index), 2)
                    self.cleanup_aware = True
                    master_cleanup.push(curloc, stmt.lhs)
                elif is_destructor(stmt.fndecl):
                    if str(stmt.fndecl.name) != 'do_cleanups':
                        self.only_do_cleanups_seen = False
                    log('saw destructor %s in bb=%d, bb_from=%d, argument=%s'
                        % (str(stmt.fndecl.name), bb.index, bb_from,
                           str(stmt.args[0])), 2)
                    master_cleanup.pop(curloc, stmt.args[0])
                elif needs_special_treatment(stmt.fndecl):
                    pass
                    # gcc.permerror(curloc, 'function needs special treatment')
            elif isinstance(stmt, gcc.GimpleAssign):
                # Track var = var copies so later pops by the alias work.
                if isinstance(stmt.lhs, gcc.VarDecl) and isinstance(stmt.rhs[0], gcc.VarDecl):
                    master_cleanup.note_assignment(stmt.lhs, stmt.rhs[0])
            elif isinstance(stmt, gcc.GimpleReturn):
                if self.is_constructor:
                    if not master_cleanup.verify(curloc, stmt.retval):
                        gcc.permerror(curloc,
                                      'constructor does not return master cleanup')
                elif not self.is_special_constructor:
                    if not master_cleanup.isempty():
                        # Report each offending return location only once.
                        if curloc not in self.bad_returns:
                            gcc.permerror(curloc, 'cleanup stack is not empty at return')
                            self.bad_returns.add(curloc)
                            master_cleanup.inform()

    # Traverse a basic block, updating the master cleanup information
    # and propagating to other blocks.
    def traverse_bbs(self, edge, bb, bb_from, entry_master):
        log('traverse_bbs %d from %d' % (bb.index, bb_from), 1)

        # Propagate the entry MasterCleanup though this block.
        master_cleanup = MasterCleanup(entry_master)
        self.compute_master(bb, bb_from, master_cleanup)

        modified = False
        if bb.index in self.master_cleanups:
            # Merge the newly-computed MasterCleanup into the one we
            # have already computed.  If this resulted in a
            # significant change, then we need to re-propagate.
            modified = self.master_cleanups[bb.index].merge(master_cleanup)
        else:
            self.master_cleanups[bb.index] = master_cleanup
            modified = True

        # EDGE is None for the entry BB.
        if edge is not None:
            # If merging cleanups caused a change, check to see if we
            # have a bad loop.
            if edge in self.seen_edges:
                # This error doesn't really help.
                # if modified:
                #     gcc.permerror(self.guess_bb_location(bb),
                #                   'invalid cleanup use in loop')
                return
            self.seen_edges.add(edge)

        if not modified:
            return

        # Now propagate to successor nodes.
        for edge in bb.succs:
            self.traverse_bbs(edge, edge.dest, bb.index, master_cleanup)

    # Entry point for one function; returns a short classification
    # string ('OK', 'constructor', 'destructor', 'special', 'ignored').
    def check_cleanups(self):
        if not self.fun.cfg or not self.fun.decl:
            return 'ignored'
        if is_destructor(self.fun.decl):
            return 'destructor'
        if needs_special_treatment(self.fun.decl):
            return 'special'

        self.is_constructor = is_constructor(self.fun.decl)
        self.is_special_constructor = not self.is_constructor and str(self.fun.decl.name).find('with_cleanup') > -1
        # Yuck.
        if str(self.fun.decl.name) == 'gdb_xml_create_parser_and_cleanup_1':
            self.is_special_constructor = True

        if self.is_special_constructor:
            gcc.inform(self.fun.start, 'function %s is a special constructor' % (self.fun.decl.name))

        # If we only see do_cleanups calls, and this function is not
        # itself a constructor, then we can convert it easily to RAII.
        self.only_do_cleanups_seen = not self.is_constructor
        # If we ever call a constructor, then we are "cleanup-aware".
        self.cleanup_aware = False

        entry_bb = self.fun.cfg.entry
        master_cleanup = MasterCleanup()
        self.traverse_bbs(None, entry_bb, -1, master_cleanup)
        if want_raii_info and self.only_do_cleanups_seen and self.cleanup_aware:
            gcc.inform(self.fun.decl.location,
                       'function %s could be converted to RAII' % (self.fun.decl.name))
        if self.is_constructor:
            return 'constructor'
        return 'OK'

# The plugin pass object: runs the checker over every function.
class CheckerPass(gcc.GimplePass):
    def execute(self, fun):
        if fun.decl:
            log("Starting " + fun.decl.name)
        if show_cfg:
            dot = gccutils.cfg_to_dot(fun.cfg, fun.decl.name)
            gccutils.invoke_dot(dot, name=fun.decl.name)
        checker = CleanupChecker(fun)
        what = checker.check_cleanups()
        if fun.decl:
            log(fun.decl.name + ': ' + what, 2)

ps = CheckerPass(name = 'check-cleanups')
# We need the cfg, but we want a relatively high-level Gimple.
ps.register_after('cfg')
gpl-2.0
aprefontaine/TMScheduler
django/contrib/auth/management/commands/changepassword.py
320
1527
from django.core.management.base import BaseCommand, CommandError from django.contrib.auth.models import User import getpass class Command(BaseCommand): help = "Change a user's password for django.contrib.auth." requires_model_validation = False def _get_pass(self, prompt="Password: "): p = getpass.getpass(prompt=prompt) if not p: raise CommandError("aborted") return p def handle(self, *args, **options): if len(args) > 1: raise CommandError("need exactly one or zero arguments for username") if args: username, = args else: username = getpass.getuser() try: u = User.objects.get(username=username) except User.DoesNotExist: raise CommandError("user '%s' does not exist" % username) print "Changing password for user '%s'" % u.username MAX_TRIES = 3 count = 0 p1, p2 = 1, 2 # To make them initially mismatch. while p1 != p2 and count < MAX_TRIES: p1 = self._get_pass() p2 = self._get_pass("Password (again): ") if p1 != p2: print "Passwords do not match. Please try again." count = count + 1 if count == MAX_TRIES: raise CommandError("Aborting password change for user '%s' after %s attempts" % (username, count)) u.set_password(p1) u.save() return "Password changed successfully for user '%s'" % u.username
bsd-3-clause
cordis/pycloudia
pycloudia/respondent/runner.py
1
1212
from pycloudia.respondent.exceptions import ResponseTimeoutError, ResponseNotHandledError
from pycloudia.respondent.interfaces import IRunner


class Runner(IRunner):
    """
    Keeps deferreds for in-flight requests and fires them on outcome.

    :type reactor: L{pycloudia.reactor.interfaces.IReactor}
    :type dao: L{pycloudia.respondent.interfaces.IDao}
    """
    reactor = None
    dao = None

    def __init__(self):
        # request_id -> pending deferred
        self.registry = {}

    def listen(self, request_id, deferred, timeout):
        # Register the deferred and arm its expiry; a duplicate id is
        # a programming error.
        assert request_id not in self.registry
        self.registry[request_id] = deferred
        self._set_timeout(request_id, timeout)
        return deferred

    def _set_timeout(self, request_id, timeout):
        # After TIMEOUT seconds, reject the request with a timeout error.
        failure = ResponseTimeoutError(request_id)
        self.reactor.call_later(timeout, self.reject, request_id, failure)

    def reject(self, request_id, reason):
        # Fail the pending deferred; an unknown id is silently ignored
        # (the request may already have been resolved).
        absent = object()
        deferred = self.registry.pop(request_id, absent)
        if deferred is not absent:
            deferred.errback(reason)

    def resolve(self, request_id, *args, **kwargs):
        # Fire the pending deferred's callback; an unknown id raises.
        if request_id not in self.registry:
            raise ResponseNotHandledError(request_id)
        self.registry.pop(request_id).callback(*args, **kwargs)
mit
rbbratta/virt-test
libvirt/tests/src/virsh_cmd/domain/virsh_setmaxmem.py
3
7562
import logging from autotest.client.shared import utils, error from virttest import virsh, virt_vm from virttest.libvirt_xml import vm_xml def run_virsh_setmaxmem(test, params, env): """ Test command: virsh setmaxmem. 1) Prepare vm environment. 2) Handle params 3) Run test command and get vm started then get maxmem. 4) Recover environment. 5) Check result. TODO: support more options:--live,--config,--current. """ def vmxml_max_mem(vm_name): vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) return int(vmxml.max_mem) def make_domref(domarg, vm_ref, domid, vm_name, domuuid): # Specify domain as argument or parameter if domarg == "yes": dom_darg_key = "domainarg" else: dom_darg_key = "domain" # How to reference domain if vm_ref == "domid": dom_darg_value = domid elif vm_ref == "domname": dom_darg_value = vm_name elif vm_ref == "domuuid": dom_darg_value = domuuid elif vm_ref == "none": dom_darg_value = None elif vm_ref == "emptystring": dom_darg_value = '""' else: # stick in value directly dom_darg_value = vm_ref return {dom_darg_key:dom_darg_value} def make_sizeref(sizearg, mem_ref, original_mem): if sizearg == "yes": size_darg_key = "sizearg" else: size_darg_key = "size" if mem_ref == "halfless": size_darg_value = "%d" % (original_mem / 2) elif mem_ref == "halfmore": size_darg_value = "%d" % int(original_mem * 1.5) elif mem_ref == "same": size_darg_value = "%d" % original_mem elif mem_ref == "emptystring": size_darg_value = '""' elif mem_ref == "zero": size_darg_value = "0" elif mem_ref == "toosmall": size_darg_value = "1024" elif mem_ref == "toobig": size_darg_value = "1099511627776" # (KiB) One Petabyte elif mem_ref == "none": size_darg_value = None else: # stick in value directly size_darg_value = mem_ref return {size_darg_key:size_darg_value} def is_old_libvirt(): regex = r'\s+\[--size\]\s+' return bool( not virsh.has_command_help_match('setmaxmem', regex) ) def is_xen_host(): check_cmd = "ls /dev/kvm" return utils.run(check_cmd, ignore_status=True).exit_status def 
is_in_range(actual, expected, error_percent): deviation = 100 - (100 * (float(actual) / float(expected))) logging.debug("Deviation: %0.2f%%" % float(deviation)) return float(deviation) <= float(error_percent) def print_debug_stats(original_vmxml_mem, original_dominfo_mem, expected_mem, test_vmxml_mem, test_dominfo_mem): dbgmsg = ("Original vmxml mem : %d KiB\n" "Original dominfo mem : %d KiB\n" "Expected max mem : %d KiB\n" "Actual vmxml mem : %d KiB\n" "Actual dominfo mem : %d KiB\n" % ( original_vmxml_mem, original_dominfo_mem, expected_mem, test_vmxml_mem, test_dominfo_mem)) for dbgline in dbgmsg.splitlines(): logging.debug(dbgline) ### MAIN TEST CODE ### # Process cartesian parameters vm_ref = params.get("setmaxmem_vm_ref", "") mem_ref = params.get("setmaxmem_mem_ref", "") status_error = "yes" == params.get("status_error", "no") flags = params.get("setmaxmem_flags", "") domarg = params.get("setmaxmem_domarg", "no") sizearg = params.get("setmaxmem_sizearg", "no") delta_per = params.get("setmaxmem_delta_per", "10") vm_name = params.get("main_vm") # Gather environment parameters vm = env.get_vm(vm_name) original_vmxml_mem = vmxml_max_mem(vm_name) original_dominfo_mem = vm.get_max_mem() domid = vm.get_id() domuuid = vm.get_uuid() uri = vm.connect_uri old_libvirt = is_old_libvirt() if old_libvirt: logging.info("Running test on older libvirt") use_kilobytes = True else: logging.info("Running test on newer libvirt") use_kilobytes = False xen_host = is_xen_host() if xen_host: logging.info("Running on xen host, %s offset is allowed.", delta_per) # Argument pattern is complex, build with dargs dargs = {'flagstr':flags, 'use_kilobytes':use_kilobytes, 'uri':uri, 'ignore_status':True, "debug":True} dargs.update( make_domref(domarg, vm_ref, domid, vm_name, domuuid) ) dargs.update( make_sizeref(sizearg, mem_ref, original_dominfo_mem) ) if status_error: logging.info("Error Test: Expecting an error to occur!") result = virsh.setmaxmem(**dargs) status = result.exit_status # 
Gather status if not running error test start_status = 0 # Check can guest be started after maxmem-modified. if not status_error: if vm.state() == "shut off": try: vm.start() except virt_vm.VMStartError, detail: start_status = 1 logging.error("Start after VM's max mem modified failed:%s", detail) # Actual results test_vmxml_mem = vmxml_max_mem(vm_name) test_dominfo_mem = vm.get_max_mem() # Expected results for both vmxml and dominfo if sizearg == "yes": expected_mem = int(dargs["sizearg"]) else: expected_mem = int(dargs["size"]) print_debug_stats(original_vmxml_mem, original_dominfo_mem, expected_mem, test_vmxml_mem, test_dominfo_mem) else: if vm.state() == "paused": vm.resume() # Restore need vm to be shut off. if vm.state() != "shut off": vm.destroy() if status is 0: # Restore original memory restore_status = virsh.setmaxmem(domainarg=vm_name, sizearg=original_dominfo_mem, ignore_status=True).exit_status if restore_status: logging.warning("Failed to restore VM's original memory to " "%s KiB", original_dominfo_mem) else: # virsh setmaxmem failed, no need to restore pass # Don't care about memory comparison on error test if status_error: if status is 0: raise error.TestFail("Error test did not result in an error.") else: vmxml_match = (test_vmxml_mem == expected_mem) if xen_host: dominfo_match = is_in_range(test_dominfo_mem, expected_mem, delta_per) else: dominfo_match = (test_dominfo_mem == expected_mem) if (status or start_status or not vmxml_match or not dominfo_match): msg = "test conditions not met: " if status: msg += "Non-zero virsh setmaxmem exit code. " if not vmxml_match: msg += "Max memory in VM's xml is not matched. " if not dominfo_match: msg += "Max memory in dominfo's output is not matched. " if start_status: msg += "Start after VM's max mem is modified failed." raise error.TestFail(msg) logging.info("Test end normally.")
gpl-2.0
Edraak/circleci-edx-platform
common/djangoapps/student/models.py
1
76830
""" Models for User Information (students, staff, etc) Migration Notes If you make changes to this model, be sure to create an appropriate migration file and check it in at the same time as your model changes. To do that, 1. Go to the edx-platform dir 2. ./manage.py lms schemamigration student --auto description_of_your_change 3. Add the migration file created in edx-platform/common/djangoapps/student/migrations/ """ from collections import defaultdict, OrderedDict from datetime import datetime, timedelta from functools import total_ordering import hashlib from importlib import import_module import json import logging from pytz import UTC from urllib import urlencode import uuid import analytics from config_models.models import ConfigurationModel from django.utils.translation import ugettext_lazy as _ from django.conf import settings from django.utils import timezone from django.contrib.auth.models import User from django.contrib.auth.hashers import make_password from django.contrib.auth.signals import user_logged_in, user_logged_out from django.db import models, IntegrityError, transaction from django.db.models import Count from django.db.models.signals import pre_save, post_save from django.dispatch import receiver, Signal from django.core.exceptions import ObjectDoesNotExist from django.utils.translation import ugettext_noop from django.core.cache import cache from django_countries.fields import CountryField import dogstats_wrapper as dog_stats_api from eventtracking import tracker from opaque_keys.edx.keys import CourseKey from opaque_keys.edx.locations import SlashSeparatedCourseKey from simple_history.models import HistoricalRecords from track import contexts from xmodule_django.models import CourseKeyField, NoneToEmptyManager from certificates.models import GeneratedCertificate from course_modes.models import CourseMode from enrollment.api import _default_course_mode import lms.lib.comment_client as cc from openedx.core.djangoapps.commerce.utils import 
ecommerce_api_client, ECOMMERCE_DATE_FORMAT from openedx.core.djangoapps.content.course_overviews.models import CourseOverview from util.model_utils import emit_field_changed_events, get_changed_fields_dict from util.query import use_read_replica_if_available from util.milestones_helpers import is_entrance_exams_enabled UNENROLL_DONE = Signal(providing_args=["course_enrollment", "skip_refund"]) log = logging.getLogger(__name__) AUDIT_LOG = logging.getLogger("audit") SessionStore = import_module(settings.SESSION_ENGINE).SessionStore # pylint: disable=invalid-name UNENROLLED_TO_ALLOWEDTOENROLL = 'from unenrolled to allowed to enroll' ALLOWEDTOENROLL_TO_ENROLLED = 'from allowed to enroll to enrolled' ENROLLED_TO_ENROLLED = 'from enrolled to enrolled' ENROLLED_TO_UNENROLLED = 'from enrolled to unenrolled' UNENROLLED_TO_ENROLLED = 'from unenrolled to enrolled' ALLOWEDTOENROLL_TO_UNENROLLED = 'from allowed to enroll to enrolled' UNENROLLED_TO_UNENROLLED = 'from unenrolled to unenrolled' DEFAULT_TRANSITION_STATE = 'N/A' TRANSITION_STATES = ( (UNENROLLED_TO_ALLOWEDTOENROLL, UNENROLLED_TO_ALLOWEDTOENROLL), (ALLOWEDTOENROLL_TO_ENROLLED, ALLOWEDTOENROLL_TO_ENROLLED), (ENROLLED_TO_ENROLLED, ENROLLED_TO_ENROLLED), (ENROLLED_TO_UNENROLLED, ENROLLED_TO_UNENROLLED), (UNENROLLED_TO_ENROLLED, UNENROLLED_TO_ENROLLED), (ALLOWEDTOENROLL_TO_UNENROLLED, ALLOWEDTOENROLL_TO_UNENROLLED), (UNENROLLED_TO_UNENROLLED, UNENROLLED_TO_UNENROLLED), (DEFAULT_TRANSITION_STATE, DEFAULT_TRANSITION_STATE) ) class AnonymousUserId(models.Model): """ This table contains user, course_Id and anonymous_user_id Purpose of this table is to provide user by anonymous_user_id. We generate anonymous_user_id using md5 algorithm, and use result in hex form, so its length is equal to 32 bytes. 
""" objects = NoneToEmptyManager() user = models.ForeignKey(User, db_index=True) anonymous_user_id = models.CharField(unique=True, max_length=32) course_id = CourseKeyField(db_index=True, max_length=255, blank=True) unique_together = (user, course_id) def anonymous_id_for_user(user, course_id, save=True): """ Return a unique id for a (user, course) pair, suitable for inserting into e.g. personalized survey links. If user is an `AnonymousUser`, returns `None` Keyword arguments: save -- Whether the id should be saved in an AnonymousUserId object. """ # This part is for ability to get xblock instance in xblock_noauth handlers, where user is unauthenticated. if user.is_anonymous(): return None cached_id = getattr(user, '_anonymous_id', {}).get(course_id) if cached_id is not None: return cached_id # include the secret key as a salt, and to make the ids unique across different LMS installs. hasher = hashlib.md5() hasher.update(settings.SECRET_KEY) hasher.update(unicode(user.id)) if course_id: hasher.update(course_id.to_deprecated_string().encode('utf-8')) digest = hasher.hexdigest() if not hasattr(user, '_anonymous_id'): user._anonymous_id = {} # pylint: disable=protected-access user._anonymous_id[course_id] = digest # pylint: disable=protected-access if save is False: return digest try: anonymous_user_id, __ = AnonymousUserId.objects.get_or_create( defaults={'anonymous_user_id': digest}, user=user, course_id=course_id ) if anonymous_user_id.anonymous_user_id != digest: log.error( u"Stored anonymous user id %r for user %r " u"in course %r doesn't match computed id %r", unicode(user), course_id, anonymous_user_id.anonymous_user_id, digest ) except IntegrityError: # Another thread has already created this entry, so # continue pass return digest def user_by_anonymous_id(uid): """ Return user by anonymous_user_id using AnonymousUserId lookup table. 
Do not raise `django.ObjectDoesNotExist` exception, if there is no user for anonymous_student_id, because this function will be used inside xmodule w/o django access. """ if uid is None: return None try: return User.objects.get(anonymoususerid__anonymous_user_id=uid) except ObjectDoesNotExist: return None class UserStanding(models.Model): """ This table contains a student's account's status. Currently, we're only disabling accounts; in the future we can imagine taking away more specific privileges, like forums access, or adding more specific karma levels or probationary stages. """ ACCOUNT_DISABLED = "disabled" ACCOUNT_ENABLED = "enabled" USER_STANDING_CHOICES = ( (ACCOUNT_DISABLED, u"Account Disabled"), (ACCOUNT_ENABLED, u"Account Enabled"), ) user = models.OneToOneField(User, db_index=True, related_name='standing') account_status = models.CharField( blank=True, max_length=31, choices=USER_STANDING_CHOICES ) changed_by = models.ForeignKey(User, blank=True) standing_last_changed_at = models.DateTimeField(auto_now=True) class UserProfile(models.Model): """This is where we store all the user demographic fields. We have a separate table for this rather than extending the built-in Django auth_user. Notes: * Some fields are legacy ones from the first run of 6.002, from which we imported many users. * Fields like name and address are intentionally open ended, to account for international variations. An unfortunate side-effect is that we cannot efficiently sort on last names for instance. Replication: * Only the Portal servers should ever modify this information. * All fields are replicated into relevant Course databases Some of the fields are legacy ones that were captured during the initial MITx fall prototype. """ # cache key format e.g user.<user_id>.profile.country = 'SG' PROFILE_COUNTRY_CACHE_KEY = u"user.{user_id}.profile.country" class Meta(object): db_table = "auth_userprofile" # CRITICAL TODO/SECURITY # Sanitize all fields. 
# This is not visible to other users, but could introduce holes later user = models.OneToOneField(User, unique=True, db_index=True, related_name='profile') name = models.CharField(blank=True, max_length=255, db_index=True) meta = models.TextField(blank=True) # JSON dictionary for future expansion courseware = models.CharField(blank=True, max_length=255, default='course.xml') # Location is no longer used, but is held here for backwards compatibility # for users imported from our first class. language = models.CharField(blank=True, max_length=255, db_index=True) location = models.CharField(blank=True, max_length=255, db_index=True) # Optional demographic data we started capturing from Fall 2012 this_year = datetime.now(UTC).year VALID_YEARS = range(this_year, this_year - 120, -1) year_of_birth = models.IntegerField(blank=True, null=True, db_index=True) GENDER_CHOICES = ( ('m', ugettext_noop('Male')), ('f', ugettext_noop('Female')), # Translators: 'Other' refers to the student's gender # ('o', ugettext_noop('Other')) # Removed other from the registration form ) gender = models.CharField( blank=True, null=True, max_length=6, db_index=True, choices=GENDER_CHOICES ) # [03/21/2013] removed these, but leaving comment since there'll still be # p_se and p_oth in the existing data in db. 
# ('p_se', 'Doctorate in science or engineering'), # ('p_oth', 'Doctorate in another field'), LEVEL_OF_EDUCATION_CHOICES = ( ('p', ugettext_noop('Doctorate')), ('m', ugettext_noop("Master's or professional degree")), ('b', ugettext_noop("Bachelor's degree")), ('a', ugettext_noop("Associate degree")), ('hs', ugettext_noop("Secondary/high school")), ('jhs', ugettext_noop("Junior secondary/junior high/middle school")), ('el', ugettext_noop("Elementary/primary school")), # Translators: 'None' refers to the student's level of education ('none', ugettext_noop("No Formal Education")), # Translators: 'Other' refers to the student's level of education ('other', ugettext_noop("Other Education")) ) level_of_education = models.CharField( blank=True, null=True, max_length=6, db_index=True, choices=LEVEL_OF_EDUCATION_CHOICES ) mailing_address = models.TextField(blank=True, null=True) city = models.TextField(blank=True, null=True) country = CountryField(blank=True, null=True) goals = models.TextField(blank=True, null=True) allow_certificate = models.BooleanField(default=1) bio = models.CharField(blank=True, null=True, max_length=3000, db_index=False) profile_image_uploaded_at = models.DateTimeField(null=True) @property def has_profile_image(self): """ Convenience method that returns a boolean indicating whether or not this user has uploaded a profile image. """ return self.profile_image_uploaded_at is not None @property def age(self): """ Convenience method that returns the age given a year_of_birth. """ year_of_birth = self.year_of_birth year = datetime.now(UTC).year if year_of_birth is not None: return year - year_of_birth @property def level_of_education_display(self): """ Convenience method that returns the human readable level of education. """ if self.level_of_education: return self.__enumerable_to_display(self.LEVEL_OF_EDUCATION_CHOICES, self.level_of_education) @property def gender_display(self): """ Convenience method that returns the human readable gender. 
""" if self.gender: return self.__enumerable_to_display(self.GENDER_CHOICES, self.gender) def get_meta(self): # pylint: disable=missing-docstring js_str = self.meta if not js_str: js_str = dict() else: js_str = json.loads(self.meta) return js_str def set_meta(self, meta_json): # pylint: disable=missing-docstring self.meta = json.dumps(meta_json) def set_login_session(self, session_id=None): """ Sets the current session id for the logged-in user. If session_id doesn't match the existing session, deletes the old session object. """ meta = self.get_meta() old_login = meta.get('session_id', None) if old_login: SessionStore(session_key=old_login).delete() meta['session_id'] = session_id self.set_meta(meta) self.save() def requires_parental_consent(self, date=None, age_limit=None, default_requires_consent=True): """Returns true if this user requires parental consent. Args: date (Date): The date for which consent needs to be tested (defaults to now). age_limit (int): The age limit at which parental consent is no longer required. This defaults to the value of the setting 'PARENTAL_CONTROL_AGE_LIMIT'. default_requires_consent (bool): True if users require parental consent if they have no specified year of birth (default is True). Returns: True if the user requires parental consent. """ if age_limit is None: age_limit = getattr(settings, 'PARENTAL_CONSENT_AGE_LIMIT', None) if age_limit is None: return False # Return True if either: # a) The user has a year of birth specified and that year is fewer years in the past than the limit. # b) The user has no year of birth specified and the default is to require consent. # # Note: we have to be conservative using the user's year of birth as their birth date could be # December 31st. This means that if the number of years since their birth year is exactly equal # to the age limit then we have to assume that they might still not be old enough. 
year_of_birth = self.year_of_birth if year_of_birth is None: return default_requires_consent if date is None: age = self.age else: age = date.year - year_of_birth return age <= age_limit def __enumerable_to_display(self, enumerables, enum_value): """ Get the human readable value from an enumerable list of key-value pairs. """ return dict(enumerables)[enum_value] @classmethod def country_cache_key_name(cls, user_id): """Return cache key name to be used to cache current country. Args: user_id(int): Id of user. Returns: Unicode cache key """ return cls.PROFILE_COUNTRY_CACHE_KEY.format(user_id=user_id) @receiver(models.signals.post_save, sender=UserProfile) def invalidate_user_profile_country_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name """Invalidate the cache of country in UserProfile model. """ changed_fields = getattr(instance, '_changed_fields', {}) if 'country' in changed_fields: cache_key = UserProfile.country_cache_key_name(instance.user_id) cache.delete(cache_key) log.info("Country changed in UserProfile for %s, cache deleted", instance.user_id) @receiver(pre_save, sender=UserProfile) def user_profile_pre_save_callback(sender, **kwargs): """ Ensure consistency of a user profile before saving it. """ user_profile = kwargs['instance'] # Remove profile images for users who require parental consent if user_profile.requires_parental_consent() and user_profile.has_profile_image: user_profile.profile_image_uploaded_at = None # Cache "old" field values on the model instance so that they can be # retrieved in the post_save callback when we emit an event with new and # old field values. user_profile._changed_fields = get_changed_fields_dict(user_profile, sender) @receiver(post_save, sender=UserProfile) def user_profile_post_save_callback(sender, **kwargs): """ Emit analytics events after saving the UserProfile. 
""" user_profile = kwargs['instance'] # pylint: disable=protected-access emit_field_changed_events( user_profile, user_profile.user, sender._meta.db_table, excluded_fields=['meta'] ) @receiver(pre_save, sender=User) def user_pre_save_callback(sender, **kwargs): """ Capture old fields on the user instance before save and cache them as a private field on the current model for use in the post_save callback. """ user = kwargs['instance'] user._changed_fields = get_changed_fields_dict(user, sender) @receiver(post_save, sender=User) def user_post_save_callback(sender, **kwargs): """ Emit analytics events after saving the User. """ user = kwargs['instance'] # pylint: disable=protected-access emit_field_changed_events( user, user, sender._meta.db_table, excluded_fields=['last_login', 'first_name', 'last_name'], hidden_fields=['password'] ) class UserSignupSource(models.Model): """ This table contains information about users registering via Micro-Sites """ user = models.ForeignKey(User, db_index=True) site = models.CharField(max_length=255, db_index=True) def unique_id_for_user(user, save=True): """ Return a unique id for a user, suitable for inserting into e.g. personalized survey links. Keyword arguments: save -- Whether the id should be saved in an AnonymousUserId object. """ # Setting course_id to '' makes it not affect the generated hash, # and thus produce the old per-student anonymous id return anonymous_id_for_user(user, None, save=save) # TODO: Should be renamed to generic UserGroup, and possibly # Given an optional field for type of group class UserTestGroup(models.Model): users = models.ManyToManyField(User, db_index=True) name = models.CharField(blank=False, max_length=32, db_index=True) description = models.TextField(blank=True) class Registration(models.Model): ''' Allows us to wait for e-mail before user is registered. A registration profile is created when the user creates an account, but that account is inactive. 
Once the user clicks on the activation key, it becomes active. ''' class Meta(object): db_table = "auth_registration" user = models.OneToOneField(User) activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True) def register(self, user): # MINOR TODO: Switch to crypto-secure key self.activation_key = uuid.uuid4().hex self.user = user self.save() def activate(self): self.user.is_active = True self.user.save() class PendingNameChange(models.Model): user = models.OneToOneField(User, unique=True, db_index=True) new_name = models.CharField(blank=True, max_length=255) rationale = models.CharField(blank=True, max_length=1024) class PendingEmailChange(models.Model): user = models.OneToOneField(User, unique=True, db_index=True) new_email = models.CharField(blank=True, max_length=255, db_index=True) activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True) def request_change(self, email): """Request a change to a user's email. Implicitly saves the pending email change record. Arguments: email (unicode): The proposed new email for the user. Returns: unicode: The activation code to confirm the change. """ self.new_email = email self.activation_key = uuid.uuid4().hex self.save() return self.activation_key EVENT_NAME_ENROLLMENT_ACTIVATED = 'edx.course.enrollment.activated' EVENT_NAME_ENROLLMENT_DEACTIVATED = 'edx.course.enrollment.deactivated' EVENT_NAME_ENROLLMENT_MODE_CHANGED = 'edx.course.enrollment.mode_changed' class PasswordHistory(models.Model): """ This model will keep track of past passwords that a user has used as well as providing contraints (e.g. 
can't reuse passwords) """ user = models.ForeignKey(User) password = models.CharField(max_length=128) time_set = models.DateTimeField(default=timezone.now) def create(self, user): """ This will copy over the current password, if any of the configuration has been turned on """ if not (PasswordHistory.is_student_password_reuse_restricted() or PasswordHistory.is_staff_password_reuse_restricted() or PasswordHistory.is_password_reset_frequency_restricted() or PasswordHistory.is_staff_forced_password_reset_enabled() or PasswordHistory.is_student_forced_password_reset_enabled()): return self.user = user self.password = user.password self.save() @classmethod def is_student_password_reuse_restricted(cls): """ Returns whether the configuration which limits password reuse has been turned on """ if not settings.FEATURES['ADVANCED_SECURITY']: return False min_diff_pw = settings.ADVANCED_SECURITY_CONFIG.get( 'MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE', 0 ) return min_diff_pw > 0 @classmethod def is_staff_password_reuse_restricted(cls): """ Returns whether the configuration which limits password reuse has been turned on """ if not settings.FEATURES['ADVANCED_SECURITY']: return False min_diff_pw = settings.ADVANCED_SECURITY_CONFIG.get( 'MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE', 0 ) return min_diff_pw > 0 @classmethod def is_password_reset_frequency_restricted(cls): """ Returns whether the configuration which limits the password reset frequency has been turned on """ if not settings.FEATURES['ADVANCED_SECURITY']: return False min_days_between_reset = settings.ADVANCED_SECURITY_CONFIG.get( 'MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS' ) return min_days_between_reset @classmethod def is_staff_forced_password_reset_enabled(cls): """ Returns whether the configuration which forces password resets to occur has been turned on """ if not settings.FEATURES['ADVANCED_SECURITY']: return False min_days_between_reset = settings.ADVANCED_SECURITY_CONFIG.get( 
            'MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS'
        )
        return min_days_between_reset

    @classmethod
    def is_student_forced_password_reset_enabled(cls):
        """
        Returns whether the configuration which forces password resets to occur has been turned on
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False
        min_days_pw_reset = settings.ADVANCED_SECURITY_CONFIG.get(
            'MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS'
        )
        return min_days_pw_reset

    @classmethod
    def should_user_reset_password_now(cls, user):
        """
        Returns whether a password has 'expired' and should be reset. Note there are two different
        expiry policies for staff and students
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False

        days_before_password_reset = None
        if user.is_staff:
            if cls.is_staff_forced_password_reset_enabled():
                days_before_password_reset = \
                    settings.ADVANCED_SECURITY_CONFIG['MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS']
        elif cls.is_student_forced_password_reset_enabled():
            days_before_password_reset = \
                settings.ADVANCED_SECURITY_CONFIG['MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS']

        if days_before_password_reset:
            history = PasswordHistory.objects.filter(user=user).order_by('-time_set')
            time_last_reset = None

            if history:
                # first element should be the last time we reset password
                time_last_reset = history[0].time_set
            else:
                # no history, then let's take the date the user joined
                time_last_reset = user.date_joined

            now = timezone.now()

            delta = now - time_last_reset

            return delta.days >= days_before_password_reset

        return False

    @classmethod
    def is_password_reset_too_soon(cls, user):
        """
        Verifies that the password is not getting reset too frequently
        """
        if not cls.is_password_reset_frequency_restricted():
            return False

        history = PasswordHistory.objects.filter(user=user).order_by('-time_set')

        if not history:
            return False

        now = timezone.now()

        delta = now - history[0].time_set

        return delta.days < settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']

    @classmethod
    def is_allowable_password_reuse(cls, user, new_password):
        """
        Verifies that the password adheres to the reuse policies
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return True

        if user.is_staff and cls.is_staff_password_reuse_restricted():
            min_diff_passwords_required = \
                settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
        elif cls.is_student_password_reuse_restricted():
            min_diff_passwords_required = \
                settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
        else:
            min_diff_passwords_required = 0

        # just limit the result set to the number of different
        # password we need
        # (when min_diff_passwords_required == 0 the slice is empty,
        # so the loop is skipped and reuse is always allowed)
        history = PasswordHistory.objects.filter(user=user).order_by('-time_set')[:min_diff_passwords_required]

        for entry in history:

            # be sure to re-use the same salt
            # NOTE, how the salt is serialized in the password field is dependent on the algorithm
            # in pbkdf2_sha256 [LMS] it's the 3rd element, in sha1 [unit tests] it's the 2nd element
            hash_elements = entry.password.split('$')
            algorithm = hash_elements[0]
            if algorithm == 'pbkdf2_sha256':
                hashed_password = make_password(new_password, hash_elements[2])
            elif algorithm == 'sha1':
                hashed_password = make_password(new_password, hash_elements[1])
            else:
                # This means we got something unexpected. We don't want to throw an exception, but
                # log as an error and basically allow any password reuse
                AUDIT_LOG.error('''
                                Unknown password hashing algorithm "{0}" found in existing password
                                hash, password reuse policy will not be enforced!!!
                                '''.format(algorithm))
                return True

            if entry.password == hashed_password:
                return False

        return True


class LoginFailures(models.Model):
    """
    This model will keep track of failed login attempts
    """
    user = models.ForeignKey(User)
    failure_count = models.IntegerField(default=0)
    lockout_until = models.DateTimeField(null=True)

    @classmethod
    def is_feature_enabled(cls):
        """
        Returns whether the feature flag around this functionality has been set
        """
        return settings.FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS']

    @classmethod
    def is_user_locked_out(cls, user):
        """
        Static method to return in a given user has his/her account locked out
        """
        try:
            record = LoginFailures.objects.get(user=user)
            if not record.lockout_until:
                return False

            # NOTE(review): compares against datetime.now(UTC); assumes
            # lockout_until is stored timezone-aware — confirm USE_TZ setup.
            now = datetime.now(UTC)
            until = record.lockout_until
            is_locked_out = until and now < until

            return is_locked_out
        except ObjectDoesNotExist:
            return False

    @classmethod
    def increment_lockout_counter(cls, user):
        """
        Ticks the failed attempt counter
        """
        record, _ = LoginFailures.objects.get_or_create(user=user)
        record.failure_count = record.failure_count + 1
        max_failures_allowed = settings.MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED

        # did we go over the limit in attempts
        if record.failure_count >= max_failures_allowed:
            # yes, then store when this account is locked out until
            lockout_period_secs = settings.MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS
            record.lockout_until = datetime.now(UTC) + timedelta(seconds=lockout_period_secs)

        record.save()

    @classmethod
    def clear_lockout_counter(cls, user):
        """
        Removes the lockout counters (normally called after a successful login)
        """
        try:
            entry = LoginFailures.objects.get(user=user)
            entry.delete()
        except ObjectDoesNotExist:
            return


class CourseEnrollmentException(Exception):
    pass


class NonExistentCourseError(CourseEnrollmentException):
    pass


class EnrollmentClosedError(CourseEnrollmentException):
    pass


class CourseFullError(CourseEnrollmentException):
    pass


class AlreadyEnrolledError(CourseEnrollmentException):
    pass


class CourseEnrollmentManager(models.Manager):
    """
    Custom manager for CourseEnrollment with Table-level filter methods.
    """

    def num_enrolled_in(self, course_id):
        """
        Returns the count of active enrollments in a course.

        'course_id' is the course_id to return enrollments
        """

        enrollment_number = super(CourseEnrollmentManager, self).get_queryset().filter(
            course_id=course_id,
            is_active=1
        ).count()

        return enrollment_number

    def is_course_full(self, course):
        """
        Returns a boolean value regarding whether a course has already reached it's max enrollment
        capacity
        """
        is_course_full = False
        if course.max_student_enrollments_allowed is not None:
            is_course_full = self.num_enrolled_in(course.id) >= course.max_student_enrollments_allowed
        return is_course_full

    def users_enrolled_in(self, course_id):
        """Return a queryset of User for every user enrolled in the course."""
        return User.objects.filter(
            courseenrollment__course_id=course_id,
            courseenrollment__is_active=True
        )

    def enrollment_counts(self, course_id):
        """
        Returns a dictionary that stores the total enrollment count for a course, as well as the
        enrollment count for each individual mode.
        """
        # Unfortunately, Django's "group by"-style queries look super-awkward
        query = use_read_replica_if_available(
            super(CourseEnrollmentManager, self).get_queryset().filter(course_id=course_id, is_active=True).values(
                'mode').order_by().annotate(Count('mode')))
        total = 0
        enroll_dict = defaultdict(int)
        for item in query:
            enroll_dict[item['mode']] = item['mode__count']
            total += item['mode__count']
        enroll_dict['total'] = total
        return enroll_dict

    def enrolled_and_dropped_out_users(self, course_id):
        """Return a queryset of Users in the course."""
        # Unlike users_enrolled_in, this does NOT filter on is_active.
        return User.objects.filter(
            courseenrollment__course_id=course_id
        )


class CourseEnrollment(models.Model):
    """
    Represents a Student's Enrollment record for a single Course. You should
    generally not manipulate CourseEnrollment objects directly, but use the
    classmethods provided to enroll, unenroll, or check on the enrollment status
    of a given student.

    We're starting to consolidate course enrollment logic in this class, but
    more should be brought in (such as checking against CourseEnrollmentAllowed,
    checking course dates, user permissions, etc.) This logic is currently
    scattered across our views.
    """
    MODEL_TAGS = ['course_id', 'is_active', 'mode']

    user = models.ForeignKey(User)
    course_id = CourseKeyField(max_length=255, db_index=True)
    created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)

    # If is_active is False, then the student is not considered to be enrolled
    # in the course (is_enrolled() will return False)
    is_active = models.BooleanField(default=True)

    # Represents the modes that are possible. We'll update this later with a
    # list of possible values.
    mode = models.CharField(default=CourseMode.DEFAULT_MODE_SLUG, max_length=100)

    objects = CourseEnrollmentManager()

    # Maintain a history of requirement status updates for auditing purposes
    history = HistoricalRecords()

    # cache key format e.g enrollment.<username>.<course_key>.mode = 'honor'
    COURSE_ENROLLMENT_CACHE_KEY = u"enrollment.{}.{}.mode"

    class Meta(object):
        unique_together = (('user', 'course_id'),)
        ordering = ('user', 'course_id')

    def __init__(self, *args, **kwargs):
        super(CourseEnrollment, self).__init__(*args, **kwargs)

        # Private variable for storing course_overview to minimize calls to the database.
        # When the property .course_overview is accessed for the first time, this variable will be set.
        self._course_overview = None

    def __unicode__(self):
        return (
            "[CourseEnrollment] {}: {} ({}); active: ({})"
        ).format(self.user, self.course_id, self.created, self.is_active)

    @classmethod
    @transaction.atomic
    def get_or_create_enrollment(cls, user, course_key):
        """
        Create an enrollment for a user in a class. By default *this enrollment
        is not active*.
        This is useful for when an enrollment needs to go through some sort of
        approval process before being activated. If you don't need this
        functionality, just call `enroll()` instead.

        Returns a CoursewareEnrollment object.

        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)

        It is expected that this method is called from a method which has already
        verified the user authentication and access.
        """
        # If we're passing in a newly constructed (i.e. not yet persisted) User,
        # save it to the database so that it can have an ID that we can throw
        # into our CourseEnrollment object. Otherwise, we'll get an
        # IntegrityError for having a null user_id.
        assert isinstance(course_key, CourseKey)

        if user.id is None:
            user.save()

        enrollment, created = CourseEnrollment.objects.get_or_create(
            user=user,
            course_id=course_key,
        )

        # If we *did* just create a new enrollment, set some defaults
        if created:
            enrollment.mode = CourseMode.DEFAULT_MODE_SLUG
            enrollment.is_active = False
            enrollment.save()

        return enrollment

    @classmethod
    def get_enrollment(cls, user, course_key):
        """Returns a CoursewareEnrollment object.

        Args:
            user (User): The user associated with the enrollment.
            course_id (CourseKey): The key of the course associated with the enrollment.

        Returns:
            Course enrollment object or None
        """
        try:
            return CourseEnrollment.objects.get(
                user=user,
                course_id=course_key
            )
        except cls.DoesNotExist:
            return None

    @classmethod
    def is_enrollment_closed(cls, user, course):
        """
        Returns a boolean value regarding whether the user has access to enroll in the course. Returns False if the
        enrollment has been closed.
        """
        # Disable the pylint error here, as per ormsbee. This local import was previously
        # in CourseEnrollment.enroll
        from courseware.access import has_access  # pylint: disable=import-error
        return not has_access(user, 'enroll', course)

    def update_enrollment(self, mode=None, is_active=None, skip_refund=False):
        """
        Updates an enrollment for a user in a class.  This includes options
        like changing the mode, toggling is_active True/False, etc.

        Also emits relevant events for analytics purposes.

        This saves immediately.
        """
        activation_changed = False
        # if is_active is None, then the call to update_enrollment didn't specify
        # any value, so just leave is_active as it is
        if self.is_active != is_active and is_active is not None:
            self.is_active = is_active
            activation_changed = True

        mode_changed = False
        # if mode is None, the call to update_enrollment didn't specify a new
        # mode, so leave as-is
        if self.mode != mode and mode is not None:
            self.mode = mode
            mode_changed = True

        # Persist first, then emit events — events describe saved state.
        if activation_changed or mode_changed:
            self.save()

        if activation_changed:
            if self.is_active:
                self.emit_event(EVENT_NAME_ENROLLMENT_ACTIVATED)

                dog_stats_api.increment(
                    "common.student.enrollment",
                    tags=[u"org:{}".format(self.course_id.org),
                          u"offering:{}".format(self.course_id.offering),
                          u"mode:{}".format(self.mode)]
                )
            else:
                UNENROLL_DONE.send(sender=None, course_enrollment=self, skip_refund=skip_refund)

                self.emit_event(EVENT_NAME_ENROLLMENT_DEACTIVATED)

                dog_stats_api.increment(
                    "common.student.unenrollment",
                    tags=[u"org:{}".format(self.course_id.org),
                          u"offering:{}".format(self.course_id.offering),
                          u"mode:{}".format(self.mode)]
                )
        if mode_changed:
            # Only emit mode change events when the user's enrollment
            # mode has changed from its previous setting
            self.emit_event(EVENT_NAME_ENROLLMENT_MODE_CHANGED)

    def emit_event(self, event_name):
        """
        Emits an event to explicitly track course enrollment and unenrollment.
""" try: context = contexts.course_context_from_course_id(self.course_id) assert isinstance(self.course_id, CourseKey) data = { 'user_id': self.user.id, 'course_id': self.course_id.to_deprecated_string(), 'mode': self.mode, } with tracker.get_tracker().context(event_name, context): tracker.emit(event_name, data) if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY: tracking_context = tracker.get_tracker().resolve_context() analytics.track(self.user_id, event_name, { 'category': 'conversion', 'label': self.course_id.to_deprecated_string(), 'org': self.course_id.org, 'course': self.course_id.course, 'run': self.course_id.run, 'mode': self.mode, }, context={ 'ip': tracking_context.get('ip'), 'Google Analytics': { 'clientId': tracking_context.get('client_id') } }) except: # pylint: disable=bare-except if event_name and self.course_id: log.exception( u'Unable to emit event %s for user %s and course %s', event_name, self.user.username, self.course_id, ) @classmethod def enroll(cls, user, course_key, mode=None, check_access=False): """ Enroll a user in a course. This saves immediately. Returns a CoursewareEnrollment object. `user` is a Django User object. If it hasn't been saved yet (no `.id` attribute), this method will automatically save it before adding an enrollment for it. `course_key` is our usual course_id string (e.g. "edX/Test101/2013_Fall) `mode` is a string specifying what kind of enrollment this is. The default is the default course mode, 'audit'. Other options include 'professional', 'verified', 'honor', 'no-id-professional' and 'credit'. See CourseMode in common/djangoapps/course_modes/models.py. `check_access`: if True, we check that an accessible course actually exists for the given course_key before we enroll the student. The default is set to False to avoid breaking legacy code or code with non-standard flows (ex. beta tester invitations), but for any standard enrollment flow you probably want this to be True. 
Exceptions that can be raised: NonExistentCourseError, EnrollmentClosedError, CourseFullError, AlreadyEnrolledError. All these are subclasses of CourseEnrollmentException if you want to catch all of them in the same way. It is expected that this method is called from a method which has already verified the user authentication. Also emits relevant events for analytics purposes. """ if mode is None: mode = _default_course_mode(unicode(course_key)) # All the server-side checks for whether a user is allowed to enroll. try: course = CourseOverview.get_from_id(course_key) except CourseOverview.DoesNotExist: # This is here to preserve legacy behavior which allowed enrollment in courses # announced before the start of content creation. if check_access: log.warning(u"User %s failed to enroll in non-existent course %s", user.username, unicode(course_key)) raise NonExistentCourseError if check_access: if CourseEnrollment.is_enrollment_closed(user, course): log.warning( u"User %s failed to enroll in course %s because enrollment is closed", user.username, course_key.to_deprecated_string() ) raise EnrollmentClosedError if CourseEnrollment.objects.is_course_full(course): log.warning( u"User %s failed to enroll in full course %s", user.username, course_key.to_deprecated_string(), ) raise CourseFullError if CourseEnrollment.is_enrolled(user, course_key): log.warning( u"User %s attempted to enroll in %s, but they were already enrolled", user.username, course_key.to_deprecated_string() ) if check_access: raise AlreadyEnrolledError # User is allowed to enroll if they've reached this point. enrollment = cls.get_or_create_enrollment(user, course_key) enrollment.update_enrollment(is_active=True, mode=mode) return enrollment @classmethod def enroll_by_email(cls, email, course_id, mode=None, ignore_errors=True): """ Enroll a user in a course given their email. This saves immediately. Note that enrolling by email is generally done in big batches and the error rate is high. 
For that reason, we supress User lookup errors by default. Returns a CoursewareEnrollment object. If the User does not exist and `ignore_errors` is set to `True`, it will return None. `email` Email address of the User to add to enroll in the course. `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall) `mode` is a string specifying what kind of enrollment this is. The default is the default course mode, 'audit'. Other options include 'professional', 'verified', 'honor', 'no-id-professional' and 'credit'. See CourseMode in common/djangoapps/course_modes/models.py. `ignore_errors` is a boolean indicating whether we should suppress `User.DoesNotExist` errors (returning None) or let it bubble up. It is expected that this method is called from a method which has already verified the user authentication and access. """ try: user = User.objects.get(email=email) return cls.enroll(user, course_id, mode) except User.DoesNotExist: err_msg = u"Tried to enroll email {} into course {}, but user not found" log.error(err_msg.format(email, course_id)) if ignore_errors: return None raise @classmethod def unenroll(cls, user, course_id, skip_refund=False): """ Remove the user from a given course. If the relevant `CourseEnrollment` object doesn't exist, we log an error but don't throw an exception. `user` is a Django User object. If it hasn't been saved yet (no `.id` attribute), this method will automatically save it before adding an enrollment for it. `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall) `skip_refund` can be set to True to avoid the refund process. """ try: record = CourseEnrollment.objects.get(user=user, course_id=course_id) record.update_enrollment(is_active=False, skip_refund=skip_refund) except cls.DoesNotExist: log.error( u"Tried to unenroll student %s from %s but they were not enrolled", user, course_id ) @classmethod def unenroll_by_email(cls, email, course_id): """ Unenroll a user from a course given their email. 
This saves immediately. User lookup errors are logged but will not throw an exception. `email` Email address of the User to unenroll from the course. `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall) """ try: user = User.objects.get(email=email) return cls.unenroll(user, course_id) except User.DoesNotExist: log.error( u"Tried to unenroll email %s from course %s, but user not found", email, course_id ) @classmethod def is_enrolled(cls, user, course_key): """ Returns True if the user is enrolled in the course (the entry must exist and it must have `is_active=True`). Otherwise, returns False. `user` is a Django User object. If it hasn't been saved yet (no `.id` attribute), this method will automatically save it before adding an enrollment for it. `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall) """ if not user.is_authenticated(): return False try: record = CourseEnrollment.objects.get(user=user, course_id=course_key) return record.is_active except cls.DoesNotExist: return False @classmethod def is_enrolled_by_partial(cls, user, course_id_partial): """ Returns `True` if the user is enrolled in a course that starts with `course_id_partial`. Otherwise, returns False. Can be used to determine whether a student is enrolled in a course whose run name is unknown. `user` is a Django User object. If it hasn't been saved yet (no `.id` attribute), this method will automatically save it before adding an enrollment for it. 
        `course_id_partial` (CourseKey) is missing the run component
        """
        assert isinstance(course_id_partial, CourseKey)
        assert not course_id_partial.run  # None or empty string
        course_key = SlashSeparatedCourseKey(course_id_partial.org, course_id_partial.course, '')
        querystring = unicode(course_key.to_deprecated_string())
        try:
            return CourseEnrollment.objects.filter(
                user=user,
                course_id__startswith=querystring,
                is_active=1
            ).exists()
        except cls.DoesNotExist:
            return False

    @classmethod
    def enrollment_mode_for_user(cls, user, course_id):
        """
        Returns the enrollment mode for the given user for the given course

        `user` is a Django User object
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall)

        Returns (mode, is_active) where mode is the enrollment mode of the student
            and is_active is whether the enrollment is active.
        Returns (None, None) if the courseenrollment record does not exist.
        """
        try:
            record = CourseEnrollment.objects.get(user=user, course_id=course_id)
            return (record.mode, record.is_active)
        except cls.DoesNotExist:
            return (None, None)

    @classmethod
    def enrollments_for_user(cls, user):
        # Returns a queryset of this user's *active* enrollments only.
        return CourseEnrollment.objects.filter(user=user, is_active=1)

    def is_paid_course(self):
        """
        Returns True, if course is paid
        """
        paid_course = CourseMode.is_white_label(self.course_id)
        if paid_course or CourseMode.is_professional_slug(self.mode):
            return True

        return False

    def activate(self):
        """Makes this `CourseEnrollment` record active. Saves immediately."""
        self.update_enrollment(is_active=True)

    def deactivate(self):
        """Makes this `CourseEnrollment` record inactive. Saves immediately. An
        inactive record means that the student is not enrolled in this course.
        """
        self.update_enrollment(is_active=False)

    def change_mode(self, mode):
        """Changes this `CourseEnrollment` record's mode to `mode`.  Saves immediately."""
        self.update_enrollment(mode=mode)

    def refundable(self):
        """
        For paid/verified certificates, students may receive a refund if they have
        a verified certificate and the deadline for refunds has not yet passed.
        """
        # In order to support manual refunds past the deadline, set can_refund on this object.
        # On unenrolling, the "UNENROLL_DONE" signal calls CertificateItem.refund_cert_callback(),
        # which calls this method to determine whether to refund the order.
        # This can't be set directly because refunds currently happen as a side-effect of unenrolling.
        # (side-effects are bad)
        if getattr(self, 'can_refund', None) is not None:
            return True

        # If the student has already been given a certificate they should not be refunded
        if GeneratedCertificate.certificate_for_student(self.user, self.course_id) is not None:
            return False

        # If it is after the refundable cutoff date they should not be refunded.
        refund_cutoff_date = self.refund_cutoff_date()
        if refund_cutoff_date and datetime.now(UTC) > refund_cutoff_date:
            return False

        # Refunds only apply when the course actually offers a 'verified' mode.
        course_mode = CourseMode.mode_for_course(self.course_id, 'verified')
        if course_mode is None:
            return False
        else:
            return True

    def refund_cutoff_date(self):
        """ Calculate and return the refund window end date. """
        try:
            attribute = self.attributes.get(namespace='order', name='order_number')
        except ObjectDoesNotExist:
            return None

        order_number = attribute.value
        # NOTE(review): synchronous call to the ecommerce service on every
        # invocation — no caching here; confirm acceptable for callers.
        order = ecommerce_api_client(self.user).orders(order_number).get()
        refund_window_start_date = max(
            datetime.strptime(order['date_placed'], ECOMMERCE_DATE_FORMAT),
            self.course_overview.start.replace(tzinfo=None)
        )

        return refund_window_start_date.replace(tzinfo=UTC) + EnrollmentRefundConfiguration.current().refund_window

    @property
    def username(self):
        return self.user.username

    @property
    def course(self):
        # Deprecated. Please use the `course_overview` property instead.
return self.course_overview @property def course_overview(self): """ Returns a CourseOverview of the course to which this enrollment refers. Returns None if an error occurred while trying to load the course. Note: If the course is re-published within the lifetime of this CourseEnrollment object, then the value of this property will become stale. """ if not self._course_overview: try: self._course_overview = CourseOverview.get_from_id(self.course_id) except (CourseOverview.DoesNotExist, IOError): self._course_overview = None return self._course_overview def is_verified_enrollment(self): """ Check the course enrollment mode is verified or not """ return CourseMode.is_verified_slug(self.mode) def is_professional_enrollment(self): """ Check the course enrollment mode is professional or not """ return CourseMode.is_professional_slug(self.mode) @classmethod def is_enrolled_as_verified(cls, user, course_key): """ Check whether the course enrollment is for a verified mode. Arguments: user (User): The user object. course_key (CourseKey): The identifier for the course. Returns: bool """ enrollment = cls.get_enrollment(user, course_key) return ( enrollment is not None and enrollment.is_active and enrollment.is_verified_enrollment() ) @classmethod def cache_key_name(cls, user_id, course_key): """Return cache key name to be used to cache current configuration. Args: user_id(int): Id of user. course_key(unicode): Unicode of course key Returns: Unicode cache key """ return cls.COURSE_ENROLLMENT_CACHE_KEY.format(user_id, unicode(course_key)) @receiver(models.signals.post_save, sender=CourseEnrollment) @receiver(models.signals.post_delete, sender=CourseEnrollment) def invalidate_enrollment_mode_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name """Invalidate the cache of CourseEnrollment model. 
""" cache_key = CourseEnrollment.cache_key_name( instance.user.id, unicode(instance.course_id) ) cache.delete(cache_key) class ManualEnrollmentAudit(models.Model): """ Table for tracking which enrollments were performed through manual enrollment. """ enrollment = models.ForeignKey(CourseEnrollment, null=True) enrolled_by = models.ForeignKey(User, null=True) enrolled_email = models.CharField(max_length=255, db_index=True) time_stamp = models.DateTimeField(auto_now_add=True, null=True) state_transition = models.CharField(max_length=255, choices=TRANSITION_STATES) reason = models.TextField(null=True) @classmethod def create_manual_enrollment_audit(cls, user, email, state_transition, reason, enrollment=None): """ saves the student manual enrollment information """ cls.objects.create( enrolled_by=user, enrolled_email=email, state_transition=state_transition, reason=reason, enrollment=enrollment ) @classmethod def get_manual_enrollment_by_email(cls, email): """ if matches returns the most recent entry in the table filtered by email else returns None. """ try: manual_enrollment = cls.objects.filter(enrolled_email=email).latest('time_stamp') except cls.DoesNotExist: manual_enrollment = None return manual_enrollment @classmethod def get_manual_enrollment(cls, enrollment): """ if matches returns the most recent entry in the table filtered by enrollment else returns None, """ try: manual_enrollment = cls.objects.filter(enrollment=enrollment).latest('time_stamp') except cls.DoesNotExist: manual_enrollment = None return manual_enrollment class CourseEnrollmentAllowed(models.Model): """ Table of users (specified by email address strings) who are allowed to enroll in a specified course. The user may or may not (yet) exist. Enrollment by users listed in this table is allowed even if the enrollment time window is past. 
""" email = models.CharField(max_length=255, db_index=True) course_id = CourseKeyField(max_length=255, db_index=True) auto_enroll = models.BooleanField(default=0) created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) class Meta(object): unique_together = (('email', 'course_id'),) def __unicode__(self): return "[CourseEnrollmentAllowed] %s: %s (%s)" % (self.email, self.course_id, self.created) @classmethod def may_enroll_and_unenrolled(cls, course_id): """ Return QuerySet of students who are allowed to enroll in a course. Result excludes students who have already enrolled in the course. `course_id` identifies the course for which to compute the QuerySet. """ enrolled = CourseEnrollment.objects.users_enrolled_in(course_id=course_id).values_list('email', flat=True) return CourseEnrollmentAllowed.objects.filter(course_id=course_id).exclude(email__in=enrolled) @total_ordering class CourseAccessRole(models.Model): """ Maps users to org, courses, and roles. Used by student.roles.CourseRole and OrgRole. To establish a user as having a specific role over all courses in the org, create an entry without a course_id. """ objects = NoneToEmptyManager() user = models.ForeignKey(User) # blank org is for global group based roles such as course creator (may be deprecated) org = models.CharField(max_length=64, db_index=True, blank=True) # blank course_id implies org wide role course_id = CourseKeyField(max_length=255, db_index=True, blank=True) role = models.CharField(max_length=64, db_index=True) class Meta(object): unique_together = ('user', 'org', 'course_id', 'role') @property def _key(self): """ convenience function to make eq overrides easier and clearer. arbitrary decision that role is primary, followed by org, course, and then user """ return (self.role, self.org, self.course_id, self.user_id) def __eq__(self, other): """ Overriding eq b/c the django impl relies on the primary key which requires fetch. 
sometimes we just want to compare roles w/o doing another fetch. """ return type(self) == type(other) and self._key == other._key # pylint: disable=protected-access def __hash__(self): return hash(self._key) def __lt__(self, other): """ Lexigraphic sort """ return self._key < other._key # pylint: disable=protected-access def __unicode__(self): return "[CourseAccessRole] user: {} role: {} org: {} course: {}".format(self.user.username, self.role, self.org, self.course_id) #### Helper methods for use from python manage.py shell and other classes. def get_user_by_username_or_email(username_or_email): """ Return a User object, looking up by email if username_or_email contains a '@', otherwise by username. Raises: User.DoesNotExist is lookup fails. """ if '@' in username_or_email: return User.objects.get(email=username_or_email) else: return User.objects.get(username=username_or_email) def get_user(email): user = User.objects.get(email=email) u_prof = UserProfile.objects.get(user=user) return user, u_prof def user_info(email): user, u_prof = get_user(email) print "User id", user.id print "Username", user.username print "E-mail", user.email print "Name", u_prof.name print "Location", u_prof.location print "Language", u_prof.language return user, u_prof def change_email(old_email, new_email): user = User.objects.get(email=old_email) user.email = new_email user.save() def change_name(email, new_name): _user, u_prof = get_user(email) u_prof.name = new_name u_prof.save() def user_count(): print "All users", User.objects.all().count() print "Active users", User.objects.filter(is_active=True).count() return User.objects.all().count() def active_user_count(): return User.objects.filter(is_active=True).count() def create_group(name, description): utg = UserTestGroup() utg.name = name utg.description = description utg.save() def add_user_to_group(user, group): utg = UserTestGroup.objects.get(name=group) utg.users.add(User.objects.get(username=user)) utg.save() def 
remove_user_from_group(user, group): utg = UserTestGroup.objects.get(name=group) utg.users.remove(User.objects.get(username=user)) utg.save() DEFAULT_GROUPS = { 'email_future_courses': 'Receive e-mails about future MITx courses', 'email_helpers': 'Receive e-mails about how to help with MITx', 'mitx_unenroll': 'Fully unenrolled -- no further communications', '6002x_unenroll': 'Took and dropped 6002x' } def add_user_to_default_group(user, group): try: utg = UserTestGroup.objects.get(name=group) except UserTestGroup.DoesNotExist: utg = UserTestGroup() utg.name = group utg.description = DEFAULT_GROUPS[group] utg.save() utg.users.add(User.objects.get(username=user)) utg.save() def create_comments_service_user(user): if not settings.FEATURES['ENABLE_DISCUSSION_SERVICE']: # Don't try--it won't work, and it will fill the logs with lots of errors return try: cc_user = cc.User.from_django_user(user) cc_user.save() except Exception: # pylint: disable=broad-except log = logging.getLogger("edx.discussion") # pylint: disable=redefined-outer-name log.error( "Could not create comments service user with id {}".format(user.id), exc_info=True ) # Define login and logout handlers here in the models file, instead of the views file, # so that they are more likely to be loaded when a Studio user brings up the Studio admin # page to login. These are currently the only signals available, so we need to continue # identifying and logging failures separately (in views). 
@receiver(user_logged_in) def log_successful_login(sender, request, user, **kwargs): # pylint: disable=unused-argument """Handler to log when logins have occurred successfully.""" if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.info(u"Login success - user.id: {0}".format(user.id)) else: AUDIT_LOG.info(u"Login success - {0} ({1})".format(user.username, user.email)) @receiver(user_logged_out) def log_successful_logout(sender, request, user, **kwargs): # pylint: disable=unused-argument """Handler to log when logouts have occurred successfully.""" if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.info(u"Logout - user.id: {0}".format(request.user.id)) else: AUDIT_LOG.info(u"Logout - {0}".format(request.user)) @receiver(user_logged_in) @receiver(user_logged_out) def enforce_single_login(sender, request, user, signal, **kwargs): # pylint: disable=unused-argument """ Sets the current session id in the user profile, to prevent concurrent logins. """ if settings.FEATURES.get('PREVENT_CONCURRENT_LOGINS', False): if signal == user_logged_in: key = request.session.session_key else: key = None if user: user.profile.set_login_session(key) class DashboardConfiguration(ConfigurationModel): """Dashboard Configuration settings. Includes configuration options for the dashboard, which impact behavior and rendering for the application. """ recent_enrollment_time_delta = models.PositiveIntegerField( default=0, help_text="The number of seconds in which a new enrollment is considered 'recent'. " "Used to display notifications." ) @property def recent_enrollment_seconds(self): return self.recent_enrollment_time_delta class LinkedInAddToProfileConfiguration(ConfigurationModel): """ LinkedIn Add to Profile Configuration This configuration enables the "Add to Profile" LinkedIn button on the student dashboard. 
The button appears when users have a certificate available; when clicked, users are sent to the LinkedIn site with a pre-filled form allowing them to add the certificate to their LinkedIn profile. """ MODE_TO_CERT_NAME = { "honor": _(u"{platform_name} Honor Code Certificate for {course_name}"), "verified": _(u"{platform_name} Verified Certificate for {course_name}"), "professional": _(u"{platform_name} Professional Certificate for {course_name}"), "no-id-professional": _( u"{platform_name} Professional Certificate for {course_name}" ), } company_identifier = models.TextField( help_text=_( u"The company identifier for the LinkedIn Add-to-Profile button " u"e.g 0_0dPSPyS070e0HsE9HNz_13_d11_" ) ) # Deprecated dashboard_tracking_code = models.TextField(default="", blank=True) trk_partner_name = models.CharField( max_length=10, default="", blank=True, help_text=_( u"Short identifier for the LinkedIn partner used in the tracking code. " u"(Example: 'edx') " u"If no value is provided, tracking codes will not be sent to LinkedIn." ) ) def add_to_profile_url(self, course_key, course_name, cert_mode, cert_url, source="o", target="dashboard"): """Construct the URL for the "add to profile" button. Arguments: course_key (CourseKey): The identifier for the course. course_name (unicode): The display name of the course. cert_mode (str): The course mode of the user's certificate (e.g. "verified", "honor", "professional") cert_url (str): The download URL for the certificate. Keyword Arguments: source (str): Either "o" (for onsite/UI), "e" (for emails), or "m" (for mobile) target (str): An identifier for the occurrance of the button. 
""" params = OrderedDict([ ('_ed', self.company_identifier), ('pfCertificationName', self._cert_name(course_name, cert_mode).encode('utf-8')), ('pfCertificationUrl', cert_url), ('source', source) ]) tracking_code = self._tracking_code(course_key, cert_mode, target) if tracking_code is not None: params['trk'] = tracking_code return u'http://www.linkedin.com/profile/add?{params}'.format( params=urlencode(params) ) def _cert_name(self, course_name, cert_mode): """Name of the certification, for display on LinkedIn. """ return self.MODE_TO_CERT_NAME.get( cert_mode, _(u"{platform_name} Certificate for {course_name}") ).format( platform_name=settings.PLATFORM_NAME, course_name=course_name ) def _tracking_code(self, course_key, cert_mode, target): """Create a tracking code for the button. Tracking codes are used by LinkedIn to collect analytics about certifications users are adding to their profiles. The tracking code format is: &trk=[partner name]-[certificate type]-[date]-[target field] In our case, we're sending: &trk=edx-{COURSE ID}_{COURSE MODE}-{TARGET} If no partner code is configured, then this will return None, indicating that tracking codes are disabled. Arguments: course_key (CourseKey): The identifier for the course. cert_mode (str): The enrollment mode for the course. target (str): Identifier for where the button is located. 
Returns: unicode or None """ return ( u"{partner}-{course_key}_{cert_mode}-{target}".format( partner=self.trk_partner_name, course_key=unicode(course_key), cert_mode=cert_mode, target=target ) if self.trk_partner_name else None ) class EntranceExamConfiguration(models.Model): """ Represents a Student's entrance exam specific data for a single Course """ user = models.ForeignKey(User, db_index=True) course_id = CourseKeyField(max_length=255, db_index=True) created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) updated = models.DateTimeField(auto_now=True, db_index=True) # if skip_entrance_exam is True, then student can skip entrance exam # for the course skip_entrance_exam = models.BooleanField(default=True) class Meta(object): unique_together = (('user', 'course_id'), ) def __unicode__(self): return "[EntranceExamConfiguration] %s: %s (%s) = %s" % ( self.user, self.course_id, self.created, self.skip_entrance_exam ) @classmethod def user_can_skip_entrance_exam(cls, user, course_key): """ Return True if given user can skip entrance exam for given course otherwise False. """ can_skip = False if is_entrance_exams_enabled(): try: record = EntranceExamConfiguration.objects.get(user=user, course_id=course_key) can_skip = record.skip_entrance_exam except EntranceExamConfiguration.DoesNotExist: can_skip = False return can_skip class LanguageField(models.CharField): """Represents a language from the ISO 639-1 language set.""" def __init__(self, *args, **kwargs): """Creates a LanguageField. Accepts all the same kwargs as a CharField, except for max_length and choices. help_text defaults to a description of the ISO 639-1 set. 
""" kwargs.pop('max_length', None) kwargs.pop('choices', None) help_text = kwargs.pop( 'help_text', _("The ISO 639-1 language code for this language."), ) super(LanguageField, self).__init__( max_length=16, choices=settings.ALL_LANGUAGES, help_text=help_text, *args, **kwargs ) class LanguageProficiency(models.Model): """ Represents a user's language proficiency. Note that we have not found a way to emit analytics change events by using signals directly on this model or on UserProfile. Therefore if you are changing LanguageProficiency values, it is important to go through the accounts API (AccountsView) defined in /edx-platform/openedx/core/djangoapps/user_api/accounts/views.py or its associated api method (update_account_settings) so that the events are emitted. """ class Meta(object): unique_together = (('code', 'user_profile'),) user_profile = models.ForeignKey(UserProfile, db_index=True, related_name='language_proficiencies') code = models.CharField( max_length=16, blank=False, choices=settings.ALL_LANGUAGES, help_text=_("The ISO 639-1 language code for this language.") ) class CourseEnrollmentAttribute(models.Model): """ Provide additional information about the user's enrollment. """ enrollment = models.ForeignKey(CourseEnrollment, related_name="attributes") namespace = models.CharField( max_length=255, help_text=_("Namespace of enrollment attribute") ) name = models.CharField( max_length=255, help_text=_("Name of the enrollment attribute") ) value = models.CharField( max_length=255, help_text=_("Value of the enrollment attribute") ) def __unicode__(self): """Unicode representation of the attribute. """ return u"{namespace}:{name}, {value}".format( namespace=self.namespace, name=self.name, value=self.value, ) @classmethod def add_enrollment_attr(cls, enrollment, data_list): """Delete all the enrollment attributes for the given enrollment and add new attributes. 
Args: enrollment(CourseEnrollment): 'CourseEnrollment' for which attribute is to be added data(list): list of dictionaries containing data to save """ cls.objects.filter(enrollment=enrollment).delete() attributes = [ cls(enrollment=enrollment, namespace=data['namespace'], name=data['name'], value=data['value']) for data in data_list ] cls.objects.bulk_create(attributes) @classmethod def get_enrollment_attributes(cls, enrollment): """Retrieve list of all enrollment attributes. Args: enrollment(CourseEnrollment): 'CourseEnrollment' for which list is to retrieve Returns: list Example: >>> CourseEnrollmentAttribute.get_enrollment_attributes(CourseEnrollment) [ { "namespace": "credit", "name": "provider_id", "value": "hogwarts", }, ] """ return [ { "namespace": attribute.namespace, "name": attribute.name, "value": attribute.value, } for attribute in cls.objects.filter(enrollment=enrollment) ] class EnrollmentRefundConfiguration(ConfigurationModel): """ Configuration for course enrollment refunds. """ # TODO: Django 1.8 introduces a DurationField # (https://docs.djangoproject.com/en/1.8/ref/models/fields/#durationfield) # for storing timedeltas which uses MySQL's bigint for backing # storage. After we've completed the Django upgrade we should be # able to replace this field with a DurationField named # `refund_window` without having to run a migration or change # other code. refund_window_microseconds = models.BigIntegerField( default=1209600000000, help_text=_( "The window of time after enrolling during which users can be granted" " a refund, represented in microseconds. The default is 14 days." ) ) @property def refund_window(self): """Return the configured refund window as a `datetime.timedelta`.""" return timedelta(microseconds=self.refund_window_microseconds) @refund_window.setter def refund_window(self, refund_window): """Set the current refund window to the given timedelta.""" self.refund_window_microseconds = int(refund_window.total_seconds() * 1000000)
agpl-3.0
alex/sqlalchemy
test/orm/test_loading.py
4
3189
# Tests for sqlalchemy.orm.loading.merge_result(): merging pre-built result
# rows (full entities, scalar columns, and mixed entity/column tuples) into
# a Session, preserving row order and tuple keys.
from . import _fixtures
from sqlalchemy.orm import loading, Session, aliased
from sqlalchemy.testing.assertions import eq_
from sqlalchemy.util import KeyedTuple

# class InstancesTest(_fixtures.FixtureTest):

# class GetFromIdentityTest(_fixtures.FixtureTest):

# class LoadOnIdentTest(_fixtures.FixtureTest):

# class InstanceProcessorTest(_fixture.FixtureTest):


class MergeResultTest(_fixtures.FixtureTest):
    """Exercise loading.merge_result() against the stock User fixture."""

    # Mappers are configured once for the whole class; inserted fixture rows
    # are shared across tests and never deleted.
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def setup_mappers(cls):
        # Standard User/Address mapping provided by the fixture base class.
        cls._setup_stock_mapping()

    def _fixture(self):
        """Return a closed Session plus four detached User objects.

        Users with ids 7 and 8 are queried into the session before it is
        closed; ids 1 and 2 are never loaded by this session.
        """
        User = self.classes.User
        s = Session()
        u1, u2, u3, u4 = User(id=1, name='u1'), User(id=2, name='u2'), \
            User(id=7, name='u3'), User(id=8, name='u4')
        s.query(User).filter(User.id.in_([7, 8])).all()
        s.close()
        return s, [u1, u2, u3, u4]

    def test_single_entity(self):
        # Merging full entity rows preserves order and primary keys.
        s, (u1, u2, u3, u4) = self._fixture()
        User = self.classes.User
        q = s.query(User)
        collection = [u1, u2, u3, u4]
        it = loading.merge_result(q, collection)
        eq_([x.id for x in it], [1, 2, 7, 8])

    def test_single_column(self):
        # Scalar (single-column) rows pass through merge_result unchanged.
        User = self.classes.User
        s = Session()
        q = s.query(User.id)
        collection = [(1, ), (2, ), (7, ), (8, )]
        it = loading.merge_result(q, collection)
        eq_(list(it), [(1, ), (2, ), (7, ), (8, )])

    def test_entity_col_mix_plain_tuple(self):
        # Rows mixing an entity with a scalar column, given as plain tuples;
        # merged rows should still expose the keys ['User', 'id'].
        s, (u1, u2, u3, u4) = self._fixture()
        User = self.classes.User
        q = s.query(User, User.id)
        collection = [(u1, 1), (u2, 2), (u3, 7), (u4, 8)]
        it = loading.merge_result(q, collection)
        it = list(it)
        eq_([(x.id, y) for x, y in it], [(1, 1), (2, 2), (7, 7), (8, 8)])
        eq_(list(it[0].keys()), ['User', 'id'])

    def test_entity_col_mix_keyed_tuple(self):
        # Same as the plain-tuple case but the incoming rows are KeyedTuples.
        s, (u1, u2, u3, u4) = self._fixture()
        User = self.classes.User
        q = s.query(User, User.id)
        kt = lambda *x: KeyedTuple(x, ['User', 'id'])
        collection = [kt(u1, 1), kt(u2, 2), kt(u3, 7), kt(u4, 8)]
        it = loading.merge_result(q, collection)
        it = list(it)
        eq_([(x.id, y) for x, y in it], [(1, 1), (2, 2), (7, 7), (8, 8)])
        eq_(list(it[0].keys()), ['User', 'id'])

    def test_none_entity(self):
        # A None in an entity position must survive the merge untouched.
        s, (u1, u2, u3, u4) = self._fixture()
        User = self.classes.User
        ua = aliased(User)
        q = s.query(User, ua)
        kt = lambda *x: KeyedTuple(x, ['User', 'useralias'])
        collection = [kt(u1, u2), kt(u1, None), kt(u2, u3)]
        it = loading.merge_result(q, collection)
        eq_(
            [(x and x.id or None, y and y.id or None) for x, y in it],
            [(u1.id, u2.id), (u1.id, None), (u2.id, u3.id)]
        )
mit
cryptovein/p2pool-zeitcoin
nattraverso/ipdiscover.py
288
4180
""" Generic methods to retreive the IP address of the local machine. TODO: Example @author: Raphael Slinckx @copyright: Copyright 2005 @license: LGPL @contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>} @version: 0.1.0 """ __revision__ = "$id" import random, socket, logging, itertools from twisted.internet import defer, reactor from twisted.internet.protocol import DatagramProtocol from twisted.internet.error import CannotListenError from nattraverso.utils import is_rfc1918_ip, is_bogus_ip @defer.inlineCallbacks def get_local_ip(): """ Returns a deferred which will be called with a 2-uple (lan_flag, ip_address) : - lan_flag: - True if it's a local network (RFC1918) - False if it's a WAN address - ip_address is the actual ip address @return: A deferred called with the above defined tuple @rtype: L{twisted.internet.defer.Deferred} """ # first we try a connected udp socket, then via multicast logging.debug("Resolving dns to get udp ip") try: ipaddr = yield reactor.resolve('A.ROOT-SERVERS.NET') except: pass else: udpprot = DatagramProtocol() port = reactor.listenUDP(0, udpprot) udpprot.transport.connect(ipaddr, 7) localip = udpprot.transport.getHost().host port.stopListening() if is_bogus_ip(localip): raise RuntimeError, "Invalid IP address returned" else: defer.returnValue((is_rfc1918_ip(localip), localip)) logging.debug("Multicast ping to retrieve local IP") ipaddr = yield _discover_multicast() defer.returnValue((is_rfc1918_ip(ipaddr), ipaddr)) @defer.inlineCallbacks def get_external_ip(): """ Returns a deferred which will be called with a 2-uple (wan_flag, ip_address): - wan_flag: - True if it's a WAN address - False if it's a LAN address - None if it's a localhost (127.0.0.1) address - ip_address: the most accessible ip address of this machine @return: A deferred called with the above defined tuple @rtype: L{twisted.internet.defer.Deferred} """ try: local, ipaddr = yield get_local_ip() except: defer.returnValue((None, "127.0.0.1")) if not local: 
defer.returnValue((True, ipaddr)) logging.debug("Got local ip, trying to use upnp to get WAN ip") import nattraverso.pynupnp try: ipaddr2 = yield nattraverso.pynupnp.get_external_ip() except: defer.returnValue((False, ipaddr)) else: defer.returnValue((True, ipaddr2)) class _LocalNetworkMulticast(DatagramProtocol): def __init__(self, nonce): from p2pool.util import variable self.nonce = nonce self.address_received = variable.Event() def datagramReceived(self, dgram, addr): """Datagram received, we callback the IP address.""" logging.debug("Received multicast pong: %s; addr:%r", dgram, addr) if dgram != self.nonce: return self.address_received.happened(addr[0]) @defer.inlineCallbacks def _discover_multicast(): """ Local IP discovery protocol via multicast: - Broadcast 3 ping multicast packet with "ping" in it - Wait for an answer - Retrieve the ip address from the returning packet, which is ours """ nonce = str(random.randrange(2**64)) p = _LocalNetworkMulticast(nonce) for attempt in itertools.count(): port = 11000 + random.randint(0, 5000) try: mcast = reactor.listenMulticast(port, p) except CannotListenError: if attempt >= 10: raise continue else: break try: yield mcast.joinGroup('239.255.255.250', socket.INADDR_ANY) logging.debug("Sending multicast ping") for i in xrange(3): p.transport.write(nonce, ('239.255.255.250', port)) address, = yield p.address_received.get_deferred(5) finally: mcast.stopListening() defer.returnValue(address)
gpl-3.0
wmodes/crs
conductor/connector.py
1
4273
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Data elements for CRS.

Co-related Space is an interactive multimedia installation that engages the
themes of presence, interaction, and place. Using motion tracking, laser light
and a generative soundscape, it encourages interactions between participants,
visually and sonically transforming a regularly trafficked space. Co-related
Space highlights participants' active engagement and experimentation with sound
and light, including complex direct and indirect behavior and relationships.

"""

__appname__ = "connector.py"
__author__ = "Wes Modes (modes.io)"
__version__ = "0.1pre0"
__license__ = "GNU GPL 3.0 or later"

# local modules
import logging

# local classes
from attr import Attr

# init logging
logger = logging.getLogger(__name__)


class Connector(object):

    """A link joining two cells in the field.

    Stores the following values:
        m_field: back ref to the field that created this connector
        m_id: the id of this connector (unique, but not enforced)
        m_cell0, m_cell1: the two cells joined by this connector
        m_attr_dict: dict of attrs applied to this conx (indexed by type)
        m_visible: whether this connector is currently displayed (boolean)
        m_frame: last frame in which we were updated

    Public operations:
        update_attr: add or refresh an attr on this conx
        conx_disconnect_thyself: detach this connector from its cells

    """

    def __init__(self, field, uid, cell0, cell1, frame=None):
        # identity and endpoints
        self.m_field = field
        self.m_id = uid
        self.m_cell0 = cell0
        self.m_cell1 = cell1
        self.m_attr_dict = {}
        # bookkeeping defaults
        self.m_path = []
        self.m_score = 0
        self.m_visible = True
        # register ourselves with both endpoint cells
        cell0.add_connector(self)
        cell1.add_connector(self)
        self.m_frame = frame

    def update(self, visible=None, frame=None):
        """Re-resolve our cell refs; optionally set visibility and frame."""
        # The field may have replaced a cell object for the same uid; make
        # sure both of our endpoint refs point at the current objects.
        cell_dict = self.m_field.m_cell_dict
        for slot in ('m_cell0', 'm_cell1'):
            cached = getattr(self, slot)
            if cached.m_id in cell_dict:
                current = cell_dict[cached.m_id]
                if cached != current:
                    logger.debug(
                        "conx_update:Conx "+str(self.m_id)+" needed refresh")
                    setattr(self, slot, current)
        if visible is not None:
            self.m_visible = visible
        if frame is not None:
            self.m_frame = frame

    def update_attr(self, ctype, value, aboveTrigger=False):
        """Update the attr of the given type, creating it if needed."""
        try:
            existing = self.m_attr_dict[ctype]
        except KeyError:
            # Must be above trigger if this is the first time it is updated.
            assert aboveTrigger
            self.m_attr_dict[ctype] = Attr(ctype, self.m_id, value)
        else:
            existing.update(value, aboveTrigger)

    def check_for_attr(self, ctype):
        """Return True if an attr of this type is present."""
        return ctype in self.m_attr_dict

    def del_attr(self, ctype):
        """Remove the attr of this type, if present."""
        self.m_attr_dict.pop(ctype, None)

    def conx_disconnect_thyself(self):
        """Disconnect cells this connector refs & this connector ref'd by them.

        To actually delete it, remove it from the list of connectors
        in the Field class.
        """
        logger.debug(" ".join([str(x) for x in ["disconnecting",self.m_id,"between",
            self.m_cell0.m_id,"and",self.m_cell1.m_id]]))
        # For simplicity's sake, we do the work here rather than asking the
        # cells to do it: drop this connector from both endpoint cells...
        for cell in (self.m_cell0, self.m_cell1):
            if self.m_id in cell.m_conx_dict:
                del cell.m_conx_dict[self.m_id]
        # ...and drop our refs to those cells.
        self.m_cell0 = None
        self.m_cell1 = None
gpl-3.0
CubicERP/odoo
addons/purchase/stock.py
1
16217
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import SUPERUSER_ID from openerp.osv import fields, osv from openerp.tools.translate import _ class stock_move(osv.osv): _inherit = 'stock.move' _columns = { 'purchase_line_id': fields.many2one('purchase.order.line', 'Purchase Order Line', ondelete='set null', select=True, readonly=True), } def get_price_unit(self, cr, uid, move, context=None): """ Returns the unit price to store on the quant """ if move.purchase_line_id: return move.price_unit return super(stock_move, self).get_price_unit(cr, uid, move, context=context) def write(self, cr, uid, ids, vals, context=None): if isinstance(ids, (int, long)): ids = [ids] res = super(stock_move, self).write(cr, uid, ids, vals, context=context) from openerp import workflow if vals.get('state') in ['done', 'cancel']: for move in self.browse(cr, uid, ids, context=context): if move.purchase_line_id and move.purchase_line_id.order_id: order_id = move.purchase_line_id.order_id.id # update linked purchase order as superuser as the warehouse # user may not have rights to access purchase.order if 
self.pool.get('purchase.order').test_moves_done(cr, uid, [order_id], context=context): workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_done', cr) if self.pool.get('purchase.order').test_moves_except(cr, uid, [order_id], context=context): workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_cancel', cr) return res def copy(self, cr, uid, id, default=None, context=None): default = default or {} context = context or {} if not default.get('split_from'): #we don't want to propagate the link to the purchase order line except in case of move split default['purchase_line_id'] = False return super(stock_move, self).copy(cr, uid, id, default, context) def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None): if move.purchase_line_id: invoice_line_vals['purchase_line_id'] = move.purchase_line_id.id invoice_line_vals['account_analytic_id'] = move.purchase_line_id.account_analytic_id.id or False invoice_line_id = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context) if move.purchase_line_id: purchase_line = move.purchase_line_id self.pool.get('purchase.order.line').write(cr, uid, [purchase_line.id], { 'invoice_lines': [(4, invoice_line_id)] }, context=context) self.pool.get('purchase.order').write(cr, uid, [purchase_line.order_id.id], { 'invoice_ids': [(4, invoice_line_vals['invoice_id'])], }) purchase_line_obj = self.pool.get('purchase.order.line') purchase_obj = self.pool.get('purchase.order') invoice_line_obj = self.pool.get('account.invoice.line') purchase_id = move.purchase_line_id.order_id.id purchase_line_ids = purchase_line_obj.search(cr, uid, [('order_id', '=', purchase_id), ('invoice_lines', '=', False), '|', ('product_id', '=', False), ('product_id.type', '=', 'service')], context=context) if purchase_line_ids: inv_lines = [] for po_line in purchase_line_obj.browse(cr, uid, purchase_line_ids, context=context): acc_id = 
purchase_obj._choose_account_from_po_line(cr, uid, po_line, context=context) inv_line_data = purchase_obj._prepare_inv_line(cr, uid, acc_id, po_line, context=context) inv_line_id = invoice_line_obj.create(cr, uid, inv_line_data, context=context) inv_lines.append(inv_line_id) po_line.write({'invoice_lines': [(4, inv_line_id)]}) invoice_line_obj.write(cr, uid, inv_lines, {'invoice_id': invoice_line_vals['invoice_id']}, context=context) return invoice_line_id def _get_master_data(self, cr, uid, move, company, context=None): if move.purchase_line_id: purchase_order = move.purchase_line_id.order_id return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id elif move.picking_id: # In case of an extra move, it is better to use the data from the original moves for purchase_move in move.picking_id.move_lines: if purchase_move.purchase_line_id: purchase_order = purchase_move.purchase_line_id.order_id return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id partner = move.picking_id and move.picking_id.partner_id or False code = self.get_code_from_locs(cr, uid, move, context=context) if partner and partner.property_product_pricelist_purchase and code == 'incoming': currency = partner.property_product_pricelist_purchase.currency_id.id return partner, uid, currency return super(stock_move, self)._get_master_data(cr, uid, move, company, context=context) def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None): res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context) if move.purchase_line_id: purchase_line = move.purchase_line_id res['invoice_line_tax_id'] = [(6, 0, [x.id for x in purchase_line.taxes_id])] res['price_unit'] = purchase_line.price_unit res['account_analytic_id'] = purchase_line.account_analytic_id.id return res def _get_moves_taxes(self, cr, uid, moves, context=None): is_extra_move, extra_move_tax = super(stock_move, 
self)._get_moves_taxes(cr, uid, moves, context=context) for move in moves: if move.purchase_line_id: is_extra_move[move.id] = False extra_move_tax[move.picking_id, move.product_id] = [(6, 0, [x.id for x in move.purchase_line_id.taxes_id])] return (is_extra_move, extra_move_tax) def attribute_price(self, cr, uid, move, context=None): """ Attribute price to move, important in inter-company moves or receipts with only one partner """ # The method attribute_price of the parent class sets the price to the standard product # price if move.price_unit is zero. We don't want this behavior in the case of a purchase # order since we can purchase goods which are free of charge (e.g. 5 units offered if 100 # are purchased). if move.purchase_line_id: return code = self.get_code_from_locs(cr, uid, move, context=context) if not move.purchase_line_id and code == 'incoming' and not move.price_unit: partner = move.picking_id and move.picking_id.partner_id or False price = False # If partner given, search price in its purchase pricelist if partner and partner.property_product_pricelist_purchase: pricelist_obj = self.pool.get("product.pricelist") pricelist = partner.property_product_pricelist_purchase.id price = pricelist_obj.price_get(cr, uid, [pricelist], move.product_id.id, move.product_uom_qty, partner, { 'uom': move.product_uom.id, 'date': move.date, })[pricelist] if price: return self.write(cr, uid, [move.id], {'price_unit': price}, context=context) super(stock_move, self).attribute_price(cr, uid, move, context=context) class stock_picking(osv.osv): _inherit = 'stock.picking' def _get_to_invoice(self, cr, uid, ids, name, args, context=None): res = {} for picking in self.browse(cr, uid, ids, context=context): res[picking.id] = False for move in picking.move_lines: if move.purchase_line_id and move.purchase_line_id.order_id.invoice_method == 'picking': if not move.move_orig_ids: res[picking.id] = True return res def _get_picking_to_recompute(self, cr, uid, ids, context=None): 
picking_ids = set() for move in self.pool.get('stock.move').browse(cr, uid, ids, context=context): if move.picking_id and move.purchase_line_id: picking_ids.add(move.picking_id.id) return list(picking_ids) _columns = { 'reception_to_invoice': fields.function(_get_to_invoice, type='boolean', string='Invoiceable on incoming shipment?', help='Does the picking contains some moves related to a purchase order invoiceable on the receipt?', store={ 'stock.move': (_get_picking_to_recompute, ['purchase_line_id', 'picking_id'], 10), }), } def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None): purchase_obj = self.pool.get("purchase.order") purchase_line_obj = self.pool.get('purchase.order.line') invoice_line_obj = self.pool.get('account.invoice.line') invoice_id = super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context) return invoice_id def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, move, context=None): inv_vals = super(stock_picking, self)._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context) if move.purchase_line_id and move.purchase_line_id.order_id: purchase = move.purchase_line_id.order_id inv_vals.update({ 'fiscal_position': purchase.fiscal_position.id, 'payment_term': purchase.payment_term_id.id, }) return inv_vals class stock_warehouse(osv.osv): _inherit = 'stock.warehouse' _columns = { 'buy_to_resupply': fields.boolean('Purchase to resupply this warehouse', help="When products are bought, they can be delivered to this warehouse"), 'buy_pull_id': fields.many2one('procurement.rule', 'BUY rule'), } _defaults = { 'buy_to_resupply': True, } def _get_buy_pull_rule(self, cr, uid, warehouse, context=None): route_obj = self.pool.get('stock.location.route') data_obj = self.pool.get('ir.model.data') try: buy_route_id = data_obj.get_object_reference(cr, uid, 'purchase', 'route_warehouse0_buy')[1] except: buy_route_id = route_obj.search(cr, uid, [('name', 'like', _('Buy'))], 
context=context) buy_route_id = buy_route_id and buy_route_id[0] or False if not buy_route_id: raise osv.except_osv(_('Error!'), _('Can\'t find any generic Buy route.')) return { 'name': self._format_routename(cr, uid, warehouse, _(' Buy'), context=context), 'location_id': warehouse.in_type_id.default_location_dest_id.id, 'route_id': buy_route_id, 'action': 'buy', 'picking_type_id': warehouse.in_type_id.id, 'warehouse_id': warehouse.id, } def create_routes(self, cr, uid, ids, warehouse, context=None): pull_obj = self.pool.get('procurement.rule') res = super(stock_warehouse, self).create_routes(cr, uid, ids, warehouse, context=context) if warehouse.buy_to_resupply: buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context) buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context) res['buy_pull_id'] = buy_pull_id return res def write(self, cr, uid, ids, vals, context=None): pull_obj = self.pool.get('procurement.rule') if isinstance(ids, (int, long)): ids = [ids] if 'buy_to_resupply' in vals: if vals.get("buy_to_resupply"): for warehouse in self.browse(cr, uid, ids, context=context): if not warehouse.buy_pull_id: buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context) buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context) vals['buy_pull_id'] = buy_pull_id else: for warehouse in self.browse(cr, uid, ids, context=context): if warehouse.buy_pull_id: buy_pull_id = pull_obj.unlink(cr, uid, warehouse.buy_pull_id.id, context=context) return super(stock_warehouse, self).write(cr, uid, ids, vals, context=None) def get_all_routes_for_wh(self, cr, uid, warehouse, context=None): all_routes = super(stock_warehouse, self).get_all_routes_for_wh(cr, uid, warehouse, context=context) if warehouse.buy_to_resupply and warehouse.buy_pull_id and warehouse.buy_pull_id.route_id: all_routes += [warehouse.buy_pull_id.route_id.id] return all_routes def _get_all_products_to_resupply(self, cr, uid, warehouse, context=None): res = 
super(stock_warehouse, self)._get_all_products_to_resupply(cr, uid, warehouse, context=context) if warehouse.buy_pull_id and warehouse.buy_pull_id.route_id: for product_id in res: for route in self.pool.get('product.product').browse(cr, uid, product_id, context=context).route_ids: if route.id == warehouse.buy_pull_id.route_id.id: res.remove(product_id) break return res def _handle_renaming(self, cr, uid, warehouse, name, code, context=None): res = super(stock_warehouse, self)._handle_renaming(cr, uid, warehouse, name, code, context=context) pull_obj = self.pool.get('procurement.rule') #change the buy pull rule name if warehouse.buy_pull_id: pull_obj.write(cr, uid, warehouse.buy_pull_id.id, {'name': warehouse.buy_pull_id.name.replace(warehouse.name, name, 1)}, context=context) return res def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None): res = super(stock_warehouse, self).change_route(cr, uid, ids, warehouse, new_reception_step=new_reception_step, new_delivery_step=new_delivery_step, context=context) if warehouse.in_type_id.default_location_dest_id != warehouse.buy_pull_id.location_id: self.pool.get('procurement.rule').write(cr, uid, warehouse.buy_pull_id.id, {'location_id': warehouse.in_type_id.default_location_dest_id.id}, context=context) return res
agpl-3.0
Qining/shaderc
glslc/test/option_dash_c.py
16
1889
# Copyright 2015 The Shaderc Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import expect from glslc_test_framework import inside_glslc_testsuite from placeholder import FileShader def empty_es_310_shader(): return '#version 310 es\n void main() {}\n' @inside_glslc_testsuite('OptionC') class TestSingleDashCSingleFile(expect.ValidObjectFile): """Tests that glslc accepts -c [filename].""" shader = FileShader(empty_es_310_shader(), '.vert') glslc_args = ['-c', shader] @inside_glslc_testsuite('OptionC') class TestSingleFileSingleDashC(expect.ValidObjectFile): """Tests that glslc accepts [filename] -c.""" shader = FileShader(empty_es_310_shader(), '.vert') glslc_args = [shader, '-c'] @inside_glslc_testsuite('OptionC') class TestMultipleFiles(expect.ValidObjectFile): """Tests that glslc accepts -c and multiple source files.""" shader1 = FileShader(empty_es_310_shader(), '.vert') shader2 = FileShader(empty_es_310_shader(), '.frag') glslc_args = ['-c', shader1, shader2] @inside_glslc_testsuite('OptionC') class TestMultipleDashC(expect.ValidObjectFile): """Tests that glslc accepts multiple -c and treated them as one.""" shader1 = FileShader(empty_es_310_shader(), '.vert') shader2 = FileShader(empty_es_310_shader(), '.vert') glslc_args = ['-c', shader1, '-c', '-c', shader2]
apache-2.0
Djabbz/wakatime
wakatime/packages/pygments_py3/pygments/lexers/javascript.py
29
47525
# -*- coding: utf-8 -*- """ pygments.lexers.javascript ~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for JavaScript and related languages. :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, default, using, this from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Other from pygments.util import get_bool_opt, iteritems import pygments.unistring as uni __all__ = ['JavascriptLexer', 'KalLexer', 'LiveScriptLexer', 'DartLexer', 'TypeScriptLexer', 'LassoLexer', 'ObjectiveJLexer', 'CoffeeScriptLexer', 'MaskLexer'] JS_IDENT_START = ('(?:[$_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + ']|\\\\u[a-fA-F0-9]{4})') JS_IDENT_PART = ('(?:[$' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', 'Mn', 'Mc', 'Nd', 'Pc') + u'\u200c\u200d]|\\\\u[a-fA-F0-9]{4})') JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*' class JavascriptLexer(RegexLexer): """ For JavaScript source code. """ name = 'JavaScript' aliases = ['js', 'javascript'] filenames = ['*.js', ] mimetypes = ['application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript', ] flags = re.DOTALL | re.UNICODE | re.MULTILINE tokens = { 'commentsandwhitespace': [ (r'\s+', Text), (r'<!--', Comment), (r'//.*?\n', Comment.Single), (r'/\*.*?\*/', Comment.Multiline) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gim]+\b|\B)', String.Regex, '#pop'), (r'(?=/)', Text, ('#pop', 'badregex')), default('#pop') ], 'badregex': [ (r'\n', Text, '#pop') ], 'root': [ (r'\A#! 
?/.*?\n', Comment), # shebang lines are recognized by node.js (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), include('commentsandwhitespace'), (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|' r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|' r'throw|try|catch|finally|new|delete|typeof|instanceof|void|yield|' r'this)\b', Keyword, 'slashstartsregex'), (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'), (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|' r'extends|final|float|goto|implements|import|int|interface|long|native|' r'package|private|protected|public|short|static|super|synchronized|throws|' r'transient|volatile)\b', Keyword.Reserved), (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant), (r'(Array|Boolean|Date|Error|Function|Math|netscape|' r'Number|Object|Packages|RegExp|String|sun|decodeURI|' r'decodeURIComponent|encodeURI|encodeURIComponent|' r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|' r'window)\b', Name.Builtin), (JS_IDENT, Name.Other), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), (r'"(\\\\|\\"|[^"])*"', String.Double), (r"'(\\\\|\\'|[^'])*'", String.Single), ] } class KalLexer(RegexLexer): """ For `Kal`_ source code. .. _Kal: http://rzimmerman.github.io/kal .. 
versionadded:: 2.0 """ name = 'Kal' aliases = ['kal'] filenames = ['*.kal'] mimetypes = ['text/kal', 'application/kal'] flags = re.DOTALL tokens = { 'commentsandwhitespace': [ (r'\s+', Text), (r'###[^#].*?###', Comment.Multiline), (r'#(?!##[^#]).*?\n', Comment.Single), ], 'functiondef': [ (r'[$a-zA-Z_][\w$]*\s*', Name.Function, '#pop'), include('commentsandwhitespace'), ], 'classdef': [ (r'\binherits\s+from\b', Keyword), (r'[$a-zA-Z_][\w$]*\s*\n', Name.Class, '#pop'), (r'[$a-zA-Z_][\w$]*\s*', Name.Class), include('commentsandwhitespace'), ], 'listcomprehension': [ (r'\]', Punctuation, '#pop'), (r'\b(property|value)\b', Keyword), include('root'), ], 'waitfor': [ (r'\n', Punctuation, '#pop'), (r'\bfrom\b', Keyword), include('root'), ], 'root': [ include('commentsandwhitespace'), (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gim]+\b|\B)', String.Regex), (r'\?|:|_(?=\n)|==?|!=|-(?!>)|[<>+*/-]=?', Operator), (r'\b(and|or|isnt|is|not|but|bitwise|mod|\^|xor|exists|' r'doesnt\s+exist)\b', Operator.Word), (r'(?:\([^()]+\))?\s*>', Name.Function), (r'[{(]', Punctuation), (r'\[', Punctuation, 'listcomprehension'), (r'[})\].,]', Punctuation), (r'\b(function|method|task)\b', Keyword.Declaration, 'functiondef'), (r'\bclass\b', Keyword.Declaration, 'classdef'), (r'\b(safe\s+)?wait\s+for\b', Keyword, 'waitfor'), (r'\b(me|this)(\.[$a-zA-Z_][\w.$]*)?\b', Name.Variable.Instance), (r'(?<![.$])(for(\s+(parallel|series))?|in|of|while|until|' r'break|return|continue|' r'when|if|unless|else|otherwise|except\s+when|' r'throw|raise|fail\s+with|try|catch|finally|new|delete|' r'typeof|instanceof|super|run\s+in\s+parallel|' r'inherits\s+from)\b', Keyword), (r'(?<![.$])(true|false|yes|no|on|off|null|nothing|none|' r'NaN|Infinity|undefined)\b', Keyword.Constant), (r'(Array|Boolean|Date|Error|Function|Math|netscape|' r'Number|Object|Packages|RegExp|String|sun|decodeURI|' r'decodeURIComponent|encodeURI|encodeURIComponent|' r'eval|isFinite|isNaN|parseFloat|parseInt|document|window|' 
r'print)\b', Name.Builtin), (r'[$a-zA-Z_][\w.$]*\s*(:|[+\-*/]?\=)?\b', Name.Variable), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), ('"""', String, 'tdqs'), ("'''", String, 'tsqs'), ('"', String, 'dqs'), ("'", String, 'sqs'), ], 'strings': [ (r'[^#\\\'"]+', String), # note that all kal strings are multi-line. # hashmarks, quotes and backslashes must be parsed one at a time ], 'interpoling_string': [ (r'\}', String.Interpol, "#pop"), include('root') ], 'dqs': [ (r'"', String, '#pop'), (r'\\.|\'', String), # double-quoted string don't need ' escapes (r'#\{', String.Interpol, "interpoling_string"), include('strings') ], 'sqs': [ (r"'", String, '#pop'), (r'#|\\.|"', String), # single quoted strings don't need " escapses include('strings') ], 'tdqs': [ (r'"""', String, '#pop'), (r'\\.|\'|"', String), # no need to escape quotes in triple-string (r'#\{', String.Interpol, "interpoling_string"), include('strings'), ], 'tsqs': [ (r"'''", String, '#pop'), (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings include('strings') ], } class LiveScriptLexer(RegexLexer): """ For `LiveScript`_ source code. .. _LiveScript: http://gkz.github.com/LiveScript/ New in Pygments 1.6. """ name = 'LiveScript' aliases = ['live-script', 'livescript'] filenames = ['*.ls'] mimetypes = ['text/livescript'] flags = re.DOTALL tokens = { 'commentsandwhitespace': [ (r'\s+', Text), (r'/\*.*?\*/', Comment.Multiline), (r'#.*?\n', Comment.Single), ], 'multilineregex': [ include('commentsandwhitespace'), (r'//([gim]+\b|\B)', String.Regex, '#pop'), (r'/', String.Regex), (r'[^/#]+', String.Regex) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'//', String.Regex, ('#pop', 'multilineregex')), (r'/(?! 
)(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gim]+\b|\B)', String.Regex, '#pop'), default('#pop'), ], 'root': [ # this next expr leads to infinite loops root -> slashstartsregex # (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), include('commentsandwhitespace'), (r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|' r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function), (r'\+\+|&&|(?<![.$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|' r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|' r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|' r'[+*`%&|^/])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(?<![.$])(for|own|in|of|while|until|loop|break|' r'return|continue|switch|when|then|if|unless|else|' r'throw|try|catch|finally|new|delete|typeof|instanceof|super|' r'extends|this|class|by|const|var|to|til)\b', Keyword, 'slashstartsregex'), (r'(?<![.$])(true|false|yes|no|on|off|' r'null|NaN|Infinity|undefined|void)\b', Keyword.Constant), (r'(Array|Boolean|Date|Error|Function|Math|netscape|' r'Number|Object|Packages|RegExp|String|sun|decodeURI|' r'decodeURIComponent|encodeURI|encodeURIComponent|' r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b', Name.Builtin), (r'[$a-zA-Z_][\w.\-:$]*\s*[:=]\s', Name.Variable, 'slashstartsregex'), (r'@[$a-zA-Z_][\w.\-:$]*\s*[:=]\s', Name.Variable.Instance, 'slashstartsregex'), (r'@', Name.Other, 'slashstartsregex'), (r'@?[$a-zA-Z_][\w-]*', Name.Other, 'slashstartsregex'), (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float), (r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer), ('"""', String, 'tdqs'), ("'''", String, 'tsqs'), ('"', String, 'dqs'), ("'", String, 'sqs'), (r'\\\S+', String), (r'<\[.*?\]>', String), ], 'strings': [ (r'[^#\\\'"]+', String), # note that all coffee script strings are multi-line. 
# hashmarks, quotes and backslashes must be parsed one at a time ], 'interpoling_string': [ (r'\}', String.Interpol, "#pop"), include('root') ], 'dqs': [ (r'"', String, '#pop'), (r'\\.|\'', String), # double-quoted string don't need ' escapes (r'#\{', String.Interpol, "interpoling_string"), (r'#', String), include('strings') ], 'sqs': [ (r"'", String, '#pop'), (r'#|\\.|"', String), # single quoted strings don't need " escapses include('strings') ], 'tdqs': [ (r'"""', String, '#pop'), (r'\\.|\'|"', String), # no need to escape quotes in triple-string (r'#\{', String.Interpol, "interpoling_string"), (r'#', String), include('strings'), ], 'tsqs': [ (r"'''", String, '#pop'), (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings include('strings') ], } class DartLexer(RegexLexer): """ For `Dart <http://dartlang.org/>`_ source code. .. versionadded:: 1.5 """ name = 'Dart' aliases = ['dart'] filenames = ['*.dart'] mimetypes = ['text/x-dart'] flags = re.MULTILINE | re.DOTALL tokens = { 'root': [ include('string_literal'), (r'#!(.*?)$', Comment.Preproc), (r'\b(import|export)\b', Keyword, 'import_decl'), (r'\b(library|source|part of|part)\b', Keyword), (r'[^\S\n]+', Text), (r'//.*?\n', Comment.Single), (r'/\*.*?\*/', Comment.Multiline), (r'\b(class)\b(\s+)', bygroups(Keyword.Declaration, Text), 'class'), (r'\b(assert|break|case|catch|continue|default|do|else|finally|for|' r'if|in|is|new|return|super|switch|this|throw|try|while)\b', Keyword), (r'\b(abstract|const|extends|factory|final|get|implements|' r'native|operator|set|static|typedef|var)\b', Keyword.Declaration), (r'\b(bool|double|Dynamic|int|num|Object|String|void)\b', Keyword.Type), (r'\b(false|null|true)\b', Keyword.Constant), (r'[~!%^&*+=|?:<>/-]|as\b', Operator), (r'[a-zA-Z_$]\w*:', Name.Label), (r'[a-zA-Z_$]\w*', Name), (r'[(){}\[\],.;]', Punctuation), (r'0[xX][0-9a-fA-F]+', Number.Hex), # DIGIT+ (‘.’ DIGIT*)? EXPONENT? 
(r'\d+(\.\d*)?([eE][+-]?\d+)?', Number), (r'\.\d+([eE][+-]?\d+)?', Number), # ‘.’ DIGIT+ EXPONENT? (r'\n', Text) # pseudo-keyword negate intentionally left out ], 'class': [ (r'[a-zA-Z_$]\w*', Name.Class, '#pop') ], 'import_decl': [ include('string_literal'), (r'\s+', Text), (r'\b(as|show|hide)\b', Keyword), (r'[a-zA-Z_$]\w*', Name), (r'\,', Punctuation), (r'\;', Punctuation, '#pop') ], 'string_literal': [ # Raw strings. (r'r"""([\w\W]*?)"""', String.Double), (r"r'''([\w\W]*?)'''", String.Single), (r'r"(.*?)"', String.Double), (r"r'(.*?)'", String.Single), # Normal Strings. (r'"""', String.Double, 'string_double_multiline'), (r"'''", String.Single, 'string_single_multiline'), (r'"', String.Double, 'string_double'), (r"'", String.Single, 'string_single') ], 'string_common': [ (r"\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|u\{[0-9A-Fa-f]*\}|[a-z'\"$\\])", String.Escape), (r'(\$)([a-zA-Z_]\w*)', bygroups(String.Interpol, Name)), (r'(\$\{)(.*?)(\})', bygroups(String.Interpol, using(this), String.Interpol)) ], 'string_double': [ (r'"', String.Double, '#pop'), (r'[^"$\\\n]+', String.Double), include('string_common'), (r'\$+', String.Double) ], 'string_double_multiline': [ (r'"""', String.Double, '#pop'), (r'[^"$\\]+', String.Double), include('string_common'), (r'(\$|\")+', String.Double) ], 'string_single': [ (r"'", String.Single, '#pop'), (r"[^'$\\\n]+", String.Single), include('string_common'), (r'\$+', String.Single) ], 'string_single_multiline': [ (r"'''", String.Single, '#pop'), (r'[^\'$\\]+', String.Single), include('string_common'), (r'(\$|\')+', String.Single) ] } class TypeScriptLexer(RegexLexer): """ For `TypeScript <http://typescriptlang.org/>`_ source code. .. 
versionadded:: 1.6 """ name = 'TypeScript' aliases = ['ts'] filenames = ['*.ts'] mimetypes = ['text/x-typescript'] flags = re.DOTALL | re.MULTILINE tokens = { 'commentsandwhitespace': [ (r'\s+', Text), (r'<!--', Comment), (r'//.*?\n', Comment.Single), (r'/\*.*?\*/', Comment.Multiline) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gim]+\b|\B)', String.Regex, '#pop'), (r'(?=/)', Text, ('#pop', 'badregex')), default('#pop') ], 'badregex': [ (r'\n', Text, '#pop') ], 'root': [ (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), include('commentsandwhitespace'), (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|' r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|' r'throw|try|catch|finally|new|delete|typeof|instanceof|void|' r'this)\b', Keyword, 'slashstartsregex'), (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'), (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|' r'extends|final|float|goto|implements|import|int|interface|long|native|' r'package|private|protected|public|short|static|super|synchronized|throws|' r'transient|volatile)\b', Keyword.Reserved), (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant), (r'(Array|Boolean|Date|Error|Function|Math|netscape|' r'Number|Object|Packages|RegExp|String|sun|decodeURI|' r'decodeURIComponent|encodeURI|encodeURIComponent|' r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|' r'window)\b', Name.Builtin), # Match stuff like: module name {...} (r'\b(module)(\s*)(\s*[\w?.$][\w?.$]*)(\s*)', bygroups(Keyword.Reserved, Text, Name.Other, Text), 'slashstartsregex'), # Match variable type keywords (r'\b(string|bool|number)\b', Keyword.Type), # Match stuff like: constructor (r'\b(constructor|declare|interface|as|AS)\b', Keyword.Reserved), # Match stuff like: 
super(argument, list) (r'(super)(\s*)(\([\w,?.$\s]+\s*\))', bygroups(Keyword.Reserved, Text), 'slashstartsregex'), # Match stuff like: function() {...} (r'([a-zA-Z_?.$][\w?.$]*)\(\) \{', Name.Other, 'slashstartsregex'), # Match stuff like: (function: return type) (r'([\w?.$][\w?.$]*)(\s*:\s*)([\w?.$][\w?.$]*)', bygroups(Name.Other, Text, Keyword.Type)), (r'[$a-zA-Z_]\w*', Name.Other), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), (r'"(\\\\|\\"|[^"])*"', String.Double), (r"'(\\\\|\\'|[^'])*'", String.Single), ] } class LassoLexer(RegexLexer): """ For `Lasso <http://www.lassosoft.com/>`_ source code, covering both Lasso 9 syntax and LassoScript for Lasso 8.6 and earlier. For Lasso embedded in HTML, use the `LassoHtmlLexer`. Additional options accepted: `builtinshighlighting` If given and ``True``, highlight builtin types, traits, methods, and members (default: ``True``). `requiredelimiters` If given and ``True``, only highlight code between delimiters as Lasso (default: ``False``). .. 
versionadded:: 1.6 """ name = 'Lasso' aliases = ['lasso', 'lassoscript'] filenames = ['*.lasso', '*.lasso[89]'] alias_filenames = ['*.incl', '*.inc', '*.las'] mimetypes = ['text/x-lasso'] flags = re.IGNORECASE | re.DOTALL | re.MULTILINE tokens = { 'root': [ (r'^#!.+lasso9\b', Comment.Preproc, 'lasso'), (r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'), (r'\[noprocess\]', Comment.Preproc, ('delimiters', 'noprocess')), (r'\[', Comment.Preproc, ('delimiters', 'squarebrackets')), (r'<\?(LassoScript|lasso|=)', Comment.Preproc, ('delimiters', 'anglebrackets')), (r'<(!--.*?-->)?', Other, 'delimiters'), (r'\s+', Other), default(('delimiters', 'lassofile')), ], 'delimiters': [ (r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'), (r'\[noprocess\]', Comment.Preproc, 'noprocess'), (r'\[', Comment.Preproc, 'squarebrackets'), (r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'), (r'<(!--.*?-->)?', Other), (r'[^[<]+', Other), ], 'nosquarebrackets': [ (r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'), (r'<', Other), (r'[^<]+', Other), ], 'noprocess': [ (r'\[/noprocess\]', Comment.Preproc, '#pop'), (r'\[', Other), (r'[^[]', Other), ], 'squarebrackets': [ (r'\]', Comment.Preproc, '#pop'), include('lasso'), ], 'anglebrackets': [ (r'\?>', Comment.Preproc, '#pop'), include('lasso'), ], 'lassofile': [ (r'\]|\?>', Comment.Preproc, '#pop'), include('lasso'), ], 'whitespacecomments': [ (r'\s+', Text), (r'//.*?\n', Comment.Single), (r'/\*\*!.*?\*/', String.Doc), (r'/\*.*?\*/', Comment.Multiline), ], 'lasso': [ # whitespace/comments include('whitespacecomments'), # literals (r'\d*\.\d+(e[+-]?\d+)?', Number.Float), (r'0x[\da-f]+', Number.Hex), (r'\d+', Number.Integer), (r'([+-]?)(infinity|NaN)\b', bygroups(Operator, Number)), (r"'", String.Single, 'singlestring'), (r'"', String.Double, 'doublestring'), (r'`[^`]*`', String.Backtick), # names (r'\$[a-z_][\w.]*', Name.Variable), (r'#([a-z_][\w.]*|\d+)', Name.Variable.Instance), 
(r"(\.)('[a-z_][\w.]*')", bygroups(Name.Builtin.Pseudo, Name.Variable.Class)), (r"(self)(\s*->\s*)('[a-z_][\w.]*')", bygroups(Name.Builtin.Pseudo, Operator, Name.Variable.Class)), (r'(\.\.?)([a-z_][\w.]*(=(?!=))?)', bygroups(Name.Builtin.Pseudo, Name.Other.Member)), (r'(->\\?\s*|&\s*)([a-z_][\w.]*(=(?!=))?)', bygroups(Operator, Name.Other.Member)), (r'(self|inherited)\b', Name.Builtin.Pseudo), (r'-[a-z_][\w.]*', Name.Attribute), (r'::\s*[a-z_][\w.]*', Name.Label), (r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|' r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|' r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|' r'Error_InvalidDatabase|Error_InvalidPassword|' r'Error_InvalidUsername|Error_ModuleNotFound|' r'Error_NoError|Error_NoPermission|Error_OutOfMemory|' r'Error_ReqColumnMissing|Error_ReqFieldMissing|' r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|' r'Error_UpdateError)\b', Name.Exception), # definitions (r'(define)(\s+)([a-z_][\w.]*)(\s*=>\s*)(type|trait|thread)\b', bygroups(Keyword.Declaration, Text, Name.Class, Operator, Keyword)), (r'(define)(\s+)([a-z_][\w.]*)(\s*->\s*)([a-z_][\w.]*=?|[-+*/%])', bygroups(Keyword.Declaration, Text, Name.Class, Operator, Name.Function), 'signature'), (r'(define)(\s+)([a-z_][\w.]*)', bygroups(Keyword.Declaration, Text, Name.Function), 'signature'), (r'(public|protected|private|provide)(\s+)(([a-z_][\w.]*=?|[-+*/%])' r'(?=\s*\())', bygroups(Keyword, Text, Name.Function), 'signature'), (r'(public|protected|private|provide)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Text, Name.Function)), # keywords (r'(true|false|none|minimal|full|all|void)\b', Keyword.Constant), (r'(local|var|variable|global|data(?=\s))\b', Keyword.Declaration), (r'(array|date|decimal|duration|integer|map|pair|string|tag|xml|' r'null|bytes|list|queue|set|stack|staticarray|tie)\b', Keyword.Type), (r'([a-z_][\w.]*)(\s+)(in)\b', bygroups(Name, Text, Keyword)), (r'(let|into)(\s+)([a-z_][\w.]*)', bygroups(Keyword, 
Text, Name)), (r'require\b', Keyword, 'requiresection'), (r'(/?)(Namespace_Using)\b', bygroups(Punctuation, Keyword.Namespace)), (r'(/?)(Cache|Database_Names|Database_SchemaNames|' r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|' r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|' r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|' r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|' r'Link_FirstRecord|Link_LastGroup|Link_LastRecord|Link_NextGroup|' r'Link_NextRecord|Link_PrevGroup|Link_PrevRecord|Log|Loop|' r'NoProcess|Output_None|Portal|Private|Protect|Records|Referer|' r'Referrer|Repeating|ResultSet|Rows|Search_Args|Search_Arguments|' r'Select|Sort_Args|Sort_Arguments|Thread_Atomic|Value_List|While|' r'Abort|Case|Else|If_Empty|If_False|If_Null|If_True|Loop_Abort|' r'Loop_Continue|Loop_Count|Params|Params_Up|Return|Return_Value|' r'Run_Children|SOAP_DefineTag|SOAP_LastRequest|SOAP_LastResponse|' r'Tag_Name|ascending|average|by|define|descending|do|equals|' r'frozen|group|handle_failure|import|in|into|join|let|match|max|' r'min|on|order|parent|protected|provide|public|require|returnhome|' r'skip|split_thread|sum|take|thread|to|trait|type|where|with|' r'yield|yieldhome)\b', bygroups(Punctuation, Keyword)), # other (r',', Punctuation, 'commamember'), (r'(and|or|not)\b', Operator.Word), (r'([a-z_][\w.]*)(\s*::\s*[a-z_][\w.]*)?(\s*=(?!=))', bygroups(Name, Name.Label, Operator)), (r'(/?)([\w.]+)', bygroups(Punctuation, Name.Other)), (r'(=)(n?bw|n?ew|n?cn|lte?|gte?|n?eq|n?rx|ft)\b', bygroups(Operator, Operator.Word)), (r':=|[-+*/%=<>&|!?\\]+', Operator), (r'[{}():;,@^]', Punctuation), ], 'singlestring': [ (r"'", String.Single, '#pop'), (r"[^'\\]+", String.Single), include('escape'), (r"\\", String.Single), ], 'doublestring': [ (r'"', String.Double, '#pop'), (r'[^"\\]+', String.Double), include('escape'), (r'\\', String.Double), ], 'escape': [ (r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:]+:|' r'[abefnrtv?"\'\\]|$)', 
String.Escape), ], 'signature': [ (r'=>', Operator, '#pop'), (r'\)', Punctuation, '#pop'), (r'[(,]', Punctuation, 'parameter'), include('lasso'), ], 'parameter': [ (r'\)', Punctuation, '#pop'), (r'-?[a-z_][\w.]*', Name.Attribute, '#pop'), (r'\.\.\.', Name.Builtin.Pseudo), include('lasso'), ], 'requiresection': [ (r'(([a-z_][\w.]*=?|[-+*/%])(?=\s*\())', Name, 'requiresignature'), (r'(([a-z_][\w.]*=?|[-+*/%])(?=(\s*::\s*[\w.]+)?\s*,))', Name), (r'[a-z_][\w.]*=?|[-+*/%]', Name, '#pop'), (r'::\s*[a-z_][\w.]*', Name.Label), (r',', Punctuation), include('whitespacecomments'), ], 'requiresignature': [ (r'(\)(?=(\s*::\s*[\w.]+)?\s*,))', Punctuation, '#pop'), (r'\)', Punctuation, '#pop:2'), (r'-?[a-z_][\w.]*', Name.Attribute), (r'::\s*[a-z_][\w.]*', Name.Label), (r'\.\.\.', Name.Builtin.Pseudo), (r'[(,]', Punctuation), include('whitespacecomments'), ], 'commamember': [ (r'(([a-z_][\w.]*=?|[-+*/%])' r'(?=\s*(\(([^()]*\([^()]*\))*[^)]*\)\s*)?(::[\w.\s]+)?=>))', Name.Function, 'signature'), include('whitespacecomments'), default('#pop'), ], } def __init__(self, **options): self.builtinshighlighting = get_bool_opt( options, 'builtinshighlighting', True) self.requiredelimiters = get_bool_opt( options, 'requiredelimiters', False) self._builtins = set() self._members = set() if self.builtinshighlighting: from pygments.lexers._lasso_builtins import BUILTINS, MEMBERS for key, value in iteritems(BUILTINS): self._builtins.update(value) for key, value in iteritems(MEMBERS): self._members.update(value) RegexLexer.__init__(self, **options) def get_tokens_unprocessed(self, text): stack = ['root'] if self.requiredelimiters: stack.append('delimiters') for index, token, value in \ RegexLexer.get_tokens_unprocessed(self, text, stack): if (token is Name.Other and value.lower() in self._builtins or token is Name.Other.Member and value.lower().rstrip('=') in self._members): yield index, Name.Builtin, value continue yield index, token, value def analyse_text(text): rv = 0.0 if 'bin/lasso9' in 
text: rv += 0.8 if re.search(r'<\?lasso', text, re.I): rv += 0.4 if re.search(r'local\(', text, re.I): rv += 0.4 return rv class ObjectiveJLexer(RegexLexer): """ For Objective-J source code with preprocessor directives. .. versionadded:: 1.3 """ name = 'Objective-J' aliases = ['objective-j', 'objectivej', 'obj-j', 'objj'] filenames = ['*.j'] mimetypes = ['text/x-objective-j'] #: optional Comment or Whitespace _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*' flags = re.DOTALL | re.MULTILINE tokens = { 'root': [ include('whitespace'), # function definition (r'^(' + _ws + r'[+-]' + _ws + r')([(a-zA-Z_].*?[^(])(' + _ws + r'\{)', bygroups(using(this), using(this, state='function_signature'), using(this))), # class definition (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text), 'classname'), (r'(@class|@protocol)(\s*)', bygroups(Keyword, Text), 'forward_classname'), (r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)), include('statements'), ('[{()}]', Punctuation), (';', Punctuation), ], 'whitespace': [ (r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")', bygroups(Comment.Preproc, Text, String.Double)), (r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)', bygroups(Comment.Preproc, Text, String.Double)), (r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")', bygroups(Comment.Preproc, Text, String.Double)), (r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)', bygroups(Comment.Preproc, Text, String.Double)), (r'#if\s+0', Comment.Preproc, 'if0'), (r'#', Comment.Preproc, 'macro'), (r'\n', Text), (r'\s+', Text), (r'\\\n', Text), # line continuation (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), (r'<!--', Comment), ], 'slashstartsregex': [ include('whitespace'), (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gim]+\b|\B)', String.Regex, '#pop'), (r'(?=/)', Text, ('#pop', 'badregex')), default('#pop'), ], 'badregex': [ (r'\n', Text, '#pop'), ], 'statements': [ (r'(L|@)?"', String, 'string'), 
(r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), (r'"(\\\\|\\"|[^"])*"', String.Double), (r"'(\\\\|\\'|[^'])*'", String.Single), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), (r'0[0-7]+[Ll]?', Number.Oct), (r'\d+[Ll]?', Number.Integer), (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|' r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(for|in|while|do|break|return|continue|switch|case|default|if|' r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|' r'prototype|__proto__)\b', Keyword, 'slashstartsregex'), (r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'), (r'(@selector|@private|@protected|@public|@encode|' r'@synchronized|@try|@throw|@catch|@finally|@end|@property|' r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword), (r'(int|long|float|short|double|char|unsigned|signed|void|' r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b', Keyword.Type), (r'(self|super)\b', Name.Builtin), (r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant), (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant), (r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|' r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|' r'SQRT2)\b', Keyword.Constant), (r'(Array|Boolean|Date|Error|Function|Math|netscape|' r'Number|Object|Packages|RegExp|String|sun|decodeURI|' r'decodeURIComponent|encodeURI|encodeURIComponent|' r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|' r'window)\b', Name.Builtin), (r'([$a-zA-Z_]\w*)(' + _ws + r')(?=\()', bygroups(Name.Function, using(this))), (r'[$a-zA-Z_]\w*', Name), ], 'classname': [ # interface definition that inherits (r'([a-zA-Z_]\w*)(' + _ws + r':' + _ws + r')([a-zA-Z_]\w*)?', bygroups(Name.Class, 
using(this), Name.Class), '#pop'), # interface definition for a category (r'([a-zA-Z_]\w*)(' + _ws + r'\()([a-zA-Z_]\w*)(\))', bygroups(Name.Class, using(this), Name.Label, Text), '#pop'), # simple interface / implementation (r'([a-zA-Z_]\w*)', Name.Class, '#pop'), ], 'forward_classname': [ (r'([a-zA-Z_]\w*)(\s*,\s*)', bygroups(Name.Class, Text), '#push'), (r'([a-zA-Z_]\w*)(\s*;?)', bygroups(Name.Class, Text), '#pop'), ], 'function_signature': [ include('whitespace'), # start of a selector w/ parameters (r'(\(' + _ws + r')' # open paren r'([a-zA-Z_]\w+)' # return type r'(' + _ws + r'\)' + _ws + r')' # close paren r'([$a-zA-Z_]\w+' + _ws + r':)', # function name bygroups(using(this), Keyword.Type, using(this), Name.Function), 'function_parameters'), # no-param function (r'(\(' + _ws + r')' # open paren r'([a-zA-Z_]\w+)' # return type r'(' + _ws + r'\)' + _ws + r')' # close paren r'([$a-zA-Z_]\w+)', # function name bygroups(using(this), Keyword.Type, using(this), Name.Function), "#pop"), # no return type given, start of a selector w/ parameters (r'([$a-zA-Z_]\w+' + _ws + r':)', # function name bygroups(Name.Function), 'function_parameters'), # no return type given, no-param function (r'([$a-zA-Z_]\w+)', # function name bygroups(Name.Function), "#pop"), default('#pop'), ], 'function_parameters': [ include('whitespace'), # parameters (r'(\(' + _ws + ')' # open paren r'([^)]+)' # type r'(' + _ws + r'\)' + _ws + r')' # close paren r'([$a-zA-Z_]\w+)', # param name bygroups(using(this), Keyword.Type, using(this), Text)), # one piece of a selector name (r'([$a-zA-Z_]\w+' + _ws + r':)', # function name Name.Function), # smallest possible selector piece (r'(:)', Name.Function), # var args (r'(,' + _ws + r'\.\.\.)', using(this)), # param name (r'([$a-zA-Z_]\w+)', Text), ], 'expression': [ (r'([$a-zA-Z_]\w*)(\()', bygroups(Name.Function, Punctuation)), (r'(\))', Punctuation, "#pop"), ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', 
String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # stray backslash ], 'macro': [ (r'[^/\n]+', Comment.Preproc), (r'/[*](.|\n)*?[*]/', Comment.Multiline), (r'//.*?\n', Comment.Single, '#pop'), (r'/', Comment.Preproc), (r'(?<=\\)\n', Comment.Preproc), (r'\n', Comment.Preproc, '#pop'), ], 'if0': [ (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'), (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'), (r'.*?\n', Comment), ] } def analyse_text(text): if re.search('^\s*@import\s+[<"]', text, re.MULTILINE): # special directive found in most Objective-J files return True return False class CoffeeScriptLexer(RegexLexer): """ For `CoffeeScript`_ source code. .. _CoffeeScript: http://coffeescript.org .. versionadded:: 1.3 """ name = 'CoffeeScript' aliases = ['coffee-script', 'coffeescript', 'coffee'] filenames = ['*.coffee'] mimetypes = ['text/coffeescript'] flags = re.DOTALL tokens = { 'commentsandwhitespace': [ (r'\s+', Text), (r'###[^#].*?###', Comment.Multiline), (r'#(?!##[^#]).*?\n', Comment.Single), ], 'multilineregex': [ (r'[^/#]+', String.Regex), (r'///([gim]+\b|\B)', String.Regex, '#pop'), (r'#\{', String.Interpol, 'interpoling_string'), (r'[/#]', String.Regex), ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'///', String.Regex, ('#pop', 'multilineregex')), (r'/(?! 
)(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gim]+\b|\B)', String.Regex, '#pop'), default('#pop'), ], 'root': [ # this next expr leads to infinite loops root -> slashstartsregex # (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), include('commentsandwhitespace'), (r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|' r'\|\||\\(?=\n)|' r'(<<|>>>?|==?(?!>)|!=?|=(?!>)|-(?!>)|[<>+*`%&|^/])=?', Operator, 'slashstartsregex'), (r'(?:\([^()]*\))?\s*[=-]>', Name.Function), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(?<![.$])(for|own|in|of|while|until|' r'loop|break|return|continue|' r'switch|when|then|if|unless|else|' r'throw|try|catch|finally|new|delete|typeof|instanceof|super|' r'extends|this|class|by)\b', Keyword, 'slashstartsregex'), (r'(?<![.$])(true|false|yes|no|on|off|null|' r'NaN|Infinity|undefined)\b', Keyword.Constant), (r'(Array|Boolean|Date|Error|Function|Math|netscape|' r'Number|Object|Packages|RegExp|String|sun|decodeURI|' r'decodeURIComponent|encodeURI|encodeURIComponent|' r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b', Name.Builtin), (r'[$a-zA-Z_][\w.:$]*\s*[:=]\s', Name.Variable, 'slashstartsregex'), (r'@[$a-zA-Z_][\w.:$]*\s*[:=]\s', Name.Variable.Instance, 'slashstartsregex'), (r'@', Name.Other, 'slashstartsregex'), (r'@?[$a-zA-Z_][\w$]*', Name.Other, 'slashstartsregex'), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), ('"""', String, 'tdqs'), ("'''", String, 'tsqs'), ('"', String, 'dqs'), ("'", String, 'sqs'), ], 'strings': [ (r'[^#\\\'"]+', String), # note that all coffee script strings are multi-line. 
# hashmarks, quotes and backslashes must be parsed one at a time ], 'interpoling_string': [ (r'\}', String.Interpol, "#pop"), include('root') ], 'dqs': [ (r'"', String, '#pop'), (r'\\.|\'', String), # double-quoted string don't need ' escapes (r'#\{', String.Interpol, "interpoling_string"), (r'#', String), include('strings') ], 'sqs': [ (r"'", String, '#pop'), (r'#|\\.|"', String), # single quoted strings don't need " escapses include('strings') ], 'tdqs': [ (r'"""', String, '#pop'), (r'\\.|\'|"', String), # no need to escape quotes in triple-string (r'#\{', String.Interpol, "interpoling_string"), (r'#', String), include('strings'), ], 'tsqs': [ (r"'''", String, '#pop'), (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings include('strings') ], } class MaskLexer(RegexLexer): """ For `Mask <http://github.com/atmajs/MaskJS>`__ markup. .. versionadded:: 2.0 """ name = 'Mask' aliases = ['mask'] filenames = ['*.mask'] mimetypes = ['text/x-mask'] flags = re.MULTILINE | re.IGNORECASE | re.DOTALL tokens = { 'root': [ (r'\s+', Text), (r'//.*?\n', Comment.Single), (r'/\*.*?\*/', Comment.Multiline), (r'[{};>]', Punctuation), (r"'''", String, 'string-trpl-single'), (r'"""', String, 'string-trpl-double'), (r"'", String, 'string-single'), (r'"', String, 'string-double'), (r'([\w-]+)', Name.Tag, 'node'), (r'([^.#;{>\s]+)', Name.Class, 'node'), (r'(#[\w-]+)', Name.Function, 'node'), (r'(\.[\w-]+)', Name.Variable.Class, 'node') ], 'string-base': [ (r'\\.', String.Escape), (r'~\[', String.Interpol, 'interpolation'), (r'.', String.Single), ], 'string-single': [ (r"'", String.Single, '#pop'), include('string-base') ], 'string-double': [ (r'"', String.Single, '#pop'), include('string-base') ], 'string-trpl-single': [ (r"'''", String.Single, '#pop'), include('string-base') ], 'string-trpl-double': [ (r'"""', String.Single, '#pop'), include('string-base') ], 'interpolation': [ (r'\]', String.Interpol, '#pop'), (r'\s*:', String.Interpol, 'expression'), (r'\s*\w+:', 
Name.Other), (r'[^\]]+', String.Interpol) ], 'expression': [ (r'[^\]]+', using(JavascriptLexer), '#pop') ], 'node': [ (r'\s+', Text), (r'\.', Name.Variable.Class, 'node-class'), (r'\#', Name.Function, 'node-id'), (r'style[ \t]*=', Name.Attribute, 'node-attr-style-value'), (r'[\w:-]+[ \t]*=', Name.Attribute, 'node-attr-value'), (r'[\w:-]+', Name.Attribute), (r'[>{;]', Punctuation, '#pop') ], 'node-class': [ (r'[\w-]+', Name.Variable.Class), (r'~\[', String.Interpol, 'interpolation'), default('#pop') ], 'node-id': [ (r'[\w-]+', Name.Function), (r'~\[', String.Interpol, 'interpolation'), default('#pop') ], 'node-attr-value': [ (r'\s+', Text), (r'\w+', Name.Variable, '#pop'), (r"'", String, 'string-single-pop2'), (r'"', String, 'string-double-pop2'), default('#pop') ], 'node-attr-style-value': [ (r'\s+', Text), (r"'", String.Single, 'css-single-end'), (r'"', String.Single, 'css-double-end'), include('node-attr-value') ], 'css-base': [ (r'\s+', Text), (r";", Punctuation), (r"[\w\-]+\s*:", Name.Builtin) ], 'css-single-end': [ include('css-base'), (r"'", String.Single, '#pop:2'), (r"[^;']+", Name.Entity) ], 'css-double-end': [ include('css-base'), (r'"', String.Single, '#pop:2'), (r'[^;"]+', Name.Entity) ], 'string-single-pop2': [ (r"'", String.Single, '#pop:2'), include('string-base') ], 'string-double-pop2': [ (r'"', String.Single, '#pop:2'), include('string-base') ], }
bsd-3-clause
maurofm1992/smartpanel
hello.py
1
2686
from cloudant.client import Cloudant from cloudant.error import CloudantException from cloudant.result import Result, ResultByKey client = Cloudant("39a4348e-3ce1-40cd-b016-1f85569d409e-bluemix", "48e26645f504209f85b4c44d74a4cb14bc0d059a22b361534b78f406a513f8ff", url="https://39a4348e-3ce1-40cd-b016-1f85569d409e-bluemix:48e26645f504209f85b4c44d74a4cb14bc0d059a22b361534b78f406a513f8ff@39a4348e-3ce1-40cd-b016-1f85569d409e-bluemix.cloudant.com") client.connect() databaseName = "coolstuffpart2" myDatabase = client.create_database(databaseName) if myDatabase.exists(): print "'{0}' successfully created.\n".format(databaseName) sampleData = [ [1, "one", "boiling", 100], [2, "two", "hot", 40], [3, "three", "warm", 20], [4, "four", "cold", 10], [5, "five", "freezing", 0] ] # Create documents using the sample data. # Go through each row in the array for document in sampleData: # Retrieve the fields in each row. number = document[0] name = document[1] description = document[2] temperature = document[3] # Create a JSON document that represents # all the data in the row. jsonDocument = { "numberField": number, "nameField": name, "descriptionField": description, "temperatureField": temperature } # Create a document using the Database API. newDocument = myDatabase.create_document(jsonDocument) # Check that the document exists in the database. 
if newDocument.exists(): print "Document '{0}' successfully created.".format(number) result_collection = Result(myDatabase.all_docs()) print "Retrieved minimal document:\n{0}\n".format(result_collection[0]) result_collection = Result(myDatabase.all_docs, include_docs=True) print "Retrieved full document:\n{0}\n".format(result_collection[0]) result_collection = Result(myDatabase.all_docs) print "Retrieved minimal document:\n{0}\n".format(result_collection[0]) result_collection = Result(myDatabase.all_docs, include_docs=True) print "Retrieved full document:\n{0}\n".format(result_collection[0]) end_point = '{0}/{1}'.format("https://39a4348e-3ce1-40cd-b016-185569d409e-bluemix:48e26645f504209f85b4c44d74a4cb14bc0d059a22b361534b78f406a513f8ff@39a4348e-3ce1-40cd-b016-1f85569d409e-bluemix.cloudant.com", "coolstuffpart2" + "/_all_docs") params = {'include_docs': 'true'} response = client.r_session.get(end_point, params=params) print "{0}\n".format(response.json()) try : client.delete_database(databaseName) except CloudantException: print "There was a problem deleting '{0}'.\n".format(databaseName) else: print "'{0}' successfully deleted.\n".format(databaseName) client.disconnect()
apache-2.0
ThetaTauMiami/ThetaTauMiami-old
ThetaTauMiami/settings.py
1
2517
""" Django settings for ThetaTauMiami project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ import dj_database_url # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'q=asy$r^!q72tp@x2f6kxg83d)+@mg59u0v8#pby^bv(=n4__0' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = ['*'] TEMPLATE_DIRS = ( os.path.join(BASE_DIR, 'templates/'), ) MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = os.path.join(BASE_DIR, 'media/') # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'marketing', 'info', 'articles', ) MIDDLEWARE_CLASSES = ( 'django.middleware.gzip.GZipMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'htmlmin.middleware.HtmlMinifyMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'ThetaTauMiami.urls' WSGI_APPLICATION = 'ThetaTauMiami.wsgi.application' # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases DATABASES = dict() # Production setting #DATABASES['default'] = { # "ENGINE" : #} # Local setting DATABASES['default'] = { "ENGINE" : 'django.db.backends.sqlite3', "NAME": os.path.join(BASE_DIR, 'db.sqlite3') } 
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static/'), )
apache-2.0
arthru/OpenUpgrade
addons/website_mail/tests/test_controllers.py
390
1644
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.addons.mail.tests.common import TestMail from openerp.addons.website_mail.controllers.main import WebsiteMail from openerp.tools import mute_logger, email_split class TestControllers(TestMail): def test_00_subscribe(self): # from openerp.addons.web.http import request # print request cr, uid = self.cr, self.uid # context = { } # email = 'Marcel Dupuis <marcel.dupuis@example.com>' # website_mail = WebsiteMail() # pid = website_mail._find_or_create_partner(email, context) # partner = self.res_partner.browse(cr, uid, pid) # print partner.name, partner.email
agpl-3.0
Dfelker/ansible
lib/ansible/playbook/play_context.py
6
14450
# -*- coding: utf-8 -*- # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import pipes import random import re from ansible import constants as C from ansible.errors import AnsibleError from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base from ansible.template import Templar from ansible.utils.boolean import boolean from ansible.utils.unicode import to_unicode __all__ = ['PlayContext'] SU_PROMPT_LOCALIZATIONS = [ 'Password', '암호', 'パスワード', 'Adgangskode', 'Contraseña', 'Contrasenya', 'Hasło', 'Heslo', 'Jelszó', 'Lösenord', 'Mật khẩu', 'Mot de passe', 'Parola', 'Parool', 'Pasahitza', 'Passord', 'Passwort', 'Salasana', 'Sandi', 'Senha', 'Wachtwoord', 'ססמה', 'Лозинка', 'Парола', 'Пароль', 'गुप्तशब्द', 'शब्दकूट', 'సంకేతపదము', 'හස්පදය', '密码', '密碼', ] # the magic variable mapping dictionary below is used to translate # host/inventory variables to fields in the PlayContext # object. The dictionary values are tuples, to account for aliases # in variable names. 
MAGIC_VARIABLE_MAPPING = dict( connection = ('ansible_connection',), remote_addr = ('ansible_ssh_host', 'ansible_host'), remote_user = ('ansible_ssh_user', 'ansible_user'), port = ('ansible_ssh_port', 'ansible_port'), password = ('ansible_ssh_pass', 'ansible_password'), private_key_file = ('ansible_ssh_private_key_file', 'ansible_private_key_file'), shell = ('ansible_shell_type',), become = ('ansible_become',), become_method = ('ansible_become_method',), become_user = ('ansible_become_user',), become_pass = ('ansible_become_password','ansible_become_pass'), become_exe = ('ansible_become_exe',), become_flags = ('ansible_become_flags',), sudo = ('ansible_sudo',), sudo_user = ('ansible_sudo_user',), sudo_pass = ('ansible_sudo_password', 'ansible_sudo_pass'), sudo_exe = ('ansible_sudo_exe',), sudo_flags = ('ansible_sudo_flags',), su = ('ansible_su',), su_user = ('ansible_su_user',), su_pass = ('ansible_su_password', 'ansible_su_pass'), su_exe = ('ansible_su_exe',), su_flags = ('ansible_su_flags',), ) SU_PROMPT_LOCALIZATIONS = [ 'Password', '암호', 'パスワード', 'Adgangskode', 'Contraseña', 'Contrasenya', 'Hasło', 'Heslo', 'Jelszó', 'Lösenord', 'Mật khẩu', 'Mot de passe', 'Parola', 'Parool', 'Pasahitza', 'Passord', 'Passwort', 'Salasana', 'Sandi', 'Senha', 'Wachtwoord', 'ססמה', 'Лозинка', 'Парола', 'Пароль', 'गुप्तशब्द', 'शब्दकूट', 'సంకేతపదము', 'හස්පදය', '密码', '密碼', ] class PlayContext(Base): ''' This class is used to consolidate the connection information for hosts in a play and child tasks, where the task may override some connection/authentication information. 
''' # connection fields, some are inherited from Base: # (connection, port, remote_user, environment, no_log) _remote_addr = FieldAttribute(isa='string') _password = FieldAttribute(isa='string') _private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE) _timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT) _shell = FieldAttribute(isa='string') # privilege escalation fields _become = FieldAttribute(isa='bool') _become_method = FieldAttribute(isa='string') _become_user = FieldAttribute(isa='string') _become_pass = FieldAttribute(isa='string') _become_exe = FieldAttribute(isa='string') _become_flags = FieldAttribute(isa='string') _prompt = FieldAttribute(isa='string') # backwards compatibility fields for sudo/su _sudo_exe = FieldAttribute(isa='string') _sudo_flags = FieldAttribute(isa='string') _sudo_pass = FieldAttribute(isa='string') _su_exe = FieldAttribute(isa='string') _su_flags = FieldAttribute(isa='string') _su_pass = FieldAttribute(isa='string') # general flags _verbosity = FieldAttribute(isa='int', default=0) _only_tags = FieldAttribute(isa='set', default=set()) _skip_tags = FieldAttribute(isa='set', default=set()) _check_mode = FieldAttribute(isa='bool', default=False) _force_handlers = FieldAttribute(isa='bool', default=False) _start_at_task = FieldAttribute(isa='string') _step = FieldAttribute(isa='bool', default=False) _diff = FieldAttribute(isa='bool', default=False) def __init__(self, play=None, options=None, passwords=None): super(PlayContext, self).__init__() if passwords is None: passwords = {} self.password = passwords.get('conn_pass','') self.become_pass = passwords.get('become_pass','') #TODO: just pull options setup to above? # set options before play to allow play to override them if options: self.set_options(options) if play: self.set_play(play) def set_play(self, play): ''' Configures this connection information instance with data from the play class. 
''' if play.connection: self.connection = play.connection if play.remote_user: self.remote_user = play.remote_user if play.port: self.port = int(play.port) if play.become is not None: self.become = play.become if play.become_method: self.become_method = play.become_method if play.become_user: self.become_user = play.become_user # non connection related self.no_log = play.no_log if play.force_handlers is not None: self.force_handlers = play.force_handlers def set_options(self, options): ''' Configures this connection information instance with data from options specified by the user on the command line. These have a higher precedence than those set on the play or host. ''' if options.connection: self.connection = options.connection self.remote_user = options.remote_user self.private_key_file = options.private_key_file # privilege escalation self.become = options.become self.become_method = options.become_method self.become_user = options.become_user # general flags (should we move out?) if options.verbosity: self.verbosity = options.verbosity #if options.no_log: # self.no_log = boolean(options.no_log) if options.check: self.check_mode = boolean(options.check) if hasattr(options, 'force_handlers') and options.force_handlers: self.force_handlers = boolean(options.force_handlers) if hasattr(options, 'step') and options.step: self.step = boolean(options.step) if hasattr(options, 'start_at_task') and options.start_at_task: self.start_at_task = to_unicode(options.start_at_task) if hasattr(options, 'diff') and options.diff: self.diff = boolean(options.diff) # get the tag info from options, converting a comma-separated list # of values into a proper list if need be. 
We check to see if the # options have the attribute, as it is not always added via the CLI if hasattr(options, 'tags'): if isinstance(options.tags, list): self.only_tags.update(options.tags) elif isinstance(options.tags, basestring): self.only_tags.update(options.tags.split(',')) if len(self.only_tags) == 0: self.only_tags = set(['all']) if hasattr(options, 'skip_tags'): if isinstance(options.skip_tags, list): self.skip_tags.update(options.skip_tags) elif isinstance(options.skip_tags, basestring): self.skip_tags.update(options.skip_tags.split(',')) def set_task_and_variable_override(self, task, variables): ''' Sets attributes from the task if they are set, which will override those from the play. ''' new_info = self.copy() # loop through a subset of attributes on the task object and set # connection fields based on their values for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'no_log'): if hasattr(task, attr): attr_val = getattr(task, attr) if attr_val is not None: setattr(new_info, attr, attr_val) # finally, use the MAGIC_VARIABLE_MAPPING dictionary to update this # connection info object with 'magic' variables from the variable list for (attr, variable_names) in MAGIC_VARIABLE_MAPPING.iteritems(): for variable_name in variable_names: if variable_name in variables: setattr(new_info, attr, variables[variable_name]) # become legacy updates if not new_info.become_pass: if new_info.become_method == 'sudo' and new_info.sudo_pass: setattr(new_info, 'become_pass', new_info.sudo_pass) elif new_info.become_method == 'su' and new_info.su_pass: setattr(new_info, 'become_pass', new_info.su_pass) return new_info def make_become_cmd(self, cmd, executable=None): """ helper function to create privilege escalation commands """ prompt = None success_key = None if executable is None: executable = C.DEFAULT_EXECUTABLE if self.become: becomecmd = None randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) 
success_key = 'BECOME-SUCCESS-%s' % randbits #executable = executable or '$SHELL' success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd)) if self.become_method == 'sudo': # Rather than detect if sudo wants a password this time, -k makes sudo always ask for # a password if one is required. Passing a quoted compound command to sudo (or sudo -s) # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted # string to the user's shell. We loop reading output until we see the randomly-generated # sudo prompt set with the -p option. prompt = '[sudo via ansible, key=%s] password: ' % randbits exe = self.become_exe or self.sudo_exe or 'sudo' flags = self.become_flags or self.sudo_flags or '' becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \ (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, success_cmd) elif self.become_method == 'su': def detect_su_prompt(data): SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' 
for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE) return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data)) prompt = detect_su_prompt exe = self.become_exe or self.su_exe or 'su' flags = self.become_flags or self.su_flags or '' becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd) elif self.become_method == 'pbrun': prompt='assword:' exe = self.become_exe or 'pbrun' flags = self.become_flags or '' becomecmd = '%s -b %s -u %s %s' % (exe, flags, self.become_user, success_cmd) elif self.become_method == 'pfexec': exe = self.become_exe or 'pfexec' flags = self.become_flags or '' # No user as it uses it's own exec_attr to figure it out becomecmd = '%s %s "%s"' % (exe, flags, success_cmd) else: raise AnsibleError("Privilege escalation method not found: %s" % self.become_method) self.prompt = prompt self.success_key = success_key return ('%s -c ' % executable) + pipes.quote(becomecmd) return cmd def update_vars(self, variables): ''' Adds 'magic' variables relating to connections to the variable dictionary provided. In case users need to access from the play, this is a legacy from runner. ''' #FIXME: remove password? possibly add become/sudo settings for special_var in ['ansible_connection', 'ansible_ssh_host', 'ansible_ssh_pass', 'ansible_ssh_port', 'ansible_ssh_user', 'ansible_ssh_private_key_file']: if special_var not in variables: for prop, varnames in MAGIC_VARIABLE_MAPPING.items(): if special_var in varnames: variables[special_var] = getattr(self, prop)
gpl-3.0